From e9445157de8c2585b2f05c9cab8a6aaa8fa97fd9 Mon Sep 17 00:00:00 2001 From: Roman Date: Tue, 24 Sep 2024 14:56:08 -0700 Subject: [PATCH 01/11] delete old stuff --- .circleci/check_pr_status.sh | 26 - .circleci/config.yml | 359 - .coveragerc | 7 - .dockerignore | 21 - .flake8 | 4 - .github/ISSUE_TEMPLATE/bug_report.yaml | 59 - .github/ISSUE_TEMPLATE/feature_request.yaml | 38 - .github/PULL_REQUEST_TEMPLATE/bug_fix.md | 59 - .../PULL_REQUEST_TEMPLATE/feature_change.md | 54 - .../performance_improvement.md | 55 - .github/auto_assign.yml | 7 - .github/dependabot.yml | 8 - .github/pull_request_template.md | 10 - .github/workflows/auto-assign.yml | 15 - .github/workflows/docker_release.yml | 51 - .../e2e-multiple-bittensor-tests.yml | 115 - .github/workflows/e2e-subtensor-tests.yaml | 105 - .github/workflows/release.yml | 72 - .gitignore | 216 - .test_durations | 268 - Dockerfile | 40 - LICENSE | 16 - Makefile | 26 - README.md | 435 -- VERSION | 1 - bin/btcli | 50 - bittensor/__init__.py | 371 -- bittensor/axon.py | 1528 ----- bittensor/btlogging/__init__.py | 28 - bittensor/btlogging/defines.py | 28 - bittensor/btlogging/format.py | 222 - bittensor/btlogging/helpers.py | 88 - bittensor/btlogging/loggingmachine.py | 511 -- bittensor/chain_data.py | 1204 ---- bittensor/cli.py | 398 -- bittensor/commands/__init__.py | 131 - bittensor/commands/check_coldkey_swap.py | 128 - bittensor/commands/delegates.py | 1153 ---- bittensor/commands/identity.py | 344 - bittensor/commands/inspect.py | 279 - bittensor/commands/list.py | 128 - bittensor/commands/metagraph.py | 268 - bittensor/commands/misc.py | 117 - bittensor/commands/network.py | 672 -- bittensor/commands/overview.py | 778 --- bittensor/commands/register.py | 613 -- bittensor/commands/root.py | 681 -- bittensor/commands/senate.py | 670 -- bittensor/commands/stake.py | 1401 ---- bittensor/commands/transfer.py | 133 - bittensor/commands/unstake.py | 443 -- bittensor/commands/utils.py | 283 - bittensor/commands/wallets.py | 
1101 ---- bittensor/commands/weights.py | 290 - bittensor/config.py | 415 -- bittensor/constants.py | 43 - bittensor/dendrite.py | 868 --- bittensor/errors.py | 185 - bittensor/extrinsics/__init__.py | 16 - bittensor/extrinsics/commit_weights.py | 127 - bittensor/extrinsics/delegation.py | 528 -- bittensor/extrinsics/network.py | 250 - bittensor/extrinsics/prometheus.py | 140 - bittensor/extrinsics/registration.py | 543 -- bittensor/extrinsics/root.py | 237 - bittensor/extrinsics/senate.py | 275 - bittensor/extrinsics/serving.py | 286 - bittensor/extrinsics/set_weights.py | 136 - bittensor/extrinsics/staking.py | 760 --- bittensor/extrinsics/transfer.py | 164 - bittensor/extrinsics/unstaking.py | 470 -- bittensor/keyfile.py | 866 --- bittensor/metagraph.py | 1191 ---- bittensor/mock/__init__.py | 18 - bittensor/mock/keyfile_mock.py | 90 - bittensor/mock/subtensor_mock.py | 1469 ----- bittensor/mock/wallet_mock.py | 127 - bittensor/stream.py | 152 - bittensor/subnets.py | 74 - bittensor/subtensor.py | 5871 ----------------- bittensor/synapse.py | 864 --- bittensor/tensor.py | 250 - bittensor/threadpool.py | 295 - bittensor/types.py | 42 - bittensor/utils/__init__.py | 282 - bittensor/utils/_register_cuda.py | 126 - bittensor/utils/axon_utils.py | 38 - bittensor/utils/balance.py | 285 - bittensor/utils/formatting.py | 123 - bittensor/utils/networking.py | 198 - bittensor/utils/registration.py | 1170 ---- bittensor/utils/subtensor.py | 173 - bittensor/utils/test_utils.py | 22 - bittensor/utils/version.py | 103 - bittensor/utils/wallet_utils.py | 168 - bittensor/utils/weight_utils.py | 406 -- bittensor/wallet.py | 873 --- contrib/CODE_REVIEW_DOCS.md | 72 - contrib/CONTRIBUTING.md | 299 - contrib/DEBUGGING.md | 161 - contrib/DEVELOPMENT_WORKFLOW.md | 159 - contrib/RELEASE_GUIDELINES.md | 87 - contrib/STYLE.md | 350 - contrib/TESTING.md | 94 - docker-compose.yml | 10 - example.env | 5 - mypy.ini | 18 - requirements/cubit.txt | 3 - requirements/dev.txt | 20 - 
requirements/prod.txt | 35 - requirements/torch.txt | 1 - scripts/check_compatibility.sh | 76 - scripts/check_pre_submit.sh | 18 - scripts/check_requirements_changes.sh | 10 - scripts/create_wallet.sh | 13 - scripts/environments/README.md | 21 - scripts/environments/apple_m1_environment.yml | 272 - scripts/install.sh | 298 - scripts/post_install_cli.py | 29 - scripts/run.sh | 18 - setup.py | 98 - tests/__init__.py | 18 - tests/e2e_tests/__init__.py | 0 tests/e2e_tests/conftest.py | 104 - tests/e2e_tests/multistep/__init__.py | 0 tests/e2e_tests/multistep/test_axon.py | 112 - tests/e2e_tests/multistep/test_dendrite.py | 164 - tests/e2e_tests/multistep/test_emissions.py | 283 - tests/e2e_tests/multistep/test_incentive.py | 247 - tests/e2e_tests/subcommands/__init__.py | 0 .../subcommands/delegation/__init__.py | 0 .../delegation/test_set_delegate_take.py | 61 - .../subcommands/hyperparams/__init__.py | 0 .../hyperparams/test_liquid_alpha.py | 278 - .../subcommands/register/__init__.py | 0 .../subcommands/register/test_swap_hotkey.py | 566 -- tests/e2e_tests/subcommands/root/__init__.py | 0 .../root/test_root_delegate_list.py | 24 - .../test_root_register_add_member_senate.py | 120 - .../subcommands/root/test_root_senate_vote.py | 49 - .../root/test_root_view_proposal.py | 45 - tests/e2e_tests/subcommands/stake/__init__.py | 0 .../subcommands/stake/test_childkeys.py | 472 -- .../stake/test_stake_add_remove.py | 81 - .../subcommands/stake/test_stake_show.py | 37 - .../e2e_tests/subcommands/subnet/__init__.py | 0 .../e2e_tests/subcommands/subnet/test_list.py | 29 - .../subcommands/subnet/test_metagraph.py | 122 - .../e2e_tests/subcommands/wallet/__init__.py | 0 .../subcommands/wallet/test_faucet.py | 92 - .../e2e_tests/subcommands/wallet/test_list.py | 72 - .../subcommands/wallet/test_transfer.py | 35 - .../wallet/test_wallet_creations.py | 505 -- .../e2e_tests/subcommands/weights/__init__.py | 0 .../weights/test_commit_weights.py | 246 - tests/e2e_tests/utils.py | 214 
- tests/helpers/__init__.py | 32 - tests/helpers/helpers.py | 172 - tests/integration_tests/__init__.py | 0 tests/integration_tests/test_cli.py | 2752 -------- .../integration_tests/test_cli_no_network.py | 1533 ----- .../test_metagraph_integration.py | 114 - .../test_subtensor_integration.py | 850 --- tests/pytest.ini | 3 - tests/unit_tests/__init__.py | 0 tests/unit_tests/conftest.py | 13 - .../unit_tests/extrinsics/test_delegation.py | 459 -- tests/unit_tests/extrinsics/test_init.py | 49 - tests/unit_tests/extrinsics/test_network.py | 157 - .../unit_tests/extrinsics/test_prometheus.py | 154 - .../extrinsics/test_registration.py | 401 -- tests/unit_tests/extrinsics/test_root.py | 308 - tests/unit_tests/extrinsics/test_senate.py | 237 - tests/unit_tests/extrinsics/test_serving.py | 374 -- .../unit_tests/extrinsics/test_set_weights.py | 102 - tests/unit_tests/extrinsics/test_staking.py | 551 -- tests/unit_tests/extrinsics/test_unstaking.py | 332 - tests/unit_tests/factories/__init__.py | 0 tests/unit_tests/factories/neuron_factory.py | 63 - tests/unit_tests/test_axon.py | 781 --- tests/unit_tests/test_chain_data.py | 621 -- tests/unit_tests/test_dendrite.py | 415 -- tests/unit_tests/test_keyfile.py | 643 -- tests/unit_tests/test_logging.py | 170 - tests/unit_tests/test_metagraph.py | 206 - tests/unit_tests/test_overview.py | 266 - tests/unit_tests/test_subtensor.py | 2353 ------- tests/unit_tests/test_synapse.py | 266 - tests/unit_tests/test_tensor.py | 243 - tests/unit_tests/test_wallet.py | 517 -- tests/unit_tests/utils/__init__.py | 0 tests/unit_tests/utils/test_balance.py | 509 -- tests/unit_tests/utils/test_networking.py | 167 - tests/unit_tests/utils/test_registration.py | 45 - tests/unit_tests/utils/test_subtensor.py | 99 - tests/unit_tests/utils/test_utils.py | 328 - tests/unit_tests/utils/test_version.py | 168 - tests/unit_tests/utils/test_weight_utils.py | 534 -- 198 files changed, 59957 deletions(-) delete mode 100755 .circleci/check_pr_status.sh delete 
mode 100644 .circleci/config.yml delete mode 100644 .coveragerc delete mode 100644 .dockerignore delete mode 100644 .flake8 delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.yaml delete mode 100644 .github/PULL_REQUEST_TEMPLATE/bug_fix.md delete mode 100644 .github/PULL_REQUEST_TEMPLATE/feature_change.md delete mode 100644 .github/PULL_REQUEST_TEMPLATE/performance_improvement.md delete mode 100644 .github/auto_assign.yml delete mode 100644 .github/dependabot.yml delete mode 100644 .github/pull_request_template.md delete mode 100644 .github/workflows/auto-assign.yml delete mode 100644 .github/workflows/docker_release.yml delete mode 100644 .github/workflows/e2e-multiple-bittensor-tests.yml delete mode 100644 .github/workflows/e2e-subtensor-tests.yaml delete mode 100644 .github/workflows/release.yml delete mode 100644 .gitignore delete mode 100644 .test_durations delete mode 100644 Dockerfile delete mode 100644 LICENSE delete mode 100644 Makefile delete mode 100644 README.md delete mode 100644 VERSION delete mode 100755 bin/btcli delete mode 100644 bittensor/__init__.py delete mode 100644 bittensor/axon.py delete mode 100644 bittensor/btlogging/__init__.py delete mode 100644 bittensor/btlogging/defines.py delete mode 100644 bittensor/btlogging/format.py delete mode 100644 bittensor/btlogging/helpers.py delete mode 100644 bittensor/btlogging/loggingmachine.py delete mode 100644 bittensor/chain_data.py delete mode 100644 bittensor/cli.py delete mode 100644 bittensor/commands/__init__.py delete mode 100644 bittensor/commands/check_coldkey_swap.py delete mode 100644 bittensor/commands/delegates.py delete mode 100644 bittensor/commands/identity.py delete mode 100644 bittensor/commands/inspect.py delete mode 100644 bittensor/commands/list.py delete mode 100644 bittensor/commands/metagraph.py delete mode 100644 bittensor/commands/misc.py delete mode 100644 bittensor/commands/network.py delete mode 100644 
bittensor/commands/overview.py delete mode 100644 bittensor/commands/register.py delete mode 100644 bittensor/commands/root.py delete mode 100644 bittensor/commands/senate.py delete mode 100644 bittensor/commands/stake.py delete mode 100644 bittensor/commands/transfer.py delete mode 100644 bittensor/commands/unstake.py delete mode 100644 bittensor/commands/utils.py delete mode 100644 bittensor/commands/wallets.py delete mode 100644 bittensor/commands/weights.py delete mode 100644 bittensor/config.py delete mode 100644 bittensor/constants.py delete mode 100644 bittensor/dendrite.py delete mode 100644 bittensor/errors.py delete mode 100644 bittensor/extrinsics/__init__.py delete mode 100644 bittensor/extrinsics/commit_weights.py delete mode 100644 bittensor/extrinsics/delegation.py delete mode 100644 bittensor/extrinsics/network.py delete mode 100644 bittensor/extrinsics/prometheus.py delete mode 100644 bittensor/extrinsics/registration.py delete mode 100644 bittensor/extrinsics/root.py delete mode 100644 bittensor/extrinsics/senate.py delete mode 100644 bittensor/extrinsics/serving.py delete mode 100644 bittensor/extrinsics/set_weights.py delete mode 100644 bittensor/extrinsics/staking.py delete mode 100644 bittensor/extrinsics/transfer.py delete mode 100644 bittensor/extrinsics/unstaking.py delete mode 100644 bittensor/keyfile.py delete mode 100644 bittensor/metagraph.py delete mode 100644 bittensor/mock/__init__.py delete mode 100644 bittensor/mock/keyfile_mock.py delete mode 100644 bittensor/mock/subtensor_mock.py delete mode 100644 bittensor/mock/wallet_mock.py delete mode 100644 bittensor/stream.py delete mode 100644 bittensor/subnets.py delete mode 100644 bittensor/subtensor.py delete mode 100644 bittensor/synapse.py delete mode 100644 bittensor/tensor.py delete mode 100644 bittensor/threadpool.py delete mode 100644 bittensor/types.py delete mode 100644 bittensor/utils/__init__.py delete mode 100644 bittensor/utils/_register_cuda.py delete mode 100644 
bittensor/utils/axon_utils.py delete mode 100644 bittensor/utils/balance.py delete mode 100644 bittensor/utils/formatting.py delete mode 100644 bittensor/utils/networking.py delete mode 100644 bittensor/utils/registration.py delete mode 100644 bittensor/utils/subtensor.py delete mode 100644 bittensor/utils/test_utils.py delete mode 100644 bittensor/utils/version.py delete mode 100644 bittensor/utils/wallet_utils.py delete mode 100644 bittensor/utils/weight_utils.py delete mode 100644 bittensor/wallet.py delete mode 100644 contrib/CODE_REVIEW_DOCS.md delete mode 100644 contrib/CONTRIBUTING.md delete mode 100644 contrib/DEBUGGING.md delete mode 100644 contrib/DEVELOPMENT_WORKFLOW.md delete mode 100644 contrib/RELEASE_GUIDELINES.md delete mode 100644 contrib/STYLE.md delete mode 100644 contrib/TESTING.md delete mode 100644 docker-compose.yml delete mode 100644 example.env delete mode 100644 mypy.ini delete mode 100644 requirements/cubit.txt delete mode 100644 requirements/dev.txt delete mode 100644 requirements/prod.txt delete mode 100644 requirements/torch.txt delete mode 100755 scripts/check_compatibility.sh delete mode 100755 scripts/check_pre_submit.sh delete mode 100755 scripts/check_requirements_changes.sh delete mode 100755 scripts/create_wallet.sh delete mode 100644 scripts/environments/README.md delete mode 100644 scripts/environments/apple_m1_environment.yml delete mode 100755 scripts/install.sh delete mode 100644 scripts/post_install_cli.py delete mode 100755 scripts/run.sh delete mode 100644 setup.py delete mode 100644 tests/__init__.py delete mode 100644 tests/e2e_tests/__init__.py delete mode 100644 tests/e2e_tests/conftest.py delete mode 100644 tests/e2e_tests/multistep/__init__.py delete mode 100644 tests/e2e_tests/multistep/test_axon.py delete mode 100644 tests/e2e_tests/multistep/test_dendrite.py delete mode 100644 tests/e2e_tests/multistep/test_emissions.py delete mode 100644 tests/e2e_tests/multistep/test_incentive.py delete mode 100644 
tests/e2e_tests/subcommands/__init__.py delete mode 100644 tests/e2e_tests/subcommands/delegation/__init__.py delete mode 100644 tests/e2e_tests/subcommands/delegation/test_set_delegate_take.py delete mode 100644 tests/e2e_tests/subcommands/hyperparams/__init__.py delete mode 100644 tests/e2e_tests/subcommands/hyperparams/test_liquid_alpha.py delete mode 100644 tests/e2e_tests/subcommands/register/__init__.py delete mode 100644 tests/e2e_tests/subcommands/register/test_swap_hotkey.py delete mode 100644 tests/e2e_tests/subcommands/root/__init__.py delete mode 100644 tests/e2e_tests/subcommands/root/test_root_delegate_list.py delete mode 100644 tests/e2e_tests/subcommands/root/test_root_register_add_member_senate.py delete mode 100644 tests/e2e_tests/subcommands/root/test_root_senate_vote.py delete mode 100644 tests/e2e_tests/subcommands/root/test_root_view_proposal.py delete mode 100644 tests/e2e_tests/subcommands/stake/__init__.py delete mode 100644 tests/e2e_tests/subcommands/stake/test_childkeys.py delete mode 100644 tests/e2e_tests/subcommands/stake/test_stake_add_remove.py delete mode 100644 tests/e2e_tests/subcommands/stake/test_stake_show.py delete mode 100644 tests/e2e_tests/subcommands/subnet/__init__.py delete mode 100644 tests/e2e_tests/subcommands/subnet/test_list.py delete mode 100644 tests/e2e_tests/subcommands/subnet/test_metagraph.py delete mode 100644 tests/e2e_tests/subcommands/wallet/__init__.py delete mode 100644 tests/e2e_tests/subcommands/wallet/test_faucet.py delete mode 100644 tests/e2e_tests/subcommands/wallet/test_list.py delete mode 100644 tests/e2e_tests/subcommands/wallet/test_transfer.py delete mode 100644 tests/e2e_tests/subcommands/wallet/test_wallet_creations.py delete mode 100644 tests/e2e_tests/subcommands/weights/__init__.py delete mode 100644 tests/e2e_tests/subcommands/weights/test_commit_weights.py delete mode 100644 tests/e2e_tests/utils.py delete mode 100644 tests/helpers/__init__.py delete mode 100644 
tests/helpers/helpers.py delete mode 100644 tests/integration_tests/__init__.py delete mode 100644 tests/integration_tests/test_cli.py delete mode 100644 tests/integration_tests/test_cli_no_network.py delete mode 100644 tests/integration_tests/test_metagraph_integration.py delete mode 100644 tests/integration_tests/test_subtensor_integration.py delete mode 100644 tests/pytest.ini delete mode 100644 tests/unit_tests/__init__.py delete mode 100644 tests/unit_tests/conftest.py delete mode 100644 tests/unit_tests/extrinsics/test_delegation.py delete mode 100644 tests/unit_tests/extrinsics/test_init.py delete mode 100644 tests/unit_tests/extrinsics/test_network.py delete mode 100644 tests/unit_tests/extrinsics/test_prometheus.py delete mode 100644 tests/unit_tests/extrinsics/test_registration.py delete mode 100644 tests/unit_tests/extrinsics/test_root.py delete mode 100644 tests/unit_tests/extrinsics/test_senate.py delete mode 100644 tests/unit_tests/extrinsics/test_serving.py delete mode 100644 tests/unit_tests/extrinsics/test_set_weights.py delete mode 100644 tests/unit_tests/extrinsics/test_staking.py delete mode 100644 tests/unit_tests/extrinsics/test_unstaking.py delete mode 100644 tests/unit_tests/factories/__init__.py delete mode 100644 tests/unit_tests/factories/neuron_factory.py delete mode 100644 tests/unit_tests/test_axon.py delete mode 100644 tests/unit_tests/test_chain_data.py delete mode 100644 tests/unit_tests/test_dendrite.py delete mode 100644 tests/unit_tests/test_keyfile.py delete mode 100644 tests/unit_tests/test_logging.py delete mode 100644 tests/unit_tests/test_metagraph.py delete mode 100644 tests/unit_tests/test_overview.py delete mode 100644 tests/unit_tests/test_subtensor.py delete mode 100644 tests/unit_tests/test_synapse.py delete mode 100644 tests/unit_tests/test_tensor.py delete mode 100644 tests/unit_tests/test_wallet.py delete mode 100644 tests/unit_tests/utils/__init__.py delete mode 100644 tests/unit_tests/utils/test_balance.py delete 
mode 100644 tests/unit_tests/utils/test_networking.py delete mode 100644 tests/unit_tests/utils/test_registration.py delete mode 100644 tests/unit_tests/utils/test_subtensor.py delete mode 100644 tests/unit_tests/utils/test_utils.py delete mode 100644 tests/unit_tests/utils/test_version.py delete mode 100644 tests/unit_tests/utils/test_weight_utils.py diff --git a/.circleci/check_pr_status.sh b/.circleci/check_pr_status.sh deleted file mode 100755 index 4b31a29698..0000000000 --- a/.circleci/check_pr_status.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Extract the repository owner -REPO_OWNER=$(echo $CIRCLE_PULL_REQUEST | awk -F'/' '{print $(NF-3)}') - -# Extract the repository name -REPO_NAME=$(echo $CIRCLE_PULL_REQUEST | awk -F'/' '{print $(NF-2)}') - -# Extract the pull request number -PR_NUMBER=$(echo $CIRCLE_PULL_REQUEST | awk -F'/' '{print $NF}') - - -PR_DETAILS=$(curl -s \ - "https://api.github.com/repos/$REPO_OWNER/$REPO_NAME/pulls/$PR_NUMBER") - - -IS_DRAFT=$(echo "$PR_DETAILS" | jq -r .draft) -echo $IS_DRAFT - -if [ "$IS_DRAFT" == "true" ]; then - echo "This PR is a draft. Skipping the workflow." - exit 1 -else - echo "This PR is not a draft. Proceeding with the workflow." 
- exit 0 -fi diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 90f49d54eb..0000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,359 +0,0 @@ -version: 2.1 - -orbs: - python: circleci/python@2.1.1 - python-lib: dialogue/python-lib@0.1.55 - -jobs: - check-if-pr-is-draft: - docker: - - image: cimg/python:3.10 - steps: - - checkout - - run: - name: Install jq - command: sudo apt-get update && sudo apt-get install -y jq - - run: - name: Check if PR is a draft - command: .circleci/check_pr_status.sh - - ruff: - resource_class: small - parameters: - python-version: - type: string - docker: - - image: cimg/python:<< parameters.python-version >> - - steps: - - checkout - - - restore_cache: - name: Restore cached ruff venv - keys: - - v2-pypi-py-ruff-<< parameters.python-version >> - - - run: - name: Update & Activate ruff venv - command: | - python -m venv .venv - . .venv/bin/activate - python -m pip install --upgrade pip - pip install ruff -c requirements/dev.txt - - - save_cache: - name: Save cached ruff venv - paths: - - ".venv/" - key: v2-pypi-py-ruff-<< parameters.python-version >> - - - run: - name: Ruff format check - command: | - . .venv/bin/activate - ruff format --diff . - - check_compatibility: - parameters: - python_version: - type: string - docker: - - image: cimg/python:3.10 - steps: - - checkout - - run: - name: Check if requirements files have changed - command: ./scripts/check_requirements_changes.sh - - run: - name: Install dependencies and Check compatibility - command: | - if [ "$REQUIREMENTS_CHANGED" == "true" ]; then - sudo apt-get update - sudo apt-get install -y jq curl - ./scripts/check_compatibility.sh << parameters.python_version >> - else - echo "Skipping compatibility checks..." 
- fi - - build-and-test: - resource_class: medium - parallelism: 2 - parameters: - python-version: - type: string - docker: - - image: cimg/python:<< parameters.python-version >> - - steps: - - checkout - - - restore_cache: - name: Restore cached venv - keys: - - v2-pypi-py<< parameters.python-version >>-{{ checksum "requirements/prod.txt" }}+{{ checksum "requirements/dev.txt" }} - - v2-pypi-py<< parameters.python-version >> - - - run: - name: Update & Activate venv - command: | - python -m venv .venv - . .venv/bin/activate - python -m pip install --upgrade pip - python -m pip install '.[dev]' - - - save_cache: - name: Save cached venv - paths: - - "env/" - key: v2-pypi-py<< parameters.python-version >>-{{ checksum "requirements/prod.txt" }}+{{ checksum "requirements/dev.txt" }} - - - run: - name: Install Bittensor - command: | - . .venv/bin/activate - pip install -e '.[dev]' - - - run: - name: Instantiate Mock Wallet - command: | - . .venv/bin/activate - ./scripts/create_wallet.sh - - - run: - name: Unit Tests - no_output_timeout: 20m - command: | - . .venv/bin/activate - export PYTHONUNBUFFERED=1 - pytest -n2 --reruns 3 --durations=0 --verbose --junitxml=test-results/unit_tests.xml \ - --cov=. --cov-append --cov-config .coveragerc \ - --splits $CIRCLE_NODE_TOTAL --group $((CIRCLE_NODE_INDEX + 1)) \ - --splitting-algorithm duration_based_chunks --store-durations --durations-path .test_durations \ - tests/unit_tests/ - - - run: - name: Integration Tests - no_output_timeout: 30m - command: | - . .venv/bin/activate - export PYTHONUNBUFFERED=1 - pytest -n2 --reruns 3 --reruns-delay 15 --durations=0 --verbose --junitxml=test-results/integration_tests.xml \ - --cov=. 
--cov-append --cov-config .coveragerc \ - --splits $CIRCLE_NODE_TOTAL --group $((CIRCLE_NODE_INDEX + 1)) \ - --splitting-algorithm duration_based_chunks --store-durations --durations-path .test_durations \ - tests/integration_tests/ - - - store_test_results: - path: test-results - - store_artifacts: - path: test-results - - - #- when: - #condition: - #equal: ["3.10.5", << parameters.python-version >> ] - #steps: - #- run: - #name: Upload Coverage - #command: | - #. .venv/bin/activate && coveralls - #env: - #CI_NAME: circleci - #CI_BUILD_NUMBER: $CIRCLE_BUILD_NUM - #CI_BUILD_URL: $CIRCLE_BUILD_URL - #CI_BRANCH: $CIRCLE_BRANCH - #CI_JOB_ID: $CIRCLE_NODE_INDEX - #COVERALLS_PARALLEL: true - - - lint-and-type-check: - resource_class: medium - parallelism: 2 - parameters: - python-version: - type: string - docker: - - image: cimg/python:<< parameters.python-version >> - - steps: - - checkout - - - restore_cache: - name: Restore cached venv - keys: - - v2-pypi-py<< parameters.python-version >>-{{ checksum "requirements/prod.txt" }}+{{ checksum "requirements/dev.txt" }} - - v2-pypi-py<< parameters.python-version >> - - - run: - name: Update & Activate venv - command: | - python -m venv .venv - . .venv/bin/activate - python -m pip install --upgrade pip - python -m pip install '.[dev]' - pip install flake8 - - - save_cache: - name: Save cached venv - paths: - - "env/" - key: v2-pypi-py<< parameters.python-version >>-{{ checksum "requirements/prod.txt" }}+{{ checksum "requirements/dev.txt" }} - - - run: - name: Install Bittensor - command: | - . .venv/bin/activate - pip install -e '.[dev]' - - - run: - name: Lint with flake8 - command: | - . .venv/bin/activate - python -m flake8 bittensor/ --count - - - run: - name: Type check with mypy - command: | - . 
.venv/bin/activate - python -m mypy --ignore-missing-imports bittensor/ - - unit-tests-all-python-versions: - docker: - - image: cimg/python:3.10 - steps: - - run: - name: Placeholder command - command: echo "Success, only runs if all python versions ran" - - coveralls: - docker: - - image: cimg/python:3.10 - steps: - - run: - name: Combine Coverage - command: | - pip3 install --upgrade coveralls - coveralls --finish --rcfile .coveragerc || echo "Failed to upload coverage" - - check-version-updated: - docker: - - image: cimg/python:3.10 - steps: - - checkout - - - run: - name: Version is updated - command: | - [[ $(git diff-tree --no-commit-id --name-only -r HEAD..master | grep bittensor/__init__.py | wc -l) == 1 ]] && echo "bittensor/__init__.py has changed" - [[ $(git diff-tree --no-commit-id --name-only -r HEAD..master | grep VERSION | wc -l) == 1 ]] && echo "VERSION has changed" - - check-changelog-updated: - docker: - - image: cimg/python:3.10 - steps: - - checkout - - run: - name: File CHANGELOG.md is updated - command: | - [[ $(git diff-tree --no-commit-id --name-only -r HEAD..master | grep CHANGELOG.md | wc -l) == 1 ]] && echo "CHANGELOG.md has changed" - - check-version-not-released: - docker: - - image: cimg/python:3.10 - steps: - - checkout - - run: - name: Git tag does not exist for the current version - command: | - [[ $(git tag | grep `cat VERSION` | wc -l) == 0 ]] && echo "VERSION is not a tag" - - run: - name: Pypi package 'bittensor' does not exist for the current version - command: | - [[ $(pip index versions bittensor | grep `cat VERSION` | wc -l) == 0 ]] && echo "Pypi package 'bittensor' does not exist" - - run: - name: Docker image 'opentensorfdn/bittensor' does not exist for the current version - command: | - [[ $(docker manifest inspect opentensorfdn/bittensor:`cat VERSION` > /dev/null 2> /dev/null ; echo $?) 
== 1 ]] && echo "Docker image 'opentensorfdn/bittensor:`cat VERSION`' does not exist in dockerhub" - - release-dry-run: - docker: - - image: cimg/python:3.10 - steps: - - checkout - - setup_remote_docker: - version: 20.10.14 - docker_layer_caching: true - - run: - name: Executing release script - command: | - ./scripts/release/release.sh --github-token ${GH_API_ACCESS_TOKEN} - -workflows: - compatibility_checks: - jobs: - - check_compatibility: - python_version: "3.9" - name: check-compatibility-3.9 - - check_compatibility: - python_version: "3.10" - name: check-compatibility-3.10 - - check_compatibility: - python_version: "3.11" - name: check-compatibility-3.11 - - pr-requirements: - jobs: - - check-if-pr-is-draft - - ruff: - python-version: "3.9.13" - requires: - - check-if-pr-is-draft - - build-and-test: - matrix: - parameters: - python-version: ["3.9.13", "3.10.6", "3.11.4"] - requires: - - check-if-pr-is-draft - - unit-tests-all-python-versions: - requires: - - build-and-test - - lint-and-type-check: - matrix: - parameters: - python-version: ["3.9.13", "3.10.6", "3.11.4"] - requires: - - check-if-pr-is-draft - #- coveralls: - #requires: - #- build-and-test - - release-branches-requirements: - jobs: - - check-version-updated: - filters: - branches: - only: - - /^(release|hotfix)/.*/ - - check-changelog-updated: - filters: - branches: - only: - - /^(release|hotfix)/.*/ - - release-dry-run: - filters: - branches: - only: - - /^(release|hotfix)/.*/ - - release-requirements: - jobs: - - check-version-not-released: - filters: - branches: - only: - - master - - release-dry-run: - filters: - branches: - only: - - master diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index b0e422abef..0000000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -omit = - ./nuclei/* - ./routers/* - ./setup.py - ./tests/* - ./env/* diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index eabfb03301..0000000000 --- a/.dockerignore +++ /dev/null @@ 
-1,21 +0,0 @@ -**/data/ -**/*.log -**/*.png -**/*.pstats -**/*.ipynb -**/bittensor.egg-info/* -**/lib/* -**/build/* -**/dist/* -**/runs/* -**/env/* -**/venv/* -**/tmp/* -**/test_results/* -**/__pycache__/* -**/.circleci -**/.git -**/.github -**/.hypothesis -**/.vscode -**/.gitignore diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 6b2eaa0333..0000000000 --- a/.flake8 +++ /dev/null @@ -1,4 +0,0 @@ -[flake8] -max-line-length = 120 -exclude = .git,__pycache__, __init__.py, docs/source/conf.py,old,build,dist,venv,.venv,.tox -select = E9,F63,F7,F82,F401 diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml deleted file mode 100644 index 5e875de9a0..0000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ /dev/null @@ -1,59 +0,0 @@ -name: Bug report -description: Create a report to help us improve -labels: [bug] -assignees: [] - -body: - - type: textarea - id: bug-description - attributes: - label: Describe the bug - description: A clear and concise description of what the bug is. - validations: - required: true - - - type: textarea - id: reproduce - attributes: - label: To Reproduce - description: Steps to reproduce the behavior. - placeholder: | - 1. Go to '...' - 2. Run command '...' - 3. Scroll down to '....' - 4. See error - validations: - required: true - - - type: textarea - id: expected-behavior - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. - validations: - required: true - - - type: textarea - id: screenshots - attributes: - label: Screenshots - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - - type: input - id: environment - attributes: - label: Environment - description: Please specify your OS and Distro, and Bittensor Version. - placeholder: "OS and Distro: [e.g. Linux Ubuntu, Linux Fedora, etc.], Bittensor Version [e.g. 
22]" - validations: - required: true - - - type: textarea - id: additional-context - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml deleted file mode 100644 index b9cd275add..0000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ /dev/null @@ -1,38 +0,0 @@ -name: Feature request -description: Suggest an idea for this project -labels: [feature] -assignees: [] - -body: - - type: textarea - id: problem-description - attributes: - label: Is your feature request related to a problem? Please describe. - description: A clear and concise description of what the problem is. - placeholder: "Ex. I'm always frustrated when [...]" - validations: - required: true - - - type: textarea - id: solution - attributes: - label: Describe the solution you'd like - description: A clear and concise description of what you want to happen. - validations: - required: true - - - type: textarea - id: alternatives - attributes: - label: Describe alternatives you've considered - description: A clear and concise description of any alternative solutions or features you've considered. - validations: - required: false - - - type: textarea - id: additional-context - attributes: - label: Additional context - description: Add any other context or screenshots about the feature request here. 
- validations: - required: false diff --git a/.github/PULL_REQUEST_TEMPLATE/bug_fix.md b/.github/PULL_REQUEST_TEMPLATE/bug_fix.md deleted file mode 100644 index 8bf781b532..0000000000 --- a/.github/PULL_REQUEST_TEMPLATE/bug_fix.md +++ /dev/null @@ -1,59 +0,0 @@ - - -### Bug - - - -### Description of the Change - - - -### Alternate Designs - - - -### Possible Drawbacks - - - -### Verification Process - - - -### Release Notes - - \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE/feature_change.md b/.github/PULL_REQUEST_TEMPLATE/feature_change.md deleted file mode 100644 index 0b29a822b3..0000000000 --- a/.github/PULL_REQUEST_TEMPLATE/feature_change.md +++ /dev/null @@ -1,54 +0,0 @@ -### Requirements for Adding, Changing, or Removing a Feature - -* Fill out the template below. Any pull request that does not include enough information to be reviewed in a timely manner may be closed at the maintainers' discretion. -* The pull request must contribute a change that has been endorsed by the maintainer team. See details in the template below. -* The pull request must update the test suite to exercise the updated functionality. -* After you create the pull request, all status checks must be pass before a maintainer reviews your contribution. - -### Description of the Change - - - -### Alternate Designs - - - -### Possible Drawbacks - - - -### Verification Process - - - -### Release Notes - - \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE/performance_improvement.md b/.github/PULL_REQUEST_TEMPLATE/performance_improvement.md deleted file mode 100644 index 96e18c9d29..0000000000 --- a/.github/PULL_REQUEST_TEMPLATE/performance_improvement.md +++ /dev/null @@ -1,55 +0,0 @@ -### Requirements for Contributing a Performance Improvement - -* Fill out the template below. Any pull request that does not include enough information to be reviewed in a timely manner may be closed at the maintainers' discretion. 
-* The pull request must only affect performance of existing functionality -* After you create the pull request, all status checks must be pass before a maintainer reviews your contribution. - -### Description of the Change - - - -### Quantitative Performance Benefits - - - -### Possible Drawbacks - - - -### Verification Process - - - -### Applicable Issues - - - -### Release Notes - - \ No newline at end of file diff --git a/.github/auto_assign.yml b/.github/auto_assign.yml deleted file mode 100644 index 900e2ceb85..0000000000 --- a/.github/auto_assign.yml +++ /dev/null @@ -1,7 +0,0 @@ -addReviewers: true - -# A list of team slugs to add as assignees -reviewers: - - opentensor/cortex - -numberOfReviewers: 0 \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index adff4d0aab..0000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,8 +0,0 @@ -version: 2 -updates: - - package-ecosystem: "pip" - directory: "" - file: "requirements/prod.txt" - schedule: - interval: "daily" - open-pull-requests-limit: 0 # Only security updates will be opened as PRs diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index 4a5da46aee..0000000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,10 +0,0 @@ -Welcome! - -Due to [GitHub limitations](https://github.com/orgs/community/discussions/4620), -please switch to **Preview** for links to render properly. - -Please choose the right template for your pull request: - -- 🐛 Are you fixing a bug? [Bug fix](?template=bug_fix.md) -- 📈 Are you improving performance? [Performance improvement](?template=performance_improvement.md) -- đŸ’» Are you changing functionality? 
[Feature change](?template=feature_change.md) diff --git a/.github/workflows/auto-assign.yml b/.github/workflows/auto-assign.yml deleted file mode 100644 index 3a952f91b8..0000000000 --- a/.github/workflows/auto-assign.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: Auto Assign Cortex to Pull Requests - -on: - pull_request: - types: [opened, reopened] - -jobs: - auto-assign: - runs-on: ubuntu-latest - steps: - - name: Auto-assign Cortex Team - uses: kentaro-m/auto-assign-action@v1.2.4 - with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - configuration-path: .github/auto_assign.yml \ No newline at end of file diff --git a/.github/workflows/docker_release.yml b/.github/workflows/docker_release.yml deleted file mode 100644 index dbb6c3bab8..0000000000 --- a/.github/workflows/docker_release.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Build and Push Docker Image -# https://github.com/sigstore/cosign -on: - workflow_dispatch: - inputs: - tag: - description: 'Docker image tag' - required: true - default: 'latest' - -jobs: - build-and-push: - runs-on: ubuntu-latest - - permissions: - contents: read - id-token: write - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Install cosign - uses: sigstore/cosign-installer@v3 - - - name: Log in to Docker Hub - uses: docker/login-action@v2 - with: - registry: docker.io - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Build and push Docker image - uses: docker/build-push-action@v4 - with: - context: . 
- push: true - tags: | - opentensorfdn/bittensor:${{ github.event.inputs.tag }} - opentensorfdn/bittensor:latest - provenance: false - - - name: Sign the images with GitHub OIDC Token - env: - DIGEST: ${{ steps.build.outputs.digest }} - TAGS: ${{ steps.build.outputs.tags }} - run: | - echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST} \ No newline at end of file diff --git a/.github/workflows/e2e-multiple-bittensor-tests.yml b/.github/workflows/e2e-multiple-bittensor-tests.yml deleted file mode 100644 index 470ccef4f8..0000000000 --- a/.github/workflows/e2e-multiple-bittensor-tests.yml +++ /dev/null @@ -1,115 +0,0 @@ -name: E2E tests w/ multiple bittensor versions - -on: - workflow_dispatch: - inputs: - bittensor_versions: - description: 'Bittensor versions to test (comma-separated)' - required: true - default: '7.3.1,7.2.1' - bittensor_branch: - description: 'Branch of bittensor' - required: true - default: 'staging' - subtensor_branch: - description: 'Branch of subtensor' - required: true - default: 'testnet' - -env: - RUSTV: nightly-2024-03-05 - RUST_BACKTRACE: full - -jobs: - setup: - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - steps: - - name: Set up test matrix - id: set-matrix - run: | - versions=$(echo "${{ github.event.inputs.bittensor_versions }}" | jq -R -s -c 'split(",")| map(select(. 
!= ""))') - echo "matrix=${versions}" >> $GITHUB_OUTPUT - - test: - needs: setup - runs-on: SubtensorCI - strategy: - fail-fast: false - matrix: - bittensor-version: ${{fromJson(needs.setup.outputs.matrix)}} - rust-target: - - x86_64-unknown-linux-gnu - env: - RUST_BIN_DIR: target/${{ matrix.rust-target }} - TARGET: ${{ matrix.rust-target }} - steps: - - name: Check-out repository - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.bittensor_branch }} - - - name: Print working directory - run: | - pwd - ls -la - - - name: Install Python dependencies - run: | - python -m pip install --upgrade pip - pip install tox - - - name: Install Rust dependencies - run: | - sudo apt-get update - sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler - - - name: Install Rust ${{ env.RUSTV }} - uses: actions-rs/toolchain@v1.0.6 - with: - toolchain: ${{ env.RUSTV }} - components: rustfmt - profile: minimal - - - name: Add wasm32-unknown-unknown target - run: | - rustup target add wasm32-unknown-unknown --toolchain stable-x86_64-unknown-linux-gnu - rustup component add rust-src --toolchain stable-x86_64-unknown-linux-gnu - - - name: Clone subtensor repo - run: git clone https://github.com/opentensor/subtensor.git - - - name: Setup subtensor repo - working-directory: ${{ github.workspace }}/subtensor - run: git checkout ${{ github.event.inputs.subtensor_branch }} - - - name: Create tox.ini - run: | - cd ../.. - cat << EOF > tox.ini - [tox] - envlist = bt-${{ matrix.bittensor-version }} - - [testenv] - deps = - pytest - pytest-asyncio - anyio - nest_asyncio - bittensor==${{ matrix.bittensor-version }} - commands = - pytest ${{ github.workspace }}/tests/e2e_tests -v -s {posargs} - passenv = - LOCALNET_SH_PATH - - [pytest] - asyncio_mode = auto - EOF - - - name: Run tox - env: - LOCALNET_SH_PATH: ${{ github.workspace }}/subtensor/scripts/localnet.sh - run: | - cd ../.. 
- tox -c tox.ini -e bt-${{ matrix.bittensor-version }} diff --git a/.github/workflows/e2e-subtensor-tests.yaml b/.github/workflows/e2e-subtensor-tests.yaml deleted file mode 100644 index 0bc467a94d..0000000000 --- a/.github/workflows/e2e-subtensor-tests.yaml +++ /dev/null @@ -1,105 +0,0 @@ -name: E2E Subtensor Tests - -concurrency: - group: e2e-subtensor-${{ github.ref }} - cancel-in-progress: true - -on: - push: - branches: [main, development, staging] - - pull_request: - branches: [main, development, staging] - types: [ opened, synchronize, reopened, ready_for_review ] - - workflow_dispatch: - inputs: - verbose: - description: "Output more information when triggered manually" - required: false - default: "" - -env: - CARGO_TERM_COLOR: always - VERBOSE: ${{ github.event.inputs.verbose }} - -# job to run tests in parallel -jobs: - # Job to find all test files - find-tests: - runs-on: ubuntu-latest - if: ${{ github.event_name != 'pull_request' || github.event.pull_request.draft == false }} - outputs: - test-files: ${{ steps.get-tests.outputs.test-files }} - steps: - - name: Check-out repository under $GITHUB_WORKSPACE - uses: actions/checkout@v2 - - - name: Find test files - id: get-tests - run: | - test_files=$(find tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. 
!= ""))') - echo "::set-output name=test-files::$test_files" - shell: bash - - # Job to run tests in parallel - run: - needs: find-tests - runs-on: SubtensorCI - timeout-minutes: 45 - strategy: - fail-fast: false # Allow other matrix jobs to run even if this job fails - max-parallel: 8 # Set the maximum number of parallel jobs - matrix: - rust-branch: - - nightly-2024-03-05 - rust-target: - - x86_64-unknown-linux-gnu - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-tests.outputs.test-files) }} - env: - RELEASE_NAME: development - RUSTV: ${{ matrix.rust-branch }} - RUST_BACKTRACE: full - RUST_BIN_DIR: target/${{ matrix.rust-target }} - TARGET: ${{ matrix.rust-target }} - steps: - - name: Check-out repository under $GITHUB_WORKSPACE - uses: actions/checkout@v2 - - - name: Install dependencies - run: | - sudo apt-get update && - sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler - - - name: Install Rust ${{ matrix.rust-branch }} - uses: actions-rs/toolchain@v1.0.6 - with: - toolchain: ${{ matrix.rust-branch }} - components: rustfmt - profile: minimal - - - name: Add wasm32-unknown-unknown target - run: | - rustup target add wasm32-unknown-unknown --toolchain stable-x86_64-unknown-linux-gnu - rustup component add rust-src --toolchain stable-x86_64-unknown-linux-gnu - - - name: Clone subtensor repo - run: git clone https://github.com/opentensor/subtensor.git - - - name: Setup subtensor repo - working-directory: ${{ github.workspace }}/subtensor - run: git checkout testnet - - - name: Run tests - run: | - python3 -m pip install -e .[dev] pytest - LOCALNET_SH_PATH="${{ github.workspace }}/subtensor/scripts/localnet.sh" pytest ${{ matrix.test-file }} -s - - - name: Retry failed tests - if: failure() - run: | - sleep 10 - python3 -m pip install -e .[dev] pytest - LOCALNET_SH_PATH="${{ github.workspace }}/subtensor/scripts/localnet.sh" pytest ${{ matrix.test-file }} -s diff --git a/.github/workflows/release.yml 
b/.github/workflows/release.yml deleted file mode 100644 index 2cdfe5dfa0..0000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,72 +0,0 @@ -name: Build and Publish Python Package - -on: - workflow_dispatch: - inputs: - version: - description: 'Version to release' - required: true - type: string - -jobs: - build: - name: Build Python distribution - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.10' - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install build wheel twine - - - name: Build package - run: python setup.py sdist bdist_wheel - - - name: Check if package version already exists - run: | - PACKAGE_NAME=$(python setup.py --name) - PACKAGE_VERSION=${{ github.event.inputs.version }} - if twine check dist/*; then - if pip install $PACKAGE_NAME==$PACKAGE_VERSION; then - echo "Error: Version $PACKAGE_VERSION of $PACKAGE_NAME already exists on PyPI" - exit 1 - else - echo "Version $PACKAGE_VERSION of $PACKAGE_NAME does not exist on PyPI. Proceeding with upload." - fi - else - echo "Error: Twine check failed." - exit 1 - fi - - - name: Upload artifact - uses: actions/upload-artifact@v3 - with: - name: dist - path: dist/ - - approve-and-publish: - needs: build - runs-on: ubuntu-latest - environment: release - permissions: - contents: read - id-token: write - - steps: - - name: Download artifact - uses: actions/download-artifact@v3 - with: - name: dist - path: dist/ - - - name: Publish package distributions to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 - with: - verbose: true - print-hash: true \ No newline at end of file diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 5cc3b79913..0000000000 --- a/.gitignore +++ /dev/null @@ -1,216 +0,0 @@ -# Byte-compiled / optimized / DLL files -**/__pycache__/ -*.py[cod] -*$py.class -*.pyc - -# Remove notebooks. 
-*.ipynb - -# weigths and biases -wandb/ - -*.csv -*.torch -*.pt -*.log - -# runs/data/models/logs/~ -data/ -**/data/ - -# C extensions -*.so - -# IDE -*.idea/ - -# VSCODE -.vscode/ - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ -# Generated by Cargo -# will have compiled files and executables -**/target/ -# These are backup files generated by rustfmt -**/*.rs.bk - -.DS_Store - -# The cache for docker container dependency -.cargo - -# The cache for chain data in container -.local - -# State folder for all neurons. -**/data/* -!data/.gitkeep - -# misc -.DS_Store -.env.local -.env.development.local -.env.test.local -.env.production.local - -# PIPY Stuff -bittensor.egg-info -bittensor*.egg -bdist.* - -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -**/build/* -**/dist/* -**/runs/* -**/env/* -**/data/* -**/.data/* -**/tmp/* - -**/.bash_history -**/*.xml -**/*.pstats -**/*.png - -# Replicate library -**/.replicate -replicate.yaml -**/run.sh - -# Notebooks -*.ipynb - -tests/zombienet/bin/**/* \ No newline at end of file diff --git a/.test_durations b/.test_durations deleted file mode 100644 index 8cb7d74bff..0000000000 --- a/.test_durations +++ /dev/null @@ -1,268 +0,0 @@ -{ - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_delegate_stake": 32.565206749999994, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_inspect": 2.0870491260000037, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_metagraph": 17.437785333, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_neuron_run_reregister_false": 35.75446520799999, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_nominate": 
38.171487959, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview": 54.78253583300001, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_all": 303.709275458, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_no_wallet": 33.569985001, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_not_in_first_subnet": 7.832046707999993, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_hotkeys_config": 1.235335959000004, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_sort_by_bad_column_name": 34.20312183400001, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_sort_by_config": 1.4365408759999951, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_sort_order_config": 1.4505757079999952, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_sort_order_config_bad_sort_type": 34.18927604199999, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_width_config": 1.6561556670000002, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_without_hotkeys_config": 1.2479347909999987, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_without_sort_by_config": 34.193473041, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_without_sort_order_config": 1.436726291999996, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_without_width_config": 1.449721043000011, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_recycle_register": 48.5383515, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_register": 6.655044251, - 
"tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_set_weights": 0.006143250000008038, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake": 44.89659891599999, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_all_hotkeys": 31.83300620899999, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_exclude_hotkeys_from_all": 0.0015482090000062954, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_multiple_hotkeys_max_stake": 0.0011364169999907858, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_multiple_hotkeys_max_stake_not_enough_balance": 0.0009022089999959348, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_single_hotkey_max_stake": 0.0009031669999970404, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_single_hotkey_max_stake_enough_stake": 0.0012163340000057588, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_single_hotkey_max_stake_not_enough_balance": 0.0009654589999996688, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_specific_hotkeys": 357.5746072910001, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_transfer": 16.976931332999996, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_transfer_not_enough_balance": 22.429711792000006, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_undelegate_stake": 27.56590779199999, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_unstake_with_all_hotkeys": 38.311913373, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_unstake_with_exclude_hotkeys_from_all": 0.0018990010000123903, - 
"tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_unstake_with_multiple_hotkeys_max_stake": 0.0010086670000006848, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_unstake_with_specific_hotkeys": 0.0012716660000009483, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_delegate": 0.0012134169999740152, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_list_delegates": 12.917025874999979, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_list_subnets": 0.32005762600000764, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_run_reregister_false": 2.500768667000017, - "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_run_synapse_all": 8.177792832999984, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_btcli_help": 0.05371037599999795, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_check_configs": 0.5839849989999948, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_list": 0.015767583999995338, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_list_no_wallet": 0.004536540000003697, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_new_coldkey": 0.005761207000013258, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_new_hotkey": 0.003966625999993312, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_regen_coldkey": 0.00497241600000109, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_regen_coldkeypub": 0.00346216599999849, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_regen_hotkey": 0.004310167000014076, - "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_register_cuda_use_cuda_flag": 2.813618584000004, - 
"tests/integration_tests/test_dataset.py::test_change_data_size": 9.975283208999997, - "tests/integration_tests/test_dataset.py::test_construct_text_corpus": 5.504439667999989, - "tests/integration_tests/test_dataset.py::test_fail_IPFS_server": 2.991185999999985, - "tests/integration_tests/test_dataset.py::test_mock": 0.11688258300000598, - "tests/integration_tests/test_dataset.py::test_mock_function": 0.11185374999999453, - "tests/integration_tests/test_dataset.py::test_next": 5.809825165999982, - "tests/integration_tests/test_dataset.py::test_text_dataset": 0.003949084000012704, - "tests/integration_tests/test_dendrite.py::test_dendrite_backoff": 0.3834034169999967, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor": 0.005605251000005751, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_endpoint_len_error": 0.0010508339999972804, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_endpoint_type_error": 0.0009945420000008198, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_input_len_error": 0.0010635420000113527, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_mismatch_len_error": 0.0009768319999921005, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_shape_error": 0.0010397920000002614, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_type_error": 0.0020723339999904056, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text": 0.005868083999999385, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_endpoints_tensor": 0.04405566500001612, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_list_string": 0.01698745900000631, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_multiple_endpoints_tensor": 0.01505404200000271, - 
"tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_multiple_endpoints_tensor_list": 0.01597050000000877, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_non_list": 0.0058105829999988146, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_singular": 0.016635499999992476, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_singular_no_batch_size": 0.01967587499999013, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_singular_string": 0.02379695900000911, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_tensor_list": 0.00768116700000121, - "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_tensor_list_singular": 0.007751000000013164, - "tests/integration_tests/test_dendrite.py::test_dendrite_to_df": 0.6830525419999987, - "tests/integration_tests/test_dendrite.py::test_failing_synapse": 0.652249334000004, - "tests/integration_tests/test_dendrite.py::test_successful_synapse": 0.5847192090000135, - "tests/integration_tests/test_ipfs.py::test_ipfs_init": 0.005554707999998243, - "tests/integration_tests/test_ipfs.py::test_retrieve_directory": 0.20729179199999237, - "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_create": 0.08020704100000131, - "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_decrypt_keyfile_data_legacy": 3.0671192910000045, - "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_keyfile_mock": 0.018454082999994625, - "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_keyfile_mock_func": 0.019594999999995366, - "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_legacy_coldkey": 0.030612376000000552, - "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_overwriting": 0.031093917000006854, - "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_user_interface": 0.017205207999992922, - 
"tests/integration_tests/test_keyfile.py::TestKeyFiles::test_validate_password": 0.01777775099999701, - "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_full_sync": 3.6405804169999954, - "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_lite_sync": 3.6356975829999953, - "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_load_sync_save": 3.243659209999997, - "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_parameters": 3.0838419149999936, - "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_print_empty": 2.6707623749999954, - "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_properties": 3.287473416999994, - "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_state_dict": 3.296576874000003, - "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_sync_block_0": 4.055834208, - "tests/integration_tests/test_priority_thread_pool.py::test_priority_thread_pool": 0.002472417000006999, - "tests/integration_tests/test_prometheus.py::TestPrometheus::test_init_prometheus_failed": 1.491444625000014, - "tests/integration_tests/test_prometheus.py::TestPrometheus::test_init_prometheus_success": 1.6381353319999903, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_get_balance": 2.5954937909999956, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_get_balances": 1.9654992910000004, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_get_current_block": 0.3812910839999972, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_get_uid_by_hotkey_on_subnet": 0.6584294999999969, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_hotkey_register": 0.46409241699998915, - 
"tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_hotkey_register_failed": 0.3542701670000099, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_network_overrides": 0.953627209000004, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_registration_failed": 1.788183917000012, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_registration_multiprocessed_already_registered": 0.9777173749999974, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_registration_partly_failed": 1.5698486670000023, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_registration_stale_then_continue": 0.781868541999998, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_set_weights": 0.6006925410000008, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_set_weights_failed": 0.3889112079999961, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_set_weights_inclusion": 0.4296055830000114, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_stake": 0.1843938319999836, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_stake_failed": 0.3917970010000005, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_stake_inclusion": 0.38589883299999883, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer": 2.0724527499999965, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer_dest_as_bytes": 1.2727416259999842, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer_failed": 1.2812408760000125, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer_inclusion": 1.2405266240000117, - 
"tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer_invalid_dest": 0.4117500419999942, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_unstake": 0.4006357079999958, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_unstake_failed": 0.4873798340000093, - "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_unstake_inclusion": 0.3860250829999927, - "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_ip_not_set_dont_use_internal_ip": 0.006879416000003857, - "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_ip_port_set_full_address_internal": 0.004500209000006805, - "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_ip_set_full_address_internal": 0.08792841500000037, - "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_port_not_set_use_internal_port": 0.004651376000000873, - "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_port_set_full_address_internal": 0.00591749999999891, - "tests/unit_tests/bittensor_tests/test_axon.py::test_axon_is_destroyed": 0.040033332000000144, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_causal_lm_next_shape_error": 0.0009744579999990677, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_causal_lm_shape_error": 0.001580541999999241, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_deserialization_error": 0.0005970819999987498, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_grads_shape_error": 0.001092959000000171, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_invalid_request": 0.0007582499999996273, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_last_hidden_shape_error": 0.0008626240000007002, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_exception": 
0.0010987509999997869, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_success_causal_lm": 0.0032578749999991885, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_success_causal_lm_next": 0.002431750000001287, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_success_hidden": 0.001287251000000822, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_success_text_priority": 0.0034178330000074197, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_timeout": 0.0009528730000010199, - "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_seq_2_seq_shape_error": 0.0010720409999995795, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_batch_shape_error": 0.0007811660000003329, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causal_lm_next_state_exception": 0.0009985000000014566, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causal_lm_state_exception": 0.002173708000000829, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causallm_shape_error": 0.0006132079999998652, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causallm_success": 0.019581957999998956, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causallmnext_shape_error": 0.0007552919999991303, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causallmnext_success": 0.022651415999999536, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_deserialization_empty": 0.0009227910000007, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_deserialization_error": 0.0008193749999989564, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_empty_request": 0.0011124170000007538, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_joint_faulty_synapse": 0.01353250000000017, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_joint_missing_synapse": 
0.013988917000000711, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_joint_success": 0.0509341249999995, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_last_hidden_shape_error": 0.0008222500000005795, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_last_hidden_state_exception": 0.0009832080000000687, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_last_hidden_success": 0.0017997490000007943, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_not_implemented": 0.001580126000000348, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_priority_2nd_request_timeout": 2.009712416999996, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_priority_timeout": 27.006205707000003, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_response_deserialization_error": 0.0009404579999996443, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_seq_2_seq_shape_error": 0.0009308739999998039, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_seq_2_seq_state_exception": 0.0013031659999995782, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_seq_2_seq_success": 0.0018539589999990724, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_seq_shape_error": 0.0008392500000002912, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_tensor_success_priority": 0.07963441700000029, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_timeout": 0.0021218760000003556, - "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_unknown_error": 0.000990500999999533, - "tests/unit_tests/bittensor_tests/test_axon.py::test_grpc_backward_fails": 0.006330292000001236, - "tests/unit_tests/bittensor_tests/test_axon.py::test_grpc_backward_works": 0.012263416000003247, - "tests/unit_tests/bittensor_tests/test_axon.py::test_grpc_forward_fails": 0.004834957999989342, - 
"tests/unit_tests/bittensor_tests/test_axon.py::test_grpc_forward_works": 0.015886249999994106, - "tests/unit_tests/bittensor_tests/test_axon.py::test_sign_v2": 0.0025120420000011023, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_add": 0.18219254200000012, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_add_invalid_type": 0.12365654300000006, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_add_other_not_balance": 0.14650508300000098, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_div_invalid_type": 0.12069516600000174, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_eq_invalid_type": 0.1321914169999996, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_eq_other_not_balance": 0.13415275000000015, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_floordiv": 0.2226764569999995, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_floordiv_other_not_balance": 0.23913508399999994, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_init": 0.12514987600000005, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_init_from_invalid_value": 0.0004109170000008433, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_mul": 0.19085399900000066, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_mul_invalid_type": 0.16508675100000048, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_mul_other_not_balance": 0.2507777079999993, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_neq_none": 0.12535729200000034, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_not_eq_none": 0.14622908400000068, - 
"tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_radd_other_not_balance": 0.1727647920000006, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_rfloordiv_other_not_balance": 0.21285375000000073, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_rmul_other_not_balance": 0.17940537499999998, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_rsub_other_not_balance": 0.19510154200000063, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_rtruediv_other_not_balance": 0.32300358299999843, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_sub": 0.20487529099999868, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_sub_invalid_type": 0.13107362499999908, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_sub_other_not_balance": 0.20876896000000222, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_truediv": 0.20615204100000106, - "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_truediv_other_not_balance": 0.20203299999999835, - "tests/unit_tests/bittensor_tests/test_config.py::test_loaded_config": 0.000341875000000158, - "tests/unit_tests/bittensor_tests/test_config.py::test_prefix": 1.4881067080000019, - "tests/unit_tests/bittensor_tests/test_config.py::test_strict": 0.003527500000000572, - "tests/unit_tests/bittensor_tests/test_config.py::test_to_defaults": 0.0006572089999998809, - "tests/unit_tests/bittensor_tests/test_endpoint.py::test_create_endpoint": 0.0035975830000012365, - "tests/unit_tests/bittensor_tests/test_endpoint.py::test_endpoint_fails_checks": 0.0009294989999997227, - "tests/unit_tests/bittensor_tests/test_endpoint.py::test_endpoint_to_tensor": 0.0014645410000007075, - "tests/unit_tests/bittensor_tests/test_endpoint.py::test_thrash_equality_of_endpoint": 0.5774439579999999, 
- "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_axon_receptor_forward_works": 0.0101347909999987, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_backward": 0.01403204099999833, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_backward_large": 0.0014666259999991382, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_backward_multiple": 0.0015117080000006666, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_backward_no_grad": 0.001954291000000552, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_call_time": 0.029393998999999837, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_del": 0.0004828739999975795, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_causal_lm_next_shape_error": 0.00045083400000045515, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_causal_lm_shape_error": 0.0004375410000001523, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_last_hidden_shape_error": 0.00042408300000218446, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_seq_2_seq_shape_error": 0.000591667000000129, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_tensor_pass_through_text_causal_lm": 0.0019801239999992504, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_tensor_pass_through_text_causal_lm_next": 0.0015587079999992426, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_tensor_pass_through_text_last_hidden": 0.0014038749999993883, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_tensor_pass_through_text_seq_2_seq": 0.0012167919999974686, - 
"tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_text_causal_lm": 0.0020301259999992993, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_text_causal_lm_next": 0.0013322070000008068, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_text_last_hidden": 0.0011474169999985406, - "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_text_seq_2_seq": 0.0011787070000028876, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_create_ed25519_keypair": 0.001834499999999295, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_create_keypair_from_private_key": 0.0005444169999986315, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_create_sr25519_keypair": 0.0015333330000011358, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_generate_mnemonic": 0.0003291669999967439, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_default_to_dev_mnemonic": 0.0019820840000015494, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_hard_path": 0.0019323339999992584, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_nested_hard_soft_path": 0.0018494169999989651, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_nested_soft_hard_path": 0.0020734170000000773, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_path_gt_32_bytes": 0.001790332999998867, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_soft_path": 0.0016932490000005629, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_unsupported_password": 0.00044658299999866813, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_incorrect_private_key_length_sr25519": 0.00047804200000101105, - 
"tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_incorrect_public_key": 0.0003666670000015415, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_invalid_mnemic": 0.0004930830000002828, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_only_provide_public_key": 0.00045920699999868475, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_only_provide_ss58_address": 0.000522709000001953, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_provide_no_ss58_address_and_public_key": 0.0005050830000019602, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify": 0.0016591679999979903, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_ed25519": 0.0016544579999990816, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_hex_data": 0.001937792000001437, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_incorrect_signature": 0.001960749000000206, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_invalid_message": 0.00183941700000112, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_invalid_signature": 0.0016063319999997105, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_invalid_signature_ed25519": 0.001609873999999678, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_scale_bytes": 0.00196662400000136, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_missing_private_key": 0.0006992090000004225, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_unsupported_crypto_type": 0.0004697499999988253, - 
"tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_unsupport_crypto_type": 0.0004740830000002916, - "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_verify_unsupported_crypto_type": 0.0007947079999990336, - "tests/unit_tests/bittensor_tests/test_metagraph.py::TestMetagraph::test_from_neurons": 0.8742741239999994, - "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreServer::test_coreserver_reregister_flag_false_exit": 0.006013750000001039, - "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreServer::test_coreserver_reregister_flag_true": 0.006052874999999958, - "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreServer::test_model_output_check": 9.921326915999998, - "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreServer::test_set_fine_tuning_params": 6.299140666000003, - "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreValidator::test_corevalidator_reregister_flag_false_exit": 0.008880706999999433 -} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 5a355b5cf6..0000000000 --- a/Dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM python:3.11.8-bookworm - -LABEL bittensor.image.authors="bittensor.com" \ - bittensor.image.vendor="Bittensor" \ - bittensor.image.title="bittensor/bittensor" \ - bittensor.image.description="Bittensor: Incentivized Peer to Peer Neural Networks" \ - bittensor.image.source="https://github.com/opentensor/bittensor.git" \ - bittensor.image.revision="${VCS_REF}" \ - bittensor.image.created="${BUILD_DATE}" \ - bittensor.image.documentation="https://app.gitbook.com/@opentensor/s/bittensor/" -ARG DEBIAN_FRONTEND=noninteractive - -# Update the base image -RUN apt update && apt upgrade -y -# Install bittensor -## Install dependencies -RUN apt install -y curl sudo nano git htop netcat-openbsd wget unzip tmux apt-utils cmake build-essential -## Upgrade pip -RUN pip3 install --upgrade pip - -# 
Install nvm and pm2 -RUN curl -o install_nvm.sh https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.1/install.sh && \ - echo 'fabc489b39a5e9c999c7cab4d281cdbbcbad10ec2f8b9a7f7144ad701b6bfdc7 install_nvm.sh' | sha256sum --check && \ - bash install_nvm.sh - -RUN bash -c "source $HOME/.nvm/nvm.sh && \ - # use node 16 - nvm install 16 && \ - # install pm2 - npm install --location=global pm2" - -RUN mkdir -p /root/.bittensor/bittensor -COPY . /root/.bittensor/bittensor -RUN cd /root/.bittensor/bittensor && python3 -m pip install . - -# Increase ulimit to 1,000,000 -RUN prlimit --pid=$PPID --nofile=1000000 - -EXPOSE 8091 diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 8d10866d56..0000000000 --- a/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -The MIT License (MIT) -Copyright © 2021 Yuma Rao - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the “Software”), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of -the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/Makefile b/Makefile deleted file mode 100644 index 344c3e4184..0000000000 --- a/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -SHELL:=/bin/bash - -init-venv: - python3 -m venv venv && source ./venv/bin/activate - -clean-venv: - source ./venv/bin/activate && \ - pip freeze > make_venv_to_uninstall.txt && \ - pip uninstall -r make_venv_to_uninstall.txt && \ - rm make_venv_to_uninstall.txt - -clean: - rm -rf dist/ && \ - rm -rf build/ && \ - rm -rf bittensor.egg-info/ && \ - rm -rf .pytest_cache/ && \ - rm -rf lib/ - -install: - python3 -m pip install . - -install-dev: - python3 -m pip install '.[dev]' - -install-cubit: - python3 -m pip install '.[cubit]' \ No newline at end of file diff --git a/README.md b/README.md deleted file mode 100644 index b9284f2a5b..0000000000 --- a/README.md +++ /dev/null @@ -1,435 +0,0 @@ -
- -# **Bittensor** -[![Discord Chat](https://img.shields.io/discord/308323056592486420.svg)](https://discord.gg/bittensor) -[![PyPI version](https://badge.fury.io/py/bittensor.svg)](https://badge.fury.io/py/bittensor) -[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) - ---- - -### Internet-scale Neural Networks - -[Discord](https://discord.gg/qasY3HA9F9) ‱ [Network](https://taostats.io/) ‱ [Research](https://bittensor.com/whitepaper) - -
- -Bittensor is a mining network, similar to Bitcoin, that includes built-in incentives designed to encourage computers to provide access to machine learning models in an efficient and censorship-resistant manner. These models can be queried by users seeking outputs from the network, for instance; generating text, audio, and images, or for extracting numerical representations of these input types. Under the hood, Bittensor’s *economic market*, is facilitated by a blockchain token mechanism, through which producers (***miners***) and the verification of the work done by those miners (***validators***) are rewarded. Miners host, train or otherwise procure machine learning systems into the network as a means of fulfilling the verification problems defined by the validators, like the ability to generate responses from prompts i.e. “What is the capital of Texas?. - -The token based mechanism under which the miners are incentivized ensures that they are constantly driven to make their knowledge output more useful, in terms of speed, intelligence and diversity. The value generated by the network is distributed directly to the individuals producing that value, without intermediaries. Anyone can participate in this endeavour, extract value from the network, and govern Bittensor. The network is open to all participants, and no individual or group has full control over what is learned, who can profit from it, or who can access it. - -To learn more about Bittensor, please read our [paper](https://bittensor.com/whitepaper). - -# Install -There are three ways to install Bittensor - -1. Through the installer: -```bash -$ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/opentensor/bittensor/master/scripts/install.sh)" -``` -2. With pip: -```bash -$ pip3 install bittensor -``` -3. From source: -```bash -$ git clone https://github.com/opentensor/bittensor.git -$ python3 -m pip install -e bittensor/ -``` -4. 
Using Conda (recommended for **Apple M1**): -```bash -$ conda env create -f ~/.bittensor/bittensor/scripts/environments/apple_m1_environment.yml -$ conda activate bittensor -``` - -To test your installation, type: -```bash -$ btcli --help -``` -or using python -```python -import bittensor -``` - -#### CUDA -If you anticipate using PoW registration for subnets or the faucet (only available on staging), please install `cubit` as well for your version of python. You can find the Opentensor cubit implementation and instructions [here](https://github.com/opentensor/cubit). - -For example with python 3.10: -```bash -pip install https://github.com/opentensor/cubit/releases/download/v1.1.2/cubit-1.1.2-cp310-cp310-linux_x86_64.whl -``` - -# Wallets - -Wallets are the core ownership and identity technology around which all functions on Bittensor are carried out. Bittensor wallets consists of a coldkey and hotkey where the coldkey may contain many hotkeys, while each hotkey can only belong to a single coldkey. Coldkeys store funds securely, and operate functions such as transfers and staking, while hotkeys are used for all online operations such as signing queries, running miners and validating. - -Wallets can be created in two ways. -1. Using the python-api -```python -import bittensor -wallet = bittensor.wallet() -wallet.create_new_coldkey() -wallet.create_new_hotkey() -print (wallet) -"Wallet (default, default, ~/.bittensor/wallets/)" -``` -2. Or using btcli -> Use the subcommand `wallet` or it's alias `w`: -```bash -$ btcli wallet new_coldkey - Enter wallet name (default): - - IMPORTANT: Store this mnemonic in a secure (preferably offline place), as anyone who has possession of this mnemonic can use it to regenerate the key and access your tokens. - The mnemonic to the new coldkey is: - **** *** **** **** ***** **** *** **** **** **** ***** ***** - You can use the mnemonic to recreate the key in case it gets lost. 
The command to use to regenerate the key using this mnemonic is: - btcli w regen_coldkey --mnemonic post maid erode shy captain verify scan shoulder brisk mountain pelican elbow - -$ btcli wallet new_hotkey - Enter wallet name (default): d1 - Enter hotkey name (default): - - IMPORTANT: Store this mnemonic in a secure (preferably offline place), as anyone who has possession of this mnemonic can use it to regenerate the key and access your tokens. - The mnemonic to the new hotkey is: - **** *** **** **** ***** **** *** **** **** **** ***** ***** - You can use the mnemonic to recreate the key in case it gets lost. The command to use to regenerate the key using this mnemonic is: - btcli w regen_hotkey --mnemonic total steak hour bird hedgehog trim timber can friend dry worry text -``` -In both cases you should be able to view your keys by navigating to ~/.bittensor/wallets or viewed by running ```btcli wallet list``` -```bash -$ tree ~/.bittensor/ - .bittensor/ # Bittensor, root directory. - wallets/ # The folder containing all bittensor wallets. - default/ # The name of your wallet, "default" - coldkey # You encrypted coldkey. - coldkeypub.txt # Your coldkey public address - hotkeys/ # The folder containing all of your hotkeys. - default # You unencrypted hotkey information. -``` -Your default wallet ```Wallet (default, default, ~/.bittensor/wallets/)``` is always used unless you specify otherwise. Be sure to store your mnemonics safely. If you lose your password to your wallet, or the access to the machine where the wallet is stored, you can always regenerate the coldkey using the mnemonic you saved from above. -```bash -$ btcli wallet regen_coldkey --mnemonic **** *** **** **** ***** **** *** **** **** **** ***** ***** -``` - -## Using the cli -The Bittensor command line interface (`btcli`) is the primary command line tool for interacting with the Bittensor network. It can be used to deploy nodes, manage wallets, stake/unstake, nominate, transfer tokens, and more. 
- -### Basic Usage - -To get the list of all the available commands and their descriptions, you can use: - -```bash -btcli --help - -usage: btcli - -bittensor cli v{bittensor.__version__} - -commands: - subnets (s, subnet) - Commands for managing and viewing subnetworks. - root (r, roots) - Commands for managing and viewing the root network. - wallet (w, wallets) - Commands for managing and viewing wallets. - stake (st, stakes) - Commands for staking and removing stake from hotkey accounts. - sudo (su, sudos) - Commands for subnet management. - legacy (l) - Miscellaneous commands. -``` - -### Example Commands - -#### Viewing Senate Proposals -```bash -btcli root proposals -``` - -#### Viewing Senate Members -```bash -btcli root list_delegates -``` - -#### Viewing Proposal Votes -```bash -btcli root senate_vote --proposal=[PROPOSAL_HASH] -``` - -#### Registering for Senate -```bash -btcli root register -``` - -#### Leaving Senate -```bash -btcli root undelegate -``` - -#### Voting in Senate -```bash -btcli root senate_vote --proposal=[PROPOSAL_HASH] -``` - -#### Miscellaneous Commands -```bash -btcli legacy update -btcli legacy faucet -``` - -#### Managing Subnets -```bash -btcli subnets list -btcli subnets create -``` - -#### Managing Wallets -```bash -btcli wallet list -btcli wallet transfer -``` - -### Note - -Please replace the subcommands and arguments as necessary to suit your needs, and always refer to `btcli --help` or `btcli --help` for the most up-to-date and accurate information. - -For example: -```bash -btcli subnets --help - -usage: btcli subnets [-h] {list,metagraph,lock_cost,create,register,pow_register,hyperparameters} ... - -positional arguments: - {list,metagraph,lock_cost,create,register,pow_register,hyperparameters} - Commands for managing and viewing subnetworks. - list List all subnets on the network. - metagraph View a subnet metagraph information. - lock_cost Return the lock cost to register a subnet. 
- create Create a new bittensor subnetwork on this chain. - register Register a wallet to a network. - pow_register Register a wallet to a network using PoW. - hyperparameters View subnet hyperparameters. - -options: - -h, --help show this help message and exit -``` - -### Post-Installation Steps - -To enable autocompletion for Bittensor CLI, run the following commands: - -```bash -btcli --print-completion bash >> ~/.bashrc # For Bash -btcli --print-completion zsh >> ~/.zshrc # For Zsh -source ~/.bashrc # Reload Bash configuration to take effect -``` - -# The Bittensor Package -The bittensor package contains data structures for interacting with the bittensor ecosystem, writing miners, validators and querying the network. Additionally, it provides many utilities for efficient serialization of Tensors over the wire, performing data analysis of the network, and other useful utilities. - -In the 7.0.0 release, we have removed `torch` by default. However, you can still use `torch` by setting the environment variable -`USE_TORCH=1` and making sure that you have installed the `torch` library. -You can install `torch` by running `pip install bittensor[torch]` (if installing via PyPI), or by running `pip install -e ".[torch]"` (if installing from source). -We will not be adding any new functionality based on torch. - -Wallet: Interface over locally stored bittensor hot + coldkey styled wallets. -```python -import bittensor -# Bittensor's wallet maintenance class. -wallet = bittensor.wallet() -# Access the hotkey -wallet.hotkey -# Access the coldkey -wallet.coldkey ( requires decryption ) -# Sign data with the keypair. -wallet.coldkey.sign( data ) - -``` - -Subtensor: Interfaces with bittensor's blockchain and can perform operations like extracting state information or sending transactions. -```python -import bittensor -# Bittensor's chain interface. 
-subtensor = bittensor.subtensor() -# Get the chain block -subtensor.get_current_block() -# Transfer Tao to a destination address. -subtensor.transfer( wallet = wallet, dest = "xxxxxxx..xxxxx", amount = 10.0) -# Register a wallet onto a subnetwork -subtensor.register( wallet = wallet, netuid = 1 ) -``` - -Metagraph: Encapsulates the chain state of a particular subnetwork at a specific block. -```python -import bittensor -# Bittensor's chain state object. -metagraph = bittensor.metagraph( netuid = 1 ) -# Resync the graph with the most recent chain state -metagraph.sync() -# Get the list of stake values -print ( metagraph.S ) -# Get endpoint information for the entire subnetwork -print ( metagraph.axons ) -# Get the hotkey information for the miner in the 10th slot -print ( metagraph.hotkeys[ 10 ] ) -# Sync the metagraph at another block -metagraph.sync( block = 100000 ) -# Save the metagraph -metagraph.save() -# Load the same -metagraph.load() -``` - -Synapse: Responsible for defining the protocol definition between axon servers and dendrite clients -```python -class Topk( bittensor.Synapse ): - topk: int = 2 # Number of "top" elements to select - input: bittensor.Tensor = pydantic.Field(..., frozen=True) # Ensure that input cannot be set on the server side. - v: bittensor.Tensor = None - i: bittensor.Tensor = None - -def topk( synapse: Topk ) -> Topk: - v, i = torch.topk( synapse.input.deserialize(), k = synapse.topk ) - synapse.v = bittensor.Tensor.serialize( v ) - synapse.i = bittensor.Tensor.serialize( i ) - return synapse - -# Attach the forward function to the axon and start. -axon = bittensor.axon().attach( topk ).start() -``` - -Axon: Serves Synapse protocols with custom blacklist, priority and verify functions. 
- -```python -import bittensor - -class MySynapse( bittensor.Synapse ): - input: int = 1 - output: int = None - -# Define a custom request forwarding function -def forward( synapse: MySynapse ) -> MySynapse: - # Apply custom logic to synapse and return it - synapse.output = 2 - return synapse - -# Define a custom request verification function -def verify_my_synapse( synapse: MySynapse ): - # Apply custom verification logic to synapse - # Optionally raise Exception - -# Define a custom request blacklist function -def blacklist_my_synapse( synapse: MySynapse ) -> bool: - # Apply custom blacklist - # return False ( if non blacklisted ) or True ( if blacklisted ) - -# Define a custom request priority function -def prioritize_my_synape( synapse: MySynapse ) -> float: - # Apply custom priority - return 1.0 - -# Initialize Axon object with a custom configuration -my_axon = bittensor.axon(config=my_config, wallet=my_wallet, port=9090, ip="192.0.2.0", external_ip="203.0.113.0", external_port=7070) - -# Attach the endpoint with the specified verification and forwarding functions -my_axon.attach( - forward_fn = forward_my_synapse, - verify_fn=verify_my_synapse, - blacklist_fn = blacklist_my_synapse, - priority_fn = prioritize_my_synape -).start() -``` - -Dendrite: Represents the abstracted implementation of a network client module -designed to send requests to those endpoints to receive inputs. - -Example: -```python -dendrite_obj = dendrite( wallet = bittensor.wallet() ) -# pings the axon endpoint -await d( ) -# ping multiple axon endpoints -await d( [] ) -# Send custom synapse request to axon. -await d( bittensor.axon(), bittensor.Synapse() ) -# Query all metagraph objects. -await d( meta.axons, bittensor.Synapse() ) -``` - -## Setting weights on root network -Use the `root` subcommand to access setting weights on the network across subnets. - -```bash -btcli root weights --wallet.name --wallet.hotkey -Enter netuids (e.g. 
0, 1, 2 ...): -# Here enter your selected netuids to set weights on -1, 2 - ->Enter weights (e.g. 0.09, 0.09, 0.09 ...): -# These do not need to sum to 1, we do normalization on the backend. -# Values must be > 0 -0.5, 10 - -Normalized weights: - tensor([ 0.5000, 10.0000]) -> tensor([0.0476, 0.9524]) - -Do you want to set the following root weights?: - weights: tensor([0.0476, 0.9524]) - uids: tensor([1, 2])? [y/n]: -y - -⠏ 📡 Setting root weights on test ... -``` - -## Bittensor Subnets API - -This guide provides instructions on how to extend the Bittensor Subnets API, a powerful interface for interacting with the Bittensor network across subnets. The Bittensor Subnets API facilitates querying across any subnet that has exposed API endpoints to unlock utility of the Bittensor decentralized network. - -The Bittensor Subnets API consists of abstract classes and a registry system to dynamically handle API interactions. It allows developers to implement custom logic for storing and retrieving data, while also providing a straightforward way for end users to interact with these functionalities. - -### Core Components - -- **APIRegistry**: A central registry that manages API handlers. It allows for dynamic retrieval of handlers based on keys. -- **SubnetsAPI (Abstract Base Class)**: Defines the structure for API implementations, including methods for querying the network and processing responses. -- **StoreUserAPI & RetrieveUserAPI**: Concrete implementations of the `SubnetsAPI` for storing and retrieving user data. - -### Implementing Custom Subnet APIs - -To implement your own subclasses of `bittensor.SubnetsAPI` to integrate an API into your subnet. - -1. **Inherit from `SubnetsAPI`**: Your class should inherit from the `SubnetsAPI` abstract base class. - -2. **Implement Required Methods**: Implement the `prepare_synapse` and `process_responses` abstract methods with your custom logic. - -That's it! 
For example: - -```python -import bittensor - -class CustomSubnetAPI(bittensor.SubnetsAPI): - def __init__(self, wallet: "bittensor.wallet"): - super().__init__(wallet) - # Custom initialization here - - def prepare_synapse(self, *args, **kwargs): - # Custom synapse preparation logic - pass - - def process_responses(self, responses): - # Custom response processing logic - pass -``` - -## Release -The release manager should follow the instructions of the [RELEASE_GUIDELINES.md](./RELEASE_GUIDELINES.md) document. - -## Contributions -Please review the [contributing guide](./contrib/CONTRIBUTING.md) for more information before making a pull request. - -## License -The MIT License (MIT) -Copyright © 2021 Yuma Rao - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - -## Acknowledgments -**learning-at-home/hivemind** diff --git a/VERSION b/VERSION deleted file mode 100644 index b616717999..0000000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -7.4.0 \ No newline at end of file diff --git a/bin/btcli b/bin/btcli deleted file mode 100755 index fa98536a09..0000000000 --- a/bin/btcli +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python - -import websocket - -import sys -import shtab -from bittensor import cli as btcli -from bittensor import logging as bt_logging - - -def main(): - # Create the parser with shtab support - parser = btcli.__create_parser__() - args, unknown = parser.parse_known_args() - - if args.print_completion: # Check for print-completion argument - print(shtab.complete(parser, args.print_completion)) - return - - try: - cli_instance = btcli(args=sys.argv[1:]) - cli_instance.run() - except KeyboardInterrupt: - print('KeyboardInterrupt') - except RuntimeError as e: - bt_logging.error(f'RuntimeError: {e}') - except websocket.WebSocketConnectionClosedException as e: - bt_logging.error(f'Subtensor related error. WebSocketConnectionClosedException: {e}') - - -if __name__ == '__main__': - main() - -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. diff --git a/bittensor/__init__.py b/bittensor/__init__.py deleted file mode 100644 index 9f4dbf1aa8..0000000000 --- a/bittensor/__init__.py +++ /dev/null @@ -1,371 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022-2023 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
-import os -import warnings - -from rich.console import Console -from rich.traceback import install - - -if (NEST_ASYNCIO_ENV := os.getenv("NEST_ASYNCIO")) in ("1", None): - if NEST_ASYNCIO_ENV is None: - warnings.warn( - "NEST_ASYNCIO implicitly set to '1'. In the future, the default value will be '0'." - "If you use `nest_asyncio` make sure to add it explicitly to your project dependencies," - "as it will be removed from `bittensor` package dependencies in the future." - "To silence this warning, explicitly set the environment variable, e.g. `export NEST_ASYNCIO=0`.", - DeprecationWarning, - ) - # Install and apply nest asyncio to allow the async functions - # to run in a .ipynb - import nest_asyncio - - nest_asyncio.apply() - - -# Bittensor code and protocol version. -__version__ = "7.4.0" - -_version_split = __version__.split(".") -__version_info__ = tuple(int(part) for part in _version_split) -_version_int_base = 1000 -assert max(__version_info__) < _version_int_base - -__version_as_int__: int = sum( - e * (_version_int_base**i) for i, e in enumerate(reversed(__version_info__)) -) -assert __version_as_int__ < 2**31 # fits in int32 -__new_signature_version__ = 360 - -# Rich console. -__console__ = Console() -__use_console__ = True - -# Remove overdue locals in debug training. -install(show_locals=False) - - -def __getattr__(name): - if name == "version_split": - warnings.warn( - "version_split is deprecated and will be removed in future versions. Use __version__ instead.", - DeprecationWarning, - ) - return _version_split - raise AttributeError(f"module {__name__} has no attribute {name}") - - -def turn_console_off(): - global __use_console__ - global __console__ - from io import StringIO - - __use_console__ = False - __console__ = Console(file=StringIO(), stderr=False) - - -def turn_console_on(): - global __use_console__ - global __console__ - __use_console__ = True - __console__ = Console() - - -turn_console_off() - - -# Logging helpers. 
-def trace(on: bool = True): - logging.set_trace(on) - - -def debug(on: bool = True): - logging.set_debug(on) - - -# Substrate chain block time (seconds). -__blocktime__ = 12 - -# Pip address for versioning -__pipaddress__ = "https://pypi.org/pypi/bittensor/json" - -# Raw GitHub url for delegates registry file -__delegates_details_url__: str = "https://raw.githubusercontent.com/opentensor/bittensor-delegates/main/public/delegates.json" - -# Substrate ss58_format -__ss58_format__ = 42 - -# Wallet ss58 address length -__ss58_address_length__ = 48 - -__networks__ = ["local", "finney", "test", "archive"] - -__finney_entrypoint__ = "wss://entrypoint-finney.opentensor.ai:443" - -__finney_test_entrypoint__ = "wss://test.finney.opentensor.ai:443/" - -__archive_entrypoint__ = "wss://archive.chain.opentensor.ai:443/" - -# Needs to use wss:// -__bellagene_entrypoint__ = "wss://parachain.opentensor.ai:443" - - -if ( - BT_SUBTENSOR_CHAIN_ENDPOINT := os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") -) is not None: - __local_entrypoint__ = BT_SUBTENSOR_CHAIN_ENDPOINT -else: - __local_entrypoint__ = "ws://127.0.0.1:9944" - - -__tao_symbol__: str = chr(0x03C4) - -__rao_symbol__: str = chr(0x03C1) - -# Block Explorers map network to explorer url -# Must all be polkadotjs explorer urls -__network_explorer_map__ = { - "opentensor": { - "local": "https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer", - "endpoint": "https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer", - "finney": "https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer", - }, - "taostats": { - "local": "https://x.taostats.io", - "endpoint": "https://x.taostats.io", - "finney": "https://x.taostats.io", - }, -} - -# --- Type Registry --- -__type_registry__ = { - "types": { - "Balance": "u64", # Need to override default u128 - }, - "runtime_api": { - "NeuronInfoRuntimeApi": { - "methods": { - "get_neuron_lite": 
{ - "params": [ - { - "name": "netuid", - "type": "u16", - }, - { - "name": "uid", - "type": "u16", - }, - ], - "type": "Vec", - }, - "get_neurons_lite": { - "params": [ - { - "name": "netuid", - "type": "u16", - }, - ], - "type": "Vec", - }, - } - }, - "StakeInfoRuntimeApi": { - "methods": { - "get_stake_info_for_coldkey": { - "params": [ - { - "name": "coldkey_account_vec", - "type": "Vec", - }, - ], - "type": "Vec", - }, - "get_stake_info_for_coldkeys": { - "params": [ - { - "name": "coldkey_account_vecs", - "type": "Vec>", - }, - ], - "type": "Vec", - }, - }, - }, - "SubnetInfoRuntimeApi": { - "methods": { - "get_subnet_hyperparams": { - "params": [ - { - "name": "netuid", - "type": "u16", - }, - ], - "type": "Vec", - } - } - }, - "SubnetRegistrationRuntimeApi": { - "methods": {"get_network_registration_cost": {"params": [], "type": "u64"}} - }, - "ColdkeySwapRuntimeApi": { - "methods": { - "get_scheduled_coldkey_swap": { - "params": [ - { - "name": "coldkey_account_vec", - "type": "Vec", - }, - ], - "type": "Vec", - }, - "get_remaining_arbitration_period": { - "params": [ - { - "name": "coldkey_account_vec", - "type": "Vec", - }, - ], - "type": "Vec", - }, - "get_coldkey_swap_destinations": { - "params": [ - { - "name": "coldkey_account_vec", - "type": "Vec", - }, - ], - "type": "Vec", - }, - } - }, - }, -} - -from .errors import ( - BlacklistedException, - ChainConnectionError, - ChainError, - ChainQueryError, - ChainTransactionError, - IdentityError, - InternalServerError, - InvalidRequestNameError, - KeyFileError, - MetadataError, - NominationError, - NotDelegateError, - NotRegisteredError, - NotVerifiedException, - PostProcessException, - PriorityException, - RegistrationError, - RunException, - StakeError, - SynapseDendriteNoneException, - SynapseParsingError, - TransferError, - UnknownSynapseError, - UnstakeError, -) - -from substrateinterface import Keypair # noqa: F401 -from .config import InvalidConfigFile, DefaultConfig, config, T -from .keyfile 
import ( - serialized_keypair_to_keyfile_data, - deserialize_keypair_from_keyfile_data, - validate_password, - ask_password_to_encrypt, - keyfile_data_is_encrypted_nacl, - keyfile_data_is_encrypted_ansible, - keyfile_data_is_encrypted_legacy, - keyfile_data_is_encrypted, - keyfile_data_encryption_method, - legacy_encrypt_keyfile_data, - encrypt_keyfile_data, - get_coldkey_password_from_environment, - decrypt_keyfile_data, - keyfile, - Mockkeyfile, -) -from .wallet import display_mnemonic_msg, wallet - -from .utils import ( - ss58_to_vec_u8, - unbiased_topk, - version_checking, - strtobool, - strtobool_with_default, - get_explorer_root_url_by_network_from_map, - get_explorer_url_for_network, - ss58_address_to_bytes, - U16_NORMALIZED_FLOAT, - U64_NORMALIZED_FLOAT, - u8_key_to_ss58, - hash, - wallet_utils, -) - -from .utils.balance import Balance as Balance -from .chain_data import ( - AxonInfo, - NeuronInfo, - NeuronInfoLite, - PrometheusInfo, - DelegateInfo, - StakeInfo, - SubnetInfo, - SubnetHyperparameters, - IPInfo, - ProposalCallData, - ProposalVoteData, -) - -# Allows avoiding name spacing conflicts and continue access to the `subtensor` module with `subtensor_module` name -from . import subtensor as subtensor_module - -# Double import allows using class `Subtensor` by referencing `bittensor.Subtensor` and `bittensor.subtensor`. 
-# This will be available for a while until we remove reference `bittensor.subtensor` -from .subtensor import Subtensor -from .subtensor import Subtensor as subtensor - -from .cli import cli as cli, COMMANDS as ALL_COMMANDS -from .btlogging import logging -from .metagraph import metagraph as metagraph -from .threadpool import PriorityThreadPoolExecutor as PriorityThreadPoolExecutor - -from .synapse import TerminalInfo, Synapse -from .stream import StreamingSynapse -from .tensor import tensor, Tensor -from .axon import axon as axon -from .dendrite import dendrite as dendrite - -from .mock.keyfile_mock import MockKeyfile as MockKeyfile -from .mock.subtensor_mock import MockSubtensor as MockSubtensor -from .mock.wallet_mock import MockWallet as MockWallet - -from .subnets import SubnetsAPI as SubnetsAPI - -configs = [ - axon.config(), - subtensor.config(), - PriorityThreadPoolExecutor.config(), - wallet.config(), - logging.get_config(), -] -defaults = config.merge_all(configs) diff --git a/bittensor/axon.py b/bittensor/axon.py deleted file mode 100644 index 8cefadfe61..0000000000 --- a/bittensor/axon.py +++ /dev/null @@ -1,1528 +0,0 @@ -"""Create and initialize Axon, which services the forward and backward requests from other neurons.""" - -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import argparse -import asyncio -import contextlib -import copy -import inspect -import json -import os -import threading -import time -import traceback -import typing -import uuid -import warnings -from inspect import signature, Signature, Parameter -from typing import List, Optional, Tuple, Callable, Any, Dict, Awaitable - -import uvicorn -from fastapi import APIRouter, Depends, FastAPI -from fastapi.responses import JSONResponse -from fastapi.routing import serialize_response -from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint -from starlette.requests import Request -from starlette.responses import Response -from substrateinterface import Keypair - -import bittensor -from bittensor.utils.axon_utils import allowed_nonce_window_ns, calculate_diff_seconds -from bittensor.constants import V_7_2_0 -from bittensor.errors import ( - BlacklistedException, - InvalidRequestNameError, - NotVerifiedException, - PostProcessException, - PriorityException, - SynapseDendriteNoneException, - SynapseException, - SynapseParsingError, - UnknownSynapseError, -) -from bittensor.threadpool import PriorityThreadPoolExecutor -from bittensor.utils import networking - - -class FastAPIThreadedServer(uvicorn.Server): - """ - The ``FastAPIThreadedServer`` class is a specialized server implementation for the Axon server in the Bittensor network. 
- - It extends the functionality of :func:`uvicorn.Server` to run the FastAPI application in a separate thread, allowing the Axon server to handle HTTP requests concurrently and non-blocking. - - This class is designed to facilitate the integration of FastAPI with the Axon's asynchronous architecture, ensuring efficient and scalable handling of network requests. - - Importance and Functionality - Threaded Execution - The class allows the FastAPI application to run in a separate thread, enabling concurrent handling of HTTP requests which is crucial for the performance and scalability of the Axon server. - - Seamless Integration - By running FastAPI in a threaded manner, this class ensures seamless integration of FastAPI's capabilities with the Axon server's asynchronous and multi-threaded architecture. - - Controlled Server Management - The methods start and stop provide controlled management of the server's lifecycle, ensuring that the server can be started and stopped as needed, which is vital for maintaining the Axon server's reliability and availability. - - Signal Handling - Overriding the default signal handlers prevents potential conflicts with the Axon server's main application flow, ensuring stable operation in various network conditions. - - Use Cases - Starting the Server - When the Axon server is initialized, it can use this class to start the FastAPI application in a separate thread, enabling it to begin handling HTTP requests immediately. - - Stopping the Server - During shutdown or maintenance of the Axon server, this class can be used to stop the FastAPI application gracefully, ensuring that all resources are properly released. - - Args: - should_exit (bool): Flag to indicate whether the server should stop running. - is_running (bool): Flag to indicate whether the server is currently running. 
- - The server overrides the default signal handlers to prevent interference with the main application flow and provides methods to start and stop the server in a controlled manner. - """ - - should_exit: bool = False - is_running: bool = False - - def install_signal_handlers(self): - """ - Overrides the default signal handlers provided by ``uvicorn.Server``. This method is essential to ensure that the signal handling in the threaded server does not interfere with the main application's flow, especially in a complex asynchronous environment like the Axon server. - """ - pass - - @contextlib.contextmanager - def run_in_thread(self): - """ - Manages the execution of the server in a separate thread, allowing the FastAPI application to run asynchronously without blocking the main thread of the Axon server. This method is a key component in enabling concurrent request handling in the Axon server. - - Yields: - None: This method yields control back to the caller while the server is running in the background thread. - """ - thread = threading.Thread(target=self.run, daemon=True) - thread.start() - try: - while not self.started: - time.sleep(1e-3) - yield - finally: - self.should_exit = True - thread.join() - - def _wrapper_run(self): - """ - A wrapper method for the :func:`run_in_thread` context manager. This method is used internally by the ``start`` method to initiate the server's execution in a separate thread. - """ - with self.run_in_thread(): - while not self.should_exit: - time.sleep(1e-3) - - def start(self): - """ - Starts the FastAPI server in a separate thread if it is not already running. This method sets up the server to handle HTTP requests concurrently, enabling the Axon server to efficiently manage - incoming network requests. - - The method ensures that the server starts running in a non-blocking manner, allowing the Axon server to continue its other operations seamlessly. 
- """ - if not self.is_running: - self.should_exit = False - thread = threading.Thread(target=self._wrapper_run, daemon=True) - thread.start() - self.is_running = True - - def stop(self): - """ - Signals the FastAPI server to stop running. This method sets the :func:`should_exit` flag to ``True``, indicating that the server should cease its operations and exit the running thread. - - Stopping the server is essential for controlled shutdowns and resource management in the Axon server, especially during maintenance or when redeploying with updated configurations. - """ - if self.is_running: - self.should_exit = True - - -class axon: - """ - The ``axon`` class in Bittensor is a fundamental component that serves as the server-side interface for a neuron within the Bittensor network. - - This class is responsible for managing - incoming requests from other neurons and implements various mechanisms to ensure efficient - and secure network interactions. - - An axon relies on a FastAPI router to create endpoints for different message types. These - endpoints are crucial for handling various request types that a neuron might receive. The - class is designed to be flexible and customizable, allowing users to specify custom rules - for forwarding, blacklisting, prioritizing, and verifying incoming requests. The class also - includes internal mechanisms to manage a thread pool, supporting concurrent handling of - requests with defined priority levels. - - Methods in this class are equipped to deal with incoming requests from various scenarios in the - network and serve as the server face for a neuron. It accepts multiple arguments, like wallet, - configuration parameters, ip address, server binding port, external ip, external port and max - workers. Key methods involve managing and operating the FastAPI application router, including - the attachment and operation of endpoints. - - Key Features: - - - FastAPI router integration for endpoint creation and management. 
- - Customizable request handling including forwarding, blacklisting, and prioritization. - - Verification of incoming requests against custom-defined functions. - - Thread pool management for concurrent request handling. - - Command-line argument support for user-friendly program interaction. - - Example Usage:: - - import bittensor - # Define your custom synapse class - class MySyanpse( bittensor.Synapse ): - input: int = 1 - output: int = None - - # Define a custom request forwarding function using your synapse class - def forward( synapse: MySyanpse ) -> MySyanpse: - # Apply custom logic to synapse and return it - synapse.output = 2 - return synapse - - # Define a custom request verification function - def verify_my_synapse( synapse: MySyanpse ): - # Apply custom verification logic to synapse - # Optionally raise Exception - assert synapse.input == 1 - ... - - # Define a custom request blacklist fucntion - def blacklist_my_synapse( synapse: MySyanpse ) -> bool: - # Apply custom blacklist - return False ( if non blacklisted ) or True ( if blacklisted ) - - # Define a custom request priority fucntion - def prioritize_my_synape( synapse: MySyanpse ) -> float: - # Apply custom priority - return 1.0 - - # Initialize Axon object with a custom configuration - my_axon = bittensor.axon( - config=my_config, - wallet=my_wallet, - port=9090, - ip="192.0.2.0", - external_ip="203.0.113.0", - external_port=7070 - ) - - # Attach the endpoint with the specified verification and forward functions. - my_axon.attach( - forward_fn = forward_my_synapse, - verify_fn = verify_my_synapse, - blacklist_fn = blacklist_my_synapse, - priority_fn = prioritize_my_synape - ) - - # Serve and start your axon. - my_axon.serve( - netuid = ... - subtensor = ... - ).start() - - # If you have multiple forwarding functions, you can chain attach them. 
- my_axon.attach( - forward_fn = forward_my_synapse, - verify_fn = verify_my_synapse, - blacklist_fn = blacklist_my_synapse, - priority_fn = prioritize_my_synape - ).attach( - forward_fn = forward_my_synapse_2, - verify_fn = verify_my_synapse_2, - blacklist_fn = blacklist_my_synapse_2, - priority_fn = prioritize_my_synape_2 - ).serve( - netuid = ... - subtensor = ... - ).start() - - Args: - wallet (bittensor.wallet, optional): Wallet with hotkey and coldkeypub. - config (bittensor.config, optional): Configuration parameters for the axon. - port (int, optional): Port for server binding. - ip (str, optional): Binding IP address. - external_ip (str, optional): External IP address to broadcast. - external_port (int, optional): External port to broadcast. - max_workers (int, optional): Number of active threads for request handling. - - Returns: - bittensor.axon: An instance of the axon class configured as per the provided arguments. - - Note: - This class is a core part of Bittensor's decentralized network for machine intelligence, - allowing neurons to communicate effectively and securely. - - Importance and Functionality - Endpoint Registration - This method dynamically registers API endpoints based on the Synapse used, allowing the Axon to respond to specific types of requests and synapses. - - Customization of Request Handling - By attaching different functions, the Axon can customize how it - handles, verifies, prioritizes, and potentially blocks incoming requests, making it adaptable to various network scenarios. - - Security and Efficiency - The method contributes to both the security (via verification and blacklisting) and efficiency (via prioritization) of request handling, which are crucial in a decentralized network environment. - - Flexibility - The ability to define custom functions for different aspects of request handling provides great flexibility, allowing the Axon to be tailored to specific needs and use cases within the Bittensor network. 
- - Error Handling and Validation - The method ensures that the attached functions meet the required - signatures, providing error handling to prevent runtime issues. - - """ - - def __init__( - self, - wallet: Optional["bittensor.wallet"] = None, - config: Optional["bittensor.config"] = None, - port: Optional[int] = None, - ip: Optional[str] = None, - external_ip: Optional[str] = None, - external_port: Optional[int] = None, - max_workers: Optional[int] = None, - ): - r"""Creates a new bittensor.Axon object from passed arguments. - Args: - config (:obj:`Optional[bittensor.config]`, `optional`): - bittensor.axon.config() - wallet (:obj:`Optional[bittensor.wallet]`, `optional`): - bittensor wallet with hotkey and coldkeypub. - port (:type:`Optional[int]`, `optional`): - Binding port. - ip (:type:`Optional[str]`, `optional`): - Binding ip. - external_ip (:type:`Optional[str]`, `optional`): - The external ip of the server to broadcast to the network. - external_port (:type:`Optional[int]`, `optional`): - The external port of the server to broadcast to the network. - max_workers (:type:`Optional[int]`, `optional`): - Used to create the threadpool if not passed, specifies the number of active threads servicing requests. - """ - # Build and check config. - if config is None: - config = axon.config() - config = copy.deepcopy(config) - config.axon.ip = ip or config.axon.get("ip", bittensor.defaults.axon.ip) - config.axon.port = port or config.axon.get("port", bittensor.defaults.axon.port) - config.axon.external_ip = external_ip or config.axon.get( - "external_ip", bittensor.defaults.axon.external_ip - ) - config.axon.external_port = external_port or config.axon.get( - "external_port", bittensor.defaults.axon.external_port - ) - config.axon.max_workers = max_workers or config.axon.get( - "max_workers", bittensor.defaults.axon.max_workers - ) - axon.check_config(config) - self.config = config # type: ignore [method-assign] - - # Get wallet or use default. 
- self.wallet = wallet or bittensor.wallet() - - # Build axon objects. - self.uuid = str(uuid.uuid1()) - self.ip = self.config.axon.ip - self.port = self.config.axon.port - self.external_ip = ( - self.config.axon.external_ip - if self.config.axon.external_ip is not None - else bittensor.utils.networking.get_external_ip() - ) - self.external_port = ( - self.config.axon.external_port - if self.config.axon.external_port is not None - else self.config.axon.port - ) - self.full_address = str(self.config.axon.ip) + ":" + str(self.config.axon.port) - self.started = False - - # Build middleware - self.thread_pool = bittensor.PriorityThreadPoolExecutor( - max_workers=self.config.axon.max_workers - ) - self.nonces: Dict[str, int] = {} - - # Request default functions. - self.forward_class_types: Dict[str, List[Signature]] = {} - self.blacklist_fns: Dict[str, Optional[Callable]] = {} - self.priority_fns: Dict[str, Optional[Callable]] = {} - self.forward_fns: Dict[str, Optional[Callable]] = {} - self.verify_fns: Dict[str, Optional[Callable]] = {} - - # Instantiate FastAPI - self.app = FastAPI() - log_level = "trace" if bittensor.logging.__trace_on__ else "critical" - self.fast_config = uvicorn.Config( - self.app, host="0.0.0.0", port=self.config.axon.port, log_level=log_level - ) - self.fast_server = FastAPIThreadedServer(config=self.fast_config) - self.router = APIRouter() - self.app.include_router(self.router) - - # Build ourselves as the middleware. - self.middleware_cls = AxonMiddleware - self.app.add_middleware(self.middleware_cls, axon=self) - - # Attach default forward. 
- def ping(r: bittensor.Synapse) -> bittensor.Synapse: - return r - - self.attach( - forward_fn=ping, verify_fn=None, blacklist_fn=None, priority_fn=None - ) - - def info(self) -> "bittensor.AxonInfo": - """Returns the axon info object associated with this axon.""" - return bittensor.AxonInfo( - version=bittensor.__version_as_int__, - ip=self.external_ip, - ip_type=networking.ip_version(self.external_ip), - port=self.external_port, - hotkey=self.wallet.hotkey.ss58_address, - coldkey=self.wallet.coldkeypub.ss58_address, - protocol=4, - placeholder1=0, - placeholder2=0, - ) - - def attach( - self, - forward_fn: Callable, - blacklist_fn: Optional[Callable] = None, - priority_fn: Optional[Callable] = None, - verify_fn: Optional[Callable] = None, - ) -> "bittensor.axon": - """ - - Attaches custom functions to the Axon server for handling incoming requests. This method enables - the Axon to define specific behaviors for request forwarding, verification, blacklisting, and - prioritization, thereby customizing its interaction within the Bittensor network. - - Registers an API endpoint to the FastAPI application router. - It uses the name of the first argument of the :func:`forward_fn` function as the endpoint name. - - The attach method in the Bittensor framework's axon class is a crucial function for registering - API endpoints to the Axon's FastAPI application router. This method allows the Axon server to - define how it handles incoming requests by attaching functions for forwarding, verifying, - blacklisting, and prioritizing requests. It's a key part of customizing the server's behavior - and ensuring efficient and secure handling of requests within the Bittensor network. - - Args: - forward_fn (Callable): Function to be called when the API endpoint is accessed. It should have at least one argument. - blacklist_fn (Callable, optional): Function to filter out undesired requests. It should take the same arguments as :func:`forward_fn` and return a boolean value. 
Defaults to ``None``, meaning no blacklist filter will be used. - priority_fn (Callable, optional): Function to rank requests based on their priority. It should take the same arguments as :func:`forward_fn` and return a numerical value representing the request's priority. Defaults to ``None``, meaning no priority sorting will be applied. - verify_fn (Callable, optional): Function to verify requests. It should take the same arguments as :func:`forward_fn` and return a boolean value. If ``None``, :func:`self.default_verify` function will be used. - - Note: - The methods :func:`forward_fn`, :func:`blacklist_fn`, :func:`priority_fn`, and :func:`verify_fn` should be designed to receive the same parameters. - - Raises: - AssertionError: If :func:`forward_fn` does not have the signature: ``forward( synapse: YourSynapse ) -> synapse``. - AssertionError: If :func:`blacklist_fn` does not have the signature: ``blacklist( synapse: YourSynapse ) -> bool``. - AssertionError: If :func:`priority_fn` does not have the signature: ``priority( synapse: YourSynapse ) -> float``. - AssertionError: If :func:`verify_fn` does not have the signature: ``verify( synapse: YourSynapse ) -> None``. - - Returns: - self: Returns the instance of the AxonServer class for potential method chaining. - - Example Usage:: - - def forward_custom(synapse: MyCustomSynapse) -> MyCustomSynapse: - # Custom logic for processing the request - return synapse - - def blacklist_custom(synapse: MyCustomSynapse) -> Tuple[bool, str]: - return True, "Allowed!" - - def priority_custom(synapse: MyCustomSynapse) -> float: - return 1.0 - - def verify_custom(synapse: MyCustomSynapse): - # Custom logic for verifying the request - pass - - my_axon = bittensor.axon(...) 
- my_axon.attach(forward_fn=forward_custom, verify_fn=verify_custom) - - Note: - The :func:`attach` method is fundamental in setting up the Axon server's request handling capabilities, - enabling it to participate effectively and securely in the Bittensor network. The flexibility - offered by this method allows developers to tailor the Axon's behavior to specific requirements and - use cases. - """ - forward_sig = signature(forward_fn) - try: - first_param = next(iter(forward_sig.parameters.values())) - except StopIteration: - raise ValueError( - "The forward_fn first argument must be a subclass of bittensor.Synapse, but it has no arguments" - ) - - param_class = first_param.annotation - assert issubclass( - param_class, bittensor.Synapse - ), "The first argument of forward_fn must inherit from bittensor.Synapse" - request_name = param_class.__name__ - - async def endpoint(*args, **kwargs): - start_time = time.time() - response = forward_fn(*args, **kwargs) - if isinstance(response, Awaitable): - response = await response - if isinstance(response, bittensor.Synapse): - return await self.middleware_cls.synapse_to_response( - synapse=response, start_time=start_time - ) - else: - response_synapse = getattr(response, "synapse", None) - if response_synapse is None: - warnings.warn( - "The response synapse is None. The input synapse will be used as the response synapse. " - "Reliance on forward_fn modifying input synapse as a side-effects is deprecated. 
" - "Explicitly set `synapse` on response object instead.", - DeprecationWarning, - ) - # Replace with `return response` in next major version - response_synapse = args[0] - - return await self.middleware_cls.synapse_to_response( - synapse=response_synapse, - start_time=start_time, - response_override=response, - ) - - return_annotation = forward_sig.return_annotation - - if isinstance(return_annotation, type) and issubclass( - return_annotation, bittensor.Synapse - ): - if issubclass( - return_annotation, - bittensor.StreamingSynapse, - ): - warnings.warn( - "The forward_fn return annotation is a subclass of bittensor.StreamingSynapse. " - "Most likely the correct return annotation would be BTStreamingResponse." - ) - else: - return_annotation = JSONResponse - - endpoint.__signature__ = Signature( # type: ignore - parameters=list(forward_sig.parameters.values()), - return_annotation=return_annotation, - ) - - # Add the endpoint to the router, making it available on both GET and POST methods - self.router.add_api_route( - f"/{request_name}", - endpoint, - methods=["GET", "POST"], - dependencies=[Depends(self.verify_body_integrity)], - ) - self.app.include_router(self.router) - - # Check the signature of blacklist_fn, priority_fn and verify_fn if they are provided - expected_params = [ - Parameter( - "synapse", - Parameter.POSITIONAL_OR_KEYWORD, - annotation=forward_sig.parameters[ - list(forward_sig.parameters)[0] - ].annotation, - ) - ] - if blacklist_fn: - blacklist_sig = Signature( - expected_params, return_annotation=Tuple[bool, str] - ) - assert ( - signature(blacklist_fn) == blacklist_sig - ), "The blacklist_fn function must have the signature: blacklist( synapse: {} ) -> Tuple[bool, str]".format( - request_name - ) - if priority_fn: - priority_sig = Signature(expected_params, return_annotation=float) - assert ( - signature(priority_fn) == priority_sig - ), "The priority_fn function must have the signature: priority( synapse: {} ) -> float".format( - 
request_name - ) - if verify_fn: - verify_sig = Signature(expected_params, return_annotation=None) - assert ( - signature(verify_fn) == verify_sig - ), "The verify_fn function must have the signature: verify( synapse: {} ) -> None".format( - request_name - ) - - # Store functions in appropriate attribute dictionaries - self.forward_class_types[request_name] = param_class - self.blacklist_fns[request_name] = blacklist_fn - self.priority_fns[request_name] = priority_fn - self.verify_fns[request_name] = ( - verify_fn or self.default_verify - ) # Use 'default_verify' if 'verify_fn' is None - self.forward_fns[request_name] = forward_fn - - return self - - @classmethod - def config(cls) -> "bittensor.config": - """ - Parses the command-line arguments to form a Bittensor configuration object. - - Returns: - bittensor.config: Configuration object with settings from command-line arguments. - """ - parser = argparse.ArgumentParser() - axon.add_args(parser) # Add specific axon-related arguments - return bittensor.config(parser, args=[]) - - @classmethod - def help(cls): - """ - Prints the help text (list of command-line arguments and their descriptions) to stdout. - """ - parser = argparse.ArgumentParser() - axon.add_args(parser) # Add specific axon-related arguments - print(cls.__new__.__doc__) # Print docstring of the class - parser.print_help() # Print parser's help text - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser, prefix: Optional[str] = None): - """ - Adds AxonServer-specific command-line arguments to the argument parser. - - Args: - parser (argparse.ArgumentParser): Argument parser to which the arguments will be added. - prefix (str, optional): Prefix to add to the argument names. Defaults to None. - - Note: - Environment variables are used to define default values for the arguments. - """ - prefix_str = "" if prefix is None else prefix + "." 
- try: - # Get default values from environment variables or use default values - default_axon_port = os.getenv("BT_AXON_PORT") or 8091 - default_axon_ip = os.getenv("BT_AXON_IP") or "[::]" - default_axon_external_port = os.getenv("BT_AXON_EXTERNAL_PORT") or None - default_axon_external_ip = os.getenv("BT_AXON_EXTERNAL_IP") or None - default_axon_max_workers = os.getenv("BT_AXON_MAX_WORERS") or 10 - - # Add command-line arguments to the parser - parser.add_argument( - "--" + prefix_str + "axon.port", - type=int, - help="The local port this axon endpoint is bound to. i.e. 8091", - default=default_axon_port, - ) - parser.add_argument( - "--" + prefix_str + "axon.ip", - type=str, - help="""The local ip this axon binds to. ie. [::]""", - default=default_axon_ip, - ) - parser.add_argument( - "--" + prefix_str + "axon.external_port", - type=int, - required=False, - help="""The public port this axon broadcasts to the network. i.e. 8091""", - default=default_axon_external_port, - ) - parser.add_argument( - "--" + prefix_str + "axon.external_ip", - type=str, - required=False, - help="""The external ip this axon broadcasts to the network to. ie. [::]""", - default=default_axon_external_ip, - ) - parser.add_argument( - "--" + prefix_str + "axon.max_workers", - type=int, - help="""The maximum number connection handler threads working simultaneously on this endpoint. - The grpc server distributes new worker threads to service requests up to this number.""", - default=default_axon_max_workers, - ) - - except argparse.ArgumentError: - # Exception handling for re-parsing arguments - pass - - async def verify_body_integrity(self, request: Request): - """ - The ``verify_body_integrity`` method in the Bittensor framework is a key security function within the - Axon server's middleware. It is responsible for ensuring the integrity of the body of incoming HTTP - requests. 
- - It asynchronously verifies the integrity of the body of a request by comparing the hash of required fields - with the corresponding hashes provided in the request headers. This method is critical for ensuring - that the incoming request payload has not been altered or tampered with during transmission, establishing - a level of trust and security between the sender and receiver in the network. - - Args: - request (Request): The incoming FastAPI request object containing both headers and the request body. - - Returns: - dict: Returns the parsed body of the request as a dictionary if all the hash comparisons match, - indicating that the body is intact and has not been tampered with. - - Raises: - JSONResponse: Raises a JSONResponse with a 400 status code if any of the hash comparisons fail, - indicating a potential integrity issue with the incoming request payload. - The response includes the detailed error message specifying which field has a hash mismatch. - - This method performs several key functions: - - 1. Decoding and loading the request body for inspection. - 2. Gathering required field names for hash comparison from the Axon configuration. - 3. Loading and parsing the request body into a dictionary. - 4. Reconstructing the Synapse object and recomputing the hash for verification and logging. - 5. Comparing the recomputed hash with the hash provided in the request headers for verification. - - Note: - The integrity verification is an essential step in ensuring the security of the data exchange - within the Bittensor network. It helps prevent tampering and manipulation of data during transit, - thereby maintaining the reliability and trust in the network communication. 
- """ - # Await and load the request body so we can inspect it - body = await request.body() - request_body = body.decode() if isinstance(body, bytes) else body - - request_name = request.url.path.split("/")[1] - - # Load the body dict and check if all required field hashes match - body_dict = json.loads(request_body) - - # Reconstruct the synapse object from the body dict and recompute the hash - syn = self.forward_class_types[request_name](**body_dict) # type: ignore - parsed_body_hash = syn.body_hash # Rehash the body from request - - body_hash = request.headers.get("computed_body_hash", "") - if parsed_body_hash != body_hash: - raise ValueError( - f"Hash mismatch between header body hash {body_hash} and parsed body hash {parsed_body_hash}" - ) - - # If body is good, return the parsed body so that it can be passed onto the route function - return body_dict - - @classmethod - def check_config(cls, config: "bittensor.config"): - """ - This method checks the configuration for the axon's port and wallet. - - Args: - config (bittensor.config): The config object holding axon settings. - - Raises: - AssertionError: If the axon or external ports are not in range [1024, 65535] - """ - assert ( - config.axon.port > 1024 and config.axon.port < 65535 - ), "Axon port must be in range [1024, 65535]" - - assert config.axon.external_port is None or ( - config.axon.external_port > 1024 and config.axon.external_port < 65535 - ), "External port must be in range [1024, 65535]" - - def to_string(self): - """ - Provides a human-readable representation of the AxonInfo for this Axon. - """ - return self.info().to_string() - - def __str__(self) -> str: - """ - Provides a human-readable representation of the Axon instance. 
- """ - return "Axon({}, {}, {}, {}, {})".format( - self.ip, - self.port, - self.wallet.hotkey.ss58_address, - "started" if self.started else "stopped", - list(self.forward_fns.keys()), - ) - - def __repr__(self) -> str: - """ - Provides a machine-readable (unambiguous) representation of the Axon instance. - It is made identical to __str__ in this case. - """ - return self.__str__() - - def __del__(self): - """ - This magic method is called when the Axon object is about to be destroyed. - It ensures that the Axon server shuts down properly. - """ - self.stop() - - def start(self) -> "bittensor.axon": - """ - Starts the Axon server and its underlying FastAPI server thread, transitioning the state of the - Axon instance to ``started``. This method initiates the server's ability to accept and process - incoming network requests, making it an active participant in the Bittensor network. - - The start method triggers the FastAPI server associated with the Axon to begin listening for - incoming requests. It is a crucial step in making the neuron represented by this Axon operational - within the Bittensor network. - - Returns: - bittensor.axon: The Axon instance in the 'started' state. - - Example:: - - my_axon = bittensor.axon(...) - ... # setup axon, attach functions, etc. - my_axon.start() # Starts the axon server - - Note: - After invoking this method, the Axon is ready to handle requests as per its configured endpoints and custom logic. - """ - self.fast_server.start() - self.started = True - return self - - def stop(self) -> "bittensor.axon": - """ - Stops the Axon server and its underlying GRPC server thread, transitioning the state of the Axon - instance to ``stopped``. This method ceases the server's ability to accept new network requests, - effectively removing the neuron's server-side presence in the Bittensor network. - - By stopping the FastAPI server, the Axon ceases to listen for incoming requests, and any existing - connections are gracefully terminated. 
This function is typically used when the neuron is being - shut down or needs to temporarily go offline. - - Returns: - bittensor.axon: The Axon instance in the 'stopped' state. - - Example:: - - my_axon = bittensor.axon(...) - my_axon.start() - ... - my_axon.stop() # Stops the axon server - - - Note: - It is advisable to ensure that all ongoing processes or requests are completed or properly handled before invoking this method. - """ - self.fast_server.stop() - self.started = False - return self - - def serve( - self, netuid: int, subtensor: Optional[bittensor.subtensor] = None - ) -> "bittensor.axon": - """ - Serves the Axon on the specified subtensor connection using the configured wallet. This method - registers the Axon with a specific subnet within the Bittensor network, identified by the ``netuid``. - It links the Axon to the broader network, allowing it to participate in the decentralized exchange - of information. - - Args: - netuid (int): The unique identifier of the subnet to register on. This ID is essential for the Axon to correctly position itself within the Bittensor network topology. - subtensor (bittensor.subtensor, optional): The subtensor connection to use for serving. If not provided, a new connection is established based on default configurations. - - Returns: - bittensor.axon: The Axon instance that is now actively serving on the specified subtensor. - - Example:: - - my_axon = bittensor.axon(...) - subtensor = bt.subtensor(network="local") # Local by default - my_axon.serve(netuid=1, subtensor=subtensor) # Serves the axon on subnet with netuid 1 - - Note: - The ``serve`` method is crucial for integrating the Axon into the Bittensor network, allowing it - to start receiving and processing requests from other neurons. 
- """ - if subtensor is not None and hasattr(subtensor, "serve_axon"): - subtensor.serve_axon(netuid=netuid, axon=self) - return self - - async def default_verify(self, synapse: bittensor.Synapse): - """ - This method is used to verify the authenticity of a received message using a digital signature. - - It ensures that the message was not tampered with and was sent by the expected sender. - - The :func:`default_verify` method in the Bittensor framework is a critical security function within the - Axon server. It is designed to authenticate incoming messages by verifying their digital - signatures. This verification ensures the integrity of the message and confirms that it was - indeed sent by the claimed sender. The method plays a pivotal role in maintaining the trustworthiness - and reliability of the communication within the Bittensor network. - - Key Features - Security Assurance - The default_verify method is crucial for ensuring the security of the Bittensor network. By verifying digital signatures, it guards against unauthorized access - and data manipulation. - - Preventing Replay Attacks - The method checks for increasing nonce values, which is a vital - step in preventing replay attacks. A replay attack involves an adversary reusing or - delaying the transmission of a valid data transmission to deceive the receiver. - The first time a nonce is seen, it is checked for freshness by ensuring it is - within an acceptable delta time range. - - Authenticity and Integrity Checks - By verifying that the message's digital signature matches - its content, the method ensures the message's authenticity (it comes from the claimed - sender) and integrity (it hasn't been altered during transmission). - - Trust in Communication - This method fosters trust in the network communication. Neurons - (nodes in the Bittensor network) can confidently interact, knowing that the messages they - receive are genuine and have not been tampered with. 
- - Cryptographic Techniques - The method's reliance on asymmetric encryption techniques is a - cornerstone of modern cryptographic security, ensuring that only entities with the correct - cryptographic keys can participate in secure communication. - - Args: - synapse: bittensor.Synapse - bittensor request synapse. - - Raises: - Exception: If the ``receiver_hotkey`` doesn't match with ``self.receiver_hotkey``. - Exception: If the nonce is not larger than the previous nonce for the same endpoint key. - Exception: If the signature verification fails. - - After successful verification, the nonce for the given endpoint key is updated. - - Note: - The verification process assumes the use of an asymmetric encryption algorithm, - where the sender signs the message with their private key and the receiver verifies the - signature using the sender's public key. - """ - # Build the keypair from the dendrite_hotkey - if synapse.dendrite is not None: - keypair = Keypair(ss58_address=synapse.dendrite.hotkey) - - # Build the signature messages. - message = f"{synapse.dendrite.nonce}.{synapse.dendrite.hotkey}.{self.wallet.hotkey.ss58_address}.{synapse.dendrite.uuid}.{synapse.computed_body_hash}" - - # Build the unique endpoint key. - endpoint_key = f"{synapse.dendrite.hotkey}:{synapse.dendrite.uuid}" - - # Requests must have nonces to be safe from replays - if synapse.dendrite.nonce is None: - raise Exception("Missing Nonce") - - # Newer nonce structure post v7.2 - if ( - synapse.dendrite.version is not None - and synapse.dendrite.version >= V_7_2_0 - ): - # If we don't have a nonce stored, ensure that the nonce falls within - # a reasonable delta. 
- current_time_ns = time.time_ns() - allowed_window_ns = allowed_nonce_window_ns( - current_time_ns, synapse.timeout - ) - - if ( - self.nonces.get(endpoint_key) is None - and synapse.dendrite.nonce <= allowed_window_ns - ): - diff_seconds, allowed_delta_seconds = calculate_diff_seconds( - current_time_ns, synapse.timeout, synapse.dendrite.nonce - ) - raise Exception( - f"Nonce is too old: acceptable delta is {allowed_delta_seconds:.2f} seconds but request was {diff_seconds:.2f} seconds old" - ) - - # If a nonce is stored, ensure the new nonce - # is greater than the previous nonce - if ( - self.nonces.get(endpoint_key) is not None - and synapse.dendrite.nonce <= self.nonces[endpoint_key] - ): - raise Exception("Nonce is too old, a newer one was last processed") - # Older nonce structure pre v7.2 - else: - if ( - self.nonces.get(endpoint_key) is not None - and synapse.dendrite.nonce <= self.nonces[endpoint_key] - ): - raise Exception("Nonce is too old, a newer one was last processed") - - if not keypair.verify(message, synapse.dendrite.signature): - raise Exception( - f"Signature mismatch with {message} and {synapse.dendrite.signature}" - ) - - # Success - self.nonces[endpoint_key] = synapse.dendrite.nonce # type: ignore - else: - raise SynapseDendriteNoneException(synapse=synapse) - - -def create_error_response(synapse: bittensor.Synapse): - if synapse.axon is None: - return JSONResponse( - status_code=400, - headers=synapse.to_headers(), - content={"message": "Invalid request name"}, - ) - else: - return JSONResponse( - status_code=synapse.axon.status_code or 400, - headers=synapse.to_headers(), - content={"message": synapse.axon.status_message}, - ) - - -def log_and_handle_error( - synapse: bittensor.Synapse, - exception: Exception, - status_code: typing.Optional[int] = None, - start_time: typing.Optional[float] = None, -) -> bittensor.Synapse: - if isinstance(exception, SynapseException): - synapse = exception.synapse or synapse - - 
bittensor.logging.trace(f"Forward handled exception: {exception}") - else: - bittensor.logging.trace(f"Forward exception: {traceback.format_exc()}") - - if synapse.axon is None: - synapse.axon = bittensor.TerminalInfo() - - # Set the status code of the synapse to the given status code. - error_id = str(uuid.uuid4()) - error_type = exception.__class__.__name__ - - # Log the detailed error message for internal use - bittensor.logging.error(f"{error_type}#{error_id}: {exception}") - - if not status_code and synapse.axon.status_code != 100: - status_code = synapse.axon.status_code - status_message = synapse.axon.status_message - if isinstance(exception, SynapseException): - if not status_code: - if isinstance(exception, PriorityException): - status_code = 503 - elif isinstance(exception, UnknownSynapseError): - status_code = 404 - elif isinstance(exception, BlacklistedException): - status_code = 403 - elif isinstance(exception, NotVerifiedException): - status_code = 401 - elif isinstance(exception, (InvalidRequestNameError, SynapseParsingError)): - status_code = 400 - else: - status_code = 500 - status_message = status_message or str(exception) - else: - status_code = status_code or 500 - status_message = status_message or f"Internal Server Error #{error_id}" - - # Set a user-friendly error message - synapse.axon.status_code = status_code - synapse.axon.status_message = status_message - - if start_time: - # Calculate the processing time by subtracting the start time from the current time. - synapse.axon.process_time = str(time.time() - start_time) # type: ignore - - return synapse - - -class AxonMiddleware(BaseHTTPMiddleware): - """ - The `AxonMiddleware` class is a key component in the Axon server, responsible for processing all incoming requests. - - It handles the essential tasks of verifying requests, executing blacklist checks, - running priority functions, and managing the logging of messages and errors. 
Additionally, the class - is responsible for updating the headers of the response and executing the requested functions. - - This middleware acts as an intermediary layer in request handling, ensuring that each request is - processed according to the defined rules and protocols of the Bittensor network. It plays a pivotal - role in maintaining the integrity and security of the network communication. - - Args: - app (FastAPI): An instance of the FastAPI application to which this middleware is attached. - axon (bittensor.axon): The Axon instance that will process the requests. - - The middleware operates by intercepting incoming requests, performing necessary preprocessing - (like verification and priority assessment), executing the request through the Axon's endpoints, and - then handling any postprocessing steps such as response header updating and logging. - """ - - def __init__(self, app: "AxonMiddleware", axon: "bittensor.axon"): - """ - Initialize the AxonMiddleware class. - - Args: - app (object): An instance of the application where the middleware processor is used. - axon (object): The axon instance used to process the requests. - """ - super().__init__(app) - self.axon = axon - - async def dispatch( - self, request: Request, call_next: RequestResponseEndpoint - ) -> Response: - """ - Asynchronously processes incoming HTTP requests and returns the corresponding responses. This - method acts as the central processing unit of the AxonMiddleware, handling each step in the - request lifecycle. - - Args: - request (Request): The incoming HTTP request to be processed. - call_next (RequestResponseEndpoint): A callable that processes the request and returns a response. - - Returns: - Response: The HTTP response generated after processing the request. - - This method performs several key functions: - - 1. Request Preprocessing: Sets up Synapse object from request headers and fills necessary information. - 2. Logging: Logs the start of request processing. - 3. 
Blacklist Checking: Verifies if the request is blacklisted. - 4. Request Verification: Ensures the authenticity and integrity of the request. - 5. Priority Assessment: Evaluates and assigns priority to the request. - 6. Request Execution: Calls the next function in the middleware chain to process the request. - 7. Response Postprocessing: Updates response headers and logs the end of the request processing. - - The method also handles exceptions and errors that might occur during each stage, ensuring that - appropriate responses are returned to the client. - """ - # Records the start time of the request processing. - start_time = time.time() - - try: - # Set up the synapse from its headers. - try: - synapse: bittensor.Synapse = await self.preprocess(request) - except Exception as exc: - if isinstance(exc, SynapseException) and exc.synapse is not None: - synapse = exc.synapse - else: - synapse = bittensor.Synapse() - raise - - # Logs the start of the request processing - if synapse.dendrite is not None: - bittensor.logging.trace( - f"axon | <-- | {request.headers.get('content-length', -1)} B | {synapse.name} | {synapse.dendrite.hotkey} | {synapse.dendrite.ip}:{synapse.dendrite.port} | 200 | Success " - ) - else: - bittensor.logging.trace( - f"axon | <-- | {request.headers.get('content-length', -1)} B | {synapse.name} | None | None | 200 | Success " - ) - - # Call the blacklist function - await self.blacklist(synapse) - - # Call verify and return the verified request - await self.verify(synapse) - - # Call the priority function - await self.priority(synapse) - - # Call the run function - response = await self.run(synapse, call_next, request) - - # Handle errors related to preprocess. 
- except InvalidRequestNameError as e: - if synapse.axon is None: - synapse.axon = bittensor.TerminalInfo() - synapse.axon.status_code = 400 - synapse.axon.status_message = str(e) - synapse = log_and_handle_error(synapse, e, start_time=start_time) - response = create_error_response(synapse) - except SynapseException as e: - synapse = e.synapse or synapse - synapse = log_and_handle_error(synapse, e, start_time=start_time) - response = create_error_response(synapse) - - # Handle all other errors. - except Exception as e: - synapse = log_and_handle_error(synapse, e, start_time=start_time) - response = create_error_response(synapse) - - # Logs the end of request processing and returns the response - finally: - # Log the details of the processed synapse, including total size, name, hotkey, IP, port, - # status code, and status message, using the debug level of the logger. - if synapse.dendrite is not None and synapse.axon is not None: - bittensor.logging.trace( - f"axon | --> | {response.headers.get('content-length', -1)} B | {synapse.name} | {synapse.dendrite.hotkey} | {synapse.dendrite.ip}:{synapse.dendrite.port} | {synapse.axon.status_code} | {synapse.axon.status_message}" - ) - elif synapse.axon is not None: - bittensor.logging.trace( - f"axon | --> | {response.headers.get('content-length', -1)} B | {synapse.name} | None | None | {synapse.axon.status_code} | {synapse.axon.status_message}" - ) - else: - bittensor.logging.trace( - f"axon | --> | {response.headers.get('content-length', -1)} B | {synapse.name} | None | None | 200 | Success " - ) - - # Return the response to the requester. - return response - - async def preprocess(self, request: Request) -> bittensor.Synapse: - """ - Performs the initial processing of the incoming request. This method is responsible for - extracting relevant information from the request and setting up the Synapse object, which - represents the state and context of the request within the Axon server. 
- - Args: - request (Request): The incoming request to be preprocessed. - - Returns: - bittensor.Synapse: The Synapse object representing the preprocessed state of the request. - - The preprocessing involves: - - 1. Extracting the request name from the URL path. - 2. Creating a Synapse instance from the request headers using the appropriate class type. - 3. Filling in the Axon and Dendrite information into the Synapse object. - 4. Signing the Synapse from the Axon side using the wallet hotkey. - - This method sets the foundation for the subsequent steps in the request handling process, - ensuring that all necessary information is encapsulated within the Synapse object. - """ - # Extracts the request name from the URL path. - try: - request_name = request.url.path.split("/")[1] - except Exception: - raise InvalidRequestNameError( - f"Improperly formatted request. Could not parser request {request.url.path}." - ) - - # Creates a synapse instance from the headers using the appropriate forward class type - # based on the request name obtained from the URL path. - request_synapse = self.axon.forward_class_types.get(request_name) - if request_synapse is None: - raise UnknownSynapseError( - f"Synapse name '{request_name}' not found. Available synapses {list(self.axon.forward_class_types.keys())}" - ) - - try: - synapse = request_synapse.from_headers(request.headers) # type: ignore - except Exception: - raise SynapseParsingError( - f"Improperly formatted request. Could not parse headers {request.headers} into synapse of type {request_name}." - ) - synapse.name = request_name - - # Fills the local axon information into the synapse. - synapse.axon.__dict__.update( - { - "version": str(bittensor.__version_as_int__), - "uuid": str(self.axon.uuid), - "nonce": time.time_ns(), - "status_code": 100, - } - ) - - # Fills the dendrite information into the synapse. 
- synapse.dendrite.__dict__.update( - {"port": str(request.client.port), "ip": str(request.client.host)} # type: ignore - ) - - # Signs the synapse from the axon side using the wallet hotkey. - message = f"{synapse.axon.nonce}.{synapse.dendrite.hotkey}.{synapse.axon.hotkey}.{synapse.axon.uuid}" - synapse.axon.signature = f"0x{self.axon.wallet.hotkey.sign(message).hex()}" - - # Return the setup synapse. - return synapse - - async def verify(self, synapse: bittensor.Synapse): - """ - Verifies the authenticity and integrity of the request. This method ensures that the incoming - request meets the predefined security and validation criteria. - - Args: - synapse (bittensor.Synapse): The Synapse object representing the request. - - Raises: - Exception: If the verification process fails due to unmet criteria or security concerns. - - The verification process involves: - - 1. Retrieving the specific verification function for the request's Synapse type. - 2. Executing the verification function and handling any exceptions that arise. - - Successful verification allows the request to proceed further in the processing pipeline, while - failure results in an appropriate exception being raised. - """ - # Start of the verification process. Verification is the process where we ensure that - # the incoming request is from a trusted source or fulfills certain requirements. - # We get a specific verification function from 'verify_fns' dictionary that corresponds - # to our request's name. Each request name (synapse name) has its unique verification function. - verify_fn = ( - self.axon.verify_fns.get(synapse.name) if synapse.name is not None else None - ) - - # If a verification function exists for the request's name - if verify_fn: - try: - # We attempt to run the verification function using the synapse instance - # created from the request. If this function runs without throwing an exception, - # it means that the verification was successful. 
- ( - await verify_fn(synapse) - if inspect.iscoroutinefunction(verify_fn) - else verify_fn(synapse) - ) - except Exception as e: - # If there was an exception during the verification process, we log that - # there was a verification exception. - bittensor.logging.trace(f"Verify exception {str(e)}") - - # Check if the synapse.axon object exists - if synapse.axon is not None: - # We set the status code of the synapse to "401" which denotes an unauthorized access. - synapse.axon.status_code = 401 - else: - # If the synapse.axon object doesn't exist, raise an exception. - raise Exception("Synapse.axon object is None") - - # We raise an exception to stop the process and return the error to the requester. - # The error message includes the original exception message. - raise NotVerifiedException( - f"Not Verified with error: {str(e)}", synapse=synapse - ) - - async def blacklist(self, synapse: bittensor.Synapse): - """ - Checks if the request should be blacklisted. This method ensures that requests from disallowed - sources or with malicious intent are blocked from processing. This can be extremely useful for - preventing spam or other forms of abuse. The blacklist is a list of keys or identifiers that - are prohibited from accessing certain resources. - - Args: - synapse (bittensor.Synapse): The Synapse object representing the request. - - Raises: - Exception: If the request is found in the blacklist. - - The blacklist check involves: - - 1. Retrieving the blacklist checking function for the request's Synapse type. - 2. Executing the check and handling the case where the request is blacklisted. - - If a request is blacklisted, it is blocked, and an exception is raised to halt further processing. - """ - # A blacklist is a list of keys or identifiers - # that are prohibited from accessing certain resources. - # We retrieve the blacklist checking function from the 'blacklist_fns' dictionary - # that corresponds to the request's name (synapse name). 
- blacklist_fn = ( - self.axon.blacklist_fns.get(synapse.name) - if synapse.name is not None - else None - ) - - # If a blacklist checking function exists for the request's name - if blacklist_fn: - # We execute the blacklist checking function using the synapse instance as input. - # If the function returns True, it means that the key or identifier is blacklisted. - blacklisted, reason = ( - await blacklist_fn(synapse) - if inspect.iscoroutinefunction(blacklist_fn) - else blacklist_fn(synapse) - ) - if blacklisted: - # We log that the key or identifier is blacklisted. - bittensor.logging.trace(f"Blacklisted: {blacklisted}, {reason}") - - # Check if the synapse.axon object exists - if synapse.axon is not None: - # We set the status code of the synapse to "403" which indicates a forbidden access. - synapse.axon.status_code = 403 - else: - # If the synapse.axon object doesn't exist, raise an exception. - raise Exception("Synapse.axon object is None") - - # We raise an exception to halt the process and return the error message to the requester. - raise BlacklistedException( - f"Forbidden. Key is blacklisted: {reason}.", synapse=synapse - ) - - async def priority(self, synapse: bittensor.Synapse): - """ - Executes the priority function for the request. This method assesses and assigns a priority - level to the request, determining its urgency and importance in the processing queue. - - Args: - synapse (bittensor.Synapse): The Synapse object representing the request. - - Raises: - Exception: If the priority assessment process encounters issues, such as timeouts. - - The priority function plays a crucial role in managing the processing load and ensuring that - critical requests are handled promptly. - """ - # Retrieve the priority function from the 'priority_fns' dictionary that corresponds - # to the request's name (synapse name). 
- priority_fn = self.axon.priority_fns.get(str(synapse.name), None) - - async def submit_task( - executor: PriorityThreadPoolExecutor, priority: float - ) -> Tuple[float, Any]: - """ - Submits the given priority function to the specified executor for asynchronous execution. - The function will run in the provided executor and return the priority value along with the result. - - Args: - executor: The executor in which the priority function will be run. - priority: The priority function to be executed. - - Returns: - tuple: A tuple containing the priority value and the result of the priority function execution. - """ - loop = asyncio.get_event_loop() - future = loop.run_in_executor(executor, lambda: priority) - result = await future - return priority, result - - # If a priority function exists for the request's name - if priority_fn: - try: - # Execute the priority function and get the priority value. - priority = ( - await priority_fn(synapse) - if inspect.iscoroutinefunction(priority_fn) - else priority_fn(synapse) - ) - - # Submit the task to the thread pool for execution with the given priority. - # The submit_task function will handle the execution and return the result. - _, result = await submit_task(self.axon.thread_pool, priority) - - except TimeoutError as e: - # If the execution of the priority function exceeds the timeout, - # it raises an exception to handle the timeout error. - bittensor.logging.trace(f"TimeoutError: {str(e)}") - - # Set the status code of the synapse to 408 which indicates a timeout error. - if synapse.axon is not None: - synapse.axon.status_code = 408 - - # Raise an exception to stop the process and return an appropriate error message to the requester. 
- raise PriorityException( - f"Response timeout after: {synapse.timeout}s", synapse=synapse - ) - - async def run( - self, - synapse: bittensor.Synapse, - call_next: RequestResponseEndpoint, - request: Request, - ) -> Response: - """ - Executes the requested function as part of the request processing pipeline. This method calls - the next function in the middleware chain to process the request and generate a response. - - Args: - synapse (bittensor.Synapse): The Synapse object representing the request. - call_next (RequestResponseEndpoint): The next function in the middleware chain to process requests. - request (Request): The original HTTP request. - - Returns: - Response: The HTTP response generated by processing the request. - - This method is a critical part of the request lifecycle, where the actual processing of the - request takes place, leading to the generation of a response. - """ - try: - # The requested function is executed by calling the 'call_next' function, - # passing the original request as an argument. This function processes the request - # and returns the response. - response = await call_next(request) - - except Exception as e: - # Log the exception for debugging purposes. - bittensor.logging.trace(f"Run exception: {str(e)}") - raise - - # Return the starlet response - return response - - @classmethod - async def synapse_to_response( - cls, - synapse: bittensor.Synapse, - start_time: float, - *, - response_override: Optional[Response] = None, - ) -> Response: - """ - Converts the Synapse object into a JSON response with HTTP headers. - - Args: - synapse: The Synapse object representing the request. - start_time: The timestamp when the request processing started. - response_override: - Instead of serializing the synapse, mutate the provided response object. - This is only really useful for StreamingSynapse responses. - - Returns: - Response: The final HTTP response, with updated headers, ready to be sent back to the client. 
- - Postprocessing is the last step in the request handling process, ensuring that the response is - properly formatted and contains all necessary information. - """ - if synapse.axon is None: - synapse.axon = bittensor.TerminalInfo() - - if synapse.axon.status_code is None: - synapse.axon.status_code = 200 - - if synapse.axon.status_code == 200 and not synapse.axon.status_message: - synapse.axon.status_message = "Success" - - synapse.axon.process_time = time.time() - start_time - - if response_override: - response = response_override - else: - serialized_synapse = await serialize_response(response_content=synapse) - response = JSONResponse( - status_code=synapse.axon.status_code, - content=serialized_synapse, - ) - - try: - updated_headers = synapse.to_headers() - except Exception as e: - raise PostProcessException( - f"Error while parsing response headers. Postprocess exception: {str(e)}.", - synapse=synapse, - ) from e - - try: - response.headers.update(updated_headers) - except Exception as e: - raise PostProcessException( - f"Error while updating response headers. 
Postprocess exception: {str(e)}.", - synapse=synapse, - ) from e - - return response diff --git a/bittensor/btlogging/__init__.py b/bittensor/btlogging/__init__.py deleted file mode 100644 index 6bf6d2bf35..0000000000 --- a/bittensor/btlogging/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -""" -btlogging sub-package standardized logging for Bittensor. - -This module provides logging functionality for the Bittensor package. It includes custom loggers, handlers, and -formatters to ensure consistent logging throughout the project. 
-""" - -from bittensor.btlogging.loggingmachine import LoggingMachine - - -logging = LoggingMachine(LoggingMachine.config()) diff --git a/bittensor/btlogging/defines.py b/bittensor/btlogging/defines.py deleted file mode 100644 index c87177ffd0..0000000000 --- a/bittensor/btlogging/defines.py +++ /dev/null @@ -1,28 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 OpenTensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -"""Btlogging constant definition module.""" - -BASE_LOG_FORMAT = "%(asctime)s | %(levelname)s | %(message)s" -TRACE_LOG_FORMAT = ( - f"%(asctime)s | %(levelname)s | %(name)s:%(filename)s:%(lineno)s | %(message)s" -) -DATE_FORMAT = "%Y-%m-%d %H:%M:%S" -BITTENSOR_LOGGER_NAME = "bittensor" -DEFAULT_LOG_FILE_NAME = "bittensor.log" -DEFAULT_MAX_ROTATING_LOG_FILE_SIZE = 25 * 1024 * 1024 -DEFAULT_LOG_BACKUP_COUNT = 10 diff --git a/bittensor/btlogging/format.py b/bittensor/btlogging/format.py deleted file mode 100644 index 5f2c8cb866..0000000000 --- a/bittensor/btlogging/format.py +++ /dev/null @@ -1,222 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 OpenTensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -""" -btlogging.format module - -This module defines custom logging formatters for the Bittensor project. 
-""" - -import logging -import time -from typing import Dict - -from colorama import init, Fore, Back, Style - - -init(autoreset=True) - -TRACE_LEVEL_NUM: int = 5 -SUCCESS_LEVEL_NUM: int = 21 - - -def _trace(self, message: str, *args, **kws): - if self.isEnabledFor(TRACE_LEVEL_NUM): - self._log(TRACE_LEVEL_NUM, message, args, **kws) - - -def _success(self, message: str, *args, **kws): - if self.isEnabledFor(SUCCESS_LEVEL_NUM): - self._log(SUCCESS_LEVEL_NUM, message, args, **kws) - - -logging.SUCCESS = SUCCESS_LEVEL_NUM -logging.addLevelName(SUCCESS_LEVEL_NUM, "SUCCESS") -logging.Logger.success = _success - -logging.TRACE = TRACE_LEVEL_NUM -logging.addLevelName(TRACE_LEVEL_NUM, "TRACE") -logging.Logger.trace = _trace - -emoji_map: Dict[str, str] = { - ":white_heavy_check_mark:": "✅", - ":cross_mark:": "❌", - ":satellite:": "đŸ›°ïž", -} - - -color_map: Dict[str, str] = { - "": Fore.RED, - "": Style.RESET_ALL, - "": Fore.BLUE, - "": Style.RESET_ALL, - "": Fore.GREEN, - "": Style.RESET_ALL, -} - - -log_level_color_prefix: Dict[int, str] = { - logging.NOTSET: Fore.RESET, - logging.TRACE: Fore.MAGENTA, - logging.DEBUG: Fore.BLUE, - logging.INFO: Fore.WHITE, - logging.SUCCESS: Fore.GREEN, - logging.WARNING: Fore.YELLOW, - logging.ERROR: Fore.RED, - logging.CRITICAL: Back.RED, -} - - -LOG_FORMATS: Dict[int, str] = { - level: f"{Fore.BLUE}%(asctime)s{Fore.RESET} | {Style.BRIGHT}{color}%(levelname)s\033[0m | %(message)s" - for level, color in log_level_color_prefix.items() -} - -LOG_TRACE_FORMATS: Dict[int, str] = { - level: f"{Fore.BLUE}%(asctime)s{Fore.RESET}" - f" | {Style.BRIGHT}{color}%(levelname)s{Fore.RESET}{Back.RESET}{Style.RESET_ALL}" - f" | %(name)s:%(filename)s:%(lineno)s" - f" | %(message)s" - for level, color in log_level_color_prefix.items() -} - -DEFAULT_LOG_FORMAT: str = ( - f"{Fore.BLUE}%(asctime)s{Fore.RESET} | " - f"{Style.BRIGHT}{Fore.WHITE}%(levelname)s{Style.RESET_ALL} | " - f"%(name)s:%(filename)s:%(lineno)s | %(message)s" -) - -DEFAULT_TRACE_FORMAT: 
str = ( - f"{Fore.BLUE}%(asctime)s{Fore.RESET} | " - f"{Style.BRIGHT}{Fore.WHITE}%(levelname)s{Style.RESET_ALL} | " - f"%(name)s:%(filename)s:%(lineno)s | %(message)s" -) - - -class BtStreamFormatter(logging.Formatter): - """ - A custom logging formatter for the Bittensor project that overrides the time formatting to include milliseconds, - centers the level name, and applies custom log formats, emojis, and colors. - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.trace = False - - def formatTime(self, record, datefmt=None) -> str: - """ - Override formatTime to add milliseconds. - - Args: - record (logging.LogRecord): The log record. - datefmt (str, optional): The date format string. - - Returns: - s (str): The formatted time string with milliseconds. - """ - - created = self.converter(record.created) - if datefmt: - s = time.strftime(datefmt, created) - else: - s = time.strftime("%Y-%m-%d %H:%M:%S", created) - s += ".{:03d}".format(int(record.msecs)) - return s - - def format(self, record) -> str: - """ - Override format to apply custom formatting including emojis and colors. - - This method saves the original format, applies custom formatting based on the log level and trace flag, replaces - text with emojis and colors, and then returns the formatted log record. - - Args: - record (logging.LogRecord): The log record. - - Returns: - result (str): The formatted log record. 
- """ - - format_orig = self._style._fmt - record.levelname = f"{record.levelname:^16}" - - if record.levelno not in LOG_FORMATS: - self._style._fmt = ( - DEFAULT_TRACE_FORMAT if self.trace else DEFAULT_LOG_FORMAT - ) - else: - if self.trace is True: - self._style._fmt = LOG_TRACE_FORMATS[record.levelno] - else: - self._style._fmt = LOG_FORMATS[record.levelno] - - for text, emoji in emoji_map.items(): - record.msg = record.msg.replace(text, emoji) - # Apply color specifiers - for text, color in color_map.items(): - record.msg = record.msg.replace(text, color) - - result = super().format(record) - self._style._fmt = format_orig - - return result - - def set_trace(self, state: bool = True): - """Change formatter state.""" - self.trace = state - - -class BtFileFormatter(logging.Formatter): - """ - BtFileFormatter - - A custom logging formatter for the Bittensor project that overrides the time formatting to include milliseconds and - centers the level name. - """ - - def formatTime(self, record, datefmt=None) -> str: - """ - Override formatTime to add milliseconds. - - Args: - record (logging.LogRecord): The log record. - datefmt (str, optional): The date format string. - - Returns: - s (str): The formatted time string with milliseconds. - """ - - created = self.converter(record.created) - if datefmt: - s = time.strftime(datefmt, created) - else: - s = time.strftime("%Y-%m-%d %H:%M:%S", created) - s += ".{:03d}".format(int(record.msecs)) - return s - - def format(self, record) -> str: - """ - Override format to center the level name. - - Args: - record (logging.LogRecord): The log record. - - Returns: - formated record (str): The formatted log record. 
- """ - record.levelname = f"{record.levelname:^16}" - return super().format(record) diff --git a/bittensor/btlogging/helpers.py b/bittensor/btlogging/helpers.py deleted file mode 100644 index 532c1f7166..0000000000 --- a/bittensor/btlogging/helpers.py +++ /dev/null @@ -1,88 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 OpenTensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -""" -btlogging.helpers module provides helper functions for the Bittensor logging system. -""" - -import logging -from typing import Generator - - -def all_loggers() -> Generator[logging.Logger, None, None]: - """Generator that yields all logger instances in the application. - - Iterates through the logging root manager's logger dictionary and yields all active `Logger` instances. It skips - placeholders and other types that are not instances of `Logger`. - - Yields: - logger (logging.Logger): An active logger instance. 
- """ - for logger in logging.root.manager.loggerDict.values(): - if isinstance(logger, logging.PlaceHolder): - continue - # In some versions of Python, the values in loggerDict might be - # LoggerAdapter instances instead of Logger instances. - # We check for Logger instances specifically. - if isinstance(logger, logging.Logger): - yield logger - else: - # If it's not a Logger instance, it could be a LoggerAdapter or - # another form that doesn't directly offer logging methods. - # This branch can be extended to handle such cases as needed. - pass - - -def all_logger_names() -> Generator[str, None, None]: - """ - Generate the names of all active loggers. - - This function iterates through the logging root manager's logger dictionary and yields the names of all active - `Logger` instances. It skips placeholders and other types that are not instances of `Logger`. - - Yields: - name (str): The name of an active logger. - """ - for name, logger in logging.root.manager.loggerDict.items(): - if isinstance(logger, logging.PlaceHolder): - continue - # In some versions of Python, the values in loggerDict might be - # LoggerAdapter instances instead of Logger instances. - # We check for Logger instances specifically. - if isinstance(logger, logging.Logger): - yield name - else: - # If it's not a Logger instance, it could be a LoggerAdapter or - # another form that doesn't directly offer logging methods. - # This branch can be extended to handle such cases as needed. - pass - - -def get_max_logger_name_length() -> int: - """ - Calculate and return the length of the longest logger name. - - This function iterates through all active logger names and determines the length of the longest name. - - Returns: - max_length (int): The length of the longest logger name. 
- """ - max_length = 0 - for name in all_logger_names(): - if len(name) > max_length: - max_length = len(name) - return max_length diff --git a/bittensor/btlogging/loggingmachine.py b/bittensor/btlogging/loggingmachine.py deleted file mode 100644 index ef58cecdfd..0000000000 --- a/bittensor/btlogging/loggingmachine.py +++ /dev/null @@ -1,511 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 OpenTensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -""" -Module provides a logging framework for Bittensor, managing both Bittensor-specific and third-party logging states. -It leverages the StateMachine from the statemachine package to transition between different logging states such as -Default, Debug, Trace, and Disabled. 
-""" - -import argparse -import atexit -import copy -import logging as stdlogging -import multiprocessing as mp -import os -import sys -from logging.handlers import QueueHandler, QueueListener, RotatingFileHandler -from logging import Logger -from typing import NamedTuple - -from statemachine import State, StateMachine - -import bittensor.config -from bittensor.btlogging.defines import ( - BITTENSOR_LOGGER_NAME, - DATE_FORMAT, - DEFAULT_LOG_BACKUP_COUNT, - DEFAULT_LOG_FILE_NAME, - DEFAULT_MAX_ROTATING_LOG_FILE_SIZE, - TRACE_LOG_FORMAT, -) -from bittensor.btlogging.format import BtFileFormatter, BtStreamFormatter -from bittensor.btlogging.helpers import all_loggers - - -class LoggingConfig(NamedTuple): - """Named tuple to hold the logging configuration.""" - - debug: bool - trace: bool - record_log: bool - logging_dir: str - - -class LoggingMachine(StateMachine, Logger): - """Handles logger states for bittensor and 3rd party libraries.""" - - Default = State(initial=True) - Debug = State() - Trace = State() - Disabled = State() - - enable_default = ( - Debug.to(Default) - | Trace.to(Default) - | Disabled.to(Default) - | Default.to(Default) - ) - - enable_trace = ( - Default.to(Trace) | Debug.to(Trace) | Disabled.to(Trace) | Trace.to(Trace) - ) - - enable_debug = ( - Default.to(Debug) | Trace.to(Debug) | Disabled.to(Debug) | Debug.to(Debug) - ) - - disable_trace = Trace.to(Default) - - disable_debug = Debug.to(Default) - - disable_logging = ( - Trace.to(Disabled) - | Debug.to(Disabled) - | Default.to(Disabled) - | Disabled.to(Disabled) - ) - - def __init__(self, config: bittensor.config, name: str = BITTENSOR_LOGGER_NAME): - # basics - super(LoggingMachine, self).__init__() - self._queue = mp.Queue(-1) - self._primary_loggers = {name} - self._config = self._extract_logging_config(config) - - # Formatters - # - # In the future, this may be expanded to a dictionary mapping handler - # types to their respective formatters. 
- self._stream_formatter = BtStreamFormatter() - self._file_formatter = BtFileFormatter(TRACE_LOG_FORMAT, DATE_FORMAT) - - # start with handlers for the QueueListener. - # - # In the future, we may want to add options to introduce other handlers - # for things like log aggregation by external services. - self._handlers = self._configure_handlers(self._config) - - # configure and start the queue listener - self._listener = self._create_and_start_listener(self._handlers) - - # set up all the loggers - self._logger = self._initialize_bt_logger(name) - self.disable_third_party_loggers() - self._enable_initial_state(self._config) - - def _enable_initial_state(self, config): - """Set correct state action on initializing""" - if config.trace: - self.enable_trace() - elif config.debug: - self.enable_debug() - else: - self.enable_default() - - def _extract_logging_config(self, config) -> dict: - """Extract btlogging's config from bittensor config""" - if hasattr(config, "logging"): - return config.logging - else: - return config - - def _configure_handlers(self, config) -> list[stdlogging.Handler]: - handlers = list() - - # stream handler, a given - stream_handler = stdlogging.StreamHandler(sys.stdout) - stream_handler.setFormatter(self._stream_formatter) - handlers.append(stream_handler) - - # file handler, maybe - if config.record_log and config.logging_dir: - logfile = os.path.abspath( - os.path.join(config.logging_dir, DEFAULT_LOG_FILE_NAME) - ) - file_handler = self._create_file_handler(logfile) - handlers.append(file_handler) - return handlers - - def get_config(self): - return self._config - - def set_config(self, config): - """Set config after initialization, if desired.""" - self._config = config - if config.logging_dir and config.record_log: - expanded_dir = os.path.expanduser(config.logging_dir) - logfile = os.path.abspath(os.path.join(expanded_dir, DEFAULT_LOG_FILE_NAME)) - self._enable_file_logging(logfile) - if config.trace: - self.enable_trace() - elif 
config.debug: - self.enable_debug() - - def _create_and_start_listener(self, handlers): - """ - A listener to receive and publish log records. - - This listener receives records from a queue populated by the main bittensor logger, as well as 3rd party loggers - """ - - listener = QueueListener(self._queue, *handlers, respect_handler_level=True) - listener.start() - atexit.register(listener.stop) - return listener - - def get_queue(self): - """ - Get the queue the QueueListener is publishing from. - - To set up logging in a separate process, a QueueHandler must be added to all the desired loggers. - """ - return self._queue - - def _initialize_bt_logger(self, name): - """ - Initialize logging for bittensor. - - Since the initial state is Default, logging level for the module logger is INFO, and all third-party loggers are - silenced. Subsequent state transitions will handle all logger outputs. - """ - logger = stdlogging.getLogger(name) - queue_handler = QueueHandler(self._queue) - logger.addHandler(queue_handler) - return logger - - def _deinitialize_bt_logger(self, name): - """Find the logger by name and remove the queue handler associated with it.""" - logger = stdlogging.getLogger(name) - for handler in list(logger.handlers): - if isinstance(handler, QueueHandler): - logger.removeHandler(handler) - return logger - - def _create_file_handler(self, logfile: str): - file_handler = RotatingFileHandler( - logfile, - maxBytes=DEFAULT_MAX_ROTATING_LOG_FILE_SIZE, - backupCount=DEFAULT_LOG_BACKUP_COUNT, - ) - file_handler.setFormatter(self._file_formatter) - file_handler.setLevel(stdlogging.TRACE) - return file_handler - - def register_primary_logger(self, name: str): - """ - Register a logger as primary logger - - This adds a logger to the _primary_loggers set to ensure - it doesn't get disabled when disabling third-party loggers. - A queue handler is also associated with it. 
- """ - self._primary_loggers.add(name) - self._initialize_bt_logger(name) - - def deregister_primary_logger(self, name: str): - """ - De-registers a primary logger - - This function removes the logger from the _primary_loggers - set and deinitializes its queue handler - """ - self._primary_loggers.remove(name) - self._deinitialize_bt_logger(name) - - def enable_third_party_loggers(self): - """Enables logging for third-party loggers by adding a queue handler to each.""" - for logger in all_loggers(): - if logger.name in self._primary_loggers: - continue - queue_handler = QueueHandler(self._queue) - logger.addHandler(queue_handler) - logger.setLevel(self._logger.level) - - def disable_third_party_loggers(self): - """Disables logging for third-party loggers by removing all their handlers.""" - # remove all handlers - for logger in all_loggers(): - if logger.name in self._primary_loggers: - continue - for handler in logger.handlers: - logger.removeHandler(handler) - - def _enable_file_logging(self, logfile: str): - # preserve idempotency; do not create extra filehandlers - # if one already exists - if any( - [isinstance(handler, RotatingFileHandler) for handler in self._handlers] - ): - return - file_handler = self._create_file_handler(logfile) - self._handlers.append(file_handler) - self._listener.handlers = tuple(self._handlers) - - # state transitions - def before_transition(self, event, state): - """Stops listener after transition.""" - self._listener.stop() - - def after_transition(self, event, state): - """Starts listener after transition.""" - self._listener.start() - - # Default Logging - def before_enable_default(self): - """Logs status before enable Default.""" - self._logger.info(f"Enabling default logging.") - self._logger.setLevel(stdlogging.INFO) - for logger in all_loggers(): - if logger.name in self._primary_loggers: - continue - logger.setLevel(stdlogging.CRITICAL) - - def after_enable_default(self): - pass - - # Trace - def before_enable_trace(self): 
- """Logs status before enable Trace.""" - self._logger.info("Enabling trace.") - self._stream_formatter.set_trace(True) - for logger in all_loggers(): - logger.setLevel(stdlogging.TRACE) - - def after_enable_trace(self): - """Logs status after enable Trace.""" - self._logger.info("Trace enabled.") - - def before_disable_trace(self): - """Logs status before disable Trace.""" - self._logger.info(f"Disabling trace.") - self._stream_formatter.set_trace(False) - self.enable_default() - - def after_disable_trace(self): - """Logs status after disable Trace.""" - self._logger.info("Trace disabled.") - - # Debug - def before_enable_debug(self): - """Logs status before enable Debug.""" - self._logger.info("Enabling debug.") - self._stream_formatter.set_trace(True) - for logger in all_loggers(): - logger.setLevel(stdlogging.DEBUG) - - def after_enable_debug(self): - """Logs status after enable Debug.""" - self._logger.info("Debug enabled.") - - def before_disable_debug(self): - """Logs status before disable Debug.""" - self._logger.info("Disabling debug.") - self._stream_formatter.set_trace(False) - self.enable_default() - - def after_disable_debug(self): - """Logs status after disable Debug.""" - self._logger.info("Debug disabled.") - - # Disable Logging - def before_disable_logging(self): - """ - Prepares the logging system for disabling. - - This method performs the following actions: - 1. Logs an informational message indicating that logging is being disabled. - 2. Disables trace mode in the stream formatter. - 3. Sets the logging level to CRITICAL for all loggers. - - This ensures that only critical messages will be logged after this method is called. - """ - self._logger.info("Disabling logging.") - self._stream_formatter.set_trace(False) - - for logger in all_loggers(): - logger.setLevel(stdlogging.CRITICAL) - - # Required API support log commands for API backwards compatibility. 
- @property - def __trace_on__(self) -> bool: - """ - Checks if the current state is in "Trace" mode. - - Returns: - bool: True if the current state is "Trace", otherwise False. - """ - return self.current_state_value == "Trace" - - def trace(self, msg="", prefix="", suffix="", *args, **kwargs): - """Wraps trace message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" - self._logger.trace(msg, *args, **kwargs) - - def debug(self, msg="", prefix="", suffix="", *args, **kwargs): - """Wraps debug message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" - self._logger.debug(msg, *args, **kwargs) - - def info(self, msg="", prefix="", suffix="", *args, **kwargs): - """Wraps info message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" - self._logger.info(msg, *args, **kwargs) - - def success(self, msg="", prefix="", suffix="", *args, **kwargs): - """Wraps success message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" - self._logger.success(msg, *args, **kwargs) - - def warning(self, msg="", prefix="", suffix="", *args, **kwargs): - """Wraps warning message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" - self._logger.warning(msg, *args, **kwargs) - - def error(self, msg="", prefix="", suffix="", *args, **kwargs): - """Wraps error message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" - self._logger.error(msg, *args, **kwargs) - - def critical(self, msg="", prefix="", suffix="", *args, **kwargs): - """Wraps critical message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" - self._logger.critical(msg, *args, **kwargs) - - def exception(self, msg="", prefix="", suffix="", *args, **kwargs): - """Wraps exception message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" - self._logger.exception(msg, *args, **kwargs) - - def on(self): - """Enable default state.""" - self._logger.info("Logging enabled.") - self.enable_default() - - def 
off(self): - """Disables all states.""" - self.disable_logging() - - def set_debug(self, on: bool = True): - """Sets Debug state.""" - if on and not self.current_state_value == "Debug": - self.enable_debug() - elif not on: - if self.current_state_value == "Debug": - self.disable_debug() - - def set_trace(self, on: bool = True): - """Sets Trace state.""" - if on and not self.current_state_value == "Trace": - self.enable_trace() - elif not on: - if self.current_state_value == "Trace": - self.disable_trace() - - def get_level(self) -> int: - """Returns Logging level.""" - return self._logger.level - - def check_config(self, config: bittensor.config): - assert config.logging - - def help(self): - pass - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser, prefix: str = None): - """Accept specific arguments fro parser""" - prefix_str = "" if prefix is None else prefix + "." - try: - default_logging_debug = os.getenv("BT_LOGGING_DEBUG") or False - default_logging_trace = os.getenv("BT_LOGGING_TRACE") or False - default_logging_record_log = os.getenv("BT_LOGGING_RECORD_LOG") or False - default_logging_logging_dir = ( - os.getenv("BT_LOGGING_LOGGING_DIR") or "~/.bittensor/miners" - ) - parser.add_argument( - "--" + prefix_str + "logging.debug", - action="store_true", - help="""Turn on bittensor debugging information""", - default=default_logging_debug, - ) - parser.add_argument( - "--" + prefix_str + "logging.trace", - action="store_true", - help="""Turn on bittensor trace level information""", - default=default_logging_trace, - ) - parser.add_argument( - "--" + prefix_str + "logging.record_log", - action="store_true", - help="""Turns on logging to file.""", - default=default_logging_record_log, - ) - parser.add_argument( - "--" + prefix_str + "logging.logging_dir", - type=str, - help="Logging default root directory.", - default=default_logging_logging_dir, - ) - except argparse.ArgumentError: - # re-parsing arguments. 
- pass - - @classmethod - def config(cls) -> bittensor.config: - """Get config from the argument parser. - - Return: - config (bittensor.config): config object - """ - parser = argparse.ArgumentParser() - cls.add_args(parser) - return bittensor.config(parser, args=[]) - - def __call__( - self, - config: bittensor.config = None, - debug: bool = None, - trace: bool = None, - record_log: bool = None, - logging_dir: str = None, - ): - if config is not None: - cfg = copy.deepcopy(config) - if debug is not None: - cfg.debug = debug - elif trace is not None: - cfg.trace = trace - if record_log is not None: - cfg.record_log = record_log - if logging_dir is not None: - cfg.logging_dir = logging_dir - else: - cfg = LoggingConfig( - debug=debug, trace=trace, record_log=record_log, logging_dir=logging_dir - ) - self.set_config(cfg) diff --git a/bittensor/chain_data.py b/bittensor/chain_data.py deleted file mode 100644 index 029cb29829..0000000000 --- a/bittensor/chain_data.py +++ /dev/null @@ -1,1204 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Opentensor Foundation -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. -# -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -""" -This module provides data structures and functions for working with the Bittensor network, -including neuron and subnet information, SCALE encoding/decoding, and custom RPC type registry. -""" - -import json -from dataclasses import dataclass, asdict -from enum import Enum -from typing import List, Tuple, Dict, Optional, Any, TypedDict, Union - -from scalecodec.base import RuntimeConfiguration, ScaleBytes -from scalecodec.type_registry import load_type_registry_preset -from scalecodec.types import GenericCall -from scalecodec.utils.ss58 import ss58_encode - -import bittensor -from .utils import networking as net, RAOPERTAO, U16_NORMALIZED_FLOAT -from .utils.balance import Balance -from .utils.registration import torch, use_torch - -custom_rpc_type_registry = { - "types": { - "SubnetInfo": { - "type": "struct", - "type_mapping": [ - ["netuid", "Compact"], - ["rho", "Compact"], - ["kappa", "Compact"], - ["difficulty", "Compact"], - ["immunity_period", "Compact"], - ["max_allowed_validators", "Compact"], - ["min_allowed_weights", "Compact"], - ["max_weights_limit", "Compact"], - ["scaling_law_power", "Compact"], - ["subnetwork_n", "Compact"], - ["max_allowed_uids", "Compact"], - ["blocks_since_last_step", "Compact"], - ["tempo", "Compact"], - ["network_modality", "Compact"], - ["network_connect", "Vec<[u16; 2]>"], - ["emission_values", "Compact"], - ["burn", "Compact"], - ["owner", "AccountId"], - ], - }, - "DelegateInfo": { - "type": "struct", - "type_mapping": [ - ["delegate_ss58", "AccountId"], - ["take", "Compact"], - ["nominators", "Vec<(AccountId, Compact)>"], - ["owner_ss58", "AccountId"], - ["registrations", "Vec>"], - ["validator_permits", "Vec>"], - ["return_per_1000", "Compact"], - 
["total_daily_return", "Compact"], - ], - }, - "NeuronInfo": { - "type": "struct", - "type_mapping": [ - ["hotkey", "AccountId"], - ["coldkey", "AccountId"], - ["uid", "Compact"], - ["netuid", "Compact"], - ["active", "bool"], - ["axon_info", "axon_info"], - ["prometheus_info", "PrometheusInfo"], - ["stake", "Vec<(AccountId, Compact)>"], - ["rank", "Compact"], - ["emission", "Compact"], - ["incentive", "Compact"], - ["consensus", "Compact"], - ["trust", "Compact"], - ["validator_trust", "Compact"], - ["dividends", "Compact"], - ["last_update", "Compact"], - ["validator_permit", "bool"], - ["weights", "Vec<(Compact, Compact)>"], - ["bonds", "Vec<(Compact, Compact)>"], - ["pruning_score", "Compact"], - ], - }, - "NeuronInfoLite": { - "type": "struct", - "type_mapping": [ - ["hotkey", "AccountId"], - ["coldkey", "AccountId"], - ["uid", "Compact"], - ["netuid", "Compact"], - ["active", "bool"], - ["axon_info", "axon_info"], - ["prometheus_info", "PrometheusInfo"], - ["stake", "Vec<(AccountId, Compact)>"], - ["rank", "Compact"], - ["emission", "Compact"], - ["incentive", "Compact"], - ["consensus", "Compact"], - ["trust", "Compact"], - ["validator_trust", "Compact"], - ["dividends", "Compact"], - ["last_update", "Compact"], - ["validator_permit", "bool"], - ["pruning_score", "Compact"], - ], - }, - "axon_info": { - "type": "struct", - "type_mapping": [ - ["block", "u64"], - ["version", "u32"], - ["ip", "u128"], - ["port", "u16"], - ["ip_type", "u8"], - ["protocol", "u8"], - ["placeholder1", "u8"], - ["placeholder2", "u8"], - ], - }, - "PrometheusInfo": { - "type": "struct", - "type_mapping": [ - ["block", "u64"], - ["version", "u32"], - ["ip", "u128"], - ["port", "u16"], - ["ip_type", "u8"], - ], - }, - "IPInfo": { - "type": "struct", - "type_mapping": [ - ["ip", "Compact"], - ["ip_type_and_protocol", "Compact"], - ], - }, - "StakeInfo": { - "type": "struct", - "type_mapping": [ - ["hotkey", "AccountId"], - ["coldkey", "AccountId"], - ["stake", "Compact"], - ], - }, - 
"SubnetHyperparameters": { - "type": "struct", - "type_mapping": [ - ["rho", "Compact"], - ["kappa", "Compact"], - ["immunity_period", "Compact"], - ["min_allowed_weights", "Compact"], - ["max_weights_limit", "Compact"], - ["tempo", "Compact"], - ["min_difficulty", "Compact"], - ["max_difficulty", "Compact"], - ["weights_version", "Compact"], - ["weights_rate_limit", "Compact"], - ["adjustment_interval", "Compact"], - ["activity_cutoff", "Compact"], - ["registration_allowed", "bool"], - ["target_regs_per_interval", "Compact"], - ["min_burn", "Compact"], - ["max_burn", "Compact"], - ["bonds_moving_avg", "Compact"], - ["max_regs_per_block", "Compact"], - ["serving_rate_limit", "Compact"], - ["max_validators", "Compact"], - ["adjustment_alpha", "Compact"], - ["difficulty", "Compact"], - ["commit_reveal_weights_interval", "Compact"], - ["commit_reveal_weights_enabled", "bool"], - ["alpha_high", "Compact"], - ["alpha_low", "Compact"], - ["liquid_alpha_enabled", "bool"], - ], - }, - "ScheduledColdkeySwapInfo": { - "type": "struct", - "type_mapping": [ - ["old_coldkey", "AccountId"], - ["new_coldkey", "AccountId"], - ["arbitration_block", "Compact"], - ], - }, - } -} - - -@dataclass -class AxonInfo: - version: int - ip: str - port: int - ip_type: int - hotkey: str - coldkey: str - protocol: int = 4 - placeholder1: int = 0 - placeholder2: int = 0 - - @property - def is_serving(self) -> bool: - """True if the endpoint is serving.""" - return self.ip != "0.0.0.0" - - def ip_str(self) -> str: - """Return the whole IP as string""" - return net.ip__str__(self.ip_type, self.ip, self.port) - - def __eq__(self, other: "AxonInfo"): - if other is None: - return False - - if ( - self.version == other.version - and self.ip == other.ip - and self.port == other.port - and self.ip_type == other.ip_type - and self.coldkey == other.coldkey - and self.hotkey == other.hotkey - ): - return True - - return False - - def __str__(self): - return "AxonInfo( {}, {}, {}, {} )".format( - 
str(self.ip_str()), str(self.hotkey), str(self.coldkey), self.version - ) - - def __repr__(self): - return self.__str__() - - def to_string(self) -> str: - """Converts the AxonInfo object to a string representation using JSON.""" - try: - return json.dumps(asdict(self)) - except (TypeError, ValueError) as e: - bittensor.logging.error(f"Error converting AxonInfo to string: {e}") - return AxonInfo(0, "", 0, 0, "", "").to_string() - - @classmethod - def from_string(cls, json_string: str) -> "AxonInfo": - """ - Creates an AxonInfo object from its string representation using JSON. - - Args: - json_string (str): The JSON string representation of the AxonInfo object. - - Returns: - AxonInfo: An instance of AxonInfo created from the JSON string. If decoding fails, returns a default AxonInfo object with default values. - - Raises: - json.JSONDecodeError: If there is an error in decoding the JSON string. - TypeError: If there is a type error when creating the AxonInfo object. - ValueError: If there is a value error when creating the AxonInfo object. - """ - try: - data = json.loads(json_string) - return cls(**data) - except json.JSONDecodeError as e: - bittensor.logging.error(f"Error decoding JSON: {e}") - except TypeError as e: - bittensor.logging.error(f"Type error: {e}") - except ValueError as e: - bittensor.logging.error(f"Value error: {e}") - return AxonInfo(0, "", 0, 0, "", "") - - @classmethod - def from_neuron_info(cls, neuron_info: dict) -> "AxonInfo": - """ - Converts a dictionary to an AxonInfo object. - - Args: - neuron_info (dict): A dictionary containing the neuron information. - - Returns: - instance (AxonInfo): An instance of AxonInfo created from the dictionary. 
- """ - return cls( - version=neuron_info["axon_info"]["version"], - ip=net.int_to_ip(int(neuron_info["axon_info"]["ip"])), - port=neuron_info["axon_info"]["port"], - ip_type=neuron_info["axon_info"]["ip_type"], - hotkey=neuron_info["hotkey"], - coldkey=neuron_info["coldkey"], - ) - - def to_parameter_dict( - self, - ) -> Union[dict[str, Union[int, str]], "torch.nn.ParameterDict"]: - """Returns a torch tensor or dict of the subnet info, depending on the USE_TORCH flag set.""" - if use_torch(): - return torch.nn.ParameterDict(self.__dict__) - else: - return self.__dict__ - - @classmethod - def from_parameter_dict( - cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"] - ) -> "AxonInfo": - """Returns an axon_info object from a torch parameter_dict or a parameter dict.""" - if use_torch(): - return cls(**dict(parameter_dict)) - else: - return cls(**parameter_dict) - - -class ChainDataType(Enum): - NeuronInfo = 1 - SubnetInfo = 2 - DelegateInfo = 3 - NeuronInfoLite = 4 - DelegatedInfo = 5 - StakeInfo = 6 - IPInfo = 7 - SubnetHyperparameters = 8 - ScheduledColdkeySwapInfo = 9 - - -def from_scale_encoding( - input_: Union[List[int], bytes, ScaleBytes], - type_name: ChainDataType, - is_vec: bool = False, - is_option: bool = False, -) -> Optional[Dict]: - """ - Decodes input_ data from SCALE encoding based on the specified type name and modifiers. - - Args: - input_ (Union[List[int], bytes, ScaleBytes]): The input_ data to decode. - type_name (ChainDataType): The type of data being decoded. - is_vec (bool, optional): Whether the data is a vector of the specified type. Default is ``False``. - is_option (bool, optional): Whether the data is an optional value of the specified type. Default is ``False``. - - Returns: - Optional[Dict]: The decoded data as a dictionary, or ``None`` if the decoding fails. 
- """ - type_string = type_name.name - if type_name == ChainDataType.DelegatedInfo: - # DelegatedInfo is a tuple of (DelegateInfo, Compact) - type_string = f"({ChainDataType.DelegateInfo.name}, Compact)" - if is_option: - type_string = f"Option<{type_string}>" - if is_vec: - type_string = f"Vec<{type_string}>" - - return from_scale_encoding_using_type_string(input_, type_string) - - -def from_scale_encoding_using_type_string( - input_: Union[List[int], bytes, ScaleBytes], type_string: str -) -> Optional[Dict]: - if isinstance(input_, ScaleBytes): - as_scale_bytes = input_ - else: - if isinstance(input_, list) and all([isinstance(i, int) for i in input_]): - vec_u8 = input_ - as_bytes = bytes(vec_u8) - elif isinstance(input_, bytes): - as_bytes = input_ - else: - raise TypeError("input_ must be a List[int], bytes, or ScaleBytes") - - as_scale_bytes = ScaleBytes(as_bytes) - - rpc_runtime_config = RuntimeConfiguration() - rpc_runtime_config.update_type_registry(load_type_registry_preset("legacy")) - rpc_runtime_config.update_type_registry(custom_rpc_type_registry) - - obj = rpc_runtime_config.create_scale_object(type_string, data=as_scale_bytes) - - return obj.decode() - - -# Dataclasses for chain data. 
-@dataclass -class NeuronInfo: - """Dataclass for neuron metadata.""" - - hotkey: str - coldkey: str - uid: int - netuid: int - active: int - stake: Balance - # mapping of coldkey to amount staked to this Neuron - stake_dict: Dict[str, Balance] - total_stake: Balance - rank: float - emission: float - incentive: float - consensus: float - trust: float - validator_trust: float - dividends: float - last_update: int - validator_permit: bool - weights: List[List[int]] - bonds: List[List[int]] - pruning_score: int - prometheus_info: Optional["PrometheusInfo"] = None - axon_info: Optional[AxonInfo] = None - is_null: bool = False - - @classmethod - def fix_decoded_values(cls, neuron_info_decoded: Any) -> "NeuronInfo": - """Fixes the values of the NeuronInfo object.""" - neuron_info_decoded["hotkey"] = ss58_encode( - neuron_info_decoded["hotkey"], bittensor.__ss58_format__ - ) - neuron_info_decoded["coldkey"] = ss58_encode( - neuron_info_decoded["coldkey"], bittensor.__ss58_format__ - ) - stake_dict = { - ss58_encode(coldkey, bittensor.__ss58_format__): Balance.from_rao( - int(stake) - ) - for coldkey, stake in neuron_info_decoded["stake"] - } - neuron_info_decoded["stake_dict"] = stake_dict - neuron_info_decoded["stake"] = sum(stake_dict.values()) - neuron_info_decoded["total_stake"] = neuron_info_decoded["stake"] - neuron_info_decoded["weights"] = [ - [int(weight[0]), int(weight[1])] - for weight in neuron_info_decoded["weights"] - ] - neuron_info_decoded["bonds"] = [ - [int(bond[0]), int(bond[1])] for bond in neuron_info_decoded["bonds"] - ] - neuron_info_decoded["rank"] = U16_NORMALIZED_FLOAT(neuron_info_decoded["rank"]) - neuron_info_decoded["emission"] = neuron_info_decoded["emission"] / RAOPERTAO - neuron_info_decoded["incentive"] = U16_NORMALIZED_FLOAT( - neuron_info_decoded["incentive"] - ) - neuron_info_decoded["consensus"] = U16_NORMALIZED_FLOAT( - neuron_info_decoded["consensus"] - ) - neuron_info_decoded["trust"] = U16_NORMALIZED_FLOAT( - 
neuron_info_decoded["trust"] - ) - neuron_info_decoded["validator_trust"] = U16_NORMALIZED_FLOAT( - neuron_info_decoded["validator_trust"] - ) - neuron_info_decoded["dividends"] = U16_NORMALIZED_FLOAT( - neuron_info_decoded["dividends"] - ) - neuron_info_decoded["prometheus_info"] = PrometheusInfo.fix_decoded_values( - neuron_info_decoded["prometheus_info"] - ) - neuron_info_decoded["axon_info"] = AxonInfo.from_neuron_info( - neuron_info_decoded - ) - return cls(**neuron_info_decoded) - - @classmethod - def from_vec_u8(cls, vec_u8: List[int]) -> "NeuronInfo": - """Returns a NeuronInfo object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return NeuronInfo.get_null_neuron() - - decoded = from_scale_encoding(vec_u8, ChainDataType.NeuronInfo) - if decoded is None: - return NeuronInfo.get_null_neuron() - - return NeuronInfo.fix_decoded_values(decoded) - - @classmethod - def list_from_vec_u8(cls, vec_u8: List[int]) -> List["NeuronInfo"]: - """Returns a list of NeuronInfo objects from a ``vec_u8``""" - - decoded_list = from_scale_encoding( - vec_u8, ChainDataType.NeuronInfo, is_vec=True - ) - if decoded_list is None: - return [] - - decoded_list = [ - NeuronInfo.fix_decoded_values(decoded) for decoded in decoded_list - ] - return decoded_list - - @staticmethod - def get_null_neuron() -> "NeuronInfo": - neuron = NeuronInfo( - uid=0, - netuid=0, - active=0, - stake=Balance.from_rao(0), - stake_dict={}, - total_stake=Balance.from_rao(0), - rank=0, - emission=0, - incentive=0, - consensus=0, - trust=0, - validator_trust=0, - dividends=0, - last_update=0, - validator_permit=False, - weights=[], - bonds=[], - prometheus_info=None, - axon_info=None, - is_null=True, - coldkey="000000000000000000000000000000000000000000000000", - hotkey="000000000000000000000000000000000000000000000000", - pruning_score=0, - ) - return neuron - - @classmethod - def from_weights_bonds_and_neuron_lite( - cls, - neuron_lite: "NeuronInfoLite", - weights_as_dict: Dict[int, List[Tuple[int, int]]], - 
bonds_as_dict: Dict[int, List[Tuple[int, int]]], - ) -> "NeuronInfo": - n_dict = neuron_lite.__dict__ - n_dict["weights"] = weights_as_dict.get(neuron_lite.uid, []) - n_dict["bonds"] = bonds_as_dict.get(neuron_lite.uid, []) - - return cls(**n_dict) - - -@dataclass -class NeuronInfoLite: - """Dataclass for neuron metadata, but without the weights and bonds.""" - - hotkey: str - coldkey: str - uid: int - netuid: int - active: int - stake: Balance - # mapping of coldkey to amount staked to this Neuron - stake_dict: Dict[str, Balance] - total_stake: Balance - rank: float - emission: float - incentive: float - consensus: float - trust: float - validator_trust: float - dividends: float - last_update: int - validator_permit: bool - prometheus_info: Optional["PrometheusInfo"] - axon_info: "axon_info" - pruning_score: int - is_null: bool = False - - @classmethod - def fix_decoded_values(cls, neuron_info_decoded: Any) -> "NeuronInfoLite": - """Fixes the values of the NeuronInfoLite object.""" - neuron_info_decoded["hotkey"] = ss58_encode( - neuron_info_decoded["hotkey"], bittensor.__ss58_format__ - ) - neuron_info_decoded["coldkey"] = ss58_encode( - neuron_info_decoded["coldkey"], bittensor.__ss58_format__ - ) - stake_dict = { - ss58_encode(coldkey, bittensor.__ss58_format__): Balance.from_rao( - int(stake) - ) - for coldkey, stake in neuron_info_decoded["stake"] - } - neuron_info_decoded["stake_dict"] = stake_dict - neuron_info_decoded["stake"] = sum(stake_dict.values()) - neuron_info_decoded["total_stake"] = neuron_info_decoded["stake"] - neuron_info_decoded["rank"] = U16_NORMALIZED_FLOAT(neuron_info_decoded["rank"]) - neuron_info_decoded["emission"] = neuron_info_decoded["emission"] / RAOPERTAO - neuron_info_decoded["incentive"] = U16_NORMALIZED_FLOAT( - neuron_info_decoded["incentive"] - ) - neuron_info_decoded["consensus"] = U16_NORMALIZED_FLOAT( - neuron_info_decoded["consensus"] - ) - neuron_info_decoded["trust"] = U16_NORMALIZED_FLOAT( - neuron_info_decoded["trust"] 
- ) - neuron_info_decoded["validator_trust"] = U16_NORMALIZED_FLOAT( - neuron_info_decoded["validator_trust"] - ) - neuron_info_decoded["dividends"] = U16_NORMALIZED_FLOAT( - neuron_info_decoded["dividends"] - ) - neuron_info_decoded["prometheus_info"] = PrometheusInfo.fix_decoded_values( - neuron_info_decoded["prometheus_info"] - ) - neuron_info_decoded["axon_info"] = AxonInfo.from_neuron_info( - neuron_info_decoded - ) - return cls(**neuron_info_decoded) - - @classmethod - def from_vec_u8(cls, vec_u8: List[int]) -> "NeuronInfoLite": - """Returns a NeuronInfoLite object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return NeuronInfoLite.get_null_neuron() - - decoded = from_scale_encoding(vec_u8, ChainDataType.NeuronInfoLite) - if decoded is None: - return NeuronInfoLite.get_null_neuron() - - return NeuronInfoLite.fix_decoded_values(decoded) - - @classmethod - def list_from_vec_u8(cls, vec_u8: List[int]) -> List["NeuronInfoLite"]: - """Returns a list of NeuronInfoLite objects from a ``vec_u8``.""" - - decoded_list = from_scale_encoding( - vec_u8, ChainDataType.NeuronInfoLite, is_vec=True - ) - if decoded_list is None: - return [] - - decoded_list = [ - NeuronInfoLite.fix_decoded_values(decoded) for decoded in decoded_list - ] - return decoded_list - - @staticmethod - def get_null_neuron() -> "NeuronInfoLite": - neuron = NeuronInfoLite( - uid=0, - netuid=0, - active=0, - stake=Balance.from_rao(0), - stake_dict={}, - total_stake=Balance.from_rao(0), - rank=0, - emission=0, - incentive=0, - consensus=0, - trust=0, - validator_trust=0, - dividends=0, - last_update=0, - validator_permit=False, - prometheus_info=None, - axon_info=None, - is_null=True, - coldkey="000000000000000000000000000000000000000000000000", - hotkey="000000000000000000000000000000000000000000000000", - pruning_score=0, - ) - return neuron - - -@dataclass -class PrometheusInfo: - """Dataclass for prometheus info.""" - - block: int - version: int - ip: str - port: int - ip_type: int - - @classmethod 
- def fix_decoded_values(cls, prometheus_info_decoded: Dict) -> "PrometheusInfo": - """Returns a PrometheusInfo object from a prometheus_info_decoded dictionary.""" - prometheus_info_decoded["ip"] = net.int_to_ip( - int(prometheus_info_decoded["ip"]) - ) - - return cls(**prometheus_info_decoded) - - -@dataclass -class DelegateInfoLite: - """ - Dataclass for DelegateLiteInfo. This is a lighter version of :func:`DelegateInfo`. - - Args: - delegate_ss58 (str): Hotkey of the delegate for which the information is being fetched. - take (float): Take of the delegate as a percentage. - nominators (int): Count of the nominators of the delegate. - owner_ss58 (str): Coldkey of the owner. - registrations (list[int]): List of subnets that the delegate is registered on. - validator_permits (list[int]): List of subnets that the delegate is allowed to validate on. - return_per_1000 (int): Return per 1000 TAO, for the delegate over a day. - total_daily_return (int): Total daily return of the delegate. - """ - - delegate_ss58: str # Hotkey of delegate - take: float # Take of the delegate as a percentage - nominators: int # Count of the nominators of the delegate. - owner_ss58: str # Coldkey of owner - registrations: list[int] # List of subnets that the delegate is registered on - validator_permits: list[ - int - ] # List of subnets that the delegate is allowed to validate on - return_per_1000: int # Return per 1000 tao for the delegate over a day - total_daily_return: int # Total daily return of the delegate - - -@dataclass -class DelegateInfo: - """ - Dataclass for delegate information. For a lighter version of this class, see :func:`DelegateInfoLite`. - - Args: - hotkey_ss58 (str): Hotkey of the delegate for which the information is being fetched. - total_stake (int): Total stake of the delegate. - nominators (list[Tuple[str, int]]): List of nominators of the delegate and their stake. - take (float): Take of the delegate as a percentage. - owner_ss58 (str): Coldkey of the owner. 
- registrations (list[int]): List of subnets that the delegate is registered on. - validator_permits (list[int]): List of subnets that the delegate is allowed to validate on. - return_per_1000 (int): Return per 1000 TAO, for the delegate over a day. - total_daily_return (int): Total daily return of the delegate. - - """ - - hotkey_ss58: str # Hotkey of delegate - total_stake: Balance # Total stake of the delegate - nominators: List[ - Tuple[str, Balance] - ] # List of nominators of the delegate and their stake - owner_ss58: str # Coldkey of owner - take: float # Take of the delegate as a percentage - validator_permits: List[ - int - ] # List of subnets that the delegate is allowed to validate on - registrations: List[int] # List of subnets that the delegate is registered on - return_per_1000: Balance # Return per 1000 tao of the delegate over a day - total_daily_return: Balance # Total daily return of the delegate - - @classmethod - def fix_decoded_values(cls, decoded: Any) -> "DelegateInfo": - """Fixes the decoded values.""" - - return cls( - hotkey_ss58=ss58_encode( - decoded["delegate_ss58"], bittensor.__ss58_format__ - ), - owner_ss58=ss58_encode(decoded["owner_ss58"], bittensor.__ss58_format__), - take=U16_NORMALIZED_FLOAT(decoded["take"]), - nominators=[ - ( - ss58_encode(nom[0], bittensor.__ss58_format__), - Balance.from_rao(nom[1]), - ) - for nom in decoded["nominators"] - ], - total_stake=Balance.from_rao( - sum([nom[1] for nom in decoded["nominators"]]) - ), - validator_permits=decoded["validator_permits"], - registrations=decoded["registrations"], - return_per_1000=Balance.from_rao(decoded["return_per_1000"]), - total_daily_return=Balance.from_rao(decoded["total_daily_return"]), - ) - - @classmethod - def from_vec_u8(cls, vec_u8: List[int]) -> Optional["DelegateInfo"]: - """Returns a DelegateInfo object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return None - - decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo) - if decoded is None: - 
return None - - return DelegateInfo.fix_decoded_values(decoded) - - @classmethod - def list_from_vec_u8(cls, vec_u8: List[int]) -> List["DelegateInfo"]: - """Returns a list of DelegateInfo objects from a ``vec_u8``.""" - decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo, is_vec=True) - - if decoded is None: - return [] - - return [DelegateInfo.fix_decoded_values(d) for d in decoded] - - @classmethod - def delegated_list_from_vec_u8( - cls, vec_u8: List[int] - ) -> List[Tuple["DelegateInfo", Balance]]: - """Returns a list of Tuples of DelegateInfo objects, and Balance, from a ``vec_u8``. - - This is the list of delegates that the user has delegated to, and the amount of stake delegated. - """ - decoded = from_scale_encoding(vec_u8, ChainDataType.DelegatedInfo, is_vec=True) - if decoded is None: - return [] - - return [ - (DelegateInfo.fix_decoded_values(d), Balance.from_rao(s)) - for d, s in decoded - ] - - -@dataclass -class StakeInfo: - """Dataclass for stake info.""" - - hotkey_ss58: str # Hotkey address - coldkey_ss58: str # Coldkey address - stake: Balance # Stake for the hotkey-coldkey pair - - @classmethod - def fix_decoded_values(cls, decoded: Any) -> "StakeInfo": - """Fixes the decoded values.""" - return cls( - hotkey_ss58=ss58_encode(decoded["hotkey"], bittensor.__ss58_format__), - coldkey_ss58=ss58_encode(decoded["coldkey"], bittensor.__ss58_format__), - stake=Balance.from_rao(decoded["stake"]), - ) - - @classmethod - def from_vec_u8(cls, vec_u8: List[int]) -> Optional["StakeInfo"]: - """Returns a StakeInfo object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return None - - decoded = from_scale_encoding(vec_u8, ChainDataType.StakeInfo) - if decoded is None: - return None - - return StakeInfo.fix_decoded_values(decoded) - - @classmethod - def list_of_tuple_from_vec_u8( - cls, vec_u8: List[int] - ) -> Dict[str, List["StakeInfo"]]: - """Returns a list of StakeInfo objects from a ``vec_u8``.""" - decoded: Optional[list[tuple[str, 
list[object]]]] = ( - from_scale_encoding_using_type_string( - input_=vec_u8, type_string="Vec<(AccountId, Vec)>" - ) - ) - - if decoded is None: - return {} - - return { - ss58_encode(address=account_id, ss58_format=bittensor.__ss58_format__): [ - StakeInfo.fix_decoded_values(d) for d in stake_info - ] - for account_id, stake_info in decoded - } - - @classmethod - def list_from_vec_u8(cls, vec_u8: List[int]) -> List["StakeInfo"]: - """Returns a list of StakeInfo objects from a ``vec_u8``.""" - decoded = from_scale_encoding(vec_u8, ChainDataType.StakeInfo, is_vec=True) - if decoded is None: - return [] - - return [StakeInfo.fix_decoded_values(d) for d in decoded] - - -@dataclass -class SubnetInfo: - """Dataclass for subnet info.""" - - netuid: int - rho: int - kappa: int - difficulty: int - immunity_period: int - max_allowed_validators: int - min_allowed_weights: int - max_weight_limit: float - scaling_law_power: float - subnetwork_n: int - max_n: int - blocks_since_epoch: int - tempo: int - modality: int - # netuid -> topk percentile prunning score requirement (u16:MAX normalized.) 
- connection_requirements: Dict[str, float] - emission_value: float - burn: Balance - owner_ss58: str - - @classmethod - def from_vec_u8(cls, vec_u8: List[int]) -> Optional["SubnetInfo"]: - """Returns a SubnetInfo object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return None - - decoded = from_scale_encoding(vec_u8, ChainDataType.SubnetInfo) - if decoded is None: - return None - - return SubnetInfo.fix_decoded_values(decoded) - - @classmethod - def list_from_vec_u8(cls, vec_u8: List[int]) -> List["SubnetInfo"]: - r"""Returns a list of SubnetInfo objects from a ``vec_u8``.""" - decoded = from_scale_encoding( - vec_u8, ChainDataType.SubnetInfo, is_vec=True, is_option=True - ) - - if decoded is None: - return [] - - return [SubnetInfo.fix_decoded_values(d) for d in decoded] - - @classmethod - def fix_decoded_values(cls, decoded: Dict) -> "SubnetInfo": - """Returns a SubnetInfo object from a decoded SubnetInfo dictionary.""" - return SubnetInfo( - netuid=decoded["netuid"], - rho=decoded["rho"], - kappa=decoded["kappa"], - difficulty=decoded["difficulty"], - immunity_period=decoded["immunity_period"], - max_allowed_validators=decoded["max_allowed_validators"], - min_allowed_weights=decoded["min_allowed_weights"], - max_weight_limit=decoded["max_weights_limit"], - # adjustment_alpha=decoded["adjustment_alpha"], - # bonds_moving_avg=decoded["bonds_moving_average"], - scaling_law_power=decoded["scaling_law_power"], - subnetwork_n=decoded["subnetwork_n"], - max_n=decoded["max_allowed_uids"], - blocks_since_epoch=decoded["blocks_since_last_step"], - tempo=decoded["tempo"], - modality=decoded["network_modality"], - connection_requirements={ - str(int(netuid)): U16_NORMALIZED_FLOAT(int(req)) - for netuid, req in decoded["network_connect"] - }, - emission_value=decoded["emission_values"], - burn=Balance.from_rao(decoded["burn"]), - owner_ss58=ss58_encode(decoded["owner"], bittensor.__ss58_format__), - ) - - def to_parameter_dict(self) -> Union[dict[str, Any], 
"torch.nn.ParameterDict"]: - """Returns a torch tensor or dict of the subnet info.""" - if use_torch(): - return torch.nn.ParameterDict(self.__dict__) - else: - return self.__dict__ - - @classmethod - def from_parameter_dict( - cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"] - ) -> "SubnetInfo": - if use_torch(): - return cls(**dict(parameter_dict)) - else: - return cls(**parameter_dict) - - -@dataclass -class SubnetHyperparameters: - """Dataclass for subnet hyperparameters.""" - - rho: int - kappa: int - immunity_period: int - min_allowed_weights: int - max_weight_limit: float - tempo: int - min_difficulty: int - max_difficulty: int - weights_version: int - weights_rate_limit: int - adjustment_interval: int - activity_cutoff: int - registration_allowed: bool - target_regs_per_interval: int - min_burn: int - max_burn: int - bonds_moving_avg: int - max_regs_per_block: int - serving_rate_limit: int - max_validators: int - adjustment_alpha: int - difficulty: int - commit_reveal_weights_interval: int - commit_reveal_weights_enabled: bool - alpha_high: int - alpha_low: int - liquid_alpha_enabled: bool - - @classmethod - def from_vec_u8(cls, vec_u8: List[int]) -> Optional["SubnetHyperparameters"]: - """Returns a SubnetHyperparameters object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return None - - decoded = from_scale_encoding(vec_u8, ChainDataType.SubnetHyperparameters) - if decoded is None: - return None - - return SubnetHyperparameters.fix_decoded_values(decoded) - - @classmethod - def list_from_vec_u8(cls, vec_u8: List[int]) -> List["SubnetHyperparameters"]: - """Returns a list of SubnetHyperparameters objects from a ``vec_u8``.""" - decoded = from_scale_encoding( - vec_u8, ChainDataType.SubnetHyperparameters, is_vec=True, is_option=True - ) - if decoded is None: - return [] - - return [SubnetHyperparameters.fix_decoded_values(d) for d in decoded] - - @classmethod - def fix_decoded_values(cls, decoded: Dict) -> "SubnetHyperparameters": - 
"""Returns a SubnetInfo object from a decoded SubnetInfo dictionary.""" - return SubnetHyperparameters( - rho=decoded["rho"], - kappa=decoded["kappa"], - immunity_period=decoded["immunity_period"], - min_allowed_weights=decoded["min_allowed_weights"], - max_weight_limit=decoded["max_weights_limit"], - tempo=decoded["tempo"], - min_difficulty=decoded["min_difficulty"], - max_difficulty=decoded["max_difficulty"], - weights_version=decoded["weights_version"], - weights_rate_limit=decoded["weights_rate_limit"], - adjustment_interval=decoded["adjustment_interval"], - activity_cutoff=decoded["activity_cutoff"], - registration_allowed=decoded["registration_allowed"], - target_regs_per_interval=decoded["target_regs_per_interval"], - min_burn=decoded["min_burn"], - max_burn=decoded["max_burn"], - max_regs_per_block=decoded["max_regs_per_block"], - max_validators=decoded["max_validators"], - serving_rate_limit=decoded["serving_rate_limit"], - bonds_moving_avg=decoded["bonds_moving_avg"], - adjustment_alpha=decoded["adjustment_alpha"], - difficulty=decoded["difficulty"], - commit_reveal_weights_interval=decoded["commit_reveal_weights_interval"], - commit_reveal_weights_enabled=decoded["commit_reveal_weights_enabled"], - alpha_high=decoded["alpha_high"], - alpha_low=decoded["alpha_low"], - liquid_alpha_enabled=decoded["liquid_alpha_enabled"], - ) - - def to_parameter_dict( - self, - ) -> Union[dict[str, Union[int, float, bool]], "torch.nn.ParameterDict"]: - """Returns a torch tensor or dict of the subnet hyperparameters.""" - if use_torch(): - return torch.nn.ParameterDict(self.__dict__) - else: - return self.__dict__ - - @classmethod - def from_parameter_dict( - cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"] - ) -> "SubnetHyperparameters": - if use_torch(): - return cls(**dict(parameter_dict)) - else: - return cls(**parameter_dict) - - -@dataclass -class IPInfo: - """Dataclass for associated IP Info.""" - - ip: str - ip_type: int - protocol: int - - def 
encode(self) -> Dict[str, Any]: - """Returns a dictionary of the IPInfo object that can be encoded.""" - return { - "ip": net.ip_to_int( - self.ip - ), # IP type and protocol are encoded together as a u8 - "ip_type_and_protocol": ((self.ip_type << 4) + self.protocol) & 0xFF, - } - - @classmethod - def from_vec_u8(cls, vec_u8: List[int]) -> Optional["IPInfo"]: - """Returns a IPInfo object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return None - - decoded = from_scale_encoding(vec_u8, ChainDataType.IPInfo) - if decoded is None: - return None - - return IPInfo.fix_decoded_values(decoded) - - @classmethod - def list_from_vec_u8(cls, vec_u8: List[int]) -> List["IPInfo"]: - r"""Returns a list of IPInfo objects from a ``vec_u8``.""" - decoded = from_scale_encoding(vec_u8, ChainDataType.IPInfo, is_vec=True) - - if decoded is None: - return [] - - return [IPInfo.fix_decoded_values(d) for d in decoded] - - @classmethod - def fix_decoded_values(cls, decoded: Dict) -> "IPInfo": - """Returns a SubnetInfo object from a decoded IPInfo dictionary.""" - return IPInfo( - ip=net.int_to_ip(decoded["ip"]), - ip_type=decoded["ip_type_and_protocol"] >> 4, - protocol=decoded["ip_type_and_protocol"] & 0xF, - ) - - def to_parameter_dict( - self, - ) -> Union[dict[str, Union[str, int]], "torch.nn.ParameterDict"]: - """Returns a torch tensor or dict of the subnet IP info.""" - if use_torch(): - return torch.nn.ParameterDict(self.__dict__) - else: - return self.__dict__ - - @classmethod - def from_parameter_dict( - cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"] - ) -> "IPInfo": - if use_torch(): - return cls(**dict(parameter_dict)) - else: - return cls(**parameter_dict) - - -# Senate / Proposal data -class ProposalVoteData(TypedDict): - index: int - threshold: int - ayes: List[str] - nays: List[str] - end: int - - -ProposalCallData = GenericCall - - -@dataclass -class ScheduledColdkeySwapInfo: - """Dataclass for scheduled coldkey swap information.""" - - old_coldkey: 
str - new_coldkey: str - arbitration_block: int - - @classmethod - def fix_decoded_values(cls, decoded: Any) -> "ScheduledColdkeySwapInfo": - """Fixes the decoded values.""" - return cls( - old_coldkey=ss58_encode(decoded["old_coldkey"], bittensor.__ss58_format__), - new_coldkey=ss58_encode(decoded["new_coldkey"], bittensor.__ss58_format__), - arbitration_block=decoded["arbitration_block"], - ) - - @classmethod - def from_vec_u8(cls, vec_u8: List[int]) -> Optional["ScheduledColdkeySwapInfo"]: - """Returns a ScheduledColdkeySwapInfo object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return None - - decoded = from_scale_encoding(vec_u8, ChainDataType.ScheduledColdkeySwapInfo) - if decoded is None: - return None - - return ScheduledColdkeySwapInfo.fix_decoded_values(decoded) - - @classmethod - def list_from_vec_u8(cls, vec_u8: List[int]) -> List["ScheduledColdkeySwapInfo"]: - """Returns a list of ScheduledColdkeySwapInfo objects from a ``vec_u8``.""" - decoded = from_scale_encoding( - vec_u8, ChainDataType.ScheduledColdkeySwapInfo, is_vec=True - ) - if decoded is None: - return [] - - return [ScheduledColdkeySwapInfo.fix_decoded_values(d) for d in decoded] - - @classmethod - def decode_account_id_list(cls, vec_u8: List[int]) -> Optional[List[str]]: - """Decodes a list of AccountIds from vec_u8.""" - decoded = from_scale_encoding( - vec_u8, ChainDataType.ScheduledColdkeySwapInfo.AccountId, is_vec=True - ) - if decoded is None: - return None - return [ - ss58_encode(account_id, bittensor.__ss58_format__) for account_id in decoded - ] diff --git a/bittensor/cli.py b/bittensor/cli.py deleted file mode 100644 index cc87c122e4..0000000000 --- a/bittensor/cli.py +++ /dev/null @@ -1,398 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation 
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import sys -import shtab -import argparse -import bittensor -from typing import List, Optional -from .commands import ( - AutocompleteCommand, - DelegateStakeCommand, - DelegateUnstakeCommand, - GetIdentityCommand, - GetWalletHistoryCommand, - InspectCommand, - ListCommand, - ListDelegatesCommand, - MetagraphCommand, - MyDelegatesCommand, - NewColdkeyCommand, - NewHotkeyCommand, - NominateCommand, - OverviewCommand, - PowRegisterCommand, - ProposalsCommand, - RegenColdkeyCommand, - RegenColdkeypubCommand, - RegenHotkeyCommand, - RegisterCommand, - RegisterSubnetworkCommand, - RootGetWeightsCommand, - RootList, - RootRegisterCommand, - RootSetBoostCommand, - RootSetSlashCommand, - RootSetWeightsCommand, - RunFaucetCommand, - SenateCommand, - SetIdentityCommand, - SetTakeCommand, - StakeCommand, - StakeShow, - SubnetGetHyperparamsCommand, - SubnetHyperparamsCommand, - SubnetListCommand, - SubnetLockCostCommand, - SubnetSudoCommand, - SwapHotkeyCommand, - TransferCommand, - UnStakeCommand, - UpdateCommand, - UpdateWalletCommand, - VoteCommand, - WalletBalanceCommand, - WalletCreateCommand, - CommitWeightCommand, - RevealWeightCommand, - 
CheckColdKeySwapCommand, - SetChildrenCommand, - GetChildrenCommand, - RevokeChildrenCommand, - SetChildKeyTakeCommand, - GetChildKeyTakeCommand, -) - -# Create a console instance for CLI display. -console = bittensor.__console__ - -ALIAS_TO_COMMAND = { - "subnets": "subnets", - "root": "root", - "wallet": "wallet", - "stake": "stake", - "sudo": "sudo", - "legacy": "legacy", - "s": "subnets", - "r": "root", - "w": "wallet", - "st": "stake", - "su": "sudo", - "l": "legacy", - "subnet": "subnets", - "roots": "root", - "wallets": "wallet", - "stakes": "stake", - "sudos": "sudo", - "i": "info", - "info": "info", - "weights": "weights", - "wt": "weights", - "weight": "weights", -} -COMMANDS = { - "subnets": { - "name": "subnets", - "aliases": ["s", "subnet"], - "help": "Commands for managing and viewing subnetworks.", - "commands": { - "list": SubnetListCommand, - "metagraph": MetagraphCommand, - "lock_cost": SubnetLockCostCommand, - "create": RegisterSubnetworkCommand, - "pow_register": PowRegisterCommand, - "register": RegisterCommand, - "hyperparameters": SubnetHyperparamsCommand, - }, - }, - "root": { - "name": "root", - "aliases": ["r", "roots"], - "help": "Commands for managing and viewing the root network.", - "commands": { - "list": RootList, - "weights": RootSetWeightsCommand, - "get_weights": RootGetWeightsCommand, - "boost": RootSetBoostCommand, - "slash": RootSetSlashCommand, - "senate_vote": VoteCommand, - "senate": SenateCommand, - "register": RootRegisterCommand, - "proposals": ProposalsCommand, - "set_take": SetTakeCommand, - "delegate": DelegateStakeCommand, - "undelegate": DelegateUnstakeCommand, - "my_delegates": MyDelegatesCommand, - "list_delegates": ListDelegatesCommand, - "nominate": NominateCommand, - }, - }, - "wallet": { - "name": "wallet", - "aliases": ["w", "wallets"], - "help": "Commands for managing and viewing wallets.", - "commands": { - "list": ListCommand, - "overview": OverviewCommand, - "transfer": TransferCommand, - "inspect": 
InspectCommand, - "balance": WalletBalanceCommand, - "create": WalletCreateCommand, - "new_hotkey": NewHotkeyCommand, - "new_coldkey": NewColdkeyCommand, - "regen_coldkey": RegenColdkeyCommand, - "regen_coldkeypub": RegenColdkeypubCommand, - "regen_hotkey": RegenHotkeyCommand, - "faucet": RunFaucetCommand, - "update": UpdateWalletCommand, - "swap_hotkey": SwapHotkeyCommand, - "set_identity": SetIdentityCommand, - "get_identity": GetIdentityCommand, - "history": GetWalletHistoryCommand, - "check_coldkey_swap": CheckColdKeySwapCommand, - }, - }, - "stake": { - "name": "stake", - "aliases": ["st", "stakes"], - "help": "Commands for staking and removing stake and setting child hotkey accounts.", - "commands": { - "show": StakeShow, - "add": StakeCommand, - "remove": UnStakeCommand, - "get_children": GetChildrenCommand, - "set_children": SetChildrenCommand, - "revoke_children": RevokeChildrenCommand, - "set_childkey_take": SetChildKeyTakeCommand, - "get_childkey_take": GetChildKeyTakeCommand, - }, - }, - "weights": { - "name": "weights", - "aliases": ["wt", "weight"], - "help": "Commands for managing weight for subnets.", - "commands": { - "commit": CommitWeightCommand, - "reveal": RevealWeightCommand, - }, - }, - "sudo": { - "name": "sudo", - "aliases": ["su", "sudos"], - "help": "Commands for subnet management", - "commands": { - # "dissolve": None, - "set": SubnetSudoCommand, - "get": SubnetGetHyperparamsCommand, - }, - }, - "legacy": { - "name": "legacy", - "aliases": ["l"], - "help": "Miscellaneous commands.", - "commands": { - "update": UpdateCommand, - "faucet": RunFaucetCommand, - }, - }, - "info": { - "name": "info", - "aliases": ["i"], - "help": "Instructions for enabling autocompletion for the CLI.", - "commands": { - "autocomplete": AutocompleteCommand, - }, - }, -} - - -class CLIErrorParser(argparse.ArgumentParser): - """ - Custom ArgumentParser for better error messages. 
- """ - - def error(self, message): - """ - This method is called when an error occurs. It prints a custom error message. - """ - sys.stderr.write(f"Error: {message}\n") - self.print_help() - sys.exit(2) - - -class cli: - """ - Implementation of the Command Line Interface (CLI) class for the Bittensor protocol. - This class handles operations like key management (hotkey and coldkey) and token transfer. - """ - - def __init__( - self, - config: Optional["bittensor.config"] = None, - args: Optional[List[str]] = None, - ): - """ - Initializes a bittensor.CLI object. - - Args: - config (bittensor.config, optional): The configuration settings for the CLI. - args (List[str], optional): List of command line arguments. - """ - # Turns on console for cli. - bittensor.turn_console_on() - - # If no config is provided, create a new one from args. - if config is None: - config = cli.create_config(args) - - self.config = config - if self.config.command in ALIAS_TO_COMMAND: - self.config.command = ALIAS_TO_COMMAND[self.config.command] - else: - console.print( - f":cross_mark:[red]Unknown command: {self.config.command}[/red]" - ) - sys.exit() - - # Check if the config is valid. - cli.check_config(self.config) - - # If no_version_checking is not set or set as False in the config, version checking is done. - if not self.config.get("no_version_checking", d=True): - try: - bittensor.utils.check_version() - except bittensor.utils.VersionCheckError: - # If version checking fails, inform user with an exception. - raise RuntimeError( - "To avoid internet-based version checking, pass --no_version_checking while running the CLI." - ) - - @staticmethod - def __create_parser__() -> "argparse.ArgumentParser": - """ - Creates the argument parser for the Bittensor CLI. - - Returns: - argparse.ArgumentParser: An argument parser object for Bittensor CLI. - """ - # Define the basic argument parser. 
- parser = CLIErrorParser( - description=f"bittensor cli v{bittensor.__version__}", - usage="btcli ", - add_help=True, - ) - # Add shtab completion - parser.add_argument( - "--print-completion", - choices=shtab.SUPPORTED_SHELLS, - help="Print shell tab completion script", - ) - # Add arguments for each sub-command. - cmd_parsers = parser.add_subparsers(dest="command") - # Add argument parsers for all available commands. - for command in COMMANDS.values(): - if isinstance(command, dict): - subcmd_parser = cmd_parsers.add_parser( - name=command["name"], - aliases=command["aliases"], - help=command["help"], - ) - subparser = subcmd_parser.add_subparsers( - help=command["help"], dest="subcommand", required=True - ) - - for subcommand in command["commands"].values(): - subcommand.add_args(subparser) - else: - command.add_args(cmd_parsers) - - return parser - - @staticmethod - def create_config(args: List[str]) -> "bittensor.config": - """ - From the argument parser, add config to bittensor.executor and local config - - Args: - args (List[str]): List of command line arguments. - - Returns: - bittensor.config: The configuration object for Bittensor CLI. - """ - parser = cli.__create_parser__() - - # If no arguments are passed, print help text and exit the program. - if len(args) == 0: - parser.print_help() - sys.exit() - - return bittensor.config(parser, args=args) - - @staticmethod - def check_config(config: "bittensor.config"): - """ - Checks if the essential configuration exists under different command - - Args: - config (bittensor.config): The configuration settings for the CLI. - """ - # Check if command exists, if so, run the corresponding check_config. - # If command doesn't exist, inform user and exit the program. 
- if config.command in COMMANDS: - command = config.command - command_data = COMMANDS[command] - - if isinstance(command_data, dict): - if config["subcommand"] is not None: - command_data["commands"][config["subcommand"]].check_config(config) - else: - console.print( - f":cross_mark:[red]Missing subcommand for: {config.command}[/red]" - ) - sys.exit(1) - else: - command_data.check_config(config) - else: - console.print(f":cross_mark:[red]Unknown command: {config.command}[/red]") - sys.exit(1) - - def run(self): - """ - Executes the command from the configuration. - """ - # Check for print-completion argument - if self.config.print_completion: - parser = cli.__create_parser__() - shell = self.config.print_completion - print(shtab.complete(parser, shell)) - return - - # Check if command exists, if so, run the corresponding method. - # If command doesn't exist, inform user and exit the program. - command = self.config.command - if command in COMMANDS: - command_data = COMMANDS[command] - - if isinstance(command_data, dict): - command_data["commands"][self.config["subcommand"]].run(self) - else: - command_data.run(self) - else: - console.print( - f":cross_mark:[red]Unknown command: {self.config.command}[/red]" - ) - sys.exit() diff --git a/bittensor/commands/__init__.py b/bittensor/commands/__init__.py deleted file mode 100644 index e1ac8c74cd..0000000000 --- a/bittensor/commands/__init__.py +++ /dev/null @@ -1,131 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and 
this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -from munch import Munch, munchify - -defaults: Munch = munchify( - { - "netuid": 1, - "subtensor": {"network": "finney", "chain_endpoint": None, "_mock": False}, - "pow_register": { - "num_processes": None, - "update_interval": 50000, - "output_in_place": True, - "verbose": False, - "cuda": {"dev_id": [0], "use_cuda": False, "tpb": 256}, - }, - "axon": { - "port": 8091, - "ip": "[::]", - "external_port": None, - "external_ip": None, - "max_workers": 10, - "maximum_concurrent_rpcs": 400, - }, - "priority": {"max_workers": 5, "maxsize": 10}, - "prometheus": {"port": 7091, "level": "INFO"}, - "wallet": { - "name": "default", - "hotkey": "default", - "path": "~/.bittensor/wallets/", - }, - "dataset": { - "batch_size": 10, - "block_size": 20, - "num_workers": 0, - "dataset_names": "default", - "data_dir": "~/.bittensor/data/", - "save_dataset": False, - "max_datasets": 3, - "num_batches": 100, - }, - "logging": { - "debug": False, - "trace": False, - "record_log": False, - "logging_dir": "~/.bittensor/miners", - }, - } -) - -from .stake import ( - StakeCommand, - StakeShow, - SetChildrenCommand, - GetChildrenCommand, - SetChildKeyTakeCommand, - GetChildKeyTakeCommand, -) -from .unstake import UnStakeCommand, RevokeChildrenCommand -from .overview import OverviewCommand -from .register import ( - PowRegisterCommand, - RegisterCommand, - RunFaucetCommand, - SwapHotkeyCommand, -) -from .delegates import ( 
- NominateCommand, - ListDelegatesCommand, - DelegateStakeCommand, - DelegateUnstakeCommand, - MyDelegatesCommand, - SetTakeCommand, -) -from .wallets import ( - NewColdkeyCommand, - NewHotkeyCommand, - RegenColdkeyCommand, - RegenColdkeypubCommand, - RegenHotkeyCommand, - UpdateWalletCommand, - WalletCreateCommand, - WalletBalanceCommand, - GetWalletHistoryCommand, -) -from .weights import CommitWeightCommand, RevealWeightCommand -from .transfer import TransferCommand -from .inspect import InspectCommand -from .metagraph import MetagraphCommand -from .list import ListCommand -from .misc import UpdateCommand, AutocompleteCommand -from .senate import ( - SenateCommand, - ProposalsCommand, - ShowVotesCommand, - SenateRegisterCommand, - SenateLeaveCommand, - VoteCommand, -) -from .network import ( - RegisterSubnetworkCommand, - SubnetLockCostCommand, - SubnetListCommand, - SubnetSudoCommand, - SubnetHyperparamsCommand, - SubnetGetHyperparamsCommand, -) -from .root import ( - RootRegisterCommand, - RootList, - RootSetWeightsCommand, - RootGetWeightsCommand, - RootSetBoostCommand, - RootSetSlashCommand, -) -from .identity import GetIdentityCommand, SetIdentityCommand -from .check_coldkey_swap import CheckColdKeySwapCommand diff --git a/bittensor/commands/check_coldkey_swap.py b/bittensor/commands/check_coldkey_swap.py deleted file mode 100644 index 2b003e8289..0000000000 --- a/bittensor/commands/check_coldkey_swap.py +++ /dev/null @@ -1,128 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -# -# The above copyright 
notice and this permission notice shall be included in all copies or substantial portions of -# the Software. -# -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import argparse - -from rich.prompt import Prompt - -import bittensor -from bittensor.utils.formatting import convert_blocks_to_time -from . import defaults - -console = bittensor.__console__ - - -def fetch_arbitration_stats(subtensor, wallet): - """ - Performs a check of the current arbitration data (if any), and displays it through the bittensor console. - """ - arbitration_check = len( - subtensor.check_in_arbitration(wallet.coldkeypub.ss58_address) - ) - if arbitration_check == 0: - bittensor.__console__.print( - "[green]There has been no previous key swap initiated for your coldkey.[/green]" - ) - if arbitration_check == 1: - arbitration_remaining = subtensor.get_remaining_arbitration_period( - wallet.coldkeypub.ss58_address - ) - hours, minutes, seconds = convert_blocks_to_time(arbitration_remaining) - bittensor.__console__.print( - "[yellow]There has been 1 swap request made for this coldkey already." - " By adding another swap request, the key will enter arbitration." 
- f" Your key swap is scheduled for {hours} hours, {minutes} minutes, {seconds} seconds" - " from now.[/yellow]" - ) - if arbitration_check > 1: - bittensor.__console__.print( - f"[red]This coldkey is currently in arbitration with a total swaps of {arbitration_check}.[/red]" - ) - - -class CheckColdKeySwapCommand: - """ - Executes the ``check_coldkey_swap`` command to check swap status of a coldkey in the Bittensor network. - Usage: - Users need to specify the wallet they want to check the swap status of. - Example usage:: - btcli wallet check_coldkey_swap - Note: - This command is important for users who wish check if swap requests were made against their coldkey. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - """ - Runs the check coldkey swap command. - Args: - cli (bittensor.cli): The CLI object containing configuration and command-line interface utilities. - """ - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - CheckColdKeySwapCommand._run(cli, subtensor) - except Exception as e: - bittensor.logging.warning(f"Failed to get swap status: {e}") - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - """ - Internal method to check coldkey swap status. - Args: - cli (bittensor.cli): The CLI object containing configuration and command-line interface utilities. - subtensor (bittensor.subtensor): The subtensor object for blockchain interactions. - """ - config = cli.config.copy() - wallet = bittensor.wallet(config=config) - - fetch_arbitration_stats(subtensor, wallet) - - @classmethod - def check_config(cls, config: "bittensor.config"): - """ - Checks and prompts for necessary configuration settings. - Args: - config (bittensor.config): The configuration object. - Prompts the user for wallet name if not set in the config. 
- """ - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name: str = Prompt.ask( - "Enter wallet name", default=defaults.wallet.name - ) - config.wallet.name = str(wallet_name) - - @staticmethod - def add_args(command_parser: argparse.ArgumentParser): - """ - Adds arguments to the command parser. - Args: - command_parser (argparse.ArgumentParser): The command parser to add arguments to. - """ - swap_parser = command_parser.add_parser( - "check_coldkey_swap", - help="""Check the status of swap requests for a coldkey on the Bittensor network. - Adding more than one swap request will make the key go into arbitration mode.""", - ) - bittensor.wallet.add_args(swap_parser) - bittensor.subtensor.add_args(swap_parser) diff --git a/bittensor/commands/delegates.py b/bittensor/commands/delegates.py deleted file mode 100644 index cfba3526d2..0000000000 --- a/bittensor/commands/delegates.py +++ /dev/null @@ -1,1153 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 OpenTensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import argparse -import os -import sys -from typing import List, Dict, Optional - -from rich.console import Text -from rich.prompt import Prompt, FloatPrompt, Confirm -from rich.table import Table -from substrateinterface.exceptions import SubstrateRequestException -from tqdm import tqdm - -import bittensor -from . import defaults -from .identity import SetIdentityCommand -from .utils import get_delegates_details, DelegatesDetails - - -def _get_coldkey_wallets_for_path(path: str) -> List["bittensor.wallet"]: - try: - wallet_names = next(os.walk(os.path.expanduser(path)))[1] - return [bittensor.wallet(path=path, name=name) for name in wallet_names] - except StopIteration: - # No wallet files found. - wallets = [] - return wallets - - -console = bittensor.__console__ - - -def show_delegates_lite( - delegates_lite: List["bittensor.DelegateInfoLite"], width: Optional[int] = None -): - """ - This method is a lite version of the :func:`show_delegates`. This method displays a formatted table of Bittensor network delegates with detailed statistics to the console. - - The table is sorted by total stake in descending order and provides - a snapshot of delegate performance and status, helping users make informed decisions for staking or nominating. - - This helper function is not intended to be used directly in user code unless specifically required. - - Args: - delegates_lite (List[bittensor.DelegateInfoLite]): A list of delegate information objects to be displayed. - width (Optional[int]): The width of the console output table. Defaults to ``None``, which will make the table expand to the maximum width of the console. - - The output table contains the following columns. 
To display more columns, use the :func:`show_delegates` function. - - - INDEX: The numerical index of the delegate. - - DELEGATE: The name of the delegate. - - SS58: The truncated SS58 address of the delegate. - - NOMINATORS: The number of nominators supporting the delegate. - - VPERMIT: Validator permits held by the delegate for the subnets. - - TAKE: The percentage of the delegate's earnings taken by the network. - - DELEGATE/(24h): The earnings of the delegate in the last 24 hours. - - Desc: A brief description provided by the delegate. - - Usage: - This function is typically used within the Bittensor CLI to show current delegate options to users who are considering where to stake their tokens. - - Example usage:: - - show_delegates_lite(delegates_lite, width=80) - - Note: - This function is primarily for display purposes within a command-line interface and does not return any values. It relies on the `rich `_ Python library to render - the table in the console. - """ - - registered_delegate_info: Optional[Dict[str, DelegatesDetails]] = ( - get_delegates_details(url=bittensor.__delegates_details_url__) - ) - if registered_delegate_info is None: - bittensor.__console__.print( - ":warning:[yellow]Could not get delegate info from chain.[/yellow]" - ) - registered_delegate_info = {} - - table = Table(show_footer=True, width=width, pad_edge=False, box=None, expand=True) - table.add_column( - "[overline white]INDEX", - str(len(delegates_lite)), - footer_style="overline white", - style="bold white", - ) - table.add_column( - "[overline white]DELEGATE", - style="rgb(50,163,219)", - no_wrap=True, - justify="left", - ) - table.add_column( - "[overline white]SS58", - str(len(delegates_lite)), - footer_style="overline white", - style="bold yellow", - ) - table.add_column( - "[overline white]NOMINATORS", justify="center", style="green", no_wrap=True - ) - table.add_column("[overline white]VPERMIT", justify="right", no_wrap=False) - table.add_column("[overline white]TAKE", 
style="white", no_wrap=True) - table.add_column("[overline white]DELEGATE/(24h)", style="green", justify="center") - table.add_column("[overline white]Desc", style="rgb(50,163,219)") - - for i, d in enumerate(delegates_lite): - if d.delegate_ss58 in registered_delegate_info: - delegate_name = registered_delegate_info[d.delegate_ss58].name - delegate_url = registered_delegate_info[d.delegate_ss58].url - delegate_description = registered_delegate_info[d.delegate_ss58].description - else: - delegate_name = "" - delegate_url = "" - delegate_description = "" - - table.add_row( - # `INDEX` column - str(i), - # `DELEGATE` column - Text(delegate_name, style=f"link {delegate_url}"), - # `SS58` column - f"{d.delegate_ss58:8.8}...", - # `NOMINATORS` column - str(d.nominators), - # `VPERMIT` column - str(d.registrations), - # `TAKE` column - f"{d.take * 100:.1f}%", - # `DELEGATE/(24h)` column - f"τ{bittensor.Balance.from_tao(d.total_daily_return * 0.18) !s:6.6}", - # `Desc` column - str(delegate_description), - end_section=True, - ) - bittensor.__console__.print(table) - - -# Uses rich console to pretty print a table of delegates. -def show_delegates( - delegates: List["bittensor.DelegateInfo"], - prev_delegates: Optional[List["bittensor.DelegateInfo"]], - width: Optional[int] = None, -): - """ - Displays a formatted table of Bittensor network delegates with detailed statistics to the console. - - The table is sorted by total stake in descending order and provides - a snapshot of delegate performance and status, helping users make informed decisions for staking or nominating. - - This is a helper function that is called by the :func:`list_delegates` and :func:`my_delegates`, and is not intended - to be used directly in user code unless specifically required. - - Args: - delegates (List[bittensor.DelegateInfo]): A list of delegate information objects to be displayed. 
- prev_delegates (Optional[List[bittensor.DelegateInfo]]): A list of delegate information objects from a previous state, used to calculate changes in stake. Defaults to ``None``. - width (Optional[int]): The width of the console output table. Defaults to ``None``, which will make the table expand to the maximum width of the console. - - The output table contains the following columns: - - - INDEX: The numerical index of the delegate. - - DELEGATE: The name of the delegate. - - SS58: The truncated SS58 address of the delegate. - - NOMINATORS: The number of nominators supporting the delegate. - - DELEGATE STAKE(τ): The stake that is directly delegated to the delegate. - - TOTAL STAKE(τ): The total stake held by the delegate, including nominators' stake. - - CHANGE/(4h): The percentage change in the delegate's stake over the past 4 hours. - - VPERMIT: Validator permits held by the delegate for the subnets. - - TAKE: The percentage of the delegate's earnings taken by the network. - - NOMINATOR/(24h)/kτ: The earnings per 1000 τ staked by nominators in the last 24 hours. - - DELEGATE/(24h): The earnings of the delegate in the last 24 hours. - - Desc: A brief description provided by the delegate. - - Usage: - This function is typically used within the Bittensor CLI to show current delegate options to users who are considering where to stake their tokens. - - Example usage:: - - show_delegates(current_delegates, previous_delegates, width=80) - - Note: - This function is primarily for display purposes within a command-line interface and does - not return any values. It relies on the `rich `_ Python library to render - the table in the - console. 
- """ - - delegates.sort(key=lambda delegate: delegate.total_stake, reverse=True) - prev_delegates_dict = {} - if prev_delegates is not None: - for prev_delegate in prev_delegates: - prev_delegates_dict[prev_delegate.hotkey_ss58] = prev_delegate - - registered_delegate_info: Optional[Dict[str, DelegatesDetails]] = ( - get_delegates_details(url=bittensor.__delegates_details_url__) - ) - if registered_delegate_info is None: - bittensor.__console__.print( - ":warning:[yellow]Could not get delegate info from chain.[/yellow]" - ) - registered_delegate_info = {} - - table = Table(show_footer=True, width=width, pad_edge=False, box=None, expand=True) - table.add_column( - "[overline white]INDEX", - str(len(delegates)), - footer_style="overline white", - style="bold white", - ) - table.add_column( - "[overline white]DELEGATE", - style="rgb(50,163,219)", - no_wrap=True, - justify="left", - ) - table.add_column( - "[overline white]SS58", - str(len(delegates)), - footer_style="overline white", - style="bold yellow", - ) - table.add_column( - "[overline white]NOMINATORS", justify="center", style="green", no_wrap=True - ) - table.add_column( - "[overline white]DELEGATE STAKE(\u03c4)", justify="right", no_wrap=True - ) - table.add_column( - "[overline white]TOTAL STAKE(\u03c4)", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column("[overline white]CHANGE/(4h)", style="grey0", justify="center") - table.add_column("[overline white]VPERMIT", justify="right", no_wrap=False) - table.add_column("[overline white]TAKE", style="white", no_wrap=True) - table.add_column( - "[overline white]NOMINATOR/(24h)/k\u03c4", style="green", justify="center" - ) - table.add_column("[overline white]DELEGATE/(24h)", style="green", justify="center") - table.add_column("[overline white]Desc", style="rgb(50,163,219)") - - for i, delegate in enumerate(delegates): - owner_stake = next( - map( - lambda x: x[1], # get stake - filter( - lambda x: x[0] == delegate.owner_ss58, 
delegate.nominators - ), # filter for owner - ), - bittensor.Balance.from_rao(0), # default to 0 if no owner stake. - ) - if delegate.hotkey_ss58 in registered_delegate_info: - delegate_name = registered_delegate_info[delegate.hotkey_ss58].name - delegate_url = registered_delegate_info[delegate.hotkey_ss58].url - delegate_description = registered_delegate_info[ - delegate.hotkey_ss58 - ].description - else: - delegate_name = "" - delegate_url = "" - delegate_description = "" - - if delegate.hotkey_ss58 in prev_delegates_dict: - prev_stake = prev_delegates_dict[delegate.hotkey_ss58].total_stake - if prev_stake == 0: - rate_change_in_stake_str = "[green]100%[/green]" - else: - rate_change_in_stake = ( - 100 - * (float(delegate.total_stake) - float(prev_stake)) - / float(prev_stake) - ) - if rate_change_in_stake > 0: - rate_change_in_stake_str = "[green]{:.2f}%[/green]".format( - rate_change_in_stake - ) - elif rate_change_in_stake < 0: - rate_change_in_stake_str = "[red]{:.2f}%[/red]".format( - rate_change_in_stake - ) - else: - rate_change_in_stake_str = "[grey0]0%[/grey0]" - else: - rate_change_in_stake_str = "[grey0]NA[/grey0]" - - table.add_row( - # INDEX - str(i), - # DELEGATE - Text(delegate_name, style=f"link {delegate_url}"), - # SS58 - f"{delegate.hotkey_ss58:8.8}...", - # NOMINATORS - str(len([nom for nom in delegate.nominators if nom[1].rao > 0])), - # DELEGATE STAKE - f"{owner_stake!s:13.13}", - # TOTAL STAKE - f"{delegate.total_stake!s:13.13}", - # CHANGE/(4h) - rate_change_in_stake_str, - # VPERMIT - str(delegate.registrations), - # TAKE - f"{delegate.take * 100:.1f}%", - # NOMINATOR/(24h)/k - f"{bittensor.Balance.from_tao( delegate.total_daily_return.tao * (1000/ (0.001 + delegate.total_stake.tao)))!s:6.6}", - # DELEGATE/(24h) - f"{bittensor.Balance.from_tao(delegate.total_daily_return.tao * 0.18) !s:6.6}", - # Desc - str(delegate_description), - end_section=True, - ) - bittensor.__console__.print(table) - - -class DelegateStakeCommand: - """ - 
Executes the ``delegate`` command, which stakes Tao to a specified delegate on the Bittensor network. - - This action allocates the user's Tao to support a delegate, potentially earning staking rewards in return. - - Optional Arguments: - - ``wallet.name``: The name of the wallet to use for the command. - - ``delegate_ss58key``: The ``SS58`` address of the delegate to stake to. - - ``amount``: The amount of Tao to stake. - - ``all``: If specified, the command stakes all available Tao. - - The command interacts with the user to determine the delegate and the amount of Tao to be staked. If the ``--all`` - flag is used, it delegates the entire available balance. - - Usage: - The user must specify the delegate's SS58 address and the amount of Tao to stake. The function sends a - transaction to the subtensor network to delegate the specified amount to the chosen delegate. These values are - prompted if not provided. - - Example usage:: - - btcli delegate --delegate_ss58key --amount - btcli delegate --delegate_ss58key --all - - Note: - This command modifies the blockchain state and may incur transaction fees. It requires user confirmation and - interaction, and is designed to be used within the Bittensor CLI environment. The user should ensure the - delegate's address and the amount to be staked are correct before executing the command. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Delegates stake to a chain delegate.""" - try: - config = cli.config.copy() - wallet = bittensor.wallet(config=config) - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - subtensor.delegate( - wallet=wallet, - delegate_ss58=config.get("delegate_ss58key"), - amount=config.get("amount"), - wait_for_inclusion=True, - prompt=not config.no_prompt, - ) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - delegate_stake_parser = parser.add_parser( - "delegate", help="""Delegate Stake to an account.""" - ) - delegate_stake_parser.add_argument( - "--delegate_ss58key", - "--delegate_ss58", - dest="delegate_ss58key", - type=str, - required=False, - help="""The ss58 address of the choosen delegate""", - ) - delegate_stake_parser.add_argument( - "--all", dest="stake_all", action="store_true" - ) - delegate_stake_parser.add_argument( - "--amount", dest="amount", type=float, required=False - ) - bittensor.wallet.add_args(delegate_stake_parser) - bittensor.subtensor.add_args(delegate_stake_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.get("delegate_ss58key"): - # Check for delegates. 
- with bittensor.__console__.status(":satellite: Loading delegates..."): - subtensor = bittensor.subtensor(config=config, log_verbose=False) - delegates: List[bittensor.DelegateInfo] = subtensor.get_delegates() - try: - prev_delegates = subtensor.get_delegates( - max(0, subtensor.block - 1200) - ) - except SubstrateRequestException: - prev_delegates = None - - if prev_delegates is None: - bittensor.__console__.print( - ":warning: [yellow]Could not fetch delegates history[/yellow]" - ) - - if len(delegates) == 0: - console.print( - ":cross_mark: [red]There are no delegates on {}[/red]".format( - subtensor.network - ) - ) - sys.exit(1) - - delegates.sort(key=lambda delegate: delegate.total_stake, reverse=True) - show_delegates(delegates, prev_delegates=prev_delegates) - delegate_index = Prompt.ask("Enter delegate index") - config.delegate_ss58key = str(delegates[int(delegate_index)].hotkey_ss58) - console.print( - "Selected: [yellow]{}[/yellow]".format(config.delegate_ss58key) - ) - - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - # Get amount. - if not config.get("amount") and not config.get("stake_all"): - if not Confirm.ask( - "Stake all Tao from account: [bold]'{}'[/bold]?".format( - config.wallet.get("name", defaults.wallet.name) - ) - ): - amount = Prompt.ask("Enter Tao amount to stake") - try: - config.amount = float(amount) - except ValueError: - console.print( - ":cross_mark: [red]Invalid Tao amount[/red] [bold white]{}[/bold white]".format( - amount - ) - ) - sys.exit() - else: - config.stake_all = True - - -class DelegateUnstakeCommand: - """ - Executes the ``undelegate`` command, allowing users to withdraw their staked Tao from a delegate on the Bittensor - network. - - This process is known as "undelegating" and it reverses the delegation process, freeing up the staked tokens. 
- - Optional Arguments: - - ``wallet.name``: The name of the wallet to use for the command. - - ``delegate_ss58key``: The ``SS58`` address of the delegate to undelegate from. - - ``amount``: The amount of Tao to undelegate. - - ``all``: If specified, the command undelegates all staked Tao from the delegate. - - The command prompts the user for the amount of Tao to undelegate and the ``SS58`` address of the delegate from which - to undelegate. If the ``--all`` flag is used, it will attempt to undelegate the entire staked amount from the - specified delegate. - - Usage: - The user must provide the delegate's SS58 address and the amount of Tao to undelegate. The function will then - send a transaction to the Bittensor network to process the undelegation. - - Example usage:: - - btcli undelegate --delegate_ss58key --amount - btcli undelegate --delegate_ss58key --all - - Note: - This command can result in a change to the blockchain state and may incur transaction fees. It is interactive - and requires confirmation from the user before proceeding. It should be used with care as undelegating can - affect the delegate's total stake and - potentially the user's staking rewards. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Undelegates stake from a chain delegate.""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - DelegateUnstakeCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - def _run(self: "bittensor.cli", subtensor: "bittensor.subtensor"): - """Undelegates stake from a chain delegate.""" - config = self.config.copy() - wallet = bittensor.wallet(config=config) - subtensor.undelegate( - wallet=wallet, - delegate_ss58=config.get("delegate_ss58key"), - amount=config.get("amount"), - wait_for_inclusion=True, - prompt=not config.no_prompt, - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - undelegate_stake_parser = parser.add_parser( - "undelegate", help="""Undelegate Stake from an account.""" - ) - undelegate_stake_parser.add_argument( - "--delegate_ss58key", - "--delegate_ss58", - dest="delegate_ss58key", - type=str, - required=False, - help="""The ss58 address of the choosen delegate""", - ) - undelegate_stake_parser.add_argument( - "--all", dest="unstake_all", action="store_true" - ) - undelegate_stake_parser.add_argument( - "--amount", dest="amount", type=float, required=False - ) - bittensor.wallet.add_args(undelegate_stake_parser) - bittensor.subtensor.add_args(undelegate_stake_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.get("delegate_ss58key"): - # Check for delegates. 
- with bittensor.__console__.status(":satellite: Loading delegates..."): - subtensor = bittensor.subtensor(config=config, log_verbose=False) - delegates: List[bittensor.DelegateInfo] = subtensor.get_delegates() - try: - prev_delegates = subtensor.get_delegates( - max(0, subtensor.block - 1200) - ) - except SubstrateRequestException: - prev_delegates = None - - if prev_delegates is None: - bittensor.__console__.print( - ":warning: [yellow]Could not fetch delegates history[/yellow]" - ) - - if len(delegates) == 0: - console.print( - ":cross_mark: [red]There are no delegates on {}[/red]".format( - subtensor.network - ) - ) - sys.exit(1) - - delegates.sort(key=lambda delegate: delegate.total_stake, reverse=True) - show_delegates(delegates, prev_delegates=prev_delegates) - delegate_index = Prompt.ask("Enter delegate index") - config.delegate_ss58key = str(delegates[int(delegate_index)].hotkey_ss58) - console.print( - "Selected: [yellow]{}[/yellow]".format(config.delegate_ss58key) - ) - - # Get amount. - if not config.get("amount") and not config.get("unstake_all"): - if not Confirm.ask( - "Unstake all Tao to account: [bold]'{}'[/bold]?".format( - config.wallet.get("name", defaults.wallet.name) - ) - ): - amount = Prompt.ask("Enter Tao amount to unstake") - try: - config.amount = float(amount) - except ValueError: - console.print( - ":cross_mark: [red]Invalid Tao amount[/red] [bold white]{}[/bold white]".format( - amount - ) - ) - sys.exit() - else: - config.unstake_all = True - - -class ListDelegatesCommand: - """ - Displays a formatted table of Bittensor network delegates, providing a comprehensive overview of delegate statistics and information. - - This table helps users make informed decisions on which delegates to allocate their TAO stake. - - Optional Arguments: - - ``wallet.name``: The name of the wallet to use for the command. - - ``subtensor.network``: The name of the network to use for the command. 
- - The table columns include: - - - INDEX: The delegate's index in the sorted list. - - DELEGATE: The name of the delegate. - - SS58: The delegate's unique SS58 address (truncated for display). - - NOMINATORS: The count of nominators backing the delegate. - - DELEGATE STAKE(τ): The amount of delegate's own stake (not the TAO delegated from any nominators). - - TOTAL STAKE(τ): The delegate's cumulative stake, including self-staked and nominators' stakes. - - CHANGE/(4h): The percentage change in the delegate's stake over the last four hours. - - SUBNETS: The subnets to which the delegate is registered. - - VPERMIT: Indicates the subnets for which the delegate has validator permits. - - NOMINATOR/(24h)/kτ: The earnings per 1000 τ staked by nominators in the last 24 hours. - - DELEGATE/(24h): The total earnings of the delegate in the last 24 hours. - - DESCRIPTION: A brief description of the delegate's purpose and operations. - - Sorting is done based on the ``TOTAL STAKE`` column in descending order. Changes in stake are highlighted: - increases in green and decreases in red. Entries with no previous data are marked with ``NA``. Each delegate's name - is a hyperlink to their respective URL, if available. - - Example usage:: - - btcli root list_delegates - btcli root list_delegates --wallet.name my_wallet - btcli root list_delegates --subtensor.network finney # can also be `test` or `local` - - Note: - This function is part of the Bittensor CLI tools and is intended for use within a console application. It prints - directly to the console and does not return any value. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r""" - List all delegates on the network. 
- """ - try: - cli.config.subtensor.network = "archive" - cli.config.subtensor.chain_endpoint = ( - "wss://archive.chain.opentensor.ai:443" - ) - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - ListDelegatesCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r""" - List all delegates on the network. - """ - with bittensor.__console__.status(":satellite: Loading delegates..."): - delegates: list[bittensor.DelegateInfo] = subtensor.get_delegates() - - try: - prev_delegates = subtensor.get_delegates(max(0, subtensor.block - 1200)) - except SubstrateRequestException: - prev_delegates = None - - if prev_delegates is None: - bittensor.__console__.print( - ":warning: [yellow]Could not fetch delegates history[/yellow]" - ) - - show_delegates( - delegates, - prev_delegates=prev_delegates, - width=cli.config.get("width", None), - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - list_delegates_parser = parser.add_parser( - "list_delegates", help="""List all delegates on the network""" - ) - bittensor.subtensor.add_args(list_delegates_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - pass - - -class NominateCommand: - """ - Executes the ``nominate`` command, which facilitates a wallet to become a delegate on the Bittensor network. - - This command handles the nomination process, including wallet unlocking and verification of the hotkey's current - delegate status. - - The command performs several checks: - - - Verifies that the hotkey is not already a delegate to prevent redundant nominations. - - Tries to nominate the wallet and reports success or failure. - - Upon success, the wallet's hotkey is registered as a delegate on the network. 
- - Optional Arguments: - - ``wallet.name``: The name of the wallet to use for the command. - - ``wallet.hotkey``: The name of the hotkey to use for the command. - - Usage: - To run the command, the user must have a configured wallet with both hotkey and coldkey. If the wallet is not - already nominated, this command will initiate the process. - - Example usage:: - - btcli root nominate - btcli root nominate --wallet.name my_wallet --wallet.hotkey my_hotkey - - Note: - This function is intended to be used as a CLI command. It prints the outcome directly to the console and does - not return any value. It should not be called programmatically in user code due to its interactive nature and - side effects on the network state. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Nominate wallet.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - NominateCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Nominate wallet.""" - wallet = bittensor.wallet(config=cli.config) - - # Unlock the wallet. - wallet.hotkey - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return - - # Check if the hotkey is already a delegate. - if subtensor.is_hotkey_delegate(wallet.hotkey.ss58_address): - bittensor.__console__.print( - "Aborting: Hotkey {} is already a delegate.".format( - wallet.hotkey.ss58_address - ) - ) - return - - result: bool = subtensor.nominate(wallet) - if not result: - bittensor.__console__.print( - "Could not became a delegate on [white]{}[/white]".format( - subtensor.network - ) - ) - else: - # Check if we are a delegate. 
- is_delegate: bool = subtensor.is_hotkey_delegate(wallet.hotkey.ss58_address) - if not is_delegate: - bittensor.__console__.print( - "Could not became a delegate on [white]{}[/white]".format( - subtensor.network - ) - ) - return - bittensor.__console__.print( - "Successfully became a delegate on [white]{}[/white]".format( - subtensor.network - ) - ) - - # Prompt use to set identity on chain. - if not cli.config.no_prompt: - do_set_identity = Prompt.ask( - f"Subnetwork registered successfully. Would you like to set your identity? [y/n]", - choices=["y", "n"], - ) - - if do_set_identity.lower() == "y": - subtensor.close() - config = cli.config.copy() - SetIdentityCommand.check_config(config) - cli.config = config - SetIdentityCommand.run(cli) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - nominate_parser = parser.add_parser( - "nominate", help="""Become a delegate on the network""" - ) - bittensor.wallet.add_args(nominate_parser) - bittensor.subtensor.add_args(nominate_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - -class MyDelegatesCommand: - """ - Executes the ``my_delegates`` command within the Bittensor CLI, which retrieves and displays a table of delegated - stakes from a user's wallet(s) to various delegates on the Bittensor network. - - The command provides detailed insights into the user's - staking activities and the performance of their chosen delegates. - - Optional Arguments: - - ``wallet.name``: The name of the wallet to use for the command. - - ``all``: If specified, the command aggregates information across all wallets. 
- - The table output includes the following columns: - - - Wallet: The name of the user's wallet. - - OWNER: The name of the delegate's owner. - - SS58: The truncated SS58 address of the delegate. - - Delegation: The amount of Tao staked by the user to the delegate. - - τ/24h: The earnings from the delegate to the user over the past 24 hours. - - NOMS: The number of nominators for the delegate. - - OWNER STAKE(τ): The stake amount owned by the delegate. - - TOTAL STAKE(τ): The total stake amount held by the delegate. - - SUBNETS: The list of subnets the delegate is a part of. - - VPERMIT: Validator permits held by the delegate for various subnets. - - 24h/kτ: Earnings per 1000 Tao staked over the last 24 hours. - - Desc: A description of the delegate. - - The command also sums and prints the total amount of Tao delegated across all wallets. - - Usage: - The command can be run as part of the Bittensor CLI suite of tools and requires no parameters if a single wallet - is used. If multiple wallets are present, the ``--all`` flag can be specified to aggregate information across - all wallets. - - Example usage:: - - btcli my_delegates - btcli my_delegates --all - btcli my_delegates --wallet.name my_wallet - - Note: - This function is typically called by the CLI parser and is not intended to be used directly in user code. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Delegates stake to a chain delegate.""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - MyDelegatesCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - """Delegates stake to a chain delegate.""" - config = cli.config.copy() - if config.get("all", d=None): - wallets = _get_coldkey_wallets_for_path(config.wallet.path) - else: - wallets = [bittensor.wallet(config=config)] - - table = Table(show_footer=True, pad_edge=False, box=None, expand=True) - table.add_column( - "[overline white]Wallet", footer_style="overline white", style="bold white" - ) - table.add_column( - "[overline white]OWNER", - style="rgb(50,163,219)", - no_wrap=True, - justify="left", - ) - table.add_column( - "[overline white]SS58", footer_style="overline white", style="bold yellow" - ) - table.add_column( - "[overline green]Delegation", - footer_style="overline green", - style="bold green", - ) - table.add_column( - "[overline green]\u03c4/24h", - footer_style="overline green", - style="bold green", - ) - table.add_column( - "[overline white]NOMS", justify="center", style="green", no_wrap=True - ) - table.add_column( - "[overline white]OWNER STAKE(\u03c4)", justify="right", no_wrap=True - ) - table.add_column( - "[overline white]TOTAL STAKE(\u03c4)", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]SUBNETS", justify="right", style="white", no_wrap=True - ) - table.add_column("[overline white]VPERMIT", justify="right", no_wrap=True) - table.add_column("[overline white]24h/k\u03c4", style="green", justify="center") - table.add_column("[overline white]Desc", style="rgb(50,163,219)") - total_delegated = 0 - - for wallet in 
tqdm(wallets): - if not wallet.coldkeypub_file.exists_on_device(): - continue - delegates = subtensor.get_delegated( - coldkey_ss58=wallet.coldkeypub.ss58_address - ) - - my_delegates = {} # hotkey, amount - for delegate in delegates: - for coldkey_addr, staked in delegate[0].nominators: - if ( - coldkey_addr == wallet.coldkeypub.ss58_address - and staked.tao > 0 - ): - my_delegates[delegate[0].hotkey_ss58] = staked - - delegates.sort(key=lambda delegate: delegate[0].total_stake, reverse=True) - total_delegated += sum(my_delegates.values()) - - registered_delegate_info: Optional[DelegatesDetails] = ( - get_delegates_details(url=bittensor.__delegates_details_url__) - ) - if registered_delegate_info is None: - bittensor.__console__.print( - ":warning:[yellow]Could not get delegate info from chain.[/yellow]" - ) - registered_delegate_info = {} - - for i, delegate in enumerate(delegates): - owner_stake = next( - map( - lambda x: x[1], # get stake - filter( - lambda x: x[0] == delegate[0].owner_ss58, - delegate[0].nominators, - ), # filter for owner - ), - bittensor.Balance.from_rao(0), # default to 0 if no owner stake. 
- ) - if delegate[0].hotkey_ss58 in registered_delegate_info: - delegate_name = registered_delegate_info[ - delegate[0].hotkey_ss58 - ].name - delegate_url = registered_delegate_info[delegate[0].hotkey_ss58].url - delegate_description = registered_delegate_info[ - delegate[0].hotkey_ss58 - ].description - else: - delegate_name = "" - delegate_url = "" - delegate_description = "" - - if delegate[0].hotkey_ss58 in my_delegates: - table.add_row( - wallet.name, - Text(delegate_name, style=f"link {delegate_url}"), - f"{delegate[0].hotkey_ss58:8.8}...", - f"{my_delegates[delegate[0].hotkey_ss58]!s:13.13}", - f"{delegate[0].total_daily_return.tao * (my_delegates[delegate[0].hotkey_ss58]/delegate[0].total_stake.tao)!s:6.6}", - str(len(delegate[0].nominators)), - f"{owner_stake!s:13.13}", - f"{delegate[0].total_stake!s:13.13}", - str(delegate[0].registrations), - str( - [ - "*" if subnet in delegate[0].validator_permits else "" - for subnet in delegate[0].registrations - ] - ), - # f'{delegate.take * 100:.1f}%',s - f"{ delegate[0].total_daily_return.tao * ( 1000 / ( 0.001 + delegate[0].total_stake.tao ) )!s:6.6}", - str(delegate_description), - # f'{delegate_profile.description:140.140}', - ) - - bittensor.__console__.print(table) - bittensor.__console__.print("Total delegated Tao: {}".format(total_delegated)) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - delegate_stake_parser = parser.add_parser( - "my_delegates", - help="""Show all delegates where I am delegating a positive amount of stake""", - ) - delegate_stake_parser.add_argument( - "--all", - action="store_true", - help="""Check all coldkey wallets.""", - default=False, - ) - bittensor.wallet.add_args(delegate_stake_parser) - bittensor.subtensor.add_args(delegate_stake_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if ( - not config.get("all", d=None) - and not config.is_set("wallet.name") - and not config.no_prompt - ): - wallet_name = Prompt.ask("Enter wallet 
name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - -class SetTakeCommand: - """ - Executes the ``set_take`` command, which sets the delegate take. - - The command performs several checks: - - 1. Hotkey is already a delegate - 2. New take value is within 0-18% range - - Optional Arguments: - - ``take``: The new take value - - ``wallet.name``: The name of the wallet to use for the command. - - ``wallet.hotkey``: The name of the hotkey to use for the command. - - Usage: - To run the command, the user must have a configured wallet with both hotkey and coldkey. Also, the hotkey should already be a delegate. - - Example usage:: - btcli root set_take --wallet.name my_wallet --wallet.hotkey my_hotkey - - Note: - This function can be used to update the takes individually for every subnet - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Set delegate take.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - SetTakeCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Set delegate take.""" - config = cli.config.copy() - wallet = bittensor.wallet(config=cli.config) - - # Unlock the wallet. - wallet.hotkey - wallet.coldkey - - # Check if the hotkey is not a delegate. - if not subtensor.is_hotkey_delegate(wallet.hotkey.ss58_address): - bittensor.__console__.print( - "Aborting: Hotkey {} is NOT a delegate.".format( - wallet.hotkey.ss58_address - ) - ) - return - - # Prompt user for take value. 
- new_take_str = config.get("take") - if new_take_str == None: - new_take = FloatPrompt.ask(f"Enter take value (0.18 for 18%)") - else: - new_take = float(new_take_str) - - if new_take > 0.18: - bittensor.__console__.print("ERROR: Take value should not exceed 18%") - return - - result: bool = subtensor.set_take( - wallet=wallet, - delegate_ss58=wallet.hotkey.ss58_address, - take=new_take, - ) - if not result: - bittensor.__console__.print("Could not set the take") - else: - # Check if we are a delegate. - is_delegate: bool = subtensor.is_hotkey_delegate(wallet.hotkey.ss58_address) - if not is_delegate: - bittensor.__console__.print( - "Could not set the take [white]{}[/white]".format(subtensor.network) - ) - return - bittensor.__console__.print( - "Successfully set the take on [white]{}[/white]".format( - subtensor.network - ) - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - set_take_parser = parser.add_parser( - "set_take", help="""Set take for delegate""" - ) - set_take_parser.add_argument( - "--take", - dest="take", - type=float, - required=False, - help="""Take as a float number""", - ) - bittensor.wallet.add_args(set_take_parser) - bittensor.subtensor.add_args(set_take_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) diff --git a/bittensor/commands/identity.py b/bittensor/commands/identity.py deleted file mode 100644 index 4f74548495..0000000000 --- a/bittensor/commands/identity.py +++ /dev/null @@ -1,344 +0,0 @@ -import argparse -from rich.table import Table -from rich.prompt import Prompt -from sys import getsizeof - -import bittensor - - -class 
SetIdentityCommand: - """ - Executes the :func:`set_identity` command within the Bittensor network, which allows for the creation or update of a delegate's on-chain identity. - - This identity includes various - attributes such as display name, legal name, web URL, PGP fingerprint, and contact - information, among others. - - Optional Arguments: - - ``display``: The display name for the identity. - - ``legal``: The legal name for the identity. - - ``web``: The web URL for the identity. - - ``riot``: The riot handle for the identity. - - ``email``: The email address for the identity. - - ``pgp_fingerprint``: The PGP fingerprint for the identity. - - ``image``: The image URL for the identity. - - ``info``: The info for the identity. - - ``twitter``: The X (twitter) URL for the identity. - - The command prompts the user for the different identity attributes and validates the - input size for each attribute. It provides an option to update an existing validator - hotkey identity. If the user consents to the transaction cost, the identity is updated - on the blockchain. - - Each field has a maximum size of 64 bytes. The PGP fingerprint field is an exception - and has a maximum size of 20 bytes. The user is prompted to enter the PGP fingerprint - as a hex string, which is then converted to bytes. The user is also prompted to enter - the coldkey or hotkey ``ss58`` address for the identity to be updated. If the user does - not have a hotkey, the coldkey address is used by default. - - If setting a validator identity, the hotkey will be used by default. If the user is - setting an identity for a subnet, the coldkey will be used by default. - - Usage: - The user should call this command from the command line and follow the interactive - prompts to enter or update the identity information. The command will display the - updated identity details in a table format upon successful execution. 
- - Example usage:: - - btcli wallet set_identity - - Note: - This command should only be used if the user is willing to incur the 1 TAO transaction - fee associated with setting an identity on the blockchain. It is a high-level command - that makes changes to the blockchain state and should not be used programmatically as - part of other scripts or applications. - """ - - def run(cli: "bittensor.cli"): - r"""Create a new or update existing identity on-chain.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - SetIdentityCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Create a new or update existing identity on-chain.""" - console = bittensor.__console__ - - wallet = bittensor.wallet(config=cli.config) - - id_dict = { - "display": cli.config.display, - "legal": cli.config.legal, - "web": cli.config.web, - "pgp_fingerprint": cli.config.pgp_fingerprint, - "riot": cli.config.riot, - "email": cli.config.email, - "image": cli.config.image, - "twitter": cli.config.twitter, - "info": cli.config.info, - } - - for field, string in id_dict.items(): - if getsizeof(string) > 113: # 64 + 49 overhead bytes for string - raise ValueError(f"Identity value `{field}` must be <= 64 raw bytes") - - identified = ( - wallet.hotkey.ss58_address - if str( - Prompt.ask( - "Are you updating a validator hotkey identity?", - default="y", - choices=["y", "n"], - ) - ).lower() - == "y" - else None - ) - - if ( - str( - Prompt.ask( - "Cost to register an Identity is [bold white italic]0.1 Tao[/bold white italic], are you sure you wish to continue?", - default="n", - choices=["y", "n"], - ) - ).lower() - == "n" - ): - console.print(":cross_mark: Aborted!") - exit(0) - - try: - wallet.coldkey # unlock coldkey - except bittensor.KeyFileError: - 
bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return - - with console.status(":satellite: [bold green]Updating identity on-chain..."): - try: - subtensor.update_identity( - identified=identified, - wallet=wallet, - params=id_dict, - ) - except Exception as e: - console.print(f"[red]:cross_mark: Failed![/red] {e}") - exit(1) - - console.print(":white_heavy_check_mark: Success!") - - identity = subtensor.query_identity(identified or wallet.coldkey.ss58_address) - - table = Table(title="[bold white italic]Updated On-Chain Identity") - table.add_column("Key", justify="right", style="cyan", no_wrap=True) - table.add_column("Value", style="magenta") - - table.add_row("Address", identified or wallet.coldkey.ss58_address) - for key, value in identity.items(): - table.add_row(key, str(value) if value is not None else "None") - - console.print(table) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - config.wallet.name = Prompt.ask( - "Enter wallet name", default=bittensor.defaults.wallet.name - ) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - config.wallet.hotkey = Prompt.ask( - "Enter wallet hotkey", default=bittensor.defaults.wallet.hotkey - ) - if not config.is_set("subtensor.network") and not config.no_prompt: - config.subtensor.network = Prompt.ask( - "Enter subtensor network", - default=bittensor.defaults.subtensor.network, - choices=bittensor.__networks__, - ) - ( - _, - config.subtensor.chain_endpoint, - ) = bittensor.subtensor.determine_chain_endpoint_and_network( - config.subtensor.network - ) - if not config.is_set("display") and not config.no_prompt: - config.display = Prompt.ask("Enter display name", default="") - if not config.is_set("legal") and not config.no_prompt: - config.legal = Prompt.ask("Enter legal string", 
default="") - if not config.is_set("web") and not config.no_prompt: - config.web = Prompt.ask("Enter web url", default="") - if not config.is_set("pgp_fingerprint") and not config.no_prompt: - config.pgp_fingerprint = Prompt.ask( - "Enter pgp fingerprint (must be 20 bytes)", default=None - ) - if not config.is_set("riot") and not config.no_prompt: - config.riot = Prompt.ask("Enter riot", default="") - if not config.is_set("email") and not config.no_prompt: - config.email = Prompt.ask("Enter email address", default="") - if not config.is_set("image") and not config.no_prompt: - config.image = Prompt.ask("Enter image url", default="") - if not config.is_set("twitter") and not config.no_prompt: - config.twitter = Prompt.ask("Enter twitter url", default="") - if not config.is_set("info") and not config.no_prompt: - config.info = Prompt.ask("Enter info", default="") - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - new_coldkey_parser = parser.add_parser( - "set_identity", - help="""Create or update identity on-chain for a given cold wallet. 
Must be a subnet owner.""", - ) - new_coldkey_parser.add_argument( - "--display", - type=str, - help="""The display name for the identity.""", - ) - new_coldkey_parser.add_argument( - "--legal", - type=str, - help="""The legal name for the identity.""", - ) - new_coldkey_parser.add_argument( - "--web", - type=str, - help="""The web url for the identity.""", - ) - new_coldkey_parser.add_argument( - "--riot", - type=str, - help="""The riot handle for the identity.""", - ) - new_coldkey_parser.add_argument( - "--email", - type=str, - help="""The email address for the identity.""", - ) - new_coldkey_parser.add_argument( - "--pgp_fingerprint", - type=str, - help="""The pgp fingerprint for the identity.""", - ) - new_coldkey_parser.add_argument( - "--image", - type=str, - help="""The image url for the identity.""", - ) - new_coldkey_parser.add_argument( - "--info", - type=str, - help="""The info for the identity.""", - ) - new_coldkey_parser.add_argument( - "--twitter", - type=str, - help="""The twitter url for the identity.""", - ) - bittensor.wallet.add_args(new_coldkey_parser) - bittensor.subtensor.add_args(new_coldkey_parser) - - -class GetIdentityCommand: - """ - Executes the :func:`get_identity` command, which retrieves and displays the identity details of a user's coldkey or hotkey associated with the Bittensor network. This function - queries the subtensor chain for information such as the stake, rank, and trust associated - with the provided key. - - Optional Arguments: - - ``key``: The ``ss58`` address of the coldkey or hotkey to query. - - The command performs the following actions: - - - Connects to the subtensor network and retrieves the identity information. - - Displays the information in a structured table format. - - The displayed table includes: - - - **Address**: The ``ss58`` address of the queried key. - - **Item**: Various attributes of the identity such as stake, rank, and trust. - - **Value**: The corresponding values of the attributes. 
- - Usage: - The user must provide an ``ss58`` address as input to the command. If the address is not - provided in the configuration, the user is prompted to enter one. - - Example usage:: - - btcli wallet get_identity --key - - Note: - This function is designed for CLI use and should be executed in a terminal. It is - primarily used for informational purposes and has no side effects on the network state. - """ - - def run(cli: "bittensor.cli"): - r"""Queries the subtensor chain for user identity.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - GetIdentityCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - console = bittensor.__console__ - - with console.status(":satellite: [bold green]Querying chain identity..."): - identity = subtensor.query_identity(cli.config.key) - - table = Table(title="[bold white italic]On-Chain Identity") - table.add_column("Item", justify="right", style="cyan", no_wrap=True) - table.add_column("Value", style="magenta") - - table.add_row("Address", cli.config.key) - for key, value in identity.items(): - table.add_row(key, str(value) if value is not None else "None") - - console.print(table) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("key") and not config.no_prompt: - config.key = Prompt.ask( - "Enter coldkey or hotkey ss58 address", default=None - ) - if config.key is None: - raise ValueError("key must be set") - if not config.is_set("subtensor.network") and not config.no_prompt: - config.subtensor.network = Prompt.ask( - "Enter subtensor network", - default=bittensor.defaults.subtensor.network, - choices=bittensor.__networks__, - ) - ( - _, - config.subtensor.chain_endpoint, - ) = bittensor.subtensor.determine_chain_endpoint_and_network( - config.subtensor.network 
- ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - new_coldkey_parser = parser.add_parser( - "get_identity", - help="""Creates a new coldkey (for containing balance) under the specified path. """, - ) - new_coldkey_parser.add_argument( - "--key", - type=str, - default=None, - help="""The coldkey or hotkey ss58 address to query.""", - ) - bittensor.wallet.add_args(new_coldkey_parser) - bittensor.subtensor.add_args(new_coldkey_parser) diff --git a/bittensor/commands/inspect.py b/bittensor/commands/inspect.py deleted file mode 100644 index 4ef0e84c4e..0000000000 --- a/bittensor/commands/inspect.py +++ /dev/null @@ -1,279 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import argparse -import bittensor -from tqdm import tqdm -from rich.table import Table -from rich.prompt import Prompt -from .utils import ( - get_delegates_details, - DelegatesDetails, - get_hotkey_wallets_for_wallet, - get_all_wallets_for_path, - filter_netuids_by_registered_hotkeys, -) -from . import defaults - -console = bittensor.__console__ - -import os -import bittensor -from typing import List, Tuple, Optional, Dict - - -def _get_coldkey_wallets_for_path(path: str) -> List["bittensor.wallet"]: - try: - wallet_names = next(os.walk(os.path.expanduser(path)))[1] - return [bittensor.wallet(path=path, name=name) for name in wallet_names] - except StopIteration: - # No wallet files found. - wallets = [] - return wallets - - -def _get_hotkey_wallets_for_wallet(wallet) -> List["bittensor.wallet"]: - hotkey_wallets = [] - hotkeys_path = wallet.path + "/" + wallet.name + "/hotkeys" - try: - hotkey_files = next(os.walk(os.path.expanduser(hotkeys_path)))[2] - except StopIteration: - hotkey_files = [] - for hotkey_file_name in hotkey_files: - try: - hotkey_for_name = bittensor.wallet( - path=wallet.path, name=wallet.name, hotkey=hotkey_file_name - ) - if ( - hotkey_for_name.hotkey_file.exists_on_device() - and not hotkey_for_name.hotkey_file.is_encrypted() - ): - hotkey_wallets.append(hotkey_for_name) - except Exception: - pass - return hotkey_wallets - - -class InspectCommand: - """ - Executes the ``inspect`` command, which compiles and displays a detailed report of a user's wallet pairs (coldkey, hotkey) on the Bittensor network. - - This report includes balance and - staking information for both the coldkey and hotkey associated with the wallet. - - Optional arguments: - - ``all``: If set to ``True``, the command will inspect all wallets located within the specified path. If set to ``False``, the command will inspect only the wallet specified by the user. - - The command gathers data on: - - - Coldkey balance and delegated stakes. 
- - Hotkey stake and emissions per neuron on the network. - - Delegate names and details fetched from the network. - - The resulting table includes columns for: - - - **Coldkey**: The coldkey associated with the user's wallet. - - **Balance**: The balance of the coldkey. - - **Delegate**: The name of the delegate to which the coldkey has staked funds. - - **Stake**: The amount of stake held by both the coldkey and hotkey. - - **Emission**: The emission or rewards earned from staking. - - **Netuid**: The network unique identifier of the subnet where the hotkey is active. - - **Hotkey**: The hotkey associated with the neuron on the network. - - Usage: - This command can be used to inspect a single wallet or all wallets located within a - specified path. It is useful for a comprehensive overview of a user's participation - and performance in the Bittensor network. - - Example usage:: - - btcli wallet inspect - btcli wallet inspect --all - - Note: - The ``inspect`` command is for displaying information only and does not perform any - transactions or state changes on the Bittensor network. It is intended to be used as - part of the Bittensor CLI and not as a standalone function within user code. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Inspect a cold, hot pair.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - InspectCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - if cli.config.get("all", d=False) == True: - wallets = _get_coldkey_wallets_for_path(cli.config.wallet.path) - all_hotkeys = get_all_wallets_for_path(cli.config.wallet.path) - else: - wallets = [bittensor.wallet(config=cli.config)] - all_hotkeys = get_hotkey_wallets_for_wallet(wallets[0]) - - netuids = subtensor.get_all_subnet_netuids() - netuids = filter_netuids_by_registered_hotkeys( - cli, subtensor, netuids, all_hotkeys - ) - bittensor.logging.debug(f"Netuids to check: {netuids}") - - registered_delegate_info: Optional[Dict[str, DelegatesDetails]] = ( - get_delegates_details(url=bittensor.__delegates_details_url__) - ) - if registered_delegate_info is None: - bittensor.__console__.print( - ":warning:[yellow]Could not get delegate info from chain.[/yellow]" - ) - registered_delegate_info = {} - - neuron_state_dict = {} - for netuid in tqdm(netuids): - neurons = subtensor.neurons_lite(netuid) - neuron_state_dict[netuid] = neurons if neurons != None else [] - - table = Table(show_footer=True, pad_edge=False, box=None, expand=True) - table.add_column( - "[overline white]Coldkey", footer_style="overline white", style="bold white" - ) - table.add_column( - "[overline white]Balance", footer_style="overline white", style="green" - ) - table.add_column( - "[overline white]Delegate", footer_style="overline white", style="blue" - ) - table.add_column( - "[overline white]Stake", footer_style="overline white", style="green" - ) - table.add_column( - "[overline white]Emission", footer_style="overline white", style="green" - ) - 
table.add_column( - "[overline white]Netuid", footer_style="overline white", style="bold white" - ) - table.add_column( - "[overline white]Hotkey", footer_style="overline white", style="yellow" - ) - table.add_column( - "[overline white]Stake", footer_style="overline white", style="green" - ) - table.add_column( - "[overline white]Emission", footer_style="overline white", style="green" - ) - for wallet in tqdm(wallets): - delegates: List[Tuple[bittensor.DelegateInfo, bittensor.Balance]] = ( - subtensor.get_delegated(coldkey_ss58=wallet.coldkeypub.ss58_address) - ) - if not wallet.coldkeypub_file.exists_on_device(): - continue - cold_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - table.add_row(wallet.name, str(cold_balance), "", "", "", "", "", "", "") - for dele, staked in delegates: - if dele.hotkey_ss58 in registered_delegate_info: - delegate_name = registered_delegate_info[dele.hotkey_ss58].name - else: - delegate_name = dele.hotkey_ss58 - table.add_row( - "", - "", - str(delegate_name), - str(staked), - str( - dele.total_daily_return.tao - * (staked.tao / dele.total_stake.tao) - ), - "", - "", - "", - "", - ) - - hotkeys = _get_hotkey_wallets_for_wallet(wallet) - for netuid in netuids: - for neuron in neuron_state_dict[netuid]: - if neuron.coldkey == wallet.coldkeypub.ss58_address: - hotkey_name: str = "" - - hotkey_names: List[str] = [ - wallet.hotkey_str - for wallet in filter( - lambda hotkey: hotkey.hotkey.ss58_address - == neuron.hotkey, - hotkeys, - ) - ] - if len(hotkey_names) > 0: - hotkey_name = f"{hotkey_names[0]}-" - - table.add_row( - "", - "", - "", - "", - "", - str(netuid), - f"{hotkey_name}{neuron.hotkey}", - str(neuron.stake), - str(bittensor.Balance.from_tao(neuron.emission)), - ) - - bittensor.__console__.print(table) - - @staticmethod - def check_config(config: "bittensor.config"): - if ( - not config.is_set("wallet.name") - and not config.no_prompt - and not config.get("all", d=None) - ): - wallet_name = Prompt.ask("Enter 
wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if config.netuids != [] and config.netuids != None: - if not isinstance(config.netuids, list): - config.netuids = [int(config.netuids)] - else: - config.netuids = [int(netuid) for netuid in config.netuids] - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - inspect_parser = parser.add_parser( - "inspect", help="""Inspect a wallet (cold, hot) pair""" - ) - inspect_parser.add_argument( - "--all", - action="store_true", - help="""Check all coldkey wallets.""", - default=False, - ) - inspect_parser.add_argument( - "--netuids", - dest="netuids", - type=int, - nargs="*", - help="""Set the netuid(s) to filter by.""", - default=None, - ) - - bittensor.wallet.add_args(inspect_parser) - bittensor.subtensor.add_args(inspect_parser) diff --git a/bittensor/commands/list.py b/bittensor/commands/list.py deleted file mode 100644 index b2946efffb..0000000000 --- a/bittensor/commands/list.py +++ /dev/null @@ -1,128 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import os -import argparse -import bittensor -from rich import print -from rich.tree import Tree - -console = bittensor.__console__ - - -class ListCommand: - """ - Executes the ``list`` command which enumerates all wallets and their respective hotkeys present in the user's Bittensor configuration directory. - - The command organizes the information in a tree structure, displaying each wallet along with the ``ss58`` addresses for the coldkey public key and any hotkeys associated with it. - - Optional arguments: - - ``-p``, ``--path``: The path to the Bittensor configuration directory. Defaults to '~/.bittensor'. - - The output is presented in a hierarchical tree format, with each wallet as a root node, - and any associated hotkeys as child nodes. The ``ss58`` address is displayed for each - coldkey and hotkey that is not encrypted and exists on the device. - - Usage: - Upon invocation, the command scans the wallet directory and prints a list of all wallets, indicating whether the public keys are available (``?`` denotes unavailable or encrypted keys). - - Example usage:: - - btcli wallet list --path ~/.bittensor - - Note: - This command is read-only and does not modify the filesystem or the network state. It is intended for use within the Bittensor CLI to provide a quick overview of the user's wallets. - """ - - @staticmethod - def run(cli): - r"""Lists wallets.""" - try: - wallets = next(os.walk(os.path.expanduser(cli.config.wallet.path)))[1] - except StopIteration: - # No wallet files found. 
- wallets = [] - ListCommand._run(cli, wallets) - - @staticmethod - def _run(cli: "bittensor.cli", wallets, return_value=False): - root = Tree("Wallets") - for w_name in wallets: - wallet_for_name = bittensor.wallet(path=cli.config.wallet.path, name=w_name) - try: - if ( - wallet_for_name.coldkeypub_file.exists_on_device() - and not wallet_for_name.coldkeypub_file.is_encrypted() - ): - coldkeypub_str = wallet_for_name.coldkeypub.ss58_address - else: - coldkeypub_str = "?" - except: - coldkeypub_str = "?" - - wallet_tree = root.add( - "\n[bold white]{} ({})".format(w_name, coldkeypub_str) - ) - hotkeys_path = os.path.join(cli.config.wallet.path, w_name, "hotkeys") - try: - hotkeys = next(os.walk(os.path.expanduser(hotkeys_path))) - if len(hotkeys) > 1: - for h_name in hotkeys[2]: - hotkey_for_name = bittensor.wallet( - path=cli.config.wallet.path, name=w_name, hotkey=h_name - ) - try: - if ( - hotkey_for_name.hotkey_file.exists_on_device() - and not hotkey_for_name.hotkey_file.is_encrypted() - ): - hotkey_str = hotkey_for_name.hotkey.ss58_address - else: - hotkey_str = "?" - except: - hotkey_str = "?" - wallet_tree.add("[bold grey]{} ({})".format(h_name, hotkey_str)) - except: - continue - - if len(wallets) == 0: - root.add("[bold red]No wallets found.") - - # Uses rich print to display the tree. - if not return_value: - print(root) - else: - return root - - @staticmethod - def check_config(config: "bittensor.config"): - pass - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - list_parser = parser.add_parser("list", help="""List wallets""") - bittensor.wallet.add_args(list_parser) - bittensor.subtensor.add_args(list_parser) - - @staticmethod - def get_tree(cli): - try: - wallets = next(os.walk(os.path.expanduser(cli.config.wallet.path)))[1] - except StopIteration: - # No wallet files found. 
- wallets = [] - return ListCommand._run(cli=cli, wallets=wallets, return_value=True) diff --git a/bittensor/commands/metagraph.py b/bittensor/commands/metagraph.py deleted file mode 100644 index 79fa48b786..0000000000 --- a/bittensor/commands/metagraph.py +++ /dev/null @@ -1,268 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import argparse - -from rich.table import Table - -import bittensor - -from .utils import check_netuid_set - -console = bittensor.__console__ # type: ignore - - -class MetagraphCommand: - """ - Executes the ``metagraph`` command to retrieve and display the entire metagraph for a specified network. - - This metagraph contains detailed information about - all the neurons (nodes) participating in the network, including their stakes, - trust scores, and more. - - Optional arguments: - - ``--netuid``: The netuid of the network to query. Defaults to the default network UID. 
- - ``--subtensor.network``: The name of the network to query. Defaults to the default network name. - - The table displayed includes the following columns for each neuron: - - - UID: Unique identifier of the neuron. - - STAKE(τ): Total stake of the neuron in Tau (τ). - - RANK: Rank score of the neuron. - - TRUST: Trust score assigned to the neuron by other neurons. - - CONSENSUS: Consensus score of the neuron. - - INCENTIVE: Incentive score representing the neuron's incentive alignment. - - DIVIDENDS: Dividends earned by the neuron. - - EMISSION(p): Emission in Rho (p) received by the neuron. - - VTRUST: Validator trust score indicating the network's trust in the neuron as a validator. - - VAL: Validator status of the neuron. - - UPDATED: Number of blocks since the neuron's last update. - - ACTIVE: Activity status of the neuron. - - AXON: Network endpoint information of the neuron. - - HOTKEY: Partial hotkey (public key) of the neuron. - - COLDKEY: Partial coldkey (public key) of the neuron. - - The command also prints network-wide statistics such as total stake, issuance, and difficulty. - - Usage: - The user must specify the network UID to query the metagraph. If not specified, the default network UID is used. - - Example usage:: - - btcli subnet metagraph --netuid 0 # Root network - btcli subnet metagraph --netuid 1 --subtensor.network test - - Note: - This command provides a snapshot of the network's state at the time of calling. - It is useful for network analysis and diagnostics. It is intended to be used as - part of the Bittensor CLI and not as a standalone function within user code. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Prints an entire metagraph.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - MetagraphCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Prints an entire metagraph.""" - console = bittensor.__console__ - console.print( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - cli.config.subtensor.network - ) - ) - metagraph: bittensor.metagraph = subtensor.metagraph(netuid=cli.config.netuid) - metagraph.save() - difficulty = subtensor.difficulty(cli.config.netuid) - subnet_emission = bittensor.Balance.from_tao( - subtensor.get_emission_value_by_subnet(cli.config.netuid) - ) - total_issuance = bittensor.Balance.from_rao(subtensor.total_issuance().rao) - - TABLE_DATA = [] - total_stake = 0.0 - total_rank = 0.0 - total_validator_trust = 0.0 - total_trust = 0.0 - total_consensus = 0.0 - total_incentive = 0.0 - total_dividends = 0.0 - total_emission = 0 - for uid in metagraph.uids: - neuron = metagraph.neurons[uid] - ep = metagraph.axons[uid] - row = [ - str(neuron.uid), - "{:.5f}".format(metagraph.total_stake[uid]), - "{:.5f}".format(metagraph.ranks[uid]), - "{:.5f}".format(metagraph.trust[uid]), - "{:.5f}".format(metagraph.consensus[uid]), - "{:.5f}".format(metagraph.incentive[uid]), - "{:.5f}".format(metagraph.dividends[uid]), - "{}".format(int(metagraph.emission[uid] * 1000000000)), - "{:.5f}".format(metagraph.validator_trust[uid]), - "*" if metagraph.validator_permit[uid] else "", - str((metagraph.block.item() - metagraph.last_update[uid].item())), - str(metagraph.active[uid].item()), - ( - ep.ip + ":" + str(ep.port) - if ep.is_serving - else "[yellow]none[/yellow]" - ), - ep.hotkey[:10], - ep.coldkey[:10], - ] - total_stake += metagraph.total_stake[uid] - 
total_rank += metagraph.ranks[uid] - total_validator_trust += metagraph.validator_trust[uid] - total_trust += metagraph.trust[uid] - total_consensus += metagraph.consensus[uid] - total_incentive += metagraph.incentive[uid] - total_dividends += metagraph.dividends[uid] - total_emission += int(metagraph.emission[uid] * 1000000000) - TABLE_DATA.append(row) - total_neurons = len(metagraph.uids) - table = Table(show_footer=False) - table.title = "[white]Metagraph: net: {}:{}, block: {}, N: {}/{}, stake: {}, issuance: {}, difficulty: {}".format( - subtensor.network, - metagraph.netuid, - metagraph.block.item(), - sum(metagraph.active.tolist()), - metagraph.n.item(), - bittensor.Balance.from_tao(total_stake), - total_issuance, - difficulty, - ) - table.add_column( - "[overline white]UID", - str(total_neurons), - footer_style="overline white", - style="yellow", - ) - table.add_column( - "[overline white]STAKE(\u03c4)", - "\u03c4{:.5f}".format(total_stake), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]RANK", - "{:.5f}".format(total_rank), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]TRUST", - "{:.5f}".format(total_trust), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]CONSENSUS", - "{:.5f}".format(total_consensus), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]INCENTIVE", - "{:.5f}".format(total_incentive), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]DIVIDENDS", - "{:.5f}".format(total_dividends), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]EMISSION(\u03c1)", - 
"\u03c1{}".format(int(total_emission)), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]VTRUST", - "{:.5f}".format(total_validator_trust), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]VAL", justify="right", style="green", no_wrap=True - ) - table.add_column("[overline white]UPDATED", justify="right", no_wrap=True) - table.add_column( - "[overline white]ACTIVE", justify="right", style="green", no_wrap=True - ) - table.add_column( - "[overline white]AXON", justify="left", style="dim blue", no_wrap=True - ) - table.add_column("[overline white]HOTKEY", style="dim blue", no_wrap=False) - table.add_column("[overline white]COLDKEY", style="dim purple", no_wrap=False) - table.show_footer = True - - for row in TABLE_DATA: - table.add_row(*row) - table.box = None - table.pad_edge = False - table.width = None - console.print(table) - - @staticmethod - def check_config(config: "bittensor.config"): - check_netuid_set( - config, subtensor=bittensor.subtensor(config=config, log_verbose=False) - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - metagraph_parser = parser.add_parser( - "metagraph", help="""View a subnet metagraph information.""" - ) - metagraph_parser.add_argument( - "--netuid", - dest="netuid", - type=int, - help="""Set the netuid to get the metagraph of""", - default=False, - ) - - bittensor.subtensor.add_args(metagraph_parser) diff --git a/bittensor/commands/misc.py b/bittensor/commands/misc.py deleted file mode 100644 index ded1c78042..0000000000 --- a/bittensor/commands/misc.py +++ /dev/null @@ -1,117 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without 
limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import os -import argparse -import bittensor -from rich.prompt import Prompt -from rich.table import Table - -console = bittensor.__console__ - - -class UpdateCommand: - """ - Executes the ``update`` command to update the local Bittensor package. - - This command performs a series of operations to ensure that the user's local Bittensor installation is updated to the latest version from the master branch of its GitHub repository. It primarily involves pulling the latest changes from the repository and reinstalling the package. - - Usage: - Upon invocation, the command first checks the user's configuration for the ``no_prompt`` setting. If ``no_prompt`` is set to ``True``, or if the user explicitly confirms with ``Y`` when prompted, the command proceeds to update the local Bittensor package. It changes the current directory to the Bittensor package directory, checks out the master branch of the Bittensor repository, pulls the latest changes, and then reinstalls the package using ``pip``. - - The command structure is as follows: - - 1. Change directory to the Bittensor package directory. - 2. 
Check out the master branch of the Bittensor GitHub repository. - 3. Pull the latest changes with the ``--ff-only`` option to ensure a fast-forward update. - 4. Reinstall the Bittensor package using pip. - - Example usage:: - - btcli legacy update - - Note: - This command is intended to be used within the Bittensor CLI to facilitate easy updates of the Bittensor package. It should be used with caution as it directly affects the local installation of the package. It is recommended to ensure that any important data or configurations are backed up before running this command. - """ - - @staticmethod - def run(cli): - if cli.config.no_prompt or cli.config.answer == "Y": - os.system( - " (cd ~/.bittensor/bittensor/ ; git checkout master ; git pull --ff-only )" - ) - os.system("pip install -e ~/.bittensor/bittensor/") - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.no_prompt: - answer = Prompt.ask( - "This will update the local bittensor package", - choices=["Y", "N"], - default="Y", - ) - config.answer = answer - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - update_parser = parser.add_parser( - "update", add_help=False, help="""Update bittensor """ - ) - - bittensor.subtensor.add_args(update_parser) - - -class AutocompleteCommand: - """Show users how to install and run autocompletion for Bittensor CLI.""" - - @staticmethod - def run(cli): - console = bittensor.__console__ - shell_commands = { - "Bash": "btcli --print-completion bash >> ~/.bashrc", - "Zsh": "btcli --print-completion zsh >> ~/.zshrc", - "Tcsh": "btcli --print-completion tcsh >> ~/.tcshrc", - } - - table = Table(show_header=True, header_style="bold magenta") - table.add_column("Shell", style="dim", width=12) - table.add_column("Command to Enable Autocompletion", justify="left") - - for shell, command in shell_commands.items(): - table.add_row(shell, command) - - console.print( - "To enable autocompletion for Bittensor CLI, run the appropriate command 
for your shell:" - ) - console.print(table) - - console.print( - "\n[bold]After running the command, execute the following to apply the changes:[/bold]" - ) - console.print(" [yellow]source ~/.bashrc[/yellow] # For Bash and Zsh") - console.print(" [yellow]source ~/.tcshrc[/yellow] # For Tcsh") - - @staticmethod - def add_args(parser): - parser.add_parser( - "autocomplete", - help="Instructions for enabling autocompletion for Bittensor CLI.", - ) - - @staticmethod - def check_config(config): - pass diff --git a/bittensor/commands/network.py b/bittensor/commands/network.py deleted file mode 100644 index 3564bc534d..0000000000 --- a/bittensor/commands/network.py +++ /dev/null @@ -1,672 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import argparse -import bittensor -from . 
import defaults # type: ignore -from rich.prompt import Prompt -from rich.table import Table -from typing import List, Optional, Dict, Union, Tuple -from .utils import ( - get_delegates_details, - DelegatesDetails, - check_netuid_set, - normalize_hyperparameters, -) -from .identity import SetIdentityCommand - -console = bittensor.__console__ - - -class RegisterSubnetworkCommand: - """ - Executes the ``register_subnetwork`` command to register a new subnetwork on the Bittensor network. - - This command facilitates the creation and registration of a subnetwork, which involves interaction with the user's wallet and the Bittensor subtensor. It ensures that the user has the necessary credentials and configurations to successfully register a new subnetwork. - - Usage: - Upon invocation, the command performs several key steps to register a subnetwork: - - 1. It copies the user's current configuration settings. - 2. It accesses the user's wallet using the provided configuration. - 3. It initializes the Bittensor subtensor object with the user's configuration. - 4. It then calls the ``register_subnetwork`` function of the subtensor object, passing the user's wallet and a prompt setting based on the user's configuration. - - If the user's configuration does not specify a wallet name and ``no_prompt`` is not set, the command will prompt the user to enter a wallet name. This name is then used in the registration process. - - The command structure includes: - - - Copying the user's configuration. - - Accessing and preparing the user's wallet. - - Initializing the Bittensor subtensor. - - Registering the subnetwork with the necessary credentials. - - Example usage:: - - btcli subnets create - - Note: - This command is intended for advanced users of the Bittensor network who wish to contribute by adding new subnetworks. It requires a clear understanding of the network's functioning and the roles of subnetworks. 
Users should ensure that they have secured their wallet and are aware of the implications of adding a new subnetwork to the Bittensor ecosystem. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Register a subnetwork""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - RegisterSubnetworkCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Register a subnetwork""" - wallet = bittensor.wallet(config=cli.config) - - # Call register command. - success = subtensor.register_subnetwork( - wallet=wallet, - prompt=not cli.config.no_prompt, - ) - if success and not cli.config.no_prompt: - # Prompt for user to set identity. - do_set_identity = Prompt.ask( - f"Subnetwork registered successfully. Would you like to set your identity? [y/n]", - choices=["y", "n"], - ) - - if do_set_identity.lower() == "y": - subtensor.close() - config = cli.config.copy() - SetIdentityCommand.check_config(config) - cli.config = config - SetIdentityCommand.run(cli) - - @classmethod - def check_config(cls, config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser): - parser = parser.add_parser( - "create", - help="""Create a new bittensor subnetwork on this chain.""", - ) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - -class SubnetLockCostCommand: - """ - Executes the ``lock_cost`` command to view the locking cost required for creating a new subnetwork on the Bittensor network. 
- - This command is designed to provide users with the current cost of registering a new subnetwork, which is a critical piece of information for anyone considering expanding the network's infrastructure. - - The current implementation anneals the cost of creating a subnet over a period of two days. If the cost is unappealing currently, check back in a day or two to see if it has reached an amenble level. - - Usage: - Upon invocation, the command performs the following operations: - - 1. It copies the user's current Bittensor configuration. - 2. It initializes the Bittensor subtensor object with this configuration. - 3. It then retrieves the subnet lock cost using the ``get_subnet_burn_cost()`` method from the subtensor object. - 4. The cost is displayed to the user in a readable format, indicating the amount of Tao required to lock for registering a new subnetwork. - - In case of any errors during the process (e.g., network issues, configuration problems), the command will catch these exceptions and inform the user that it failed to retrieve the lock cost, along with the specific error encountered. - - The command structure includes: - - - Copying and using the user's configuration for Bittensor. - - Retrieving the current subnet lock cost from the Bittensor network. - - Displaying the cost in a user-friendly manner. - - Example usage:: - - btcli subnets lock_cost - - Note: - This command is particularly useful for users who are planning to contribute to the Bittensor network by adding new subnetworks. Understanding the lock cost is essential for these users to make informed decisions about their potential contributions and investments in the network. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""View locking cost of creating a new subnetwork""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - SubnetLockCostCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""View locking cost of creating a new subnetwork""" - config = cli.config.copy() - try: - bittensor.__console__.print( - f"Subnet lock cost: [green]{bittensor.utils.balance.Balance( subtensor.get_subnet_burn_cost() )}[/green]" - ) - except Exception as e: - bittensor.__console__.print( - f"Subnet lock cost: [red]Failed to get subnet lock cost[/red]" - f"Error: {e}" - ) - - @classmethod - def check_config(cls, config: "bittensor.config"): - pass - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser): - parser = parser.add_parser( - "lock_cost", - help=""" Return the lock cost to register a subnet""", - ) - - bittensor.subtensor.add_args(parser) - - -class SubnetListCommand: - """ - Executes the ``list`` command to list all subnets and their detailed information on the Bittensor network. - - This command is designed to provide users with comprehensive information about each subnet within the - network, including its unique identifier (netuid), the number of neurons, maximum neuron capacity, - emission rate, tempo, recycle register cost (burn), proof of work (PoW) difficulty, and the name or - SS58 address of the subnet owner. - - Usage: - Upon invocation, the command performs the following actions: - - 1. It initializes the Bittensor subtensor object with the user's configuration. - 2. It retrieves a list of all subnets in the network along with their detailed information. - 3. 
The command compiles this data into a table format, displaying key information about each subnet. - - In addition to the basic subnet details, the command also fetches delegate information to provide the - name of the subnet owner where available. If the owner's name is not available, the owner's ``SS58`` - address is displayed. - - The command structure includes: - - - Initializing the Bittensor subtensor and retrieving subnet information. - - Calculating the total number of neurons across all subnets. - - Constructing a table that includes columns for ``NETUID``, ``N`` (current neurons), ``MAX_N`` (maximum neurons), ``EMISSION``, ``TEMPO``, ``BURN``, ``POW`` (proof of work difficulty), and ``SUDO`` (owner's name or ``SS58`` address). - - Displaying the table with a footer that summarizes the total number of subnets and neurons. - - Example usage:: - - btcli subnets list - - Note: - This command is particularly useful for users seeking an overview of the Bittensor network's structure and the distribution of its resources and ownership information for each subnet. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""List all subnet netuids in the network.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - SubnetListCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""List all subnet netuids in the network.""" - subnets: List[bittensor.SubnetInfo] = subtensor.get_all_subnets_info() - - rows = [] - total_neurons = 0 - delegate_info: Optional[Dict[str, DelegatesDetails]] = get_delegates_details( - url=bittensor.__delegates_details_url__ - ) - - for subnet in subnets: - total_neurons += subnet.max_n - rows.append( - ( - str(subnet.netuid), - str(subnet.subnetwork_n), - str(bittensor.utils.formatting.millify(subnet.max_n)), - f"{subnet.emission_value / bittensor.utils.RAOPERTAO * 100:0.2f}%", - str(subnet.tempo), - f"{subnet.burn!s:8.8}", - str(bittensor.utils.formatting.millify(subnet.difficulty)), - f"{delegate_info[subnet.owner_ss58].name if subnet.owner_ss58 in delegate_info else subnet.owner_ss58}", - ) - ) - table = Table( - show_footer=True, - width=cli.config.get("width", None), - pad_edge=True, - box=None, - show_edge=True, - ) - table.title = "[white]Subnets - {}".format(subtensor.network) - table.add_column( - "[overline white]NETUID", - str(len(subnets)), - footer_style="overline white", - style="bold green", - justify="center", - ) - table.add_column( - "[overline white]N", - str(total_neurons), - footer_style="overline white", - style="green", - justify="center", - ) - table.add_column("[overline white]MAX_N", style="white", justify="center") - table.add_column("[overline white]EMISSION", style="white", justify="center") - table.add_column("[overline white]TEMPO", style="white", justify="center") - table.add_column("[overline white]RECYCLE", style="white", 
justify="center") - table.add_column("[overline white]POW", style="white", justify="center") - table.add_column("[overline white]SUDO", style="white") - for row in rows: - table.add_row(*row) - bittensor.__console__.print(table) - - @staticmethod - def check_config(config: "bittensor.config"): - pass - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - list_subnets_parser = parser.add_parser( - "list", help="""List all subnets on the network""" - ) - bittensor.subtensor.add_args(list_subnets_parser) - - -HYPERPARAMS = { - "serving_rate_limit": "sudo_set_serving_rate_limit", - "min_difficulty": "sudo_set_min_difficulty", - "max_difficulty": "sudo_set_max_difficulty", - "weights_version": "sudo_set_weights_version_key", - "weights_rate_limit": "sudo_set_weights_set_rate_limit", - "max_weight_limit": "sudo_set_max_weight_limit", - "immunity_period": "sudo_set_immunity_period", - "min_allowed_weights": "sudo_set_min_allowed_weights", - "activity_cutoff": "sudo_set_activity_cutoff", - "network_registration_allowed": "sudo_set_network_registration_allowed", - "network_pow_registration_allowed": "sudo_set_network_pow_registration_allowed", - "min_burn": "sudo_set_min_burn", - "max_burn": "sudo_set_max_burn", - "adjustment_alpha": "sudo_set_adjustment_alpha", - "rho": "sudo_set_rho", - "kappa": "sudo_set_kappa", - "difficulty": "sudo_set_difficulty", - "bonds_moving_avg": "sudo_set_bonds_moving_average", - "commit_reveal_weights_interval": "sudo_set_commit_reveal_weights_interval", - "commit_reveal_weights_enabled": "sudo_set_commit_reveal_weights_enabled", - "alpha_values": "sudo_set_alpha_values", - "liquid_alpha_enabled": "sudo_set_liquid_alpha_enabled", -} - - -class SubnetSudoCommand: - """ - Executes the ``set`` command to set hyperparameters for a specific subnet on the Bittensor network. - - This command allows subnet owners to modify various hyperparameters of theirs subnet, such as its tempo, - emission rates, and other network-specific settings. 
- - Usage: - The command first prompts the user to enter the hyperparameter they wish to change and its new value. - It then uses the user's wallet and configuration settings to authenticate and send the hyperparameter update - to the specified subnet. - - Example usage:: - - btcli sudo set --netuid 1 --param 'tempo' --value '0.5' - - Note: - This command requires the user to specify the subnet identifier (``netuid``) and both the hyperparameter - and its new value. It is intended for advanced users who are familiar with the network's functioning - and the impact of changing these parameters. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Set subnet hyperparameters.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - SubnetSudoCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run( - cli: "bittensor.cli", - subtensor: "bittensor.subtensor", - ): - r"""Set subnet hyperparameters.""" - wallet = bittensor.wallet(config=cli.config) - print("\n") - SubnetHyperparamsCommand.run(cli) - if not cli.config.is_set("param") and not cli.config.no_prompt: - param = Prompt.ask("Enter hyperparameter", choices=HYPERPARAMS) - cli.config.param = str(param) - if not cli.config.is_set("value") and not cli.config.no_prompt: - value = Prompt.ask("Enter new value") - cli.config.value = value - - if ( - cli.config.param == "network_registration_allowed" - or cli.config.param == "network_pow_registration_allowed" - or cli.config.param == "commit_reveal_weights_enabled" - or cli.config.param == "liquid_alpha_enabled" - ): - cli.config.value = ( - True - if (cli.config.value.lower() == "true" or cli.config.value == "1") - else False - ) - - is_allowed_value, value = allowed_value(cli.config.param, cli.config.value) - if not is_allowed_value: - raise ValueError( - f"Hyperparameter 
{cli.config.param} value is not within bounds. Value is {cli.config.value} but must be {value}" - ) - - subtensor.set_hyperparameter( - wallet, - netuid=cli.config.netuid, - parameter=cli.config.param, - value=value, - prompt=not cli.config.no_prompt, - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("netuid") and not config.no_prompt: - check_netuid_set( - config, bittensor.subtensor(config=config, log_verbose=False) - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser("set", help="""Set hyperparameters for a subnet""") - parser.add_argument( - "--netuid", dest="netuid", type=int, required=False, default=False - ) - parser.add_argument("--param", dest="param", type=str, required=False) - parser.add_argument("--value", dest="value", type=str, required=False) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - -class SubnetHyperparamsCommand: - """ - Executes the '``hyperparameters``' command to view the current hyperparameters of a specific subnet on the Bittensor network. - - This command is useful for users who wish to understand the configuration and - operational parameters of a particular subnet. - - Usage: - Upon invocation, the command fetches and displays a list of all hyperparameters for the specified subnet. - These include settings like tempo, emission rates, and other critical network parameters that define - the subnet's behavior. 
- - Example usage:: - - $ btcli subnets hyperparameters --netuid 1 - - Subnet Hyperparameters - NETUID: 1 - finney - HYPERPARAMETER VALUE - rho 10 - kappa 32767 - immunity_period 7200 - min_allowed_weights 8 - max_weight_limit 455 - tempo 99 - min_difficulty 1000000000000000000 - max_difficulty 1000000000000000000 - weights_version 2013 - weights_rate_limit 100 - adjustment_interval 112 - activity_cutoff 5000 - registration_allowed True - target_regs_per_interval 2 - min_burn 1000000000 - max_burn 100000000000 - bonds_moving_avg 900000 - max_regs_per_block 1 - - Note: - The user must specify the subnet identifier (``netuid``) for which they want to view the hyperparameters. - This command is read-only and does not modify the network state or configurations. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""View hyperparameters of a subnetwork.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - SubnetHyperparamsCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""View hyperparameters of a subnetwork.""" - subnet: bittensor.SubnetHyperparameters = subtensor.get_subnet_hyperparameters( - cli.config.netuid - ) - - table = Table( - show_footer=True, - width=cli.config.get("width", None), - pad_edge=True, - box=None, - show_edge=True, - ) - table.title = "[white]Subnet Hyperparameters - NETUID: {} - {}".format( - cli.config.netuid, subtensor.network - ) - table.add_column("[overline white]HYPERPARAMETER", style="white") - table.add_column("[overline white]VALUE", style="green") - table.add_column("[overline white]NORMALIZED", style="cyan") - - normalized_values = normalize_hyperparameters(subnet) - - for param, value, norm_value in normalized_values: - table.add_row(" " + param, value, norm_value) - - 
bittensor.__console__.print(table) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("netuid") and not config.no_prompt: - check_netuid_set( - config, bittensor.subtensor(config=config, log_verbose=False) - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser( - "hyperparameters", help="""View subnet hyperparameters""" - ) - parser.add_argument( - "--netuid", dest="netuid", type=int, required=False, default=False - ) - bittensor.subtensor.add_args(parser) - - -class SubnetGetHyperparamsCommand: - """ - Executes the ``get`` command to retrieve the hyperparameters of a specific subnet on the Bittensor network. - - This command is similar to the ``hyperparameters`` command but may be used in different contexts within the CLI. - - Usage: - The command connects to the Bittensor network, queries the specified subnet, and returns a detailed list - of all its hyperparameters. This includes crucial operational parameters that determine the subnet's - performance and interaction within the network. - - Example usage:: - - $ btcli sudo get --netuid 1 - - Subnet Hyperparameters - NETUID: 1 - finney - HYPERPARAMETER VALUE - rho 10 - kappa 32767 - immunity_period 7200 - min_allowed_weights 8 - max_weight_limit 455 - tempo 99 - min_difficulty 1000000000000000000 - max_difficulty 1000000000000000000 - weights_version 2013 - weights_rate_limit 100 - adjustment_interval 112 - activity_cutoff 5000 - registration_allowed True - target_regs_per_interval 2 - min_burn 1000000000 - max_burn 100000000000 - bonds_moving_avg 900000 - max_regs_per_block 1 - - Note: - Users need to provide the ``netuid`` of the subnet whose hyperparameters they wish to view. This command is - designed for informational purposes and does not alter any network settings or configurations. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""View hyperparameters of a subnetwork.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - SubnetGetHyperparamsCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""View hyperparameters of a subnetwork.""" - subnet: bittensor.SubnetHyperparameters = subtensor.get_subnet_hyperparameters( - cli.config.netuid - ) - - table = Table( - show_footer=True, - width=cli.config.get("width", None), - pad_edge=True, - box=None, - show_edge=True, - ) - table.title = "[white]Subnet Hyperparameters - NETUID: {} - {}".format( - cli.config.netuid, subtensor.network - ) - table.add_column("[overline white]HYPERPARAMETER", style="white") - table.add_column("[overline white]VALUE", style="green") - table.add_column("[overline white]NORMALIZED", style="cyan") - - normalized_values = normalize_hyperparameters(subnet) - - for param, value, norm_value in normalized_values: - table.add_row(" " + param, value, norm_value) - - bittensor.__console__.print(table) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("netuid") and not config.no_prompt: - check_netuid_set( - config, bittensor.subtensor(config=config, log_verbose=False) - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser("get", help="""View subnet hyperparameters""") - parser.add_argument( - "--netuid", dest="netuid", type=int, required=False, default=False - ) - bittensor.subtensor.add_args(parser) - - -def allowed_value( - param: str, value: Union[str, bool, float] -) -> Tuple[bool, Union[str, list[float], float]]: - """ - Check the allowed values on hyperparameters. Return False if value is out of bounds. 
- """ - # Reminder error message ends like: Value is {value} but must be {error_message}. (the second part of return statement) - # Check if value is a boolean, only allow boolean and floats - try: - if not isinstance(value, bool): - if param == "alpha_values": - # Split the string into individual values - alpha_low_str, alpha_high_str = value.split(",") - alpha_high = float(alpha_high_str) - alpha_low = float(alpha_low_str) - - # Check alpha_high value - if alpha_high <= 52428 or alpha_high >= 65535: - return ( - False, - f"between 52428 and 65535 for alpha_high (but is {alpha_high})", - ) - - # Check alpha_low value - if alpha_low < 0 or alpha_low > 52428: - return ( - False, - f"between 0 and 52428 for alpha_low (but is {alpha_low})", - ) - - return True, [alpha_low, alpha_high] - except ValueError: - return False, "a number or a boolean" - - return True, value diff --git a/bittensor/commands/overview.py b/bittensor/commands/overview.py deleted file mode 100644 index b572847e49..0000000000 --- a/bittensor/commands/overview.py +++ /dev/null @@ -1,778 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import argparse -import bittensor -from tqdm import tqdm -from concurrent.futures import ProcessPoolExecutor -from collections import defaultdict -from fuzzywuzzy import fuzz -from rich.align import Align -from rich.table import Table -from rich.prompt import Prompt -from typing import List, Optional, Dict, Tuple -from .utils import ( - get_hotkey_wallets_for_wallet, - get_coldkey_wallets_for_path, - get_all_wallets_for_path, - filter_netuids_by_registered_hotkeys, -) -from . import defaults - -console = bittensor.__console__ - - -class OverviewCommand: - """ - Executes the ``overview`` command to present a detailed overview of the user's registered accounts on the Bittensor network. - - This command compiles and displays comprehensive information about each neuron associated with the user's wallets, - including both hotkeys and coldkeys. It is especially useful for users managing multiple accounts or seeking a summary - of their network activities and stake distributions. - - Usage: - The command offers various options to customize the output. Users can filter the displayed data by specific netuids, - sort by different criteria, and choose to include all wallets in the user's configuration directory. The output is - presented in a tabular format with the following columns: - - - COLDKEY: The SS58 address of the coldkey. - - HOTKEY: The SS58 address of the hotkey. - - UID: Unique identifier of the neuron. - - ACTIVE: Indicates if the neuron is active. - - STAKE(τ): Amount of stake in the neuron, in Tao. - - RANK: The rank of the neuron within the network. - - TRUST: Trust score of the neuron. - - CONSENSUS: Consensus score of the neuron. - - INCENTIVE: Incentive score of the neuron. 
- - DIVIDENDS: Dividends earned by the neuron. - - EMISSION(p): Emission received by the neuron, in Rho. - - VTRUST: Validator trust score of the neuron. - - VPERMIT: Indicates if the neuron has a validator permit. - - UPDATED: Time since last update. - - AXON: IP address and port of the neuron. - - HOTKEY_SS58: Human-readable representation of the hotkey. - - Example usage:: - - btcli wallet overview - btcli wallet overview --all --sort_by stake --sort_order descending - - Note: - This command is read-only and does not modify the network state or account configurations. It provides a quick and - comprehensive view of the user's network presence, making it ideal for monitoring account status, stake distribution, - and overall contribution to the Bittensor network. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Prints an overview for the wallet's colkey.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - OverviewCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _get_total_balance( - total_balance: "bittensor.Balance", - subtensor: "bittensor.subtensor", - cli: "bittensor.cli", - ) -> Tuple[List["bittensor.wallet"], "bittensor.Balance"]: - if cli.config.get("all", d=None): - cold_wallets = get_coldkey_wallets_for_path(cli.config.wallet.path) - for cold_wallet in tqdm(cold_wallets, desc="Pulling balances"): - if ( - cold_wallet.coldkeypub_file.exists_on_device() - and not cold_wallet.coldkeypub_file.is_encrypted() - ): - total_balance = total_balance + subtensor.get_balance( - cold_wallet.coldkeypub.ss58_address - ) - all_hotkeys = get_all_wallets_for_path(cli.config.wallet.path) - else: - # We are only printing keys for a single coldkey - coldkey_wallet = bittensor.wallet(config=cli.config) - if ( - coldkey_wallet.coldkeypub_file.exists_on_device() - and not 
coldkey_wallet.coldkeypub_file.is_encrypted() - ): - total_balance = subtensor.get_balance( - coldkey_wallet.coldkeypub.ss58_address - ) - if not coldkey_wallet.coldkeypub_file.exists_on_device(): - console.print("[bold red]No wallets found.") - return [], None - all_hotkeys = get_hotkey_wallets_for_wallet(coldkey_wallet) - - return all_hotkeys, total_balance - - @staticmethod - def _get_hotkeys( - cli: "bittensor.cli", all_hotkeys: List["bittensor.wallet"] - ) -> List["bittensor.wallet"]: - if not cli.config.get("all_hotkeys", False): - # We are only showing hotkeys that are specified. - all_hotkeys = [ - hotkey - for hotkey in all_hotkeys - if hotkey.hotkey_str in cli.config.hotkeys - ] - else: - # We are excluding the specified hotkeys from all_hotkeys. - all_hotkeys = [ - hotkey - for hotkey in all_hotkeys - if hotkey.hotkey_str not in cli.config.hotkeys - ] - return all_hotkeys - - @staticmethod - def _get_key_address(all_hotkeys: List["bittensor.wallet"]): - hotkey_coldkey_to_hotkey_wallet = {} - for hotkey_wallet in all_hotkeys: - if hotkey_wallet.hotkey.ss58_address not in hotkey_coldkey_to_hotkey_wallet: - hotkey_coldkey_to_hotkey_wallet[hotkey_wallet.hotkey.ss58_address] = {} - - hotkey_coldkey_to_hotkey_wallet[hotkey_wallet.hotkey.ss58_address][ - hotkey_wallet.coldkeypub.ss58_address - ] = hotkey_wallet - - all_hotkey_addresses = list(hotkey_coldkey_to_hotkey_wallet.keys()) - - return all_hotkey_addresses, hotkey_coldkey_to_hotkey_wallet - - @staticmethod - def _process_neuron_results( - results: List[Tuple[int, List["bittensor.NeuronInfoLite"], Optional[str]]], - neurons: Dict[str, List["bittensor.NeuronInfoLite"]], - netuids: List[int], - ) -> Dict[str, List["bittensor.NeuronInfoLite"]]: - for result in results: - netuid, neurons_result, err_msg = result - if err_msg is not None: - console.print(f"netuid '{netuid}': {err_msg}") - - if len(neurons_result) == 0: - # Remove netuid from overview if no neurons are found. 
- netuids.remove(netuid) - del neurons[str(netuid)] - else: - # Add neurons to overview. - neurons[str(netuid)] = neurons_result - return neurons - - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Prints an overview for the wallet's colkey.""" - console = bittensor.__console__ - wallet = bittensor.wallet(config=cli.config) - - all_hotkeys = [] - total_balance = bittensor.Balance(0) - - # We are printing for every coldkey. - all_hotkeys, total_balance = OverviewCommand._get_total_balance( - total_balance, subtensor, cli - ) - - # We are printing for a select number of hotkeys from all_hotkeys. - if cli.config.get("hotkeys"): - all_hotkeys = OverviewCommand._get_hotkeys(cli, all_hotkeys) - - # Check we have keys to display. - if len(all_hotkeys) == 0: - console.print("[red]No wallets found.[/red]") - return - - # Pull neuron info for all keys. - neurons: Dict[str, List[bittensor.NeuronInfoLite]] = {} - block = subtensor.block - - netuids = subtensor.get_all_subnet_netuids() - netuids = filter_netuids_by_registered_hotkeys( - cli, subtensor, netuids, all_hotkeys - ) - bittensor.logging.debug(f"Netuids to check: {netuids}") - - for netuid in netuids: - neurons[str(netuid)] = [] - - all_wallet_names = {wallet.name for wallet in all_hotkeys} - all_coldkey_wallets = [ - bittensor.wallet(name=wallet_name) for wallet_name in all_wallet_names - ] - - ( - all_hotkey_addresses, - hotkey_coldkey_to_hotkey_wallet, - ) = OverviewCommand._get_key_address(all_hotkeys) - - with console.status( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - cli.config.subtensor.get( - "network", bittensor.defaults.subtensor.network - ) - ) - ): - # Create a copy of the config without the parser and formatter_class. - ## This is needed to pass to the ProcessPoolExecutor, which cannot pickle the parser. - copy_config = cli.config.copy() - copy_config["__parser"] = None - copy_config["formatter_class"] = None - - # Pull neuron info for all keys. 
- ## Max len(netuids) or 5 threads. - with ProcessPoolExecutor(max_workers=max(len(netuids), 5)) as executor: - results = executor.map( - OverviewCommand._get_neurons_for_netuid, - [(copy_config, netuid, all_hotkey_addresses) for netuid in netuids], - ) - executor.shutdown(wait=True) # wait for all complete - - neurons = OverviewCommand._process_neuron_results( - results, neurons, netuids - ) - - total_coldkey_stake_from_metagraph = defaultdict( - lambda: bittensor.Balance(0.0) - ) - checked_hotkeys = set() - for neuron_list in neurons.values(): - for neuron in neuron_list: - if neuron.hotkey in checked_hotkeys: - continue - total_coldkey_stake_from_metagraph[neuron.coldkey] += ( - neuron.stake_dict[neuron.coldkey] - ) - checked_hotkeys.add(neuron.hotkey) - - alerts_table = Table(show_header=True, header_style="bold magenta") - alerts_table.add_column("đŸ„© alert!") - - coldkeys_to_check = [] - for coldkey_wallet in all_coldkey_wallets: - # Check if we have any stake with hotkeys that are not registered. - total_coldkey_stake_from_chain = subtensor.get_total_stake_for_coldkey( - ss58_address=coldkey_wallet.coldkeypub.ss58_address - ) - difference = ( - total_coldkey_stake_from_chain - - total_coldkey_stake_from_metagraph[ - coldkey_wallet.coldkeypub.ss58_address - ] - ) - if difference == 0: - continue # We have all our stake registered. - - coldkeys_to_check.append(coldkey_wallet) - alerts_table.add_row( - "Found {} stake with coldkey {} that is not registered.".format( - difference, coldkey_wallet.coldkeypub.ss58_address - ) - ) - - if coldkeys_to_check: - # We have some stake that is not with a registered hotkey. - if "-1" not in neurons: - neurons["-1"] = [] - - # Use process pool to check each coldkey wallet for de-registered stake. 
- with ProcessPoolExecutor( - max_workers=max(len(coldkeys_to_check), 5) - ) as executor: - results = executor.map( - OverviewCommand._get_de_registered_stake_for_coldkey_wallet, - [ - (cli.config, all_hotkey_addresses, coldkey_wallet) - for coldkey_wallet in coldkeys_to_check - ], - ) - executor.shutdown(wait=True) # wait for all complete - - for result in results: - coldkey_wallet, de_registered_stake, err_msg = result - if err_msg is not None: - console.print(err_msg) - - if len(de_registered_stake) == 0: - continue # We have no de-registered stake with this coldkey. - - de_registered_neurons = [] - for hotkey_addr, our_stake in de_registered_stake: - # Make a neuron info lite for this hotkey and coldkey. - de_registered_neuron = bittensor.NeuronInfoLite.get_null_neuron() - de_registered_neuron.hotkey = hotkey_addr - de_registered_neuron.coldkey = ( - coldkey_wallet.coldkeypub.ss58_address - ) - de_registered_neuron.total_stake = bittensor.Balance(our_stake) - - de_registered_neurons.append(de_registered_neuron) - - # Add this hotkey to the wallets dict - wallet_ = bittensor.wallet( - name=wallet, - ) - wallet_.hotkey_ss58 = hotkey_addr - wallet.hotkey_str = hotkey_addr[:5] # Max length of 5 characters - # Indicates a hotkey not on local machine but exists in stake_info obj on-chain - if hotkey_coldkey_to_hotkey_wallet.get(hotkey_addr) is None: - hotkey_coldkey_to_hotkey_wallet[hotkey_addr] = {} - hotkey_coldkey_to_hotkey_wallet[hotkey_addr][ - coldkey_wallet.coldkeypub.ss58_address - ] = wallet_ - - # Add neurons to overview. - neurons["-1"].extend(de_registered_neurons) - - # Setup outer table. 
- grid = Table.grid(pad_edge=False) - - # If there are any alerts, add them to the grid - if len(alerts_table.rows) > 0: - grid.add_row(alerts_table) - - title: str = "" - if not cli.config.get("all", d=None): - title = "[bold white italic]Wallet - {}:{}".format( - cli.config.wallet.name, wallet.coldkeypub.ss58_address - ) - else: - title = "[bold whit italic]All Wallets:" - - # Add title - grid.add_row(Align(title, vertical="middle", align="center")) - - # Generate rows per netuid - hotkeys_seen = set() - total_neurons = 0 - total_stake = 0.0 - for netuid in netuids: - subnet_tempo = subtensor.tempo(netuid=netuid) - last_subnet = netuid == netuids[-1] - TABLE_DATA = [] - total_rank = 0.0 - total_trust = 0.0 - total_consensus = 0.0 - total_validator_trust = 0.0 - total_incentive = 0.0 - total_dividends = 0.0 - total_emission = 0 - - for nn in neurons[str(netuid)]: - hotwallet = hotkey_coldkey_to_hotkey_wallet.get(nn.hotkey, {}).get( - nn.coldkey, None - ) - if not hotwallet: - # Indicates a mismatch between what the chain says the coldkey - # is for this hotkey and the local wallet coldkey-hotkey pair - hotwallet = argparse.Namespace() - hotwallet.name = nn.coldkey[:7] - hotwallet.hotkey_str = nn.hotkey[:7] - nn: bittensor.NeuronInfoLite - uid = nn.uid - active = nn.active - stake = nn.total_stake.tao - rank = nn.rank - trust = nn.trust - consensus = nn.consensus - validator_trust = nn.validator_trust - incentive = nn.incentive - dividends = nn.dividends - emission = int(nn.emission / (subnet_tempo + 1) * 1e9) - last_update = int(block - nn.last_update) - validator_permit = nn.validator_permit - row = [ - hotwallet.name, - hotwallet.hotkey_str, - str(uid), - str(active), - "{:.5f}".format(stake), - "{:.5f}".format(rank), - "{:.5f}".format(trust), - "{:.5f}".format(consensus), - "{:.5f}".format(incentive), - "{:.5f}".format(dividends), - "{:_}".format(emission), - "{:.5f}".format(validator_trust), - "*" if validator_permit else "", - str(last_update), - ( - 
bittensor.utils.networking.int_to_ip(nn.axon_info.ip) - + ":" - + str(nn.axon_info.port) - if nn.axon_info.port != 0 - else "[yellow]none[/yellow]" - ), - nn.hotkey, - ] - - total_rank += rank - total_trust += trust - total_consensus += consensus - total_incentive += incentive - total_dividends += dividends - total_emission += emission - total_validator_trust += validator_trust - - if not (nn.hotkey, nn.coldkey) in hotkeys_seen: - # Don't double count stake on hotkey-coldkey pairs. - hotkeys_seen.add((nn.hotkey, nn.coldkey)) - total_stake += stake - - # netuid -1 are neurons that are de-registered. - if netuid != "-1": - total_neurons += 1 - - TABLE_DATA.append(row) - - # Add subnet header - if netuid == "-1": - grid.add_row(f"Deregistered Neurons") - else: - grid.add_row(f"Subnet: [bold white]{netuid}[/bold white]") - - table = Table( - show_footer=False, - width=cli.config.get("width", None), - pad_edge=False, - box=None, - ) - if last_subnet: - table.add_column( - "[overline white]COLDKEY", - str(total_neurons), - footer_style="overline white", - style="bold white", - ) - table.add_column( - "[overline white]HOTKEY", - str(total_neurons), - footer_style="overline white", - style="white", - ) - else: - # No footer for non-last subnet. - table.add_column("[overline white]COLDKEY", style="bold white") - table.add_column("[overline white]HOTKEY", style="white") - table.add_column( - "[overline white]UID", - str(total_neurons), - footer_style="overline white", - style="yellow", - ) - table.add_column( - "[overline white]ACTIVE", justify="right", style="green", no_wrap=True - ) - if last_subnet: - table.add_column( - "[overline white]STAKE(\u03c4)", - "\u03c4{:.5f}".format(total_stake), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - else: - # No footer for non-last subnet. 
- table.add_column( - "[overline white]STAKE(\u03c4)", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]RANK", - "{:.5f}".format(total_rank), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]TRUST", - "{:.5f}".format(total_trust), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]CONSENSUS", - "{:.5f}".format(total_consensus), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]INCENTIVE", - "{:.5f}".format(total_incentive), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]DIVIDENDS", - "{:.5f}".format(total_dividends), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]EMISSION(\u03c1)", - "\u03c1{:_}".format(total_emission), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]VTRUST", - "{:.5f}".format(total_validator_trust), - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column("[overline white]VPERMIT", justify="right", no_wrap=True) - table.add_column("[overline white]UPDATED", justify="right", no_wrap=True) - table.add_column( - "[overline white]AXON", justify="left", style="dim blue", no_wrap=True - ) - table.add_column( - "[overline white]HOTKEY_SS58", style="dim blue", no_wrap=False - ) - table.show_footer = True - - sort_by: Optional[str] = cli.config.get("sort_by", None) - sort_order: Optional[str] = cli.config.get("sort_order", None) - - if sort_by is not None and sort_by != "": - column_to_sort_by: int = 0 - highest_matching_ratio: int = 0 - sort_descending: bool = False # Default sort_order to 
ascending - - for index, column in zip(range(len(table.columns)), table.columns): - # Fuzzy match the column name. Default to the first column. - column_name = column.header.lower().replace("[overline white]", "") - match_ratio = fuzz.ratio(sort_by.lower(), column_name) - # Finds the best matching column - if match_ratio > highest_matching_ratio: - highest_matching_ratio = match_ratio - column_to_sort_by = index - - if sort_order.lower() in {"desc", "descending", "reverse"}: - # Sort descending if the sort_order matches desc, descending, or reverse - sort_descending = True - - def overview_sort_function(row): - data = row[column_to_sort_by] - # Try to convert to number if possible - try: - data = float(data) - except ValueError: - pass - return data - - TABLE_DATA.sort(key=overview_sort_function, reverse=sort_descending) - - for row in TABLE_DATA: - table.add_row(*row) - - grid.add_row(table) - - console.clear() - - caption = "[italic][dim][white]Wallet balance: [green]\u03c4" + str( - total_balance.tao - ) - grid.add_row(Align(caption, vertical="middle", align="center")) - - # Print the entire table/grid - console.print(grid, width=cli.config.get("width", None)) - - @staticmethod - def _get_neurons_for_netuid( - args_tuple: Tuple["bittensor.Config", int, List[str]], - ) -> Tuple[int, List["bittensor.NeuronInfoLite"], Optional[str]]: - subtensor_config, netuid, hot_wallets = args_tuple - - result: List["bittensor.NeuronInfoLite"] = [] - - try: - subtensor = bittensor.subtensor(config=subtensor_config, log_verbose=False) - - all_neurons: List["bittensor.NeuronInfoLite"] = subtensor.neurons_lite( - netuid=netuid - ) - # Map the hotkeys to uids - hotkey_to_neurons = {n.hotkey: n.uid for n in all_neurons} - for hot_wallet_addr in hot_wallets: - uid = hotkey_to_neurons.get(hot_wallet_addr) - if uid is not None: - nn = all_neurons[uid] - result.append(nn) - except Exception as e: - return netuid, [], "Error: {}".format(e) - finally: - if "subtensor" in locals(): - 
subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - return netuid, result, None - - @staticmethod - def _get_de_registered_stake_for_coldkey_wallet( - args_tuple, - ) -> Tuple[ - "bittensor.Wallet", List[Tuple[str, "bittensor.Balance"]], Optional[str] - ]: - subtensor_config, all_hotkey_addresses, coldkey_wallet = args_tuple - - # List of (hotkey_addr, our_stake) tuples. - result: List[Tuple[str, "bittensor.Balance"]] = [] - - try: - subtensor = bittensor.subtensor(config=subtensor_config, log_verbose=False) - - # Pull all stake for our coldkey - all_stake_info_for_coldkey = subtensor.get_stake_info_for_coldkey( - coldkey_ss58=coldkey_wallet.coldkeypub.ss58_address - ) - - ## Filter out hotkeys that are in our wallets - ## Filter out hotkeys that are delegates. - def _filter_stake_info(stake_info: "bittensor.StakeInfo") -> bool: - if stake_info.stake == 0: - return False # Skip hotkeys that we have no stake with. - if stake_info.hotkey_ss58 in all_hotkey_addresses: - return False # Skip hotkeys that are in our wallets. - if subtensor.is_hotkey_delegate(hotkey_ss58=stake_info.hotkey_ss58): - return False # Skip hotkeys that are delegates, they show up in btcli my_delegates table. 
- - return True - - all_staked_hotkeys = filter(_filter_stake_info, all_stake_info_for_coldkey) - result = [ - ( - stake_info.hotkey_ss58, - stake_info.stake.tao, - ) # stake is a Balance object - for stake_info in all_staked_hotkeys - ] - - except Exception as e: - return coldkey_wallet, [], "Error: {}".format(e) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - return coldkey_wallet, result, None - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - overview_parser = parser.add_parser( - "overview", help="""Show registered account overview.""" - ) - overview_parser.add_argument( - "--all", - dest="all", - action="store_true", - help="""View overview for all wallets.""", - default=False, - ) - overview_parser.add_argument( - "--width", - dest="width", - action="store", - type=int, - help="""Set the output width of the overview. Defaults to automatic width from terminal.""", - default=None, - ) - overview_parser.add_argument( - "--sort_by", - "--wallet.sort_by", - dest="sort_by", - required=False, - action="store", - default="", - type=str, - help="""Sort the hotkeys by the specified column title (e.g. name, uid, axon).""", - ) - overview_parser.add_argument( - "--sort_order", - "--wallet.sort_order", - dest="sort_order", - required=False, - action="store", - default="ascending", - type=str, - help="""Sort the hotkeys in the specified ordering. (ascending/asc or descending/desc/reverse)""", - ) - overview_parser.add_argument( - "--hotkeys", - "--exclude_hotkeys", - "--wallet.hotkeys", - "--wallet.exclude_hotkeys", - required=False, - action="store", - default=[], - type=str, - nargs="*", - help="""Specify the hotkeys by name or ss58 address. (e.g. hk1 hk2 hk3)""", - ) - overview_parser.add_argument( - "--all_hotkeys", - "--wallet.all_hotkeys", - required=False, - action="store_true", - default=False, - help="""To specify all hotkeys. 
Specifying hotkeys will exclude them from this all.""", - ) - overview_parser.add_argument( - "--netuids", - dest="netuids", - type=int, - nargs="*", - help="""Set the netuid(s) to filter by.""", - default=None, - ) - bittensor.wallet.add_args(overview_parser) - bittensor.subtensor.add_args(overview_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if ( - not config.is_set("wallet.name") - and not config.no_prompt - and not config.get("all", d=None) - ): - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if config.netuids != [] and config.netuids != None: - if not isinstance(config.netuids, list): - config.netuids = [int(config.netuids)] - else: - config.netuids = [int(netuid) for netuid in config.netuids] diff --git a/bittensor/commands/register.py b/bittensor/commands/register.py deleted file mode 100644 index a5a14773a2..0000000000 --- a/bittensor/commands/register.py +++ /dev/null @@ -1,613 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import sys -import argparse -import bittensor -from rich.prompt import Prompt, Confirm -from .utils import check_netuid_set, check_for_cuda_reg_config -from copy import deepcopy - -from . import defaults - -console = bittensor.__console__ - - -class RegisterCommand: - """ - Executes the ``register`` command to register a neuron on the Bittensor network by recycling some TAO (the network's native token). - - This command is used to add a new neuron to a specified subnet within the network, contributing to the decentralization and robustness of Bittensor. - - Usage: - Before registering, the command checks if the specified subnet exists and whether the user's balance is sufficient to cover the registration cost. - - The registration cost is determined by the current recycle amount for the specified subnet. If the balance is insufficient or the subnet does not exist, the command will exit with an appropriate error message. - - If the preconditions are met, and the user confirms the transaction (if ``no_prompt`` is not set), the command proceeds to register the neuron by recycling the required amount of TAO. - - The command structure includes: - - - Verification of subnet existence. - - Checking the user's balance against the current recycle amount for the subnet. - - User confirmation prompt for proceeding with registration. - - Execution of the registration process. - - Columns Displayed in the confirmation prompt: - - - Balance: The current balance of the user's wallet in TAO. - - Cost to Register: The required amount of TAO needed to register on the specified subnet. 
- - Example usage:: - - btcli subnets register --netuid 1 - - Note: - This command is critical for users who wish to contribute a new neuron to the network. It requires careful consideration of the subnet selection and an understanding of the registration costs. Users should ensure their wallet is sufficiently funded before attempting to register a neuron. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Register neuron by recycling some TAO.""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - RegisterCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Register neuron by recycling some TAO.""" - wallet = bittensor.wallet(config=cli.config) - - # Verify subnet exists - if not subtensor.subnet_exists(netuid=cli.config.netuid): - bittensor.__console__.print( - f"[red]Subnet {cli.config.netuid} does not exist[/red]" - ) - sys.exit(1) - - # Check current recycle amount - current_recycle = subtensor.recycle(netuid=cli.config.netuid) - balance = subtensor.get_balance(address=wallet.coldkeypub.ss58_address) - - # Check balance is sufficient - if balance < current_recycle: - bittensor.__console__.print( - f"[red]Insufficient balance {balance} to register neuron. 
Current recycle is {current_recycle} TAO[/red]" - ) - sys.exit(1) - - if not cli.config.no_prompt: - if ( - Confirm.ask( - f"Your balance is: [bold green]{balance}[/bold green]\nThe cost to register by recycle is [bold red]{current_recycle}[/bold red]\nDo you want to continue?", - default=False, - ) - == False - ): - sys.exit(1) - - subtensor.burned_register( - wallet=wallet, netuid=cli.config.netuid, prompt=not cli.config.no_prompt - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - register_parser = parser.add_parser( - "register", help="""Register a wallet to a network.""" - ) - register_parser.add_argument( - "--netuid", - type=int, - help="netuid for subnet to serve this neuron on", - default=argparse.SUPPRESS, - ) - - bittensor.wallet.add_args(register_parser) - bittensor.subtensor.add_args(register_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if ( - not config.is_set("subtensor.network") - and not config.is_set("subtensor.chain_endpoint") - and not config.no_prompt - ): - config.subtensor.network = Prompt.ask( - "Enter subtensor network", - choices=bittensor.__networks__, - default=defaults.subtensor.network, - ) - _, endpoint = bittensor.subtensor.determine_chain_endpoint_and_network( - config.subtensor.network - ) - config.subtensor.chain_endpoint = endpoint - - check_netuid_set( - config, subtensor=bittensor.subtensor(config=config, log_verbose=False) - ) - - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - -class PowRegisterCommand: - """ - Executes the ``pow_register`` command to register a neuron on the Bittensor network using Proof of Work (PoW). 
- - This method is an alternative registration process that leverages computational work for securing a neuron's place on the network. - - Usage: - The command starts by verifying the existence of the specified subnet. If the subnet does not exist, it terminates with an error message. - On successful verification, the PoW registration process is initiated, which requires solving computational puzzles. - - Optional arguments: - - ``--netuid`` (int): The netuid for the subnet on which to serve the neuron. Mandatory for specifying the target subnet. - - ``--pow_register.num_processes`` (int): The number of processors to use for PoW registration. Defaults to the system's default setting. - - ``--pow_register.update_interval`` (int): The number of nonces to process before checking for the next block during registration. Affects the frequency of update checks. - - ``--pow_register.no_output_in_place`` (bool): When set, disables the output of registration statistics in place. Useful for cleaner logs. - - ``--pow_register.verbose`` (bool): Enables verbose output of registration statistics for detailed information. - - ``--pow_register.cuda.use_cuda`` (bool): Enables the use of CUDA for GPU-accelerated PoW calculations. Requires a CUDA-compatible GPU. - - ``--pow_register.cuda.no_cuda`` (bool): Disables the use of CUDA, defaulting to CPU-based calculations. - - ``--pow_register.cuda.dev_id`` (int): Specifies the CUDA device ID, useful for systems with multiple CUDA-compatible GPUs. - - ``--pow_register.cuda.tpb`` (int): Sets the number of Threads Per Block for CUDA operations, affecting the GPU calculation dynamics. - - The command also supports additional wallet and subtensor arguments, enabling further customization of the registration process. - - Example usage:: - - btcli pow_register --netuid 1 --pow_register.num_processes 4 --cuda.use_cuda - - Note: - This command is suited for users with adequate computational resources to participate in PoW registration. 
It requires a sound understanding - of the network's operations and PoW mechanics. Users should ensure their systems meet the necessary hardware and software requirements, - particularly when opting for CUDA-based GPU acceleration. - - This command may be disabled according on the subnet owner's directive. For example, on netuid 1 this is permanently disabled. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Register neuron.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - PowRegisterCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Register neuron.""" - wallet = bittensor.wallet(config=cli.config) - - # Verify subnet exists - if not subtensor.subnet_exists(netuid=cli.config.netuid): - bittensor.__console__.print( - f"[red]Subnet {cli.config.netuid} does not exist[/red]" - ) - sys.exit(1) - - registered = subtensor.register( - wallet=wallet, - netuid=cli.config.netuid, - prompt=not cli.config.no_prompt, - tpb=cli.config.pow_register.cuda.get("tpb", None), - update_interval=cli.config.pow_register.get("update_interval", None), - num_processes=cli.config.pow_register.get("num_processes", None), - cuda=cli.config.pow_register.cuda.get( - "use_cuda", defaults.pow_register.cuda.use_cuda - ), - dev_id=cli.config.pow_register.cuda.get("dev_id", None), - output_in_place=cli.config.pow_register.get( - "output_in_place", defaults.pow_register.output_in_place - ), - log_verbose=cli.config.pow_register.get( - "verbose", defaults.pow_register.verbose - ), - ) - if not registered: - sys.exit(1) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - register_parser = parser.add_parser( - "pow_register", help="""Register a wallet to a network using PoW.""" - ) - register_parser.add_argument( - 
"--netuid", - type=int, - help="netuid for subnet to serve this neuron on", - default=argparse.SUPPRESS, - ) - register_parser.add_argument( - "--pow_register.num_processes", - "-n", - dest="pow_register.num_processes", - help="Number of processors to use for POW registration", - type=int, - default=defaults.pow_register.num_processes, - ) - register_parser.add_argument( - "--pow_register.update_interval", - "--pow_register.cuda.update_interval", - "--cuda.update_interval", - "-u", - help="The number of nonces to process before checking for next block during registration", - type=int, - default=defaults.pow_register.update_interval, - ) - register_parser.add_argument( - "--pow_register.no_output_in_place", - "--no_output_in_place", - dest="pow_register.output_in_place", - help="Whether to not ouput the registration statistics in-place. Set flag to disable output in-place.", - action="store_false", - required=False, - default=defaults.pow_register.output_in_place, - ) - register_parser.add_argument( - "--pow_register.verbose", - help="Whether to ouput the registration statistics verbosely.", - action="store_true", - required=False, - default=defaults.pow_register.verbose, - ) - - ## Registration args for CUDA registration. - register_parser.add_argument( - "--pow_register.cuda.use_cuda", - "--cuda", - "--cuda.use_cuda", - dest="pow_register.cuda.use_cuda", - default=defaults.pow_register.cuda.use_cuda, - help="""Set flag to use CUDA to register.""", - action="store_true", - required=False, - ) - register_parser.add_argument( - "--pow_register.cuda.no_cuda", - "--no_cuda", - "--cuda.no_cuda", - dest="pow_register.cuda.use_cuda", - default=not defaults.pow_register.cuda.use_cuda, - help="""Set flag to not use CUDA for registration""", - action="store_false", - required=False, - ) - - register_parser.add_argument( - "--pow_register.cuda.dev_id", - "--cuda.dev_id", - type=int, - nargs="+", - default=defaults.pow_register.cuda.dev_id, - help="""Set the CUDA device id(s). 
Goes by the order of speed. (i.e. 0 is the fastest).""", - required=False, - ) - register_parser.add_argument( - "--pow_register.cuda.tpb", - "--cuda.tpb", - type=int, - default=defaults.pow_register.cuda.tpb, - help="""Set the number of Threads Per Block for CUDA.""", - required=False, - ) - - bittensor.wallet.add_args(register_parser) - bittensor.subtensor.add_args(register_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if ( - not config.is_set("subtensor.network") - and not config.is_set("subtensor.chain_endpoint") - and not config.no_prompt - ): - config.subtensor.network = Prompt.ask( - "Enter subtensor network", - choices=bittensor.__networks__, - default=defaults.subtensor.network, - ) - _, endpoint = bittensor.subtensor.determine_chain_endpoint_and_network( - config.subtensor.network - ) - config.subtensor.chain_endpoint = endpoint - - check_netuid_set( - config, subtensor=bittensor.subtensor(config=config, log_verbose=False) - ) - - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - if not config.no_prompt: - check_for_cuda_reg_config(config) - - -class RunFaucetCommand: - """ - Executes the ``faucet`` command to obtain test TAO tokens by performing Proof of Work (PoW). - - IMPORTANT: - **THIS COMMAND IS CURRENTLY DISABLED.** - - This command is particularly useful for users who need test tokens for operations on the Bittensor testnet. - - Usage: - The command uses the PoW mechanism to validate the user's effort and rewards them with test TAO tokens. It is typically used in testnet environments where real value transactions are not necessary. 
- - Optional arguments: - - ``--faucet.num_processes`` (int): Specifies the number of processors to use for the PoW operation. A higher number of processors may increase the chances of successful computation. - - ``--faucet.update_interval`` (int): Sets the frequency of nonce processing before checking for the next block, which impacts the PoW operation's responsiveness. - - ``--faucet.no_output_in_place`` (bool): When set, it disables in-place output of registration statistics for cleaner log visibility. - - ``--faucet.verbose`` (bool): Enables verbose output for detailed statistical information during the PoW process. - - ``--faucet.cuda.use_cuda`` (bool): Activates the use of CUDA for GPU acceleration in the PoW process, suitable for CUDA-compatible GPUs. - - ``--faucet.cuda.no_cuda`` (bool): Disables the use of CUDA, opting for CPU-based calculations. - - ``--faucet.cuda.dev_id`` (int[]): Allows selection of specific CUDA device IDs for the operation, useful in multi-GPU setups. - - ``--faucet.cuda.tpb`` (int): Determines the number of Threads Per Block for CUDA operations, affecting GPU calculation efficiency. - - These options provide flexibility in configuring the PoW process according to the user's hardware capabilities and preferences. - - Example usage:: - - btcli wallet faucet --faucet.num_processes 4 --faucet.cuda.use_cuda - - Note: - This command is meant for use in testnet environments where users can experiment with the network without using real TAO tokens. - It's important for users to have the necessary hardware setup, especially when opting for CUDA-based GPU calculations. 
- - **THIS COMMAND IS CURRENTLY DISABLED.** - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Register neuron.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - RunFaucetCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Register neuron.""" - wallet = bittensor.wallet(config=cli.config) - success = subtensor.run_faucet( - wallet=wallet, - prompt=not cli.config.no_prompt, - tpb=cli.config.pow_register.cuda.get("tpb", None), - update_interval=cli.config.pow_register.get("update_interval", None), - num_processes=cli.config.pow_register.get("num_processes", None), - cuda=cli.config.pow_register.cuda.get( - "use_cuda", defaults.pow_register.cuda.use_cuda - ), - dev_id=cli.config.pow_register.cuda.get("dev_id", None), - output_in_place=cli.config.pow_register.get( - "output_in_place", defaults.pow_register.output_in_place - ), - log_verbose=cli.config.pow_register.get( - "verbose", defaults.pow_register.verbose - ), - ) - if not success: - bittensor.logging.error("Faucet run failed.") - sys.exit(1) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - run_faucet_parser = parser.add_parser( - "faucet", help="""Perform PoW to receieve test TAO in your wallet.""" - ) - run_faucet_parser.add_argument( - "--faucet.num_processes", - "-n", - dest="pow_register.num_processes", - help="Number of processors to use for POW registration", - type=int, - default=defaults.pow_register.num_processes, - ) - run_faucet_parser.add_argument( - "--faucet.update_interval", - "--faucet.cuda.update_interval", - "--cuda.update_interval", - "-u", - help="The number of nonces to process before checking for next block during registration", - type=int, - default=defaults.pow_register.update_interval, - ) - 
run_faucet_parser.add_argument( - "--faucet.no_output_in_place", - "--no_output_in_place", - dest="pow_register.output_in_place", - help="Whether to not ouput the registration statistics in-place. Set flag to disable output in-place.", - action="store_false", - required=False, - default=defaults.pow_register.output_in_place, - ) - run_faucet_parser.add_argument( - "--faucet.verbose", - help="Whether to ouput the registration statistics verbosely.", - action="store_true", - required=False, - default=defaults.pow_register.verbose, - ) - - ## Registration args for CUDA registration. - run_faucet_parser.add_argument( - "--faucet.cuda.use_cuda", - "--cuda", - "--cuda.use_cuda", - dest="pow_register.cuda.use_cuda", - default=defaults.pow_register.cuda.use_cuda, - help="""Set flag to use CUDA to pow_register.""", - action="store_true", - required=False, - ) - run_faucet_parser.add_argument( - "--faucet.cuda.no_cuda", - "--no_cuda", - "--cuda.no_cuda", - dest="pow_register.cuda.use_cuda", - default=not defaults.pow_register.cuda.use_cuda, - help="""Set flag to not use CUDA for registration""", - action="store_false", - required=False, - ) - run_faucet_parser.add_argument( - "--faucet.cuda.dev_id", - "--cuda.dev_id", - type=int, - nargs="+", - default=defaults.pow_register.cuda.dev_id, - help="""Set the CUDA device id(s). Goes by the order of speed. (i.e. 
0 is the fastest).""", - required=False, - ) - run_faucet_parser.add_argument( - "--faucet.cuda.tpb", - "--cuda.tpb", - type=int, - default=defaults.pow_register.cuda.tpb, - help="""Set the number of Threads Per Block for CUDA.""", - required=False, - ) - bittensor.wallet.add_args(run_faucet_parser) - bittensor.subtensor.add_args(run_faucet_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.no_prompt: - check_for_cuda_reg_config(config) - - -class SwapHotkeyCommand: - @staticmethod - def run(cli: "bittensor.cli"): - """ - Executes the ``swap_hotkey`` command to swap the hotkeys for a neuron on the network. - - Usage: - The command is used to swap the hotkey of a wallet for another hotkey on that same wallet. - - Optional arguments: - - ``--wallet.name`` (str): Specifies the wallet for which the hotkey is to be swapped. - - ``--wallet.hotkey`` (str): The original hotkey name that is getting swapped out. - - ``--wallet.hotkey_b`` (str): The new hotkey name for which the old is getting swapped out for. - - Example usage:: - - btcli wallet swap_hotkey --wallet.name your_wallet_name --wallet.hotkey original_hotkey --wallet.hotkey_b new_hotkey - """ - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - SwapHotkeyCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Swap your hotkey for all registered axons on the network.""" - wallet = bittensor.wallet(config=cli.config) - - # This creates an unnecessary amount of extra data, but simplifies implementation. 
- new_config = deepcopy(cli.config) - new_config.wallet.hotkey = new_config.wallet.hotkey_b - new_wallet = bittensor.wallet(config=new_config) - - subtensor.swap_hotkey( - wallet=wallet, - new_wallet=new_wallet, - wait_for_finalization=False, - wait_for_inclusion=True, - prompt=False, - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - swap_hotkey_parser = parser.add_parser( - "swap_hotkey", help="""Swap your associated hotkey.""" - ) - - swap_hotkey_parser.add_argument( - "--wallet.hotkey_b", - type=str, - default=defaults.wallet.hotkey, - help="""Name of the new hotkey""", - required=False, - ) - - bittensor.wallet.add_args(swap_hotkey_parser) - bittensor.subtensor.add_args(swap_hotkey_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if ( - not config.is_set("subtensor.network") - and not config.is_set("subtensor.chain_endpoint") - and not config.no_prompt - ): - config.subtensor.network = Prompt.ask( - "Enter subtensor network", - choices=bittensor.__networks__, - default=defaults.subtensor.network, - ) - _, endpoint = bittensor.subtensor.determine_chain_endpoint_and_network( - config.subtensor.network - ) - config.subtensor.chain_endpoint = endpoint - - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter old hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - if not config.is_set("wallet.hotkey_b") and not config.no_prompt: - hotkey = Prompt.ask("Enter new hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey_b = str(hotkey) diff --git a/bittensor/commands/root.py b/bittensor/commands/root.py deleted file mode 100644 index 5607921b19..0000000000 --- a/bittensor/commands/root.py +++ /dev/null @@ -1,681 +0,0 @@ -# The MIT License (MIT) -# 
Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import re -import typing -import argparse -import numpy as np -import bittensor -from typing import List, Optional, Dict -from rich.prompt import Prompt -from rich.table import Table -from .utils import get_delegates_details, DelegatesDetails - -from . import defaults - -console = bittensor.__console__ - - -class RootRegisterCommand: - """ - Executes the ``register`` command to register a wallet to the root network of the Bittensor network. - - This command is used to formally acknowledge a wallet's participation in the network's root layer. - - Usage: - The command registers the user's wallet with the root network, which is a crucial step for participating in network governance and other advanced functions. - - Optional arguments: - - None. The command primarily uses the wallet and subtensor configurations. 
- - Example usage:: - - btcli root register - - Note: - This command is important for users seeking to engage deeply with the Bittensor network, particularly in aspects related to network governance and decision-making. - - It is a straightforward process but requires the user to have an initialized and configured wallet. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Register to root network.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - RootRegisterCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Register to root network.""" - wallet = bittensor.wallet(config=cli.config) - - subtensor.root_register(wallet=wallet, prompt=not cli.config.no_prompt) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser( - "register", help="""Register a wallet to the root network.""" - ) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - -class RootList: - """ - Executes the ``list`` command to display the members of the root network on the Bittensor network. - - This command provides an overview of the neurons that constitute the network's foundational layer. 
- - Usage: - Upon execution, the command fetches and lists the neurons in the root network, showing their unique identifiers (UIDs), names, addresses, stakes, and whether they are part of the senate (network governance body). - - Optional arguments: - - None. The command uses the subtensor configuration to retrieve data. - - Example usage:: - - $ btcli root list - - UID NAME ADDRESS STAKE(τ) SENATOR - 0 5CaCUPsSSdKWcMJbmdmJdnWVa15fJQuz5HsSGgVdZffpHAUa 27086.37070 Yes - 1 RaoK9 5GmaAk7frPXnAxjbQvXcoEzMGZfkrDee76eGmKoB3wxUburE 520.24199 No - 2 Openτensor Foundaτion 5F4tQyWrhfGVcNhoqeiNsR6KjD4wMZ2kfhLj4oHYuyHbZAc3 1275437.45895 Yes - 3 RoundTable21 5FFApaS75bv5pJHfAp2FVLBj9ZaXuFDjEypsaBNc1wCfe52v 84718.42095 Yes - 4 5HK5tp6t2S59DywmHRWPBVJeJ86T61KjurYqeooqj8sREpeN 168897.40859 Yes - 5 Rizzo 5CXRfP2ekFhe62r7q3vppRajJmGhTi7vwvb2yr79jveZ282w 53383.34400 No - 6 τaosτaτs and BitAPAI 5Hddm3iBFD2GLT5ik7LZnT3XJUnRnN8PoeCFgGQgawUVKNm8 646944.73569 Yes - ... - - Note: - This command is useful for users interested in understanding the composition and governance structure of the Bittensor network's root layer. It provides insights into which neurons hold significant influence and responsibility within the network. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""List the root network""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - RootList._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""List the root network""" - console.print( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - subtensor.network - ) - ) - - senate_members = subtensor.get_senate_members() - root_neurons: typing.List[bittensor.NeuronInfoLite] = subtensor.neurons_lite( - netuid=0 - ) - delegate_info: Optional[Dict[str, DelegatesDetails]] = get_delegates_details( - url=bittensor.__delegates_details_url__ - ) - - table = Table(show_footer=False) - table.title = "[white]Root Network" - table.add_column( - "[overline white]UID", - footer_style="overline white", - style="rgb(50,163,219)", - no_wrap=True, - ) - table.add_column( - "[overline white]NAME", - footer_style="overline white", - style="rgb(50,163,219)", - no_wrap=True, - ) - table.add_column( - "[overline white]ADDRESS", - footer_style="overline white", - style="yellow", - no_wrap=True, - ) - table.add_column( - "[overline white]STAKE(\u03c4)", - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]SENATOR", - footer_style="overline white", - style="green", - no_wrap=True, - ) - table.show_footer = True - - for neuron_data in root_neurons: - table.add_row( - str(neuron_data.uid), - ( - delegate_info[neuron_data.hotkey].name - if neuron_data.hotkey in delegate_info - else "" - ), - neuron_data.hotkey, - "{:.5f}".format( - float(subtensor.get_total_stake_for_hotkey(neuron_data.hotkey)) - ), - "Yes" if neuron_data.hotkey in senate_members else "No", - ) - - table.box = None - table.pad_edge = False - table.width = 
None - bittensor.__console__.print(table) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser("list", help="""List the root network""") - bittensor.subtensor.add_args(parser) - - @staticmethod - def check_config(config: "bittensor.config"): - pass - - -class RootSetBoostCommand: - """ - Executes the ``boost`` command to boost the weights for a specific subnet within the root network on the Bittensor network. - - Usage: - The command allows boosting the weights for different subnets within the root network. - - Optional arguments: - - ``--netuid`` (int): A single netuid for which weights are to be boosted. - - ``--increase`` (float): The cooresponding increase in the weight for this subnet. - - Example usage:: - - $ btcli root boost --netuid 1 --increase 0.01 - - Enter netuid (e.g. 1): 1 - Enter amount (e.g. 0.01): 0.1 - Boosting weight for subnet: 1 by amount: 0.1 - - Normalized weights: - tensor([ - 0.0000, 0.5455, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.4545, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]) -> tensor([0.0000, 0.5455, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.4545, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000] - ) - - Do you want to set the following root weights?: - weights: tensor([ - 0.0000, 0.5455, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.4545, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 
0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]) - uids: tensor([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40])? [y/n]: y - True None - ✅ Finalized - ⠙ 📡 Setting root weights on test ...2023-11-28 22:09:14.001 | SUCCESS | Set weights Finalized: True - - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Set weights for root network.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - RootSetBoostCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Set weights for root network.""" - wallet = bittensor.wallet(config=cli.config) - - root = subtensor.metagraph(0, lite=False) - try: - my_uid = root.hotkeys.index(wallet.hotkey.ss58_address) - except ValueError: - bittensor.__console__.print( - "Wallet hotkey: {} not found in root metagraph".format(wallet.hotkey) - ) - exit() - my_weights = root.weights[my_uid] - prev_weight = my_weights[cli.config.netuid] - new_weight = prev_weight + cli.config.amount - - bittensor.__console__.print( - f"Boosting weight for netuid {cli.config.netuid} from {prev_weight} -> {new_weight}" - ) - my_weights[cli.config.netuid] = new_weight - all_netuids = np.arange(len(my_weights)) - - bittensor.__console__.print("Setting root weights...") - subtensor.root_set_weights( - wallet=wallet, - netuids=all_netuids, - weights=my_weights, - version_key=0, - prompt=not cli.config.no_prompt, - wait_for_finalization=True, - wait_for_inclusion=True, - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser( - "boost", help="""Boost weight for a specific subnet by increase amount.""" - ) - parser.add_argument("--netuid", dest="netuid", 
type=int, required=False) - parser.add_argument("--increase", dest="amount", type=float, required=False) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - if not config.is_set("netuid") and not config.no_prompt: - config.netuid = int(Prompt.ask(f"Enter netuid (e.g. 1)")) - if not config.is_set("amount") and not config.no_prompt: - config.amount = float(Prompt.ask(f"Enter amount (e.g. 0.01)")) - - -class RootSetSlashCommand: - """ - Executes the ``slash`` command to decrease the weights for a specific subnet within the root network on the Bittensor network. - - Usage: - The command allows slashing (decreasing) the weights for different subnets within the root network. - - Optional arguments: - - ``--netuid`` (int): A single netuid for which weights are to be slashed. - - ``--decrease`` (float): The corresponding decrease in the weight for this subnet. - - Example usage:: - - $ btcli root slash --netuid 1 --decrease 0.01 - - Enter netuid (e.g. 1): 1 - Enter decrease amount (e.g. 
0.01): 0.2 - Slashing weight for subnet: 1 by amount: 0.2 - - Normalized weights: - tensor([ - 0.0000, 0.4318, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.5682, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]) -> tensor([ - 0.0000, 0.4318, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.5682, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000] - ) - - Do you want to set the following root weights?: - weights: tensor([ - 0.0000, 0.4318, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.5682, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]) - uids: tensor([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40])? 
[y/n]: y - ⠙ 📡 Setting root weights on test ...2023-11-28 22:09:14.001 | SUCCESS | Set weights Finalized: True - """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Set weights for root network with decreased values.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - RootSetSlashCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - wallet = bittensor.wallet(config=cli.config) - - bittensor.__console__.print( - "Slashing weight for subnet: {} by amount: {}".format( - cli.config.netuid, cli.config.amount - ) - ) - root = subtensor.metagraph(0, lite=False) - try: - my_uid = root.hotkeys.index(wallet.hotkey.ss58_address) - except ValueError: - bittensor.__console__.print( - "Wallet hotkey: {} not found in root metagraph".format(wallet.hotkey) - ) - exit() - my_weights = root.weights[my_uid] - my_weights[cli.config.netuid] -= cli.config.amount - my_weights[my_weights < 0] = 0 # Ensure weights don't go negative - all_netuids = np.arange(len(my_weights)) - - subtensor.root_set_weights( - wallet=wallet, - netuids=all_netuids, - weights=my_weights, - version_key=0, - prompt=not cli.config.no_prompt, - wait_for_finalization=True, - wait_for_inclusion=True, - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser( - "slash", help="""Slash weight for a specific subnet by decrease amount.""" - ) - parser.add_argument("--netuid", dest="netuid", type=int, required=False) - parser.add_argument("--decrease", dest="amount", type=float, required=False) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter 
wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - if not config.is_set("netuid") and not config.no_prompt: - config.netuid = int(Prompt.ask(f"Enter netuid (e.g. 1)")) - if not config.is_set("amount") and not config.no_prompt: - config.amount = float(Prompt.ask(f"Enter decrease amount (e.g. 0.01)")) - - -class RootSetWeightsCommand: - """ - Executes the ``weights`` command to set the weights for the root network on the Bittensor network. - - This command is used by network senators to influence the distribution of network rewards and responsibilities. - - Usage: - The command allows setting weights for different subnets within the root network. Users need to specify the netuids (network unique identifiers) and corresponding weights they wish to assign. - - Optional arguments: - - ``--netuids`` (str): A comma-separated list of netuids for which weights are to be set. - - ``--weights`` (str): Corresponding weights for the specified netuids, in comma-separated format. - - Example usage:: - - btcli root weights --netuids 1,2,3 --weights 0.3,0.3,0.4 - - Note: - This command is particularly important for network senators and requires a comprehensive understanding of the network's dynamics. - It is a powerful tool that directly impacts the network's operational mechanics and reward distribution. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Set weights for root network.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - RootSetWeightsCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Set weights for root network.""" - wallet = bittensor.wallet(config=cli.config) - subnets: List[bittensor.SubnetInfo] = subtensor.get_all_subnets_info() - - # Get values if not set. - if not cli.config.is_set("netuids"): - example = ( - ", ".join(map(str, [subnet.netuid for subnet in subnets][:3])) + " ..." - ) - cli.config.netuids = Prompt.ask(f"Enter netuids (e.g. {example})") - - if not cli.config.is_set("weights"): - example = ( - ", ".join( - map( - str, - [ - "{:.2f}".format(float(1 / len(subnets))) - for subnet in subnets - ][:3], - ) - ) - + " ..." - ) - cli.config.weights = Prompt.ask(f"Enter weights (e.g. {example})") - - # Parse from string - matched_netuids = list(map(int, re.split(r"[ ,]+", cli.config.netuids))) - netuids = np.array(matched_netuids, dtype=np.int64) - - matched_weights = [ - float(weight) for weight in re.split(r"[ ,]+", cli.config.weights) - ] - weights = np.array(matched_weights, dtype=np.float32) - - # Run the set weights operation. 
- subtensor.root_set_weights( - wallet=wallet, - netuids=netuids, - weights=weights, - version_key=0, - prompt=not cli.config.no_prompt, - wait_for_finalization=True, - wait_for_inclusion=True, - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser("weights", help="""Set weights for root network.""") - parser.add_argument("--netuids", dest="netuids", type=str, required=False) - parser.add_argument("--weights", dest="weights", type=str, required=False) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - -class RootGetWeightsCommand: - """ - Executes the ``get_weights`` command to retrieve the weights set for the root network on the Bittensor network. - - This command provides visibility into how network responsibilities and rewards are distributed among various subnets. - - Usage: - The command outputs a table listing the weights assigned to each subnet within the root network. This information is crucial for understanding the current influence and reward distribution among the subnets. - - Optional arguments: - - None. The command fetches weight information based on the subtensor configuration. 
- - Example usage:: - - $ btcli root get_weights - - Root Network Weights - UID 0 1 2 3 4 5 8 9 11 13 18 19 - 1 100.00% - - - - - - - - - - - - 2 - 40.00% 5.00% 10.00% 10.00% 10.00% 10.00% 5.00% - - 10.00% - - 3 - - 25.00% - 25.00% - 25.00% - - - 25.00% - - 4 - - 7.00% 7.00% 20.00% 20.00% 20.00% - 6.00% - 20.00% - - 5 - 20.00% - 10.00% 15.00% 15.00% 15.00% 5.00% - - 10.00% 10.00% - 6 - - - - 10.00% 10.00% 25.00% 25.00% - - 30.00% - - 7 - 60.00% - - 20.00% - - - 20.00% - - - - 8 - 49.35% - 7.18% 13.59% 21.14% 1.53% 0.12% 7.06% 0.03% - - - 9 100.00% - - - - - - - - - - - - ... - - Note: - This command is essential for users interested in the governance and operational dynamics of the Bittensor network. It offers transparency into how network rewards and responsibilities are allocated across different subnets. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Get weights for root network.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - RootGetWeightsCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Get weights for root network.""" - weights = subtensor.weights(0) - - table = Table(show_footer=False) - table.title = "[white]Root Network Weights" - table.add_column( - "[white]UID", - header_style="overline white", - footer_style="overline white", - style="rgb(50,163,219)", - no_wrap=True, - ) - - uid_to_weights = {} - netuids = set() - for matrix in weights: - [uid, weights_data] = matrix - - if not len(weights_data): - uid_to_weights[uid] = {} - normalized_weights = [] - else: - normalized_weights = np.array(weights_data)[:, 1] / max( - np.sum(weights_data, axis=0)[1], 1 - ) - - for weight_data, normalized_weight in zip(weights_data, normalized_weights): - [netuid, _] = weight_data - netuids.add(netuid) - 
if uid not in uid_to_weights: - uid_to_weights[uid] = {} - - uid_to_weights[uid][netuid] = normalized_weight - - for netuid in netuids: - table.add_column( - f"[white]{netuid}", - header_style="overline white", - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - - for uid in uid_to_weights: - row = [str(uid)] - - uid_weights = uid_to_weights[uid] - for netuid in netuids: - if netuid in uid_weights: - normalized_weight = uid_weights[netuid] - row.append("{:0.2f}%".format(normalized_weight * 100)) - else: - row.append("-") - table.add_row(*row) - - table.show_footer = True - - table.box = None - table.pad_edge = False - table.width = None - bittensor.__console__.print(table) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser( - "get_weights", help="""Get weights for root network.""" - ) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - @staticmethod - def check_config(config: "bittensor.config"): - pass diff --git a/bittensor/commands/senate.py b/bittensor/commands/senate.py deleted file mode 100644 index 37f2d79585..0000000000 --- a/bittensor/commands/senate.py +++ /dev/null @@ -1,670 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - - -import argparse -import bittensor -from rich.prompt import Prompt, Confirm -from rich.table import Table -from typing import Optional, Dict -from .utils import get_delegates_details, DelegatesDetails -from . import defaults - -console = bittensor.__console__ - - -class SenateCommand: - """ - Executes the ``senate`` command to view the members of Bittensor's governance protocol, known as the Senate. - - This command lists the delegates involved in the decision-making process of the Bittensor network. - - Usage: - The command retrieves and displays a list of Senate members, showing their names and wallet addresses. - This information is crucial for understanding who holds governance roles within the network. - - Example usage:: - - btcli root senate - - Note: - This command is particularly useful for users interested in the governance structure and participants of the Bittensor network. It provides transparency into the network's decision-making body. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""View Bittensor's governance protocol proposals""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - SenateCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""View Bittensor's governance protocol proposals""" - console = bittensor.__console__ - console.print( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - cli.config.subtensor.network - ) - ) - - senate_members = subtensor.get_senate_members() - delegate_info: Optional[Dict[str, DelegatesDetails]] = get_delegates_details( - url=bittensor.__delegates_details_url__ - ) - - table = Table(show_footer=False) - table.title = "[white]Senate" - table.add_column( - "[overline white]NAME", - footer_style="overline white", - style="rgb(50,163,219)", - no_wrap=True, - ) - table.add_column( - "[overline white]ADDRESS", - footer_style="overline white", - style="yellow", - no_wrap=True, - ) - table.show_footer = True - - for ss58_address in senate_members: - table.add_row( - ( - delegate_info[ss58_address].name - if ss58_address in delegate_info - else "" - ), - ss58_address, - ) - - table.box = None - table.pad_edge = False - table.width = None - console.print(table) - - @classmethod - def check_config(cls, config: "bittensor.config"): - None - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser): - senate_parser = parser.add_parser( - "senate", help="""View senate and it's members""" - ) - - bittensor.wallet.add_args(senate_parser) - bittensor.subtensor.add_args(senate_parser) - - -def format_call_data(call_data: "bittensor.ProposalCallData") -> str: - human_call_data = list() - - for arg in call_data["call_args"]: - arg_value = arg["value"] - - # If this argument 
is a nested call - func_args = ( - format_call_data( - { - "call_function": arg_value["call_function"], - "call_args": arg_value["call_args"], - } - ) - if isinstance(arg_value, dict) and "call_function" in arg_value - else str(arg_value) - ) - - human_call_data.append("{}: {}".format(arg["name"], func_args)) - - return "{}({})".format(call_data["call_function"], ", ".join(human_call_data)) - - -def display_votes( - vote_data: "bittensor.ProposalVoteData", delegate_info: "bittensor.DelegateInfo" -) -> str: - vote_list = list() - - for address in vote_data["ayes"]: - vote_list.append( - "{}: {}".format( - delegate_info[address].name if address in delegate_info else address, - "[bold green]Aye[/bold green]", - ) - ) - - for address in vote_data["nays"]: - vote_list.append( - "{}: {}".format( - delegate_info[address].name if address in delegate_info else address, - "[bold red]Nay[/bold red]", - ) - ) - - return "\n".join(vote_list) - - -class ProposalsCommand: - """ - Executes the ``proposals`` command to view active proposals within Bittensor's governance protocol. - - This command displays the details of ongoing proposals, including votes, thresholds, and proposal data. - - Usage: - The command lists all active proposals, showing their hash, voting threshold, number of ayes and nays, detailed votes by address, end block number, and call data associated with each proposal. - - Example usage:: - - btcli root proposals - - Note: - This command is essential for users who are actively participating in or monitoring the governance of the Bittensor network. - It provides a detailed view of the proposals being considered, along with the community's response to each. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""View Bittensor's governance protocol proposals""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - ProposalsCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""View Bittensor's governance protocol proposals""" - console = bittensor.__console__ - console.print( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - subtensor.network - ) - ) - - senate_members = subtensor.get_senate_members() - proposals = subtensor.get_proposals() - - registered_delegate_info: Optional[Dict[str, DelegatesDetails]] = ( - get_delegates_details(url=bittensor.__delegates_details_url__) - ) - - table = Table(show_footer=False) - table.title = ( - "[white]Proposals\t\tActive Proposals: {}\t\tSenate Size: {}".format( - len(proposals), len(senate_members) - ) - ) - table.add_column( - "[overline white]HASH", - footer_style="overline white", - style="yellow", - no_wrap=True, - ) - table.add_column( - "[overline white]THRESHOLD", footer_style="overline white", style="white" - ) - table.add_column( - "[overline white]AYES", footer_style="overline white", style="green" - ) - table.add_column( - "[overline white]NAYS", footer_style="overline white", style="red" - ) - table.add_column( - "[overline white]VOTES", - footer_style="overline white", - style="rgb(50,163,219)", - ) - table.add_column( - "[overline white]END", footer_style="overline white", style="blue" - ) - table.add_column( - "[overline white]CALLDATA", footer_style="overline white", style="white" - ) - table.show_footer = True - - for hash in proposals: - call_data, vote_data = proposals[hash] - - table.add_row( - hash, - str(vote_data["threshold"]), - str(len(vote_data["ayes"])), - 
str(len(vote_data["nays"])), - display_votes(vote_data, registered_delegate_info), - str(vote_data["end"]), - format_call_data(call_data), - ) - - table.box = None - table.pad_edge = False - table.width = None - console.print(table) - - @classmethod - def check_config(cls, config: "bittensor.config"): - None - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser): - proposals_parser = parser.add_parser( - "proposals", help="""View active triumvirate proposals and their status""" - ) - - bittensor.wallet.add_args(proposals_parser) - bittensor.subtensor.add_args(proposals_parser) - - -class ShowVotesCommand: - """ - Executes the ``proposal_votes`` command to view the votes for a specific proposal in Bittensor's governance protocol. - - IMPORTANT - **THIS COMMAND IS DEPRECATED**. Use ``btcli root proposals`` to see vote status. - - This command provides a detailed breakdown of the votes cast by the senators for a particular proposal. - - Usage: - Users need to specify the hash of the proposal they are interested in. The command then displays the voting addresses and their respective votes (Aye or Nay) for the specified proposal. - - Optional arguments: - - ``--proposal`` (str): The hash of the proposal for which votes need to be displayed. - - Example usage:: - - btcli root proposal_votes --proposal - - Note: - This command is crucial for users seeking detailed insights into the voting behavior of the Senate on specific governance proposals. - It helps in understanding the level of consensus or disagreement within the Senate on key decisions. - - **THIS COMMAND IS DEPRECATED**. Use ``btcli root proposals`` to see vote status. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""View Bittensor's governance protocol proposals active votes""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - ShowVotesCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""View Bittensor's governance protocol proposals active votes""" - console.print( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - cli.config.subtensor.network - ) - ) - - proposal_hash = cli.config.proposal_hash - if len(proposal_hash) == 0: - console.print( - 'Aborting: Proposal hash not specified. View all proposals with the "proposals" command.' - ) - return - - proposal_vote_data = subtensor.get_vote_data(proposal_hash) - if proposal_vote_data == None: - console.print(":cross_mark: [red]Failed[/red]: Proposal not found.") - return - - registered_delegate_info: Optional[Dict[str, DelegatesDetails]] = ( - get_delegates_details(url=bittensor.__delegates_details_url__) - ) - - table = Table(show_footer=False) - table.title = "[white]Votes for Proposal {}".format(proposal_hash) - table.add_column( - "[overline white]ADDRESS", - footer_style="overline white", - style="yellow", - no_wrap=True, - ) - table.add_column( - "[overline white]VOTE", footer_style="overline white", style="white" - ) - table.show_footer = True - - votes = display_votes(proposal_vote_data, registered_delegate_info).split("\n") - for vote in votes: - split_vote_data = vote.split(": ") # Nasty, but will work. 
- table.add_row(split_vote_data[0], split_vote_data[1]) - - table.box = None - table.pad_edge = False - table.min_width = 64 - console.print(table) - - @classmethod - def check_config(cls, config: "bittensor.config"): - if config.proposal_hash == "" and not config.no_prompt: - proposal_hash = Prompt.ask("Enter proposal hash") - config.proposal_hash = str(proposal_hash) - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser): - show_votes_parser = parser.add_parser( - "proposal_votes", help="""View an active proposal's votes by address.""" - ) - show_votes_parser.add_argument( - "--proposal", - dest="proposal_hash", - type=str, - nargs="?", - help="""Set the proposal to show votes for.""", - default="", - ) - bittensor.wallet.add_args(show_votes_parser) - bittensor.subtensor.add_args(show_votes_parser) - - -class SenateRegisterCommand: - """ - Executes the ``senate_register`` command to register as a member of the Senate in Bittensor's governance protocol. - - This command is used by delegates who wish to participate in the governance and decision-making process of the network. - - Usage: - The command checks if the user's hotkey is a delegate and not already a Senate member before registering them to the Senate. - Successful execution allows the user to participate in proposal voting and other governance activities. - - Example usage:: - - btcli root senate_register - - Note: - This command is intended for delegates who are interested in actively participating in the governance of the Bittensor network. - It is a significant step towards engaging in network decision-making processes. 
- """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Register to participate in Bittensor's governance protocol proposals""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - SenateRegisterCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Register to participate in Bittensor's governance protocol proposals""" - wallet = bittensor.wallet(config=cli.config) - - # Unlock the wallet. - wallet.hotkey - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return - - # Check if the hotkey is a delegate. - if not subtensor.is_hotkey_delegate(wallet.hotkey.ss58_address): - console.print( - "Aborting: Hotkey {} isn't a delegate.".format( - wallet.hotkey.ss58_address - ) - ) - return - - if subtensor.is_senate_member(hotkey_ss58=wallet.hotkey.ss58_address): - console.print( - "Aborting: Hotkey {} is already a senate member.".format( - wallet.hotkey.ss58_address - ) - ) - return - - subtensor.register_senate(wallet=wallet, prompt=not cli.config.no_prompt) - - @classmethod - def check_config(cls, config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser): - senate_register_parser = parser.add_parser( - "senate_register", 
- help="""Register as a senate member to participate in proposals""", - ) - - bittensor.wallet.add_args(senate_register_parser) - bittensor.subtensor.add_args(senate_register_parser) - - -class SenateLeaveCommand: - """ - Executes the ``senate_leave`` command to discard membership in Bittensor's Senate. - - This command allows a Senate member to voluntarily leave the governance body. - - Usage: - The command checks if the user's hotkey is currently a Senate member before processing the request to leave the Senate. - It effectively removes the user from participating in future governance decisions. - - Example usage:: - - btcli root senate_leave - - Note: - This command is relevant for Senate members who wish to step down from their governance responsibilities within the Bittensor network. - It should be used when a member no longer desires to participate in the Senate activities. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Discard membership in Bittensor's governance protocol proposals""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - SenateLeaveCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.cli"): - r"""Discard membership in Bittensor's governance protocol proposals""" - wallet = bittensor.wallet(config=cli.config) - - # Unlock the wallet. 
- wallet.hotkey - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return - - if not subtensor.is_senate_member(hotkey_ss58=wallet.hotkey.ss58_address): - console.print( - "Aborting: Hotkey {} isn't a senate member.".format( - wallet.hotkey.ss58_address - ) - ) - return - - subtensor.leave_senate(wallet=wallet, prompt=not cli.config.no_prompt) - - @classmethod - def check_config(cls, config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser): - senate_leave_parser = parser.add_parser( - "senate_leave", - help="""Discard senate membership in the governance protocol""", - ) - - bittensor.wallet.add_args(senate_leave_parser) - bittensor.subtensor.add_args(senate_leave_parser) - - -class VoteCommand: - """ - Executes the ``senate_vote`` command to cast a vote on an active proposal in Bittensor's governance protocol. - - This command is used by Senate members to vote on various proposals that shape the network's future. - - Usage: - The user needs to specify the hash of the proposal they want to vote on. The command then allows the Senate member to cast an 'Aye' or 'Nay' vote, contributing to the decision-making process. - - Optional arguments: - - ``--proposal`` (str): The hash of the proposal to vote on. - - Example usage:: - - btcli root senate_vote --proposal - - Note: - This command is crucial for Senate members to exercise their voting rights on key proposals. 
It plays a vital role in the governance and evolution of the Bittensor network. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Vote in Bittensor's governance protocol proposals""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - VoteCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Vote in Bittensor's governance protocol proposals""" - wallet = bittensor.wallet(config=cli.config) - - proposal_hash = cli.config.proposal_hash - if len(proposal_hash) == 0: - console.print( - 'Aborting: Proposal hash not specified. View all proposals with the "proposals" command.' - ) - return - - if not subtensor.is_senate_member(hotkey_ss58=wallet.hotkey.ss58_address): - console.print( - "Aborting: Hotkey {} isn't a senate member.".format( - wallet.hotkey.ss58_address - ) - ) - return - - # Unlock the wallet. 
- wallet.hotkey - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return - - vote_data = subtensor.get_vote_data(proposal_hash) - - vote_data = subtensor.get_vote_data(proposal_hash) - if vote_data == None: - console.print(":cross_mark: [red]Failed[/red]: Proposal not found.") - return - - vote = Confirm.ask("Desired vote for proposal") - subtensor.vote_senate( - wallet=wallet, - proposal_hash=proposal_hash, - proposal_idx=vote_data["index"], - vote=vote, - prompt=not cli.config.no_prompt, - ) - - @classmethod - def check_config(cls, config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - if config.proposal_hash == "" and not config.no_prompt: - proposal_hash = Prompt.ask("Enter proposal hash") - config.proposal_hash = str(proposal_hash) - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser): - vote_parser = parser.add_parser( - "senate_vote", help="""Vote on an active proposal by hash.""" - ) - vote_parser.add_argument( - "--proposal", - dest="proposal_hash", - type=str, - nargs="?", - help="""Set the proposal to show votes for.""", - default="", - ) - bittensor.wallet.add_args(vote_parser) - bittensor.subtensor.add_args(vote_parser) diff --git a/bittensor/commands/stake.py b/bittensor/commands/stake.py deleted file mode 100644 index eff415d1a1..0000000000 --- a/bittensor/commands/stake.py +++ /dev/null @@ -1,1401 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to 
any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import argparse -import os -import sys -import re -from typing import List, Union, Optional, Dict, Tuple - -from rich.prompt import Confirm, Prompt -from rich.table import Table -from rich.console import Console -from rich.text import Text -from tqdm import tqdm - -import bittensor -from bittensor.utils.balance import Balance -from .utils import ( - get_hotkey_wallets_for_wallet, - get_delegates_details, - DelegatesDetails, -) -from . 
import defaults # type: ignore -from ..utils import wallet_utils -from ..utils.formatting import u64_to_float, u16_to_float - -console = bittensor.__console__ - -MAX_CHILDREN = 5 - - -def get_netuid( - cli: "bittensor.cli", subtensor: "bittensor.subtensor", prompt: bool = True -) -> Tuple[bool, int]: - """Retrieve and validate the netuid from the user or configuration.""" - console = Console() - if not cli.config.is_set("netuid") and prompt: - cli.config.netuid = Prompt.ask("Enter netuid") - try: - cli.config.netuid = int(cli.config.netuid) - except ValueError: - console.print( - "[red]Invalid input. Please enter a valid integer for netuid.[/red]" - ) - return False, -1 - netuid = cli.config.netuid - if netuid < 0 or netuid > 65535: - console.print( - "[red]Invalid input. Please enter a valid integer for netuid in subnet range.[/red]" - ) - return False, -1 - if not subtensor.subnet_exists(netuid=netuid): - console.print( - "[red]Network with netuid {} does not exist. Please try again.[/red]".format( - netuid - ) - ) - return False, -1 - return True, netuid - - -def get_hotkey(wallet: "bittensor.wallet", config: "bittensor.config") -> str: - """Retrieve the hotkey from the wallet or config.""" - if wallet and wallet.hotkey: - return wallet.hotkey.ss58_address - elif config.is_set("hotkey"): - return config.hotkey - elif config.is_set("ss58"): - return config.ss58 - else: - return Prompt.ask("Enter hotkey (ss58)") - - -class StakeCommand: - """ - Executes the ``add`` command to stake tokens to one or more hotkeys from a user's coldkey on the Bittensor network. - - This command is used to allocate tokens to different hotkeys, securing their position and influence on the network. - - Usage: - Users can specify the amount to stake, the hotkeys to stake to (either by name or ``SS58`` address), and whether to stake to all hotkeys. The command checks for sufficient balance and hotkey registration - before proceeding with the staking process. 
- - Optional arguments: - - ``--all`` (bool): When set, stakes all available tokens from the coldkey. - - ``--uid`` (int): The unique identifier of the neuron to which the stake is to be added. - - ``--amount`` (float): The amount of TAO tokens to stake. - - ``--max_stake`` (float): Sets the maximum amount of TAO to have staked in each hotkey. - - ``--hotkeys`` (list): Specifies hotkeys by name or SS58 address to stake to. - - ``--all_hotkeys`` (bool): When set, stakes to all hotkeys associated with the wallet, excluding any specified in --hotkeys. - - The command prompts for confirmation before executing the staking operation. - - Example usage:: - - btcli stake add --amount 100 --wallet.name --wallet.hotkey - - Note: - This command is critical for users who wish to distribute their stakes among different neurons (hotkeys) on the network. - It allows for a strategic allocation of tokens to enhance network participation and influence. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Stake token of amount to hotkey(s).""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - StakeCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Stake token of amount to hotkey(s).""" - config = cli.config.copy() - wallet = bittensor.wallet(config=config) - - # Get the hotkey_names (if any) and the hotkey_ss58s. - hotkeys_to_stake_to: List[Tuple[Optional[str], str]] = [] - if config.get("all_hotkeys"): - # Stake to all hotkeys. - all_hotkeys: List[bittensor.wallet] = get_hotkey_wallets_for_wallet( - wallet=wallet - ) - # Get the hotkeys to exclude. (d)efault to no exclusions. - hotkeys_to_exclude: List[str] = cli.config.get("hotkeys", d=[]) - # Exclude hotkeys that are specified. 
- hotkeys_to_stake_to = [ - (wallet.hotkey_str, wallet.hotkey.ss58_address) - for wallet in all_hotkeys - if wallet.hotkey_str not in hotkeys_to_exclude - ] # definitely wallets - - elif config.get("hotkeys"): - # Stake to specific hotkeys. - for hotkey_ss58_or_hotkey_name in config.get("hotkeys"): - if bittensor.utils.is_valid_ss58_address(hotkey_ss58_or_hotkey_name): - # If the hotkey is a valid ss58 address, we add it to the list. - hotkeys_to_stake_to.append((None, hotkey_ss58_or_hotkey_name)) - else: - # If the hotkey is not a valid ss58 address, we assume it is a hotkey name. - # We then get the hotkey from the wallet and add it to the list. - wallet_ = bittensor.wallet( - config=config, hotkey=hotkey_ss58_or_hotkey_name - ) - hotkeys_to_stake_to.append( - (wallet_.hotkey_str, wallet_.hotkey.ss58_address) - ) - elif config.wallet.get("hotkey"): - # Only config.wallet.hotkey is specified. - # so we stake to that single hotkey. - hotkey_ss58_or_name = config.wallet.get("hotkey") - if bittensor.utils.is_valid_ss58_address(hotkey_ss58_or_name): - hotkeys_to_stake_to = [(None, hotkey_ss58_or_name)] - else: - # Hotkey is not a valid ss58 address, so we assume it is a hotkey name. - wallet_ = bittensor.wallet(config=config, hotkey=hotkey_ss58_or_name) - hotkeys_to_stake_to = [ - (wallet_.hotkey_str, wallet_.hotkey.ss58_address) - ] - else: - # Only config.wallet.hotkey is specified. - # so we stake to that single hotkey. - assert config.wallet.hotkey is not None - hotkeys_to_stake_to = [ - (None, bittensor.wallet(config=config).hotkey.ss58_address) - ] - - # Get coldkey balance - wallet_balance: Balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - final_hotkeys: List[Tuple[str, str]] = [] - final_amounts: List[Union[float, Balance]] = [] - for hotkey in tqdm(hotkeys_to_stake_to): - hotkey: Tuple[Optional[str], str] # (hotkey_name (or None), hotkey_ss58) - if not subtensor.is_hotkey_registered_any(hotkey_ss58=hotkey[1]): - # Hotkey is not registered. 
- if len(hotkeys_to_stake_to) == 1: - # Only one hotkey, error - bittensor.__console__.print( - f"[red]Hotkey [bold]{hotkey[1]}[/bold] is not registered. Aborting.[/red]" - ) - return None - else: - # Otherwise, print warning and skip - bittensor.__console__.print( - f"[yellow]Hotkey [bold]{hotkey[1]}[/bold] is not registered. Skipping.[/yellow]" - ) - continue - - stake_amount_tao: float = config.get("amount") - if config.get("max_stake"): - # Get the current stake of the hotkey from this coldkey. - hotkey_stake: Balance = subtensor.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=hotkey[1], coldkey_ss58=wallet.coldkeypub.ss58_address - ) - stake_amount_tao: float = config.get("max_stake") - hotkey_stake.tao - - # If the max_stake is greater than the current wallet balance, stake the entire balance. - stake_amount_tao: float = min(stake_amount_tao, wallet_balance.tao) - if ( - stake_amount_tao <= 0.00001 - ): # Threshold because of fees, might create a loop otherwise - # Skip hotkey if max_stake is less than current stake. - continue - wallet_balance = Balance.from_tao(wallet_balance.tao - stake_amount_tao) - - if wallet_balance.tao < 0: - # No more balance to stake. - break - - final_amounts.append(stake_amount_tao) - final_hotkeys.append(hotkey) # add both the name and the ss58 address. - - if len(final_hotkeys) == 0: - # No hotkeys to stake to. - bittensor.__console__.print( - "Not enough balance to stake to any hotkeys or max_stake is less than current stake." 
- ) - return None - - # Ask to stake - if not config.no_prompt: - if not Confirm.ask( - f"Do you want to stake to the following keys from {wallet.name}:\n" - + "".join( - [ - f" [bold white]- {hotkey[0] + ':' if hotkey[0] else ''}{hotkey[1]}: {f'{amount} {bittensor.__tao_symbol__}' if amount else 'All'}[/bold white]\n" - for hotkey, amount in zip(final_hotkeys, final_amounts) - ] - ) - ): - return None - - if len(final_hotkeys) == 1: - # do regular stake - return subtensor.add_stake( - wallet=wallet, - hotkey_ss58=final_hotkeys[0][1], - amount=None if config.get("stake_all") else final_amounts[0], - wait_for_inclusion=True, - prompt=not config.no_prompt, - ) - - subtensor.add_stake_multiple( - wallet=wallet, - hotkey_ss58s=[hotkey_ss58 for _, hotkey_ss58 in final_hotkeys], - amounts=None if config.get("stake_all") else final_amounts, - wait_for_inclusion=True, - prompt=False, - ) - - @classmethod - def check_config(cls, config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if ( - not config.is_set("wallet.hotkey") - and not config.no_prompt - and not config.wallet.get("all_hotkeys") - and not config.wallet.get("hotkeys") - ): - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - # Get amount. 
- if ( - not config.get("amount") - and not config.get("stake_all") - and not config.get("max_stake") - ): - if not Confirm.ask( - "Stake all Tao from account: [bold]'{}'[/bold]?".format( - config.wallet.get("name", defaults.wallet.name) - ) - ): - amount = Prompt.ask("Enter Tao amount to stake") - try: - config.amount = float(amount) - except ValueError: - console.print( - ":cross_mark:[red]Invalid Tao amount[/red] [bold white]{}[/bold white]".format( - amount - ) - ) - sys.exit() - else: - config.stake_all = True - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser): - stake_parser = parser.add_parser( - "add", help="""Add stake to your hotkey accounts from your coldkey.""" - ) - stake_parser.add_argument("--all", dest="stake_all", action="store_true") - stake_parser.add_argument("--uid", dest="uid", type=int, required=False) - stake_parser.add_argument("--amount", dest="amount", type=float, required=False) - stake_parser.add_argument( - "--max_stake", - dest="max_stake", - type=float, - required=False, - action="store", - default=None, - help="""Specify the maximum amount of Tao to have staked in each hotkey.""", - ) - stake_parser.add_argument( - "--hotkeys", - "--exclude_hotkeys", - "--wallet.hotkeys", - "--wallet.exclude_hotkeys", - required=False, - action="store", - default=[], - type=str, - nargs="*", - help="""Specify the hotkeys by name or ss58 address. (e.g. hk1 hk2 hk3)""", - ) - stake_parser.add_argument( - "--all_hotkeys", - "--wallet.all_hotkeys", - required=False, - action="store_true", - default=False, - help="""To specify all hotkeys. 
Specifying hotkeys will exclude them from this all.""", - ) - bittensor.wallet.add_args(stake_parser) - bittensor.subtensor.add_args(stake_parser) - - -def _get_coldkey_wallets_for_path(path: str) -> List["bittensor.wallet"]: - try: - wallet_names = next(os.walk(os.path.expanduser(path)))[1] - return [bittensor.wallet(path=path, name=name) for name in wallet_names] - except StopIteration: - # No wallet files found. - wallets = [] - return wallets - - -def _get_hotkey_wallets_for_wallet(wallet) -> List["bittensor.wallet"]: - hotkey_wallets = [] - hotkeys_path = wallet.path + "/" + wallet.name + "/hotkeys" - try: - hotkey_files = next(os.walk(os.path.expanduser(hotkeys_path)))[2] - except StopIteration: - hotkey_files = [] - for hotkey_file_name in hotkey_files: - try: - hotkey_for_name = bittensor.wallet( - path=wallet.path, name=wallet.name, hotkey=hotkey_file_name - ) - if ( - hotkey_for_name.hotkey_file.exists_on_device() - and not hotkey_for_name.hotkey_file.is_encrypted() - ): - hotkey_wallets.append(hotkey_for_name) - except Exception: - pass - return hotkey_wallets - - -class StakeShow: - """ - Executes the ``show`` command to list all stake accounts associated with a user's wallet on the Bittensor network. - - This command provides a comprehensive view of the stakes associated with both hotkeys and delegates linked to the user's coldkey. - - Usage: - The command lists all stake accounts for a specified wallet or all wallets in the user's configuration directory. - It displays the coldkey, balance, account details (hotkey/delegate name), stake amount, and the rate of return. - - Optional arguments: - - ``--all`` (bool): When set, the command checks all coldkey wallets instead of just the specified wallet. - - The command compiles a table showing: - - - Coldkey: The coldkey associated with the wallet. - - Balance: The balance of the coldkey. - - Account: The name of the hotkey or delegate. - - Stake: The amount of TAO staked to the hotkey or delegate. 
- - Rate: The rate of return on the stake, typically shown in TAO per day. - - Example usage:: - - btcli stake show --all - - Note: - This command is essential for users who wish to monitor their stake distribution and returns across various accounts on the Bittensor network. - It provides a clear and detailed overview of the user's staking activities. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Show all stake accounts.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - StakeShow._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Show all stake accounts.""" - if cli.config.get("all", d=False) == True: - wallets = _get_coldkey_wallets_for_path(cli.config.wallet.path) - else: - wallets = [bittensor.wallet(config=cli.config)] - registered_delegate_info: Optional[Dict[str, DelegatesDetails]] = ( - get_delegates_details(url=bittensor.__delegates_details_url__) - ) - - def get_stake_accounts( - wallet, subtensor - ) -> Dict[str, Dict[str, Union[str, Balance]]]: - """Get stake account details for the given wallet. - - Args: - wallet: The wallet object to fetch the stake account details for. - - Returns: - A dictionary mapping SS58 addresses to their respective stake account details. - """ - - wallet_stake_accounts = {} - - # Get this wallet's coldkey balance. - cold_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - - # Populate the stake accounts with local hotkeys data. - wallet_stake_accounts.update(get_stakes_from_hotkeys(subtensor, wallet)) - - # Populate the stake accounts with delegations data. 
- wallet_stake_accounts.update(get_stakes_from_delegates(subtensor, wallet)) - - return { - "name": wallet.name, - "balance": cold_balance, - "accounts": wallet_stake_accounts, - } - - def get_stakes_from_hotkeys( - subtensor, wallet - ) -> Dict[str, Dict[str, Union[str, Balance]]]: - """Fetch stakes from hotkeys for the provided wallet. - - Args: - wallet: The wallet object to fetch the stakes for. - - Returns: - A dictionary of stakes related to hotkeys. - """ - hotkeys = get_hotkey_wallets_for_wallet(wallet) - stakes = {} - for hot in hotkeys: - emission = sum( - [ - n.emission - for n in subtensor.get_all_neurons_for_pubkey( - hot.hotkey.ss58_address - ) - ] - ) - hotkey_stake = subtensor.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=hot.hotkey.ss58_address, - coldkey_ss58=wallet.coldkeypub.ss58_address, - ) - stakes[hot.hotkey.ss58_address] = { - "name": hot.hotkey_str, - "stake": hotkey_stake, - "rate": emission, - } - return stakes - - def get_stakes_from_delegates( - subtensor, wallet - ) -> Dict[str, Dict[str, Union[str, Balance]]]: - """Fetch stakes from delegates for the provided wallet. - - Args: - wallet: The wallet object to fetch the stakes for. - - Returns: - A dictionary of stakes related to delegates. - """ - delegates = subtensor.get_delegated( - coldkey_ss58=wallet.coldkeypub.ss58_address - ) - stakes = {} - for dele, staked in delegates: - for nom in dele.nominators: - if nom[0] == wallet.coldkeypub.ss58_address: - delegate_name = ( - registered_delegate_info[dele.hotkey_ss58].name - if dele.hotkey_ss58 in registered_delegate_info - else dele.hotkey_ss58 - ) - stakes[dele.hotkey_ss58] = { - "name": delegate_name, - "stake": nom[1], - "rate": dele.total_daily_return.tao - * (nom[1] / dele.total_stake.tao), - } - return stakes - - def get_all_wallet_accounts( - wallets, - subtensor, - ) -> List[Dict[str, Dict[str, Union[str, Balance]]]]: - """Fetch stake accounts for all provided wallets using a ThreadPool. 
- - Args: - wallets: List of wallets to fetch the stake accounts for. - - Returns: - A list of dictionaries, each dictionary containing stake account details for each wallet. - """ - - accounts = [] - # Create a progress bar using tqdm - with tqdm(total=len(wallets), desc="Fetching accounts", ncols=100) as pbar: - for wallet in wallets: - accounts.append(get_stake_accounts(wallet, subtensor)) - pbar.update() - return accounts - - accounts = get_all_wallet_accounts(wallets, subtensor) - - total_stake = 0 - total_balance = 0 - total_rate = 0 - for acc in accounts: - total_balance += acc["balance"].tao - for key, value in acc["accounts"].items(): - total_stake += value["stake"].tao - total_rate += float(value["rate"]) - table = Table(show_footer=True, pad_edge=False, box=None, expand=False) - table.add_column( - "[overline white]Coldkey", footer_style="overline white", style="bold white" - ) - table.add_column( - "[overline white]Balance", - "\u03c4{:.5f}".format(total_balance), - footer_style="overline white", - style="green", - ) - table.add_column( - "[overline white]Account", footer_style="overline white", style="blue" - ) - table.add_column( - "[overline white]Stake", - "\u03c4{:.5f}".format(total_stake), - footer_style="overline white", - style="green", - ) - table.add_column( - "[overline white]Rate", - "\u03c4{:.5f}/d".format(total_rate), - footer_style="overline white", - style="green", - ) - for acc in accounts: - table.add_row(acc["name"], acc["balance"], "", "") - for key, value in acc["accounts"].items(): - table.add_row( - "", "", value["name"], value["stake"], str(value["rate"]) + "/d" - ) - bittensor.__console__.print(table) - - @staticmethod - def check_config(config: "bittensor.config"): - if ( - not config.get("all", d=None) - and not config.is_set("wallet.name") - and not config.no_prompt - ): - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - @staticmethod - def 
add_args(parser: argparse.ArgumentParser): - list_parser = parser.add_parser( - "show", help="""List all stake accounts for wallet.""" - ) - list_parser.add_argument( - "--all", - action="store_true", - help="""Check all coldkey wallets.""", - default=False, - ) - - bittensor.wallet.add_args(list_parser) - bittensor.subtensor.add_args(list_parser) - - -class SetChildKeyTakeCommand: - """ - Executes the ``set_childkey_take`` command to modify your childkey take on a specified subnet on the Bittensor network to the caller. - - This command is used to modify your childkey take on a specified subnet on the Bittensor network. - - Usage: - Users can specify the amount or 'take' for their child hotkeys (``SS58`` address), - the user needs to have access to the ss58 hotkey this call, and the take must be between 0 and 18%. - - The command prompts for confirmation before executing the set_childkey_take operation. - - Example usage:: - - btcli stake set_childkey_take --hotkey --netuid 1 --take 0.18 - """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Set childkey take.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - SetChildKeyTakeCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - console = Console() - wallet = bittensor.wallet(config=cli.config) - - # Get values if not set. 
- exists, netuid = get_netuid(cli, subtensor) - if not exists: - return - - # get parent hotkey - hotkey = get_hotkey(wallet, cli.config) - if not wallet_utils.is_valid_ss58_address(hotkey): - console.print(f":cross_mark:[red] Invalid SS58 address: {hotkey}[/red]") - return - - if not cli.config.is_set("take"): - cli.config.take = Prompt.ask( - "Enter the percentage of take for your child hotkey (between 0 and 0.18 representing 0-18%)" - ) - - # extract take from cli input - try: - take = float(cli.config.take) - except ValueError: - print( - ":cross_mark:[red]Take must be a float value using characters between 0 and 9.[/red]" - ) - return - - if take < 0 or take > 0.18: - console.print( - f":cross_mark:[red]Invalid take: Childkey Take must be between 0 and 0.18 (representing 0% to 18%). Proposed take is {take}.[/red]" - ) - return - - success, message = subtensor.set_childkey_take( - wallet=wallet, - netuid=netuid, - hotkey=hotkey, - take=take, - wait_for_inclusion=cli.config.wait_for_inclusion, - wait_for_finalization=cli.config.wait_for_finalization, - prompt=cli.config.prompt, - ) - - # Result - if success: - console.print(":white_heavy_check_mark: [green]Set childkey take.[/green]") - console.print( - f"The childkey take for {hotkey} is now set to {take * 100:.3f}%." 
- ) - else: - console.print( - f":cross_mark:[red] Unable to set childkey take.[/red] {message}" - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey_or_ss58 = Prompt.ask( - "Enter hotkey name or ss58", default=defaults.wallet.hotkey - ) - if wallet_utils.is_valid_ss58_address(hotkey_or_ss58): - config.ss58 = str(hotkey_or_ss58) - else: - config.wallet.hotkey = str(hotkey_or_ss58) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - set_childkey_take_parser = parser.add_parser( - "set_childkey_take", help="""Set childkey take.""" - ) - set_childkey_take_parser.add_argument( - "--netuid", dest="netuid", type=int, required=False - ) - set_childkey_take_parser.add_argument( - "--hotkey", dest="hotkey", type=str, required=False - ) - set_childkey_take_parser.add_argument( - "--take", dest="take", type=float, required=False - ) - set_childkey_take_parser.add_argument( - "--wait_for_inclusion", - dest="wait_for_inclusion", - action="store_true", - default=True, - help="""Wait for the transaction to be included in a block.""", - ) - set_childkey_take_parser.add_argument( - "--wait_for_finalization", - dest="wait_for_finalization", - action="store_true", - default=True, - help="""Wait for the transaction to be finalized.""", - ) - set_childkey_take_parser.add_argument( - "--prompt", - dest="prompt", - action="store_true", - default=True, - help="""Prompt for confirmation before proceeding.""", - ) - set_childkey_take_parser.add_argument( - "--y", - "--yes", - "--no_prompt", - dest="prompt", - action="store_false", - help="""Disable prompt for confirmation before proceeding. 
Defaults to Yes for all prompts.""", - ) - bittensor.wallet.add_args(set_childkey_take_parser) - bittensor.subtensor.add_args(set_childkey_take_parser) - - -class GetChildKeyTakeCommand: - """ - Executes the ``get_childkey_take`` command to get your childkey take on a specified subnet on the Bittensor network to the caller. - - This command is used to get your childkey take on a specified subnet on the Bittensor network. - - Usage: - Users can get the amount or 'take' for their child hotkeys (``SS58`` address) - - Example usage:: - - btcli stake get_childkey_take --hotkey --netuid 1 - """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Get childkey take.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - GetChildKeyTakeCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - console = Console() - wallet = bittensor.wallet(config=cli.config) - - # Get values if not set. 
- exists, netuid = get_netuid(cli, subtensor) - if not exists: - return - - # get parent hotkey - hotkey = get_hotkey(wallet, cli.config) - if not wallet_utils.is_valid_ss58_address(hotkey): - console.print(f":cross_mark:[red] Invalid SS58 address: {hotkey}[/red]") - return - - take_u16 = subtensor.get_childkey_take( - netuid=netuid, - hotkey=hotkey, - ) - - # Result - if take_u16: - take = u16_to_float(take_u16) - console.print(f"The childkey take for {hotkey} is {take * 100:.3f}%.") - else: - console.print(":cross_mark:[red] Unable to get childkey take.[/red]") - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey_or_ss58 = Prompt.ask( - "Enter hotkey name or ss58", default=defaults.wallet.hotkey - ) - if wallet_utils.is_valid_ss58_address(hotkey_or_ss58): - config.ss58 = str(hotkey_or_ss58) - else: - config.wallet.hotkey = str(hotkey_or_ss58) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - get_childkey_take_parser = parser.add_parser( - "get_childkey_take", help="""Get childkey take.""" - ) - get_childkey_take_parser.add_argument( - "--netuid", dest="netuid", type=int, required=False - ) - get_childkey_take_parser.add_argument( - "--hotkey", dest="hotkey", type=str, required=False - ) - bittensor.wallet.add_args(get_childkey_take_parser) - bittensor.subtensor.add_args(get_childkey_take_parser) - - @staticmethod - def get_take(subtensor, hotkey, netuid) -> float: - """ - Get the take value for a given subtensor, hotkey, and netuid. - - @param subtensor: The subtensor object. - @param hotkey: The hotkey to retrieve the take value for. - @param netuid: The netuid to retrieve the take value for. - - @return: The take value as a float. 
If the take value is not available, it returns 0. - - """ - take_u16 = subtensor.get_childkey_take( - netuid=netuid, - hotkey=hotkey, - ) - if take_u16: - return u16_to_float(take_u16) - else: - return 0 - - -class SetChildrenCommand: - """ - Executes the ``set_children`` command to add children hotkeys on a specified subnet on the Bittensor network to the caller. - - This command is used to delegate authority to different hotkeys, securing their position and influence on the subnet. - - Usage: - Users can specify the amount or 'proportion' to delegate to child hotkeys (``SS58`` address), - the user needs to have sufficient authority to make this call, and the sum of proportions must equal 1, - representing 100% of the proportion allocation. - - The command prompts for confirmation before executing the set_children operation. - - Example usage:: - - btcli stake set_children --children , --hotkey --netuid 1 --proportions 0.4,0.6 - - Note: - This command is critical for users who wish to delegate children hotkeys among different neurons (hotkeys) on the network. - It allows for a strategic allocation of authority to enhance network participation and influence. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Set children hotkeys.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - SetChildrenCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - console = Console() - wallet = bittensor.wallet(config=cli.config) - - # Get values if not set. 
- exists, netuid = get_netuid(cli, subtensor) - if not exists: - return - - # get parent hotkey - hotkey = get_hotkey(wallet, cli.config) - if not wallet_utils.is_valid_ss58_address(hotkey): - console.print(f":cross_mark:[red] Invalid SS58 address: {hotkey}[/red]") - return - - # get current children - curr_children = GetChildrenCommand.retrieve_children( - subtensor=subtensor, - hotkey=hotkey, - netuid=netuid, - render_table=False, - ) - - if curr_children: - # print the table of current children - hotkey_stake = subtensor.get_total_stake_for_hotkey(hotkey) - GetChildrenCommand.render_table( - subtensor=subtensor, - hotkey=hotkey, - hotkey_stake=hotkey_stake, - children=curr_children, - netuid=netuid, - prompt=False, - ) - - # get new children - if not cli.config.is_set("children"): - cli.config.children = Prompt.ask( - "Enter child hotkeys (ss58) as comma-separated values" - ) - proposed_children = [str(x) for x in re.split(r"[ ,]+", cli.config.children)] - - # Set max 5 children - if len(proposed_children) > MAX_CHILDREN: - console.print( - ":cross_mark:[red] Too many children. Maximum 5 children per hotkey[/red]" - ) - return - - # Validate children SS58 addresses - for child in proposed_children: - if not wallet_utils.is_valid_ss58_address(child): - console.print(f":cross_mark:[red] Invalid SS58 address: {child}[/red]") - return - - # get proportions for new children - if not cli.config.is_set("proportions"): - cli.config.proportions = Prompt.ask( - "Enter the percentage of proportion for each child as comma-separated values (total from all children must be less than or equal to 1)" - ) - - # extract proportions and child addresses from cli input - proportions = [ - float(x) for x in re.split(r"[ ,]+", str(cli.config.proportions)) - ] - total_proposed = sum(proportions) - if total_proposed > 1: - console.print( - f":cross_mark:[red]Invalid proportion: The sum of all proportions must be less or equal to than 1 (representing 100% of the allocation). 
Proposed sum addition is proportions is {total_proposed}.[/red]" - ) - return - - if len(proportions) != len(proposed_children): - console.print( - ":cross_mark:[red]Invalid proportion and children length: The count of children and number of proportion values entered do not match.[/red]" - ) - return - - # combine proposed and current children - children_with_proportions = list(zip(proportions, proposed_children)) - - SetChildrenCommand.print_current_stake( - subtensor=subtensor, children=proposed_children, hotkey=hotkey - ) - - success, message = subtensor.set_children( - wallet=wallet, - netuid=netuid, - hotkey=hotkey, - children_with_proportions=children_with_proportions, - wait_for_inclusion=cli.config.wait_for_inclusion, - wait_for_finalization=cli.config.wait_for_finalization, - prompt=cli.config.prompt, - ) - - # Result - if success: - if cli.config.wait_for_finalization and cli.config.wait_for_inclusion: - console.print("New Status:") - GetChildrenCommand.retrieve_children( - subtensor=subtensor, - hotkey=hotkey, - netuid=netuid, - render_table=True, - ) - console.print( - ":white_heavy_check_mark: [green]Set children hotkeys.[/green]" - ) - else: - console.print( - f":cross_mark:[red] Unable to set children hotkeys.[/red] {message}" - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey_or_ss58 = Prompt.ask( - "Enter hotkey name or ss58", default=defaults.wallet.hotkey - ) - if wallet_utils.is_valid_ss58_address(hotkey_or_ss58): - config.ss58 = str(hotkey_or_ss58) - else: - config.wallet.hotkey = str(hotkey_or_ss58) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - set_children_parser = parser.add_parser( - "set_children", help="""Set multiple children hotkeys.""" - ) - 
set_children_parser.add_argument( - "--netuid", dest="netuid", type=int, required=False - ) - set_children_parser.add_argument( - "--children", dest="children", type=str, required=False - ) - set_children_parser.add_argument( - "--hotkey", dest="hotkey", type=str, required=False - ) - set_children_parser.add_argument( - "--proportions", dest="proportions", type=str, required=False - ) - set_children_parser.add_argument( - "--wait_for_inclusion", - dest="wait_for_inclusion", - action="store_true", - default=True, - help="""Wait for the transaction to be included in a block.""", - ) - set_children_parser.add_argument( - "--wait_for_finalization", - dest="wait_for_finalization", - action="store_true", - default=True, - help="""Wait for the transaction to be finalized.""", - ) - set_children_parser.add_argument( - "--prompt", - dest="prompt", - action="store_true", - default=True, - help="""Prompt for confirmation before proceeding.""", - ) - set_children_parser.add_argument( - "--y", - "--yes", - "--no_prompt", - dest="prompt", - action="store_false", - help="""Disable prompt for confirmation before proceeding. Defaults to Yes for all prompts.""", - ) - bittensor.wallet.add_args(set_children_parser) - bittensor.subtensor.add_args(set_children_parser) - - @staticmethod - def print_current_stake(subtensor, children, hotkey): - console = Console() - parent_stake = subtensor.get_total_stake_for_hotkey(ss58_address=hotkey) - console.print("Current Status:") - console.print(f"My Hotkey: {hotkey} | ", style="cyan", end="", no_wrap=True) - console.print(f"Total Stake: {parent_stake}τ") - for child in children: - child_stake = subtensor.get_total_stake_for_hotkey(child) - console.print( - f"Child Hotkey: {child} | Current Child Stake: {child_stake}τ" - ) - - -class GetChildrenCommand: - """ - Executes the ``get_children_info`` command to get all child hotkeys on a specified subnet on the Bittensor network. 
- - This command is used to view delegated authority to different hotkeys on the subnet. - - Usage: - Users can specify the subnet and see the children and the proportion that is given to them. - - The command compiles a table showing: - - - ChildHotkey: The hotkey associated with the child. - - ParentHotKey: The hotkey associated with the parent. - - Proportion: The proportion that is assigned to them. - - Expiration: The expiration of the hotkey. - - Example usage:: - - btcli stake get_children --netuid 1 - - Note: - This command is for users who wish to see child hotkeys among different neurons (hotkeys) on the network. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Get children hotkeys.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - return GetChildrenCommand._run(cli, subtensor) - except Exception as e: - console = Console() - console.print(f":cross_mark:[red] An error occurred: {str(e)}[/red]") - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - console = Console() - wallet = bittensor.wallet(config=cli.config) - - # check all - if cli.config.is_set("all"): - cli.config.netuid = None - cli.config.all = True - elif cli.config.is_set("netuid"): - if cli.config.netuid == "all": - cli.config.all = True - else: - cli.config.netuid = int(cli.config.netuid) - exists, netuid = get_netuid(cli, subtensor) - if not exists: - return - else: - netuid_input = Prompt.ask("Enter netuid or 'all'", default="all") - if netuid_input == "all": - cli.config.netuid = None - cli.config.all = True - else: - cli.config.netuid = int(netuid_input) - exists, netuid = get_netuid(cli, subtensor, False) - if not exists: - return - - # get parent hotkey - hotkey = get_hotkey(wallet, cli.config) - if not wallet_utils.is_valid_ss58_address(hotkey): - 
console.print(f":cross_mark:[red] Invalid SS58 address: {hotkey}[/red]") - return - - try: - netuids = subtensor.get_all_subnet_netuids() if cli.config.all else [netuid] - hotkey_stake = GetChildrenCommand.get_parent_stake_info( - console, subtensor, hotkey - ) - for netuid in netuids: - children = subtensor.get_children(hotkey, netuid) - if children: - GetChildrenCommand.render_table( - subtensor, - hotkey, - hotkey_stake, - children, - netuid, - not cli.config.is_set("all"), - ) - except Exception as e: - console.print( - f":cross_mark:[red] An error occurred while retrieving children: {str(e)}[/red]" - ) - return - - return children - - @staticmethod - def get_parent_stake_info(console, subtensor, hotkey): - hotkey_stake = subtensor.get_total_stake_for_hotkey(hotkey) - console.print( - f"\nYour Hotkey: {hotkey} | ", style="cyan", end="", no_wrap=True - ) - console.print(f"Total Stake: {hotkey_stake}τ") - return hotkey_stake - - @staticmethod - def retrieve_children( - subtensor: "bittensor.subtensor", hotkey: str, netuid: int, render_table: bool - ) -> list[tuple[int, str]]: - """ - - Static method to retrieve children for a given subtensor. - - Args: - subtensor (bittensor.subtensor): The subtensor object used to interact with the Bittensor network. - hotkey (str): The hotkey of the parent. - netuid (int): The network unique identifier of the subtensor. - render_table (bool): Flag indicating whether to render the retrieved children in a table. - - Returns: - List[str]: A list of children hotkeys. 
- - """ - try: - children = subtensor.get_children(hotkey, netuid) - if render_table: - hotkey_stake = subtensor.get_total_stake_for_hotkey(hotkey) - GetChildrenCommand.render_table( - subtensor, hotkey, hotkey_stake, children, netuid, False - ) - return children - except Exception as e: - console = Console() - console.print( - f":cross_mark:[red] An error occurred while retrieving children: {str(e)}[/red]" - ) - return [] - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey_or_ss58 = Prompt.ask( - "Enter hotkey name or ss58", default=defaults.wallet.hotkey - ) - if wallet_utils.is_valid_ss58_address(hotkey_or_ss58): - config.ss58 = str(hotkey_or_ss58) - else: - config.wallet.hotkey = str(hotkey_or_ss58) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser( - "get_children", help="""Get child hotkeys on subnet.""" - ) - parser.add_argument("--netuid", dest="netuid", type=str, required=False) - parser.add_argument("--hotkey", dest="hotkey", type=str, required=False) - parser.add_argument( - "--all", - dest="all", - action="store_true", - help="Retrieve children from all subnets.", - ) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - @staticmethod - def render_table( - subtensor: "bittensor.subtensor", - hotkey: str, - hotkey_stake: "Balance", - children: list[Tuple[int, str]], - netuid: int, - prompt: bool, - ): - """ - - Render a table displaying information about child hotkeys on a particular subnet. - - Parameters: - - subtensor: An instance of the "bittensor.subtensor" class. - - hotkey: The hotkey of the parent node. - - children: A list of tuples containing information about child hotkeys. 
Each tuple should contain: - - The proportion of the child's stake relative to the total stake. - - The hotkey of the child node. - - netuid: The ID of the subnet. - - prompt: A boolean indicating whether to display a prompt for adding a child hotkey. - - Returns: - None - - Example Usage: - subtensor = bittensor.subtensor_instance - hotkey = "parent_hotkey" - children = [(0.5, "child1_hotkey"), (0.3, "child2_hotkey"), (0.2, "child3_hotkey")] - netuid = 1234 - prompt = True - render_table(subtensor, hotkey, children, netuid, prompt) - - """ - console = Console() - - # Initialize Rich table for pretty printing - table = Table( - show_header=True, - header_style="bold magenta", - border_style="blue", - style="dim", - ) - - # Add columns to the table with specific styles - table.add_column("Index", style="bold yellow", no_wrap=True, justify="center") - table.add_column("Child Hotkey", style="bold green") - table.add_column("Proportion", style="bold cyan", no_wrap=True, justify="right") - table.add_column( - "Childkey Take", style="bold blue", no_wrap=True, justify="right" - ) - table.add_column( - "Current Stake Weight", style="bold red", no_wrap=True, justify="right" - ) - - if not children: - console.print(table) - console.print( - f"[bold white]There are currently no child hotkeys on subnet {netuid} with Parent HotKey {hotkey}.[/bold white]" - ) - if prompt: - command = f"btcli stake set_children --children --hotkey --netuid {netuid} --proportion " - console.print( - f"[bold cyan]To add a child hotkey you can run the command: [white]{command}[/white][/bold cyan]" - ) - return - - console.print(f"\nChildren for netuid: {netuid} ", style="cyan") - - # calculate totals - total_proportion = 0 - total_stake = 0 - total_stake_weight = 0 - avg_take = 0 - - children_info = [] - for child in children: - proportion = child[0] - child_hotkey = child[1] - child_stake = subtensor.get_total_stake_for_hotkey( - ss58_address=child_hotkey - ) or Balance(0) - - child_take = 
subtensor.get_childkey_take(child_hotkey, netuid) - child_take = u16_to_float(child_take) - - # add to totals - total_stake += child_stake.tao - avg_take += child_take - - proportion = u64_to_float(proportion) - - children_info.append((proportion, child_hotkey, child_stake, child_take)) - - children_info.sort( - key=lambda x: x[0], reverse=True - ) # sorting by proportion (highest first) - - # add the children info to the table - for i, (proportion, hotkey, stake, child_take) in enumerate(children_info, 1): - proportion_percent = proportion * 100 # Proportion in percent - proportion_tao = hotkey_stake.tao * proportion # Proportion in TAO - - total_proportion += proportion_percent - - # Conditionally format text - proportion_str = f"{proportion_percent:.3f}% ({proportion_tao:.3f}τ)" - stake_weight = stake.tao + proportion_tao - total_stake_weight += stake_weight - take_str = f"{child_take * 100:.3f}%" - - hotkey = Text(hotkey, style="italic red" if proportion == 0 else "") - table.add_row( - str(i), - hotkey, - proportion_str, - take_str, - str(f"{stake_weight:.3f}"), - ) - - avg_take = avg_take / len(children_info) - - # add totals row - table.add_row( - "", - "[dim]Total[/dim]", - f"[dim]{total_proportion:.3f}%[/dim]", - f"[dim](avg) {avg_take * 100:.3f}%[/dim]", - f"[dim]{total_stake_weight:.3f}τ[/dim]", - style="dim", - ) - console.print(table) diff --git a/bittensor/commands/transfer.py b/bittensor/commands/transfer.py deleted file mode 100644 index 24c6e78402..0000000000 --- a/bittensor/commands/transfer.py +++ /dev/null @@ -1,133 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the 
Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import sys -import argparse -import bittensor -from rich.prompt import Prompt -from . import defaults - -console = bittensor.__console__ - - -class TransferCommand: - """ - Executes the ``transfer`` command to transfer TAO tokens from one account to another on the Bittensor network. - - This command is used for transactions between different accounts, enabling users to send tokens to other participants on the network. - - Usage: - The command requires specifying the destination address (public key) and the amount of TAO to be transferred. - It checks for sufficient balance and prompts for confirmation before proceeding with the transaction. - - Optional arguments: - - ``--dest`` (str): The destination address for the transfer. This can be in the form of an SS58 or ed2519 public key. - - ``--amount`` (float): The amount of TAO tokens to transfer. - - The command displays the user's current balance before prompting for the amount to transfer, ensuring transparency and accuracy in the transaction. - - Example usage:: - - btcli wallet transfer --dest 5Dp8... --amount 100 - - Note: - This command is crucial for executing token transfers within the Bittensor network. 
Users should verify the destination address and amount before confirming the transaction to avoid errors or loss of funds. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Transfer token of amount to destination.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - TransferCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Transfer token of amount to destination.""" - wallet = bittensor.wallet(config=cli.config) - subtensor.transfer( - wallet=wallet, - dest=cli.config.dest, - amount=cli.config.amount, - wait_for_inclusion=True, - prompt=not cli.config.no_prompt, - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - # Get destination. - if not config.dest and not config.no_prompt: - dest = Prompt.ask("Enter destination public key: (ss58 or ed2519)") - if not bittensor.utils.is_valid_bittensor_address_or_public_key(dest): - sys.exit() - else: - config.dest = str(dest) - - # Get current balance and print to user. - if not config.no_prompt: - wallet = bittensor.wallet(config=config) - subtensor = bittensor.subtensor(config=config, log_verbose=False) - with bittensor.__console__.status(":satellite: Checking Balance..."): - account_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - bittensor.__console__.print( - "Balance: [green]{}[/green]".format(account_balance) - ) - - # Get amount. 
- if not config.get("amount"): - if not config.no_prompt: - amount = Prompt.ask("Enter TAO amount to transfer") - try: - config.amount = float(amount) - except ValueError: - console.print( - ":cross_mark:[red] Invalid TAO amount[/red] [bold white]{}[/bold white]".format( - amount - ) - ) - sys.exit() - else: - console.print( - ":cross_mark:[red] Invalid TAO amount[/red] [bold white]{}[/bold white]".format( - amount - ) - ) - sys.exit(1) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - transfer_parser = parser.add_parser( - "transfer", help="""Transfer Tao between accounts.""" - ) - transfer_parser.add_argument("--dest", dest="dest", type=str, required=False) - transfer_parser.add_argument( - "--amount", dest="amount", type=float, required=False - ) - - bittensor.wallet.add_args(transfer_parser) - bittensor.subtensor.add_args(transfer_parser) diff --git a/bittensor/commands/unstake.py b/bittensor/commands/unstake.py deleted file mode 100644 index 291aeb6e9a..0000000000 --- a/bittensor/commands/unstake.py +++ /dev/null @@ -1,443 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import sys -import argparse -from typing import List, Union, Optional, Tuple - -from rich.prompt import Confirm, Prompt -from tqdm import tqdm - -import bittensor -from bittensor.utils.balance import Balance -from . import defaults, GetChildrenCommand -from .utils import get_hotkey_wallets_for_wallet -from ..utils import wallet_utils - -console = bittensor.__console__ - - -class UnStakeCommand: - """ - Executes the ``remove`` command to unstake TAO tokens from one or more hotkeys and transfer them back to the user's coldkey on the Bittensor network. - - This command is used to withdraw tokens previously staked to different hotkeys. - - Usage: - Users can specify the amount to unstake, the hotkeys to unstake from (either by name or ``SS58`` address), and whether to unstake from all hotkeys. The command checks for sufficient stake and prompts for confirmation before proceeding with the unstaking process. - - Optional arguments: - - ``--all`` (bool): When set, unstakes all staked tokens from the specified hotkeys. - - ``--amount`` (float): The amount of TAO tokens to unstake. - - --hotkey_ss58address (str): The SS58 address of the hotkey to unstake from. - - ``--max_stake`` (float): Sets the maximum amount of TAO to remain staked in each hotkey. - - ``--hotkeys`` (list): Specifies hotkeys by name or SS58 address to unstake from. - - ``--all_hotkeys`` (bool): When set, unstakes from all hotkeys associated with the wallet, excluding any specified in --hotkeys. - - The command prompts for confirmation before executing the unstaking operation. 
- - Example usage:: - - btcli stake remove --amount 100 --hotkeys hk1,hk2 - - Note: - This command is important for users who wish to reallocate their stakes or withdraw them from the network. - It allows for flexible management of token stakes across different neurons (hotkeys) on the network. - """ - - @classmethod - def check_config(cls, config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if ( - not config.get("hotkey_ss58address", d=None) - and not config.is_set("wallet.hotkey") - and not config.no_prompt - and not config.get("all_hotkeys") - and not config.get("hotkeys") - ): - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - # Get amount. - if ( - not config.get("hotkey_ss58address") - and not config.get("amount") - and not config.get("unstake_all") - and not config.get("max_stake") - ): - hotkeys: str = "" - if config.get("all_hotkeys"): - hotkeys = "all hotkeys" - elif config.get("hotkeys"): - hotkeys = str(config.hotkeys).replace("[", "").replace("]", "") - else: - hotkeys = str(config.wallet.hotkey) - if config.no_prompt: - config.unstake_all = True - else: - # I really don't like this logic flow. It can be a bit confusing to read for something - # as serious as unstaking all. 
- if Confirm.ask(f"Unstake all Tao from: [bold]'{hotkeys}'[/bold]?"): - config.unstake_all = True - else: - config.unstake_all = False - amount = Prompt.ask("Enter Tao amount to unstake") - try: - config.amount = float(amount) - except ValueError: - console.print( - f":cross_mark:[red] Invalid Tao amount[/red] [bold white]{amount}[/bold white]" - ) - sys.exit() - - @staticmethod - def add_args(command_parser): - unstake_parser = command_parser.add_parser( - "remove", - help="""Remove stake from the specified hotkey into the coldkey balance.""", - ) - unstake_parser.add_argument( - "--all", dest="unstake_all", action="store_true", default=False - ) - unstake_parser.add_argument( - "--amount", dest="amount", type=float, required=False - ) - unstake_parser.add_argument( - "--hotkey_ss58address", dest="hotkey_ss58address", type=str, required=False - ) - unstake_parser.add_argument( - "--max_stake", - dest="max_stake", - type=float, - required=False, - action="store", - default=None, - help="""Specify the maximum amount of Tao to have staked in each hotkey.""", - ) - unstake_parser.add_argument( - "--hotkeys", - "--exclude_hotkeys", - "--wallet.hotkeys", - "--wallet.exclude_hotkeys", - required=False, - action="store", - default=[], - type=str, - nargs="*", - help="""Specify the hotkeys by name or ss58 address. (e.g. hk1 hk2 hk3)""", - ) - unstake_parser.add_argument( - "--all_hotkeys", - "--wallet.all_hotkeys", - required=False, - action="store_true", - default=False, - help="""To specify all hotkeys. 
Specifying hotkeys will exclude them from this all.""", - ) - bittensor.wallet.add_args(unstake_parser) - bittensor.subtensor.add_args(unstake_parser) - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Unstake token of amount from hotkey(s).""" - try: - config = cli.config.copy() - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=config, log_verbose=False - ) - UnStakeCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Unstake token of amount from hotkey(s).""" - config = cli.config.copy() - wallet = bittensor.wallet(config=config) - - # Get the hotkey_names (if any) and the hotkey_ss58s. - hotkeys_to_unstake_from: List[Tuple[Optional[str], str]] = [] - if cli.config.get("hotkey_ss58address"): - # Stake to specific hotkey. - hotkeys_to_unstake_from = [(None, cli.config.get("hotkey_ss58address"))] - elif cli.config.get("all_hotkeys"): - # Stake to all hotkeys. - all_hotkeys: List[bittensor.wallet] = get_hotkey_wallets_for_wallet( - wallet=wallet - ) - # Get the hotkeys to exclude. (d)efault to no exclusions. - hotkeys_to_exclude: List[str] = cli.config.get("hotkeys", d=[]) - # Exclude hotkeys that are specified. - hotkeys_to_unstake_from = [ - (wallet.hotkey_str, wallet.hotkey.ss58_address) - for wallet in all_hotkeys - if wallet.hotkey_str not in hotkeys_to_exclude - ] # definitely wallets - - elif cli.config.get("hotkeys"): - # Stake to specific hotkeys. - for hotkey_ss58_or_hotkey_name in cli.config.get("hotkeys"): - if bittensor.utils.is_valid_ss58_address(hotkey_ss58_or_hotkey_name): - # If the hotkey is a valid ss58 address, we add it to the list. - hotkeys_to_unstake_from.append((None, hotkey_ss58_or_hotkey_name)) - else: - # If the hotkey is not a valid ss58 address, we assume it is a hotkey name. 
- # We then get the hotkey from the wallet and add it to the list. - wallet_ = bittensor.wallet( - config=cli.config, hotkey=hotkey_ss58_or_hotkey_name - ) - hotkeys_to_unstake_from.append( - (wallet_.hotkey_str, wallet_.hotkey.ss58_address) - ) - elif cli.config.wallet.get("hotkey"): - # Only cli.config.wallet.hotkey is specified. - # so we stake to that single hotkey. - hotkey_ss58_or_name = cli.config.wallet.get("hotkey") - if bittensor.utils.is_valid_ss58_address(hotkey_ss58_or_name): - hotkeys_to_unstake_from = [(None, hotkey_ss58_or_name)] - else: - # Hotkey is not a valid ss58 address, so we assume it is a hotkey name. - wallet_ = bittensor.wallet( - config=cli.config, hotkey=hotkey_ss58_or_name - ) - hotkeys_to_unstake_from = [ - (wallet_.hotkey_str, wallet_.hotkey.ss58_address) - ] - else: - # Only cli.config.wallet.hotkey is specified. - # so we stake to that single hotkey. - assert cli.config.wallet.hotkey is not None - hotkeys_to_unstake_from = [ - (None, bittensor.wallet(config=cli.config).hotkey.ss58_address) - ] - - final_hotkeys: List[Tuple[str, str]] = [] - final_amounts: List[Union[float, Balance]] = [] - for hotkey in tqdm(hotkeys_to_unstake_from): - hotkey: Tuple[Optional[str], str] # (hotkey_name (or None), hotkey_ss58) - unstake_amount_tao: float = cli.config.get( - "amount" - ) # The amount specified to unstake. - hotkey_stake: Balance = subtensor.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=hotkey[1], coldkey_ss58=wallet.coldkeypub.ss58_address - ) - if unstake_amount_tao == None: - unstake_amount_tao = hotkey_stake.tao - if cli.config.get("max_stake"): - # Get the current stake of the hotkey from this coldkey. - unstake_amount_tao: float = hotkey_stake.tao - cli.config.get( - "max_stake" - ) - cli.config.amount = unstake_amount_tao - if unstake_amount_tao < 0: - # Skip if max_stake is greater than current stake. - continue - else: - if unstake_amount_tao is not None: - # There is a specified amount to unstake. 
- if unstake_amount_tao > hotkey_stake.tao: - # Skip if the specified amount is greater than the current stake. - continue - - final_amounts.append(unstake_amount_tao) - final_hotkeys.append(hotkey) # add both the name and the ss58 address. - - if len(final_hotkeys) == 0: - # No hotkeys to unstake from. - bittensor.__console__.print( - "Not enough stake to unstake from any hotkeys or max_stake is more than current stake." - ) - return None - - # Ask to unstake - if not cli.config.no_prompt: - if not Confirm.ask( - f"Do you want to unstake from the following keys to {wallet.name}:\n" - + "".join( - [ - f" [bold white]- {hotkey[0] + ':' if hotkey[0] else ''}{hotkey[1]}: {f'{amount} {bittensor.__tao_symbol__}' if amount else 'All'}[/bold white]\n" - for hotkey, amount in zip(final_hotkeys, final_amounts) - ] - ) - ): - return None - - if len(final_hotkeys) == 1: - # do regular unstake - return subtensor.unstake( - wallet=wallet, - hotkey_ss58=final_hotkeys[0][1], - amount=None if cli.config.get("unstake_all") else final_amounts[0], - wait_for_inclusion=True, - prompt=not cli.config.no_prompt, - ) - - subtensor.unstake_multiple( - wallet=wallet, - hotkey_ss58s=[hotkey_ss58 for _, hotkey_ss58 in final_hotkeys], - amounts=None if cli.config.get("unstake_all") else final_amounts, - wait_for_inclusion=True, - prompt=False, - ) - - -class RevokeChildrenCommand: - """ - Executes the ``revoke_children`` command to remove all children hotkeys on a specified subnet on the Bittensor network. - - This command is used to remove delegated authority from all child hotkeys, removing their position and influence on the subnet. - - Usage: - Users need to specify the parent hotkey and the subnet ID (netuid). - The user needs to have sufficient authority to make this call. - - The command prompts for confirmation before executing the revoke_children operation. 
- - Example usage:: - - btcli stake revoke_children --hotkey --netuid 1 - - Note: - This command is critical for users who wish to remove children hotkeys on the network. - It allows for a complete removal of delegated authority to enhance network participation and influence. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Revokes all children hotkeys.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - RevokeChildrenCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - wallet = bittensor.wallet(config=cli.config) - - # Get values if not set. - if not cli.config.is_set("netuid"): - cli.config.netuid = int(Prompt.ask("Enter netuid")) - - netuid = cli.config.netuid - total_subnets = subtensor.get_total_subnets() - if total_subnets is not None and not (0 <= netuid < total_subnets): - console.print("Netuid is outside the current subnet range") - return - - # get parent hotkey - if wallet and wallet.hotkey: - hotkey = wallet.hotkey.ss58_address - elif cli.config.is_set("hotkey"): - hotkey = cli.config.hotkey - elif cli.config.is_set("ss58"): - hotkey = cli.config.ss58 - else: - hotkey = Prompt.ask("Enter parent hotkey (ss58)") - - if not wallet_utils.is_valid_ss58_address(hotkey): - console.print(f":cross_mark:[red] Invalid SS58 address: {hotkey}[/red]") - return - - success, message = subtensor.set_children( - wallet=wallet, - netuid=netuid, - children_with_proportions=[], - hotkey=hotkey, - wait_for_inclusion=cli.config.wait_for_inclusion, - wait_for_finalization=cli.config.wait_for_finalization, - prompt=cli.config.prompt, - ) - - # Result - if success: - if cli.config.wait_for_finalization and cli.config.wait_for_inclusion: - GetChildrenCommand.retrieve_children( - subtensor=subtensor, - hotkey=hotkey, - 
netuid=netuid, - render_table=True, - ) - console.print( - ":white_heavy_check_mark: [green]Revoked all children hotkeys.[/green]" - ) - else: - console.print( - f":cross_mark:[red] Unable to revoke children hotkeys.[/red] {message}" - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey_or_ss58 = Prompt.ask( - "Enter hotkey name or ss58", default=defaults.wallet.hotkey - ) - if wallet_utils.is_valid_ss58_address(hotkey_or_ss58): - config.ss58 = str(hotkey_or_ss58) - else: - config.wallet.hotkey = str(hotkey_or_ss58) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser( - "revoke_children", help="""Revoke all children hotkeys.""" - ) - parser.add_argument("--netuid", dest="netuid", type=int, required=False) - parser.add_argument("--hotkey", dest="hotkey", type=str, required=False) - parser.add_argument( - "--wait_for_inclusion", - dest="wait_for_inclusion", - action="store_true", - default=True, - help="""Wait for the transaction to be included in a block.""", - ) - parser.add_argument( - "--wait_for_finalization", - dest="wait_for_finalization", - action="store_true", - default=True, - help="""Wait for the transaction to be finalized.""", - ) - parser.add_argument( - "--prompt", - dest="prompt", - action="store_true", - default=True, - help="""Prompt for confirmation before proceeding.""", - ) - parser.add_argument( - "--y", - "--yes", - "--no_prompt", - dest="prompt", - action="store_false", - help="""Disable prompt for confirmation before proceeding. 
Defaults to Yes for all prompts.""", - ) - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) diff --git a/bittensor/commands/utils.py b/bittensor/commands/utils.py deleted file mode 100644 index 661cd818cc..0000000000 --- a/bittensor/commands/utils.py +++ /dev/null @@ -1,283 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import sys -import os -import bittensor -import requests -from bittensor.utils.registration import torch -from bittensor.utils.balance import Balance -from bittensor.utils import U64_NORMALIZED_FLOAT, U16_NORMALIZED_FLOAT -from typing import List, Dict, Any, Optional, Tuple -from rich.prompt import Confirm, PromptBase -from dataclasses import dataclass -from . 
import defaults - -console = bittensor.__console__ - - -class IntListPrompt(PromptBase): - """Prompt for a list of integers.""" - - def check_choice(self, value: str) -> bool: - assert self.choices is not None - # check if value is a valid choice or all the values in a list of ints are valid choices - return ( - value == "All" - or value in self.choices - or all( - val.strip() in self.choices for val in value.replace(",", " ").split() - ) - ) - - -def check_netuid_set( - config: "bittensor.config", - subtensor: "bittensor.subtensor", - allow_none: bool = False, -): - if subtensor.network != "nakamoto": - all_netuids = [str(netuid) for netuid in subtensor.get_subnets()] - if len(all_netuids) == 0: - console.print(":cross_mark:[red]There are no open networks.[/red]") - sys.exit() - - # Make sure netuid is set. - if not config.is_set("netuid"): - if not config.no_prompt: - netuid = IntListPrompt.ask( - "Enter netuid", choices=all_netuids, default=str(all_netuids[0]) - ) - else: - netuid = str(defaults.netuid) if not allow_none else "None" - else: - netuid = config.netuid - - if isinstance(netuid, str) and netuid.lower() in ["none"] and allow_none: - config.netuid = None - else: - if isinstance(netuid, list): - netuid = netuid[0] - try: - config.netuid = int(netuid) - except: - raise ValueError('netuid must be an integer or "None" (if applicable)') - - -def check_for_cuda_reg_config(config: "bittensor.config") -> None: - """Checks, when CUDA is available, if the user would like to register with their CUDA device.""" - if torch and torch.cuda.is_available(): - if not config.no_prompt: - if config.pow_register.cuda.get("use_cuda") is None: # flag not set - # Ask about cuda registration only if a CUDA device is available. - cuda = Confirm.ask("Detected CUDA device, use CUDA for registration?\n") - config.pow_register.cuda.use_cuda = cuda - - # Only ask about which CUDA device if the user has more than one CUDA device. 
- if ( - config.pow_register.cuda.use_cuda - and config.pow_register.cuda.get("dev_id") is None - ): - devices: List[str] = [str(x) for x in range(torch.cuda.device_count())] - device_names: List[str] = [ - torch.cuda.get_device_name(x) - for x in range(torch.cuda.device_count()) - ] - console.print("Available CUDA devices:") - choices_str: str = "" - for i, device in enumerate(devices): - choices_str += " {}: {}\n".format(device, device_names[i]) - console.print(choices_str) - dev_id = IntListPrompt.ask( - "Which GPU(s) would you like to use? Please list one, or comma-separated", - choices=devices, - default="All", - ) - if dev_id.lower() == "all": - dev_id = list(range(torch.cuda.device_count())) - else: - try: - # replace the commas with spaces then split over whitespace., - # then strip the whitespace and convert to ints. - dev_id = [ - int(dev_id.strip()) - for dev_id in dev_id.replace(",", " ").split() - ] - except ValueError: - console.log( - ":cross_mark:[red]Invalid GPU device[/red] [bold white]{}[/bold white]\nAvailable CUDA devices:{}".format( - dev_id, choices_str - ) - ) - sys.exit(1) - config.pow_register.cuda.dev_id = dev_id - else: - # flag was not set, use default value. 
- if config.pow_register.cuda.get("use_cuda") is None: - config.pow_register.cuda.use_cuda = defaults.pow_register.cuda.use_cuda - - -def get_hotkey_wallets_for_wallet(wallet) -> List["bittensor.wallet"]: - hotkey_wallets = [] - hotkeys_path = wallet.path + "/" + wallet.name + "/hotkeys" - try: - hotkey_files = next(os.walk(os.path.expanduser(hotkeys_path)))[2] - except StopIteration: - hotkey_files = [] - for hotkey_file_name in hotkey_files: - try: - hotkey_for_name = bittensor.wallet( - path=wallet.path, name=wallet.name, hotkey=hotkey_file_name - ) - if ( - hotkey_for_name.hotkey_file.exists_on_device() - and not hotkey_for_name.hotkey_file.is_encrypted() - ): - hotkey_wallets.append(hotkey_for_name) - except Exception: - pass - return hotkey_wallets - - -def get_coldkey_wallets_for_path(path: str) -> List["bittensor.wallet"]: - try: - wallet_names = next(os.walk(os.path.expanduser(path)))[1] - return [bittensor.wallet(path=path, name=name) for name in wallet_names] - except StopIteration: - # No wallet files found. 
- wallets = [] - return wallets - - -def get_all_wallets_for_path(path: str) -> List["bittensor.wallet"]: - all_wallets = [] - cold_wallets = get_coldkey_wallets_for_path(path) - for cold_wallet in cold_wallets: - if ( - cold_wallet.coldkeypub_file.exists_on_device() - and not cold_wallet.coldkeypub_file.is_encrypted() - ): - all_wallets.extend(get_hotkey_wallets_for_wallet(cold_wallet)) - return all_wallets - - -def filter_netuids_by_registered_hotkeys( - cli, subtensor, netuids, all_hotkeys -) -> List[int]: - netuids_with_registered_hotkeys = [] - for wallet in all_hotkeys: - netuids_list = subtensor.get_netuids_for_hotkey(wallet.hotkey.ss58_address) - bittensor.logging.debug( - f"Hotkey {wallet.hotkey.ss58_address} registered in netuids: {netuids_list}" - ) - netuids_with_registered_hotkeys.extend(netuids_list) - - if not cli.config.netuids: - netuids = netuids_with_registered_hotkeys - - else: - netuids = [netuid for netuid in netuids if netuid in cli.config.netuids] - netuids.extend(netuids_with_registered_hotkeys) - - return list(set(netuids)) - - -def normalize_hyperparameters( - subnet: bittensor.SubnetHyperparameters, -) -> List[Tuple[str, str, str]]: - """ - Normalizes the hyperparameters of a subnet. - - Args: - subnet: The subnet hyperparameters object. - - Returns: - A list of tuples containing the parameter name, value, and normalized value. 
- """ - param_mappings = { - "adjustment_alpha": U64_NORMALIZED_FLOAT, - "min_difficulty": U64_NORMALIZED_FLOAT, - "max_difficulty": U64_NORMALIZED_FLOAT, - "difficulty": U64_NORMALIZED_FLOAT, - "bonds_moving_avg": U64_NORMALIZED_FLOAT, - "max_weight_limit": U16_NORMALIZED_FLOAT, - "kappa": U16_NORMALIZED_FLOAT, - "alpha_high": U16_NORMALIZED_FLOAT, - "alpha_low": U16_NORMALIZED_FLOAT, - "min_burn": Balance.from_rao, - "max_burn": Balance.from_rao, - } - - normalized_values: List[Tuple[str, str, str]] = [] - subnet_dict = subnet.__dict__ - - for param, value in subnet_dict.items(): - try: - if param in param_mappings: - norm_value = param_mappings[param](value) - if isinstance(norm_value, float): - norm_value = f"{norm_value:.{10}g}" - else: - norm_value = value - except Exception as e: - bittensor.logging.warning(f"Error normalizing parameter '{param}': {e}") - norm_value = "-" - - normalized_values.append((param, str(value), str(norm_value))) - - return normalized_values - - -@dataclass -class DelegatesDetails: - name: str - url: str - description: str - signature: str - - @classmethod - def from_json(cls, json: Dict[str, any]) -> "DelegatesDetails": - return cls( - name=json["name"], - url=json["url"], - description=json["description"], - signature=json["signature"], - ) - - -def _get_delegates_details_from_github( - requests_get, url: str -) -> Dict[str, DelegatesDetails]: - response = requests_get(url) - - if response.status_code == 200: - all_delegates: Dict[str, Any] = response.json() - all_delegates_details = {} - for delegate_hotkey, delegates_details in all_delegates.items(): - all_delegates_details[delegate_hotkey] = DelegatesDetails.from_json( - delegates_details - ) - return all_delegates_details - else: - return {} - - -def get_delegates_details(url: str) -> Optional[Dict[str, DelegatesDetails]]: - try: - return _get_delegates_details_from_github(requests.get, url) - except Exception: - return None # Fail silently diff --git 
a/bittensor/commands/wallets.py b/bittensor/commands/wallets.py deleted file mode 100644 index 15819ece7b..0000000000 --- a/bittensor/commands/wallets.py +++ /dev/null @@ -1,1101 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import argparse -import os -import sys -from typing import List, Optional, Tuple - -import requests -from rich.prompt import Confirm, Prompt -from rich.table import Table - -import bittensor - -from ..utils import RAOPERTAO -from . import defaults - - -class RegenColdkeyCommand: - """ - Executes the ``regen_coldkey`` command to regenerate a coldkey for a wallet on the Bittensor network. - - This command is used to create a new coldkey from an existing mnemonic, seed, or JSON file. - - Usage: - Users can specify a mnemonic, a seed string, or a JSON file path to regenerate a coldkey. 
- The command supports optional password protection for the generated key and can overwrite an existing coldkey. - - Optional arguments: - - ``--mnemonic`` (str): A mnemonic phrase used to regenerate the key. - - ``--seed`` (str): A seed hex string used for key regeneration. - - ``--json`` (str): Path to a JSON file containing an encrypted key backup. - - ``--json_password`` (str): Password to decrypt the JSON file. - - ``--use_password`` (bool): Enables password protection for the generated key. - - ``--overwrite_coldkey`` (bool): Overwrites the existing coldkey with the new one. - - Example usage:: - - btcli wallet regen_coldkey --mnemonic "word1 word2 ... word12" - - Note: - This command is critical for users who need to regenerate their coldkey, possibly for recovery or security reasons. - It should be used with caution to avoid overwriting existing keys unintentionally. - """ - - def run(cli): - r"""Creates a new coldkey under this wallet.""" - wallet = bittensor.wallet(config=cli.config) - - json_str: Optional[str] = None - json_password: Optional[str] = None - if cli.config.get("json"): - file_name: str = cli.config.get("json") - if not os.path.exists(file_name) or not os.path.isfile(file_name): - raise ValueError("File {} does not exist".format(file_name)) - with open(cli.config.get("json"), "r") as f: - json_str = f.read() - # Password can be "", assume if None - json_password = cli.config.get("json_password", "") - wallet.regenerate_coldkey( - mnemonic=cli.config.mnemonic, - seed=cli.config.seed, - json=(json_str, json_password), - use_password=cli.config.use_password, - overwrite=cli.config.overwrite_coldkey, - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if ( - config.mnemonic == None - and config.get("seed", d=None) == None - and config.get("json", 
d=None) == None - ): - prompt_answer = Prompt.ask("Enter mnemonic, seed, or json file location") - if prompt_answer.startswith("0x"): - config.seed = prompt_answer - elif len(prompt_answer.split(" ")) > 1: - config.mnemonic = prompt_answer - else: - config.json = prompt_answer - - if config.get("json", d=None) and config.get("json_password", d=None) == None: - config.json_password = Prompt.ask( - "Enter json backup password", password=True - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - regen_coldkey_parser = parser.add_parser( - "regen_coldkey", help="""Regenerates a coldkey from a passed value""" - ) - regen_coldkey_parser.add_argument( - "--mnemonic", - required=False, - nargs="+", - help="Mnemonic used to regen your key i.e. horse cart dog ...", - ) - regen_coldkey_parser.add_argument( - "--seed", - required=False, - default=None, - help="Seed hex string used to regen your key i.e. 0x1234...", - ) - regen_coldkey_parser.add_argument( - "--json", - required=False, - default=None, - help="""Path to a json file containing the encrypted key backup. (e.g. 
from PolkadotJS)""", - ) - regen_coldkey_parser.add_argument( - "--json_password", - required=False, - default=None, - help="""Password to decrypt the json file.""", - ) - regen_coldkey_parser.add_argument( - "--use_password", - dest="use_password", - action="store_true", - help="""Set true to protect the generated bittensor key with a password.""", - default=True, - ) - regen_coldkey_parser.add_argument( - "--no_password", - dest="use_password", - action="store_false", - help="""Set off protects the generated bittensor key with a password.""", - ) - regen_coldkey_parser.add_argument( - "--overwrite_coldkey", - default=False, - action="store_true", - help="""Overwrite the old coldkey with the newly generated coldkey""", - ) - bittensor.wallet.add_args(regen_coldkey_parser) - bittensor.subtensor.add_args(regen_coldkey_parser) - - -class RegenColdkeypubCommand: - """ - Executes the ``regen_coldkeypub`` command to regenerate the public part of a coldkey (coldkeypub) for a wallet on the Bittensor network. - - This command is used when a user needs to recreate their coldkeypub from an existing public key or SS58 address. - - Usage: - The command requires either a public key in hexadecimal format or an ``SS58`` address to regenerate the coldkeypub. It optionally allows overwriting an existing coldkeypub file. - - Optional arguments: - - ``--public_key_hex`` (str): The public key in hex format. - - ``--ss58_address`` (str): The SS58 address of the coldkey. - - ``--overwrite_coldkeypub`` (bool): Overwrites the existing coldkeypub file with the new one. - - Example usage:: - - btcli wallet regen_coldkeypub --ss58_address 5DkQ4... - - Note: - This command is particularly useful for users who need to regenerate their coldkeypub, perhaps due to file corruption or loss. - It is a recovery-focused utility that ensures continued access to wallet functionalities. 
- """ - - def run(cli): - r"""Creates a new coldkeypub under this wallet.""" - wallet = bittensor.wallet(config=cli.config) - wallet.regenerate_coldkeypub( - ss58_address=cli.config.get("ss58_address"), - public_key=cli.config.get("public_key_hex"), - overwrite=cli.config.overwrite_coldkeypub, - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if config.ss58_address == None and config.public_key_hex == None: - prompt_answer = Prompt.ask( - "Enter the ss58_address or the public key in hex" - ) - if prompt_answer.startswith("0x"): - config.public_key_hex = prompt_answer - else: - config.ss58_address = prompt_answer - if not bittensor.utils.is_valid_bittensor_address_or_public_key( - address=( - config.ss58_address if config.ss58_address else config.public_key_hex - ) - ): - sys.exit(1) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - regen_coldkeypub_parser = parser.add_parser( - "regen_coldkeypub", - help="""Regenerates a coldkeypub from the public part of the coldkey.""", - ) - regen_coldkeypub_parser.add_argument( - "--public_key", - "--pubkey", - dest="public_key_hex", - required=False, - default=None, - type=str, - help="The public key (in hex) of the coldkey to regen e.g. 0x1234 ...", - ) - regen_coldkeypub_parser.add_argument( - "--ss58_address", - "--addr", - "--ss58", - dest="ss58_address", - required=False, - default=None, - type=str, - help="The ss58 address of the coldkey to regen e.g. 
5ABCD ...", - ) - regen_coldkeypub_parser.add_argument( - "--overwrite_coldkeypub", - default=False, - action="store_true", - help="""Overwrite the old coldkeypub file with the newly generated coldkeypub""", - ) - bittensor.wallet.add_args(regen_coldkeypub_parser) - bittensor.subtensor.add_args(regen_coldkeypub_parser) - - -class RegenHotkeyCommand: - """ - Executes the ``regen_hotkey`` command to regenerate a hotkey for a wallet on the Bittensor network. - - Similar to regenerating a coldkey, this command creates a new hotkey from a mnemonic, seed, or JSON file. - - Usage: - Users can provide a mnemonic, seed string, or a JSON file to regenerate the hotkey. - The command supports optional password protection and can overwrite an existing hotkey. - - Optional arguments: - - ``--mnemonic`` (str): A mnemonic phrase used to regenerate the key. - - ``--seed`` (str): A seed hex string used for key regeneration. - - ``--json`` (str): Path to a JSON file containing an encrypted key backup. - - ``--json_password`` (str): Password to decrypt the JSON file. - - ``--use_password`` (bool): Enables password protection for the generated key. - - ``--overwrite_hotkey`` (bool): Overwrites the existing hotkey with the new one. - - Example usage:: - - btcli wallet regen_hotkey - btcli wallet regen_hotkey --seed 0x1234... - - Note: - This command is essential for users who need to regenerate their hotkey, possibly for security upgrades or key recovery. - It should be used cautiously to avoid accidental overwrites of existing keys. 
- """ - - def run(cli): - r"""Creates a new coldkey under this wallet.""" - wallet = bittensor.wallet(config=cli.config) - - json_str: Optional[str] = None - json_password: Optional[str] = None - if cli.config.get("json"): - file_name: str = cli.config.get("json") - if not os.path.exists(file_name) or not os.path.isfile(file_name): - raise ValueError("File {} does not exist".format(file_name)) - with open(cli.config.get("json"), "r") as f: - json_str = f.read() - - # Password can be "", assume if None - json_password = cli.config.get("json_password", "") - - wallet.regenerate_hotkey( - mnemonic=cli.config.mnemonic, - seed=cli.config.seed, - json=(json_str, json_password), - use_password=cli.config.use_password, - overwrite=cli.config.overwrite_hotkey, - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - if ( - config.mnemonic == None - and config.get("seed", d=None) == None - and config.get("json", d=None) == None - ): - prompt_answer = Prompt.ask("Enter mnemonic, seed, or json file location") - if prompt_answer.startswith("0x"): - config.seed = prompt_answer - elif len(prompt_answer.split(" ")) > 1: - config.mnemonic = prompt_answer - else: - config.json = prompt_answer - - if config.get("json", d=None) and config.get("json_password", d=None) == None: - config.json_password = Prompt.ask( - "Enter json backup password", password=True - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - regen_hotkey_parser = parser.add_parser( - "regen_hotkey", help="""Regenerates a hotkey from a passed mnemonic""" - ) - regen_hotkey_parser.add_argument( - "--mnemonic", - required=False, - 
nargs="+", - help="Mnemonic used to regen your key i.e. horse cart dog ...", - ) - regen_hotkey_parser.add_argument( - "--seed", - required=False, - default=None, - help="Seed hex string used to regen your key i.e. 0x1234...", - ) - regen_hotkey_parser.add_argument( - "--json", - required=False, - default=None, - help="""Path to a json file containing the encrypted key backup. (e.g. from PolkadotJS)""", - ) - regen_hotkey_parser.add_argument( - "--json_password", - required=False, - default=None, - help="""Password to decrypt the json file.""", - ) - regen_hotkey_parser.add_argument( - "--use_password", - dest="use_password", - action="store_true", - help="""Set true to protect the generated bittensor key with a password.""", - default=False, - ) - regen_hotkey_parser.add_argument( - "--no_password", - dest="use_password", - action="store_false", - help="""Set off protects the generated bittensor key with a password.""", - ) - regen_hotkey_parser.add_argument( - "--overwrite_hotkey", - dest="overwrite_hotkey", - action="store_true", - default=False, - help="""Overwrite the old hotkey with the newly generated hotkey""", - ) - bittensor.wallet.add_args(regen_hotkey_parser) - bittensor.subtensor.add_args(regen_hotkey_parser) - - -class NewHotkeyCommand: - """ - Executes the ``new_hotkey`` command to create a new hotkey under a wallet on the Bittensor network. - - This command is used to generate a new hotkey for managing a neuron or participating in the network. - - Usage: - The command creates a new hotkey with an optional word count for the mnemonic and supports password protection. - It also allows overwriting an existing hotkey. - - Optional arguments: - - ``--n_words`` (int): The number of words in the mnemonic phrase. - - ``--use_password`` (bool): Enables password protection for the generated key. - - ``--overwrite_hotkey`` (bool): Overwrites the existing hotkey with the new one. 
- - Example usage:: - - btcli wallet new_hotkey --n_words 24 - - Note: - This command is useful for users who wish to create additional hotkeys for different purposes, - such as running multiple miners or separating operational roles within the network. - """ - - def run(cli): - """Creates a new hotke under this wallet.""" - wallet = bittensor.wallet(config=cli.config) - wallet.create_new_hotkey( - n_words=cli.config.n_words, - use_password=cli.config.use_password, - overwrite=cli.config.overwrite_hotkey, - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - new_hotkey_parser = parser.add_parser( - "new_hotkey", - help="""Creates a new hotkey (for running a miner) under the specified path.""", - ) - new_hotkey_parser.add_argument( - "--n_words", - type=int, - choices=[12, 15, 18, 21, 24], - default=12, - help="""The number of words representing the mnemonic. i.e. horse cart dog ... 
x 24""", - ) - new_hotkey_parser.add_argument( - "--use_password", - dest="use_password", - action="store_true", - help="""Set true to protect the generated bittensor key with a password.""", - default=False, - ) - new_hotkey_parser.add_argument( - "--no_password", - dest="use_password", - action="store_false", - help="""Set off protects the generated bittensor key with a password.""", - ) - new_hotkey_parser.add_argument( - "--overwrite_hotkey", - action="store_true", - default=False, - help="""Overwrite the old hotkey with the newly generated hotkey""", - ) - bittensor.wallet.add_args(new_hotkey_parser) - bittensor.subtensor.add_args(new_hotkey_parser) - - -class NewColdkeyCommand: - """ - Executes the ``new_coldkey`` command to create a new coldkey under a wallet on the Bittensor network. - - This command generates a coldkey, which is essential for holding balances and performing high-value transactions. - - Usage: - The command creates a new coldkey with an optional word count for the mnemonic and supports password protection. - It also allows overwriting an existing coldkey. - - Optional arguments: - - ``--n_words`` (int): The number of words in the mnemonic phrase. - - ``--use_password`` (bool): Enables password protection for the generated key. - - ``--overwrite_coldkey`` (bool): Overwrites the existing coldkey with the new one. - - Example usage:: - - btcli wallet new_coldkey --n_words 15 - - Note: - This command is crucial for users who need to create a new coldkey for enhanced security or as part of setting up a new wallet. - It's a foundational step in establishing a secure presence on the Bittensor network. 
- """ - - def run(cli): - r"""Creates a new coldkey under this wallet.""" - wallet = bittensor.wallet(config=cli.config) - wallet.create_new_coldkey( - n_words=cli.config.n_words, - use_password=cli.config.use_password, - overwrite=cli.config.overwrite_coldkey, - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - new_coldkey_parser = parser.add_parser( - "new_coldkey", - help="""Creates a new coldkey (for containing balance) under the specified path. """, - ) - new_coldkey_parser.add_argument( - "--n_words", - type=int, - choices=[12, 15, 18, 21, 24], - default=12, - help="""The number of words representing the mnemonic. i.e. horse cart dog ... x 24""", - ) - new_coldkey_parser.add_argument( - "--use_password", - dest="use_password", - action="store_true", - help="""Set true to protect the generated bittensor key with a password.""", - default=True, - ) - new_coldkey_parser.add_argument( - "--no_password", - dest="use_password", - action="store_false", - help="""Set off protects the generated bittensor key with a password.""", - ) - new_coldkey_parser.add_argument( - "--overwrite_coldkey", - action="store_true", - default=False, - help="""Overwrite the old coldkey with the newly generated coldkey""", - ) - bittensor.wallet.add_args(new_coldkey_parser) - bittensor.subtensor.add_args(new_coldkey_parser) - - -class WalletCreateCommand: - """ - Executes the ``create`` command to generate both a new coldkey and hotkey under a specified wallet on the Bittensor network. - - This command is a comprehensive utility for creating a complete wallet setup with both cold and hotkeys. - - Usage: - The command facilitates the creation of a new coldkey and hotkey with an optional word count for the mnemonics. 
- It supports password protection for the coldkey and allows overwriting of existing keys. - - Optional arguments: - - ``--n_words`` (int): The number of words in the mnemonic phrase for both keys. - - ``--use_password`` (bool): Enables password protection for the coldkey. - - ``--overwrite_coldkey`` (bool): Overwrites the existing coldkey with the new one. - - ``--overwrite_hotkey`` (bool): Overwrites the existing hotkey with the new one. - - Example usage:: - - btcli wallet create --n_words 21 - - Note: - This command is ideal for new users setting up their wallet for the first time or for those who wish to completely renew their wallet keys. - It ensures a fresh start with new keys for secure and effective participation in the network. - """ - - def run(cli): - r"""Creates a new coldkey and hotkey under this wallet.""" - wallet = bittensor.wallet(config=cli.config) - wallet.create_new_coldkey( - n_words=cli.config.n_words, - use_password=cli.config.use_password, - overwrite=cli.config.overwrite_coldkey, - ) - wallet.create_new_hotkey( - n_words=cli.config.n_words, - use_password=False, - overwrite=cli.config.overwrite_hotkey, - ) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - new_coldkey_parser = parser.add_parser( - "create", - help="""Creates a new coldkey (for containing balance) under the specified path. """, - ) - new_coldkey_parser.add_argument( - "--n_words", - type=int, - choices=[12, 15, 18, 21, 24], - default=12, - help="""The number of words representing the mnemonic. i.e. horse cart dog ... 
x 24""", - ) - new_coldkey_parser.add_argument( - "--use_password", - dest="use_password", - action="store_true", - help="""Set true to protect the generated bittensor key with a password.""", - default=True, - ) - new_coldkey_parser.add_argument( - "--no_password", - dest="use_password", - action="store_false", - help="""Set off protects the generated bittensor key with a password.""", - ) - new_coldkey_parser.add_argument( - "--overwrite_coldkey", - action="store_true", - default=False, - help="""Overwrite the old coldkey with the newly generated coldkey""", - ) - new_coldkey_parser.add_argument( - "--overwrite_hotkey", - action="store_true", - default=False, - help="""Overwrite the old hotkey with the newly generated hotkey""", - ) - bittensor.wallet.add_args(new_coldkey_parser) - bittensor.subtensor.add_args(new_coldkey_parser) - - -def _get_coldkey_wallets_for_path(path: str) -> List["bittensor.wallet"]: - """Get all coldkey wallet names from path.""" - try: - wallet_names = next(os.walk(os.path.expanduser(path)))[1] - return [bittensor.wallet(path=path, name=name) for name in wallet_names] - except StopIteration: - # No wallet files found. - wallets = [] - return wallets - - -class UpdateWalletCommand: - """ - Executes the ``update`` command to check and potentially update the security of the wallets in the Bittensor network. - - This command is used to enhance wallet security using modern encryption standards. - - Usage: - The command checks if any of the wallets need an update in their security protocols. - It supports updating all legacy wallets or a specific one based on the user's choice. - - Optional arguments: - - ``--all`` (bool): When set, updates all legacy wallets. - - Example usage:: - - btcli wallet update --all - - Note: - This command is important for maintaining the highest security standards for users' wallets. - It is recommended to run this command periodically to ensure wallets are up-to-date with the latest security practices. 
- """ - - @staticmethod - def run(cli): - """Check if any of the wallets needs an update.""" - config = cli.config.copy() - if config.get("all", d=False) == True: - wallets = _get_coldkey_wallets_for_path(config.wallet.path) - else: - wallets = [bittensor.wallet(config=config)] - - for wallet in wallets: - print("\n===== ", wallet, " =====") - wallet.coldkey_file.check_and_update_encryption() - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - update_wallet_parser = parser.add_parser( - "update", - help="""Updates the wallet security using NaCL instead of ansible vault.""", - ) - update_wallet_parser.add_argument("--all", action="store_true") - bittensor.wallet.add_args(update_wallet_parser) - bittensor.subtensor.add_args(update_wallet_parser) - - @staticmethod - def check_config(config: "bittensor.Config"): - if config.get("all", d=False) == False: - if not config.no_prompt: - if Confirm.ask("Do you want to update all legacy wallets?"): - config["all"] = True - - # Ask the user to specify the wallet if the wallet name is not clear. 
- if ( - config.get("all", d=False) == False - and config.wallet.get("name") == bittensor.defaults.wallet.name - and not config.no_prompt - ): - wallet_name = Prompt.ask( - "Enter wallet name", default=bittensor.defaults.wallet.name - ) - config.wallet.name = str(wallet_name) - - -def _get_coldkey_ss58_addresses_for_path(path: str) -> Tuple[List[str], List[str]]: - """Get all coldkey ss58 addresses from path.""" - - def list_coldkeypub_files(dir_path): - abspath = os.path.abspath(os.path.expanduser(dir_path)) - coldkey_files = [] - wallet_names = [] - - for potential_wallet_name in os.listdir(abspath): - coldkey_path = os.path.join( - abspath, potential_wallet_name, "coldkeypub.txt" - ) - if os.path.isdir( - os.path.join(abspath, potential_wallet_name) - ) and os.path.exists(coldkey_path): - coldkey_files.append(coldkey_path) - wallet_names.append(potential_wallet_name) - else: - bittensor.logging.warning( - f"{coldkey_path} does not exist. Excluding..." - ) - return coldkey_files, wallet_names - - coldkey_files, wallet_names = list_coldkeypub_files(path) - addresses = [ - bittensor.keyfile(coldkey_path).keypair.ss58_address - for coldkey_path in coldkey_files - ] - return addresses, wallet_names - - -class WalletBalanceCommand: - """ - Executes the ``balance`` command to check the balance of the wallet on the Bittensor network. - - This command provides a detailed view of the wallet's coldkey balances, including free and staked balances. - - Usage: - The command lists the balances of all wallets in the user's configuration directory, showing the wallet name, coldkey address, and the respective free and staked balances. - - Optional arguments: - None. The command uses the wallet and subtensor configurations to fetch balance data. 
- - Example usages: - - - To display the balance of a single wallet, use the command with the `--wallet.name` argument to specify the wallet name: - - ``` - btcli w balance --wallet.name WALLET - ``` - - - Alternatively, you can invoke the command without specifying a wallet name, which will prompt you to enter the wallets path: - - ``` - btcli w balance - ``` - - - To display the balances of all wallets, use the `--all` argument: - - ``` - btcli w balance --all - ``` - - Note: - When using `btcli`, `w` is used interchangeably with `wallet`. You may use either based on your preference for brevity or clarity. - This command is essential for users to monitor their financial status on the Bittensor network. - It helps in keeping track of assets and ensuring the wallet's financial health. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - """Check the balance of the wallet.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - WalletBalanceCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - wallet = bittensor.wallet(config=cli.config) - - wallet_names = [] - coldkeys = [] - free_balances = [] - staked_balances = [] - total_free_balance = 0 - total_staked_balance = 0 - balances = {} - - if cli.config.get("all", d=None): - coldkeys, wallet_names = _get_coldkey_ss58_addresses_for_path( - cli.config.wallet.path - ) - - free_balances = [ - subtensor.get_balance(coldkeys[i]) for i in range(len(coldkeys)) - ] - - staked_balances = [ - subtensor.get_total_stake_for_coldkey(coldkeys[i]) - for i in range(len(coldkeys)) - ] - - total_free_balance = sum(free_balances) - total_staked_balance = sum(staked_balances) - - balances = { - name: (coldkey, free, staked) - for name, coldkey, free, staked in sorted( - zip(wallet_names, coldkeys, 
free_balances, staked_balances) - ) - } - else: - coldkey_wallet = bittensor.wallet(config=cli.config) - if ( - coldkey_wallet.coldkeypub_file.exists_on_device() - and not coldkey_wallet.coldkeypub_file.is_encrypted() - ): - coldkeys = [coldkey_wallet.coldkeypub.ss58_address] - wallet_names = [coldkey_wallet.name] - - free_balances = [ - subtensor.get_balance(coldkeys[i]) for i in range(len(coldkeys)) - ] - - staked_balances = [ - subtensor.get_total_stake_for_coldkey(coldkeys[i]) - for i in range(len(coldkeys)) - ] - - total_free_balance = sum(free_balances) - total_staked_balance = sum(staked_balances) - - balances = { - name: (coldkey, free, staked) - for name, coldkey, free, staked in sorted( - zip(wallet_names, coldkeys, free_balances, staked_balances) - ) - } - - if not coldkey_wallet.coldkeypub_file.exists_on_device(): - bittensor.__console__.print("[bold red]No wallets found.") - return - - table = Table(show_footer=False) - table.title = "[white]Wallet Coldkey Balances" - table.add_column( - "[white]Wallet Name", - header_style="overline white", - footer_style="overline white", - style="rgb(50,163,219)", - no_wrap=True, - ) - - table.add_column( - "[white]Coldkey Address", - header_style="overline white", - footer_style="overline white", - style="rgb(50,163,219)", - no_wrap=True, - ) - - for typestr in ["Free", "Staked", "Total"]: - table.add_column( - f"[white]{typestr} Balance", - header_style="overline white", - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - - for name, (coldkey, free, staked) in balances.items(): - table.add_row( - name, - coldkey, - str(free), - str(staked), - str(free + staked), - ) - table.add_row() - table.add_row( - "Total Balance Across All Coldkeys", - "", - str(total_free_balance), - str(total_staked_balance), - str(total_free_balance + total_staked_balance), - ) - table.show_footer = True - - table.box = None - table.pad_edge = False - table.width = None - 
bittensor.__console__.print(table) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - balance_parser = parser.add_parser( - "balance", help="""Checks the balance of the wallet.""" - ) - balance_parser.add_argument( - "--all", - dest="all", - action="store_true", - help="""View balance for all wallets.""", - default=False, - ) - - bittensor.wallet.add_args(balance_parser) - bittensor.subtensor.add_args(balance_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if ( - not config.is_set("wallet.path") - and not config.no_prompt - and not config.get("all", d=None) - ): - path = Prompt.ask("Enter wallets path", default=defaults.wallet.path) - config.wallet.path = str(path) - - if ( - not config.is_set("wallet.name") - and not config.no_prompt - and not config.get("all", d=None) - ): - wallet_name = Prompt.ask( - "Enter wallet name", default=defaults.wallet.name - ) - config.wallet.name = str(wallet_name) - - if not config.is_set("subtensor.network") and not config.no_prompt: - network = Prompt.ask( - "Enter network", - default=defaults.subtensor.network, - choices=bittensor.__networks__, - ) - config.subtensor.network = str(network) - ( - _, - config.subtensor.chain_endpoint, - ) = bittensor.subtensor.determine_chain_endpoint_and_network(str(network)) - - -API_URL = "https://api.subquery.network/sq/TaoStats/bittensor-indexer" -MAX_TXN = 1000 -GRAPHQL_QUERY = """ -query ($first: Int!, $after: Cursor, $filter: TransferFilter, $order: [TransfersOrderBy!]!) { - transfers(first: $first, after: $after, filter: $filter, orderBy: $order) { - nodes { - id - from - to - amount - extrinsicId - blockNumber - } - pageInfo { - endCursor - hasNextPage - hasPreviousPage - } - totalCount - } -} -""" - - -class GetWalletHistoryCommand: - """ - Executes the ``history`` command to fetch the latest transfers of the provided wallet on the Bittensor network. - - This command provides a detailed view of the transfers carried out on the wallet. 
- - Usage: - The command lists the latest transfers of the provided wallet, showing the From, To, Amount, Extrinsic Id and Block Number. - - Optional arguments: - None. The command uses the wallet and subtensor configurations to fetch latest transfer data associated with a wallet. - - Example usage:: - - btcli wallet history - - Note: - This command is essential for users to monitor their financial status on the Bittensor network. - It helps in fetching info on all the transfers so that user can easily tally and cross check the transactions. - """ - - @staticmethod - def run(cli): - r"""Check the transfer history of the provided wallet.""" - wallet = bittensor.wallet(config=cli.config) - wallet_address = wallet.get_coldkeypub().ss58_address - # Fetch all transfers - transfers = get_wallet_transfers(wallet_address) - - # Create output table - table = create_transfer_history_table(transfers) - - bittensor.__console__.print(table) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - history_parser = parser.add_parser( - "history", - help="""Fetch transfer history associated with the provided wallet""", - ) - bittensor.wallet.add_args(history_parser) - bittensor.subtensor.add_args(history_parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - -def get_wallet_transfers(wallet_address) -> List[dict]: - """Get all transfers associated with the provided wallet address.""" - - variables = { - "first": MAX_TXN, - "filter": { - "or": [ - {"from": {"equalTo": wallet_address}}, - {"to": {"equalTo": wallet_address}}, - ] - }, - "order": "BLOCK_NUMBER_DESC", - } - - response = requests.post( - API_URL, json={"query": GRAPHQL_QUERY, "variables": variables} - ) - data = response.json() - - # Extract nodes and pageInfo from the response - transfer_data = 
data.get("data", {}).get("transfers", {}) - transfers = transfer_data.get("nodes", []) - - return transfers - - -def create_transfer_history_table(transfers): - """Get output transfer table""" - - table = Table(show_footer=False) - # Define the column names - column_names = [ - "Id", - "From", - "To", - "Amount (Tao)", - "Extrinsic Id", - "Block Number", - "URL (taostats)", - ] - taostats_url_base = "https://x.taostats.io/extrinsic" - - # Create a table - table = Table(show_footer=False) - table.title = "[white]Wallet Transfers" - - # Define the column styles - header_style = "overline white" - footer_style = "overline white" - column_style = "rgb(50,163,219)" - no_wrap = True - - # Add columns to the table - for column_name in column_names: - table.add_column( - f"[white]{column_name}", - header_style=header_style, - footer_style=footer_style, - style=column_style, - no_wrap=no_wrap, - justify="left" if column_name == "Id" else "right", - ) - - # Add rows to the table - for item in transfers: - try: - tao_amount = int(item["amount"]) / RAOPERTAO - except: - tao_amount = item["amount"] - table.add_row( - item["id"], - item["from"], - item["to"], - f"{tao_amount:.3f}", - str(item["extrinsicId"]), - item["blockNumber"], - f"{taostats_url_base}/{item['blockNumber']}-{item['extrinsicId']}", - ) - table.add_row() - table.show_footer = True - table.box = None - table.pad_edge = False - table.width = None - return table diff --git a/bittensor/commands/weights.py b/bittensor/commands/weights.py deleted file mode 100644 index b8844433c3..0000000000 --- a/bittensor/commands/weights.py +++ /dev/null @@ -1,290 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -"""Module that encapsulates the CommitWeightCommand and the RevealWeightCommand. Used to commit and reveal weights -for a specific subnet on the Bittensor Network.""" - -import argparse -import os -import re - -import numpy as np -from rich.prompt import Prompt, Confirm - -import bittensor -import bittensor.utils.weight_utils as weight_utils -from . import defaults # type: ignore - - -class CommitWeightCommand: - """ - Executes the ``commit`` command to commit weights for specific subnet on the Bittensor network. - - Usage: - The command allows committing weights for a specific subnet. Users need to specify the netuid (network unique identifier), corresponding UIDs, and weights they wish to commit. - - Optional arguments: - - ``--netuid`` (int): The netuid of the subnet for which weights are to be commited. - - ``--uids`` (str): Corresponding UIDs for the specified netuid, in comma-separated format. - - ``--weights`` (str): Corresponding weights for the specified UIDs, in comma-separated format. 
- - Example usage: - $ btcli wt commit --netuid 1 --uids 1,2,3,4 --weights 0.1,0.2,0.3,0.4 - - Note: - This command is used to commit weights for a specific subnet and requires the user to have the necessary permissions. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Commit weights for a specific subnet.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - CommitWeightCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Commit weights for a specific subnet""" - wallet = bittensor.wallet(config=cli.config) - - # Get values if not set - if not cli.config.is_set("netuid"): - cli.config.netuid = int(Prompt.ask("Enter netuid")) - - if not cli.config.is_set("uids"): - cli.config.uids = Prompt.ask("Enter UIDs (comma-separated)") - - if not cli.config.is_set("weights"): - cli.config.weights = Prompt.ask("Enter weights (comma-separated)") - - # Parse from string - netuid = cli.config.netuid - uids = np.array( - [int(x) for x in re.split(r"[ ,]+", cli.config.uids)], dtype=np.int64 - ) - weights = np.array( - [float(x) for x in re.split(r"[ ,]+", cli.config.weights)], dtype=np.float32 - ) - weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - - if not cli.config.is_set("salt"): - # Generate random salt - salt_length = 8 - salt = list(os.urandom(salt_length)) - - if not Confirm.ask( - f"Have you recorded the [red]salt[/red]: [bold white]'{salt}'[/bold white]? It will be " - f"required to reveal weights." - ): - return False, "User cancelled the operation." 
- else: - salt = np.array( - [int(x) for x in re.split(r"[ ,]+", cli.config.salt)], - dtype=np.int64, - ).tolist() - - # Run the commit weights operation - success, message = subtensor.commit_weights( - wallet=wallet, - netuid=netuid, - uids=weight_uids, - weights=weight_vals, - salt=salt, - wait_for_inclusion=cli.config.wait_for_inclusion, - wait_for_finalization=cli.config.wait_for_finalization, - prompt=cli.config.prompt, - ) - - # Result - if success: - bittensor.__console__.print("Weights committed successfully") - else: - bittensor.__console__.print(f"Failed to commit weights: {message}") - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser( - "commit", help="""Commit weights for a specific subnet.""" - ) - parser.add_argument("--netuid", dest="netuid", type=int, required=False) - parser.add_argument("--uids", dest="uids", type=str, required=False) - parser.add_argument("--weights", dest="weights", type=str, required=False) - parser.add_argument("--salt", dest="salt", type=str, required=False) - parser.add_argument( - "--wait-for-inclusion", - dest="wait_for_inclusion", - action="store_true", - default=False, - ) - parser.add_argument( - "--wait-for-finalization", - dest="wait_for_finalization", - action="store_true", - default=True, - ) - parser.add_argument( - "--prompt", - dest="prompt", - action="store_true", - default=False, - ) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.no_prompt and not config.is_set("wallet.name"): - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.no_prompt and not config.is_set("wallet.hotkey"): - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - -class RevealWeightCommand: - """ - Executes the ``reveal`` command to reveal weights 
for a specific subnet on the Bittensor network. - Usage: - The command allows revealing weights for a specific subnet. Users need to specify the netuid (network unique identifier), corresponding UIDs, and weights they wish to reveal. - Optional arguments: - - ``--netuid`` (int): The netuid of the subnet for which weights are to be revealed. - - ``--uids`` (str): Corresponding UIDs for the specified netuid, in comma-separated format. - - ``--weights`` (str): Corresponding weights for the specified UIDs, in comma-separated format. - - ``--salt`` (str): Corresponding salt for the hash function, integers in comma-separated format. - Example usage:: - $ btcli wt reveal --netuid 1 --uids 1,2,3,4 --weights 0.1,0.2,0.3,0.4 --salt 163,241,217,11,161,142,147,189 - Note: - This command is used to reveal weights for a specific subnet and requires the user to have the necessary permissions. - """ - - @staticmethod - def run(cli: "bittensor.cli"): - r"""Reveal weights for a specific subnet.""" - try: - subtensor: "bittensor.subtensor" = bittensor.subtensor( - config=cli.config, log_verbose=False - ) - RevealWeightCommand._run(cli, subtensor) - finally: - if "subtensor" in locals(): - subtensor.close() - bittensor.logging.debug("closing subtensor connection") - - @staticmethod - def _run(cli: "bittensor.cli", subtensor: "bittensor.subtensor"): - r"""Reveal weights for a specific subnet.""" - wallet = bittensor.wallet(config=cli.config) - - # Get values if not set. 
- if not cli.config.is_set("netuid"): - cli.config.netuid = int(Prompt.ask("Enter netuid")) - - if not cli.config.is_set("uids"): - cli.config.uids = Prompt.ask("Enter UIDs (comma-separated)") - - if not cli.config.is_set("weights"): - cli.config.weights = Prompt.ask("Enter weights (comma-separated)") - - if not cli.config.is_set("salt"): - cli.config.salt = Prompt.ask("Enter salt (comma-separated)") - - # Parse from string - netuid = cli.config.netuid - version = bittensor.__version_as_int__ - uids = np.array( - [int(x) for x in re.split(r"[ ,]+", cli.config.uids)], - dtype=np.int64, - ) - weights = np.array( - [float(x) for x in re.split(r"[ ,]+", cli.config.weights)], - dtype=np.float32, - ) - salt = np.array( - [int(x) for x in re.split(r"[ ,]+", cli.config.salt)], - dtype=np.int64, - ) - weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - - # Run the reveal weights operation. - success, message = subtensor.reveal_weights( - wallet=wallet, - netuid=netuid, - uids=weight_uids, - weights=weight_vals, - salt=salt, - version_key=version, - wait_for_inclusion=cli.config.wait_for_inclusion, - wait_for_finalization=cli.config.wait_for_finalization, - prompt=cli.config.prompt, - ) - - if success: - bittensor.__console__.print("Weights revealed successfully") - else: - bittensor.__console__.print(f"Failed to reveal weights: {message}") - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - parser = parser.add_parser( - "reveal", help="""Reveal weights for a specific subnet.""" - ) - parser.add_argument("--netuid", dest="netuid", type=int, required=False) - parser.add_argument("--uids", dest="uids", type=str, required=False) - parser.add_argument("--weights", dest="weights", type=str, required=False) - parser.add_argument("--salt", dest="salt", type=str, required=False) - parser.add_argument( - "--wait-for-inclusion", - dest="wait_for_inclusion", - action="store_true", - default=False, - ) - 
parser.add_argument( - "--wait-for-finalization", - dest="wait_for_finalization", - action="store_true", - default=True, - ) - parser.add_argument( - "--prompt", - dest="prompt", - action="store_true", - default=False, - ) - - bittensor.wallet.add_args(parser) - bittensor.subtensor.add_args(parser) - - @staticmethod - def check_config(config: "bittensor.config"): - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) diff --git a/bittensor/config.py b/bittensor/config.py deleted file mode 100644 index 59ad4451b8..0000000000 --- a/bittensor/config.py +++ /dev/null @@ -1,415 +0,0 @@ -""" -Implementation of the config class, which manages the configuration of different Bittensor modules. -""" - -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import os -import sys -import yaml -import copy -from copy import deepcopy -from munch import DefaultMunch -from typing import List, Optional, Dict, Any, TypeVar, Type -import argparse - - -class InvalidConfigFile(Exception): - """In place of YAMLError""" - - pass - - -class config(DefaultMunch): - """ - Implementation of the config class, which manages the configuration of different Bittensor modules. - """ - - __is_set: Dict[str, bool] - - r""" Translates the passed parser into a nested Bittensor config. - - Args: - parser (argparse.ArgumentParser): - Command line parser object. - strict (bool): - If ``true``, the command line arguments are strictly parsed. - args (list of str): - Command line arguments. - default (Optional[Any]): - Default value for the Config. Defaults to ``None``. - This default will be returned for attributes that are undefined. - Returns: - config (bittensor.config): - Nested config object created from parser arguments. - """ - - def __init__( - self, - parser: argparse.ArgumentParser = None, - args: Optional[List[str]] = None, - strict: bool = False, - default: Optional[Any] = None, - ) -> None: - super().__init__(default) - - self["__is_set"] = {} - - if parser == None: - return None - - # Optionally add config specific arguments - try: - parser.add_argument( - "--config", - type=str, - help="If set, defaults are overridden by passed file.", - ) - except: - # this can fail if --config has already been added. - pass - - try: - parser.add_argument( - "--strict", - action="store_true", - help="""If flagged, config will check that only exact arguments have been set.""", - default=False, - ) - except: - # this can fail if --strict has already been added. 
- pass - - try: - parser.add_argument( - "--no_version_checking", - action="store_true", - help="Set ``true`` to stop cli version checking.", - default=False, - ) - except: - # this can fail if --no_version_checking has already been added. - pass - - try: - parser.add_argument( - "--no_prompt", - dest="no_prompt", - action="store_true", - help="Set ``true`` to stop cli from prompting the user.", - default=False, - ) - except: - # this can fail if --no_version_checking has already been added. - pass - - # Get args from argv if not passed in. - if args == None: - args = sys.argv[1:] - - # Check for missing required arguments before proceeding - missing_required_args = self.__check_for_missing_required_args(parser, args) - if missing_required_args: - # Handle missing required arguments gracefully - raise ValueError( - f"Missing required arguments: {', '.join(missing_required_args)}" - ) - - # 1.1 Optionally load defaults if the --config is set. - try: - config_file_path = ( - str(os.getcwd()) - + "/" - + vars(parser.parse_known_args(args)[0])["config"] - ) - except Exception as e: - config_file_path = None - - # Parse args not strict - config_params = config.__parse_args__(args=args, parser=parser, strict=False) - - # 2. Optionally check for --strict - ## strict=True when passed in OR when --strict is set - strict = config_params.strict or strict - - if config_file_path != None: - config_file_path = os.path.expanduser(config_file_path) - try: - with open(config_file_path) as f: - params_config = yaml.safe_load(f) - print("Loading config defaults from: {}".format(config_file_path)) - parser.set_defaults(**params_config) - except Exception as e: - print("Error in loading: {} using default parser settings".format(e)) - - # 2. Continue with loading in params. 
- params = config.__parse_args__(args=args, parser=parser, strict=strict) - - _config = self - - # Splits params and add to config - config.__split_params__(params=params, _config=_config) - - # Make the is_set map - _config["__is_set"] = {} - - ## Reparse args using default of unset - parser_no_defaults = copy.deepcopy(parser) - - # Only command as the arg, else no args - default_param_args = ( - [_config.get("command")] - if _config.get("command") != None and _config.get("subcommand") == None - else [] - ) - if _config.get("command") != None and _config.get("subcommand") != None: - default_param_args = [_config.get("command"), _config.get("subcommand")] - - ## Get all args by name - default_params = parser.parse_args(args=default_param_args) - - all_default_args = default_params.__dict__.keys() | [] - ## Make a dict with keys as args and values as argparse.SUPPRESS - defaults_as_suppress = {key: argparse.SUPPRESS for key in all_default_args} - ## Set the defaults to argparse.SUPPRESS, should remove them from the namespace - parser_no_defaults.set_defaults(**defaults_as_suppress) - parser_no_defaults._defaults.clear() # Needed for quirk of argparse - - ### Check for subparsers and do the same - if parser_no_defaults._subparsers != None: - for action in parser_no_defaults._subparsers._actions: - # Should only be the "command" subparser action - if isinstance(action, argparse._SubParsersAction): - # Set the defaults to argparse.SUPPRESS, should remove them from the namespace - # Each choice is the keyword for a command, we need to set the defaults for each of these - ## Note: we also need to clear the _defaults dict for each, this is a quirk of argparse - cmd_parser: argparse.ArgumentParser - for cmd_parser in action.choices.values(): - # If this choice is also a subparser, set defaults recursively - if cmd_parser._subparsers: - for action in cmd_parser._subparsers._actions: - # Should only be the "command" subparser action - if isinstance(action, 
argparse._SubParsersAction): - cmd_parser: argparse.ArgumentParser - for cmd_parser in action.choices.values(): - cmd_parser.set_defaults(**defaults_as_suppress) - cmd_parser._defaults.clear() # Needed for quirk of argparse - else: - cmd_parser.set_defaults(**defaults_as_suppress) - cmd_parser._defaults.clear() # Needed for quirk of argparse - - ## Reparse the args, but this time with the defaults as argparse.SUPPRESS - params_no_defaults = config.__parse_args__( - args=args, parser=parser_no_defaults, strict=strict - ) - - ## Diff the params and params_no_defaults to get the is_set map - _config["__is_set"] = { - arg_key: True - for arg_key in [ - k - for k, _ in filter( - lambda kv: kv[1] != argparse.SUPPRESS, - params_no_defaults.__dict__.items(), - ) - ] - } - - @staticmethod - def __split_params__(params: argparse.Namespace, _config: "config"): - # Splits params on dot syntax i.e neuron.axon_port and adds to _config - for arg_key, arg_val in params.__dict__.items(): - split_keys = arg_key.split(".") - head = _config - keys = split_keys - while len(keys) > 1: - if ( - hasattr(head, keys[0]) and head[keys[0]] != None - ): # Needs to be Config - head = getattr(head, keys[0]) - keys = keys[1:] - else: - head[keys[0]] = config() - head = head[keys[0]] - keys = keys[1:] - if len(keys) == 1: - head[keys[0]] = arg_val - - @staticmethod - def __parse_args__( - args: List[str], parser: argparse.ArgumentParser = None, strict: bool = False - ) -> argparse.Namespace: - """Parses the passed args use the passed parser. - - Args: - args (List[str]): - List of arguments to parse. - parser (argparse.ArgumentParser): - Command line parser object. - strict (bool): - If ``true``, the command line arguments are strictly parsed. - Returns: - Namespace: - Namespace object created from parser arguments. 
- """ - if not strict: - params, unrecognized = parser.parse_known_args(args=args) - params_list = list(params.__dict__) - # bug within argparse itself, does not correctly set value for boolean flags - for unrec in unrecognized: - if unrec.startswith("--") and unrec[2:] in params_list: - # Set the missing boolean value to true - setattr(params, unrec[2:], True) - else: - params = parser.parse_args(args=args) - - return params - - def __deepcopy__(self, memo) -> "config": - _default = self.__default__ - - config_state = self.__getstate__() - config_copy = config() - memo[id(self)] = config_copy - - config_copy.__setstate__(config_state) - config_copy.__default__ = _default - - config_copy["__is_set"] = deepcopy(self["__is_set"], memo) - - return config_copy - - def __repr__(self) -> str: - return self.__str__() - - @staticmethod - def _remove_private_keys(d): - if "__parser" in d: - d.pop("__parser", None) - if "__is_set" in d: - d.pop("__is_set", None) - for k, v in list(d.items()): - if isinstance(v, dict): - config._remove_private_keys(v) - return d - - def __str__(self) -> str: - # remove the parser and is_set map from the visible config - visible = copy.deepcopy(self.toDict()) - visible.pop("__parser", None) - visible.pop("__is_set", None) - cleaned = config._remove_private_keys(visible) - return "\n" + yaml.dump(cleaned, sort_keys=False) - - def copy(self) -> "config": - return copy.deepcopy(self) - - def to_string(self, items) -> str: - """Get string from items""" - return "\n" + yaml.dump(items.toDict()) - - def update_with_kwargs(self, kwargs): - """Add config to self""" - for key, val in kwargs.items(): - self[key] = val - - @classmethod - def _merge(cls, a, b): - """Merge two configurations recursively. - If there is a conflict, the value from the second configuration will take precedence. 
- """ - for key in b: - if key in a: - if isinstance(a[key], dict) and isinstance(b[key], dict): - a[key] = cls._merge(a[key], b[key]) - else: - a[key] = b[key] - else: - a[key] = b[key] - return a - - def merge(self, b): - """ - Merges the current config with another config. - - Args: - b: Another config to merge. - """ - self = self._merge(self, b) - - @classmethod - def merge_all(cls, configs: List["config"]) -> "config": - """ - Merge all configs in the list into one config. - If there is a conflict, the value from the last configuration in the list will take precedence. - - Args: - configs (list of config): - List of configs to be merged. - - Returns: - config: - Merged config object. - """ - result = cls() - for cfg in configs: - result.merge(cfg) - return result - - def is_set(self, param_name: str) -> bool: - """ - Returns a boolean indicating whether the parameter has been set or is still the default. - """ - if param_name not in self.get("__is_set"): - return False - else: - return self.get("__is_set")[param_name] - - def __check_for_missing_required_args( - self, parser: argparse.ArgumentParser, args: List[str] - ) -> List[str]: - required_args = self.__get_required_args_from_parser(parser) - missing_args = [arg for arg in required_args if not any(arg in s for s in args)] - return missing_args - - @staticmethod - def __get_required_args_from_parser(parser: argparse.ArgumentParser) -> List[str]: - required_args = [] - for action in parser._actions: - if action.required: - # Prefix the argument with '--' if it's a long argument, or '-' if it's short - prefix = "--" if len(action.dest) > 1 else "-" - required_args.append(prefix + action.dest) - return required_args - - -T = TypeVar("T", bound="DefaultConfig") - - -class DefaultConfig(config): - """ - A Config with a set of default values. - """ - - @classmethod - def default(cls: Type[T]) -> T: - """ - Get default config. 
- """ - raise NotImplementedError("Function default is not implemented.") diff --git a/bittensor/constants.py b/bittensor/constants.py deleted file mode 100644 index 74d3dd2e08..0000000000 --- a/bittensor/constants.py +++ /dev/null @@ -1,43 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 OpenTensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -# Standard Library -import asyncio -from typing import Dict, Type - -# 3rd Party -import aiohttp - - -ALLOWED_DELTA = 4_000_000_000 # Delta of 4 seconds for nonce validation -V_7_2_0 = 7002000 -NANOSECONDS_IN_SECOND = 1_000_000_000 - -#### Dendrite #### -DENDRITE_ERROR_MAPPING: Dict[Type[Exception], tuple] = { - aiohttp.ClientConnectorError: ("503", "Service unavailable"), - asyncio.TimeoutError: ("408", "Request timeout"), - aiohttp.ClientResponseError: (None, "Client response error"), - aiohttp.ClientPayloadError: ("400", "Payload error"), - aiohttp.ClientError: ("500", "Client error"), - aiohttp.ServerTimeoutError: ("504", "Server timeout error"), - aiohttp.ServerDisconnectedError: ("503", "Service disconnected"), - aiohttp.ServerConnectionError: ("503", "Service connection error"), -} - -DENDRITE_DEFAULT_ERROR = ("422", "Failed to parse response") -#### End Dendrite #### diff --git a/bittensor/dendrite.py b/bittensor/dendrite.py deleted file mode 100644 index 683ac595a5..0000000000 --- a/bittensor/dendrite.py +++ /dev/null @@ -1,868 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -# Standard Library -from __future__ import annotations -import asyncio -import time -from typing import Optional, List, Union, AsyncGenerator, Any -import uuid - -# 3rd Party -import aiohttp -from aiohttp import ClientTimeout - -# Application -import bittensor -from bittensor.constants import DENDRITE_ERROR_MAPPING, DENDRITE_DEFAULT_ERROR -from bittensor.utils.registration import torch, use_torch - - -class DendriteMixin: - """ - The Dendrite class represents the abstracted implementation of a network client module. - - In the brain analogy, dendrites receive signals - from other neurons (in this case, network servers or axons), and the Dendrite class here is designed - to send requests to those endpoint to recieve inputs. - - This class includes a wallet or keypair used for signing messages, and methods for making - HTTP requests to the network servers. It also provides functionalities such as logging - network requests and processing server responses. - - Args: - keypair: The wallet or keypair used for signing messages. - external_ip (str): The external IP address of the local system. - synapse_history (list): A list of Synapse objects representing the historical responses. - - Methods: - __str__(): Returns a string representation of the Dendrite object. - __repr__(): Returns a string representation of the Dendrite object, acting as a fallback for __str__(). - query(self, *args, **kwargs) -> Union[bittensor.Synapse, List[bittensor.Synapse]]: - Makes synchronous requests to one or multiple target Axons and returns responses. 
- - forward(self, axons, synapse=bittensor.Synapse(), timeout=12, deserialize=True, run_async=True, streaming=False) -> bittensor.Synapse: - Asynchronously sends requests to one or multiple Axons and collates their responses. - - call(self, target_axon, synapse=bittensor.Synapse(), timeout=12.0, deserialize=True) -> bittensor.Synapse: - Asynchronously sends a request to a specified Axon and processes the response. - - call_stream(self, target_axon, synapse=bittensor.Synapse(), timeout=12.0, deserialize=True) -> AsyncGenerator[bittensor.Synapse, None]: - Sends a request to a specified Axon and yields an AsyncGenerator that contains streaming response chunks before finally yielding the filled Synapse as the final element. - - preprocess_synapse_for_request(self, target_axon_info, synapse, timeout=12.0) -> bittensor.Synapse: - Preprocesses the synapse for making a request, including building headers and signing. - - process_server_response(self, server_response, json_response, local_synapse): - Processes the server response, updates the local synapse state, and merges headers. - - close_session(self): - Synchronously closes the internal aiohttp client session. - - aclose_session(self): - Asynchronously closes the internal aiohttp client session. - - NOTE: - When working with async `aiohttp `_ client sessions, it is recommended to use a context manager. - - Example with a context manager:: - - >>> aysnc with dendrite(wallet = bittensor.wallet()) as d: - >>> print(d) - >>> d( ) # ping axon - >>> d( [] ) # ping multiple - >>> d( bittensor.axon(), bittensor.Synapse ) - - However, you are able to safely call :func:`dendrite.query()` without a context manager in a synchronous setting. 
- - Example without a context manager:: - - >>> d = dendrite(wallet = bittensor.wallet() ) - >>> print(d) - >>> d( ) # ping axon - >>> d( [] ) # ping multiple - >>> d( bittensor.axon(), bittensor.Synapse ) - """ - - def __init__( - self, wallet: Optional[Union[bittensor.wallet, bittensor.Keypair]] = None - ): - """ - Initializes the Dendrite object, setting up essential properties. - - Args: - wallet (Optional[Union['bittensor.wallet', 'bittensor.keypair']], optional): - The user's wallet or keypair used for signing messages. Defaults to ``None``, in which case a new :func:`bittensor.wallet().hotkey` is generated and used. - """ - # Initialize the parent class - super(DendriteMixin, self).__init__() - - # Unique identifier for the instance - self.uuid = str(uuid.uuid1()) - - # Get the external IP - self.external_ip = bittensor.utils.networking.get_external_ip() - - # If a wallet or keypair is provided, use its hotkey. If not, generate a new one. - self.keypair = ( - wallet.hotkey if isinstance(wallet, bittensor.wallet) else wallet - ) or bittensor.wallet().hotkey - - self.synapse_history: list = [] - - self._session: Optional[aiohttp.ClientSession] = None - - @property - async def session(self) -> aiohttp.ClientSession: - """ - An asynchronous property that provides access to the internal `aiohttp `_ client session. - - This property ensures the management of HTTP connections in an efficient way. It lazily - initializes the `aiohttp.ClientSession `_ on its first use. The session is then reused for subsequent - HTTP requests, offering performance benefits by reusing underlying connections. - - This is used internally by the dendrite when querying axons, and should not be used directly - unless absolutely necessary for your application. - - Returns: - aiohttp.ClientSession: The active `aiohttp `_ client session instance. If no session exists, a - new one is created and returned. 
This session is used for asynchronous HTTP requests within - the dendrite, adhering to the async nature of the network interactions in the Bittensor framework. - - Example usage:: - - import bittensor as bt # Import bittensor - wallet = bt.wallet( ... ) # Initialize a wallet - dendrite = bt.dendrite( wallet ) # Initialize a dendrite instance with the wallet - - async with (await dendrite.session).post( # Use the session to make an HTTP POST request - url, # URL to send the request to - headers={...}, # Headers dict to be sent with the request - json={...}, # JSON body data to be sent with the request - timeout=10, # Timeout duration in seconds - ) as response: - json_response = await response.json() # Extract the JSON response from the server - - """ - if self._session is None: - self._session = aiohttp.ClientSession() - return self._session - - def close_session(self): - """ - Closes the internal `aiohttp `_ client session synchronously. - - This method ensures the proper closure and cleanup of the aiohttp client session, releasing any - resources like open connections and internal buffers. It is crucial for preventing resource leakage - and should be called when the dendrite instance is no longer in use, especially in synchronous contexts. - - Note: - This method utilizes asyncio's event loop to close the session asynchronously from a synchronous context. It is advisable to use this method only when asynchronous context management is not feasible. - - Usage: - When finished with dendrite in a synchronous context - :func:`dendrite_instance.close_session()`. - """ - if self._session: - loop = asyncio.get_event_loop() - loop.run_until_complete(self._session.close()) - self._session = None - - async def aclose_session(self): - """ - Asynchronously closes the internal `aiohttp `_ client session. - - This method is the asynchronous counterpart to the :func:`close_session` method. 
It should be used in - asynchronous contexts to ensure that the aiohttp client session is closed properly. The method - releases resources associated with the session, such as open connections and internal buffers, - which is essential for resource management in asynchronous applications. - - Usage: - When finished with dendrite in an asynchronous context - await :func:`dendrite_instance.aclose_session()`. - - Example:: - - async with dendrite_instance: - # Operations using dendrite - pass - # The session will be closed automatically after the above block - """ - if self._session: - await self._session.close() - self._session = None - - def _get_endpoint_url(self, target_axon, request_name): - """ - Constructs the endpoint URL for a network request to a target axon. - - This internal method generates the full HTTP URL for sending a request to the specified axon. The - URL includes the IP address and port of the target axon, along with the specific request name. It - differentiates between requests to the local system (using '0.0.0.0') and external systems. - - Args: - target_axon: The target axon object containing IP and port information. - request_name: The specific name of the request being made. - - Returns: - str: A string representing the complete HTTP URL for the request. - """ - endpoint = ( - f"0.0.0.0:{str(target_axon.port)}" - if target_axon.ip == str(self.external_ip) - else f"{target_axon.ip}:{str(target_axon.port)}" - ) - return f"http://{endpoint}/{request_name}" - - def log_exception(self, exception: Exception): - """ - Logs an exception with a unique identifier. - - This method generates a unique UUID for the error, extracts the error type, - and logs the error message using Bittensor's logging system. - - Args: - exception (Exception): The exception object to be logged. 
- - Returns: - None - """ - error_id = str(uuid.uuid4()) - error_type = exception.__class__.__name__ - bittensor.logging.error(f"{error_type}#{error_id}: {exception}") - - def process_error_message( - self, - synapse: Union[bittensor.Synapse, bittensor.StreamingSynapse], - request_name: str, - exception: Exception, - ) -> Union[bittensor.Synapse, bittensor.StreamingSynapse]: - """ - Handles exceptions that occur during network requests, updating the synapse with appropriate status codes and messages. - - This method interprets different types of exceptions and sets the corresponding status code and - message in the synapse object. It covers common network errors such as connection issues and timeouts. - - Args: - synapse: The synapse object associated with the request. - request_name: The name of the request during which the exception occurred. - exception: The exception object caught during the request. - - Returns: - bittensor.Synapse: The updated synapse object with the error status code and message. - - Note: - This method updates the synapse object in-place. - """ - - self.log_exception(exception) - - error_info = DENDRITE_ERROR_MAPPING.get(type(exception), DENDRITE_DEFAULT_ERROR) - status_code, status_message = error_info - - if status_code: - synapse.dendrite.status_code = status_code # type: ignore - elif isinstance(exception, aiohttp.ClientResponseError): - synapse.dendrite.status_code = str(exception.code) # type: ignore - - message = f"{status_message}: {str(exception)}" - if isinstance(exception, aiohttp.ClientConnectorError): - message = f"{status_message} at {synapse.axon.ip}:{synapse.axon.port}/{request_name}" # type: ignore - elif isinstance(exception, asyncio.TimeoutError): - message = f"{status_message} after {synapse.timeout} seconds" - - synapse.dendrite.status_message = message # type: ignore - - return synapse - - def _log_outgoing_request(self, synapse): - """ - Logs information about outgoing requests for debugging purposes. 
- - This internal method logs key details about each outgoing request, including the size of the - request, the name of the synapse, the axon's details, and a success indicator. This information - is crucial for monitoring and debugging network activity within the Bittensor network. - - To turn on debug messages, set the environment variable BITTENSOR_DEBUG to ``1``, or call the bittensor debug method like so:: - - import bittensor - bittensor.debug() - - Args: - synapse: The synapse object representing the request being sent. - """ - bittensor.logging.trace( - f"dendrite | --> | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | 0 | Success" - ) - - def _log_incoming_response(self, synapse): - """ - Logs information about incoming responses for debugging and monitoring. - - Similar to :func:`_log_outgoing_request`, this method logs essential details of the incoming responses, - including the size of the response, synapse name, axon details, status code, and status message. - This logging is vital for troubleshooting and understanding the network interactions in Bittensor. - - Args: - synapse: The synapse object representing the received response. - """ - bittensor.logging.trace( - f"dendrite | <-- | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | {synapse.dendrite.status_code} | {synapse.dendrite.status_message}" - ) - - def query( - self, *args, **kwargs - ) -> List[ - Union[AsyncGenerator[Any, Any], bittensor.Synapse, bittensor.StreamingSynapse] - ]: - """ - Makes a synchronous request to multiple target Axons and returns the server responses. - - Cleanup is automatically handled and sessions are closed upon completed requests. - - Args: - axons (Union[List[Union['bittensor.AxonInfo', 'bittensor.axon']], Union['bittensor.AxonInfo', 'bittensor.axon']]): - The list of target Axon information. 
- synapse (bittensor.Synapse, optional): The Synapse object. Defaults to :func:`bittensor.Synapse()`. - timeout (float, optional): The request timeout duration in seconds. - Defaults to ``12.0`` seconds. - Returns: - Union[bittensor.Synapse, List[bittensor.Synapse]]: If a single target axon is provided, returns the response from that axon. If multiple target axons are provided, returns a list of responses from all target axons. - """ - result = None - try: - loop = asyncio.get_event_loop() - result = loop.run_until_complete(self.forward(*args, **kwargs)) - except Exception: - new_loop = asyncio.new_event_loop() - asyncio.set_event_loop(new_loop) - result = loop.run_until_complete(self.forward(*args, **kwargs)) - new_loop.close() - finally: - self.close_session() - return result # type: ignore - - async def forward( - self, - axons: Union[ - List[Union[bittensor.AxonInfo, bittensor.axon]], - Union[bittensor.AxonInfo, bittensor.axon], - ], - synapse: bittensor.Synapse = bittensor.Synapse(), - timeout: float = 12, - deserialize: bool = True, - run_async: bool = True, - streaming: bool = False, - ) -> List[ - Union[AsyncGenerator[Any, Any], bittensor.Synapse, bittensor.StreamingSynapse] - ]: - """ - Asynchronously sends requests to one or multiple Axons and collates their responses. - - This function acts as a bridge for sending multiple requests concurrently or sequentially - based on the provided parameters. It checks the type of the target Axons, preprocesses - the requests, and then sends them off. After getting the responses, it processes and - collates them into a unified format. - - When querying an Axon that sends a single response, this function returns a Synapse object - containing the response data. If multiple Axons are queried, a list of Synapse objects is - returned, each containing the response from the corresponding Axon. - - For example:: - - >>> ... - >>> wallet = bittensor.wallet() # Initialize a wallet - >>> synapse = bittensor.Synapse(...) 
# Create a synapse object that contains query data - >>> dendrte = bittensor.dendrite(wallet = wallet) # Initialize a dendrite instance - >>> axons = metagraph.axons # Create a list of axons to query - >>> responses = await dendrite(axons, synapse) # Send the query to all axons and await the responses - - When querying an Axon that sends back data in chunks using the Dendrite, this function - returns an AsyncGenerator that yields each chunk as it is received. The generator can be - iterated over to process each chunk individually. - - For example:: - - >>> ... - >>> dendrte = bittensor.dendrite(wallet = wallet) - >>> async for chunk in dendrite.forward(axons, synapse, timeout, deserialize, run_async, streaming): - >>> # Process each chunk here - >>> print(chunk) - - Args: - axons (Union[List[Union['bittensor.AxonInfo', 'bittensor.axon']], Union['bittensor.AxonInfo', 'bittensor.axon']]): - The target Axons to send requests to. Can be a single Axon or a list of Axons. - synapse (bittensor.Synapse, optional): The Synapse object encapsulating the data. Defaults to a new :func:`bittensor.Synapse` instance. - timeout (float, optional): Maximum duration to wait for a response from an Axon in seconds. Defaults to ``12.0``. - deserialize (bool, optional): Determines if the received response should be deserialized. Defaults to ``True``. - run_async (bool, optional): If ``True``, sends requests concurrently. Otherwise, sends requests sequentially. Defaults to ``True``. - streaming (bool, optional): Indicates if the response is expected to be in streaming format. Defaults to ``False``. - - Returns: - Union[AsyncGenerator, bittensor.Synapse, List[bittensor.Synapse]]: If a single Axon is targeted, returns its response. - If multiple Axons are targeted, returns a list of their responses. 
- """ - is_list = True - # If a single axon is provided, wrap it in a list for uniform processing - if not isinstance(axons, list): - is_list = False - axons = [axons] - - # Check if synapse is an instance of the StreamingSynapse class or if streaming flag is set. - is_streaming_subclass = issubclass( - synapse.__class__, bittensor.StreamingSynapse - ) - if streaming != is_streaming_subclass: - bittensor.logging.warning( - f"Argument streaming is {streaming} while issubclass(synapse, StreamingSynapse) is {synapse.__class__.__name__}. This may cause unexpected behavior." - ) - streaming = is_streaming_subclass or streaming - - async def query_all_axons( - is_stream: bool, - ) -> Union[ - AsyncGenerator[Any, Any], bittensor.Synapse, bittensor.StreamingSynapse - ]: - """ - Handles the processing of requests to all targeted axons, accommodating both streaming and non-streaming responses. - - This function manages the concurrent or sequential dispatch of requests to a list of axons. - It utilizes the ``is_stream`` parameter to determine the mode of response handling (streaming - or non-streaming). For each axon, it calls ``single_axon_response`` and aggregates the responses. - - Args: - is_stream (bool): Flag indicating whether the axon responses are expected to be streamed. - If ``True``, responses are handled in streaming mode. - - Returns: - List[Union[AsyncGenerator, bittensor.Synapse, bittensor.StreamingSynapse]]: A list - containing the responses from each axon. The type of each response depends on the - streaming mode and the type of synapse used. - """ - - async def single_axon_response( - target_axon, - ) -> Union[ - AsyncGenerator[Any, Any], bittensor.Synapse, bittensor.StreamingSynapse - ]: - """ - Manages the request and response process for a single axon, supporting both streaming and non-streaming modes. - - This function is responsible for initiating a request to a single axon. 
Depending on the - ``is_stream`` flag, it either uses ``call_stream`` for streaming responses or ``call`` for - standard responses. The function handles the response processing, catering to the specifics - of streaming or non-streaming data. - - Args: - target_axon: The target axon object to which the request is to be sent. This object contains the necessary information like IP address and port to formulate the request. - - Returns: - Union[AsyncGenerator, bittensor.Synapse, bittensor.StreamingSynapse]: The response - from the targeted axon. In streaming mode, an AsyncGenerator is returned, yielding - data chunks. In non-streaming mode, a Synapse or StreamingSynapse object is returned - containing the response. - """ - if is_stream: - # If in streaming mode, return the async_generator - return self.call_stream( - target_axon=target_axon, - synapse=synapse.model_copy(), # type: ignore - timeout=timeout, - deserialize=deserialize, - ) - else: - # If not in streaming mode, simply call the axon and get the response. - return await self.call( - target_axon=target_axon, - synapse=synapse.model_copy(), # type: ignore - timeout=timeout, - deserialize=deserialize, - ) - - # If run_async flag is False, get responses one by one. - if not run_async: - return [ - await single_axon_response(target_axon) for target_axon in axons - ] # type: ignore - # If run_async flag is True, get responses concurrently using asyncio.gather(). - return await asyncio.gather( - *(single_axon_response(target_axon) for target_axon in axons) - ) # type: ignore - - # Get responses for all axons. 
- responses = await query_all_axons(streaming) - # Return the single response if only one axon was targeted, else return all responses - return responses[0] if len(responses) == 1 and not is_list else responses # type: ignore - - async def call( - self, - target_axon: Union[bittensor.AxonInfo, bittensor.axon], - synapse: bittensor.Synapse = bittensor.Synapse(), - timeout: float = 12.0, - deserialize: bool = True, - ) -> bittensor.Synapse: - """ - Asynchronously sends a request to a specified Axon and processes the response. - - This function establishes a connection with a specified Axon, sends the encapsulated - data through the Synapse object, waits for a response, processes it, and then - returns the updated Synapse object. - - Args: - target_axon (Union['bittensor.AxonInfo', 'bittensor.axon']): The target Axon to send the request to. - synapse (bittensor.Synapse, optional): The Synapse object encapsulating the data. Defaults to a new :func:`bittensor.Synapse` instance. - timeout (float, optional): Maximum duration to wait for a response from the Axon in seconds. Defaults to ``12.0``. - deserialize (bool, optional): Determines if the received response should be deserialized. Defaults to ``True``. - - Returns: - bittensor.Synapse: The Synapse object, updated with the response data from the Axon. 
- """ - - # Record start time - start_time = time.time() - target_axon = ( - target_axon.info() - if isinstance(target_axon, bittensor.axon) - else target_axon - ) - - # Build request endpoint from the synapse class - request_name = synapse.__class__.__name__ - url = self._get_endpoint_url(target_axon, request_name=request_name) - - # Preprocess synapse for making a request - synapse = self.preprocess_synapse_for_request(target_axon, synapse, timeout) - - try: - # Log outgoing request - self._log_outgoing_request(synapse) - - # Make the HTTP POST request - async with (await self.session).post( - url, - headers=synapse.to_headers(), - json=synapse.model_dump(), - timeout=ClientTimeout(total=timeout), - ) as response: - # Extract the JSON response from the server - json_response = await response.json() - # Process the server response and fill synapse - self.process_server_response(response, json_response, synapse) - - # Set process time and log the response - synapse.dendrite.process_time = str(time.time() - start_time) # type: ignore - - except Exception as e: - synapse = self.process_error_message(synapse, request_name, e) - - finally: - self._log_incoming_response(synapse) - - # Log synapse event history - self.synapse_history.append( - bittensor.Synapse.from_headers(synapse.to_headers()) - ) - - # Return the updated synapse object after deserializing if requested - return synapse.deserialize() if deserialize else synapse - - async def call_stream( - self, - target_axon: Union[bittensor.AxonInfo, bittensor.axon], - synapse: bittensor.StreamingSynapse = bittensor.Synapse(), # type: ignore - timeout: float = 12.0, - deserialize: bool = True, - ) -> AsyncGenerator[Any, Any]: - """ - Sends a request to a specified Axon and yields streaming responses. - - Similar to ``call``, but designed for scenarios where the Axon sends back data in - multiple chunks or streams. The function yields each chunk as it is received. 
This is - useful for processing large responses piece by piece without waiting for the entire - data to be transmitted. - - Args: - target_axon (Union['bittensor.AxonInfo', 'bittensor.axon']): The target Axon to send the request to. - synapse (bittensor.Synapse, optional): The Synapse object encapsulating the data. Defaults to a new :func:`bittensor.Synapse` instance. - timeout (float, optional): Maximum duration to wait for a response (or a chunk of the response) from the Axon in seconds. Defaults to ``12.0``. - deserialize (bool, optional): Determines if each received chunk should be deserialized. Defaults to ``True``. - - Yields: - object: Each yielded object contains a chunk of the arbitrary response data from the Axon. - bittensor.Synapse: After the AsyncGenerator has been exhausted, yields the final filled Synapse. - """ - - # Record start time - start_time = time.time() - target_axon = ( - target_axon.info() - if isinstance(target_axon, bittensor.axon) - else target_axon - ) - - # Build request endpoint from the synapse class - request_name = synapse.__class__.__name__ - endpoint = ( - f"0.0.0.0:{str(target_axon.port)}" - if target_axon.ip == str(self.external_ip) - else f"{target_axon.ip}:{str(target_axon.port)}" - ) - url = f"http://{endpoint}/{request_name}" - - # Preprocess synapse for making a request - synapse = self.preprocess_synapse_for_request(target_axon, synapse, timeout) # type: ignore - - try: - # Log outgoing request - self._log_outgoing_request(synapse) - - # Make the HTTP POST request - async with (await self.session).post( - url, - headers=synapse.to_headers(), - json=synapse.model_dump(), - timeout=ClientTimeout(total=timeout), - ) as response: - # Use synapse subclass' process_streaming_response method to yield the response chunks - async for chunk in synapse.process_streaming_response(response): # type: ignore - yield chunk # Yield each chunk as it's processed - json_response = synapse.extract_response_json(response) - - # Process the 
server response - self.process_server_response(response, json_response, synapse) - - # Set process time and log the response - synapse.dendrite.process_time = str(time.time() - start_time) # type: ignore - - except Exception as e: - synapse = self.process_error_message(synapse, request_name, e) # type: ignore - - finally: - self._log_incoming_response(synapse) - - # Log synapse event history - self.synapse_history.append( - bittensor.Synapse.from_headers(synapse.to_headers()) - ) - - # Return the updated synapse object after deserializing if requested - if deserialize: - yield synapse.deserialize() - else: - yield synapse - - def preprocess_synapse_for_request( - self, - target_axon_info: bittensor.AxonInfo, - synapse: bittensor.Synapse, - timeout: float = 12.0, - ) -> bittensor.Synapse: - """ - Preprocesses the synapse for making a request. This includes building - headers for Dendrite and Axon and signing the request. - - Args: - target_axon_info (bittensor.AxonInfo): The target axon information. - synapse (bittensor.Synapse): The synapse object to be preprocessed. - timeout (float, optional): The request timeout duration in seconds. - Defaults to ``12.0`` seconds. - - Returns: - bittensor.Synapse: The preprocessed synapse. 
- """ - # Set the timeout for the synapse - synapse.timeout = timeout - synapse.dendrite = bittensor.TerminalInfo( - ip=self.external_ip, - version=bittensor.__version_as_int__, - nonce=time.time_ns(), - uuid=self.uuid, - hotkey=self.keypair.ss58_address, - ) - - # Build the Axon headers using the target axon's details - synapse.axon = bittensor.TerminalInfo( - ip=target_axon_info.ip, - port=target_axon_info.port, - hotkey=target_axon_info.hotkey, - ) - - # Sign the request using the dendrite, axon info, and the synapse body hash - message = f"{synapse.dendrite.nonce}.{synapse.dendrite.hotkey}.{synapse.axon.hotkey}.{synapse.dendrite.uuid}.{synapse.body_hash}" - synapse.dendrite.signature = f"0x{self.keypair.sign(message).hex()}" - - return synapse - - def process_server_response( - self, - server_response: aiohttp.ClientResponse, - json_response: dict, - local_synapse: bittensor.Synapse, - ): - """ - Processes the server response, updates the local synapse state with the - server's state and merges headers set by the server. - - Args: - server_response (object): The `aiohttp `_ response object from the server. - json_response (dict): The parsed JSON response from the server. - local_synapse (bittensor.Synapse): The local synapse object to be updated. - - Raises: - None: But errors in attribute setting are silently ignored. - """ - # Check if the server responded with a successful status code - if server_response.status == 200: - # If the response is successful, overwrite local synapse state with - # server's state only if the protocol allows mutation. 
To prevent overwrites, - # the protocol must set Frozen = True - server_synapse = local_synapse.__class__(**json_response) - for key in local_synapse.model_dump().keys(): - try: - # Set the attribute in the local synapse from the corresponding - # attribute in the server synapse - setattr(local_synapse, key, getattr(server_synapse, key)) - except Exception: - # Ignore errors during attribute setting - pass - else: - # If the server responded with an error, update the local synapse state - if local_synapse.axon is None: - local_synapse.axon = bittensor.TerminalInfo() - local_synapse.axon.status_code = server_response.status - local_synapse.axon.status_message = json_response.get("message") - - # Extract server headers and overwrite None values in local synapse headers - server_headers = bittensor.Synapse.from_headers(server_response.headers) # type: ignore - - # Merge dendrite headers - local_synapse.dendrite.__dict__.update( - { - **local_synapse.dendrite.model_dump(exclude_none=True), # type: ignore - **server_headers.dendrite.model_dump(exclude_none=True), # type: ignore - } - ) - - # Merge axon headers - local_synapse.axon.__dict__.update( - { - **local_synapse.axon.model_dump(exclude_none=True), # type: ignore - **server_headers.axon.model_dump(exclude_none=True), # type: ignore - } - ) - - # Update the status code and status message of the dendrite to match the axon - local_synapse.dendrite.status_code = local_synapse.axon.status_code # type: ignore - local_synapse.dendrite.status_message = local_synapse.axon.status_message # type: ignore - - def __str__(self) -> str: - """ - Returns a string representation of the Dendrite object. - - Returns: - str: The string representation of the Dendrite object in the format :func:`dendrite()`. - """ - return "dendrite({})".format(self.keypair.ss58_address) - - def __repr__(self) -> str: - """ - Returns a string representation of the Dendrite object, acting as a fallback for :func:`__str__()`. 
- - Returns: - str: The string representation of the Dendrite object in the format :func:`dendrite()`. - """ - return self.__str__() - - async def __aenter__(self): - """ - Asynchronous context manager entry method. - - Enables the use of the ``async with`` statement with the Dendrite instance. When entering the context, - the current instance of the class is returned, making it accessible within the asynchronous context. - - Returns: - Dendrite: The current instance of the Dendrite class. - - Usage:: - - async with Dendrite() as dendrite: - await dendrite.some_async_method() - """ - return self - - async def __aexit__(self, exc_type, exc_value, traceback): - """ - Asynchronous context manager exit method. - - Ensures proper cleanup when exiting the ``async with`` context. This method will close the `aiohttp `_ client session - asynchronously, releasing any tied resources. - - Args: - exc_type (Type[BaseException], optional): The type of exception that was raised. - exc_value (BaseException, optional): The instance of exception that was raised. - traceback (TracebackType, optional): A traceback object encapsulating the call stack at the point where the exception was raised. - - Usage:: - - async with bt.dendrite( wallet ) as dendrite: - await dendrite.some_async_method() - - Note: - This automatically closes the session by calling :func:`__aexit__` after the context closes. - """ - await self.aclose_session() - - def __del__(self): - """ - Dendrite destructor. - - This method is invoked when the Dendrite instance is about to be destroyed. The destructor ensures that the - aiohttp client session is closed before the instance is fully destroyed, releasing any remaining resources. - - Note: - Relying on the destructor for cleanup can be unpredictable. It is recommended to explicitly close sessions using the provided methods or the ``async with`` context manager. - - Usage:: - - dendrite = Dendrite() - # ... some operations ... 
- del dendrite # This will implicitly invoke the __del__ method and close the session. - """ - self.close_session() - - -# For back-compatibility with torch -BaseModel: Union["torch.nn.Module", object] = torch.nn.Module if use_torch() else object - - -class dendrite(DendriteMixin, BaseModel): # type: ignore - def __init__( - self, wallet: Optional[Union[bittensor.wallet, bittensor.Keypair]] = None - ): - if use_torch(): - torch.nn.Module.__init__(self) - DendriteMixin.__init__(self, wallet) - - -if not use_torch(): - - async def call(self, *args, **kwargs): - return await self.forward(*args, **kwargs) - - dendrite.__call__ = call diff --git a/bittensor/errors.py b/bittensor/errors.py deleted file mode 100644 index b8366ee681..0000000000 --- a/bittensor/errors.py +++ /dev/null @@ -1,185 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
-from __future__ import annotations - -import typing - -if typing.TYPE_CHECKING: - import bittensor - - -class ChainError(BaseException): - r"""Base error for any chain related errors.""" - - pass - - -class ChainConnectionError(ChainError): - r"""Error for any chain connection related errors.""" - - pass - - -class ChainTransactionError(ChainError): - r"""Error for any chain transaction related errors.""" - - pass - - -class ChainQueryError(ChainError): - r"""Error for any chain query related errors.""" - - pass - - -class StakeError(ChainTransactionError): - r"""Error raised when a stake transaction fails.""" - - pass - - -class UnstakeError(ChainTransactionError): - r"""Error raised when an unstake transaction fails.""" - - pass - - -class IdentityError(ChainTransactionError): - r"""Error raised when an identity transaction fails.""" - - pass - - -class NominationError(ChainTransactionError): - r"""Error raised when a nomination transaction fails.""" - - pass - - -class TakeError(ChainTransactionError): - r"""Error raised when a increase / decrease take transaction fails.""" - - pass - - -class TransferError(ChainTransactionError): - r"""Error raised when a transfer transaction fails.""" - - pass - - -class RegistrationError(ChainTransactionError): - r"""Error raised when a neuron registration transaction fails.""" - - pass - - -class NotRegisteredError(ChainTransactionError): - r"""Error raised when a neuron is not registered, and the transaction requires it to be.""" - - pass - - -class NotDelegateError(StakeError): - r"""Error raised when a hotkey you are trying to stake to is not a delegate.""" - - pass - - -class KeyFileError(Exception): - """Error thrown when the keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid.""" - - pass - - -class MetadataError(ChainTransactionError): - r"""Error raised when metadata commitment transaction fails.""" - - pass - - -class InvalidRequestNameError(Exception): - r"""This exception is 
raised when the request name is invalid. Ususally indicates a broken URL.""" - - pass - - -class SynapseException(Exception): - def __init__( - self, message="Synapse Exception", synapse: "bittensor.Synapse" | None = None - ): - self.message = message - self.synapse = synapse - super().__init__(self.message) - - -class UnknownSynapseError(SynapseException): - r"""This exception is raised when the request name is not found in the Axon's forward_fns dictionary.""" - - pass - - -class SynapseParsingError(Exception): - r"""This exception is raised when the request headers are unable to be parsed into the synapse type.""" - - pass - - -class NotVerifiedException(SynapseException): - r"""This exception is raised when the request is not verified.""" - - pass - - -class BlacklistedException(SynapseException): - r"""This exception is raised when the request is blacklisted.""" - - pass - - -class PriorityException(SynapseException): - r"""This exception is raised when the request priority is not met.""" - - pass - - -class PostProcessException(SynapseException): - r"""This exception is raised when the response headers cannot be updated.""" - - pass - - -class RunException(SynapseException): - r"""This exception is raised when the requested function cannot be executed. Indicates a server error.""" - - pass - - -class InternalServerError(SynapseException): - r"""This exception is raised when the requested function fails on the server. 
Indicates a server error.""" - - pass - - -class SynapseDendriteNoneException(SynapseException): - def __init__( - self, - message="Synapse Dendrite is None", - synapse: "bittensor.Synapse" | None = None, - ): - self.message = message - super().__init__(self.message, synapse) diff --git a/bittensor/extrinsics/__init__.py b/bittensor/extrinsics/__init__.py deleted file mode 100644 index 5780b2ee82..0000000000 --- a/bittensor/extrinsics/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
diff --git a/bittensor/extrinsics/commit_weights.py b/bittensor/extrinsics/commit_weights.py deleted file mode 100644 index a9192952ef..0000000000 --- a/bittensor/extrinsics/commit_weights.py +++ /dev/null @@ -1,127 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -"""Module commit weights and reveal weights extrinsic.""" - -from typing import Tuple, List - -from rich.prompt import Confirm - -import bittensor - -from bittensor.utils import format_error_message - - -def commit_weights_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - netuid: int, - commit_hash: str, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> Tuple[bool, str]: - """ - Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. 
- This function is a wrapper around the `_do_commit_weights` method, handling user prompts and error messages. - Args: - subtensor (bittensor.subtensor): The subtensor instance used for blockchain interaction. - wallet (bittensor.wallet): The wallet associated with the neuron committing the weights. - netuid (int): The unique identifier of the subnet. - commit_hash (str): The hash of the neuron's weights to be committed. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - Returns: - Tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string - value describing the success or potential error. - This function provides a user-friendly interface for committing weights to the Bittensor blockchain, ensuring proper - error handling and user interaction when required. - """ - if prompt and not Confirm.ask(f"Would you like to commit weights?"): - return False, "User cancelled the operation." - - success, error_message = subtensor._do_commit_weights( - wallet=wallet, - netuid=netuid, - commit_hash=commit_hash, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if success: - bittensor.logging.info("Successfully committed weights.") - return True, "Successfully committed weights." 
- else: - bittensor.logging.error(f"Failed to commit weights: {error_message}") - return False, format_error_message(error_message) - - -def reveal_weights_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - netuid: int, - uids: List[int], - weights: List[int], - salt: List[int], - version_key: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> Tuple[bool, str]: - """ - Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. - This function is a wrapper around the `_do_reveal_weights` method, handling user prompts and error messages. - Args: - subtensor (bittensor.subtensor): The subtensor instance used for blockchain interaction. - wallet (bittensor.wallet): The wallet associated with the neuron revealing the weights. - netuid (int): The unique identifier of the subnet. - uids (List[int]): List of neuron UIDs for which weights are being revealed. - weights (List[int]): List of weight values corresponding to each UID. - salt (List[int]): List of salt values corresponding to the hash function. - version_key (int): Version key for compatibility with the network. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - Returns: - Tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string - value describing the success or potential error. - This function provides a user-friendly interface for revealing weights on the Bittensor blockchain, ensuring proper - error handling and user interaction when required. - """ - - if prompt and not Confirm.ask(f"Would you like to reveal weights?"): - return False, "User cancelled the operation." 
- - success, error_message = subtensor._do_reveal_weights( - wallet=wallet, - netuid=netuid, - uids=uids, - values=weights, - salt=salt, - version_key=version_key, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if success: - bittensor.logging.info("Successfully revealed weights.") - return True, "Successfully revealed weights." - else: - bittensor.logging.error(f"Failed to reveal weights: {error_message}") - return False, error_message diff --git a/bittensor/extrinsics/delegation.py b/bittensor/extrinsics/delegation.py deleted file mode 100644 index e61a97efb4..0000000000 --- a/bittensor/extrinsics/delegation.py +++ /dev/null @@ -1,528 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import logging -import bittensor -from ..errors import ( - NominationError, - NotDelegateError, - NotRegisteredError, - StakeError, - TakeError, -) -from rich.prompt import Confirm -from typing import Union, Optional -from bittensor.utils.balance import Balance -from bittensor.btlogging.defines import BITTENSOR_LOGGER_NAME - -logger = logging.getLogger(BITTENSOR_LOGGER_NAME) - - -def nominate_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - wait_for_finalization: bool = False, - wait_for_inclusion: bool = True, -) -> bool: - r"""Becomes a delegate for the hotkey. - - Args: - wallet (bittensor.wallet): The wallet to become a delegate for. - Returns: - success (bool): ``True`` if the transaction was successful. - """ - # Unlock the coldkey. - - try: - wallet.coldkey - - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - wallet.hotkey - # Check if the hotkey is already a delegate. 
- if subtensor.is_hotkey_delegate(wallet.hotkey.ss58_address): - logger.error( - "Hotkey {} is already a delegate.".format(wallet.hotkey.ss58_address) - ) - return False - - if not subtensor.is_hotkey_registered_any(wallet.hotkey.ss58_address): - logger.error( - "Hotkey {} is not registered to any network".format( - wallet.hotkey.ss58_address - ) - ) - return False - - with bittensor.__console__.status( - ":satellite: Sending nominate call on [white]{}[/white] ...".format( - subtensor.network - ) - ): - try: - success = subtensor._do_nominate( - wallet=wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if success is True: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - bittensor.logging.success( - prefix="Become Delegate", - suffix="Finalized: " + str(success), - ) - - # Raises NominationError if False - return success - - except Exception as e: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: error:{}".format(e) - ) - bittensor.logging.warning( - prefix="Set weights", suffix="Failed: " + str(e) - ) - except NominationError as e: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: error:{}".format(e) - ) - bittensor.logging.warning( - prefix="Set weights", suffix="Failed: " + str(e) - ) - - return False - - -def delegate_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - delegate_ss58: Optional[str] = None, - amount: Optional[Union[Balance, float]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> bool: - r"""Delegates the specified amount of stake to the passed delegate. - - Args: - wallet (bittensor.wallet): Bittensor wallet object. - delegate_ss58 (Optional[str]): The ``ss58`` address of the delegate. - amount (Union[Balance, float]): Amount to stake as bittensor balance, or ``float`` interpreted as Tao. 
- wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - - Raises: - NotRegisteredError: If the wallet is not registered on the chain. - NotDelegateError: If the hotkey is not a delegate on the chain. - """ - # Decrypt keys, - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - if not subtensor.is_hotkey_delegate(delegate_ss58): - raise NotDelegateError("Hotkey: {} is not a delegate.".format(delegate_ss58)) - - # Get state. - my_prev_coldkey_balance = subtensor.get_balance(wallet.coldkey.ss58_address) - delegate_take = subtensor.get_delegate_take(delegate_ss58) - delegate_owner = subtensor.get_hotkey_owner(delegate_ss58) - my_prev_delegated_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, hotkey_ss58=delegate_ss58 - ) - - # Convert to bittensor.Balance - if amount is None: - # Stake it all. - staking_balance = bittensor.Balance.from_tao(my_prev_coldkey_balance.tao) - elif not isinstance(amount, bittensor.Balance): - staking_balance = bittensor.Balance.from_tao(amount) - else: - staking_balance = amount - - # Remove existential balance to keep key alive. 
- if staking_balance > bittensor.Balance.from_rao(1000): - staking_balance = staking_balance - bittensor.Balance.from_rao(1000) - else: - staking_balance = staking_balance - - # Check enough balance to stake. - if staking_balance > my_prev_coldkey_balance: - bittensor.__console__.print( - ":cross_mark: [red]Not enough balance[/red]:[bold white]\n balance:{}\n amount: {}\n coldkey: {}[/bold white]".format( - my_prev_coldkey_balance, staking_balance, wallet.name - ) - ) - return False - - # Ask before moving on. - if prompt: - if not Confirm.ask( - "Do you want to delegate:[bold white]\n amount: {}\n to: {}\n owner: {}[/bold white]".format( - staking_balance, delegate_ss58, delegate_owner - ) - ): - return False - - try: - with bittensor.__console__.status( - ":satellite: Staking to: [bold white]{}[/bold white] ...".format( - subtensor.network - ) - ): - staking_response: bool = subtensor._do_delegation( - wallet=wallet, - delegate_ss58=delegate_ss58, - amount=staking_balance, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if staking_response is True: # If we successfully staked. - # We only wait here if we expect finalization. 
- if not wait_for_finalization and not wait_for_inclusion: - return True - - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - with bittensor.__console__.status( - ":satellite: Checking Balance on: [white]{}[/white] ...".format( - subtensor.network - ) - ): - new_balance = subtensor.get_balance(address=wallet.coldkey.ss58_address) - block = subtensor.get_current_block() - new_delegate_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, - hotkey_ss58=delegate_ss58, - block=block, - ) # Get current stake - - bittensor.__console__.print( - "Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - my_prev_coldkey_balance, new_balance - ) - ) - bittensor.__console__.print( - "Stake:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - my_prev_delegated_stake, new_delegate_stake - ) - ) - return True - else: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: Error unknown." - ) - return False - - except NotRegisteredError as e: - bittensor.__console__.print( - ":cross_mark: [red]Hotkey: {} is not registered.[/red]".format( - wallet.hotkey_str - ) - ) - return False - except StakeError as e: - bittensor.__console__.print(":cross_mark: [red]Stake Error: {}[/red]".format(e)) - return False - - -def undelegate_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - delegate_ss58: Optional[str] = None, - amount: Optional[Union[Balance, float]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> bool: - r"""Un-delegates stake from the passed delegate. - - Args: - wallet (bittensor.wallet): Bittensor wallet object. - delegate_ss58 (Optional[str]): The ``ss58`` address of the delegate. - amount (Union[Balance, float]): Amount to unstake as bittensor balance, or ``float`` interpreted as Tao. 
- wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - - Raises: - NotRegisteredError: If the wallet is not registered on the chain. - NotDelegateError: If the hotkey is not a delegate on the chain. - """ - # Decrypt keys, - wallet.coldkey - if not subtensor.is_hotkey_delegate(delegate_ss58): - raise NotDelegateError("Hotkey: {} is not a delegate.".format(delegate_ss58)) - - # Get state. - my_prev_coldkey_balance = subtensor.get_balance(wallet.coldkey.ss58_address) - delegate_take = subtensor.get_delegate_take(delegate_ss58) - delegate_owner = subtensor.get_hotkey_owner(delegate_ss58) - my_prev_delegated_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, hotkey_ss58=delegate_ss58 - ) - - # Convert to bittensor.Balance - if amount is None: - # Stake it all. - unstaking_balance = bittensor.Balance.from_tao(my_prev_delegated_stake.tao) - - elif not isinstance(amount, bittensor.Balance): - unstaking_balance = bittensor.Balance.from_tao(amount) - - else: - unstaking_balance = amount - - # Check enough stake to unstake. - if unstaking_balance > my_prev_delegated_stake: - bittensor.__console__.print( - ":cross_mark: [red]Not enough delegated stake[/red]:[bold white]\n stake:{}\n amount: {}\n coldkey: {}[/bold white]".format( - my_prev_delegated_stake, unstaking_balance, wallet.name - ) - ) - return False - - # Ask before moving on. 
- if prompt: - if not Confirm.ask( - "Do you want to un-delegate:[bold white]\n amount: {}\n from: {}\n owner: {}[/bold white]".format( - unstaking_balance, delegate_ss58, delegate_owner - ) - ): - return False - - try: - with bittensor.__console__.status( - ":satellite: Unstaking from: [bold white]{}[/bold white] ...".format( - subtensor.network - ) - ): - staking_response: bool = subtensor._do_undelegation( - wallet=wallet, - delegate_ss58=delegate_ss58, - amount=unstaking_balance, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if staking_response is True: # If we successfully staked. - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - with bittensor.__console__.status( - ":satellite: Checking Balance on: [white]{}[/white] ...".format( - subtensor.network - ) - ): - new_balance = subtensor.get_balance(address=wallet.coldkey.ss58_address) - block = subtensor.get_current_block() - new_delegate_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, - hotkey_ss58=delegate_ss58, - block=block, - ) # Get current stake - - bittensor.__console__.print( - "Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - my_prev_coldkey_balance, new_balance - ) - ) - bittensor.__console__.print( - "Stake:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - my_prev_delegated_stake, new_delegate_stake - ) - ) - return True - else: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: Error unknown." 
- ) - return False - - except NotRegisteredError as e: - bittensor.__console__.print( - ":cross_mark: [red]Hotkey: {} is not registered.[/red]".format( - wallet.hotkey_str - ) - ) - return False - except StakeError as e: - bittensor.__console__.print(":cross_mark: [red]Stake Error: {}[/red]".format(e)) - return False - - -def decrease_take_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey_ss58: Optional[str] = None, - take: int = 0, - wait_for_finalization: bool = False, - wait_for_inclusion: bool = True, -) -> bool: - r"""Decrease delegate take for the hotkey. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - hotkey_ss58 (Optional[str]): - The ``ss58`` address of the hotkey account to stake to defaults to the wallet's hotkey. - take (float): - The ``take`` of the hotkey. - Returns: - success (bool): ``True`` if the transaction was successful. - """ - # Unlock the coldkey. - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - wallet.hotkey - - with bittensor.__console__.status( - ":satellite: Sending decrease_take_extrinsic call on [white]{}[/white] ...".format( - subtensor.network - ) - ): - try: - success = subtensor._do_decrease_take( - wallet=wallet, - hotkey_ss58=hotkey_ss58, - take=take, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if success is True: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - bittensor.logging.success( - prefix="Decrease Delegate Take", - suffix="Finalized: " + str(success), - ) - - return success - - except (TakeError, Exception) as e: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: error:{}".format(e) - ) - bittensor.logging.warning( - prefix="Set weights", 
suffix="Failed: " + str(e) - ) - - return False - - -def increase_take_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey_ss58: Optional[str] = None, - take: int = 0, - wait_for_finalization: bool = False, - wait_for_inclusion: bool = True, -) -> bool: - r"""Increase delegate take for the hotkey. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - hotkey_ss58 (Optional[str]): - The ``ss58`` address of the hotkey account to stake to defaults to the wallet's hotkey. - take (float): - The ``take`` of the hotkey. - Returns: - success (bool): ``True`` if the transaction was successful. - """ - # Unlock the coldkey. - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - wallet.hotkey - - with bittensor.__console__.status( - ":satellite: Sending increase_take_extrinsic call on [white]{}[/white] ...".format( - subtensor.network - ) - ): - try: - success = subtensor._do_increase_take( - wallet=wallet, - hotkey_ss58=hotkey_ss58, - take=take, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if success is True: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - bittensor.logging.success( - prefix="Increase Delegate Take", - suffix="Finalized: " + str(success), - ) - - return success - - except Exception as e: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: error:{}".format(e) - ) - bittensor.logging.warning( - prefix="Set weights", suffix="Failed: " + str(e) - ) - except TakeError as e: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: error:{}".format(e) - ) - bittensor.logging.warning( - prefix="Set weights", suffix="Failed: " + str(e) - ) - - return False diff --git a/bittensor/extrinsics/network.py 
b/bittensor/extrinsics/network.py deleted file mode 100644 index 5aecaa459a..0000000000 --- a/bittensor/extrinsics/network.py +++ /dev/null @@ -1,250 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import time - -import substrateinterface -from rich.prompt import Confirm - -import bittensor -from bittensor.utils import format_error_message -from ..commands.network import HYPERPARAMS - - -def _find_event_attributes_in_extrinsic_receipt( - response: "substrateinterface.base.ExtrinsicReceipt", event_name: str -) -> list: - """ - Searches for the attributes of a specified event within an extrinsic receipt. - - Args: - response (substrateinterface.base.ExtrinsicReceipt): The receipt of the extrinsic to be searched. - event_name (str): The name of the event to search for. - - Returns: - list: A list of attributes for the specified event. 
Returns [-1] if the event is not found. - """ - for event in response.triggered_events: - # Access the event details - event_details = event.value["event"] - # Check if the event_id is 'NetworkAdded' - if event_details["event_id"] == event_name: - # Once found, you can access the attributes of the event_name - return event_details["attributes"] - return [-1] - - -def register_subnetwork_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, -) -> bool: - r"""Registers a new subnetwork. - - Args: - wallet (bittensor.wallet): - bittensor wallet object. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If true, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or included in the block. - If we did not wait for finalization / inclusion, the response is ``true``. - """ - your_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - burn_cost = bittensor.utils.balance.Balance(subtensor.get_subnet_burn_cost()) - if burn_cost > your_balance: - bittensor.__console__.print( - f"Your balance of: [green]{your_balance}[/green] is not enough to pay the subnet lock cost of: [green]{burn_cost}[/green]" - ) - return False - - if prompt: - bittensor.__console__.print(f"Your balance is: [green]{your_balance}[/green]") - if not Confirm.ask( - f"Do you want to register a subnet for [green]{ burn_cost }[/green]?" 
- ): - return False - - try: - wallet.coldkey # unlock coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - with bittensor.__console__.status(":satellite: Registering subnet..."): - with subtensor.substrate as substrate: - # create extrinsic call - call = substrate.compose_call( - call_module="SubtensorModule", - call_function="register_network", - call_params={"immunity_period": 0, "reg_allowed": True}, - ) - extrinsic = substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - - # process if registration successful - response.process_events() - if not response.is_success: - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]: {format_error_message(response.error_message)}" - ) - time.sleep(0.5) - - # Successful registration, final check for membership - else: - attributes = _find_event_attributes_in_extrinsic_receipt( - response, "NetworkAdded" - ) - bittensor.__console__.print( - f":white_heavy_check_mark: [green]Registered subnetwork with netuid: {attributes[0]}[/green]" - ) - return True - - -def set_hyperparameter_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - netuid: int, - parameter: str, - value, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, -) -> bool: - r"""Sets a hyperparameter for a specific subnetwork. - - Args: - wallet (bittensor.wallet): - bittensor wallet object. - netuid (int): - Subnetwork ``uid``. - parameter (str): - Hyperparameter name. - value (any): - New hyperparameter value. 
- wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or included in the block. - If we did not wait for finalization / inclusion, the response is ``true``. - """ - if subtensor.get_subnet_owner(netuid) != wallet.coldkeypub.ss58_address: - bittensor.__console__.print( - ":cross_mark: [red]This wallet doesn't own the specified subnet.[/red]" - ) - return False - - wallet.coldkey # unlock coldkey - - extrinsic = HYPERPARAMS.get(parameter) - if extrinsic is None: - bittensor.__console__.print( - ":cross_mark: [red]Invalid hyperparameter specified.[/red]" - ) - return False - - with bittensor.__console__.status( - f":satellite: Setting hyperparameter {parameter} to {value} on subnet: {netuid} ..." 
- ): - with subtensor.substrate as substrate: - extrinsic_params = substrate.get_metadata_call_function( - "AdminUtils", extrinsic - ) - call_params = {"netuid": netuid} - - # if input value is a list, iterate through the list and assign values - if isinstance(value, list): - # Create an iterator for the list of values - value_iterator = iter(value) - # Iterate over all value arguments and add them to the call_params dictionary - for value_argument in extrinsic_params["fields"]: - if "netuid" not in str(value_argument["name"]): - # Assign the next value from the iterator - try: - call_params[str(value_argument["name"])] = next( - value_iterator - ) - except StopIteration: - raise ValueError( - "Not enough values provided in the list for all parameters" - ) - - else: - value_argument = extrinsic_params["fields"][ - len(extrinsic_params["fields"]) - 1 - ] - call_params[str(value_argument["name"])] = value - - # create extrinsic call - call = substrate.compose_call( - call_module="AdminUtils", - call_function=extrinsic, - call_params=call_params, - ) - - extrinsic = substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # We only wait here if we expect finalization. 
- if not wait_for_finalization and not wait_for_inclusion: - return True - - # process if registration successful - response.process_events() - if not response.is_success: - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]: {format_error_message(response.error_message)}" - ) - time.sleep(0.5) - - # Successful registration, final check for membership - else: - bittensor.__console__.print( - f":white_heavy_check_mark: [green]Hyper parameter {parameter} changed to {value}[/green]" - ) - return True diff --git a/bittensor/extrinsics/prometheus.py b/bittensor/extrinsics/prometheus.py deleted file mode 100644 index 97f7c17714..0000000000 --- a/bittensor/extrinsics/prometheus.py +++ /dev/null @@ -1,140 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import bittensor - -import json -import bittensor.utils.networking as net - - -def prometheus_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - port: int, - netuid: int, - ip: int = None, - wait_for_inclusion: bool = False, - wait_for_finalization=True, -) -> bool: - r"""Subscribes an Bittensor endpoint to the substensor chain. - - Args: - subtensor (bittensor.subtensor): - Bittensor subtensor object. - wallet (bittensor.wallet): - Bittensor wallet object. - ip (str): - Endpoint host port i.e., ``192.122.31.4``. - port (int): - Endpoint port number i.e., `9221`. - netuid (int): - Network `uid` to serve on. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. - If we did not wait for finalization / inclusion, the response is ``true``. - """ - - # ---- Get external ip ---- - if ip is None: - try: - external_ip = net.get_external_ip() - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Found external ip: {}[/green]".format( - external_ip - ) - ) - bittensor.logging.success( - prefix="External IP", suffix="{}".format(external_ip) - ) - except Exception as E: - raise RuntimeError( - "Unable to attain your external ip. Check your internet connection. 
error: {}".format( - E - ) - ) from E - else: - external_ip = ip - - call_params: "bittensor.PrometheusServeCallParams" = { - "version": bittensor.__version_as_int__, - "ip": net.ip_to_int(external_ip), - "port": port, - "ip_type": net.ip_version(external_ip), - } - - with bittensor.__console__.status(":satellite: Checking Prometheus..."): - neuron = subtensor.get_neuron_for_pubkey_and_subnet( - wallet.hotkey.ss58_address, netuid=netuid - ) - neuron_up_to_date = not neuron.is_null and call_params == { - "version": neuron.prometheus_info.version, - "ip": net.ip_to_int(neuron.prometheus_info.ip), - "port": neuron.prometheus_info.port, - "ip_type": neuron.prometheus_info.ip_type, - } - - if neuron_up_to_date: - bittensor.__console__.print( - f":white_heavy_check_mark: [green]Prometheus already Served[/green]\n" - f"[green not bold]- Status: [/green not bold] |" - f"[green not bold] ip: [/green not bold][white not bold]{net.int_to_ip(neuron.prometheus_info.ip)}[/white not bold] |" - f"[green not bold] ip_type: [/green not bold][white not bold]{neuron.prometheus_info.ip_type}[/white not bold] |" - f"[green not bold] port: [/green not bold][white not bold]{neuron.prometheus_info.port}[/white not bold] | " - f"[green not bold] version: [/green not bold][white not bold]{neuron.prometheus_info.version}[/white not bold] |" - ) - - bittensor.__console__.print( - ":white_heavy_check_mark: [white]Prometheus already served.[/white]".format( - external_ip - ) - ) - return True - - # Add netuid, not in prometheus_info - call_params["netuid"] = netuid - - with bittensor.__console__.status( - ":satellite: Serving prometheus on: [white]{}:{}[/white] ...".format( - subtensor.network, netuid - ) - ): - success, err = subtensor._do_serve_prometheus( - wallet=wallet, - call_params=call_params, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - if wait_for_inclusion or wait_for_finalization: - if success is True: - bittensor.__console__.print( 
- ":white_heavy_check_mark: [green]Served prometheus[/green]\n [bold white]{}[/bold white]".format( - json.dumps(call_params, indent=4, sort_keys=True) - ) - ) - return True - else: - bittensor.__console__.print(f":cross_mark: [red]Failed[/red]: {err}") - return False - else: - return True diff --git a/bittensor/extrinsics/registration.py b/bittensor/extrinsics/registration.py deleted file mode 100644 index 40bde3fc89..0000000000 --- a/bittensor/extrinsics/registration.py +++ /dev/null @@ -1,543 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import time -from typing import List, Union, Optional, Tuple - -from rich.prompt import Confirm - -import bittensor -from bittensor.utils import format_error_message - -from bittensor.utils.registration import ( - POWSolution, - create_pow, - torch, - log_no_torch_error, -) - - -def register_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - netuid: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, - max_allowed_attempts: int = 3, - output_in_place: bool = True, - cuda: bool = False, - dev_id: Union[List[int], int] = 0, - tpb: int = 256, - num_processes: Optional[int] = None, - update_interval: Optional[int] = None, - log_verbose: bool = False, -) -> bool: - r"""Registers the wallet to the chain. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - netuid (int): - The ``netuid`` of the subnet to register on. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - max_allowed_attempts (int): - Maximum number of attempts to register the wallet. - cuda (bool): - If ``true``, the wallet should be registered using CUDA device(s). - dev_id (Union[List[int], int]): - The CUDA device id to use, or a list of device ids. - tpb (int): - The number of threads per block (CUDA). - num_processes (int): - The number of processes to use to register. - update_interval (int): - The number of nonces to solve between updates. - log_verbose (bool): - If ``true``, the registration process will log more information. 
- Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - """ - if not subtensor.subnet_exists(netuid): - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: error: [bold white]subnet:{}[/bold white] does not exist.".format( - netuid - ) - ) - return False - - with bittensor.__console__.status( - f":satellite: Checking Account on [bold]subnet:{netuid}[/bold]..." - ): - neuron = subtensor.get_neuron_for_pubkey_and_subnet( - wallet.hotkey.ss58_address, netuid=netuid - ) - if not neuron.is_null: - bittensor.logging.debug( - f"Wallet {wallet} is already registered on {neuron.netuid} with {neuron.uid}" - ) - return True - - if prompt: - if not Confirm.ask( - "Continue Registration?\n hotkey: [bold white]{}[/bold white]\n coldkey: [bold white]{}[/bold white]\n network: [bold white]{}[/bold white]".format( - wallet.hotkey.ss58_address, - wallet.coldkeypub.ss58_address, - subtensor.network, - ) - ): - return False - - if not torch: - log_no_torch_error() - return False - - # Attempt rolling registration. - attempts = 1 - while True: - bittensor.__console__.print( - ":satellite: Registering...({}/{})".format(attempts, max_allowed_attempts) - ) - # Solve latest POW. 
- if cuda: - if not torch.cuda.is_available(): - if prompt: - bittensor.__console__.print("CUDA is not available.") - return False - pow_result: Optional[POWSolution] = create_pow( - subtensor, - wallet, - netuid, - output_in_place, - cuda=cuda, - dev_id=dev_id, - tpb=tpb, - num_processes=num_processes, - update_interval=update_interval, - log_verbose=log_verbose, - ) - else: - pow_result: Optional[POWSolution] = create_pow( - subtensor, - wallet, - netuid, - output_in_place, - cuda=cuda, - num_processes=num_processes, - update_interval=update_interval, - log_verbose=log_verbose, - ) - - # pow failed - if not pow_result: - # might be registered already on this subnet - is_registered = subtensor.is_hotkey_registered( - netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address - ) - if is_registered: - bittensor.__console__.print( - f":white_heavy_check_mark: [green]Already registered on netuid:{netuid}[/green]" - ) - return True - - # pow successful, proceed to submit pow to chain for registration - else: - with bittensor.__console__.status(":satellite: Submitting POW..."): - # check if pow result is still valid - while not pow_result.is_stale(subtensor=subtensor): - result: Tuple[bool, Optional[str]] = subtensor._do_pow_register( - netuid=netuid, - wallet=wallet, - pow_result=pow_result, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - success, err_msg = result - - if not success: - # Look error here - # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs - if "HotKeyAlreadyRegisteredInSubNet" in err_msg: - bittensor.__console__.print( - f":white_heavy_check_mark: [green]Already Registered on [bold]subnet:{netuid}[/bold][/green]" - ) - return True - - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]: {err_msg}" - ) - time.sleep(0.5) - - # Successful registration, final check for neuron and pubkey - else: - bittensor.__console__.print(":satellite: Checking Balance...") - 
is_registered = subtensor.is_hotkey_registered( - netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address - ) - if is_registered: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Registered[/green]" - ) - return True - else: - # neuron not found, try again - bittensor.__console__.print( - ":cross_mark: [red]Unknown error. Neuron not found.[/red]" - ) - continue - else: - # Exited loop because pow is no longer valid. - bittensor.__console__.print("[red]POW is stale.[/red]") - # Try again. - continue - - if attempts < max_allowed_attempts: - # Failed registration, retry pow - attempts += 1 - bittensor.__console__.print( - ":satellite: Failed registration, retrying pow ...({}/{})".format( - attempts, max_allowed_attempts - ) - ) - else: - # Failed to register after max attempts. - bittensor.__console__.print("[red]No more attempts.[/red]") - return False - - -def burned_register_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - netuid: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, -) -> bool: - r"""Registers the wallet to chain by recycling TAO. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - netuid (int): - The ``netuid`` of the subnet to register on. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. 
- """ - if not subtensor.subnet_exists(netuid): - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: error: [bold white]subnet:{}[/bold white] does not exist.".format( - netuid - ) - ) - return False - - try: - wallet.coldkey # unlock coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - with bittensor.__console__.status( - f":satellite: Checking Account on [bold]subnet:{netuid}[/bold]..." - ): - neuron = subtensor.get_neuron_for_pubkey_and_subnet( - wallet.hotkey.ss58_address, netuid=netuid - ) - - old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - - recycle_amount = subtensor.recycle(netuid=netuid) - if not neuron.is_null: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Already Registered[/green]:\n" - "uid: [bold white]{}[/bold white]\n" - "netuid: [bold white]{}[/bold white]\n" - "hotkey: [bold white]{}[/bold white]\n" - "coldkey: [bold white]{}[/bold white]".format( - neuron.uid, neuron.netuid, neuron.hotkey, neuron.coldkey - ) - ) - return True - - if prompt: - # Prompt user for confirmation. 
- if not Confirm.ask(f"Recycle {recycle_amount} to register on subnet:{netuid}?"): - return False - - with bittensor.__console__.status(":satellite: Recycling TAO for Registration..."): - success, err_msg = subtensor._do_burned_register( - netuid=netuid, - wallet=wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if not success: - bittensor.__console__.print(f":cross_mark: [red]Failed[/red]: {err_msg}") - time.sleep(0.5) - return False - # Successful registration, final check for neuron and pubkey - else: - bittensor.__console__.print(":satellite: Checking Balance...") - block = subtensor.get_current_block() - new_balance = subtensor.get_balance( - wallet.coldkeypub.ss58_address, block=block - ) - - bittensor.__console__.print( - "Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - old_balance, new_balance - ) - ) - is_registered = subtensor.is_hotkey_registered( - netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address - ) - if is_registered: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Registered[/green]" - ) - return True - else: - # neuron not found, try again - bittensor.__console__.print( - ":cross_mark: [red]Unknown error. Neuron not found.[/red]" - ) - return False - - -class MaxSuccessException(Exception): - pass - - -class MaxAttemptsException(Exception): - pass - - -def run_faucet_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, - max_allowed_attempts: int = 3, - output_in_place: bool = True, - cuda: bool = False, - dev_id: Union[List[int], int] = 0, - tpb: int = 256, - num_processes: Optional[int] = None, - update_interval: Optional[int] = None, - log_verbose: bool = False, -) -> Tuple[bool, str]: - r"""Runs a continual POW to get a faucet of TAO on the test net. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. 
- prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - max_allowed_attempts (int): - Maximum number of attempts to register the wallet. - cuda (bool): - If ``true``, the wallet should be registered using CUDA device(s). - dev_id (Union[List[int], int]): - The CUDA device id to use, or a list of device ids. - tpb (int): - The number of threads per block (CUDA). - num_processes (int): - The number of processes to use to register. - update_interval (int): - The number of nonces to solve between updates. - log_verbose (bool): - If ``true``, the registration process will log more information. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - """ - if prompt: - if not Confirm.ask( - "Run Faucet ?\n coldkey: [bold white]{}[/bold white]\n network: [bold white]{}[/bold white]".format( - wallet.coldkeypub.ss58_address, - subtensor.network, - ) - ): - return False, "" - - if not torch: - log_no_torch_error() - return False, "Requires torch" - - # Unlock coldkey - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False, "" - - # Get previous balance. - old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - - # Attempt rolling registration. 
- attempts = 1 - successes = 1 - while True: - try: - pow_result = None - while pow_result is None or pow_result.is_stale(subtensor=subtensor): - # Solve latest POW. - if cuda: - if not torch.cuda.is_available(): - if prompt: - bittensor.__console__.print("CUDA is not available.") - return False, "CUDA is not available." - pow_result: Optional[POWSolution] = create_pow( - subtensor, - wallet, - -1, - output_in_place, - cuda=cuda, - dev_id=dev_id, - tpb=tpb, - num_processes=num_processes, - update_interval=update_interval, - log_verbose=log_verbose, - ) - else: - pow_result: Optional[POWSolution] = create_pow( - subtensor, - wallet, - -1, - output_in_place, - cuda=cuda, - num_processes=num_processes, - update_interval=update_interval, - log_verbose=log_verbose, - ) - call = subtensor.substrate.compose_call( - call_module="SubtensorModule", - call_function="faucet", - call_params={ - "block_number": pow_result.block_number, - "nonce": pow_result.nonce, - "work": [int(byte_) for byte_ in pow_result.seal], - }, - ) - extrinsic = subtensor.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = subtensor.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # process if registration successful, try again if pow is still valid - response.process_events() - if not response.is_success: - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]: {format_error_message(response.error_message)}" - ) - if attempts == max_allowed_attempts: - raise MaxAttemptsException - attempts += 1 - # Wait a bit before trying again - time.sleep(1) - - # Successful registration - else: - new_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - bittensor.__console__.print( - f"Balance: [blue]{old_balance}[/blue] :arrow_right: [green]{new_balance}[/green]" - ) - old_balance = new_balance - - if successes == 3: - raise MaxSuccessException - - attempts = 1 # Reset 
attempts on success - successes += 1 - - except KeyboardInterrupt: - return True, "Done" - - except MaxSuccessException: - return True, f"Max successes reached: {3}" - - except MaxAttemptsException: - return False, f"Max attempts reached: {max_allowed_attempts}" - - -def swap_hotkey_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - new_wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, -) -> bool: - try: - wallet.coldkey # unlock coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - if prompt: - # Prompt user for confirmation. - if not Confirm.ask( - f"Swap {wallet.hotkey} for new hotkey: {new_wallet.hotkey}?" - ): - return False - - with bittensor.__console__.status(":satellite: Swapping hotkeys..."): - success, err_msg = subtensor._do_swap_hotkey( - wallet=wallet, - new_wallet=new_wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if not success: - bittensor.__console__.print(f":cross_mark: [red]Failed[/red]: {err_msg}") - time.sleep(0.5) - return False - - else: - bittensor.__console__.print( - f"Hotkey {wallet.hotkey} swapped for new hotkey: {new_wallet.hotkey}" - ) - return True diff --git a/bittensor/extrinsics/root.py b/bittensor/extrinsics/root.py deleted file mode 100644 index c0a4fcabd1..0000000000 --- a/bittensor/extrinsics/root.py +++ /dev/null @@ -1,237 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import bittensor - -import time -import logging -import numpy as np -from numpy.typing import NDArray -from rich.prompt import Confirm -from typing import Union, List -import bittensor.utils.weight_utils as weight_utils -from bittensor.btlogging.defines import BITTENSOR_LOGGER_NAME -from bittensor.utils.registration import torch, legacy_torch_api_compat - -logger = logging.getLogger(BITTENSOR_LOGGER_NAME) - - -def root_register_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, -) -> bool: - r"""Registers the wallet to root network. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. 
- prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - """ - - try: - wallet.coldkey # unlock coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - is_registered = subtensor.is_hotkey_registered( - netuid=0, hotkey_ss58=wallet.hotkey.ss58_address - ) - if is_registered: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Already registered on root network.[/green]" - ) - return True - - if prompt: - # Prompt user for confirmation. - if not Confirm.ask("Register to root network?"): - return False - - with bittensor.__console__.status(":satellite: Registering to root network..."): - success, err_msg = subtensor._do_root_register( - wallet=wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if not success: - bittensor.__console__.print(f":cross_mark: [red]Failed[/red]: {err_msg}") - time.sleep(0.5) - - # Successful registration, final check for neuron and pubkey - else: - is_registered = subtensor.is_hotkey_registered( - netuid=0, hotkey_ss58=wallet.hotkey.ss58_address - ) - if is_registered: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Registered[/green]" - ) - return True - else: - # neuron not found, try again - bittensor.__console__.print( - ":cross_mark: [red]Unknown error. 
Neuron not found.[/red]" - ) - - -@legacy_torch_api_compat -def set_root_weights_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - netuids: Union[NDArray[np.int64], "torch.LongTensor", List[int]], - weights: Union[NDArray[np.float32], "torch.FloatTensor", List[float]], - version_key: int = 0, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> bool: - r"""Sets the given weights and values on chain for wallet hotkey account. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - netuids (Union[NDArray[np.int64], torch.LongTensor, List[int]]): - The ``netuid`` of the subnet to set weights for. - weights (Union[NDArray[np.float32], torch.FloatTensor, list]): - Weights to set. These must be ``float`` s and must correspond to the passed ``netuid`` s. - version_key (int): - The version key of the validator. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - """ - - try: - wallet.coldkey # unlock coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - # First convert types. 
- if isinstance(netuids, list): - netuids = np.array(netuids, dtype=np.int64) - if isinstance(weights, list): - weights = np.array(weights, dtype=np.float32) - - # Get weight restrictions. - min_allowed_weights = subtensor.min_allowed_weights(netuid=0) - max_weight_limit = subtensor.max_weight_limit(netuid=0) - - # Get non zero values. - non_zero_weight_idx = np.argwhere(weights > 0).squeeze(axis=1) - non_zero_weight_uids = netuids[non_zero_weight_idx] - non_zero_weights = weights[non_zero_weight_idx] - if non_zero_weights.size < min_allowed_weights: - raise ValueError( - "The minimum number of weights required to set weights is {}, got {}".format( - min_allowed_weights, non_zero_weights.size - ) - ) - - # Normalize the weights to max value. - formatted_weights = bittensor.utils.weight_utils.normalize_max_weight( - x=weights, limit=max_weight_limit - ) - bittensor.__console__.print( - f"\nRaw Weights -> Normalized weights: \n\t{weights} -> \n\t{formatted_weights}\n" - ) - - # Ask before moving on. 
- if prompt: - if not Confirm.ask( - "Do you want to set the following root weights?:\n[bold white] weights: {}\n uids: {}[/bold white ]?".format( - formatted_weights, netuids - ) - ): - return False - - with bittensor.__console__.status( - ":satellite: Setting root weights on [white]{}[/white] ...".format( - subtensor.network - ) - ): - try: - weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - netuids, weights - ) - success, error_message = subtensor._do_set_root_weights( - wallet=wallet, - netuid=0, - uids=weight_uids, - vals=weight_vals, - version_key=version_key, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - bittensor.__console__.print(success, error_message) - - if not wait_for_finalization and not wait_for_inclusion: - return True - - if success is True: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - bittensor.logging.success( - prefix="Set weights", - suffix="Finalized: " + str(success), - ) - return True - else: - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]: {error_message}" - ) - bittensor.logging.warning( - prefix="Set weights", - suffix="Failed: " + str(error_message), - ) - return False - - except Exception as e: - # TODO( devs ): lets remove all of the bittensor.__console__ calls and replace with the bittensor logger. 
- bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: error:{}".format(e) - ) - bittensor.logging.warning( - prefix="Set weights", suffix="Failed: " + str(e) - ) - return False diff --git a/bittensor/extrinsics/senate.py b/bittensor/extrinsics/senate.py deleted file mode 100644 index f586cec399..0000000000 --- a/bittensor/extrinsics/senate.py +++ /dev/null @@ -1,275 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import time - -from rich.prompt import Confirm - -import bittensor -from bittensor.utils import format_error_message - - -def register_senate_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, -) -> bool: - r"""Registers the wallet to chain for senate voting. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. 
- wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``. - """ - try: - wallet.coldkey # unlock coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - wallet.hotkey # unlock hotkey - - if prompt: - # Prompt user for confirmation. - if not Confirm.ask(f"Register delegate hotkey to senate?"): - return False - - with bittensor.__console__.status(":satellite: Registering with senate..."): - with subtensor.substrate as substrate: - # create extrinsic call - call = substrate.compose_call( - call_module="SubtensorModule", - call_function="join_senate", - call_params={"hotkey": wallet.hotkey.ss58_address}, - ) - extrinsic = substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # We only wait here if we expect finalization. 
- if not wait_for_finalization and not wait_for_inclusion: - return True - - # process if registration successful - response.process_events() - if not response.is_success: - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]:{format_error_message(response.error_message)}" - ) - time.sleep(0.5) - - # Successful registration, final check for membership - else: - is_registered = wallet.is_senate_member(subtensor) - - if is_registered: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Registered[/green]" - ) - return True - else: - # neuron not found, try again - bittensor.__console__.print( - ":cross_mark: [red]Unknown error. Senate membership not found.[/red]" - ) - - -def leave_senate_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, -) -> bool: - r"""Removes the wallet from chain for senate voting. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``. 
- """ - try: - wallet.coldkey # unlock coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - wallet.hotkey # unlock hotkey - - if prompt: - # Prompt user for confirmation. - if not Confirm.ask(f"Remove delegate hotkey from senate?"): - return False - - with bittensor.__console__.status(":satellite: Leaving senate..."): - with subtensor.substrate as substrate: - # create extrinsic call - call = substrate.compose_call( - call_module="SubtensorModule", - call_function="leave_senate", - call_params={"hotkey": wallet.hotkey.ss58_address}, - ) - extrinsic = substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - - # process if registration successful - response.process_events() - if not response.is_success: - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]: {format_error_message(response.error_message)}" - ) - time.sleep(0.5) - - # Successful registration, final check for membership - else: - is_registered = wallet.is_senate_member(subtensor) - - if not is_registered: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Left senate[/green]" - ) - return True - else: - # neuron not found, try again - bittensor.__console__.print( - ":cross_mark: [red]Unknown error. 
Senate membership still found.[/red]" - ) - - -def vote_senate_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - proposal_hash: str, - proposal_idx: int, - vote: bool, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, -) -> bool: - r"""Votes ayes or nays on proposals. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``. - """ - wallet.coldkey # unlock coldkey - wallet.hotkey # unlock hotkey - - if prompt: - # Prompt user for confirmation. - if not Confirm.ask("Cast a vote of {}?".format(vote)): - return False - - with bittensor.__console__.status(":satellite: Casting vote.."): - with subtensor.substrate as substrate: - # create extrinsic call - call = substrate.compose_call( - call_module="SubtensorModule", - call_function="vote", - call_params={ - "hotkey": wallet.hotkey.ss58_address, - "proposal": proposal_hash, - "index": proposal_idx, - "approve": vote, - }, - ) - extrinsic = substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # We only wait here if we expect finalization. 
- if not wait_for_finalization and not wait_for_inclusion: - return True - - # process if vote successful - response.process_events() - if not response.is_success: - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]: {format_error_message(response.error_message)}" - ) - time.sleep(0.5) - - # Successful vote, final check for data - else: - vote_data = subtensor.get_vote_data(proposal_hash) - has_voted = ( - vote_data["ayes"].count(wallet.hotkey.ss58_address) > 0 - or vote_data["nays"].count(wallet.hotkey.ss58_address) > 0 - ) - - if has_voted: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Vote cast.[/green]" - ) - return True - else: - # hotkey not found in ayes/nays - bittensor.__console__.print( - ":cross_mark: [red]Unknown error. Couldn't find vote.[/red]" - ) diff --git a/bittensor/extrinsics/serving.py b/bittensor/extrinsics/serving.py deleted file mode 100644 index 734561835f..0000000000 --- a/bittensor/extrinsics/serving.py +++ /dev/null @@ -1,286 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import json -from typing import Optional - -from retry import retry -from rich.prompt import Confirm - -import bittensor -import bittensor.utils.networking as net -from bittensor.utils import format_error_message -from bittensor.utils.networking import ensure_connected -from ..errors import MetadataError - - -def serve_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - ip: str, - port: int, - protocol: int, - netuid: int, - placeholder1: int = 0, - placeholder2: int = 0, - wait_for_inclusion: bool = False, - wait_for_finalization=True, - prompt: bool = False, -) -> bool: - r"""Subscribes a Bittensor endpoint to the subtensor chain. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - ip (str): - Endpoint host port i.e., ``192.122.31.4``. - port (int): - Endpoint port number i.e., ``9221``. - protocol (int): - An ``int`` representation of the protocol. - netuid (int): - The network uid to serve on. - placeholder1 (int): - A placeholder for future use. - placeholder2 (int): - A placeholder for future use. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. 
- """ - # Decrypt hotkey - wallet.hotkey - params: "bittensor.AxonServeCallParams" = { - "version": bittensor.__version_as_int__, - "ip": net.ip_to_int(ip), - "port": port, - "ip_type": net.ip_version(ip), - "netuid": netuid, - "hotkey": wallet.hotkey.ss58_address, - "coldkey": wallet.coldkeypub.ss58_address, - "protocol": protocol, - "placeholder1": placeholder1, - "placeholder2": placeholder2, - } - bittensor.logging.debug("Checking axon ...") - neuron = subtensor.get_neuron_for_pubkey_and_subnet( - wallet.hotkey.ss58_address, netuid=netuid - ) - neuron_up_to_date = not neuron.is_null and params == { - "version": neuron.axon_info.version, - "ip": net.ip_to_int(neuron.axon_info.ip), - "port": neuron.axon_info.port, - "ip_type": neuron.axon_info.ip_type, - "netuid": neuron.netuid, - "hotkey": neuron.hotkey, - "coldkey": neuron.coldkey, - "protocol": neuron.axon_info.protocol, - "placeholder1": neuron.axon_info.placeholder1, - "placeholder2": neuron.axon_info.placeholder2, - } - output = params.copy() - output["coldkey"] = wallet.coldkeypub.ss58_address - output["hotkey"] = wallet.hotkey.ss58_address - if neuron_up_to_date: - bittensor.logging.debug( - f"Axon already served on: AxonInfo({wallet.hotkey.ss58_address},{ip}:{port}) " - ) - return True - - if prompt: - output = params.copy() - output["coldkey"] = wallet.coldkeypub.ss58_address - output["hotkey"] = wallet.hotkey.ss58_address - if not Confirm.ask( - "Do you want to serve axon:\n [bold white]{}[/bold white]".format( - json.dumps(output, indent=4, sort_keys=True) - ) - ): - return False - - bittensor.logging.debug( - f"Serving axon with: AxonInfo({wallet.hotkey.ss58_address},{ip}:{port}) -> {subtensor.network}:{netuid}" - ) - success, error_message = subtensor._do_serve_axon( - wallet=wallet, - call_params=params, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - if wait_for_inclusion or wait_for_finalization: - if success is True: - bittensor.logging.debug( - 
f"Axon served with: AxonInfo({wallet.hotkey.ss58_address},{ip}:{port}) on {subtensor.network}:{netuid} " - ) - return True - else: - bittensor.logging.error(f"Failed: {error_message}") - return False - else: - return True - - -def serve_axon_extrinsic( - subtensor: "bittensor.subtensor", - netuid: int, - axon: "bittensor.Axon", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, -) -> bool: - r"""Serves the axon to the network. - - Args: - netuid ( int ): - The ``netuid`` being served on. - axon (bittensor.Axon): - Axon to serve. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - """ - axon.wallet.hotkey - axon.wallet.coldkeypub - external_port = axon.external_port - - # ---- Get external ip ---- - if axon.external_ip is None: - try: - external_ip = net.get_external_ip() - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Found external ip: {}[/green]".format( - external_ip - ) - ) - bittensor.logging.success( - prefix="External IP", suffix="{}".format(external_ip) - ) - except Exception as E: - raise RuntimeError( - "Unable to attain your external ip. Check your internet connection. 
error: {}".format( - E - ) - ) from E - else: - external_ip = axon.external_ip - - # ---- Subscribe to chain ---- - serve_success = subtensor.serve( - wallet=axon.wallet, - ip=external_ip, - port=external_port, - netuid=netuid, - protocol=4, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - return serve_success - - -def publish_metadata( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - netuid: int, - data_type: str, - data: bytes, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, -) -> bool: - """ - Publishes metadata on the Bittensor network using the specified wallet and network identifier. - - Args: - subtensor (bittensor.subtensor): - The subtensor instance representing the Bittensor blockchain connection. - wallet (bittensor.wallet): - The wallet object used for authentication in the transaction. - netuid (int): - Network UID on which the metadata is to be published. - data_type (str): - The data type of the information being submitted. It should be one of the following: ``'Sha256'``, ``'Blake256'``, ``'Keccak256'``, or ``'Raw0-128'``. This specifies the format or hashing algorithm used for the data. - data (str): - The actual metadata content to be published. This should be formatted or hashed according to the ``type`` specified. (Note: max ``str`` length is 128 bytes) - wait_for_inclusion (bool, optional): - If ``True``, the function will wait for the extrinsic to be included in a block before returning. Defaults to ``False``. - wait_for_finalization (bool, optional): - If ``True``, the function will wait for the extrinsic to be finalized on the chain before returning. Defaults to ``True``. - - Returns: - bool: - ``True`` if the metadata was successfully published (and finalized if specified). ``False`` otherwise. - - Raises: - MetadataError: - If there is an error in submitting the extrinsic or if the response from the blockchain indicates failure. 
- """ - - wallet.hotkey - - with subtensor.substrate as substrate: - call = substrate.compose_call( - call_module="Commitments", - call_function="set_commitment", - call_params={ - "netuid": netuid, - "info": {"fields": [[{f"{data_type}": data}]]}, - }, - ) - - extrinsic = substrate.create_signed_extrinsic(call=call, keypair=wallet.hotkey) - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - response.process_events() - if response.is_success: - return True - else: - raise MetadataError(format_error_message(response.error_message)) - - -@ensure_connected -def get_metadata(self, netuid: int, hotkey: str, block: Optional[int] = None) -> str: - @retry(delay=2, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(): - with self.substrate as substrate: - return substrate.query( - module="Commitments", - storage_function="CommitmentOf", - params=[netuid, hotkey], - block_hash=None if block is None else substrate.get_block_hash(block), - ) - - commit_data = make_substrate_call_with_retry() - return commit_data.value diff --git a/bittensor/extrinsics/set_weights.py b/bittensor/extrinsics/set_weights.py deleted file mode 100644 index ea51fab237..0000000000 --- a/bittensor/extrinsics/set_weights.py +++ /dev/null @@ -1,136 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the 
following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import bittensor - -import logging -import numpy as np -from numpy.typing import NDArray -from rich.prompt import Confirm -from typing import Union, Tuple -import bittensor.utils.weight_utils as weight_utils -from bittensor.btlogging.defines import BITTENSOR_LOGGER_NAME -from bittensor.utils.registration import torch, use_torch - -logger = logging.getLogger(BITTENSOR_LOGGER_NAME) - - -def set_weights_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - netuid: int, - uids: Union[NDArray[np.int64], "torch.LongTensor", list], - weights: Union[NDArray[np.float32], "torch.FloatTensor", list], - version_key: int = 0, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> Tuple[bool, str]: - r"""Sets the given weights and values on chain for wallet hotkey account. - - Args: - subtensor (bittensor.subtensor): - Subtensor endpoint to use. - wallet (bittensor.wallet): - Bittensor wallet object. - netuid (int): - The ``netuid`` of the subnet to set weights for. - uids (Union[NDArray[np.int64], torch.LongTensor, list]): - The ``uint64`` uids of destination neurons. - weights (Union[NDArray[np.float32], torch.FloatTensor, list]): - The weights to set. These must be ``float`` s and correspond to the passed ``uid`` s. - version_key (int): - The version key of the validator. 
- wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``. - """ - # First convert types. - if use_torch(): - if isinstance(uids, list): - uids = torch.tensor(uids, dtype=torch.int64) - if isinstance(weights, list): - weights = torch.tensor(weights, dtype=torch.float32) - else: - if isinstance(uids, list): - uids = np.array(uids, dtype=np.int64) - if isinstance(weights, list): - weights = np.array(weights, dtype=np.float32) - - # Reformat and normalize. - weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - uids, weights - ) - - # Ask before moving on. - if prompt: - if not Confirm.ask( - "Do you want to set weights:\n[bold white] weights: {}\n uids: {}[/bold white ]?".format( - [float(v / 65535) for v in weight_vals], weight_uids - ) - ): - return False, "Prompt refused." - - with bittensor.__console__.status( - ":satellite: Setting weights on [white]{}[/white] ...".format(subtensor.network) - ): - try: - success, error_message = subtensor._do_set_weights( - wallet=wallet, - netuid=netuid, - uids=weight_uids, - vals=weight_vals, - version_key=version_key, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return True, "Not waiting for finalization or inclusion." 
- - if success is True: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - bittensor.logging.success( - prefix="Set weights", - suffix="Finalized: " + str(success), - ) - return True, "Successfully set weights and Finalized." - else: - bittensor.logging.error( - msg=error_message, - prefix="Set weights", - suffix="Failed: ", - ) - return False, error_message - - except Exception as e: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: error:{}".format(e) - ) - bittensor.logging.warning( - prefix="Set weights", suffix="Failed: " + str(e) - ) - return False, str(e) diff --git a/bittensor/extrinsics/staking.py b/bittensor/extrinsics/staking.py deleted file mode 100644 index b6d5cf5d60..0000000000 --- a/bittensor/extrinsics/staking.py +++ /dev/null @@ -1,760 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -from rich.prompt import Confirm -from time import sleep -from typing import List, Union, Optional, Tuple - -import bittensor -from ..utils.formatting import float_to_u64, float_to_u16 - -from bittensor.utils.balance import Balance - -console = bittensor.__console__ - - -def _check_threshold_amount( - subtensor: "bittensor.subtensor", stake_balance: Balance -) -> Tuple[bool, Balance]: - """ - Checks if the new stake balance will be above the minimum required stake threshold. - - Args: - stake_balance (Balance): - the balance to check for threshold limits. - - Returns: - success, threshold (bool, Balance): - ``true`` if the staking balance is above the threshold, or ``false`` if the - staking balance is below the threshold. - The threshold balance required to stake. - """ - min_req_stake: Balance = subtensor.get_minimum_required_stake() - - if min_req_stake > stake_balance: - return False, min_req_stake - else: - return True, min_req_stake - - -def add_stake_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey_ss58: Optional[str] = None, - amount: Optional[Union[Balance, float]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> bool: - r"""Adds the specified amount of stake to passed hotkey ``uid``. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - hotkey_ss58 (Optional[str]): - The ``ss58`` address of the hotkey account to stake to defaults to the wallet's hotkey. - amount (Union[Balance, float]): - Amount to stake as Bittensor balance, or ``float`` interpreted as Tao. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. 
- wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - - Raises: - bittensor.errors.NotRegisteredError: - If the wallet is not registered on the chain. - bittensor.errors.NotDelegateError: - If the hotkey is not a delegate on the chain. - """ - # Decrypt keys, - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - # Default to wallet's own hotkey if the value is not passed. - if hotkey_ss58 is None: - hotkey_ss58 = wallet.hotkey.ss58_address - - # Flag to indicate if we are using the wallet's own hotkey. - own_hotkey: bool - - with bittensor.__console__.status( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - subtensor.network - ) - ): - old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - # Get hotkey owner - hotkey_owner = subtensor.get_hotkey_owner(hotkey_ss58) - own_hotkey = wallet.coldkeypub.ss58_address == hotkey_owner - if not own_hotkey: - # This is not the wallet's own hotkey so we are delegating. 
- if not subtensor.is_hotkey_delegate(hotkey_ss58): - raise bittensor.errors.NotDelegateError( - "Hotkey: {} is not a delegate.".format(hotkey_ss58) - ) - - # Get hotkey take - hotkey_take = subtensor.get_delegate_take(hotkey_ss58) - - # Get current stake - old_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, hotkey_ss58=hotkey_ss58 - ) - - # Grab the existential deposit. - existential_deposit = subtensor.get_existential_deposit() - - # Convert to bittensor.Balance - if amount is None: - # Stake it all. - staking_balance = bittensor.Balance.from_tao(old_balance.tao) - elif not isinstance(amount, bittensor.Balance): - staking_balance = bittensor.Balance.from_tao(amount) - else: - staking_balance = amount - - # Leave existential balance to keep key alive. - if staking_balance > old_balance - existential_deposit: - # If we are staking all, we need to leave at least the existential deposit. - staking_balance = old_balance - existential_deposit - else: - staking_balance = staking_balance - - # Check enough to stake. - if staking_balance > old_balance: - bittensor.__console__.print( - ":cross_mark: [red]Not enough stake[/red]:[bold white]\n balance:{}\n amount: {}\n coldkey: {}[/bold white]".format( - old_balance, staking_balance, wallet.name - ) - ) - return False - - # If nominating, we need to check if the new stake balance will be above the minimum required stake threshold. - if not own_hotkey: - new_stake_balance = old_stake + staking_balance - is_above_threshold, threshold = _check_threshold_amount( - subtensor, new_stake_balance - ) - if not is_above_threshold: - bittensor.__console__.print( - f":cross_mark: [red]New stake balance of {new_stake_balance} is below the minimum required nomination stake threshold {threshold}.[/red]" - ) - return False - - # Ask before moving on. - if prompt: - if not own_hotkey: - # We are delegating. 
- if not Confirm.ask( - "Do you want to delegate:[bold white]\n amount: {}\n to: {}\n take: {}\n owner: {}[/bold white]".format( - staking_balance, wallet.hotkey_str, hotkey_take, hotkey_owner - ) - ): - return False - else: - if not Confirm.ask( - "Do you want to stake:[bold white]\n amount: {}\n to: {}[/bold white]".format( - staking_balance, wallet.hotkey_str - ) - ): - return False - - try: - with bittensor.__console__.status( - ":satellite: Staking to: [bold white]{}[/bold white] ...".format( - subtensor.network - ) - ): - staking_response: bool = __do_add_stake_single( - subtensor=subtensor, - wallet=wallet, - hotkey_ss58=hotkey_ss58, - amount=staking_balance, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if staking_response is True: # If we successfully staked. - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - with bittensor.__console__.status( - ":satellite: Checking Balance on: [white]{}[/white] ...".format( - subtensor.network - ) - ): - new_balance = subtensor.get_balance( - address=wallet.coldkeypub.ss58_address - ) - block = subtensor.get_current_block() - new_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, - hotkey_ss58=hotkey_ss58, - block=block, - ) # Get current stake - - bittensor.__console__.print( - "Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - old_balance, new_balance - ) - ) - bittensor.__console__.print( - "Stake:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - old_stake, new_stake - ) - ) - return True - else: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: Error unknown." 
- ) - return False - - except bittensor.errors.NotRegisteredError: - bittensor.__console__.print( - ":cross_mark: [red]Hotkey: {} is not registered.[/red]".format( - wallet.hotkey_str - ) - ) - return False - except bittensor.errors.StakeError as e: - bittensor.__console__.print(":cross_mark: [red]Stake Error: {}[/red]".format(e)) - return False - - -def add_stake_multiple_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey_ss58s: List[str], - amounts: Optional[List[Union[Balance, float]]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> bool: - r"""Adds stake to each ``hotkey_ss58`` in the list, using each amount, from a common coldkey. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object for the coldkey. - hotkey_ss58s (List[str]): - List of hotkeys to stake to. - amounts (List[Union[Balance, float]]): - List of amounts to stake. If ``None``, stake all to the first hotkey. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or included in the block. Flag is ``true`` if any wallet was staked. If we did not wait for finalization / inclusion, the response is ``true``. 
- """ - if not isinstance(hotkey_ss58s, list) or not all( - isinstance(hotkey_ss58, str) for hotkey_ss58 in hotkey_ss58s - ): - raise TypeError("hotkey_ss58s must be a list of str") - - if len(hotkey_ss58s) == 0: - return True - - if amounts is not None and len(amounts) != len(hotkey_ss58s): - raise ValueError("amounts must be a list of the same length as hotkey_ss58s") - - if amounts is not None and not all( - isinstance(amount, (Balance, float)) for amount in amounts - ): - raise TypeError( - "amounts must be a [list of bittensor.Balance or float] or None" - ) - - if amounts is None: - amounts = [None] * len(hotkey_ss58s) - else: - # Convert to Balance - amounts = [ - bittensor.Balance.from_tao(amount) if isinstance(amount, float) else amount - for amount in amounts - ] - - if sum(amount.tao for amount in amounts) == 0: - # Staking 0 tao - return True - - # Decrypt coldkey. - wallet.coldkey - - old_stakes = [] - with bittensor.__console__.status( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - subtensor.network - ) - ): - old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - - # Get the old stakes. - for hotkey_ss58 in hotkey_ss58s: - old_stakes.append( - subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, hotkey_ss58=hotkey_ss58 - ) - ) - - # Remove existential balance to keep key alive. - ## Keys must maintain a balance of at least 1000 rao to stay alive. - total_staking_rao = sum( - [amount.rao if amount is not None else 0 for amount in amounts] - ) - if total_staking_rao == 0: - # Staking all to the first wallet. - if old_balance.rao > 1000: - old_balance -= bittensor.Balance.from_rao(1000) - - elif total_staking_rao < 1000: - # Staking less than 1000 rao to the wallets. - pass - else: - # Staking more than 1000 rao to the wallets. - ## Reduce the amount to stake to each wallet to keep the balance above 1000 rao. 
- percent_reduction = 1 - (1000 / total_staking_rao) - amounts = [ - Balance.from_tao(amount.tao * percent_reduction) for amount in amounts - ] - - successful_stakes = 0 - for idx, (hotkey_ss58, amount, old_stake) in enumerate( - zip(hotkey_ss58s, amounts, old_stakes) - ): - staking_all = False - # Convert to bittensor.Balance - if amount == None: - # Stake it all. - staking_balance = bittensor.Balance.from_tao(old_balance.tao) - staking_all = True - else: - # Amounts are cast to balance earlier in the function - assert isinstance(amount, bittensor.Balance) - staking_balance = amount - - # Check enough to stake - if staking_balance > old_balance: - bittensor.__console__.print( - ":cross_mark: [red]Not enough balance[/red]: [green]{}[/green] to stake: [blue]{}[/blue] from coldkey: [white]{}[/white]".format( - old_balance, staking_balance, wallet.name - ) - ) - continue - - # Ask before moving on. - if prompt: - if not Confirm.ask( - "Do you want to stake:\n[bold white] amount: {}\n hotkey: {}[/bold white ]?".format( - staking_balance, wallet.hotkey_str - ) - ): - continue - - try: - staking_response: bool = __do_add_stake_single( - subtensor=subtensor, - wallet=wallet, - hotkey_ss58=hotkey_ss58, - amount=staking_balance, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if staking_response == True: # If we successfully staked. - # We only wait here if we expect finalization. - - if idx < len(hotkey_ss58s) - 1: - # Wait for tx rate limit. 
- tx_rate_limit_blocks = subtensor.tx_rate_limit() - if tx_rate_limit_blocks > 0: - bittensor.__console__.print( - ":hourglass: [yellow]Waiting for tx rate limit: [white]{}[/white] blocks[/yellow]".format( - tx_rate_limit_blocks - ) - ) - sleep(tx_rate_limit_blocks * 12) # 12 seconds per block - - if not wait_for_finalization and not wait_for_inclusion: - old_balance -= staking_balance - successful_stakes += 1 - if staking_all: - # If staked all, no need to continue - break - - continue - - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - - block = subtensor.get_current_block() - new_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, - hotkey_ss58=hotkey_ss58, - block=block, - ) - new_balance = subtensor.get_balance( - wallet.coldkeypub.ss58_address, block=block - ) - bittensor.__console__.print( - "Stake ({}): [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - hotkey_ss58, old_stake, new_stake - ) - ) - old_balance = new_balance - successful_stakes += 1 - if staking_all: - # If staked all, no need to continue - break - - else: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: Error unknown." 
- ) - continue - - except bittensor.errors.NotRegisteredError: - bittensor.__console__.print( - ":cross_mark: [red]Hotkey: {} is not registered.[/red]".format( - hotkey_ss58 - ) - ) - continue - except bittensor.errors.StakeError as e: - bittensor.__console__.print( - ":cross_mark: [red]Stake Error: {}[/red]".format(e) - ) - continue - - if successful_stakes != 0: - with bittensor.__console__.status( - ":satellite: Checking Balance on: ([white]{}[/white] ...".format( - subtensor.network - ) - ): - new_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - bittensor.__console__.print( - "Balance: [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - old_balance, new_balance - ) - ) - return True - - return False - - -def __do_add_stake_single( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey_ss58: str, - amount: "bittensor.Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, -) -> bool: - r""" - Executes a stake call to the chain using the wallet and the amount specified. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - hotkey_ss58 (str): - Hotkey to stake to. - amount (bittensor.Balance): - Amount to stake as Bittensor balance object. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. 
- Raises: - bittensor.errors.StakeError: - If the extrinsic fails to be finalized or included in the block. - bittensor.errors.NotDelegateError: - If the hotkey is not a delegate. - bittensor.errors.NotRegisteredError: - If the hotkey is not registered in any subnets. - - """ - # Decrypt keys, - wallet.coldkey - - hotkey_owner = subtensor.get_hotkey_owner(hotkey_ss58) - own_hotkey = wallet.coldkeypub.ss58_address == hotkey_owner - if not own_hotkey: - # We are delegating. - # Verify that the hotkey is a delegate. - if not subtensor.is_hotkey_delegate(hotkey_ss58=hotkey_ss58): - raise bittensor.errors.NotDelegateError( - "Hotkey: {} is not a delegate.".format(hotkey_ss58) - ) - - success = subtensor._do_stake( - wallet=wallet, - hotkey_ss58=hotkey_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - return success - - -def set_childkey_take_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey: str, - netuid: int, - take: float, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> Tuple[bool, str]: - """ - Sets childkey take. - - Args: - subtensor (bittensor.subtensor): Subtensor endpoint to use. - wallet (bittensor.wallet): Bittensor wallet object. - hotkey (str): Childkey hotkey. - take (float): Childkey take value. - netuid (int): Unique identifier of for the subnet. - wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. 
- - Returns: - Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. - - Raises: - bittensor.errors.ChildHotkeyError: If the extrinsic fails to be finalized or included in the block. - bittensor.errors.NotRegisteredError: If the hotkey is not registered in any subnets. - - """ - - # Ask before moving on. - if prompt: - if not Confirm.ask( - f"Do you want to set childkey take to: [bold white]{take*100}%[/bold white]?" - ): - return False, "Operation Cancelled" - - # Decrypt coldkey. - wallet.coldkey - - with bittensor.__console__.status( - f":satellite: Setting childkey take on [white]{subtensor.network}[/white] ..." - ): - try: - if 0 < take <= 0.18: - take_u16 = float_to_u16(take) - else: - return False, "Invalid take value" - - success, error_message = subtensor._do_set_childkey_take( - wallet=wallet, - hotkey=hotkey, - netuid=netuid, - take=take_u16, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return ( - True, - "Not waiting for finalization or inclusion. Set childkey take initiated.", - ) - - if success: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - bittensor.logging.success( - prefix="Setting childkey take", - suffix="Finalized: " + str(success), - ) - return True, "Successfully set childkey take and Finalized." 
- else: - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]: {error_message}" - ) - bittensor.logging.warning( - prefix="Setting childkey take", - suffix="Failed: " + str(error_message), - ) - return False, error_message - - except Exception as e: - return False, f"Exception occurred while setting childkey take: {str(e)}" - - -def set_children_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey: str, - netuid: int, - children_with_proportions: List[Tuple[float, str]], - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> Tuple[bool, str]: - """ - Sets children hotkeys with proportions assigned from the parent. - - Args: - subtensor (bittensor.subtensor): Subtensor endpoint to use. - wallet (bittensor.wallet): Bittensor wallet object. - hotkey (str): Parent hotkey. - children_with_proportions (List[str]): Children hotkeys. - netuid (int): Unique identifier of for the subnet. - wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. - - Returns: - Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. - - Raises: - bittensor.errors.ChildHotkeyError: If the extrinsic fails to be finalized or included in the block. - bittensor.errors.NotRegisteredError: If the hotkey is not registered in any subnets. - - """ - # Check if all children are being revoked - all_revoked = len(children_with_proportions) == 0 - - operation = "Revoking all child hotkeys" if all_revoked else "Setting child hotkeys" - - # Ask before moving on. 
- if prompt: - if all_revoked: - if not Confirm.ask( - f"Do you want to revoke all children hotkeys for hotkey {hotkey}?" - ): - return False, "Operation Cancelled" - else: - if not Confirm.ask( - "Do you want to set children hotkeys with proportions:\n[bold white]{}[/bold white]?".format( - "\n".join( - f" {child[1]}: {child[0]}" - for child in children_with_proportions - ) - ) - ): - return False, "Operation Cancelled" - - # Decrypt coldkey. - wallet.coldkey - - user_hotkey_ss58 = wallet.hotkey.ss58_address # Default to wallet's own hotkey. - if hotkey != user_hotkey_ss58: - raise ValueError("Cannot set/revoke child hotkeys for others.") - - with bittensor.__console__.status( - f":satellite: {operation} on [white]{subtensor.network}[/white] ..." - ): - try: - if not all_revoked: - normalized_children = prepare_child_proportions( - children_with_proportions - ) - else: - normalized_children = [] - - success, error_message = subtensor._do_set_children( - wallet=wallet, - hotkey=hotkey, - netuid=netuid, - children=normalized_children, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return ( - True, - f"Not waiting for finalization or inclusion. {operation} initiated.", - ) - - if success: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - bittensor.logging.success( - prefix=operation, - suffix="Finalized: " + str(success), - ) - return True, f"Successfully {operation.lower()} and Finalized." 
- else: - bittensor.__console__.print( - f":cross_mark: [red]Failed[/red]: {error_message}" - ) - bittensor.logging.warning( - prefix=operation, - suffix="Failed: " + str(error_message), - ) - return False, error_message - - except Exception as e: - return False, f"Exception occurred while {operation.lower()}: {str(e)}" - - -def prepare_child_proportions(children_with_proportions): - """ - Convert proportions to u64 and normalize, ensuring total does not exceed u64 max. - """ - children_u64 = [ - (float_to_u64(proportion), child) - for proportion, child in children_with_proportions - ] - total = sum(proportion for proportion, _ in children_u64) - - if total > (2**64 - 1): - excess = total - (2**64 - 1) - if excess > (2**64 * 0.01): # Example threshold of 1% of u64 max - raise ValueError("Excess is too great to normalize proportions") - largest_child_index = max( - range(len(children_u64)), key=lambda i: children_u64[i][0] - ) - children_u64[largest_child_index] = ( - children_u64[largest_child_index][0] - excess, - children_u64[largest_child_index][1], - ) - - return children_u64 diff --git a/bittensor/extrinsics/transfer.py b/bittensor/extrinsics/transfer.py deleted file mode 100644 index aa340ab406..0000000000 --- a/bittensor/extrinsics/transfer.py +++ /dev/null @@ -1,164 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import bittensor - -from rich.prompt import Confirm -from typing import Union -from ..utils.balance import Balance -from ..utils import is_valid_bittensor_address_or_public_key - - -def transfer_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - dest: str, - amount: Union[Balance, float], - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - keep_alive: bool = True, - prompt: bool = False, -) -> bool: - r"""Transfers funds from this wallet to the destination public key address. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object to make transfer from. - dest (str, ss58_address or ed25519): - Destination public key address of reciever. - amount (Union[Balance, int]): - Amount to stake as Bittensor balance, or ``float`` interpreted as Tao. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - keep_alive (bool): - If set, keeps the account alive by keeping the balance above the existential deposit. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. 
If we did not wait for finalization / inclusion, the response is ``true``. - """ - # Validate destination address. - if not is_valid_bittensor_address_or_public_key(dest): - bittensor.__console__.print( - ":cross_mark: [red]Invalid destination address[/red]:[bold white]\n {}[/bold white]".format( - dest - ) - ) - return False - - if isinstance(dest, bytes): - # Convert bytes to hex string. - dest = "0x" + dest.hex() - - try: - # Unlock wallet coldkey. - wallet.coldkey - - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - # Convert to bittensor.Balance - if not isinstance(amount, bittensor.Balance): - transfer_balance = bittensor.Balance.from_tao(amount) - else: - transfer_balance = amount - - # Check balance. - with bittensor.__console__.status(":satellite: Checking Balance..."): - account_balance = subtensor.get_balance(wallet.coldkey.ss58_address) - # check existential deposit. - existential_deposit = subtensor.get_existential_deposit() - - with bittensor.__console__.status(":satellite: Transferring..."): - fee = subtensor.get_transfer_fee( - wallet=wallet, dest=dest, value=transfer_balance.rao - ) - - if not keep_alive: - # Check if the transfer should keep_alive the account - existential_deposit = bittensor.Balance(0) - - # Check if we have enough balance. - if account_balance < (transfer_balance + fee + existential_deposit): - bittensor.__console__.print( - ":cross_mark: [red]Not enough balance[/red]:[bold white]\n balance: {}\n amount: {}\n for fee: {}[/bold white]".format( - account_balance, transfer_balance, fee - ) - ) - return False - - # Ask before moving on. 
- if prompt: - if not Confirm.ask( - "Do you want to transfer:[bold white]\n amount: {}\n from: {}:{}\n to: {}\n for fee: {}[/bold white]".format( - transfer_balance, wallet.name, wallet.coldkey.ss58_address, dest, fee - ) - ): - return False - - with bittensor.__console__.status(":satellite: Transferring..."): - success, block_hash, err_msg = subtensor._do_transfer( - wallet, - dest, - transfer_balance, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - if success: - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - bittensor.__console__.print( - "[green]Block Hash: {}[/green]".format(block_hash) - ) - - explorer_urls = bittensor.utils.get_explorer_url_for_network( - subtensor.network, block_hash, bittensor.__network_explorer_map__ - ) - if explorer_urls != {} and explorer_urls: - bittensor.__console__.print( - "[green]Opentensor Explorer Link: {}[/green]".format( - explorer_urls.get("opentensor") - ) - ) - bittensor.__console__.print( - "[green]Taostats Explorer Link: {}[/green]".format( - explorer_urls.get("taostats") - ) - ) - else: - bittensor.__console__.print(f":cross_mark: [red]Failed[/red]: {err_msg}") - - if success: - with bittensor.__console__.status(":satellite: Checking Balance..."): - new_balance = subtensor.get_balance(wallet.coldkey.ss58_address) - bittensor.__console__.print( - "Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - account_balance, new_balance - ) - ) - return True - - return False diff --git a/bittensor/extrinsics/unstaking.py b/bittensor/extrinsics/unstaking.py deleted file mode 100644 index a5de71b7d7..0000000000 --- a/bittensor/extrinsics/unstaking.py +++ /dev/null @@ -1,470 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), 
to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import bittensor -from rich.prompt import Confirm -from time import sleep -from typing import List, Union, Optional -from bittensor.utils.balance import Balance - - -def __do_remove_stake_single( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey_ss58: str, - amount: "bittensor.Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, -) -> bool: - r""" - Executes an unstake call to the chain using the wallet and the amount specified. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - hotkey_ss58 (str): - Hotkey address to unstake from. - amount (bittensor.Balance): - Amount to unstake as Bittensor balance object. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. 
- prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - Raises: - bittensor.errors.StakeError: - If the extrinsic fails to be finalized or included in the block. - bittensor.errors.NotRegisteredError: - If the hotkey is not registered in any subnets. - - """ - # Decrypt keys, - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - success = subtensor._do_unstake( - wallet=wallet, - hotkey_ss58=hotkey_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - return success - - -def check_threshold_amount( - subtensor: "bittensor.subtensor", stake_balance: Balance -) -> bool: - """ - Checks if the remaining stake balance is above the minimum required stake threshold. - - Args: - stake_balance (Balance): - the balance to check for threshold limits. - - Returns: - success (bool): - ``true`` if the unstaking is above the threshold or 0, or ``false`` if the - unstaking is below the threshold, but not 0. 
- """ - min_req_stake: Balance = subtensor.get_minimum_required_stake() - - if min_req_stake > stake_balance > 0: - bittensor.__console__.print( - f":cross_mark: [yellow]Remaining stake balance of {stake_balance} less than minimum of {min_req_stake} TAO[/yellow]" - ) - return False - else: - return True - - -def unstake_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey_ss58: Optional[str] = None, - amount: Optional[Union[Balance, float]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> bool: - r"""Removes stake into the wallet coldkey from the specified hotkey ``uid``. - - Args: - wallet (bittensor.wallet): - Bittensor wallet object. - hotkey_ss58 (Optional[str]): - The ``ss58`` address of the hotkey to unstake from. By default, the wallet hotkey is used. - amount (Union[Balance, float]): - Amount to stake as Bittensor balance, or ``float`` interpreted as Tao. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. 
- """ - # Decrypt keys, - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - if hotkey_ss58 is None: - hotkey_ss58 = wallet.hotkey.ss58_address # Default to wallet's own hotkey. - - with bittensor.__console__.status( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - subtensor.network - ) - ): - old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - old_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, hotkey_ss58=hotkey_ss58 - ) - - hotkey_owner = subtensor.get_hotkey_owner(hotkey_ss58) - own_hotkey: bool = wallet.coldkeypub.ss58_address == hotkey_owner - - # Convert to bittensor.Balance - if amount is None: - # Unstake it all. - unstaking_balance = old_stake - elif not isinstance(amount, bittensor.Balance): - unstaking_balance = bittensor.Balance.from_tao(amount) - else: - unstaking_balance = amount - - # Check enough to unstake. - stake_on_uid = old_stake - if unstaking_balance > stake_on_uid: - bittensor.__console__.print( - ":cross_mark: [red]Not enough stake[/red]: [green]{}[/green] to unstake: [blue]{}[/blue] from hotkey: [white]{}[/white]".format( - stake_on_uid, unstaking_balance, wallet.hotkey_str - ) - ) - return False - - # If nomination stake, check threshold. - if not own_hotkey and not check_threshold_amount( - subtensor=subtensor, stake_balance=(stake_on_uid - unstaking_balance) - ): - bittensor.__console__.print( - ":warning: [yellow]This action will unstake the entire staked balance![/yellow]" - ) - unstaking_balance = stake_on_uid - - # Ask before moving on. 
- if prompt: - if not Confirm.ask( - "Do you want to unstake:\n[bold white] amount: {}\n hotkey: {}[/bold white ]?".format( - unstaking_balance, wallet.hotkey_str - ) - ): - return False - - try: - with bittensor.__console__.status( - ":satellite: Unstaking from chain: [white]{}[/white] ...".format( - subtensor.network - ) - ): - staking_response: bool = __do_remove_stake_single( - subtensor=subtensor, - wallet=wallet, - hotkey_ss58=hotkey_ss58, - amount=unstaking_balance, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if staking_response is True: # If we successfully unstaked. - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - with bittensor.__console__.status( - ":satellite: Checking Balance on: [white]{}[/white] ...".format( - subtensor.network - ) - ): - new_balance = subtensor.get_balance( - address=wallet.coldkeypub.ss58_address - ) - new_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, hotkey_ss58=hotkey_ss58 - ) # Get stake on hotkey. - bittensor.__console__.print( - "Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - old_balance, new_balance - ) - ) - bittensor.__console__.print( - "Stake:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - old_stake, new_stake - ) - ) - return True - else: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: Unknown Error." 
- ) - return False - - except bittensor.errors.NotRegisteredError: - bittensor.__console__.print( - ":cross_mark: [red]Hotkey: {} is not registered.[/red]".format( - wallet.hotkey_str - ) - ) - return False - except bittensor.errors.StakeError as e: - bittensor.__console__.print(":cross_mark: [red]Stake Error: {}[/red]".format(e)) - return False - - -def unstake_multiple_extrinsic( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - hotkey_ss58s: List[str], - amounts: Optional[List[Union[Balance, float]]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, -) -> bool: - r"""Removes stake from each ``hotkey_ss58`` in the list, using each amount, to a common coldkey. - - Args: - wallet (bittensor.wallet): - The wallet with the coldkey to unstake to. - hotkey_ss58s (List[str]): - List of hotkeys to unstake from. - amounts (List[Union[Balance, float]]): - List of amounts to unstake. If ``None``, unstake all. - wait_for_inclusion (bool): - If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): - If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): - If ``true``, the call waits for confirmation from the user before proceeding. - Returns: - success (bool): - Flag is ``true`` if extrinsic was finalized or included in the block. Flag is ``true`` if any wallet was unstaked. If we did not wait for finalization / inclusion, the response is ``true``. 
- """ - if not isinstance(hotkey_ss58s, list) or not all( - isinstance(hotkey_ss58, str) for hotkey_ss58 in hotkey_ss58s - ): - raise TypeError("hotkey_ss58s must be a list of str") - - if len(hotkey_ss58s) == 0: - return True - - if amounts is not None and len(amounts) != len(hotkey_ss58s): - raise ValueError("amounts must be a list of the same length as hotkey_ss58s") - - if amounts is not None and not all( - isinstance(amount, (Balance, float)) for amount in amounts - ): - raise TypeError( - "amounts must be a [list of bittensor.Balance or float] or None" - ) - - if amounts is None: - amounts = [None] * len(hotkey_ss58s) - else: - # Convert to Balance - amounts = [ - bittensor.Balance.from_tao(amount) if isinstance(amount, float) else amount - for amount in amounts - ] - - if sum(amount.tao for amount in amounts) == 0: - # Staking 0 tao - return True - - # Unlock coldkey. - try: - wallet.coldkey - except bittensor.KeyFileError: - bittensor.__console__.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" - ) - return False - - old_stakes = [] - own_hotkeys = [] - with bittensor.__console__.status( - ":satellite: Syncing with chain: [white]{}[/white] ...".format( - subtensor.network - ) - ): - old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - - for hotkey_ss58 in hotkey_ss58s: - old_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, hotkey_ss58=hotkey_ss58 - ) # Get stake on hotkey. - old_stakes.append(old_stake) # None if not registered. - - hotkey_owner = subtensor.get_hotkey_owner(hotkey_ss58) - own_hotkeys.append(wallet.coldkeypub.ss58_address == hotkey_owner) - - successful_unstakes = 0 - for idx, (hotkey_ss58, amount, old_stake, own_hotkey) in enumerate( - zip(hotkey_ss58s, amounts, old_stakes, own_hotkeys) - ): - # Covert to bittensor.Balance - if amount is None: - # Unstake it all. 
- unstaking_balance = old_stake - elif not isinstance(amount, bittensor.Balance): - unstaking_balance = bittensor.Balance.from_tao(amount) - else: - unstaking_balance = amount - - # Check enough to unstake. - stake_on_uid = old_stake - if unstaking_balance > stake_on_uid: - bittensor.__console__.print( - ":cross_mark: [red]Not enough stake[/red]: [green]{}[/green] to unstake: [blue]{}[/blue] from hotkey: [white]{}[/white]".format( - stake_on_uid, unstaking_balance, wallet.hotkey_str - ) - ) - continue - - # If nomination stake, check threshold. - if not own_hotkey and not check_threshold_amount( - subtensor=subtensor, stake_balance=(stake_on_uid - unstaking_balance) - ): - bittensor.__console__.print( - ":warning: [yellow]This action will unstake the entire staked balance![/yellow]" - ) - unstaking_balance = stake_on_uid - - # Ask before moving on. - if prompt: - if not Confirm.ask( - "Do you want to unstake:\n[bold white] amount: {}\n hotkey: {}[/bold white ]?".format( - unstaking_balance, wallet.hotkey_str - ) - ): - continue - - try: - with bittensor.__console__.status( - ":satellite: Unstaking from chain: [white]{}[/white] ...".format( - subtensor.network - ) - ): - staking_response: bool = __do_remove_stake_single( - subtensor=subtensor, - wallet=wallet, - hotkey_ss58=hotkey_ss58, - amount=unstaking_balance, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if staking_response is True: # If we successfully unstaked. - # We only wait here if we expect finalization. - - if idx < len(hotkey_ss58s) - 1: - # Wait for tx rate limit. 
- tx_rate_limit_blocks = subtensor.tx_rate_limit() - if tx_rate_limit_blocks > 0: - bittensor.__console__.print( - ":hourglass: [yellow]Waiting for tx rate limit: [white]{}[/white] blocks[/yellow]".format( - tx_rate_limit_blocks - ) - ) - sleep(tx_rate_limit_blocks * 12) # 12 seconds per block - - if not wait_for_finalization and not wait_for_inclusion: - successful_unstakes += 1 - continue - - bittensor.__console__.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - with bittensor.__console__.status( - ":satellite: Checking Balance on: [white]{}[/white] ...".format( - subtensor.network - ) - ): - block = subtensor.get_current_block() - new_stake = subtensor.get_stake_for_coldkey_and_hotkey( - coldkey_ss58=wallet.coldkeypub.ss58_address, - hotkey_ss58=hotkey_ss58, - block=block, - ) - bittensor.__console__.print( - "Stake ({}): [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - hotkey_ss58, stake_on_uid, new_stake - ) - ) - successful_unstakes += 1 - else: - bittensor.__console__.print( - ":cross_mark: [red]Failed[/red]: Unknown Error." 
- ) - continue - - except bittensor.errors.NotRegisteredError: - bittensor.__console__.print( - ":cross_mark: [red]{} is not registered.[/red]".format(hotkey_ss58) - ) - continue - except bittensor.errors.StakeError as e: - bittensor.__console__.print( - ":cross_mark: [red]Stake Error: {}[/red]".format(e) - ) - continue - - if successful_unstakes != 0: - with bittensor.__console__.status( - ":satellite: Checking Balance on: ([white]{}[/white] ...".format( - subtensor.network - ) - ): - new_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - bittensor.__console__.print( - "Balance: [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - old_balance, new_balance - ) - ) - return True - - return False diff --git a/bittensor/keyfile.py b/bittensor/keyfile.py deleted file mode 100644 index d2c75c1041..0000000000 --- a/bittensor/keyfile.py +++ /dev/null @@ -1,866 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import os -import base64 -import json -import stat -import getpass -import bittensor -from bittensor.errors import KeyFileError -from typing import Optional -from pathlib import Path - -from ansible_vault import Vault -from ansible.parsing.vault import AnsibleVaultError -from cryptography.exceptions import InvalidSignature, InvalidKey -from cryptography.fernet import Fernet, InvalidToken -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC -from nacl import pwhash, secret -from nacl.exceptions import CryptoError -from password_strength import PasswordPolicy -from substrateinterface.utils.ss58 import ss58_encode -from termcolor import colored -from rich.prompt import Confirm - - -NACL_SALT = b"\x13q\x83\xdf\xf1Z\t\xbc\x9c\x90\xb5Q\x879\xe9\xb1" - - -def serialized_keypair_to_keyfile_data(keypair: "bittensor.Keypair") -> bytes: - """Serializes keypair object into keyfile data. - - Args: - keypair (bittensor.Keypair): The keypair object to be serialized. - Returns: - data (bytes): Serialized keypair data. 
- """ - json_data = { - "accountId": "0x" + keypair.public_key.hex() if keypair.public_key else None, - "publicKey": "0x" + keypair.public_key.hex() if keypair.public_key else None, - "privateKey": "0x" + keypair.private_key.hex() if keypair.private_key else None, - "secretPhrase": keypair.mnemonic if keypair.mnemonic else None, - "secretSeed": ( - "0x" - + ( - keypair.seed_hex - if isinstance(keypair.seed_hex, str) - else keypair.seed_hex.hex() - ) - if keypair.seed_hex - else None - ), - "ss58Address": keypair.ss58_address if keypair.ss58_address else None, - } - data = json.dumps(json_data).encode() - return data - - -def deserialize_keypair_from_keyfile_data(keyfile_data: bytes) -> "bittensor.Keypair": - """Deserializes Keypair object from passed keyfile data. - - Args: - keyfile_data (bytes): The keyfile data as bytes to be loaded. - Returns: - keypair (bittensor.Keypair): The Keypair loaded from bytes. - Raises: - KeyFileError: Raised if the passed bytes cannot construct a keypair object. 
- """ - keyfile_data = keyfile_data.decode() - try: - keyfile_dict = dict(json.loads(keyfile_data)) - except: - string_value = str(keyfile_data) - if string_value[:2] == "0x": - string_value = ss58_encode(string_value) - keyfile_dict = { - "accountId": None, - "publicKey": None, - "privateKey": None, - "secretPhrase": None, - "secretSeed": None, - "ss58Address": string_value, - } - else: - raise bittensor.KeyFileError( - "Keypair could not be created from keyfile data: {}".format( - string_value - ) - ) - - if "secretSeed" in keyfile_dict and keyfile_dict["secretSeed"] is not None: - return bittensor.Keypair.create_from_seed(keyfile_dict["secretSeed"]) - - elif "secretPhrase" in keyfile_dict and keyfile_dict["secretPhrase"] is not None: - return bittensor.Keypair.create_from_mnemonic( - mnemonic=keyfile_dict["secretPhrase"] - ) - - elif keyfile_dict.get("privateKey", None) is not None: - # May have the above dict keys also, but we want to preserve the first two - return bittensor.Keypair.create_from_private_key( - keyfile_dict["privateKey"], ss58_format=bittensor.__ss58_format__ - ) - - if "ss58Address" in keyfile_dict and keyfile_dict["ss58Address"] is not None: - return bittensor.Keypair(ss58_address=keyfile_dict["ss58Address"]) - - else: - raise bittensor.KeyFileError( - "Keypair could not be created from keyfile data: {}".format(keyfile_dict) - ) - - -def validate_password(password: str) -> bool: - """Validates the password against a password policy. - - Args: - password (str): The password to verify. - Returns: - valid (bool): ``True`` if the password meets validity requirements. - """ - policy = PasswordPolicy.from_names(strength=0.20, entropybits=10, length=6) - if not password: - return False - tested_pass = policy.password(password) - result = tested_pass.test() - if len(result) > 0: - print( - colored( - "Password not strong enough. 
Try increasing the length of the password or the password complexity" - ) - ) - return False - password_verification = getpass.getpass("Retype your password: ") - if password != password_verification: - print("Passwords do not match") - return False - return True - - -def ask_password_to_encrypt() -> str: - """Prompts the user to enter a password for key encryption. - - Returns: - password (str): The valid password entered by the user. - """ - valid = False - while not valid: - password = getpass.getpass("Specify password for key encryption: ") - valid = validate_password(password) - return password - - -def keyfile_data_is_encrypted_nacl(keyfile_data: bytes) -> bool: - """Returns true if the keyfile data is NaCl encrypted. - - Args: - keyfile_data ( bytes, required ): - Bytes to validate. - Returns: - is_nacl (bool): - ``True`` if data is ansible encrypted. - """ - return keyfile_data[: len("$NACL")] == b"$NACL" - - -def keyfile_data_is_encrypted_ansible(keyfile_data: bytes) -> bool: - """Returns true if the keyfile data is ansible encrypted. - - Args: - keyfile_data (bytes): The bytes to validate. - Returns: - is_ansible (bool): True if the data is ansible encrypted. - """ - return keyfile_data[:14] == b"$ANSIBLE_VAULT" - - -def keyfile_data_is_encrypted_legacy(keyfile_data: bytes) -> bool: - """Returns true if the keyfile data is legacy encrypted. - Args: - keyfile_data (bytes): The bytes to validate. - Returns: - is_legacy (bool): ``True`` if the data is legacy encrypted. - """ - return keyfile_data[:6] == b"gAAAAA" - - -def keyfile_data_is_encrypted(keyfile_data: bytes) -> bool: - """Returns ``true`` if the keyfile data is encrypted. - - Args: - keyfile_data (bytes): The bytes to validate. - Returns: - is_encrypted (bool): ``True`` if the data is encrypted. 
- """ - return ( - keyfile_data_is_encrypted_nacl(keyfile_data) - or keyfile_data_is_encrypted_ansible(keyfile_data) - or keyfile_data_is_encrypted_legacy(keyfile_data) - ) - - -def keyfile_data_encryption_method(keyfile_data: bytes) -> bool: - """Returns ``true`` if the keyfile data is encrypted. - - Args: - keyfile_data ( bytes, required ): - Bytes to validate - Returns: - encryption_method (bool): - ``True`` if data is encrypted. - """ - - if keyfile_data_is_encrypted_nacl(keyfile_data): - return "NaCl" - elif keyfile_data_is_encrypted_ansible(keyfile_data): - return "Ansible Vault" - elif keyfile_data_is_encrypted_legacy(keyfile_data): - return "legacy" - - -def legacy_encrypt_keyfile_data(keyfile_data: bytes, password: str = None) -> bytes: - password = ask_password_to_encrypt() if password is None else password - console = bittensor.__console__ - with console.status( - ":exclamation_mark: Encrypting key with legacy encrpytion method..." - ): - vault = Vault(password) - return vault.vault.encrypt(keyfile_data) - - -def encrypt_keyfile_data(keyfile_data: bytes, password: str = None) -> bytes: - """Encrypts the passed keyfile data using ansible vault. - - Args: - keyfile_data (bytes): The bytes to encrypt. - password (str, optional): The password used to encrypt the data. If ``None``, asks for user input. - Returns: - encrypted_data (bytes): The encrypted data. - """ - password = bittensor.ask_password_to_encrypt() if password is None else password - password = bytes(password, "utf-8") - kdf = pwhash.argon2i.kdf - key = kdf( - secret.SecretBox.KEY_SIZE, - password, - NACL_SALT, - opslimit=pwhash.argon2i.OPSLIMIT_SENSITIVE, - memlimit=pwhash.argon2i.MEMLIMIT_SENSITIVE, - ) - box = secret.SecretBox(key) - encrypted = box.encrypt(keyfile_data) - return b"$NACL" + encrypted - - -def get_coldkey_password_from_environment(coldkey_name: str) -> Optional[str]: - """Retrieves the cold key password from the environment variables. 
- - Args: - coldkey_name (str): The name of the cold key. - Returns: - password (str): The password retrieved from the environment variables, or ``None`` if not found. - """ - envs = { - normalized_env_name: env_value - for env_name, env_value in os.environ.items() - if (normalized_env_name := env_name.upper()).startswith("BT_COLD_PW_") - } - return envs.get(f"BT_COLD_PW_{coldkey_name.replace('-', '_').upper()}") - - -def decrypt_keyfile_data( - keyfile_data: bytes, password: str = None, coldkey_name: Optional[str] = None -) -> bytes: - """Decrypts the passed keyfile data using ansible vault. - - Args: - keyfile_data (bytes): The bytes to decrypt. - password (str, optional): The password used to decrypt the data. If ``None``, asks for user input. - coldkey_name (str, optional): The name of the cold key. If provided, retrieves the password from environment variables. - Returns: - decrypted_data (bytes): The decrypted data. - Raises: - KeyFileError: Raised if the file is corrupted or if the password is incorrect. - """ - if coldkey_name is not None and password is None: - password = get_coldkey_password_from_environment(coldkey_name) - - try: - password = ( - getpass.getpass("Enter password to unlock key: ") - if password is None - else password - ) - console = bittensor.__console__ - with console.status(":key: Decrypting key..."): - # NaCl SecretBox decrypt. - if keyfile_data_is_encrypted_nacl(keyfile_data): - password = bytes(password, "utf-8") - kdf = pwhash.argon2i.kdf - key = kdf( - secret.SecretBox.KEY_SIZE, - password, - NACL_SALT, - opslimit=pwhash.argon2i.OPSLIMIT_SENSITIVE, - memlimit=pwhash.argon2i.MEMLIMIT_SENSITIVE, - ) - box = secret.SecretBox(key) - try: - decrypted_keyfile_data = box.decrypt(keyfile_data[len("$NACL") :]) - except CryptoError: - raise bittensor.KeyFileError("Invalid password") - # Ansible decrypt. 
- elif keyfile_data_is_encrypted_ansible(keyfile_data): - vault = Vault(password) - try: - decrypted_keyfile_data = vault.load(keyfile_data) - except AnsibleVaultError: - raise bittensor.KeyFileError("Invalid password") - # Legacy decrypt. - elif keyfile_data_is_encrypted_legacy(keyfile_data): - __SALT = ( - b"Iguesscyborgslikemyselfhaveatendencytobeparanoidaboutourorigins" - ) - kdf = PBKDF2HMAC( - algorithm=hashes.SHA256(), - salt=__SALT, - length=32, - iterations=10000000, - backend=default_backend(), - ) - key = base64.urlsafe_b64encode(kdf.derive(password.encode())) - cipher_suite = Fernet(key) - decrypted_keyfile_data = cipher_suite.decrypt(keyfile_data) - # Unknown. - else: - raise bittensor.KeyFileError( - "keyfile data: {} is corrupt".format(keyfile_data) - ) - - except (InvalidSignature, InvalidKey, InvalidToken): - raise bittensor.KeyFileError("Invalid password") - - if not isinstance(decrypted_keyfile_data, bytes): - decrypted_keyfile_data = json.dumps(decrypted_keyfile_data).encode() - return decrypted_keyfile_data - - -class keyfile: - """Defines an interface for a substrate interface keypair stored on device.""" - - def __init__(self, path: str): - self.path = os.path.expanduser(path) - self.name = Path(self.path).parent.stem - - def __str__(self): - if not self.exists_on_device(): - return "keyfile (empty, {})>".format(self.path) - if self.is_encrypted(): - return "Keyfile ({} encrypted, {})>".format( - keyfile_data_encryption_method(self._read_keyfile_data_from_file()), - self.path, - ) - else: - return "keyfile (decrypted, {})>".format(self.path) - - def __repr__(self): - return self.__str__() - - @property - def keypair(self) -> "bittensor.Keypair": - """Returns the keypair from path, decrypts data if the file is encrypted. - - Returns: - keypair (bittensor.Keypair): The keypair stored under the path. - Raises: - KeyFileError: Raised if the file does not exist, is not readable, writable, corrupted, or if the password is incorrect. 
- """ - return self.get_keypair() - - @property - def data(self) -> bytes: - """Returns the keyfile data under path. - - Returns: - keyfile_data (bytes): The keyfile data stored under the path. - Raises: - KeyFileError: Raised if the file does not exist, is not readable, or writable. - """ - return self._read_keyfile_data_from_file() - - @property - def keyfile_data(self) -> bytes: - """Returns the keyfile data under path. - - Returns: - keyfile_data (bytes): The keyfile data stored under the path. - Raises: - KeyFileError: Raised if the file does not exist, is not readable, or writable. - """ - return self._read_keyfile_data_from_file() - - def set_keypair( - self, - keypair: "bittensor.Keypair", - encrypt: bool = True, - overwrite: bool = False, - password: str = None, - ): - """Writes the keypair to the file and optionally encrypts data. - - Args: - keypair (bittensor.Keypair): The keypair to store under the path. - encrypt (bool, optional): If ``True``, encrypts the file under the path. Default is ``True``. - overwrite (bool, optional): If ``True``, forces overwrite of the current file. Default is ``False``. - password (str, optional): The password used to encrypt the file. If ``None``, asks for user input. - Raises: - KeyFileError: Raised if the file does not exist, is not readable, writable, or if the password is incorrect. - """ - self.make_dirs() - keyfile_data = serialized_keypair_to_keyfile_data(keypair) - if encrypt: - keyfile_data = bittensor.encrypt_keyfile_data(keyfile_data, password) - self._write_keyfile_data_to_file(keyfile_data, overwrite=overwrite) - - def get_keypair(self, password: str = None) -> "bittensor.Keypair": - """Returns the keypair from the path, decrypts data if the file is encrypted. - - Args: - password (str, optional): The password used to decrypt the file. If ``None``, asks for user input. - Returns: - keypair (bittensor.Keypair): The keypair stored under the path. 
- Raises: - KeyFileError: Raised if the file does not exist, is not readable, writable, corrupted, or if the password is incorrect. - """ - keyfile_data = self._read_keyfile_data_from_file() - if keyfile_data_is_encrypted(keyfile_data): - decrypted_keyfile_data = decrypt_keyfile_data( - keyfile_data, password, coldkey_name=self.name - ) - else: - decrypted_keyfile_data = keyfile_data - return deserialize_keypair_from_keyfile_data(decrypted_keyfile_data) - - def make_dirs(self): - """Creates directories for the path if they do not exist.""" - directory = os.path.dirname(self.path) - if not os.path.exists(directory): - os.makedirs(directory) - - def exists_on_device(self) -> bool: - """Returns ``True`` if the file exists on the device. - - Returns: - on_device (bool): ``True`` if the file is on the device. - """ - if not os.path.isfile(self.path): - return False - return True - - def is_readable(self) -> bool: - """Returns ``True`` if the file under path is readable. - - Returns: - readable (bool): ``True`` if the file is readable. - """ - if not self.exists_on_device(): - return False - if not os.access(self.path, os.R_OK): - return False - return True - - def is_writable(self) -> bool: - """Returns ``True`` if the file under path is writable. - - Returns: - writable (bool): ``True`` if the file is writable. - """ - if os.access(self.path, os.W_OK): - return True - return False - - def is_encrypted(self) -> bool: - """Returns ``True`` if the file under path is encrypted. - - Returns: - encrypted (bool): ``True`` if the file is encrypted. - """ - if not self.exists_on_device(): - return False - if not self.is_readable(): - return False - return keyfile_data_is_encrypted(self._read_keyfile_data_from_file()) - - def _may_overwrite(self) -> bool: - """Asks the user if it is okay to overwrite the file. - - Returns: - may_overwrite (bool): ``True`` if the user allows overwriting the file. - """ - choice = input("File {} already exists. Overwrite? 
(y/N) ".format(self.path)) - return choice == "y" - - def check_and_update_encryption( - self, print_result: bool = True, no_prompt: bool = False - ): - """Check the version of keyfile and update if needed. - - Args: - print_result (bool): - Print the checking result or not. - no_prompt (bool): - Skip if no prompt. - Raises: - KeyFileError: - Raised if the file does not exists, is not readable, writable. - Returns: - result (bool): - Return ``True`` if the keyfile is the most updated with nacl, else ``False``. - """ - if not self.exists_on_device(): - if print_result: - bittensor.__console__.print(f"Keyfile does not exist. {self.path}") - return False - if not self.is_readable(): - if print_result: - bittensor.__console__.print(f"Keyfile is not redable. {self.path}") - return False - if not self.is_writable(): - if print_result: - bittensor.__console__.print(f"Keyfile is not writable. {self.path}") - return False - - update_keyfile = False - if not no_prompt: - keyfile_data = self._read_keyfile_data_from_file() - - # If the key is not nacl encrypted. - if keyfile_data_is_encrypted( - keyfile_data - ) and not keyfile_data_is_encrypted_nacl(keyfile_data): - terminate = False - bittensor.__console__.print( - f"You may update the keyfile to improve the security for storing your keys.\nWhile the key and the password stays the same, it would require providing your password once.\n:key:{self}\n" - ) - update_keyfile = Confirm.ask("Update keyfile?") - if update_keyfile: - stored_mnemonic = False - while not stored_mnemonic: - bittensor.__console__.print( - f"\nPlease make sure you have the mnemonic stored in case an error occurs during the transfer.", - style="white on red", - ) - stored_mnemonic = Confirm.ask("Have you stored the mnemonic?") - if not stored_mnemonic and not Confirm.ask( - "You must proceed with a stored mnemonic, retry and continue this keyfile update?" 
- ): - terminate = True - break - - decrypted_keyfile_data = None - while decrypted_keyfile_data == None and not terminate: - try: - password = getpass.getpass( - "\nEnter password to update keyfile: " - ) - decrypted_keyfile_data = decrypt_keyfile_data( - keyfile_data, coldkey_name=self.name, password=password - ) - except KeyFileError: - if not Confirm.ask( - "Invalid password, retry and continue this keyfile update?" - ): - terminate = True - break - - if not terminate: - encrypted_keyfile_data = encrypt_keyfile_data( - decrypted_keyfile_data, password=password - ) - self._write_keyfile_data_to_file( - encrypted_keyfile_data, overwrite=True - ) - - if print_result or update_keyfile: - keyfile_data = self._read_keyfile_data_from_file() - if not keyfile_data_is_encrypted(keyfile_data): - if print_result: - bittensor.__console__.print( - f"\nKeyfile is not encrypted. \n:key: {self}" - ) - return False - elif keyfile_data_is_encrypted_nacl(keyfile_data): - if print_result: - bittensor.__console__.print( - f"\n:white_heavy_check_mark: Keyfile is updated. \n:key: {self}" - ) - return True - else: - if print_result: - bittensor.__console__.print( - f'\n:cross_mark: Keyfile is outdated, please update with "btcli wallet update" \n:key: {self}' - ) - return False - return False - - def encrypt(self, password: str = None): - """Encrypts the file under the path. - - Args: - password (str, optional): The password for encryption. If ``None``, asks for user input. - Raises: - KeyFileError: Raised if the file does not exist, is not readable, or writable. 
- """ - if not self.exists_on_device(): - raise bittensor.KeyFileError( - "Keyfile at: {} does not exist".format(self.path) - ) - if not self.is_readable(): - raise bittensor.KeyFileError( - "Keyfile at: {} is not readable".format(self.path) - ) - if not self.is_writable(): - raise bittensor.KeyFileError( - "Keyfile at: {} is not writable".format(self.path) - ) - keyfile_data = self._read_keyfile_data_from_file() - if not keyfile_data_is_encrypted(keyfile_data): - as_keypair = deserialize_keypair_from_keyfile_data(keyfile_data) - keyfile_data = serialized_keypair_to_keyfile_data(as_keypair) - keyfile_data = encrypt_keyfile_data(keyfile_data, password) - self._write_keyfile_data_to_file(keyfile_data, overwrite=True) - - def decrypt(self, password: str = None): - """Decrypts the file under the path. - - Args: - password (str, optional): The password for decryption. If ``None``, asks for user input. - Raises: - KeyFileError: Raised if the file does not exist, is not readable, writable, corrupted, or if the password is incorrect. - """ - if not self.exists_on_device(): - raise bittensor.KeyFileError( - "Keyfile at: {} does not exist".format(self.path) - ) - if not self.is_readable(): - raise bittensor.KeyFileError( - "Keyfile at: {} is not readable".format(self.path) - ) - if not self.is_writable(): - raise bittensor.KeyFileError( - "Keyfile at: {} is not writable".format(self.path) - ) - keyfile_data = self._read_keyfile_data_from_file() - if keyfile_data_is_encrypted(keyfile_data): - keyfile_data = decrypt_keyfile_data( - keyfile_data, password, coldkey_name=self.name - ) - as_keypair = deserialize_keypair_from_keyfile_data(keyfile_data) - keyfile_data = serialized_keypair_to_keyfile_data(as_keypair) - self._write_keyfile_data_to_file(keyfile_data, overwrite=True) - - def _read_keyfile_data_from_file(self) -> bytes: - """Reads the keyfile data from the file. - - Returns: - keyfile_data (bytes): The keyfile data stored under the path. 
- Raises: - KeyFileError: Raised if the file does not exist or is not readable. - """ - if not self.exists_on_device(): - raise bittensor.KeyFileError( - "Keyfile at: {} does not exist".format(self.path) - ) - if not self.is_readable(): - raise bittensor.KeyFileError( - "Keyfile at: {} is not readable".format(self.path) - ) - with open(self.path, "rb") as file: - data = file.read() - return data - - def _write_keyfile_data_to_file(self, keyfile_data: bytes, overwrite: bool = False): - """Writes the keyfile data to the file. - - Args: - keyfile_data (bytes): The byte data to store under the path. - overwrite (bool, optional): If ``True``, overwrites the data without asking for permission from the user. Default is ``False``. - Raises: - KeyFileError: Raised if the file is not writable or the user responds No to the overwrite prompt. - """ - # Check overwrite. - if self.exists_on_device() and not overwrite: - if not self._may_overwrite(): - raise bittensor.KeyFileError( - "Keyfile at: {} is not writable".format(self.path) - ) - with open(self.path, "wb") as keyfile: - keyfile.write(keyfile_data) - # Set file permissions. - os.chmod(self.path, stat.S_IRUSR | stat.S_IWUSR) - - -class Mockkeyfile: - """ - The Mockkeyfile is a mock object representing a keyfile that does not exist on the device. - - It is designed for use in testing scenarios and simulations where actual filesystem operations are not required. - The keypair stored in the Mockkeyfile is treated as non-encrypted and the data is stored as a serialized string. - """ - - def __init__(self, path: str): - """ - Initializes a Mockkeyfile object. - - Args: - path (str): The path of the mock keyfile. - """ - self.path = path - self._mock_keypair = None - self._mock_data = None - - def __str__(self): - """ - Returns a string representation of the Mockkeyfile. The representation will indicate if the keyfile is empty, encrypted, or decrypted. - - Returns: - str: The string representation of the Mockkeyfile. 
- """ - return f"Mockkeyfile({self.path})" - - def __repr__(self): - """ - Returns a string representation of the Mockkeyfile, same as :func:`__str__()`. - - Returns: - str: The string representation of the Mockkeyfile. - """ - return self.__str__() - - @property - def keypair(self): - """ - Returns the mock keypair stored in the keyfile. - - Returns: - bittensor.Keypair: The mock keypair. - """ - return self._mock_keypair - - @property - def data(self): - """ - Returns the serialized keypair data stored in the keyfile. - - Returns: - bytes: The serialized keypair data. - """ - return self._mock_data - - def set_keypair(self, keypair, encrypt=True, overwrite=False, password=None): - """ - Sets the mock keypair in the keyfile. The ``encrypt`` and ``overwrite`` parameters are ignored. - - Args: - keypair (bittensor.Keypair): The mock keypair to be set. - encrypt (bool, optional): Ignored in this context. Defaults to ``True``. - overwrite (bool, optional): Ignored in this context. Defaults to ``False``. - password (str, optional): Ignored in this context. Defaults to ``None``. - """ - self._mock_keypair = keypair - self._mock_data = None # You may need to serialize the keypair here - - def get_keypair(self, password=None): - """ - Returns the mock keypair stored in the keyfile. The ``password`` parameter is ignored. - - Args: - password (str, optional): Ignored in this context. Defaults to ``None``. - - Returns: - bittensor.Keypair: The mock keypair stored in the keyfile. - """ - return self._mock_keypair - - def make_dirs(self): - """ - Creates the directories for the mock keyfile. Does nothing in this class, since no actual filesystem operations are needed. - """ - pass - - def exists_on_device(self): - """ - Returns ``True`` indicating that the mock keyfile exists on the device (although it is not created on the actual file system). - - Returns: - bool: Always returns ``True`` for Mockkeyfile. 
- """ - return True - - def is_readable(self): - """ - Returns ``True`` indicating that the mock keyfile is readable (although it is not read from the actual file system). - - Returns: - bool: Always returns ``True`` for Mockkeyfile. - """ - return True - - def is_writable(self): - """ - Returns ``True`` indicating that the mock keyfile is writable (although it is not written to the actual file system). - - Returns: - bool: Always returns ``True`` for Mockkeyfile. - """ - return True - - def is_encrypted(self): - """ - Returns ``False`` indicating that the mock keyfile is not encrypted. - - Returns: - bool: Always returns ``False`` for Mockkeyfile. - """ - return False - - def encrypt(self, password=None): - """ - Raises a ValueError since encryption is not supported for the mock keyfile. - - Args: - password (str, optional): Ignored in this context. Defaults to ``None``. - - Raises: - ValueError: Always raises this exception for Mockkeyfile. - """ - raise ValueError("Cannot encrypt a Mockkeyfile") - - def decrypt(self, password=None): - """ - Returns without doing anything since the mock keyfile is not encrypted. - - Args: - password (str, optional): Ignored in this context. Defaults to ``None``. 
- """ - pass - - def check_and_update_encryption(self, no_prompt=None, print_result=False): - return diff --git a/bittensor/metagraph.py b/bittensor/metagraph.py deleted file mode 100644 index 420c847a09..0000000000 --- a/bittensor/metagraph.py +++ /dev/null @@ -1,1191 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -from abc import ABC, abstractmethod -import os -import pickle -import numpy as np -from numpy.typing import NDArray -import bittensor -from os import listdir -from os.path import join -from typing import List, Optional, Union, Tuple, cast - -from bittensor.chain_data import AxonInfo -from bittensor.utils.registration import torch, use_torch -from bittensor.utils import weight_utils - -METAGRAPH_STATE_DICT_NDARRAY_KEYS = [ - "version", - "n", - "block", - "stake", - "total_stake", - "ranks", - "trust", - "consensus", - "validator_trust", - "incentive", - "emission", - "dividends", - "active", - "last_update", - "validator_permit", - "uids", -] - - -def get_save_dir(network: str, netuid: int) -> str: - """ - Return directory path from ``network`` and ``netuid``. - - Args: - network (str): Network name. - netuid (int): Network UID. - - Returns: - str: Directory path. - """ - return os.path.expanduser( - f"~/.bittensor/metagraphs/network-{str(network)}/netuid-{str(netuid)}/" - ) - - -def latest_block_path(dir_path: str) -> str: - """ - Get the latest block path from the directory. - - Args: - dir_path (str): Directory path. - - Returns: - str: Latest block path. - """ - latest_block = -1 - latest_file_full_path = None - for filename in listdir(dir_path): - full_path_filename = os.path.expanduser(join(dir_path, filename)) - try: - block_number = int(filename.split("-")[1].split(".")[0]) - if block_number > latest_block: - latest_block = block_number - latest_file_full_path = full_path_filename - except Exception as e: - pass - if not latest_file_full_path: - raise ValueError(f"Metagraph not found at: {dir_path}") - else: - return latest_file_full_path - - -class MetagraphMixin(ABC): - """ - The metagraph class is a core component of the Bittensor network, representing the neural graph that forms the backbone of the decentralized machine learning system. 
- - The metagraph is a dynamic representation of the network's state, capturing the interconnectedness and attributes of neurons (participants) in the Bittensor ecosystem. This class is not just a static structure but a live reflection of the network, constantly updated and synchronized with the state of the blockchain. - - In Bittensor, neurons are akin to nodes in a distributed system, each contributing computational resources and participating in the network's collective intelligence. The metagraph tracks various attributes of these neurons, such as stake, trust, and consensus, which are crucial for the network's incentive mechanisms and the Yuma Consensus algorithm as outlined in the `NeurIPS paper `_. These attributes - govern how neurons interact, how they are incentivized, and their roles within the network's - decision-making processes. - - Args: - netuid (int): A unique identifier that distinguishes between different instances or versions of the Bittensor network. - network (str): The name of the network, signifying specific configurations or iterations within the Bittensor ecosystem. - version (NDArray): The version number of the network, integral for tracking network updates. - n (NDArray): The total number of neurons in the network, reflecting its size and complexity. - block (NDArray): The current block number in the blockchain, crucial for synchronizing with the network's latest state. - stake: Represents the cryptocurrency staked by neurons, impacting their influence and earnings within the network. - total_stake: The cumulative stake across all neurons. - ranks: Neuron rankings as per the Yuma Consensus algorithm, influencing their incentive distribution and network authority. - trust: Scores indicating the reliability of neurons, mainly miners, within the network's operational context. - consensus: Scores reflecting each neuron's alignment with the network's collective decisions. 
- validator_trust: Trust scores for validator neurons, crucial for network security and validation. - incentive: Rewards allocated to neurons, particularly miners, for their network contributions. - emission: The rate at which rewards are distributed to neurons. - dividends: Rewards received primarily by validators as part of the incentive mechanism. - active: Status indicating whether a neuron is actively participating in the network. - last_update: Timestamp of the latest update to a neuron's data. - validator_permit: Indicates if a neuron is authorized to act as a validator. - weights: Inter-neuronal weights set by each neuron, influencing network dynamics. - bonds: Represents speculative investments by neurons in others, part of the reward mechanism. - uids: Unique identifiers for each neuron, essential for network operations. - axons (List): Details about each neuron's axon, critical for facilitating network communication. - - The metagraph plays a pivotal role in Bittensor's decentralized AI operations, influencing everything from data propagation to reward distribution. It embodies the principles of decentralized governance - and collaborative intelligence, ensuring that the network remains adaptive, secure, and efficient. - - Example Usage: - Initializing the metagraph to represent the current state of the Bittensor network:: - - metagraph = bt.metagraph(netuid=config.netuid, network=subtensor.network, sync=False) - - Synchronizing the metagraph with the network to reflect the latest state and neuron data:: - - metagraph.sync(subtensor=subtensor) - - Accessing metagraph properties to inform network interactions and decisions:: - - total_stake = metagraph.S - neuron_ranks = metagraph.R - neuron_incentives = metagraph.I - ... 
- - Maintaining a local copy of hotkeys for querying and interacting with network entities:: - - hotkeys = deepcopy(metagraph.hotkeys) - """ - - netuid: int - network: str - version: Union["torch.nn.Parameter", Tuple[NDArray]] - n: Union["torch.nn.Parameter", NDArray] - block: Union["torch.nn.Parameter", NDArray] - stake: Union["torch.nn.Parameter", NDArray] - total_stake: Union["torch.nn.Parameter", NDArray] - ranks: Union["torch.nn.Parameter", NDArray] - trust: Union["torch.nn.Parameter", NDArray] - consensus: Union["torch.nn.Parameter", NDArray] - validator_trust: Union["torch.nn.Parameter", NDArray] - incentive: Union["torch.nn.Parameter", NDArray] - emission: Union["torch.nn.Parameter", NDArray] - dividends: Union["torch.nn.Parameter", NDArray] - active: Union["torch.nn.Parameter", NDArray] - last_update: Union["torch.nn.Parameter", NDArray] - validator_permit: Union["torch.nn.Parameter", NDArray] - weights: Union["torch.nn.Parameter", NDArray] - bonds: Union["torch.nn.Parameter", NDArray] - uids: Union["torch.nn.Parameter", NDArray] - axons: List[AxonInfo] - - @property - def S(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Represents the stake of each neuron in the Bittensor network. Stake is an important concept in the - Bittensor ecosystem, signifying the amount of network weight (or “stake”) each neuron holds, - represented on a digital ledger. The stake influences a neuron's ability to contribute to and benefit - from the network, playing a crucial role in the distribution of incentives and decision-making processes. - - Returns: - NDArray: A tensor representing the stake of each neuron in the network. Higher values signify a greater stake held by the respective neuron. - """ - return self.total_stake - - @property - def R(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Contains the ranks of neurons in the Bittensor network. Ranks are determined by the network based - on each neuron's performance and contributions. 
Higher ranks typically indicate a greater level of - contribution or performance by a neuron. These ranks are crucial in determining the distribution of - incentives within the network, with higher-ranked neurons receiving more incentive. - - Returns: - NDArray: A tensor where each element represents the rank of a neuron. Higher values indicate higher ranks within the network. - """ - return self.ranks - - @property - def I(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Incentive values of neurons represent the rewards they receive for their contributions to the network. - The Bittensor network employs an incentive mechanism that rewards neurons based on their - informational value, stake, and consensus with other peers. This ensures that the most valuable and - trusted contributions are incentivized. - - Returns: - NDArray: A tensor of incentive values, indicating the rewards or benefits accrued by each neuron based on their contributions and network consensus. - """ - return self.incentive - - @property - def E(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Denotes the emission values of neurons in the Bittensor network. Emissions refer to the distribution or - release of rewards (often in the form of cryptocurrency) to neurons, typically based on their stake and - performance. This mechanism is central to the network's incentive model, ensuring that active and - contributing neurons are appropriately rewarded. - - Returns: - NDArray: A tensor where each element represents the emission value for a neuron, indicating the amount of reward distributed to that neuron. - """ - return self.emission - - @property - def C(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Represents the consensus values of neurons in the Bittensor network. Consensus is a measure of how - much a neuron's contributions are trusted and agreed upon by the majority of the network. 
It is - calculated based on a staked weighted trust system, where the network leverages the collective - judgment of all participating peers. Higher consensus values indicate that a neuron's contributions - are more widely trusted and valued across the network. - - Returns: - NDArray: A tensor of consensus values, where each element reflects the level of trust and agreement a neuron has achieved within the network. - - """ - return self.consensus - - @property - def T(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Represents the trust values assigned to each neuron in the Bittensor network. Trust is a key metric that - reflects the reliability and reputation of a neuron based on its past behavior and contributions. It is - an essential aspect of the network's functioning, influencing decision-making processes and interactions - between neurons. - - The trust matrix is inferred from the network's inter-peer weights, indicating the level of trust each neuron - has in others. A higher value in the trust matrix suggests a stronger trust relationship between neurons. - - Returns: - NDArray: A tensor of trust values, where each element represents the trust level of a neuron. Higher values denote a higher level of trust within the network. - """ - return self.trust - - @property - def Tv(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Contains the validator trust values of neurons in the Bittensor network. Validator trust is specifically - associated with neurons that act as validators within the network. This specialized form of trust reflects - the validators' reliability and integrity in their role, which is crucial for maintaining the network's - stability and security. - - Validator trust values are particularly important for the network's consensus and validation processes, - determining the validators' influence and responsibilities in these critical functions. 
- - Returns: - NDArray: A tensor of validator trust values, specifically applicable to neurons serving as validators, where higher values denote greater trustworthiness in their validation roles. - """ - return self.validator_trust - - @property - def D(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Represents the dividends received by neurons in the Bittensor network. Dividends are a form of reward or - distribution, typically given to neurons based on their stake, performance, and contribution to the network. - They are an integral part of the network's incentive structure, encouraging active and beneficial participation. - - Returns: - NDArray: A tensor of dividend values, where each element indicates the dividends received by a neuron, reflecting their share of network rewards. - """ - return self.dividends - - @property - def B(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Bonds in the Bittensor network represent a speculative reward mechanism where neurons can accumulate - bonds in other neurons. Bonds are akin to investments or stakes in other neurons, reflecting a belief in - their future value or performance. This mechanism encourages correct weighting and collaboration - among neurons while providing an additional layer of incentive. - - Returns: - NDArray: A tensor representing the bonds held by each neuron, where each value signifies the proportion of bonds owned by one neuron in another. - """ - return self.bonds - - @property - def W(self) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Represents the weights assigned to each neuron in the Bittensor network. In the context of Bittensor, - weights are crucial for determining the influence and interaction between neurons. Each neuron is responsible - for setting its weights, which are then recorded on a digital ledger. These weights are reflective of the - neuron's assessment or judgment of other neurons in the network. 
- - The weight matrix :math:`W = [w_{ij}]` is a key component of the network's architecture, where the :math:`i^{th}` row is set by - neuron :math:`i` and represents its weights towards other neurons. These weights influence the ranking and incentive - mechanisms within the network. Higher weights from a neuron towards another can imply greater trust or value - placed on that neuron's contributions. - - Returns: - NDArray: A tensor of inter-peer weights, where each element :math:`w_{ij}` represents the weight assigned by neuron :math:`i` to neuron :math:`j`. This matrix is fundamental to the network's functioning, influencing the distribution of incentives and the inter-neuronal dynamics. - """ - return self.weights - - @property - def hotkeys(self) -> List[str]: - """ - Represents a list of ``hotkeys`` for each neuron in the Bittensor network. - - Hotkeys are unique identifiers used by neurons for active participation in the network, such as sending and receiving information or - transactions. They are akin to public keys in cryptographic systems and are essential for identifying and authenticating neurons within the network's operations. - - Returns: - List[str]: A list of hotkeys, with each string representing the hotkey of a corresponding neuron. - - These keys are crucial for the network's security and integrity, ensuring proper identification and authorization of network participants. - - Note: - While the `NeurIPS paper `_ may not explicitly detail the concept of hotkeys, they are a fundamental of decentralized networks for secure and authenticated interactions. - """ - return [axon.hotkey for axon in self.axons] - - @property - def coldkeys(self) -> List[str]: - """ - Contains a list of ``coldkeys`` for each neuron in the Bittensor network. - - Coldkeys are similar to hotkeys but are typically used for more secure, offline activities such as storing assets or offline signing of transactions. 
They are an important aspect of a neuron's security, providing an additional layer of protection for sensitive operations and assets. - - Returns: - List[str]: A list of coldkeys, each string representing the coldkey of a neuron. These keys play a vital role in the secure management of assets and sensitive operations within the network. - - Note: - The concept of coldkeys, while not explicitly covered in the NeurIPS paper, is a standard practice in - blockchain and decentralized networks for enhanced security and asset protection. - """ - return [axon.coldkey for axon in self.axons] - - @property - def addresses(self) -> List[str]: - """ - Provides a list of IP addresses for each neuron in the Bittensor network. These addresses are used for - network communication, allowing neurons to connect, interact, and exchange information with each other. - IP addresses are fundamental for the network's peer-to-peer communication infrastructure. - - Returns: - List[str]: A list of IP addresses, with each string representing the address of a neuron. These addresses enable the decentralized, distributed nature of the network, facilitating direct communication and data exchange among neurons. - - Note: - While IP addresses are a basic aspect of network communication, specific details about their use in - the Bittensor network may not be covered in the `NeurIPS paper `_. They are, however, integral to the - functioning of any distributed network. - """ - return [axon.ip_str() for axon in self.axons] - - @abstractmethod - def __init__( - self, netuid: int, network: str = "finney", lite: bool = True, sync: bool = True - ): - """ - Initializes a new instance of the metagraph object, setting up the basic structure and parameters based on the provided arguments. - This method is the entry point for creating a metagraph object, - which is a central component in representing the state of the Bittensor network. 
- Args: - netuid (int): The unique identifier for the network, distinguishing this instance of the metagraph within potentially multiple network configurations. - network (str): The name of the network, which can indicate specific configurations or versions of the Bittensor network. - lite (bool): A flag indicating whether to use a lite version of the metagraph. The lite version may contain less detailed information but can be quicker to initialize and sync. - sync (bool): A flag indicating whether to synchronize the metagraph with the network upon initialization. Synchronization involves updating the metagraph's parameters to reflect the current state of the network. - Example: - Initializing a metagraph object for the Bittensor network with a specific network UID:: - metagraph = metagraph(netuid=123, network="finney", lite=True, sync=True) - """ - pass - - def __str__(self) -> str: - """ - Provides a human-readable string representation of the metagraph object. This representation includes key identifiers and attributes of the metagraph, making it easier to quickly understand - the state and configuration of the metagraph in a simple format. - - Returns: - str: A string that succinctly represents the metagraph, including its network UID, the total number of neurons (n), the current block number, and the network's name. This format is particularly useful for logging, debugging, and displaying the metagraph in a concise manner. - - Example: - When printing the metagraph object or using it in a string context, this method is automatically invoked:: - - print(metagraph) # Output: "metagraph(netuid:1, n:100, block:500, network:finney)" - """ - return "metagraph(netuid:{}, n:{}, block:{}, network:{})".format( - self.netuid, self.n.item(), self.block.item(), self.network - ) - - def __repr__(self) -> str: - """ - Provides a detailed string representation of the metagraph object, intended for unambiguous understanding and debugging purposes. 
This method simply calls the :func:`__str__` method, ensuring - consistency between the informal and formal string representations of the metagraph. - - Returns: - str: The same string representation as provided by the :func:`__str__` method, detailing the metagraph's key attributes including network UID, number of neurons, block number, and network name. - - Example: - The :func:`__repr__` output can be used in debugging to get a clear and concise description of the metagraph:: - - metagraph_repr = repr(metagraph) - print(metagraph_repr) # Output mirrors that of __str__ - """ - return self.__str__() - - def metadata(self) -> dict: - """ - Retrieves the metadata of the metagraph, providing key information about the current state of the - Bittensor network. This metadata includes details such as the network's unique identifier (``netuid``), - the total number of neurons (``n``), the current block number, the network's name, and the version of - the Bittensor network. - - Returns: - dict: A dictionary containing essential metadata about the metagraph, including: - - - ``netuid``: The unique identifier for the network. - - ``n``: The total number of neurons in the network. - - ``block``: The current block number in the network's blockchain. - - ``network``: The name of the Bittensor network. - - ``version``: The version number of the Bittensor software. - - Note: - This metadata is crucial for understanding the current state and configuration of the network, as well as for tracking its evolution over time. 
- """ - return { - "netuid": self.netuid, - "n": self.n.item(), - "block": self.block.item(), - "network": self.network, - "version": bittensor.__version__, - } - - def state_dict(self): - return { - "netuid": self.netuid, - "network": self.network, - "version": self.version, - "n": self.n, - "block": self.block, - "stake": self.stake, - "total_stake": self.total_stake, - "ranks": self.ranks, - "trust": self.trust, - "consensus": self.consensus, - "validator_trust": self.validator_trust, - "incentive": self.incentive, - "emission": self.emission, - "dividends": self.dividends, - "active": self.active, - "last_update": self.last_update, - "validator_permit": self.validator_permit, - "weights": self.weights, - "bonds": self.bonds, - "uids": self.uids, - "axons": self.axons, - } - - def sync( - self, - block: Optional[int] = None, - lite: bool = True, - subtensor: Optional["bittensor.subtensor"] = None, - ): - """ - Synchronizes the metagraph with the Bittensor network's current state. It updates the metagraph's attributes - to reflect the latest data from the network, ensuring the metagraph represents the most current state of the network. - - Args: - block (Optional[int]): A specific block number to synchronize with. If None, the metagraph syncs with the latest block. - This allows for historical analysis or specific state examination of the network. - lite (bool): If True, a lite version of the metagraph is used for quicker synchronization. This is beneficial - when full detail is not necessary, allowing for reduced computational and time overhead. - subtensor (Optional[bittensor.subtensor]): An instance of the subtensor class from Bittensor, providing an - interface to the underlying blockchain data. If provided, this - instance is used for data retrieval during synchronization. - - Returns: - metagraph: The metagraph instance, updated to the state of the specified block or the latest network state. 
- - Example: - Sync the metagraph with the latest block from the subtensor, using the lite version for efficiency:: - - metagraph.sync(subtensor=subtensor) - - Sync with a specific block number for detailed analysis:: - - metagraph.sync(block=12345, lite=False, subtensor=subtensor) - - NOTE: - If attempting to access data beyond the previous 300 blocks, you **must** use the ``archive`` network for subtensor. - Light nodes are configured only to store the previous 300 blocks if connecting to finney or test networks. - - For example:: - - subtensor = bittensor.subtensor(network='archive') - """ - - # Initialize subtensor - subtensor = self._initialize_subtensor(subtensor) - - if ( - subtensor.chain_endpoint != bittensor.__archive_entrypoint__ # type: ignore - or subtensor.network != "archive" # type: ignore - ): - cur_block = subtensor.get_current_block() # type: ignore - if block and block < (cur_block - 300): - bittensor.logging.warning( - "Attempting to sync longer than 300 blocks ago on a non-archive node. Please use the 'archive' network for subtensor and retry." - ) - - # Assign neurons based on 'lite' flag - self._assign_neurons(block, lite, subtensor) - - # Set attributes for metagraph - self._set_metagraph_attributes(block, subtensor) - - # If not a 'lite' version, compute and set weights and bonds for each neuron - if not lite: - self._set_weights_and_bonds(subtensor=subtensor) - - def _initialize_subtensor(self, subtensor): - """ - Initializes the subtensor to be used for syncing the metagraph. - - This method ensures that a subtensor instance is available and properly set up for data retrieval during the synchronization process. - - If no subtensor is provided, this method is responsible for creating a new instance of the subtensor, configured according to the current network settings. - - Args: - subtensor: The subtensor instance provided for initialization. If ``None``, a new subtensor instance is created using the current network configuration. 
- - Returns: - subtensor: The initialized subtensor instance, ready to be used for syncing the metagraph. - - Internal Usage: - Used internally during the sync process to ensure a valid subtensor instance is available:: - - subtensor = self._initialize_subtensor(subtensor) - """ - if not subtensor: - # TODO: Check and test the initialization of the new subtensor - subtensor = bittensor.subtensor(network=self.network) - return subtensor - - def _assign_neurons(self, block, lite, subtensor): - """ - Assigns neurons to the metagraph based on the provided block number and the lite flag. - - This method is responsible for fetching and setting the neuron data in the metagraph, which includes neuron attributes like UID, stake, trust, and other relevant information. - - Args: - block: The block number for which the neuron data needs to be fetched. If ``None``, the latest block data is used. - lite: A boolean flag indicating whether to use a lite version of the neuron data. The lite version typically includes essential information and is quicker to fetch and process. - subtensor: The subtensor instance used for fetching neuron data from the network. - - Internal Usage: - Used internally during the sync process to fetch and set neuron data:: - - self._assign_neurons(block, lite, subtensor) - """ - # TODO: Check and test the conditions for assigning neurons - if lite: - self.neurons = subtensor.neurons_lite(block=block, netuid=self.netuid) - else: - self.neurons = subtensor.neurons(block=block, netuid=self.netuid) - self.lite = lite - - @staticmethod - def _create_tensor(data, dtype) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Creates a numpy array with the given data and data type. This method is a utility function used internally to encapsulate data into a np.array, making it compatible with the metagraph's numpy model structure. - - Args: - data: The data to be included in the tensor. This could be any numeric data, like stakes, ranks, etc. 
- dtype: The data type for the tensor, typically a numpy data type like ``np.float32`` or ``np.int64``. - - Returns: - A tensor parameter encapsulating the provided data. - - Internal Usage: - Used internally to create tensor parameters for various metagraph attributes:: - - self.stake = self._create_tensor(neuron_stakes, dtype=np.float32) - """ - # TODO: Check and test the creation of tensor - return ( - torch.nn.Parameter(torch.tensor(data, dtype=dtype), requires_grad=False) - if use_torch() - else np.array(data, dtype=dtype) - ) - - def _set_weights_and_bonds(self, subtensor: Optional[bittensor.subtensor] = None): - """ - Computes and sets the weights and bonds for each neuron in the metagraph. This method is responsible for processing the raw weight and bond data obtained from the network and converting it into a structured format suitable for the metagraph model. - - Args: - subtensor: The subtensor instance used for fetching weights and bonds data. If ``None``, the weights and bonds are not updated. - - Internal Usage: - Used internally during the sync process to update the weights and bonds of the neurons:: - - self._set_weights_and_bonds(subtensor=subtensor) - """ - # TODO: Check and test the computation of weights and bonds - if self.netuid == 0: - self.weights = self._process_root_weights( - [neuron.weights for neuron in self.neurons], - "weights", - subtensor, # type: ignore - ) - else: - self.weights = self._process_weights_or_bonds( - [neuron.weights for neuron in self.neurons], "weights" - ) - self.bonds = self._process_weights_or_bonds( - [neuron.bonds for neuron in self.neurons], "bonds" - ) - - def _process_weights_or_bonds( - self, data, attribute: str - ) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Processes the raw weights or bonds data and converts it into a structured tensor format. 
This method handles the transformation of neuron connection data (``weights`` or ``bonds``) from a list or other unstructured format into a tensor that can be utilized within the metagraph model. - - Args: - data: The raw weights or bonds data to be processed. This data typically comes from the subtensor. - attribute: A string indicating whether the data is ``weights`` or ``bonds``, which determines the specific processing steps to be applied. - - Returns: - A tensor parameter encapsulating the processed weights or bonds data. - - Internal Usage: - Used internally to process and set weights or bonds for the neurons:: - - self.weights = self._process_weights_or_bonds(raw_weights_data, "weights") - """ - data_array: list[Union[NDArray[np.float32], "torch.Tensor"]] = [] - for item in data: - if len(item) == 0: - if use_torch(): - data_array.append(torch.zeros(len(self.neurons))) - else: - data_array.append(np.zeros(len(self.neurons), dtype=np.float32)) - else: - uids, values = zip(*item) - # TODO: Validate and test the conversion of uids and values to tensor - if attribute == "weights": - data_array.append( - weight_utils.convert_weight_uids_and_vals_to_tensor( - len(self.neurons), - list(uids), - list(values), - ) - ) - else: - da_item = weight_utils.convert_bond_uids_and_vals_to_tensor( - len(self.neurons), list(uids), list(values) - ) - if use_torch(): - data_array.append(cast("torch.LongTensor", da_item)) - else: - data_array.append( - cast(NDArray[np.float32], da_item).astype(np.float32) - ) - tensor_param: Union["torch.nn.Parameter", NDArray] = ( - ( - torch.nn.Parameter( - torch.stack(cast(list["torch.Tensor"], data_array)), - requires_grad=False, - ) - if len(data_array) - else torch.nn.Parameter() - ) - if use_torch() - else ( - np.stack(data_array) - if len(data_array) - else np.array([], dtype=np.float32) - ) - ) - if len(data_array) == 0: - bittensor.logging.warning( - f"Empty {attribute}_array on metagraph.sync(). The '{attribute}' tensor is empty." 
- ) - return tensor_param - - @abstractmethod - def _set_metagraph_attributes(self, block, subtensor): - pass - - def _process_root_weights( - self, data, attribute: str, subtensor: bittensor.subtensor - ) -> Union[NDArray, "torch.nn.Parameter"]: - """ - Specifically processes the root weights data for the metagraph. This method is similar to :func:`_process_weights_or_bonds` but is tailored for processing root weights, which have a different structure and significance in the network. - - Args: - data: The raw root weights data to be processed. - attribute: A string indicating the attribute type, here it's typically ``weights``. - subtensor: The subtensor instance used for additional data and context needed in processing. - - Returns: - A tensor parameter encapsulating the processed root weights data. - - Internal Usage: - Used internally to process and set root weights for the metagraph:: - - self.root_weights = self._process_root_weights( - raw_root_weights_data, "weights", subtensor - ) - - """ - data_array = [] - n_subnets = subtensor.get_total_subnets() or 0 - subnets = subtensor.get_subnets() - for item in data: - if len(item) == 0: - if use_torch(): - data_array.append(torch.zeros(n_subnets)) - else: - data_array.append(np.zeros(n_subnets, dtype=np.float32)) # type: ignore - else: - uids, values = zip(*item) - # TODO: Validate and test the conversion of uids and values to tensor - data_array.append( - weight_utils.convert_root_weight_uids_and_vals_to_tensor( # type: ignore - n_subnets, list(uids), list(values), subnets - ) - ) - - tensor_param: Union[NDArray, "torch.nn.Parameter"] = ( - ( - torch.nn.Parameter(torch.stack(data_array), requires_grad=False) - if len(data_array) - else torch.nn.Parameter() - ) - if use_torch() - else ( - np.stack(data_array) - if len(data_array) - else np.array([], dtype=np.float32) - ) - ) - if len(data_array) == 0: - bittensor.logging.warning( - f"Empty {attribute}_array on metagraph.sync(). The '{attribute}' tensor is empty." 
- ) - return tensor_param - - def save(self) -> "metagraph": # type: ignore - """ - Saves the current state of the metagraph to a file on disk. This function is crucial for persisting the current state of the network's metagraph, which can later be reloaded or analyzed. The save operation includes all neuron attributes and parameters, ensuring a complete snapshot of the metagraph's state. - - Returns: - metagraph: The metagraph instance after saving its state. - - Example: - Save the current state of the metagraph to the default directory:: - - metagraph.save() - - The saved state can later be loaded to restore or analyze the metagraph's state at this point. - - If using the default save path:: - - metagraph.load() - - If using a custom save path:: - - metagraph.load_from_path(dir_path) - """ - save_directory = get_save_dir(self.network, self.netuid) - os.makedirs(save_directory, exist_ok=True) - if use_torch(): - graph_filename = f"{save_directory}/block-{self.block.item()}.pt" - state_dict = self.state_dict() - state_dict["axons"] = self.axons - torch.save(state_dict, graph_filename) - state_dict = torch.load( - graph_filename - ) # verifies that the file can be loaded correctly - else: - graph_filename = f"{save_directory}/block-{self.block.item()}.pt" - state_dict = self.state_dict() - with open(graph_filename, "wb") as graph_file: - pickle.dump(state_dict, graph_file) - return self - - def load(self): - """ - Loads the state of the metagraph from the default save directory. This method is instrumental for restoring the metagraph to its last saved state. It automatically identifies the save directory based on the ``network`` and ``netuid`` properties of the metagraph, locates the latest block file in that directory, and loads all metagraph parameters from it. - - This functionality is particularly beneficial when continuity in the state of the metagraph is necessary - across different runtime sessions, or after a restart of the system. 
It ensures that the metagraph reflects - the exact state it was in at the last save point, maintaining consistency in the network's representation. - - The method delegates to ``load_from_path``, supplying it with the directory path constructed from the metagraph's current ``network`` and ``netuid`` properties. This abstraction simplifies the process of loading the metagraph's state for the user, requiring no direct path specifications. - - Returns: - metagraph: The metagraph instance after loading its state from the default directory. - - Example: - Load the metagraph state from the last saved snapshot in the default directory:: - - metagraph.load() - - After this operation, the metagraph's parameters and neuron data are restored to their state at the time of the last save in the default directory. - - Note: - The default save directory is determined based on the metagraph's ``network`` and ``netuid`` attributes. It is important to ensure that these attributes are set correctly and that the default save directory contains the appropriate state files for the metagraph. - """ - self.load_from_path(get_save_dir(self.network, self.netuid)) - - @abstractmethod - def load_from_path(self, dir_path: str) -> "metagraph": # type: ignore - """ - Loads the state of the metagraph from a specified directory path. This method is crucial for restoring the metagraph to a specific state based on saved data. It locates the latest block file in the given - directory and loads all metagraph parameters from it. This is particularly useful for analyses that require historical states of the network or for restoring previous states of the metagraph in different - execution environments. - - The method first identifies the latest block file in the specified directory, then loads the metagraph state including neuron attributes and parameters from this file. This ensures that the metagraph is accurately reconstituted to reflect the network state at the time of the saved block. 
- - Args: - dir_path (str): The directory path where the metagraph's state files are stored. This path should contain one or more saved state files, typically named in a format that includes the block number. - - Returns: - metagraph: The metagraph instance after loading its state from the specified directory path. - - Example: - Load the metagraph state from a specific directory:: - - dir_path = "/path/to/saved/metagraph/states" - metagraph.load_from_path(dir_path) - - The metagraph is now restored to the state it was in at the time of the latest saved block in the specified directory. - - Note: - This method assumes that the state files in the specified directory are correctly formatted and - contain valid data for the metagraph. It is essential to ensure that the directory path and the - state files within it are accurate and consistent with the expected metagraph structure. - """ - pass - - -BaseClass: Union["torch.nn.Module", object] = torch.nn.Module if use_torch() else object - - -class TorchMetaGraph(MetagraphMixin, BaseClass): # type: ignore - def __init__( - self, netuid: int, network: str = "finney", lite: bool = True, sync: bool = True - ): - """ - Initializes a new instance of the metagraph object, setting up the basic structure and parameters based on the provided arguments. - This method is the entry point for creating a metagraph object, - which is a central component in representing the state of the Bittensor network. - Args: - netuid (int): The unique identifier for the network, distinguishing this instance of the metagraph within potentially multiple network configurations. - network (str): The name of the network, which can indicate specific configurations or versions of the Bittensor network. - lite (bool): A flag indicating whether to use a lite version of the metagraph. The lite version may contain less detailed information but can be quicker to initialize and sync. 
- sync (bool): A flag indicating whether to synchronize the metagraph with the network upon initialization. Synchronization involves updating the metagraph's parameters to reflect the current state of the network. - Example: - Initializing a metagraph object for the Bittensor network with a specific network UID:: - metagraph = metagraph(netuid=123, network="finney", lite=True, sync=True) - """ - torch.nn.Module.__init__(self) - MetagraphMixin.__init__(self, netuid, network, lite, sync) - self.netuid = netuid - self.network = network - self.version = torch.nn.Parameter( - torch.tensor([bittensor.__version_as_int__], dtype=torch.int64), - requires_grad=False, - ) - self.n: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([0], dtype=torch.int64), requires_grad=False - ) - self.block: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([0], dtype=torch.int64), requires_grad=False - ) - self.stake = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.total_stake: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.ranks: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.trust: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.consensus: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.validator_trust: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.incentive: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.emission: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.dividends: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - 
self.active = torch.nn.Parameter( - torch.tensor([], dtype=torch.int64), requires_grad=False - ) - self.last_update = torch.nn.Parameter( - torch.tensor([], dtype=torch.int64), requires_grad=False - ) - self.validator_permit = torch.nn.Parameter( - torch.tensor([], dtype=torch.bool), requires_grad=False - ) - self.weights: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.bonds: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.int64), requires_grad=False - ) - self.uids = torch.nn.Parameter( - torch.tensor([], dtype=torch.int64), requires_grad=False - ) - self.axons: List[AxonInfo] = [] - if sync: - self.sync(block=None, lite=lite) - - def _set_metagraph_attributes(self, block, subtensor): - """ - Sets various attributes of the metagraph based on the latest network data fetched from the subtensor. - - This method updates parameters like the number of neurons, block number, stakes, trusts, ranks, and other neuron-specific information. - - Args: - block: The block number for which the metagraph attributes need to be set. If ``None``, the latest block data is used. - subtensor: The subtensor instance used for fetching the latest network data. 
- - Internal Usage: - Used internally during the sync process to update the metagraph's attributes:: - - self._set_metagraph_attributes(block, subtensor) - """ - self.n = self._create_tensor(len(self.neurons), dtype=torch.int64) - self.version = self._create_tensor( - [bittensor.__version_as_int__], dtype=torch.int64 - ) - self.block = self._create_tensor( - block if block else subtensor.block, dtype=torch.int64 - ) - self.uids = self._create_tensor( - [neuron.uid for neuron in self.neurons], dtype=torch.int64 - ) - self.trust = self._create_tensor( - [neuron.trust for neuron in self.neurons], dtype=torch.float32 - ) - self.consensus = self._create_tensor( - [neuron.consensus for neuron in self.neurons], dtype=torch.float32 - ) - self.incentive = self._create_tensor( - [neuron.incentive for neuron in self.neurons], dtype=torch.float32 - ) - self.dividends = self._create_tensor( - [neuron.dividends for neuron in self.neurons], dtype=torch.float32 - ) - self.ranks = self._create_tensor( - [neuron.rank for neuron in self.neurons], dtype=torch.float32 - ) - self.emission = self._create_tensor( - [neuron.emission for neuron in self.neurons], dtype=torch.float32 - ) - self.active = self._create_tensor( - [neuron.active for neuron in self.neurons], dtype=torch.int64 - ) - self.last_update = self._create_tensor( - [neuron.last_update for neuron in self.neurons], dtype=torch.int64 - ) - self.validator_permit = self._create_tensor( - [neuron.validator_permit for neuron in self.neurons], dtype=torch.bool - ) - self.validator_trust = self._create_tensor( - [neuron.validator_trust for neuron in self.neurons], dtype=torch.float32 - ) - self.total_stake = self._create_tensor( - [neuron.total_stake.tao for neuron in self.neurons], dtype=torch.float32 - ) - self.stake = self._create_tensor( - [neuron.stake for neuron in self.neurons], dtype=torch.float32 - ) - self.axons = [n.axon_info for n in self.neurons] - - def load_from_path(self, dir_path: str) -> "metagraph": # type: ignore 
- graph_file = latest_block_path(dir_path) - state_dict = torch.load(graph_file) - self.n = torch.nn.Parameter(state_dict["n"], requires_grad=False) - self.block = torch.nn.Parameter(state_dict["block"], requires_grad=False) - self.uids = torch.nn.Parameter(state_dict["uids"], requires_grad=False) - self.stake = torch.nn.Parameter(state_dict["stake"], requires_grad=False) - self.total_stake = torch.nn.Parameter( - state_dict["total_stake"], requires_grad=False - ) - self.ranks = torch.nn.Parameter(state_dict["ranks"], requires_grad=False) - self.trust = torch.nn.Parameter(state_dict["trust"], requires_grad=False) - self.consensus = torch.nn.Parameter( - state_dict["consensus"], requires_grad=False - ) - self.validator_trust = torch.nn.Parameter( - state_dict["validator_trust"], requires_grad=False - ) - self.incentive = torch.nn.Parameter( - state_dict["incentive"], requires_grad=False - ) - self.emission = torch.nn.Parameter(state_dict["emission"], requires_grad=False) - self.dividends = torch.nn.Parameter( - state_dict["dividends"], requires_grad=False - ) - self.active = torch.nn.Parameter(state_dict["active"], requires_grad=False) - self.last_update = torch.nn.Parameter( - state_dict["last_update"], requires_grad=False - ) - self.validator_permit = torch.nn.Parameter( - state_dict["validator_permit"], requires_grad=False - ) - self.uids = torch.nn.Parameter(state_dict["uids"], requires_grad=False) - self.axons = state_dict["axons"] - if "weights" in state_dict: - self.weights = torch.nn.Parameter( - state_dict["weights"], requires_grad=False - ) - if "bonds" in state_dict: - self.bonds = torch.nn.Parameter(state_dict["bonds"], requires_grad=False) - return self - - -class NonTorchMetagraph(MetagraphMixin): - def __init__( - self, netuid: int, network: str = "finney", lite: bool = True, sync: bool = True - ): - # super(metagraph, self).__init__() - MetagraphMixin.__init__(self, netuid, network, lite, sync) - - self.netuid = netuid - self.network = network - 
self.version = (np.array([bittensor.__version_as_int__], dtype=np.int64),) - self.n = np.array([0], dtype=np.int64) - self.block = np.array([0], dtype=np.int64) - self.stake = np.array([], dtype=np.float32) - self.total_stake = np.array([], dtype=np.float32) - self.ranks = np.array([], dtype=np.float32) - self.trust = np.array([], dtype=np.float32) - self.consensus = np.array([], dtype=np.float32) - self.validator_trust = np.array([], dtype=np.float32) - self.incentive = np.array([], dtype=np.float32) - self.emission = np.array([], dtype=np.float32) - self.dividends = np.array([], dtype=np.float32) - self.active = np.array([], dtype=np.int64) - self.last_update = np.array([], dtype=np.int64) - self.validator_permit = np.array([], dtype=bool) - self.weights = np.array([], dtype=np.float32) - self.bonds = np.array([], dtype=np.int64) - self.uids = np.array([], dtype=np.int64) - self.axons: List[AxonInfo] = [] - if sync: - self.sync(block=None, lite=lite) - - def _set_metagraph_attributes(self, block, subtensor): - """ - Sets various attributes of the metagraph based on the latest network data fetched from the subtensor. - - This method updates parameters like the number of neurons, block number, stakes, trusts, ranks, and other neuron-specific information. - - Args: - block: The block number for which the metagraph attributes need to be set. If ``None``, the latest block data is used. - subtensor: The subtensor instance used for fetching the latest network data. 
- - Internal Usage: - Used internally during the sync process to update the metagraph's attributes:: - - self._set_metagraph_attributes(block, subtensor) - """ - # TODO: Check and test the setting of each attribute - self.n = self._create_tensor(len(self.neurons), dtype=np.int64) - self.version = self._create_tensor( - [bittensor.__version_as_int__], dtype=np.int64 - ) - self.block = self._create_tensor( - block if block else subtensor.block, dtype=np.int64 - ) - self.uids = self._create_tensor( - [neuron.uid for neuron in self.neurons], dtype=np.int64 - ) - self.trust = self._create_tensor( - [neuron.trust for neuron in self.neurons], dtype=np.float32 - ) - self.consensus = self._create_tensor( - [neuron.consensus for neuron in self.neurons], dtype=np.float32 - ) - self.incentive = self._create_tensor( - [neuron.incentive for neuron in self.neurons], dtype=np.float32 - ) - self.dividends = self._create_tensor( - [neuron.dividends for neuron in self.neurons], dtype=np.float32 - ) - self.ranks = self._create_tensor( - [neuron.rank for neuron in self.neurons], dtype=np.float32 - ) - self.emission = self._create_tensor( - [neuron.emission for neuron in self.neurons], dtype=np.float32 - ) - self.active = self._create_tensor( - [neuron.active for neuron in self.neurons], dtype=np.int64 - ) - self.last_update = self._create_tensor( - [neuron.last_update for neuron in self.neurons], dtype=np.int64 - ) - self.validator_permit = self._create_tensor( - [neuron.validator_permit for neuron in self.neurons], dtype=bool - ) - self.validator_trust = self._create_tensor( - [neuron.validator_trust for neuron in self.neurons], dtype=np.float32 - ) - self.total_stake = self._create_tensor( - [neuron.total_stake.tao for neuron in self.neurons], dtype=np.float32 - ) - self.stake = self._create_tensor( - [neuron.stake for neuron in self.neurons], dtype=np.float32 - ) - self.axons = [n.axon_info for n in self.neurons] - - def load_from_path(self, dir_path: str) -> "metagraph": # type: 
ignore - graph_filename = latest_block_path(dir_path) - try: - with open(graph_filename, "rb") as graph_file: - state_dict = pickle.load(graph_file) - except pickle.UnpicklingError: - bittensor.__console__.print( - "Unable to load file. Attempting to restore metagraph using torch." - ) - bittensor.__console__.print( - ":warning:[yellow]Warning:[/yellow] This functionality exists to load " - "metagraph state from legacy saves, but will not be supported in the future." - ) - try: - import torch as real_torch - - state_dict = real_torch.load(graph_filename) - for key in METAGRAPH_STATE_DICT_NDARRAY_KEYS: - state_dict[key] = state_dict[key].detach().numpy() - del real_torch - except (RuntimeError, ImportError): - bittensor.__console__.print("Unable to load file. It may be corrupted.") - raise - - self.n = state_dict["n"] - self.block = state_dict["block"] - self.uids = state_dict["uids"] - self.stake = state_dict["stake"] - self.total_stake = state_dict["total_stake"] - self.ranks = state_dict["ranks"] - self.trust = state_dict["trust"] - self.consensus = state_dict["consensus"] - self.validator_trust = state_dict["validator_trust"] - self.incentive = state_dict["incentive"] - self.emission = state_dict["emission"] - self.dividends = state_dict["dividends"] - self.active = state_dict["active"] - self.last_update = state_dict["last_update"] - self.validator_permit = state_dict["validator_permit"] - self.axons = state_dict["axons"] - if "weights" in state_dict: - self.weights = state_dict["weights"] - if "bonds" in state_dict: - self.bonds = state_dict["bonds"] - return self - - -metagraph = TorchMetaGraph if use_torch() else NonTorchMetagraph diff --git a/bittensor/mock/__init__.py b/bittensor/mock/__init__.py deleted file mode 100644 index b4f0efd5ca..0000000000 --- a/bittensor/mock/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person 
obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -from .subtensor_mock import MockSubtensor as MockSubtensor diff --git a/bittensor/mock/keyfile_mock.py b/bittensor/mock/keyfile_mock.py deleted file mode 100644 index e13126cc17..0000000000 --- a/bittensor/mock/keyfile_mock.py +++ /dev/null @@ -1,90 +0,0 @@ -# The MIT License (MIT) - -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -from bittensor import serialized_keypair_to_keyfile_data, keyfile, Keypair - - -class MockKeyfile(keyfile): - """Defines an interface to a mocked keyfile object (nothing is created on device) keypair is treated as non encrypted and the data is just the string version.""" - - def __init__(self, path: str): - super().__init__(path) - - self._mock_keypair = Keypair.create_from_mnemonic( - mnemonic="arrive produce someone view end scout bargain coil slight festival excess struggle" - ) - self._mock_data = serialized_keypair_to_keyfile_data(self._mock_keypair) - - def __str__(self): - if not self.exists_on_device(): - return "Keyfile (empty, {})>".format(self.path) - if self.is_encrypted(): - return "Keyfile (encrypted, {})>".format(self.path) - else: - return "Keyfile (decrypted, {})>".format(self.path) - - def __repr__(self): - return self.__str__() - - @property - def keypair(self) -> "Keypair": - return self._mock_keypair - - @property - def data(self) -> bytes: - return bytes(self._mock_data) - - @property - def keyfile_data(self) -> bytes: - return bytes(self._mock_data) - - def set_keypair( - self, - keypair: "Keypair", - encrypt: bool = True, - overwrite: bool = False, - password: str = None, - ): - self._mock_keypair = keypair - self._mock_data = serialized_keypair_to_keyfile_data(self._mock_keypair) - - def get_keypair(self, password: str = None) -> "Keypair": - return self._mock_keypair - - def make_dirs(self): - return - - def exists_on_device(self) -> bool: - return True - - def 
is_readable(self) -> bool: - return True - - def is_writable(self) -> bool: - return True - - def is_encrypted(self) -> bool: - return False - - def encrypt(self, password: str = None): - raise ValueError("Cannot encrypt a mock keyfile") - - def decrypt(self, password: str = None): - return diff --git a/bittensor/mock/subtensor_mock.py b/bittensor/mock/subtensor_mock.py deleted file mode 100644 index 5c2c3b42d6..0000000000 --- a/bittensor/mock/subtensor_mock.py +++ /dev/null @@ -1,1469 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022-2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -from random import randint -from types import SimpleNamespace -from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union -from unittest.mock import MagicMock -from dataclasses import dataclass -from abc import abstractclassmethod -from collections.abc import Mapping - -from hashlib import sha256 -from ..wallet import wallet - -from ..chain_data import ( - NeuronInfo, - NeuronInfoLite, - PrometheusInfo, - DelegateInfo, - SubnetInfo, - AxonInfo, -) -from ..errors import ChainQueryError -from ..subtensor import Subtensor -from ..utils import RAOPERTAO, U16_NORMALIZED_FLOAT -from ..utils.balance import Balance -from ..utils.registration import POWSolution - -from typing import TypedDict - - -# Mock Testing Constant -__GLOBAL_MOCK_STATE__ = {} - - -class AxonServeCallParams(TypedDict): - """ - Axon serve chain call parameters. - """ - - version: int - ip: int - port: int - ip_type: int - netuid: int - - -class PrometheusServeCallParams(TypedDict): - """ - Prometheus serve chain call parameters. 
- """ - - version: int - ip: int - port: int - ip_type: int - netuid: int - - -BlockNumber = int - - -class InfoDict(Mapping): - @abstractclassmethod - def default(cls): - raise NotImplementedError - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __iter__(self): - return iter(self.__dict__) - - def __len__(self): - return len(self.__dict__) - - -@dataclass -class AxonInfoDict(InfoDict): - block: int - version: int - ip: int # integer representation of ip address - port: int - ip_type: int - protocol: int - placeholder1: int # placeholder for future use - placeholder2: int - - @classmethod - def default(cls): - return cls( - block=0, - version=0, - ip=0, - port=0, - ip_type=0, - protocol=0, - placeholder1=0, - placeholder2=0, - ) - - -@dataclass -class PrometheusInfoDict(InfoDict): - block: int - version: int - ip: int # integer representation of ip address - port: int - ip_type: int - - @classmethod - def default(cls): - return cls(block=0, version=0, ip=0, port=0, ip_type=0) - - -@dataclass -class MockSubtensorValue: - value: Optional[Any] - - -class MockMapResult: - records: Optional[List[Tuple[MockSubtensorValue, MockSubtensorValue]]] - - def __init__( - self, - records: Optional[ - List[Tuple[Union[Any, MockSubtensorValue], Union[Any, MockSubtensorValue]]] - ] = None, - ): - _records = [ - ( - ( - MockSubtensorValue(value=record[0]), - MockSubtensorValue(value=record[1]), - ) - # Make sure record is a tuple of MockSubtensorValue (dict with value attr) - if not ( - isinstance(record, tuple) - and all( - isinstance(item, dict) and hasattr(item, "value") - for item in record - ) - ) - else record - ) - for record in records - ] - - self.records = _records - - def __iter__(self): - return iter(self.records) - - -class MockSystemState(TypedDict): - Account: Dict[str, Dict[int, int]] # address -> block -> balance - - -class MockSubtensorState(TypedDict): - Rho: Dict[int, 
Dict[BlockNumber, int]] # netuid -> block -> rho - Kappa: Dict[int, Dict[BlockNumber, int]] # netuid -> block -> kappa - Difficulty: Dict[int, Dict[BlockNumber, int]] # netuid -> block -> difficulty - ImmunityPeriod: Dict[ - int, Dict[BlockNumber, int] - ] # netuid -> block -> immunity_period - ValidatorBatchSize: Dict[ - int, Dict[BlockNumber, int] - ] # netuid -> block -> validator_batch_size - Active: Dict[int, Dict[BlockNumber, bool]] # (netuid, uid), block -> active - Stake: Dict[str, Dict[str, Dict[int, int]]] # (hotkey, coldkey) -> block -> stake - - Delegates: Dict[str, Dict[int, float]] # address -> block -> delegate_take - - NetworksAdded: Dict[int, Dict[BlockNumber, bool]] # netuid -> block -> added - - -class MockChainState(TypedDict): - System: MockSystemState - SubtensorModule: MockSubtensorState - - -class MockSubtensor(Subtensor): - """ - A Mock Subtensor class for running tests. - This should mock only methods that make queries to the chain. - e.g. We mock `Subtensor.query_subtensor` instead of all query methods. - - This class will also store a local (mock) state of the chain. 
- """ - - chain_state: MockChainState - block_number: int - - @classmethod - def reset(cls) -> None: - __GLOBAL_MOCK_STATE__.clear() - - _ = cls() - - def setup(self) -> None: - if not hasattr(self, "chain_state") or getattr(self, "chain_state") is None: - self.chain_state = { - "System": {"Account": {}}, - "Balances": {"ExistentialDeposit": {0: 500}}, - "SubtensorModule": { - "NetworksAdded": {}, - "Rho": {}, - "Kappa": {}, - "Difficulty": {}, - "ImmunityPeriod": {}, - "ValidatorBatchSize": {}, - "ValidatorSequenceLength": {}, - "ValidatorEpochsPerReset": {}, - "ValidatorEpochLength": {}, - "MaxAllowedValidators": {}, - "MinAllowedWeights": {}, - "MaxWeightLimit": {}, - "SynergyScalingLawPower": {}, - "ScalingLawPower": {}, - "SubnetworkN": {}, - "MaxAllowedUids": {}, - "NetworkModality": {}, - "BlocksSinceLastStep": {}, - "Tempo": {}, - "NetworkConnect": {}, - "EmissionValues": {}, - "Burn": {}, - "Active": {}, - "Uids": {}, - "Keys": {}, - "Owner": {}, - "IsNetworkMember": {}, - "LastUpdate": {}, - "Rank": {}, - "Emission": {}, - "Incentive": {}, - "Consensus": {}, - "Trust": {}, - "ValidatorTrust": {}, - "Dividends": {}, - "PruningScores": {}, - "ValidatorPermit": {}, - "Weights": {}, - "Bonds": {}, - "Stake": {}, - "TotalStake": {0: 0}, - "TotalIssuance": {0: 0}, - "TotalHotkeyStake": {}, - "TotalColdkeyStake": {}, - "TxRateLimit": {0: 0}, # No limit - "Delegates": {}, - "Axons": {}, - "Prometheus": {}, - "SubnetOwner": {}, - "Commits": {}, - "AdjustmentAlpha": {}, - "BondsMovingAverage": {}, - }, - } - - self.block_number = 0 - - self.network = "mock" - self.chain_endpoint = "mock_endpoint" - self.substrate = MagicMock() - - def __init__(self, *args, **kwargs) -> None: - self.__dict__ = __GLOBAL_MOCK_STATE__ - - if not hasattr(self, "chain_state") or getattr(self, "chain_state") is None: - self.setup() - - def get_block_hash(self, block_id: int) -> str: - return "0x" + sha256(str(block_id).encode()).hexdigest()[:64] - - def create_subnet(self, netuid: int) -> 
None: - subtensor_state = self.chain_state["SubtensorModule"] - if netuid not in subtensor_state["NetworksAdded"]: - # Per Subnet - subtensor_state["Rho"][netuid] = {} - subtensor_state["Rho"][netuid][0] = 10 - subtensor_state["Kappa"][netuid] = {} - subtensor_state["Kappa"][netuid][0] = 32_767 - subtensor_state["Difficulty"][netuid] = {} - subtensor_state["Difficulty"][netuid][0] = 10_000_000 - subtensor_state["ImmunityPeriod"][netuid] = {} - subtensor_state["ImmunityPeriod"][netuid][0] = 4096 - subtensor_state["ValidatorBatchSize"][netuid] = {} - subtensor_state["ValidatorBatchSize"][netuid][0] = 32 - subtensor_state["ValidatorSequenceLength"][netuid] = {} - subtensor_state["ValidatorSequenceLength"][netuid][0] = 256 - subtensor_state["ValidatorEpochsPerReset"][netuid] = {} - subtensor_state["ValidatorEpochsPerReset"][netuid][0] = 60 - subtensor_state["ValidatorEpochLength"][netuid] = {} - subtensor_state["ValidatorEpochLength"][netuid][0] = 100 - subtensor_state["MaxAllowedValidators"][netuid] = {} - subtensor_state["MaxAllowedValidators"][netuid][0] = 128 - subtensor_state["MinAllowedWeights"][netuid] = {} - subtensor_state["MinAllowedWeights"][netuid][0] = 1024 - subtensor_state["MaxWeightLimit"][netuid] = {} - subtensor_state["MaxWeightLimit"][netuid][0] = 1_000 - subtensor_state["SynergyScalingLawPower"][netuid] = {} - subtensor_state["SynergyScalingLawPower"][netuid][0] = 50 - subtensor_state["ScalingLawPower"][netuid] = {} - subtensor_state["ScalingLawPower"][netuid][0] = 50 - subtensor_state["SubnetworkN"][netuid] = {} - subtensor_state["SubnetworkN"][netuid][0] = 0 - subtensor_state["MaxAllowedUids"][netuid] = {} - subtensor_state["MaxAllowedUids"][netuid][0] = 4096 - subtensor_state["NetworkModality"][netuid] = {} - subtensor_state["NetworkModality"][netuid][0] = 0 - subtensor_state["BlocksSinceLastStep"][netuid] = {} - subtensor_state["BlocksSinceLastStep"][netuid][0] = 0 - subtensor_state["Tempo"][netuid] = {} - subtensor_state["Tempo"][netuid][0] = 
99 - - # subtensor_state['NetworkConnect'][netuid] = {} - # subtensor_state['NetworkConnect'][netuid][0] = {} - subtensor_state["EmissionValues"][netuid] = {} - subtensor_state["EmissionValues"][netuid][0] = 0 - subtensor_state["Burn"][netuid] = {} - subtensor_state["Burn"][netuid][0] = 0 - subtensor_state["Commits"][netuid] = {} - - # Per-UID/Hotkey - - subtensor_state["Uids"][netuid] = {} - subtensor_state["Keys"][netuid] = {} - subtensor_state["Owner"][netuid] = {} - - subtensor_state["LastUpdate"][netuid] = {} - subtensor_state["Active"][netuid] = {} - subtensor_state["Rank"][netuid] = {} - subtensor_state["Emission"][netuid] = {} - subtensor_state["Incentive"][netuid] = {} - subtensor_state["Consensus"][netuid] = {} - subtensor_state["Trust"][netuid] = {} - subtensor_state["ValidatorTrust"][netuid] = {} - subtensor_state["Dividends"][netuid] = {} - subtensor_state["PruningScores"][netuid] = {} - subtensor_state["PruningScores"][netuid][0] = {} - subtensor_state["ValidatorPermit"][netuid] = {} - - subtensor_state["Weights"][netuid] = {} - subtensor_state["Bonds"][netuid] = {} - - subtensor_state["Axons"][netuid] = {} - subtensor_state["Prometheus"][netuid] = {} - - subtensor_state["NetworksAdded"][netuid] = {} - subtensor_state["NetworksAdded"][netuid][0] = True - - subtensor_state["AdjustmentAlpha"][netuid] = {} - subtensor_state["AdjustmentAlpha"][netuid][0] = 1000 - - subtensor_state["BondsMovingAverage"][netuid] = {} - subtensor_state["BondsMovingAverage"][netuid][0] = 1000 - else: - raise Exception("Subnet already exists") - - def set_difficulty(self, netuid: int, difficulty: int) -> None: - subtensor_state = self.chain_state["SubtensorModule"] - if netuid not in subtensor_state["NetworksAdded"]: - raise Exception("Subnet does not exist") - - subtensor_state["Difficulty"][netuid][self.block_number] = difficulty - - def _register_neuron(self, netuid: int, hotkey: str, coldkey: str) -> int: - subtensor_state = self.chain_state["SubtensorModule"] - if netuid 
not in subtensor_state["NetworksAdded"]: - raise Exception("Subnet does not exist") - - subnetwork_n = self._get_most_recent_storage( - subtensor_state["SubnetworkN"][netuid] - ) - - if subnetwork_n > 0 and any( - self._get_most_recent_storage(subtensor_state["Keys"][netuid][uid]) - == hotkey - for uid in range(subnetwork_n) - ): - # already_registered - raise Exception("Hotkey already registered") - else: - # Not found - if subnetwork_n >= self._get_most_recent_storage( - subtensor_state["MaxAllowedUids"][netuid] - ): - # Subnet full, replace neuron randomly - uid = randint(0, subnetwork_n - 1) - else: - # Subnet not full, add new neuron - # Append as next uid and increment subnetwork_n - uid = subnetwork_n - subtensor_state["SubnetworkN"][netuid][self.block_number] = ( - subnetwork_n + 1 - ) - - subtensor_state["Stake"][hotkey] = {} - subtensor_state["Stake"][hotkey][coldkey] = {} - subtensor_state["Stake"][hotkey][coldkey][self.block_number] = 0 - - subtensor_state["Uids"][netuid][hotkey] = {} - subtensor_state["Uids"][netuid][hotkey][self.block_number] = uid - - subtensor_state["Keys"][netuid][uid] = {} - subtensor_state["Keys"][netuid][uid][self.block_number] = hotkey - - subtensor_state["Owner"][hotkey] = {} - subtensor_state["Owner"][hotkey][self.block_number] = coldkey - - subtensor_state["Active"][netuid][uid] = {} - subtensor_state["Active"][netuid][uid][self.block_number] = True - - subtensor_state["LastUpdate"][netuid][uid] = {} - subtensor_state["LastUpdate"][netuid][uid][self.block_number] = ( - self.block_number - ) - - subtensor_state["Rank"][netuid][uid] = {} - subtensor_state["Rank"][netuid][uid][self.block_number] = 0.0 - - subtensor_state["Emission"][netuid][uid] = {} - subtensor_state["Emission"][netuid][uid][self.block_number] = 0.0 - - subtensor_state["Incentive"][netuid][uid] = {} - subtensor_state["Incentive"][netuid][uid][self.block_number] = 0.0 - - subtensor_state["Consensus"][netuid][uid] = {} - 
subtensor_state["Consensus"][netuid][uid][self.block_number] = 0.0 - - subtensor_state["Trust"][netuid][uid] = {} - subtensor_state["Trust"][netuid][uid][self.block_number] = 0.0 - - subtensor_state["ValidatorTrust"][netuid][uid] = {} - subtensor_state["ValidatorTrust"][netuid][uid][self.block_number] = 0.0 - - subtensor_state["Dividends"][netuid][uid] = {} - subtensor_state["Dividends"][netuid][uid][self.block_number] = 0.0 - - subtensor_state["PruningScores"][netuid][uid] = {} - subtensor_state["PruningScores"][netuid][uid][self.block_number] = 0.0 - - subtensor_state["ValidatorPermit"][netuid][uid] = {} - subtensor_state["ValidatorPermit"][netuid][uid][self.block_number] = False - - subtensor_state["Weights"][netuid][uid] = {} - subtensor_state["Weights"][netuid][uid][self.block_number] = [] - - subtensor_state["Bonds"][netuid][uid] = {} - subtensor_state["Bonds"][netuid][uid][self.block_number] = [] - - subtensor_state["Axons"][netuid][hotkey] = {} - subtensor_state["Axons"][netuid][hotkey][self.block_number] = {} - - subtensor_state["Prometheus"][netuid][hotkey] = {} - subtensor_state["Prometheus"][netuid][hotkey][self.block_number] = {} - - if hotkey not in subtensor_state["IsNetworkMember"]: - subtensor_state["IsNetworkMember"][hotkey] = {} - subtensor_state["IsNetworkMember"][hotkey][netuid] = {} - subtensor_state["IsNetworkMember"][hotkey][netuid][self.block_number] = True - - return uid - - @staticmethod - def _convert_to_balance(balance: Union["Balance", float, int]) -> "Balance": - if isinstance(balance, float): - balance = Balance.from_tao(balance) - - if isinstance(balance, int): - balance = Balance.from_rao(balance) - - return balance - - def force_register_neuron( - self, - netuid: int, - hotkey: str, - coldkey: str, - stake: Union["Balance", float, int] = Balance(0), - balance: Union["Balance", float, int] = Balance(0), - ) -> int: - """ - Force register a neuron on the mock chain, returning the UID. 
- """ - stake = self._convert_to_balance(stake) - balance = self._convert_to_balance(balance) - - subtensor_state = self.chain_state["SubtensorModule"] - if netuid not in subtensor_state["NetworksAdded"]: - raise Exception("Subnet does not exist") - - uid = self._register_neuron(netuid=netuid, hotkey=hotkey, coldkey=coldkey) - - subtensor_state["TotalStake"][self.block_number] = ( - self._get_most_recent_storage(subtensor_state["TotalStake"]) + stake.rao - ) - subtensor_state["Stake"][hotkey][coldkey][self.block_number] = stake.rao - - if balance.rao > 0: - self.force_set_balance(coldkey, balance) - self.force_set_balance(coldkey, balance) - - return uid - - def force_set_balance( - self, ss58_address: str, balance: Union["Balance", float, int] = Balance(0) - ) -> Tuple[bool, Optional[str]]: - """ - Returns: - Tuple[bool, Optional[str]]: (success, err_msg) - """ - balance = self._convert_to_balance(balance) - - if ss58_address not in self.chain_state["System"]["Account"]: - self.chain_state["System"]["Account"][ss58_address] = { - "data": {"free": {0: 0}} - } - - old_balance = self.get_balance(ss58_address, self.block_number) - diff = balance.rao - old_balance.rao - - # Update total issuance - self.chain_state["SubtensorModule"]["TotalIssuance"][self.block_number] = ( - self._get_most_recent_storage( - self.chain_state["SubtensorModule"]["TotalIssuance"] - ) - + diff - ) - - self.chain_state["System"]["Account"][ss58_address] = { - "data": {"free": {self.block_number: balance.rao}} - } - - return True, None - - # Alias for force_set_balance - sudo_force_set_balance = force_set_balance - - def do_block_step(self) -> None: - self.block_number += 1 - - # Doesn't do epoch - subtensor_state = self.chain_state["SubtensorModule"] - for subnet in subtensor_state["NetworksAdded"]: - subtensor_state["BlocksSinceLastStep"][subnet][self.block_number] = ( - self._get_most_recent_storage( - subtensor_state["BlocksSinceLastStep"][subnet] - ) - + 1 - ) - - def 
_handle_type_default(self, name: str, params: List[object]) -> object: - defaults_mapping = { - "TotalStake": 0, - "TotalHotkeyStake": 0, - "TotalColdkeyStake": 0, - "Stake": 0, - } - - return defaults_mapping.get(name, None) - - def commit(self, wallet: "wallet", netuid: int, data: str) -> None: - uid = self.get_uid_for_hotkey_on_subnet( - hotkey_ss58=wallet.hotkey.ss58_address, - netuid=netuid, - ) - if uid is None: - raise Exception("Neuron not found") - subtensor_state = self.chain_state["SubtensorModule"] - subtensor_state["Commits"][netuid].setdefault(self.block_number, {})[uid] = data - - def get_commitment(self, netuid: int, uid: int, block: Optional[int] = None) -> str: - if block and self.block_number < block: - raise Exception("Cannot query block in the future") - block = block or self.block_number - - subtensor_state = self.chain_state["SubtensorModule"] - return subtensor_state["Commits"][netuid][block][uid] - - def query_subtensor( - self, - name: str, - block: Optional[int] = None, - params: Optional[List[object]] = [], - ) -> MockSubtensorValue: - if block: - if self.block_number < block: - raise Exception("Cannot query block in the future") - - else: - block = self.block_number - - state = self.chain_state["SubtensorModule"][name] - if state is not None: - # Use prefix - if len(params) > 0: - while state is not None and len(params) > 0: - state = state.get(params.pop(0), None) - if state is None: - return SimpleNamespace( - value=self._handle_type_default(name, params) - ) - - # Use block - state_at_block = state.get(block, None) - while state_at_block is None and block > 0: - block -= 1 - state_at_block = state.get(block, None) - if state_at_block is not None: - return SimpleNamespace(value=state_at_block) - - return SimpleNamespace(value=self._handle_type_default(name, params)) - else: - return SimpleNamespace(value=self._handle_type_default(name, params)) - - def query_map_subtensor( - self, - name: str, - block: Optional[int] = None, - params: 
Optional[List[object]] = [], - ) -> Optional[MockMapResult]: - """ - Note: Double map requires one param - """ - if block: - if self.block_number < block: - raise Exception("Cannot query block in the future") - - else: - block = self.block_number - - state = self.chain_state["SubtensorModule"][name] - if state is not None: - # Use prefix - if len(params) > 0: - while state is not None and len(params) > 0: - state = state.get(params.pop(0), None) - if state is None: - return MockMapResult([]) - - # Check if single map or double map - if len(state.keys()) == 0: - return MockMapResult([]) - - inner = list(state.values())[0] - # Should have at least one key - if len(inner.keys()) == 0: - raise Exception("Invalid state") - - # Check if double map - if isinstance(list(inner.values())[0], dict): - # is double map - raise ChainQueryError("Double map requires one param") - - # Iterate over each key and add value to list, max at block - records = [] - for key in state: - result = self._get_most_recent_storage(state[key], block) - if result is None: - continue # Skip if no result for this key at `block` or earlier - - records.append((key, result)) - - return MockMapResult(records) - else: - return MockMapResult([]) - - def query_constant( - self, module_name: str, constant_name: str, block: Optional[int] = None - ) -> Optional[object]: - if block: - if self.block_number < block: - raise Exception("Cannot query block in the future") - - else: - block = self.block_number - - state = self.chain_state.get(module_name, None) - if state is not None: - if constant_name in state: - state = state[constant_name] - else: - return None - - # Use block - state_at_block = self._get_most_recent_storage(state, block) - if state_at_block is not None: - return SimpleNamespace(value=state_at_block) - - return state_at_block["data"]["free"] # Can be None - else: - return None - - def get_current_block(self) -> int: - return self.block_number - - # ==== Balance RPC methods ==== - - def 
get_balance(self, address: str, block: int = None) -> "Balance": - if block: - if self.block_number < block: - raise Exception("Cannot query block in the future") - - else: - block = self.block_number - - state = self.chain_state["System"]["Account"] - if state is not None: - if address in state: - state = state[address] - else: - return Balance(0) - - # Use block - balance_state = state["data"]["free"] - state_at_block = self._get_most_recent_storage( - balance_state, block - ) # Can be None - if state_at_block is not None: - bal_as_int = state_at_block - return Balance.from_rao(bal_as_int) - else: - return Balance(0) - else: - return Balance(0) - - def get_balances(self, block: int = None) -> Dict[str, "Balance"]: - balances = {} - for address in self.chain_state["System"]["Account"]: - balances[address] = self.get_balance(address, block) - - return balances - - # ==== Neuron RPC methods ==== - - def neuron_for_uid( - self, uid: int, netuid: int, block: Optional[int] = None - ) -> Optional[NeuronInfo]: - if uid is None: - return NeuronInfo.get_null_neuron() - - if block: - if self.block_number < block: - raise Exception("Cannot query block in the future") - - else: - block = self.block_number - - if netuid not in self.chain_state["SubtensorModule"]["NetworksAdded"]: - return None - - neuron_info = self._neuron_subnet_exists(uid, netuid, block) - if neuron_info is None: - return None - - else: - return neuron_info - - def neurons(self, netuid: int, block: Optional[int] = None) -> List[NeuronInfo]: - if netuid not in self.chain_state["SubtensorModule"]["NetworksAdded"]: - raise Exception("Subnet does not exist") - - neurons = [] - subnet_n = self._get_most_recent_storage( - self.chain_state["SubtensorModule"]["SubnetworkN"][netuid], block - ) - for uid in range(subnet_n): - neuron_info = self.neuron_for_uid(uid, netuid, block) - if neuron_info is not None: - neurons.append(neuron_info) - - return neurons - - @staticmethod - def _get_most_recent_storage( - storage: 
Dict[BlockNumber, Any], block_number: Optional[int] = None - ) -> Any: - if block_number is None: - items = list(storage.items()) - items.sort(key=lambda x: x[0], reverse=True) - if len(items) == 0: - return None - - return items[0][1] - - else: - while block_number >= 0: - if block_number in storage: - return storage[block_number] - - block_number -= 1 - - return None - - def _get_axon_info( - self, netuid: int, hotkey: str, block: Optional[int] = None - ) -> AxonInfoDict: - # Axons [netuid][hotkey][block_number] - subtensor_state = self.chain_state["SubtensorModule"] - if netuid not in subtensor_state["Axons"]: - return AxonInfoDict.default() - - if hotkey not in subtensor_state["Axons"][netuid]: - return AxonInfoDict.default() - - result = self._get_most_recent_storage( - subtensor_state["Axons"][netuid][hotkey], block - ) - if not result: - return AxonInfoDict.default() - - return result - - def _get_prometheus_info( - self, netuid: int, hotkey: str, block: Optional[int] = None - ) -> PrometheusInfoDict: - subtensor_state = self.chain_state["SubtensorModule"] - if netuid not in subtensor_state["Prometheus"]: - return PrometheusInfoDict.default() - - if hotkey not in subtensor_state["Prometheus"][netuid]: - return PrometheusInfoDict.default() - - result = self._get_most_recent_storage( - subtensor_state["Prometheus"][netuid][hotkey], block - ) - if not result: - return PrometheusInfoDict.default() - - return result - - def _neuron_subnet_exists( - self, uid: int, netuid: int, block: Optional[int] = None - ) -> Optional[NeuronInfo]: - subtensor_state = self.chain_state["SubtensorModule"] - if netuid not in subtensor_state["NetworksAdded"]: - return None - - if self._get_most_recent_storage(subtensor_state["SubnetworkN"][netuid]) <= uid: - return None - - hotkey = self._get_most_recent_storage(subtensor_state["Keys"][netuid][uid]) - if hotkey is None: - return None - - axon_info_ = self._get_axon_info(netuid, hotkey, block) - - prometheus_info = 
self._get_prometheus_info(netuid, hotkey, block) - - coldkey = self._get_most_recent_storage(subtensor_state["Owner"][hotkey], block) - active = self._get_most_recent_storage( - subtensor_state["Active"][netuid][uid], block - ) - rank = self._get_most_recent_storage( - subtensor_state["Rank"][netuid][uid], block - ) - emission = self._get_most_recent_storage( - subtensor_state["Emission"][netuid][uid], block - ) - incentive = self._get_most_recent_storage( - subtensor_state["Incentive"][netuid][uid], block - ) - consensus = self._get_most_recent_storage( - subtensor_state["Consensus"][netuid][uid], block - ) - trust = self._get_most_recent_storage( - subtensor_state["Trust"][netuid][uid], block - ) - validator_trust = self._get_most_recent_storage( - subtensor_state["ValidatorTrust"][netuid][uid], block - ) - dividends = self._get_most_recent_storage( - subtensor_state["Dividends"][netuid][uid], block - ) - pruning_score = self._get_most_recent_storage( - subtensor_state["PruningScores"][netuid][uid], block - ) - last_update = self._get_most_recent_storage( - subtensor_state["LastUpdate"][netuid][uid], block - ) - validator_permit = self._get_most_recent_storage( - subtensor_state["ValidatorPermit"][netuid][uid], block - ) - - weights = self._get_most_recent_storage( - subtensor_state["Weights"][netuid][uid], block - ) - bonds = self._get_most_recent_storage( - subtensor_state["Bonds"][netuid][uid], block - ) - - stake_dict = { - coldkey: Balance.from_rao( - self._get_most_recent_storage( - subtensor_state["Stake"][hotkey][coldkey], block - ) - ) - for coldkey in subtensor_state["Stake"][hotkey] - } - - stake = sum(stake_dict.values()) - - weights = [[int(weight[0]), int(weight[1])] for weight in weights] - bonds = [[int(bond[0]), int(bond[1])] for bond in bonds] - rank = U16_NORMALIZED_FLOAT(rank) - emission = emission / RAOPERTAO - incentive = U16_NORMALIZED_FLOAT(incentive) - consensus = U16_NORMALIZED_FLOAT(consensus) - trust = U16_NORMALIZED_FLOAT(trust) - 
validator_trust = U16_NORMALIZED_FLOAT(validator_trust) - dividends = U16_NORMALIZED_FLOAT(dividends) - prometheus_info = PrometheusInfo.fix_decoded_values(prometheus_info) - axon_info_ = AxonInfo.from_neuron_info( - {"hotkey": hotkey, "coldkey": coldkey, "axon_info": axon_info_} - ) - - neuron_info = NeuronInfo( - hotkey=hotkey, - coldkey=coldkey, - uid=uid, - netuid=netuid, - active=active, - rank=rank, - emission=emission, - incentive=incentive, - consensus=consensus, - trust=trust, - validator_trust=validator_trust, - dividends=dividends, - pruning_score=pruning_score, - last_update=last_update, - validator_permit=validator_permit, - stake=stake, - stake_dict=stake_dict, - total_stake=stake, - prometheus_info=prometheus_info, - axon_info=axon_info_, - weights=weights, - bonds=bonds, - is_null=False, - ) - - return neuron_info - - def neuron_for_uid_lite( - self, uid: int, netuid: int, block: Optional[int] = None - ) -> Optional[NeuronInfoLite]: - if block: - if self.block_number < block: - raise Exception("Cannot query block in the future") - - else: - block = self.block_number - - if netuid not in self.chain_state["SubtensorModule"]["NetworksAdded"]: - raise Exception("Subnet does not exist") - - neuron_info = self._neuron_subnet_exists(uid, netuid, block) - if neuron_info is None: - return None - - else: - neuron_info_dict = neuron_info.__dict__ - del neuron_info - del neuron_info_dict["weights"] - del neuron_info_dict["bonds"] - - neuron_info_lite = NeuronInfoLite(**neuron_info_dict) - return neuron_info_lite - - def neurons_lite( - self, netuid: int, block: Optional[int] = None - ) -> List[NeuronInfoLite]: - if netuid not in self.chain_state["SubtensorModule"]["NetworksAdded"]: - raise Exception("Subnet does not exist") - - neurons = [] - subnet_n = self._get_most_recent_storage( - self.chain_state["SubtensorModule"]["SubnetworkN"][netuid] - ) - for uid in range(subnet_n): - neuron_info = self.neuron_for_uid_lite(uid, netuid, block) - if neuron_info is not 
None: - neurons.append(neuron_info) - - return neurons - - # Extrinsics - def _do_delegation( - self, - wallet: "wallet", - delegate_ss58: str, - amount: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - # Check if delegate - if not self.is_hotkey_delegate(hotkey_ss58=delegate_ss58): - raise Exception("Not a delegate") - - # do stake - success = self._do_stake( - wallet=wallet, - hotkey_ss58=delegate_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - return success - - def _do_undelegation( - self, - wallet: "wallet", - delegate_ss58: str, - amount: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - # Check if delegate - if not self.is_hotkey_delegate(hotkey_ss58=delegate_ss58): - raise Exception("Not a delegate") - - # do unstake - self._do_unstake( - wallet=wallet, - hotkey_ss58=delegate_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - def _do_nominate( - self, - wallet: "wallet", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - hotkey_ss58 = wallet.hotkey.ss58_address - coldkey_ss58 = wallet.coldkeypub.ss58_address - - subtensor_state = self.chain_state["SubtensorModule"] - if self.is_hotkey_delegate(hotkey_ss58=hotkey_ss58): - return True - - else: - subtensor_state["Delegates"][hotkey_ss58] = {} - subtensor_state["Delegates"][hotkey_ss58][self.block_number] = ( - 0.18 # Constant for now - ) - - return True - - def get_transfer_fee( - self, wallet: "wallet", dest: str, value: Union["Balance", float, int] - ) -> "Balance": - return Balance(700) - - def _do_transfer( - self, - wallet: "wallet", - dest: str, - transfer_balance: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> Tuple[bool, Optional[str], Optional[str]]: - bal = 
self.get_balance(wallet.coldkeypub.ss58_address) - dest_bal = self.get_balance(dest) - transfer_fee = self.get_transfer_fee(wallet, dest, transfer_balance) - - existential_deposit = self.get_existential_deposit() - - if bal < transfer_balance + existential_deposit + transfer_fee: - raise Exception("Insufficient balance") - - # Remove from the free balance - self.chain_state["System"]["Account"][wallet.coldkeypub.ss58_address]["data"][ - "free" - ][self.block_number] = (bal - transfer_balance - transfer_fee).rao - - # Add to the free balance - if dest not in self.chain_state["System"]["Account"]: - self.chain_state["System"]["Account"][dest] = {"data": {"free": {}}} - - self.chain_state["System"]["Account"][dest]["data"]["free"][ - self.block_number - ] = (dest_bal + transfer_balance).rao - - return True, None, None - - def _do_pow_register( - self, - netuid: int, - wallet: "wallet", - pow_result: "POWSolution", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - # Assume pow result is valid - - subtensor_state = self.chain_state["SubtensorModule"] - if netuid not in subtensor_state["NetworksAdded"]: - raise Exception("Subnet does not exist") - - self._register_neuron( - netuid=netuid, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - ) - - return True, None - - def _do_burned_register( - self, - netuid: int, - wallet: "wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - subtensor_state = self.chain_state["SubtensorModule"] - if netuid not in subtensor_state["NetworksAdded"]: - raise Exception("Subnet does not exist") - - bal = self.get_balance(wallet.coldkeypub.ss58_address) - burn = self.recycle(netuid=netuid) - existential_deposit = self.get_existential_deposit() - - if bal < burn + existential_deposit: - raise Exception("Insufficient funds") - - self._register_neuron( - netuid=netuid, - 
hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - ) - - # Burn the funds - self.chain_state["System"]["Account"][wallet.coldkeypub.ss58_address]["data"][ - "free" - ][self.block_number] = (bal - burn).rao - - return True, None - - def _do_stake( - self, - wallet: "wallet", - hotkey_ss58: str, - amount: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - subtensor_state = self.chain_state["SubtensorModule"] - - bal = self.get_balance(wallet.coldkeypub.ss58_address) - curr_stake = self.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=hotkey_ss58, coldkey_ss58=wallet.coldkeypub.ss58_address - ) - if curr_stake is None: - curr_stake = Balance(0) - existential_deposit = self.get_existential_deposit() - - if bal < amount + existential_deposit: - raise Exception("Insufficient funds") - - stake_state = subtensor_state["Stake"] - - # Stake the funds - if not hotkey_ss58 in stake_state: - stake_state[hotkey_ss58] = {} - if not wallet.coldkeypub.ss58_address in stake_state[hotkey_ss58]: - stake_state[hotkey_ss58][wallet.coldkeypub.ss58_address] = {} - - stake_state[hotkey_ss58][wallet.coldkeypub.ss58_address][self.block_number] = ( - amount.rao - ) - - # Add to total_stake storage - subtensor_state["TotalStake"][self.block_number] = ( - self._get_most_recent_storage(subtensor_state["TotalStake"]) + amount.rao - ) - - total_hotkey_stake_state = subtensor_state["TotalHotkeyStake"] - if not hotkey_ss58 in total_hotkey_stake_state: - total_hotkey_stake_state[hotkey_ss58] = {} - - total_coldkey_stake_state = subtensor_state["TotalColdkeyStake"] - if not wallet.coldkeypub.ss58_address in total_coldkey_stake_state: - total_coldkey_stake_state[wallet.coldkeypub.ss58_address] = {} - - curr_total_hotkey_stake = self.query_subtensor( - name="TotalHotkeyStake", - params=[hotkey_ss58], - block=min(self.block_number - 1, 0), - ) - curr_total_coldkey_stake = self.query_subtensor( - name="TotalColdkeyStake", - 
params=[wallet.coldkeypub.ss58_address], - block=min(self.block_number - 1, 0), - ) - - total_hotkey_stake_state[hotkey_ss58][self.block_number] = ( - curr_total_hotkey_stake.value + amount.rao - ) - total_coldkey_stake_state[wallet.coldkeypub.ss58_address][self.block_number] = ( - curr_total_coldkey_stake.value + amount.rao - ) - - # Remove from free balance - self.chain_state["System"]["Account"][wallet.coldkeypub.ss58_address]["data"][ - "free" - ][self.block_number] = (bal - amount).rao - - return True - - def _do_unstake( - self, - wallet: "wallet", - hotkey_ss58: str, - amount: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - subtensor_state = self.chain_state["SubtensorModule"] - - bal = self.get_balance(wallet.coldkeypub.ss58_address) - curr_stake = self.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=hotkey_ss58, coldkey_ss58=wallet.coldkeypub.ss58_address - ) - if curr_stake is None: - curr_stake = Balance(0) - - if curr_stake < amount: - raise Exception("Insufficient funds") - - stake_state = subtensor_state["Stake"] - - if curr_stake.rao == 0: - return True - - # Unstake the funds - # We know that the hotkey has stake, so we can just remove it - stake_state[hotkey_ss58][wallet.coldkeypub.ss58_address][self.block_number] = ( - curr_stake - amount - ).rao - # Add to the free balance - if wallet.coldkeypub.ss58_address not in self.chain_state["System"]["Account"]: - self.chain_state["System"]["Account"][wallet.coldkeypub.ss58_address] = { - "data": {"free": {}} - } - - # Remove from total stake storage - subtensor_state["TotalStake"][self.block_number] = ( - self._get_most_recent_storage(subtensor_state["TotalStake"]) - amount.rao - ) - - total_hotkey_stake_state = subtensor_state["TotalHotkeyStake"] - if not hotkey_ss58 in total_hotkey_stake_state: - total_hotkey_stake_state[hotkey_ss58] = {} - total_hotkey_stake_state[hotkey_ss58][self.block_number] = ( - 0 # Shouldn't happen - ) - - 
total_coldkey_stake_state = subtensor_state["TotalColdkeyStake"] - if not wallet.coldkeypub.ss58_address in total_coldkey_stake_state: - total_coldkey_stake_state[wallet.coldkeypub.ss58_address] = {} - total_coldkey_stake_state[wallet.coldkeypub.ss58_address][ - self.block_number - ] = amount.rao # Shouldn't happen - - total_hotkey_stake_state[hotkey_ss58][self.block_number] = ( - self._get_most_recent_storage( - subtensor_state["TotalHotkeyStake"][hotkey_ss58] - ) - - amount.rao - ) - total_coldkey_stake_state[wallet.coldkeypub.ss58_address][self.block_number] = ( - self._get_most_recent_storage( - subtensor_state["TotalColdkeyStake"][wallet.coldkeypub.ss58_address] - ) - - amount.rao - ) - - self.chain_state["System"]["Account"][wallet.coldkeypub.ss58_address]["data"][ - "free" - ][self.block_number] = (bal + amount).rao - - return True - - @staticmethod - def min_required_stake(): - """ - As the minimum required stake may change, this method allows us to dynamically - update the amount in the mock without updating the tests - """ - # valid minimum threshold as of 2024/05/01 - return 100_000_000 # RAO - - def get_minimum_required_stake(self): - return Balance.from_rao(self.min_required_stake()) - - def get_delegate_by_hotkey( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> Optional["DelegateInfo"]: - subtensor_state = self.chain_state["SubtensorModule"] - - if hotkey_ss58 not in subtensor_state["Delegates"]: - return None - - newest_state = self._get_most_recent_storage( - subtensor_state["Delegates"][hotkey_ss58], block - ) - if newest_state is None: - return None - - nom_result = [] - nominators = subtensor_state["Stake"][hotkey_ss58] - for nominator in nominators: - nom_amount = self.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=hotkey_ss58, coldkey_ss58=nominator, block=block - ) - if nom_amount is not None and nom_amount.rao > 0: - nom_result.append((nominator, nom_amount)) - - registered_subnets = [] - for subnet in 
self.get_all_subnet_netuids(block=block): - uid = self.get_uid_for_hotkey_on_subnet( - hotkey_ss58=hotkey_ss58, netuid=subnet, block=block - ) - - if uid is not None: - registered_subnets.append((subnet, uid)) - - info = DelegateInfo( - hotkey_ss58=hotkey_ss58, - total_stake=self.get_total_stake_for_hotkey(ss58_address=hotkey_ss58) - or Balance(0), - nominators=nom_result, - owner_ss58=self.get_hotkey_owner(hotkey_ss58=hotkey_ss58, block=block), - take=0.18, - validator_permits=[ - subnet - for subnet, uid in registered_subnets - if self.neuron_has_validator_permit(uid=uid, netuid=subnet, block=block) - ], - registrations=[subnet for subnet, _ in registered_subnets], - return_per_1000=Balance.from_tao(1234567), # Doesn't matter for mock? - total_daily_return=Balance.from_tao(1234567), # Doesn't matter for mock? - ) - - return info - - def get_delegates(self, block: Optional[int] = None) -> List["DelegateInfo"]: - subtensor_state = self.chain_state["SubtensorModule"] - delegates_info = [] - for hotkey in subtensor_state["Delegates"]: - info = self.get_delegate_by_hotkey(hotkey_ss58=hotkey, block=block) - if info is not None: - delegates_info.append(info) - - return delegates_info - - def get_delegated( - self, coldkey_ss58: str, block: Optional[int] = None - ) -> List[Tuple["DelegateInfo", "Balance"]]: - """Returns the list of delegates that a given coldkey is staked to.""" - delegates = self.get_delegates(block=block) - - result = [] - for delegate in delegates: - if coldkey_ss58 in delegate.nominators: - result.append((delegate, delegate.nominators[coldkey_ss58])) - - return result - - def get_all_subnets_info(self, block: Optional[int] = None) -> List[SubnetInfo]: - subtensor_state = self.chain_state["SubtensorModule"] - result = [] - for subnet in subtensor_state["NetworksAdded"]: - info = self.get_subnet_info(netuid=subnet, block=block) - if info is not None: - result.append(info) - - return result - - def get_subnet_info( - self, netuid: int, block: 
Optional[int] = None - ) -> Optional[SubnetInfo]: - if not self.subnet_exists(netuid=netuid, block=block): - return None - - def query_subnet_info(name: str) -> Optional[object]: - return self.query_subtensor(name=name, block=block, params=[netuid]).value - - info = SubnetInfo( - netuid=netuid, - rho=query_subnet_info(name="Rho"), - kappa=query_subnet_info(name="Kappa"), - difficulty=query_subnet_info(name="Difficulty"), - immunity_period=query_subnet_info(name="ImmunityPeriod"), - max_allowed_validators=query_subnet_info(name="MaxAllowedValidators"), - min_allowed_weights=query_subnet_info(name="MinAllowedWeights"), - max_weight_limit=query_subnet_info(name="MaxWeightLimit"), - scaling_law_power=query_subnet_info(name="ScalingLawPower"), - subnetwork_n=query_subnet_info(name="SubnetworkN"), - max_n=query_subnet_info(name="MaxAllowedUids"), - blocks_since_epoch=query_subnet_info(name="BlocksSinceLastStep"), - tempo=query_subnet_info(name="Tempo"), - modality=query_subnet_info(name="NetworkModality"), - connection_requirements={ - str(netuid_.value): percentile.value - for netuid_, percentile in self.query_map_subtensor( - name="NetworkConnect", block=block, params=[netuid] - ).records - }, - emission_value=query_subnet_info(name="EmissionValues"), - burn=query_subnet_info(name="Burn"), - owner_ss58=query_subnet_info(name="SubnetOwner"), - ) - - return info - - def _do_serve_prometheus( - self, - wallet: "wallet", - call_params: "PrometheusServeCallParams", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - return True, None - - def _do_set_weights( - self, - wallet: "wallet", - netuid: int, - uids: int, - vals: List[int], - version_key: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - return True, None - - def _do_serve_axon( - self, - wallet: "wallet", - call_params: "AxonServeCallParams", - wait_for_inclusion: bool = False, - 
wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - return True, None diff --git a/bittensor/mock/wallet_mock.py b/bittensor/mock/wallet_mock.py deleted file mode 100644 index 35179f8c94..0000000000 --- a/bittensor/mock/wallet_mock.py +++ /dev/null @@ -1,127 +0,0 @@ -# The MIT License (MIT) - -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import os -import bittensor -from typing import Optional -from Crypto.Hash import keccak - -from .keyfile_mock import MockKeyfile - - -class MockWallet(bittensor.wallet): - """ - Mocked Version of the bittensor wallet class, meant to be used for testing - """ - - def __init__(self, **kwargs): - r"""Init bittensor wallet object containing a hot and coldkey. - Args: - _mock (required=True, default=False): - If true creates a mock wallet with random keys. 
- """ - super().__init__(**kwargs) - # For mocking. - self._is_mock = True - self._mocked_coldkey_keyfile = None - self._mocked_hotkey_keyfile = None - - @property - def hotkey_file(self) -> "bittensor.keyfile": - if self._is_mock: - if self._mocked_hotkey_keyfile == None: - self._mocked_hotkey_keyfile = MockKeyfile(path="MockedHotkey") - return self._mocked_hotkey_keyfile - else: - wallet_path = os.path.expanduser(os.path.join(self.path, self.name)) - hotkey_path = os.path.join(wallet_path, "hotkeys", self.hotkey_str) - return bittensor.keyfile(path=hotkey_path) - - @property - def coldkey_file(self) -> "bittensor.keyfile": - if self._is_mock: - if self._mocked_coldkey_keyfile == None: - self._mocked_coldkey_keyfile = MockKeyfile(path="MockedColdkey") - return self._mocked_coldkey_keyfile - else: - wallet_path = os.path.expanduser(os.path.join(self.path, self.name)) - coldkey_path = os.path.join(wallet_path, "coldkey") - return bittensor.keyfile(path=coldkey_path) - - @property - def coldkeypub_file(self) -> "bittensor.keyfile": - if self._is_mock: - if self._mocked_coldkey_keyfile == None: - self._mocked_coldkey_keyfile = MockKeyfile(path="MockedColdkeyPub") - return self._mocked_coldkey_keyfile - else: - wallet_path = os.path.expanduser(os.path.join(self.path, self.name)) - coldkeypub_path = os.path.join(wallet_path, "coldkeypub.txt") - return bittensor.keyfile(path=coldkeypub_path) - - -def get_mock_wallet( - coldkey: "bittensor.Keypair" = None, hotkey: "bittensor.Keypair" = None -): - wallet = MockWallet(name="mock_wallet", hotkey="mock", path="/tmp/mock_wallet") - - if not coldkey: - coldkey = bittensor.Keypair.create_from_mnemonic( - bittensor.Keypair.generate_mnemonic() - ) - if not hotkey: - hotkey = bittensor.Keypair.create_from_mnemonic( - bittensor.Keypair.generate_mnemonic() - ) - - wallet.set_coldkey(coldkey, encrypt=False, overwrite=True) - wallet.set_coldkeypub(coldkey, encrypt=False, overwrite=True) - wallet.set_hotkey(hotkey, encrypt=False, 
overwrite=True) - - return wallet - - -def get_mock_keypair(uid: int, test_name: Optional[str] = None) -> bittensor.Keypair: - """ - Returns a mock keypair from a uid and optional test_name. - If test_name is not provided, the uid is the only seed. - If test_name is provided, the uid is hashed with the test_name to create a unique seed for the test. - """ - if test_name is not None: - hashed_test_name: bytes = keccak.new( - digest_bits=256, data=test_name.encode("utf-8") - ).digest() - hashed_test_name_as_int: int = int.from_bytes( - hashed_test_name, byteorder="big", signed=False - ) - uid = uid + hashed_test_name_as_int - - return bittensor.Keypair.create_from_seed( - seed_hex=int.to_bytes(uid, 32, "big", signed=False), - ss58_format=bittensor.__ss58_format__, - ) - - -def get_mock_hotkey(uid: int) -> str: - return get_mock_keypair(uid).ss58_address - - -def get_mock_coldkey(uid: int) -> str: - return get_mock_keypair(uid).ss58_address diff --git a/bittensor/stream.py b/bittensor/stream.py deleted file mode 100644 index 3a82edc15a..0000000000 --- a/bittensor/stream.py +++ /dev/null @@ -1,152 +0,0 @@ -import typing - -from aiohttp import ClientResponse -import bittensor - -from starlette.responses import StreamingResponse as _StreamingResponse -from starlette.types import Send, Receive, Scope -from typing import Callable, Awaitable -from pydantic import ConfigDict, BaseModel -from abc import ABC, abstractmethod - - -class BTStreamingResponseModel(BaseModel): - """ - :func:`BTStreamingResponseModel` is a Pydantic model that encapsulates the token streamer callable for Pydantic validation. - It is used within the :func:`StreamingSynapse` class to create a :func:`BTStreamingResponse` object, which is responsible for handling - the streaming of tokens. - - The token streamer is a callable that takes a send function and returns an awaitable. 
It is responsible for generating - the content of the streaming response, typically by processing tokens and sending them to the client. - - This model ensures that the token streamer conforms to the expected signature and provides a clear interface for - passing the token streamer to the BTStreamingResponse class. - - Attributes: - token_streamer: Callable[[Send], Awaitable[None]] - The token streamer callable, which takes a send function (provided by the ASGI server) and returns an awaitable. - It is responsible for generating the content of the streaming response. - """ - - token_streamer: Callable[[Send], Awaitable[None]] - - -class StreamingSynapse(bittensor.Synapse, ABC): - """ - The :func:`StreamingSynapse` class is designed to be subclassed for handling streaming responses in the Bittensor network. - It provides abstract methods that must be implemented by the subclass to deserialize, process streaming responses, - and extract JSON data. It also includes a method to create a streaming response object. - """ - - model_config = ConfigDict(validate_assignment=True) - - class BTStreamingResponse(_StreamingResponse): - """ - :func:`BTStreamingResponse` is a specialized subclass of the Starlette StreamingResponse designed to handle the streaming - of tokens within the Bittensor network. It is used internally by the StreamingSynapse class to manage the response - streaming process, including sending headers and calling the token streamer provided by the subclass. - - This class is not intended to be directly instantiated or modified by developers subclassing StreamingSynapse. - Instead, it is used by the :func:`create_streaming_response` method to create a response object based on the token streamer - provided by the subclass. - """ - - def __init__( - self, - model: BTStreamingResponseModel, - *, - synapse: typing.Optional["StreamingSynapse"] = None, - **kwargs, - ): - """ - Initializes the BTStreamingResponse with the given token streamer model. 
- - Args: - model: A BTStreamingResponseModel instance containing the token streamer callable, which is responsible for generating the content of the response. - synapse: The response Synapse to be used to update the response headers etc. - **kwargs: Additional keyword arguments passed to the parent StreamingResponse class. - """ - super().__init__(content=iter(()), **kwargs) - self.token_streamer = model.token_streamer - self.synapse = synapse - - async def stream_response(self, send: Send): - """ - Asynchronously streams the response by sending headers and calling the token streamer. - - This method is responsible for initiating the response by sending the appropriate headers, including the - content type for event-streaming. It then calls the token streamer to generate the content and sends the - response body to the client. - - Args: - send: A callable to send the response, provided by the ASGI server. - """ - headers = [(b"content-type", b"text/event-stream")] + self.raw_headers - - await send( - {"type": "http.response.start", "status": 200, "headers": headers} - ) - - await self.token_streamer(send) - - await send({"type": "http.response.body", "body": b"", "more_body": False}) - - async def __call__(self, scope: Scope, receive: Receive, send: Send): - """ - Asynchronously calls the stream_response method, allowing the BTStreamingResponse object to be used as an ASGI - application. - - This method is part of the ASGI interface and is called by the ASGI server to handle the request and send the - response. It delegates to the :func:`stream_response` method to perform the actual streaming process. - - Args: - scope: The scope of the request, containing information about the client, server, and request itself. - receive: A callable to receive the request, provided by the ASGI server. - send: A callable to send the response, provided by the ASGI server. 
- """ - await self.stream_response(send) - - @abstractmethod - async def process_streaming_response(self, response: ClientResponse): - """ - Abstract method that must be implemented by the subclass. - This method should provide logic to handle the streaming response, such as parsing and accumulating data. - It is called as the response is being streamed from the network, and should be implemented to handle the specific - streaming data format and requirements of the subclass. - - Args: - response: The response object to be processed, typically containing chunks of data. - """ - ... - - @abstractmethod - def extract_response_json(self, response: ClientResponse) -> dict: - """ - Abstract method that must be implemented by the subclass. - This method should provide logic to extract JSON data from the response, including headers and content. - It is called after the response has been processed and is responsible for retrieving structured data - that can be used by the application. - - Args: - response: The response object from which to extract JSON data. - """ - ... - - def create_streaming_response( - self, token_streamer: Callable[[Send], Awaitable[None]] - ) -> BTStreamingResponse: - """ - Creates a streaming response using the provided token streamer. - This method can be used by the subclass to create a response object that can be sent back to the client. - The token streamer should be implemented to generate the content of the response according to the specific - requirements of the subclass. - - Args: - token_streamer: A callable that takes a send function and returns an awaitable. It's responsible for generating the content of the response. - - Returns: - BTStreamingResponse: The streaming response object, ready to be sent to the client. 
- """ - model_instance = BTStreamingResponseModel(token_streamer=token_streamer) - - return self.BTStreamingResponse(model_instance, synapse=self) diff --git a/bittensor/subnets.py b/bittensor/subnets.py deleted file mode 100644 index 836a20dcb7..0000000000 --- a/bittensor/subnets.py +++ /dev/null @@ -1,74 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import bittensor as bt -from abc import ABC, abstractmethod -from typing import Any, List, Union, Optional - - -class SubnetsAPI(ABC): - def __init__(self, wallet: "bt.wallet"): - self.wallet = wallet - self.dendrite = bt.dendrite(wallet=wallet) - - async def __call__(self, *args, **kwargs): - return await self.query_api(*args, **kwargs) - - @abstractmethod - def prepare_synapse(self, *args, **kwargs) -> Any: - """ - Prepare the synapse-specific payload. - """ - ... 
- - @abstractmethod - def process_responses(self, responses: List[Union["bt.Synapse", Any]]) -> Any: - """ - Process the responses from the network. - """ - ... - - async def query_api( - self, - axons: Union[bt.axon, List[bt.axon]], - deserialize: Optional[bool] = False, - timeout: Optional[int] = 12, - **kwargs: Optional[Any], - ) -> Any: - """ - Queries the API nodes of a subnet using the given synapse and bespoke query function. - - Args: - axons (Union[bt.axon, List[bt.axon]]): The list of axon(s) to query. - deserialize (bool, optional): Whether to deserialize the responses. Defaults to False. - timeout (int, optional): The timeout in seconds for the query. Defaults to 12. - **kwargs: Keyword arguments for the prepare_synapse_fn. - - Returns: - Any: The result of the process_responses_fn. - """ - synapse = self.prepare_synapse(**kwargs) - bt.logging.debug(f"Querying validator axons with synapse {synapse.name}...") - responses = await self.dendrite( - axons=axons, - synapse=synapse, - deserialize=deserialize, - timeout=timeout, - ) - return self.process_responses(responses) diff --git a/bittensor/subtensor.py b/bittensor/subtensor.py deleted file mode 100644 index 2c8f1d75c6..0000000000 --- a/bittensor/subtensor.py +++ /dev/null @@ -1,5871 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -""" -The ``bittensor.subtensor`` module in Bittensor serves as a crucial interface for interacting with the Bittensor -blockchain, facilitating a range of operations essential for the decentralized machine learning network. -""" - -from __future__ import annotations - -import argparse -import copy -import socket -import time -from typing import List, Dict, Union, Optional, Tuple, TypedDict, Any - -import numpy as np -import scalecodec -from numpy.typing import NDArray -from retry import retry -from scalecodec.base import RuntimeConfiguration -from scalecodec.exceptions import RemainingScaleBytesNotEmptyException -from scalecodec.type_registry import load_type_registry_preset -from scalecodec.types import GenericCall, ScaleType -from substrateinterface.base import QueryMapResult, SubstrateInterface, ExtrinsicReceipt -from substrateinterface.exceptions import SubstrateRequestException - -import bittensor -from bittensor.btlogging import logging as _logger -from bittensor.utils import torch, weight_utils, format_error_message -from .chain_data import ( - DelegateInfoLite, - NeuronInfo, - DelegateInfo, - PrometheusInfo, - SubnetInfo, - SubnetHyperparameters, - StakeInfo, - NeuronInfoLite, - AxonInfo, - ProposalVoteData, - IPInfo, - custom_rpc_type_registry, -) -from .errors import ( - IdentityError, - NominationError, - StakeError, - TakeError, -) -from .extrinsics.commit_weights import ( - commit_weights_extrinsic, - reveal_weights_extrinsic, -) -from .extrinsics.delegation import ( - 
delegate_extrinsic, - nominate_extrinsic, - undelegate_extrinsic, - increase_take_extrinsic, - decrease_take_extrinsic, -) -from .extrinsics.network import ( - register_subnetwork_extrinsic, - set_hyperparameter_extrinsic, -) -from .extrinsics.prometheus import prometheus_extrinsic -from .extrinsics.registration import ( - register_extrinsic, - burned_register_extrinsic, - run_faucet_extrinsic, - swap_hotkey_extrinsic, -) -from .extrinsics.root import root_register_extrinsic, set_root_weights_extrinsic -from .extrinsics.senate import ( - register_senate_extrinsic, - leave_senate_extrinsic, - vote_senate_extrinsic, -) -from .extrinsics.serving import ( - serve_extrinsic, - serve_axon_extrinsic, - publish_metadata, - get_metadata, -) -from .extrinsics.set_weights import set_weights_extrinsic -from .extrinsics.staking import ( - add_stake_extrinsic, - add_stake_multiple_extrinsic, - set_children_extrinsic, - set_childkey_take_extrinsic, -) -from .extrinsics.transfer import transfer_extrinsic -from .extrinsics.unstaking import ( - unstake_extrinsic, - unstake_multiple_extrinsic, -) -from .types import AxonServeCallParams, PrometheusServeCallParams -from .utils import ( - U16_NORMALIZED_FLOAT, - ss58_to_vec_u8, - U64_NORMALIZED_FLOAT, - networking, -) -from .utils.balance import Balance -from .utils.registration import POWSolution -from .utils.registration import legacy_torch_api_compat -from .utils.subtensor import get_subtensor_errors, format_parent, format_children - -KEY_NONCE: Dict[str, int] = {} - - -class ParamWithTypes(TypedDict): - name: str # Name of the parameter. - type: str # ScaleType string of the parameter. - - -class Subtensor: - """ - The Subtensor class in Bittensor serves as a crucial interface for interacting with the Bittensor blockchain, - facilitating a range of operations essential for the decentralized machine learning network. 
- - This class enables neurons (network participants) to engage in activities such as registering on the network, - managing staked weights, setting inter-neuronal weights, and participating in consensus mechanisms. - - The Bittensor network operates on a digital ledger where each neuron holds stakes (S) and learns a set - of inter-peer weights (W). These weights, set by the neurons themselves, play a critical role in determining - the ranking and incentive mechanisms within the network. Higher-ranked neurons, as determined by their - contributions and trust within the network, receive more incentives. - - The Subtensor class connects to various Bittensor networks like the main ``finney`` network or local test - networks, providing a gateway to the blockchain layer of Bittensor. It leverages a staked weighted trust - system and consensus to ensure fair and distributed incentive mechanisms, where incentives (I) are - primarily allocated to neurons that are trusted by the majority of the network. - - Additionally, Bittensor introduces a speculation-based reward mechanism in the form of bonds (B), allowing - neurons to accumulate bonds in other neurons, speculating on their future value. This mechanism aligns - with market-based speculation, incentivizing neurons to make judicious decisions in their inter-neuronal - investments. - - Example Usage:: - - # Connect to the main Bittensor network (Finney). - finney_subtensor = subtensor(network='finney') - - # Close websocket connection with the Bittensor network. - finney_subtensor.close() - - # (Re)creates the websocket connection with the Bittensor network. - finney_subtensor.connect_websocket() - - # Register a new neuron on the network. - wallet = bittensor.wallet(...) # Assuming a wallet instance is created. - success = finney_subtensor.register(wallet=wallet, netuid=netuid) - - # Set inter-neuronal weights for collaborative learning. 
- success = finney_subtensor.set_weights(wallet=wallet, netuid=netuid, uids=[...], weights=[...]) - - # Speculate by accumulating bonds in other promising neurons. - success = finney_subtensor.delegate(wallet=wallet, delegate_ss58=other_neuron_ss58, amount=bond_amount) - - # Get the metagraph for a specific subnet using given subtensor connection - metagraph = subtensor.metagraph(netuid=netuid) - - By facilitating these operations, the Subtensor class is instrumental in maintaining the decentralized - intelligence and dynamic learning environment of the Bittensor network, as envisioned in its foundational - principles and mechanisms described in the `NeurIPS paper - `_. paper. - """ - - def __init__( - self, - network: Optional[str] = None, - config: Optional[bittensor.config] = None, - _mock: bool = False, - log_verbose: bool = True, - connection_timeout: int = 600, - ) -> None: - """ - Initializes a Subtensor interface for interacting with the Bittensor blockchain. - - NOTE: - Currently subtensor defaults to the ``finney`` network. This will change in a future release. - - We strongly encourage users to run their own local subtensor node whenever possible. This increases - decentralization and resilience of the network. In a future release, local subtensor will become the - default and the fallback to ``finney`` removed. Please plan ahead for this change. We will provide detailed - instructions on how to run a local subtensor node in the documentation in a subsequent release. - - Args: - network (str, optional): The network name to connect to (e.g., ``finney``, ``local``). This can also be the - chain endpoint (e.g., ``wss://entrypoint-finney.opentensor.ai:443``) and will be correctly parsed into - the network and chain endpoint. If not specified, defaults to the main Bittensor network. - config (bittensor.config, optional): Configuration object for the subtensor. If not provided, a default - configuration is used. 
- _mock (bool, optional): If set to ``True``, uses a mocked connection for testing purposes. - - This initialization sets up the connection to the specified Bittensor network, allowing for various - blockchain operations such as neuron registration, stake management, and setting weights. - - """ - # Determine config.subtensor.chain_endpoint and config.subtensor.network config. - # If chain_endpoint is set, we override the network flag, otherwise, the chain_endpoint is assigned by the - # network. - # Argument importance: network > chain_endpoint > config.subtensor.chain_endpoint > config.subtensor.network - - # Check if network is a config object. (Single argument passed as first positional) - if isinstance(network, bittensor.config): - if network.subtensor is None: - _logger.warning( - "If passing a bittensor config object, it must not be empty. Using default subtensor config." - ) - config = None - else: - config = network - network = None - - if config is None: - config = Subtensor.config() - self.config = copy.deepcopy(config) # type: ignore - - # Setup config.subtensor.network and config.subtensor.chain_endpoint - self.chain_endpoint, self.network = Subtensor.setup_config(network, config) # type: ignore - - if ( - self.network == "finney" - or self.chain_endpoint == bittensor.__finney_entrypoint__ - ) and log_verbose: - _logger.info( - f"You are connecting to {self.network} network with endpoint {self.chain_endpoint}." - ) - _logger.warning( - "We strongly encourage running a local subtensor node whenever possible. " - "This increases decentralization and resilience of the network." - ) - _logger.warning( - "In a future release, local subtensor will become the default endpoint. " - "To get ahead of this change, please run a local subtensor node and point to it." 
- ) - - self.log_verbose = log_verbose - self._connection_timeout = connection_timeout - self._get_substrate() - - self._subtensor_errors: Dict[str, Dict[str, str]] = {} - - def __str__(self) -> str: - if self.network == self.chain_endpoint: - # Connecting to chain endpoint without network known. - return "subtensor({})".format(self.chain_endpoint) - else: - # Connecting to network with endpoint known. - return "subtensor({}, {})".format(self.network, self.chain_endpoint) - - def __repr__(self) -> str: - return self.__str__() - - def _get_substrate(self): - """Establishes a connection to the Substrate node using configured parameters.""" - try: - # Set up params. - self.substrate = SubstrateInterface( - ss58_format=bittensor.__ss58_format__, - use_remote_preset=True, - url=self.chain_endpoint, - type_registry=bittensor.__type_registry__, - ) - if self.log_verbose: - _logger.info( - f"Connected to {self.network} network and {self.chain_endpoint}." - ) - - except ConnectionRefusedError: - _logger.error( - f"Could not connect to {self.network} network with {self.chain_endpoint} chain endpoint. Exiting...", - ) - _logger.info( - "You can check if you have connectivity by running this command: nc -vz localhost " - f"{self.chain_endpoint.split(':')[2]}" - ) - return - - try: - self.substrate.websocket.settimeout(self._connection_timeout) - except AttributeError as e: - _logger.warning(f"AttributeError: {e}") - except TypeError as e: - _logger.warning(f"TypeError: {e}") - except (socket.error, OSError) as e: - _logger.warning(f"Socket error: {e}") - - @staticmethod - def config() -> "bittensor.config": - """ - Creates and returns a Bittensor configuration object. - - Returns: - config (bittensor.config): A Bittensor configuration object configured with arguments added by the - `subtensor.add_args` method. 
- """ - parser = argparse.ArgumentParser() - Subtensor.add_args(parser) - return bittensor.config(parser, args=[]) - - @classmethod - def help(cls): - """Print help to stdout.""" - parser = argparse.ArgumentParser() - cls.add_args(parser) - print(cls.__new__.__doc__) - parser.print_help() - - @classmethod - def add_args(cls, parser: "argparse.ArgumentParser", prefix: Optional[str] = None): - """ - Adds command-line arguments to the provided ArgumentParser for configuring the Subtensor settings. - - Args: - parser (argparse.ArgumentParser): The ArgumentParser object to which the Subtensor arguments will be added. - prefix (Optional[str]): An optional prefix for the argument names. If provided, the prefix is prepended to - each argument name. - - Arguments added: - --subtensor.network: The Subtensor network flag. Possible values are 'finney', 'test', 'archive', and - 'local'. Overrides the chain endpoint if set. - --subtensor.chain_endpoint: The Subtensor chain endpoint flag. If set, it overrides the network flag. - --subtensor._mock: If true, uses a mocked connection to the chain. - - Example: - parser = argparse.ArgumentParser() - Subtensor.add_args(parser) - """ - prefix_str = "" if prefix is None else f"{prefix}." - try: - default_network = bittensor.__networks__[1] - default_chain_endpoint = bittensor.__finney_entrypoint__ - - parser.add_argument( - f"--{prefix_str}subtensor.network", - default=default_network, - type=str, - help="""The subtensor network flag. The likely choices are: - -- finney (main network) - -- test (test network) - -- archive (archive network +300 blocks) - -- local (local running network) - If this option is set it overloads subtensor.chain_endpoint with - an entry point node from that network. - """, - ) - parser.add_argument( - f"--{prefix_str}subtensor.chain_endpoint", - default=default_chain_endpoint, - type=str, - help="""The subtensor endpoint flag. 
If set, overrides the --network flag.""", - ) - parser.add_argument( - f"--{prefix_str}subtensor._mock", - default=False, - type=bool, - help="""If true, uses a mocked connection to the chain.""", - ) - - except argparse.ArgumentError: - # re-parsing arguments. - pass - - @staticmethod - def determine_chain_endpoint_and_network(network: str): - """Determines the chain endpoint and network from the passed network or chain_endpoint. - - Args: - network (str): The network flag. The choices are: ``-- finney`` (main network), ``-- archive`` - (archive network +300 blocks), ``-- local`` (local running network), ``-- test`` (test network). - Returns: - network (str): The network flag. - chain_endpoint (str): The chain endpoint flag. If set, overrides the ``network`` argument. - """ - if network is None: - return None, None - if network in ["finney", "local", "test", "archive"]: - if network == "finney": - # Kiru Finney staging network. - return network, bittensor.__finney_entrypoint__ - elif network == "local": - return network, bittensor.__local_entrypoint__ - elif network == "test": - return network, bittensor.__finney_test_entrypoint__ - elif network == "archive": - return network, bittensor.__archive_entrypoint__ - else: - if ( - network == bittensor.__finney_entrypoint__ - or "entrypoint-finney.opentensor.ai" in network - ): - return "finney", bittensor.__finney_entrypoint__ - elif ( - network == bittensor.__finney_test_entrypoint__ - or "test.finney.opentensor.ai" in network - ): - return "test", bittensor.__finney_test_entrypoint__ - elif ( - network == bittensor.__archive_entrypoint__ - or "archive.chain.opentensor.ai" in network - ): - return "archive", bittensor.__archive_entrypoint__ - elif "127.0.0.1" in network or "localhost" in network: - return "local", network - else: - return "unknown network", network - - @staticmethod - def setup_config(network: str, config: "bittensor.config"): - """ - Sets up and returns the configuration for the Subtensor network and 
endpoint. - - This method determines the appropriate network and chain endpoint based on the provided network string or - configuration object. It evaluates the network and endpoint in the following order of precedence: - 1. Provided network string. - 2. Configured chain endpoint in the `config` object. - 3. Configured network in the `config` object. - 4. Default chain endpoint. - 5. Default network. - - Args: - network (str): The name of the Subtensor network. If None, the network and endpoint will be determined from - the `config` object. - config (bittensor.config): The configuration object containing the network and chain endpoint settings. - - Returns: - tuple: A tuple containing the formatted WebSocket endpoint URL and the evaluated network name. - """ - if network is not None: - ( - evaluated_network, - evaluated_endpoint, - ) = Subtensor.determine_chain_endpoint_and_network(network) - else: - if config.get("__is_set", {}).get("subtensor.chain_endpoint"): - ( - evaluated_network, - evaluated_endpoint, - ) = Subtensor.determine_chain_endpoint_and_network( - config.subtensor.chain_endpoint - ) - - elif config.get("__is_set", {}).get("subtensor.network"): - ( - evaluated_network, - evaluated_endpoint, - ) = Subtensor.determine_chain_endpoint_and_network( - config.subtensor.network - ) - - elif config.subtensor.get("chain_endpoint"): - ( - evaluated_network, - evaluated_endpoint, - ) = Subtensor.determine_chain_endpoint_and_network( - config.subtensor.chain_endpoint - ) - - elif config.subtensor.get("network"): - ( - evaluated_network, - evaluated_endpoint, - ) = Subtensor.determine_chain_endpoint_and_network( - config.subtensor.network - ) - - else: - ( - evaluated_network, - evaluated_endpoint, - ) = Subtensor.determine_chain_endpoint_and_network( - bittensor.defaults.subtensor.network - ) - - return ( - networking.get_formatted_ws_endpoint_url(evaluated_endpoint), - evaluated_network, - ) - - def close(self): - """Cleans up resources for this subtensor 
instance like active websocket connection and active extensions.""" - self.substrate.close() - - ############## - # Delegation # - ############## - def nominate( - self, - wallet: "bittensor.wallet", - wait_for_finalization: bool = False, - wait_for_inclusion: bool = True, - ) -> bool: - """ - Becomes a delegate for the hotkey associated with the given wallet. This method is used to nominate - a neuron (identified by the hotkey in the wallet) as a delegate on the Bittensor network, allowing it - to participate in consensus and validation processes. - - Args: - wallet (bittensor.wallet): The wallet containing the hotkey to be nominated. - wait_for_finalization (bool, optional): If ``True``, waits until the transaction is finalized on the - blockchain. - wait_for_inclusion (bool, optional): If ``True``, waits until the transaction is included in a block. - - Returns: - bool: ``True`` if the nomination process is successful, ``False`` otherwise. - - This function is a key part of the decentralized governance mechanism of Bittensor, allowing for the - dynamic selection and participation of validators in the network's consensus process. - """ - return nominate_extrinsic( - subtensor=self, - wallet=wallet, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - def delegate( - self, - wallet: "bittensor.wallet", - delegate_ss58: Optional[str] = None, - amount: Optional[Union[Balance, float]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Becomes a delegate for the hotkey associated with the given wallet. This method is used to nominate - a neuron (identified by the hotkey in the wallet) as a delegate on the Bittensor network, allowing it - to participate in consensus and validation processes. - - Args: - wallet (bittensor.wallet): The wallet containing the hotkey to be nominated. 
- delegate_ss58 (Optional[str]): The ``SS58`` address of the delegate neuron. - amount (Union[Balance, float]): The amount of TAO to undelegate. - wait_for_finalization (bool, optional): If ``True``, waits until the transaction is finalized on the - blockchain. - wait_for_inclusion (bool, optional): If ``True``, waits until the transaction is included in a block. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the nomination process is successful, False otherwise. - - This function is a key part of the decentralized governance mechanism of Bittensor, allowing for the - dynamic selection and participation of validators in the network's consensus process. - """ - return delegate_extrinsic( - subtensor=self, - wallet=wallet, - delegate_ss58=delegate_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - def undelegate( - self, - wallet: "bittensor.wallet", - delegate_ss58: Optional[str] = None, - amount: Optional[Union[Balance, float]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Removes a specified amount of stake from a delegate neuron using the provided wallet. This action - reduces the staked amount on another neuron, effectively withdrawing support or speculation. - - Args: - wallet (bittensor.wallet): The wallet used for the undelegation process. - delegate_ss58 (Optional[str]): The ``SS58`` address of the delegate neuron. - amount (Union[Balance, float]): The amount of TAO to undelegate. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. 
- - Returns: - bool: ``True`` if the undelegation is successful, False otherwise. - - This function reflects the dynamic and speculative nature of the Bittensor network, allowing neurons - to adjust their stakes and investments based on changing perceptions and performances within the network. - """ - return undelegate_extrinsic( - subtensor=self, - wallet=wallet, - delegate_ss58=delegate_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - def set_take( - self, - wallet: "bittensor.wallet", - delegate_ss58: Optional[str] = None, - take: float = 0.0, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - """ - Set delegate hotkey take - Args: - wallet (bittensor.wallet): The wallet containing the hotkey to be nominated. - delegate_ss58 (str, optional): Hotkey - take (float): Delegate take on subnet ID - wait_for_finalization (bool, optional): If ``True``, waits until the transaction is finalized on the - blockchain. - wait_for_inclusion (bool, optional): If ``True``, waits until the transaction is included in a block. - - Returns: - bool: ``True`` if the process is successful, False otherwise. - - This function is a key part of the decentralized governance mechanism of Bittensor, allowing for the - dynamic selection and participation of validators in the network's consensus process. 
- """ - # Ensure delegate_ss58 is not None - if delegate_ss58 is None: - raise ValueError("delegate_ss58 cannot be None") - - # Calculate u16 representation of the take - takeu16 = int(take * 0xFFFF) - - # Check if the new take is greater or lower than existing take or if existing is set - delegate = self.get_delegate_by_hotkey(delegate_ss58) - current_take = None - if delegate is not None: - current_take = int(float(delegate.take) * 65535.0) - - if takeu16 == current_take: - bittensor.__console__.print("Nothing to do, take hasn't changed") - return True - if current_take is None or current_take < takeu16: - bittensor.__console__.print( - "Current take is either not set or is lower than the new one. Will use increase_take" - ) - return increase_take_extrinsic( - subtensor=self, - wallet=wallet, - hotkey_ss58=delegate_ss58, - take=takeu16, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - else: - bittensor.__console__.print( - "Current take is higher than the new one. Will use decrease_take" - ) - return decrease_take_extrinsic( - subtensor=self, - wallet=wallet, - hotkey_ss58=delegate_ss58, - take=takeu16, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - @networking.ensure_connected - def send_extrinsic( - self, - wallet: "bittensor.wallet", - module: str, - function: str, - params: dict, - period: int = 5, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - max_retries: int = 3, - wait_time: int = 3, - max_wait: int = 20, - ) -> Optional[ExtrinsicReceipt]: - """ - Sends an extrinsic to the Bittensor blockchain using the provided wallet and parameters. This method - constructs and submits the extrinsic, handling retries and blockchain communication. - - Args: - wallet (bittensor.wallet): The wallet associated with the extrinsic. - module (str): The module name for the extrinsic. - function (str): The function name for the extrinsic. 
- params (dict): The parameters for the extrinsic. - period (int, optional): The number of blocks for the extrinsic to live in the mempool. Defaults to 5. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - max_retries (int, optional): The maximum number of retries for the extrinsic. Defaults to 3. - wait_time (int, optional): The wait time between retries. Defaults to 3. - max_wait (int, optional): The maximum wait time for the extrinsic. Defaults to 20. - - Returns: - Optional[ExtrinsicReceipt]: The receipt of the extrinsic if successful, None otherwise. - """ - call = self.substrate.compose_call( - call_module=module, - call_function=function, - call_params=params, - ) - - hotkey = wallet.get_hotkey().ss58_address - # Periodically update the nonce cache - if hotkey not in KEY_NONCE or self.get_current_block() % 5 == 0: - KEY_NONCE[hotkey] = self.substrate.get_account_nonce(hotkey) - - nonce = KEY_NONCE[hotkey] - - # <3 parity tech - old_init_runtime = self.substrate.init_runtime - self.substrate.init_runtime = lambda: None - self.substrate.init_runtime = old_init_runtime - response = None - - for attempt in range(1, max_retries + 1): - try: - # Create the extrinsic with new nonce - extrinsic = self.substrate.create_signed_extrinsic( - call=call, - keypair=wallet.hotkey, - era={"period": period}, - nonce=nonce, - ) - - # Submit the extrinsic - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # Return immediately if we don't wait - if not wait_for_inclusion and not wait_for_finalization: - KEY_NONCE[hotkey] = nonce + 1 # update the nonce cache - return response - - # If we wait for finalization or inclusion, check if it is successful - if response.is_success: - KEY_NONCE[hotkey] = nonce + 1 # update the nonce cache - 
return response - else: - # Wait for a while - wait = min(wait_time * attempt, max_wait) - time.sleep(wait) - # Incr the nonce and try again - nonce = nonce + 1 - continue - - # This dies because user is spamming... incr and try again - except SubstrateRequestException as e: - if "Priority is too low" in e.args[0]["message"]: - wait = min(wait_time * attempt, max_wait) - _logger.warning( - f"Priority is too low, retrying with new nonce: {nonce} in {wait} seconds." - ) - nonce = nonce + 1 - time.sleep(wait) - continue - else: - _logger.error(f"Error sending extrinsic: {e}") - response = None - - return response - - ############### - # Set Weights # - ############### - # TODO: still needed? Can't find any usage of this method. - def set_weights( - self, - wallet: "bittensor.wallet", - netuid: int, - uids: Union[NDArray[np.int64], "torch.LongTensor", list], - weights: Union[NDArray[np.float32], "torch.FloatTensor", list], - version_key: int = bittensor.__version_as_int__, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, - max_retries: int = 5, - ) -> Tuple[bool, str]: - """ - Sets the inter-neuronal weights for the specified neuron. This process involves specifying the - influence or trust a neuron places on other neurons in the network, which is a fundamental aspect - of Bittensor's decentralized learning architecture. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron setting the weights. - netuid (int): The unique identifier of the subnet. - uids (Union[NDArray[np.int64], torch.LongTensor, list]): The list of neuron UIDs that the weights are being - set for. - weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The corresponding weights to be set for each - UID. - version_key (int, optional): Version key for compatibility with the network. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. 
- wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - max_retries (int, optional): The number of maximum attempts to set weights. (Default: 5) - - Returns: - Tuple[bool, str]: ``True`` if the setting of weights is successful, False otherwise. And `msg`, a string - value describing the success or potential error. - - This function is crucial in shaping the network's collective intelligence, where each neuron's - learning and contribution are influenced by the weights it sets towards others【81†source】. - """ - uid = self.get_uid_for_hotkey_on_subnet(wallet.hotkey.ss58_address, netuid) - retries = 0 - success = False - message = "No attempt made. Perhaps it is too soon to set weights!" - while ( - self.blocks_since_last_update(netuid, uid) > self.weights_rate_limit(netuid) # type: ignore - and retries < max_retries - ): - try: - success, message = set_weights_extrinsic( - subtensor=self, - wallet=wallet, - netuid=netuid, - uids=uids, - weights=weights, - version_key=version_key, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - except Exception as e: - _logger.error(f"Error setting weights: {e}") - finally: - retries += 1 - - return success, message - - @networking.ensure_connected - def _do_set_weights( - self, - wallet: "bittensor.wallet", - uids: List[int], - vals: List[int], - netuid: int, - version_key: int = bittensor.__version_as_int__, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - ) -> Tuple[bool, Optional[str]]: # (success, error_message) - """ - Internal method to send a transaction to the Bittensor blockchain, setting weights - for specified neurons. This method constructs and submits the transaction, handling - retries and blockchain communication. 
- - Args: - wallet (bittensor.wallet): The wallet associated with the neuron setting the weights. - uids (List[int]): List of neuron UIDs for which weights are being set. - vals (List[int]): List of weight values corresponding to each UID. - netuid (int): Unique identifier for the network. - version_key (int, optional): Version key for compatibility with the network. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - - Returns: - Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. - - This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their - trust in other neurons based on observed performance and contributions. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="set_weights", - call_params={ - "dests": uids, - "weights": vals, - "netuid": netuid, - "version_key": version_key, - }, - ) - # Period dictates how long the extrinsic will stay as part of waiting pool - extrinsic = self.substrate.create_signed_extrinsic( - call=call, - keypair=wallet.hotkey, - era={"period": 5}, - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True, "Not waiting for finalization or inclusion." - - response.process_events() - if response.is_success: - return True, "Successfully set weights." 
- else: - return False, format_error_message(response.error_message) - - return make_substrate_call_with_retry() - - ################## - # Commit Weights # - ################## - def commit_weights( - self, - wallet: "bittensor.wallet", - netuid: int, - salt: List[int], - uids: Union[NDArray[np.int64], list], - weights: Union[NDArray[np.int64], list], - version_key: int = bittensor.__version_as_int__, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, - max_retries: int = 5, - ) -> Tuple[bool, str]: - """ - Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. - This action serves as a commitment or snapshot of the neuron's current weight distribution. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron committing the weights. - netuid (int): The unique identifier of the subnet. - salt (List[int]): list of randomly generated integers as salt to generated weighted hash. - uids (np.ndarray): NumPy array of neuron UIDs for which weights are being committed. - weights (np.ndarray): NumPy array of weight values corresponding to each UID. - version_key (int, optional): Version key for compatibility with the network. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - max_retries (int, optional): The number of maximum attempts to commit weights. (Default: 5) - - Returns: - Tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string - value describing the success or potential error. - - This function allows neurons to create a tamper-proof record of their weight distribution at a specific point in time, - enhancing transparency and accountability within the Bittensor network. 
- """ - retries = 0 - success = False - message = "No attempt made. Perhaps it is too soon to commit weights!" - - _logger.info( - "Committing weights with params: netuid={}, uids={}, weights={}, version_key={}".format( - netuid, uids, weights, version_key - ) - ) - - # Generate the hash of the weights - commit_hash = weight_utils.generate_weight_hash( - address=wallet.hotkey.ss58_address, - netuid=netuid, - uids=list(uids), - values=list(weights), - salt=salt, - version_key=version_key, - ) - - _logger.info("Commit Hash: {}".format(commit_hash)) - - while retries < max_retries: - try: - success, message = commit_weights_extrinsic( - subtensor=self, - wallet=wallet, - netuid=netuid, - commit_hash=commit_hash, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - if success: - break - except Exception as e: - bittensor.logging.error(f"Error committing weights: {e}") - finally: - retries += 1 - - return success, message - - @networking.ensure_connected - def _do_commit_weights( - self, - wallet: "bittensor.wallet", - netuid: int, - commit_hash: str, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - ) -> Tuple[bool, Optional[str]]: - """ - Internal method to send a transaction to the Bittensor blockchain, committing the hash of a neuron's weights. - This method constructs and submits the transaction, handling retries and blockchain communication. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron committing the weights. - netuid (int): The unique identifier of the subnet. - commit_hash (str): The hash of the neuron's weights to be committed. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - - Returns: - Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. 
- - This method ensures that the weight commitment is securely recorded on the Bittensor blockchain, providing a - verifiable record of the neuron's weight distribution at a specific point in time. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="commit_weights", - call_params={ - "netuid": netuid, - "commit_hash": commit_hash, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, - keypair=wallet.hotkey, - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return True, None - - response.process_events() - if response.is_success: - return True, None - else: - return False, response.error_message - - return make_substrate_call_with_retry() - - ################## - # Reveal Weights # - ################## - def reveal_weights( - self, - wallet: "bittensor.wallet", - netuid: int, - uids: Union[NDArray[np.int64], list], - weights: Union[NDArray[np.int64], list], - salt: Union[NDArray[np.int64], list], - version_key: int = bittensor.__version_as_int__, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, - max_retries: int = 5, - ) -> Tuple[bool, str]: - """ - Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. - This action serves as a revelation of the neuron's previously committed weight distribution. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron revealing the weights. - netuid (int): The unique identifier of the subnet. - uids (np.ndarray): NumPy array of neuron UIDs for which weights are being revealed. - weights (np.ndarray): NumPy array of weight values corresponding to each UID. 
- salt (np.ndarray): NumPy array of salt values corresponding to the hash function. - version_key (int, optional): Version key for compatibility with the network. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - max_retries (int, optional): The number of maximum attempts to reveal weights. (Default: 5) - - Returns: - Tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string - value describing the success or potential error. - - This function allows neurons to reveal their previously committed weight distribution, ensuring transparency - and accountability within the Bittensor network. - """ - - retries = 0 - success = False - message = "No attempt made. Perhaps it is too soon to reveal weights!" - - while retries < max_retries: - try: - success, message = reveal_weights_extrinsic( - subtensor=self, - wallet=wallet, - netuid=netuid, - uids=list(uids), - weights=list(weights), - salt=list(salt), - version_key=version_key, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - if success: - break - except Exception as e: - bittensor.logging.error(f"Error revealing weights: {e}") - finally: - retries += 1 - - return success, message - - @networking.ensure_connected - def _do_reveal_weights( - self, - wallet: "bittensor.wallet", - netuid: int, - uids: List[int], - values: List[int], - salt: List[int], - version_key: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - ) -> Tuple[bool, Optional[str]]: - """ - Internal method to send a transaction to the Bittensor blockchain, revealing the weights for a specific subnet. 
- This method constructs and submits the transaction, handling retries and blockchain communication. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron revealing the weights. - netuid (int): The unique identifier of the subnet. - uids (List[int]): List of neuron UIDs for which weights are being revealed. - values (List[int]): List of weight values corresponding to each UID. - salt (List[int]): List of salt values corresponding to the hash function. - version_key (int): Version key for compatibility with the network. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - - Returns: - Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. - - This method ensures that the weight revelation is securely recorded on the Bittensor blockchain, providing transparency - and accountability for the neuron's weight distribution. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="reveal_weights", - call_params={ - "netuid": netuid, - "uids": uids, - "values": values, - "salt": salt, - "version_key": version_key, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, - keypair=wallet.hotkey, - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return True, None - - response.process_events() - if response.is_success: - return True, None - else: - return False, format_error_message(response.error_message) - - return make_substrate_call_with_retry() - - ################ - # Registration # - ################ - def register( - self, - wallet: "bittensor.wallet", - netuid: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, - max_allowed_attempts: int = 3, - output_in_place: bool = True, - cuda: bool = False, - dev_id: Union[List[int], int] = 0, - tpb: int = 256, - num_processes: Optional[int] = None, - update_interval: Optional[int] = None, - log_verbose: bool = False, - ) -> bool: - """ - Registers a neuron on the Bittensor network using the provided wallet. Registration - is a critical step for a neuron to become an active participant in the network, enabling - it to stake, set weights, and receive incentives. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron to be registered. - netuid (int): The unique identifier of the subnet. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - Defaults to `False`. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - Defaults to `True`. 
- prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - max_allowed_attempts (int): Maximum number of attempts to register the wallet. - output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning - the progress is printed on the same lines. Defaults to `True`. - cuda (bool): If ``true``, the wallet should be registered using CUDA device(s). Defaults to `False`. - dev_id (Union[List[int], int]): The CUDA device id to use, or a list of device ids. Defaults to `0` (zero). - tpb (int): The number of threads per block (CUDA). Default to `256`. - num_processes (Optional[int]): The number of processes to use to register. Default to `None`. - update_interval (Optional[int]): The number of nonces to solve between updates. Default to `None`. - log_verbose (bool): If ``true``, the registration process will log more information. Default to `False`. - - Returns: - bool: ``True`` if the registration is successful, False otherwise. - - This function facilitates the entry of new neurons into the network, supporting the decentralized - growth and scalability of the Bittensor ecosystem. - """ - return register_extrinsic( - subtensor=self, - wallet=wallet, - netuid=netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - max_allowed_attempts=max_allowed_attempts, - output_in_place=output_in_place, - cuda=cuda, - dev_id=dev_id, - tpb=tpb, - num_processes=num_processes, - update_interval=update_interval, - log_verbose=log_verbose, - ) - - def swap_hotkey( - self, - wallet: "bittensor.wallet", - new_wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, - ) -> bool: - """ - Swaps an old hotkey with a new hotkey for the specified wallet. - - This method initiates an extrinsic to change the hotkey associated with a wallet to a new hotkey. 
It provides - options to wait for inclusion and finalization of the transaction, and to prompt the user for confirmation. - - Args: - wallet (bittensor.wallet): The wallet whose hotkey is to be swapped. - new_wallet (bittensor.wallet): The new wallet with the hotkey to be set. - wait_for_inclusion (bool): Whether to wait for the transaction to be included in a block. - Default is `False`. - wait_for_finalization (bool): Whether to wait for the transaction to be finalized. Default is `True`. - prompt (bool): Whether to prompt the user for confirmation before proceeding. Default is `False`. - - Returns: - bool: True if the hotkey swap was successful, False otherwise. - """ - return swap_hotkey_extrinsic( - subtensor=self, - wallet=wallet, - new_wallet=new_wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - def run_faucet( - self, - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, - max_allowed_attempts: int = 3, - output_in_place: bool = True, - cuda: bool = False, - dev_id: Union[List[int], int] = 0, - tpb: int = 256, - num_processes: Optional[int] = None, - update_interval: Optional[int] = None, - log_verbose: bool = False, - ) -> bool: - """ - Facilitates a faucet transaction, allowing new neurons to receive an initial amount of TAO - for participating in the network. This function is particularly useful for newcomers to the - Bittensor network, enabling them to start with a small stake on testnet only. - - Args: - wallet (bittensor.wallet): The wallet for which the faucet transaction is to be run. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - Defaults to `False`. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - Defaults to `True`. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. 
- max_allowed_attempts (int): Maximum number of attempts to register the wallet. - output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning - the progress is printed on the same lines. Defaults to `True`. - cuda (bool): If ``true``, the wallet should be registered using CUDA device(s). Defaults to `False`. - dev_id (Union[List[int], int]): The CUDA device id to use, or a list of device ids. Defaults to `0` (zero). - tpb (int): The number of threads per block (CUDA). Default to `256`. - num_processes (Optional[int]): The number of processes to use to register. Default to `None`. - update_interval (Optional[int]): The number of nonces to solve between updates. Default to `None`. - log_verbose (bool): If ``true``, the registration process will log more information. Default to `False`. - - Returns: - bool: ``True`` if the faucet transaction is successful, False otherwise. - - This function is part of Bittensor's onboarding process, ensuring that new neurons have - the necessary resources to begin their journey in the decentralized AI network. - - Note: - This is for testnet ONLY and is disabled currently. You must build your own staging subtensor chain with the - ``--features pow-faucet`` argument to enable this. - """ - result, _ = run_faucet_extrinsic( - subtensor=self, - wallet=wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - max_allowed_attempts=max_allowed_attempts, - output_in_place=output_in_place, - cuda=cuda, - dev_id=dev_id, - tpb=tpb, - num_processes=num_processes, - update_interval=update_interval, - log_verbose=log_verbose, - ) - return result - - def burned_register( - self, - wallet: "bittensor.wallet", - netuid: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, - ) -> bool: - """ - Registers a neuron on the Bittensor network by recycling TAO. 
This method of registration - involves recycling TAO tokens, allowing them to be re-mined by performing work on the network. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron to be registered. - netuid (int): The unique identifier of the subnet. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - Defaults to `False`. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - Defaults to `True`. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. Defaults to `False`. - - Returns: - bool: ``True`` if the registration is successful, False otherwise. - """ - return burned_register_extrinsic( - subtensor=self, - wallet=wallet, - netuid=netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - @networking.ensure_connected - def _do_pow_register( - self, - netuid: int, - wallet: "bittensor.wallet", - pow_result: POWSolution, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - """Sends a (POW) register extrinsic to the chain. - - Args: - netuid (int): The subnet to register on. - wallet (bittensor.wallet): The wallet to register. - pow_result (POWSolution): The PoW result to register. - wait_for_inclusion (bool): If ``True``, waits for the extrinsic to be included in a block. - Default to `False`. - wait_for_finalization (bool): If ``True``, waits for the extrinsic to be finalized. Default to `True`. - - Returns: - success (bool): ``True`` if the extrinsic was included in a block. - error (Optional[str]): ``None`` on success or not waiting for inclusion/finalization, otherwise the error - message. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - # create extrinsic call - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="register", - call_params={ - "netuid": netuid, - "block_number": pow_result.block_number, - "nonce": pow_result.nonce, - "work": [int(byte_) for byte_ in pow_result.seal], - "hotkey": wallet.hotkey.ss58_address, - "coldkey": wallet.coldkeypub.ss58_address, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.hotkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True, None - - # process if registration successful, try again if pow is still valid - response.process_events() - if not response.is_success: - return False, format_error_message(response.error_message) - # Successful registration - else: - return True, None - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def _do_burned_register( - self, - netuid: int, - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - """ - Performs a burned register extrinsic call to the Subtensor chain. - - This method sends a registration transaction to the Subtensor blockchain using the burned register mechanism. It - retries the call up to three times with exponential backoff in case of failures. - - Args: - netuid (int): The network unique identifier to register on. - wallet (bittensor.wallet): The wallet to be registered. - wait_for_inclusion (bool): Whether to wait for the transaction to be included in a block. Default is False. - wait_for_finalization (bool): Whether to wait for the transaction to be finalized. Default is True. 
- - Returns: - Tuple[bool, Optional[str]]: A tuple containing a boolean indicating success or failure, and an optional error message. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - # create extrinsic call - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="burned_register", - call_params={ - "netuid": netuid, - "hotkey": wallet.hotkey.ss58_address, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True, None - - # process if registration successful, try again if pow is still valid - response.process_events() - if not response.is_success: - return False, format_error_message(response.error_message) - # Successful registration - else: - return True, None - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def _do_swap_hotkey( - self, - wallet: "bittensor.wallet", - new_wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - """ - Performs a hotkey swap extrinsic call to the Subtensor chain. - - Args: - wallet (bittensor.wallet): The wallet whose hotkey is to be swapped. - new_wallet (bittensor.wallet): The wallet with the new hotkey to be set. - wait_for_inclusion (bool): Whether to wait for the transaction to be included in a block. Default is - `False`. - wait_for_finalization (bool): Whether to wait for the transaction to be finalized. Default is `True`. - - Returns: - Tuple[bool, Optional[str]]: A tuple containing a boolean indicating success or failure, and an optional - error message. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - # create extrinsic call - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="swap_hotkey", - call_params={ - "hotkey": wallet.hotkey.ss58_address, - "new_hotkey": new_wallet.hotkey.ss58_address, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True, None - - # process if registration successful, try again if pow is still valid - response.process_events() - if not response.is_success: - return False, format_error_message(response.error_message) - # Successful registration - else: - return True, None - - return make_substrate_call_with_retry() - - ############ - # Transfer # - ############ - def transfer( - self, - wallet: "bittensor.wallet", - dest: str, - amount: Union[Balance, float], - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Executes a transfer of funds from the provided wallet to the specified destination address. - This function is used to move TAO tokens within the Bittensor network, facilitating transactions - between neurons. - - Args: - wallet (bittensor.wallet): The wallet from which funds are being transferred. - dest (str): The destination public key address. - amount (Union[Balance, float]): The amount of TAO to be transferred. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. 
- - Returns: - transfer_extrinsic (bool): ``True`` if the transfer is successful, False otherwise. - - This function is essential for the fluid movement of tokens in the network, supporting - various economic activities such as staking, delegation, and reward distribution. - """ - return transfer_extrinsic( - subtensor=self, - wallet=wallet, - dest=dest, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - @networking.ensure_connected - def get_transfer_fee( - self, wallet: "bittensor.wallet", dest: str, value: Union["Balance", float, int] - ) -> "Balance": - """ - Calculates the transaction fee for transferring tokens from a wallet to a specified destination address. - This function simulates the transfer to estimate the associated cost, taking into account the current - network conditions and transaction complexity. - - Args: - wallet (bittensor.wallet): The wallet from which the transfer is initiated. - dest (str): The ``SS58`` address of the destination account. - value (Union[Balance, float, int]): The amount of tokens to be transferred, specified as a Balance object, - or in Tao (float) or Rao (int) units. - - Returns: - Balance: The estimated transaction fee for the transfer, represented as a Balance object. - - Estimating the transfer fee is essential for planning and executing token transactions, ensuring that the - wallet has sufficient funds to cover both the transfer amount and the associated costs. This function - provides a crucial tool for managing financial operations within the Bittensor network. 
- """ - if isinstance(value, float): - value = Balance.from_tao(value) - elif isinstance(value, int): - value = Balance.from_rao(value) - - if isinstance(value, Balance): - call = self.substrate.compose_call( - call_module="Balances", - call_function="transfer_allow_death", - call_params={"dest": dest, "value": value.rao}, - ) - - try: - payment_info = self.substrate.get_payment_info( - call=call, keypair=wallet.coldkeypub - ) - except Exception as e: - bittensor.__console__.print( - ":cross_mark: [red]Failed to get payment info[/red]:[bold white]\n {}[/bold white]".format( - e - ) - ) - payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao - - fee = Balance.from_rao(payment_info["partialFee"]) - return fee - else: - fee = Balance.from_rao(int(2e7)) - _logger.error( - "To calculate the transaction fee, the value must be Balance, float, or int. Received type: %s. Fee " - "is %s", - type(value), - 2e7, - ) - return fee - - @networking.ensure_connected - def _do_transfer( - self, - wallet: "bittensor.wallet", - dest: str, - transfer_balance: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> Tuple[bool, Optional[str], Optional[str]]: - """Sends a transfer extrinsic to the chain. - - Args: - wallet (:func:`bittensor.wallet`): Wallet object. - dest (str): Destination public key address. - transfer_balance (:func:`Balance`): Amount to transfer. - wait_for_inclusion (bool): If ``true``, waits for inclusion. - wait_for_finalization (bool): If ``true``, waits for finalization. - Returns: - success (bool): ``True`` if transfer was successful. - block_hash (str): Block hash of the transfer. On success and if wait_for_ finalization/inclusion is - ``True``. - error (str): Error message if transfer failed. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="Balances", - call_function="transfer_allow_death", - call_params={"dest": dest, "value": transfer_balance.rao}, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True, None, None - - # Otherwise continue with finalization. - response.process_events() - if response.is_success: - block_hash = response.block_hash - return True, block_hash, None - else: - return False, None, format_error_message(response.error_message) - - return make_substrate_call_with_retry() - - def get_existential_deposit( - self, block: Optional[int] = None - ) -> Optional["Balance"]: - """ - Retrieves the existential deposit amount for the Bittensor blockchain. The existential deposit - is the minimum amount of TAO required for an account to exist on the blockchain. Accounts with - balances below this threshold can be reaped to conserve network resources. - - Args: - block (Optional[int]): Block number at which to query the deposit amount. If ``None``, the current block is - used. - - Returns: - Optional[Balance]: The existential deposit amount, or ``None`` if the query fails. - - The existential deposit is a fundamental economic parameter in the Bittensor network, ensuring - efficient use of storage and preventing the proliferation of dust accounts. 
- """ - result = self.query_constant( - module_name="Balances", constant_name="ExistentialDeposit", block=block - ) - - if result is None or not hasattr(result, "value"): - return None - - return Balance.from_rao(result.value) - - ########### - # Network # - ########### - def register_subnetwork( - self, - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization=True, - prompt: bool = False, - ) -> bool: - """ - Registers a new subnetwork on the Bittensor network using the provided wallet. This function - is used for the creation and registration of subnetworks, which are specialized segments of the - overall Bittensor network. - - Args: - wallet (bittensor.wallet): The wallet to be used for registration. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the subnetwork registration is successful, False otherwise. - - This function allows for the expansion and diversification of the Bittensor network, supporting - its decentralized and adaptable architecture. - """ - return register_subnetwork_extrinsic( - self, - wallet=wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - def set_hyperparameter( - self, - wallet: "bittensor.wallet", - netuid: int, - parameter: str, - value, - wait_for_inclusion: bool = False, - wait_for_finalization=True, - prompt: bool = False, - ) -> bool: - """ - Sets a specific hyperparameter for a given subnetwork on the Bittensor blockchain. This action - involves adjusting network-level parameters, influencing the behavior and characteristics of the - subnetwork. - - Args: - wallet (bittensor.wallet): The wallet used for setting the hyperparameter. 
- netuid (int): The unique identifier of the subnetwork. - parameter (str): The name of the hyperparameter to be set. - value: The new value for the hyperparameter. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the hyperparameter setting is successful, False otherwise. - - This function plays a critical role in the dynamic governance and adaptability of the Bittensor - network, allowing for fine-tuning of network operations and characteristics. - """ - return set_hyperparameter_extrinsic( - self, - wallet=wallet, - netuid=netuid, - parameter=parameter, - value=value, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - ########### - # Serving # - ########### - def serve( - self, - wallet: "bittensor.wallet", - ip: str, - port: int, - protocol: int, - netuid: int, - placeholder1: int = 0, - placeholder2: int = 0, - wait_for_inclusion: bool = False, - wait_for_finalization=True, - ) -> bool: - """ - Registers a neuron's serving endpoint on the Bittensor network. This function announces the - IP address and port where the neuron is available to serve requests, facilitating peer-to-peer - communication within the network. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron being served. - ip (str): The IP address of the serving neuron. - port (int): The port number on which the neuron is serving. - protocol (int): The protocol type used by the neuron (e.g., GRPC, HTTP). - netuid (int): The unique identifier of the subnetwork. - placeholder1 (int, optional): Placeholder parameter for future extensions. Default is ``0``. - placeholder2 (int, optional): Placeholder parameter for future extensions. Default is ``0``. 
- wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Default is - ``False``. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Default - is ``True``. - - Returns: - bool: ``True`` if the serve registration is successful, False otherwise. - - This function is essential for establishing the neuron's presence in the network, enabling - it to participate in the decentralized machine learning processes of Bittensor. - """ - return serve_extrinsic( - self, - wallet, - ip, - port, - protocol, - netuid, - placeholder1, - placeholder2, - wait_for_inclusion, - wait_for_finalization, - ) - - def serve_axon( - self, - netuid: int, - axon: "bittensor.axon", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> bool: - """ - Registers an Axon serving endpoint on the Bittensor network for a specific neuron. This function - is used to set up the Axon, a key component of a neuron that handles incoming queries and data - processing tasks. - - Args: - netuid (int): The unique identifier of the subnetwork. - axon (bittensor.Axon): The Axon instance to be registered for serving. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - - Returns: - bool: ``True`` if the Axon serve registration is successful, False otherwise. - - By registering an Axon, the neuron becomes an active part of the network's distributed - computing infrastructure, contributing to the collective intelligence of Bittensor. 
- """ - return serve_axon_extrinsic( - self, netuid, axon, wait_for_inclusion, wait_for_finalization - ) - - @networking.ensure_connected - def _do_serve_axon( - self, - wallet: "bittensor.wallet", - call_params: AxonServeCallParams, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - """ - Internal method to submit a serve axon transaction to the Bittensor blockchain. This method - creates and submits a transaction, enabling a neuron's Axon to serve requests on the network. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron. - call_params (AxonServeCallParams): Parameters required for the serve axon call. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - - Returns: - Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. - - This function is crucial for initializing and announcing a neuron's Axon service on the network, - enhancing the decentralized computation capabilities of Bittensor. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="serve_axon", - call_params=call_params, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.hotkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - if wait_for_inclusion or wait_for_finalization: - response.process_events() - if response.is_success: - return True, None - else: - return False, format_error_message(response.error_message) - else: - return True, None - - return make_substrate_call_with_retry() - - def serve_prometheus( - self, - wallet: "bittensor.wallet", - port: int, - netuid: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> bool: - return prometheus_extrinsic( - self, - wallet=wallet, - port=port, - netuid=netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - @networking.ensure_connected - def _do_serve_prometheus( - self, - wallet: "bittensor.wallet", - call_params: PrometheusServeCallParams, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - """ - Sends a serve prometheus extrinsic to the chain. - Args: - wallet (:func:`bittensor.wallet`): Wallet object. - call_params (:func:`PrometheusServeCallParams`): Prometheus serve call parameters. - wait_for_inclusion (bool): If ``true``, waits for inclusion. - wait_for_finalization (bool): If ``true``, waits for finalization. - Returns: - success (bool): ``True`` if serve prometheus was successful. - error (:func:`Optional[str]`): Error message if serve prometheus failed, ``None`` otherwise. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="serve_prometheus", - call_params=call_params, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.hotkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - if wait_for_inclusion or wait_for_finalization: - response.process_events() - if response.is_success: - return True, None - else: - return False, format_error_message(response.error_message) - else: - return True, None - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def _do_associate_ips( - self, - wallet: "bittensor.wallet", - ip_info_list: List["IPInfo"], - netuid: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - """ - Sends an associate IPs extrinsic to the chain. - - Args: - wallet (:func:`bittensor.wallet`): Wallet object. - ip_info_list (:func:`List[IPInfo]`): List of IPInfo objects. - netuid (int): Netuid to associate IPs to. - wait_for_inclusion (bool): If ``true``, waits for inclusion. - wait_for_finalization (bool): If ``true``, waits for finalization. - - Returns: - success (bool): ``True`` if associate IPs was successful. - error (:func:`Optional[str]`): Error message if associate IPs failed, None otherwise. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="associate_ips", - call_params={ - "ip_info_list": [ip_info.encode() for ip_info in ip_info_list], - "netuid": netuid, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.hotkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - if wait_for_inclusion or wait_for_finalization: - response.process_events() - if response.is_success: - return True, None - else: - return False, response.error_message - else: - return True, None - - return make_substrate_call_with_retry() - - ########### - # Staking # - ########### - def add_stake( - self, - wallet: "bittensor.wallet", - hotkey_ss58: Optional[str] = None, - amount: Optional[Union["Balance", float]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Adds the specified amount of stake to a neuron identified by the hotkey ``SS58`` address. Staking - is a fundamental process in the Bittensor network that enables neurons to participate actively - and earn incentives. - - Args: - wallet (bittensor.wallet): The wallet to be used for staking. - hotkey_ss58 (Optional[str]): The ``SS58`` address of the hotkey associated with the neuron. - amount (Union[Balance, float]): The amount of TAO to stake. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the staking is successful, False otherwise. 
- - This function enables neurons to increase their stake in the network, enhancing their influence - and potential rewards in line with Bittensor's consensus and reward mechanisms. - """ - return add_stake_extrinsic( - subtensor=self, - wallet=wallet, - hotkey_ss58=hotkey_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - def add_stake_multiple( - self, - wallet: "bittensor.wallet", - hotkey_ss58s: List[str], - amounts: Optional[List[Union["Balance", float]]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Adds stakes to multiple neurons identified by their hotkey SS58 addresses. This bulk operation - allows for efficient staking across different neurons from a single wallet. - - Args: - wallet (bittensor.wallet): The wallet used for staking. - hotkey_ss58s (List[str]): List of ``SS58`` addresses of hotkeys to stake to. - amounts (List[Union[Balance, float]], optional): Corresponding amounts of TAO to stake for each hotkey. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the staking is successful for all specified neurons, False otherwise. - - This function is essential for managing stakes across multiple neurons, reflecting the dynamic - and collaborative nature of the Bittensor network. 
- """ - return add_stake_multiple_extrinsic( - self, - wallet, - hotkey_ss58s, - amounts, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) - - @networking.ensure_connected - def _do_stake( - self, - wallet: "bittensor.wallet", - hotkey_ss58: str, - amount: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - """Sends a stake extrinsic to the chain. - - Args: - wallet (:func:`bittensor.wallet`): Wallet object that can sign the extrinsic. - hotkey_ss58 (str): Hotkey ``ss58`` address to stake to. - amount (:func:`Balance`): Amount to stake. - wait_for_inclusion (bool): If ``true``, waits for inclusion before returning. - wait_for_finalization (bool): If ``true``, waits for finalization before returning. - Returns: - success (bool): ``True`` if the extrinsic was successful. - Raises: - StakeError: If the extrinsic failed. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="add_stake", - call_params={"hotkey": hotkey_ss58, "amount_staked": amount.rao}, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. 
- if not wait_for_finalization and not wait_for_inclusion: - return True - - response.process_events() - if response.is_success: - return True - else: - raise StakeError(format_error_message(response.error_message)) - - return make_substrate_call_with_retry() - - ############# - # Unstaking # - ############# - def unstake_multiple( - self, - wallet: "bittensor.wallet", - hotkey_ss58s: List[str], - amounts: Optional[List[Union["Balance", float]]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Performs batch unstaking from multiple hotkey accounts, allowing a neuron to reduce its staked amounts - efficiently. This function is useful for managing the distribution of stakes across multiple neurons. - - Args: - wallet (bittensor.wallet): The wallet linked to the coldkey from which the stakes are being withdrawn. - hotkey_ss58s (List[str]): A list of hotkey ``SS58`` addresses to unstake from. - amounts (List[Union[Balance, float]], optional): The amounts of TAO to unstake from each hotkey. If not - provided, unstakes all available stakes. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the batch unstaking is successful, False otherwise. - - This function allows for strategic reallocation or withdrawal of stakes, aligning with the dynamic - stake management aspect of the Bittensor network. 
- """ - return unstake_multiple_extrinsic( - self, - wallet, - hotkey_ss58s, - amounts, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) - - def unstake( - self, - wallet: "bittensor.wallet", - hotkey_ss58: Optional[str] = None, - amount: Optional[Union["Balance", float]] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Removes a specified amount of stake from a single hotkey account. This function is critical for adjusting - individual neuron stakes within the Bittensor network. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron from which the stake is being removed. - hotkey_ss58 (Optional[str]): The ``SS58`` address of the hotkey account to unstake from. - amount (Union[Balance, float], optional): The amount of TAO to unstake. If not specified, unstakes all. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the unstaking process is successful, False otherwise. - - This function supports flexible stake management, allowing neurons to adjust their network participation - and potential reward accruals. - """ - return unstake_extrinsic( - self, - wallet, - hotkey_ss58, - amount, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) - - @networking.ensure_connected - def _do_unstake( - self, - wallet: "bittensor.wallet", - hotkey_ss58: str, - amount: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - """Sends an unstake extrinsic to the chain. - - Args: - wallet (:func:`bittensor.wallet`): Wallet object that can sign the extrinsic. - hotkey_ss58 (str): Hotkey ``ss58`` address to unstake from. 
- amount (:func:`Balance`): Amount to unstake. - wait_for_inclusion (bool): If ``true``, waits for inclusion before returning. - wait_for_finalization (bool): If ``true``, waits for finalization before returning. - Returns: - success (bool): ``True`` if the extrinsic was successful. - Raises: - StakeError: If the extrinsic failed. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="remove_stake", - call_params={"hotkey": hotkey_ss58, "amount_unstaked": amount.rao}, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - - response.process_events() - if response.is_success: - return True - else: - raise StakeError(format_error_message(response.error_message)) - - return make_substrate_call_with_retry() - - ################### - # Child hotkeys # - ################### - - def set_childkey_take( - self, - wallet: "bittensor.wallet", - hotkey: str, - take: float, - netuid: int, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> tuple[bool, str]: - """Sets a childkey take extrinsic on the subnet. - - Args: - wallet (:func:`bittensor.wallet`): Wallet object that can sign the extrinsic. - hotkey: (str): Hotkey ``ss58`` address of the child for which take is getting set. - netuid (int): Unique identifier of for the subnet. - take (float): Value of childhotkey take on subnet. - wait_for_inclusion (bool): If ``true``, waits for inclusion before returning. - wait_for_finalization (bool): If ``true``, waits for finalization before returning. 
- prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - Returns: - success (bool): ``True`` if the extrinsic was successful. - Raises: - ChildHotkeyError: If the extrinsic failed. - """ - - return set_childkey_take_extrinsic( - self, - wallet=wallet, - hotkey=hotkey, - take=take, - netuid=netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - @networking.ensure_connected - def _do_set_childkey_take( - self, - wallet: "bittensor.wallet", - hotkey: str, - take: int, - netuid: int, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> tuple[bool, Optional[str]]: - """Sends a set_children hotkey extrinsic on the chain. - - Args: - wallet (:func:`bittensor.wallet`): Wallet object that can sign the extrinsic. - hotkey: (str): Hotkey ``ss58`` address of the wallet for which take is getting set. - take: (int): The take that this ss58 hotkey will have if assigned as a child hotkey as u16 value. - netuid (int): Unique identifier for the network. - wait_for_inclusion (bool): If ``true``, waits for inclusion before returning. - wait_for_finalization (bool): If ``true``, waits for finalization before returning. - Returns: - success (bool): ``True`` if the extrinsic was successful. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - # create extrinsic call - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="set_childkey_take", - call_params={ - "hotkey": hotkey, - "take": take, - "netuid": netuid, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return True, None - - response.process_events() - if not response.is_success: - return False, format_error_message(response.error_message) - else: - return True, None - - return make_substrate_call_with_retry() - - def set_children( - self, - wallet: "bittensor.wallet", - hotkey: str, - children_with_proportions: List[Tuple[float, str]], - netuid: int, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> tuple[bool, str]: - """Sets a children hotkeys extrinsic on the subnet. - - Args: - wallet (:func:`bittensor.wallet`): Wallet object that can sign the extrinsic. - hotkey: (str): Hotkey ``ss58`` address of the parent. - netuid (int): Unique identifier of for the subnet. - children_with_proportions (List[Tuple[float, str]]): List of (proportion, child_ss58) pairs. - wait_for_inclusion (bool): If ``true``, waits for inclusion before returning. - wait_for_finalization (bool): If ``true``, waits for finalization before returning. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - Returns: - success (bool): ``True`` if the extrinsic was successful. - Raises: - ChildHotkeyError: If the extrinsic failed. 
- """ - - return set_children_extrinsic( - self, - wallet=wallet, - hotkey=hotkey, - children_with_proportions=children_with_proportions, - netuid=netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - @networking.ensure_connected - def _do_set_children( - self, - wallet: "bittensor.wallet", - hotkey: str, - children: List[Tuple[int, str]], - netuid: int, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> tuple[bool, Optional[str]]: - """Sends a set_children hotkey extrinsic on the chain. - - Args: - wallet (:func:`bittensor.wallet`): Wallet object that can sign the extrinsic. - hotkey: (str): Hotkey ``ss58`` address of the parent. - children: (List[Tuple[int, str]]): A list of tuples containing the hotkey ``ss58`` addresses of the children and their proportions as u16 MAX standardized values. - netuid (int): Unique identifier for the network. - wait_for_inclusion (bool): If ``true``, waits for inclusion before returning. - wait_for_finalization (bool): If ``true``, waits for finalization before returning. - Returns: - success (bool): ``True`` if the extrinsic was successful. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - # create extrinsic call - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="set_children", - call_params={ - "hotkey": hotkey, - "children": children, - "netuid": netuid, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return True, None - - response.process_events() - if not response.is_success: - return False, format_error_message(response.error_message) - else: - return True, None - - return make_substrate_call_with_retry() - - ################## - # Coldkey Swap # - ################## - - def check_in_arbitration(self, ss58_address: str) -> int: - """ - Checks storage function to see if the provided coldkey is in arbitration. - If 0, `swap` has not been called on this key. If 1, swap has been called once, so - the key is not in arbitration. If >1, `swap` has been called with multiple destinations, and - the key is thus in arbitration. - """ - return self.query_module( - "SubtensorModule", "ColdkeySwapDestinations", params=[ss58_address] - ).decode() - - def get_remaining_arbitration_period( - self, coldkey_ss58: str, block: Optional[int] = None - ) -> Optional[int]: - """ - Retrieves the remaining arbitration period for a given coldkey. - Args: - coldkey_ss58 (str): The SS58 address of the coldkey. - block (Optional[int], optional): The block number to query. If None, uses the latest block. - Returns: - Optional[int]: The remaining arbitration period in blocks, or 0 if not found. 
- """ - arbitration_block = self.query_subtensor( - name="ColdkeyArbitrationBlock", - block=block, - params=[coldkey_ss58], - ) - - if block is None: - block = self.block - - if arbitration_block.value > block: - return arbitration_block.value - block - else: - return 0 - - ########## - # Senate # - ########## - - def register_senate( - self, - wallet: "bittensor.wallet", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Removes a specified amount of stake from a single hotkey account. This function is critical for adjusting - individual neuron stakes within the Bittensor network. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron from which the stake is being removed. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the unstaking process is successful, False otherwise. - - This function supports flexible stake management, allowing neurons to adjust their network participation - and potential reward accruals. - """ - return register_senate_extrinsic( - self, wallet, wait_for_inclusion, wait_for_finalization, prompt - ) - - def leave_senate( - self, - wallet: "bittensor.wallet", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Removes a specified amount of stake from a single hotkey account. This function is critical for adjusting - individual neuron stakes within the Bittensor network. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron from which the stake is being removed. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. 
- wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the unstaking process is successful, False otherwise. - - This function supports flexible stake management, allowing neurons to adjust their network participation - and potential reward accruals. - """ - return leave_senate_extrinsic( - self, wallet, wait_for_inclusion, wait_for_finalization, prompt - ) - - def vote_senate( - self, - wallet: "bittensor.wallet", - proposal_hash: str, - proposal_idx: int, - vote: bool, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Removes a specified amount of stake from a single hotkey account. This function is critical for adjusting - individual neuron stakes within the Bittensor network. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron from which the stake is being removed. - proposal_hash (str): The hash of the proposal being voted on. - proposal_idx (int): The index of the proposal being voted on. - vote (bool): The vote to be cast (True for yes, False for no). - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the unstaking process is successful, False otherwise. - - This function supports flexible stake management, allowing neurons to adjust their network participation - and potential reward accruals. 
- """ - return vote_senate_extrinsic( - self, - wallet, - proposal_hash, - proposal_idx, - vote, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) - - def is_senate_member(self, hotkey_ss58: str, block: Optional[int] = None) -> bool: - """ - Checks if a given neuron (identified by its hotkey SS58 address) is a member of the Bittensor senate. - The senate is a key governance body within the Bittensor network, responsible for overseeing and - approving various network operations and proposals. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - block (Optional[int]): The blockchain block number at which to check senate membership. - - Returns: - bool: ``True`` if the neuron is a senate member at the given block, False otherwise. - - This function is crucial for understanding the governance dynamics of the Bittensor network and for - identifying the neurons that hold decision-making power within the network. - """ - senate_members = self.query_module( - module="SenateMembers", name="Members", block=block - ) - if not hasattr(senate_members, "serialize"): - return False - senate_members_serialized = senate_members.serialize() - - if not hasattr(senate_members_serialized, "count"): - return False - - return senate_members_serialized.count(hotkey_ss58) > 0 - - def get_vote_data( - self, proposal_hash: str, block: Optional[int] = None - ) -> Optional[ProposalVoteData]: - """ - Retrieves the voting data for a specific proposal on the Bittensor blockchain. This data includes - information about how senate members have voted on the proposal. - - Args: - proposal_hash (str): The hash of the proposal for which voting data is requested. - block (Optional[int]): The blockchain block number to query the voting data. - - Returns: - Optional[ProposalVoteData]: An object containing the proposal's voting data, or ``None`` if not found. 
- - This function is important for tracking and understanding the decision-making processes within - the Bittensor network, particularly how proposals are received and acted upon by the governing body. - """ - vote_data = self.query_module( - module="Triumvirate", name="Voting", block=block, params=[proposal_hash] - ) - if not hasattr(vote_data, "serialize"): - return None - return vote_data.serialize() if vote_data is not None else None - - get_proposal_vote_data = get_vote_data - - def get_senate_members(self, block: Optional[int] = None) -> Optional[List[str]]: - """ - Retrieves the list of current senate members from the Bittensor blockchain. Senate members are - responsible for governance and decision-making within the network. - - Args: - block (Optional[int]): The blockchain block number at which to retrieve the senate members. - - Returns: - Optional[List[str]]: A list of ``SS58`` addresses of current senate members, or ``None`` if not available. - - Understanding the composition of the senate is key to grasping the governance structure and - decision-making authority within the Bittensor network. - """ - senate_members = self.query_module("SenateMembers", "Members", block=block) - if not hasattr(senate_members, "serialize"): - return None - return senate_members.serialize() if senate_members is not None else None - - def get_proposal_call_data( - self, proposal_hash: str, block: Optional[int] = None - ) -> Optional["GenericCall"]: - """ - Retrieves the call data of a specific proposal on the Bittensor blockchain. This data provides - detailed information about the proposal, including its purpose and specifications. - - Args: - proposal_hash (str): The hash of the proposal. - block (Optional[int]): The blockchain block number at which to query the proposal call data. - - Returns: - Optional[GenericCall]: An object containing the proposal's call data, or ``None`` if not found. 
- - This function is crucial for analyzing the types of proposals made within the network and the - specific changes or actions they intend to implement or address. - """ - proposal_data = self.query_module( - module="Triumvirate", name="ProposalOf", block=block, params=[proposal_hash] - ) - if not hasattr(proposal_data, "serialize"): - return None - - return proposal_data.serialize() if proposal_data is not None else None - - def get_proposal_hashes(self, block: Optional[int] = None) -> Optional[List[str]]: - """ - Retrieves the list of proposal hashes currently present on the Bittensor blockchain. Each hash - uniquely identifies a proposal made within the network. - - Args: - block (Optional[int]): The blockchain block number to query the proposal hashes. - - Returns: - Optional[List[str]]: A list of proposal hashes, or ``None`` if not available. - - This function enables tracking and reviewing the proposals made in the network, offering insights - into the active governance and decision-making processes. - """ - proposal_hashes = self.query_module( - module="Triumvirate", name="Proposals", block=block - ) - if not hasattr(proposal_hashes, "serialize"): - return None - - return proposal_hashes.serialize() if proposal_hashes is not None else None - - def get_proposals( - self, block: Optional[int] = None - ) -> Optional[Dict[str, Tuple["GenericCall", "ProposalVoteData"]]]: - """ - Retrieves all active proposals on the Bittensor blockchain, along with their call and voting data. - This comprehensive view allows for a thorough understanding of the proposals and their reception - by the senate. - - Args: - block (Optional[int]): The blockchain block number to query the proposals. - - Returns: - Optional[Dict[str, Tuple[bittensor.ProposalCallData, bittensor.ProposalVoteData]]]: A dictionary mapping - proposal hashes to their corresponding call and vote data, or ``None`` if not available. 
- - This function is integral for analyzing the governance activity on the Bittensor network, - providing a holistic view of the proposals and their impact or potential changes within the network. - """ - proposal_hashes: Optional[List[str]] = self.get_proposal_hashes(block=block) - if proposal_hashes is None: - return None - return { - proposal_hash: ( # type: ignore - self.get_proposal_call_data(proposal_hash, block=block), - self.get_proposal_vote_data(proposal_hash, block=block), - ) - for proposal_hash in proposal_hashes - } - - ######## - # Root # - ######## - - def root_register( - self, - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - prompt: bool = False, - ) -> bool: - """ - Registers the neuron associated with the wallet on the root network. This process is integral for - participating in the highest layer of decision-making and governance within the Bittensor network. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron to be registered on the root network. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the registration on the root network is successful, False otherwise. - - This function enables neurons to engage in the most critical and influential aspects of the network's - governance, signifying a high level of commitment and responsibility in the Bittensor ecosystem. 
- """ - return root_register_extrinsic( - subtensor=self, - wallet=wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - @networking.ensure_connected - def _do_root_register( - self, - wallet: "bittensor.wallet", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> Tuple[bool, Optional[str]]: - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - # create extrinsic call - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="root_register", - call_params={"hotkey": wallet.hotkey.ss58_address}, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - - # process if registration successful, try again if pow is still valid - response.process_events() - if not response.is_success: - return False, format_error_message(response.error_message) - # Successful registration - else: - return True, None - - return make_substrate_call_with_retry() - - @legacy_torch_api_compat - def root_set_weights( - self, - wallet: "bittensor.wallet", - netuids: Union[NDArray[np.int64], "torch.LongTensor", list], - weights: Union[NDArray[np.float32], "torch.FloatTensor", list], - version_key: int = 0, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, - ) -> bool: - """ - Sets the weights for neurons on the root network. This action is crucial for defining the influence - and interactions of neurons at the root level of the Bittensor network. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron setting the weights. 
- netuids (Union[NDArray[np.int64], torch.LongTensor, list]): The list of neuron UIDs for which weights are - being set. - weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The corresponding weights to be set for each - UID. - version_key (int, optional): Version key for compatibility with the network. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. - - Returns: - bool: ``True`` if the setting of root-level weights is successful, False otherwise. - - This function plays a pivotal role in shaping the root network's collective intelligence and decision-making - processes, reflecting the principles of decentralized governance and collaborative learning in Bittensor. - """ - return set_root_weights_extrinsic( - subtensor=self, - wallet=wallet, - netuids=netuids, - weights=weights, - version_key=version_key, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - @networking.ensure_connected - def _do_set_root_weights( - self, - wallet: "bittensor.wallet", - uids: List[int], - vals: List[int], - netuid: int = 0, - version_key: int = bittensor.__version_as_int__, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - ) -> Tuple[bool, Optional[str]]: # (success, error_message) - """ - Internal method to send a transaction to the Bittensor blockchain, setting weights - for specified neurons on root. This method constructs and submits the transaction, handling - retries and blockchain communication. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron setting the weights. - uids (List[int]): List of neuron UIDs for which weights are being set. - vals (List[int]): List of weight values corresponding to each UID. 
- netuid (int): Unique identifier for the network. - version_key (int, optional): Version key for compatibility with the network. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - - Returns: - Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. - - This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their - trust in other neurons based on observed performance and contributions on the root network. - """ - - @retry(delay=2, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="set_root_weights", - call_params={ - "dests": uids, - "weights": vals, - "netuid": netuid, - "version_key": version_key, - "hotkey": wallet.hotkey.ss58_address, - }, - ) - # Period dictates how long the extrinsic will stay as part of waiting pool - extrinsic = self.substrate.create_signed_extrinsic( - call=call, - keypair=wallet.coldkey, - era={"period": 5}, - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True, "Not waiting for finalziation or inclusion." - - response.process_events() - if response.is_success: - return True, "Successfully set weights." - else: - return False, response.error_message - - return make_substrate_call_with_retry() - - ################## - # Registry Calls # - ################## - - # Queries subtensor registry named storage with params and block. 
- @networking.ensure_connected - def query_identity( - self, - key: str, - block: Optional[int] = None, - ) -> dict: - """ - Queries the identity of a neuron on the Bittensor blockchain using the given key. This function retrieves - detailed identity information about a specific neuron, which is a crucial aspect of the network's decentralized - identity and governance system. - - NOTE: - See the `Bittensor CLI documentation `_ for supported identity - parameters. - - Args: - key (str): The key used to query the neuron's identity, typically the neuron's ``SS58`` address. - block (Optional[int]): The blockchain block number at which to perform the query. - - Returns: - result (dict): An object containing the identity information of the neuron if found, ``None`` otherwise. - - The identity information can include various attributes such as the neuron's stake, rank, and other - network-specific details, providing insights into the neuron's role and status within the Bittensor network. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry() -> "ScaleType": - return self.substrate.query( - module="Registry", - storage_function="IdentityOf", - params=[key], - block_hash=( - None if block is None else self.substrate.get_block_hash(block) - ), - ) - - identity_info = make_substrate_call_with_retry() - - return bittensor.utils.wallet_utils.decode_hex_identity_dict( - identity_info.value["info"] - ) - - @networking.ensure_connected - def update_identity( - self, - wallet: "bittensor.wallet", - identified: Optional[str] = None, - params: Optional[dict] = None, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - """ - Updates the identity of a neuron on the Bittensor blockchain. This function allows neurons to modify their - identity attributes, reflecting changes in their roles, stakes, or other network-specific parameters. 
- - NOTE: - See the `Bittensor CLI documentation `_ for supported identity - parameters. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron whose identity is being updated. - identified (str, optional): The identified ``SS58`` address of the neuron. Defaults to the wallet's coldkey - address. - params (dict, optional): A dictionary of parameters to update in the neuron's identity. - wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. - wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. - - Returns: - bool: ``True`` if the identity update is successful, False otherwise. - - This function plays a vital role in maintaining the accuracy and currency of neuron identities in the - Bittensor network, ensuring that the network's governance and consensus mechanisms operate effectively. - """ - if identified is None: - identified = wallet.coldkey.ss58_address - - params = {} if params is None else params - - call_params = bittensor.utils.wallet_utils.create_identity_dict(**params) - call_params["identified"] = identified - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry() -> bool: - call = self.substrate.compose_call( - call_module="Registry", - call_function="set_identity", - call_params=call_params, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - response.process_events() - if response.is_success: - return True - else: - raise IdentityError(response.error_message) - - return make_substrate_call_with_retry() - - # Make some commitment on-chain about arbitrary data. 
- def commit(self, wallet, netuid: int, data: str): - """ - Commits arbitrary data to the Bittensor network by publishing metadata. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron committing the data. - netuid (int): The unique identifier of the subnetwork. - data (str): The data to be committed to the network. - """ - publish_metadata(self, wallet, netuid, f"Raw{len(data)}", data.encode()) - - def get_commitment(self, netuid: int, uid: int, block: Optional[int] = None) -> str: - """ - Retrieves the on-chain commitment for a specific neuron in the Bittensor network. - - Args: - netuid (int): The unique identifier of the subnetwork. - uid (int): The unique identifier of the neuron. - block (Optional[int]): The block number to retrieve the commitment from. If None, the latest block - is used. Default is ``None``. - - Returns: - str: The commitment data as a string. - """ - metagraph = self.metagraph(netuid) - hotkey = metagraph.hotkeys[uid] # type: ignore - - metadata = get_metadata(self, netuid, hotkey, block) - commitment = metadata["info"]["fields"][0] # type: ignore - hex_data = commitment[list(commitment.keys())[0]][2:] # type: ignore - - return bytes.fromhex(hex_data).decode() - - ################## - # Standard Calls # - ################## - - # Queries subtensor named storage with params and block. - @networking.ensure_connected - def query_subtensor( - self, - name: str, - block: Optional[int] = None, - params: Optional[list] = None, - ) -> "ScaleType": - """ - Queries named storage from the Subtensor module on the Bittensor blockchain. This function is used to retrieve - specific data or parameters from the blockchain, such as stake, rank, or other neuron-specific attributes. - - Args: - name (str): The name of the storage function to query. - block (Optional[int]): The blockchain block number at which to perform the query. - params (Optional[List[object]], optional): A list of parameters to pass to the query function. 
- - Returns: - query_response (ScaleType): An object containing the requested data. - - This query function is essential for accessing detailed information about the network and its neurons, - providing valuable insights into the state and dynamics of the Bittensor ecosystem. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry() -> "ScaleType": - return self.substrate.query( - module="SubtensorModule", - storage_function=name, - params=params, - block_hash=( - None if block is None else self.substrate.get_block_hash(block) - ), - ) - - return make_substrate_call_with_retry() - - # Queries subtensor map storage with params and block. - @networking.ensure_connected - def query_map_subtensor( - self, - name: str, - block: Optional[int] = None, - params: Optional[list] = None, - ) -> "QueryMapResult": - """ - Queries map storage from the Subtensor module on the Bittensor blockchain. This function is designed to - retrieve a map-like data structure, which can include various neuron-specific details or network-wide - attributes. - - Args: - name (str): The name of the map storage function to query. - block (Optional[int]): The blockchain block number at which to perform the query. - params (Optional[List[object]], optional): A list of parameters to pass to the query function. - - Returns: - QueryMapResult: An object containing the map-like data structure, or ``None`` if not found. - - This function is particularly useful for analyzing and understanding complex network structures and - relationships within the Bittensor ecosystem, such as inter-neuronal connections and stake distributions. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - return self.substrate.query_map( - module="SubtensorModule", - storage_function=name, - params=params, - block_hash=( - None if block is None else self.substrate.get_block_hash(block) - ), - ) - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def query_constant( - self, module_name: str, constant_name: str, block: Optional[int] = None - ) -> Optional["ScaleType"]: - """ - Retrieves a constant from the specified module on the Bittensor blockchain. This function is used to - access fixed parameters or values defined within the blockchain's modules, which are essential for - understanding the network's configuration and rules. - - Args: - module_name (str): The name of the module containing the constant. - constant_name (str): The name of the constant to retrieve. - block (Optional[int]): The blockchain block number at which to query the constant. - - Returns: - Optional[ScaleType]: The value of the constant if found, ``None`` otherwise. - - Constants queried through this function can include critical network parameters such as inflation rates, - consensus rules, or validation thresholds, providing a deeper understanding of the Bittensor network's - operational parameters. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - return self.substrate.get_constant( - module_name=module_name, - constant_name=constant_name, - block_hash=( - None if block is None else self.substrate.get_block_hash(block) - ), - ) - - return make_substrate_call_with_retry() - - # Queries any module storage with params and block. 
- @networking.ensure_connected - def query_module( - self, - module: str, - name: str, - block: Optional[int] = None, - params: Optional[list] = None, - ) -> "ScaleType": - """ - Queries any module storage on the Bittensor blockchain with the specified parameters and block number. - This function is a generic query interface that allows for flexible and diverse data retrieval from - various blockchain modules. - - Args: - module (str): The name of the module from which to query data. - name (str): The name of the storage function within the module. - block (Optional[int]): The blockchain block number at which to perform the query. - params (Optional[List[object]], optional): A list of parameters to pass to the query function. - - Returns: - Optional[ScaleType]: An object containing the requested data if found, ``None`` otherwise. - - This versatile query function is key to accessing a wide range of data and insights from different - parts of the Bittensor blockchain, enhancing the understanding and analysis of the network's state and dynamics. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry() -> "ScaleType": - return self.substrate.query( - module=module, - storage_function=name, - params=params, - block_hash=( - None if block is None else self.substrate.get_block_hash(block) - ), - ) - - return make_substrate_call_with_retry() - - # Queries any module map storage with params and block. - @networking.ensure_connected - def query_map( - self, - module: str, - name: str, - block: Optional[int] = None, - params: Optional[list] = None, - ) -> QueryMapResult: - """ - Queries map storage from any module on the Bittensor blockchain. This function retrieves data structures - that represent key-value mappings, essential for accessing complex and structured data within the blockchain - modules. - - Args: - module (str): The name of the module from which to query the map storage. 
- name (str): The specific storage function within the module to query. - block (Optional[int]): The blockchain block number at which to perform the query. - params (Optional[List[object]], optional): Parameters to be passed to the query. - - Returns: - result (QueryMapResult): A data structure representing the map storage if found, ``None`` otherwise. - - This function is particularly useful for retrieving detailed and structured data from various blockchain - modules, offering insights into the network's state and the relationships between its different components. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry() -> "QueryMapResult": - return self.substrate.query_map( - module=module, - storage_function=name, - params=params, - block_hash=( - None if block is None else self.substrate.get_block_hash(block) - ), - ) - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def state_call( - self, - method: str, - data: str, - block: Optional[int] = None, - ) -> Dict[Any, Any]: - """ - Makes a state call to the Bittensor blockchain, allowing for direct queries of the blockchain's state. - This function is typically used for advanced queries that require specific method calls and data inputs. - - Args: - method (str): The method name for the state call. - data (str): The data to be passed to the method. - block (Optional[int]): The blockchain block number at which to perform the state call. - - Returns: - result (Dict[Any, Any]): The result of the rpc call. - - The state call function provides a more direct and flexible way of querying blockchain data, - useful for specific use cases where standard queries are insufficient. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry() -> Dict[Any, Any]: - block_hash = None if block is None else self.substrate.get_block_hash(block) - - return self.substrate.rpc_request( - method="state_call", - params=[method, data, block_hash] if block_hash else [method, data], - ) - - return make_substrate_call_with_retry() - - def query_runtime_api( - self, - runtime_api: str, - method: str, - params: Optional[Union[List[int], Dict[str, int]]], - block: Optional[int] = None, - ) -> Optional[str]: - """ - Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying - runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users - who need to interact with specific runtime methods and decode complex data types. - - Args: - runtime_api (str): The name of the runtime API to query. - method (str): The specific method within the runtime API to call. - params (Optional[List[ParamWithTypes]], optional): The parameters to pass to the method call. - block (Optional[int]): The blockchain block number at which to perform the query. - - Returns: - Optional[bytes]: The Scale Bytes encoded result from the runtime API call, or ``None`` if the call fails. - - This function enables access to the deeper layers of the Bittensor blockchain, allowing for detailed - and specific interactions with the network's runtime environment. 
- """ - call_definition = bittensor.__type_registry__["runtime_api"][runtime_api][ # type: ignore - "methods" # type: ignore - ][method] # type: ignore - - json_result = self.state_call( - method=f"{runtime_api}_{method}", - data=( - "0x" - if params is None - else self._encode_params(call_definition=call_definition, params=params) - ), - block=block, - ) - - if json_result is None: - return None - - return_type = call_definition["type"] - - as_scale_bytes = scalecodec.ScaleBytes(json_result["result"]) # type: ignore - - rpc_runtime_config = RuntimeConfiguration() - rpc_runtime_config.update_type_registry(load_type_registry_preset("legacy")) - rpc_runtime_config.update_type_registry(custom_rpc_type_registry) - - obj = rpc_runtime_config.create_scale_object(return_type, as_scale_bytes) - if obj.data.to_hex() == "0x0400": # RPC returned None result - return None - - return obj.decode() - - @networking.ensure_connected - def _encode_params( - self, - call_definition: List["ParamWithTypes"], - params: Union[List[Any], Dict[str, Any]], - ) -> str: - """Returns a hex encoded string of the params using their types.""" - param_data = scalecodec.ScaleBytes(b"") - - for i, param in enumerate(call_definition["params"]): # type: ignore - scale_obj = self.substrate.create_scale_object(param["type"]) - if type(params) is list: - param_data += scale_obj.encode(params[i]) - else: - if param["name"] not in params: - raise ValueError(f"Missing param {param['name']} in params dict.") - - param_data += scale_obj.encode(params[param["name"]]) - - return param_data.to_hex() - - ########################## - # Hyper parameter calls. # - ########################## - - def _get_hyperparameter( - self, param_name: str, netuid: int, block: Optional[int] = None - ) -> Optional[Any]: - """ - Retrieves a specified hyperparameter for a specific subnet. - - Args: - param_name (str): The name of the hyperparameter to retrieve. - netuid (int): The unique identifier of the subnet. 
- block (Optional[int]): The blockchain block number for the query. - - Returns: - Optional[Union[int, float]]: The value of the specified hyperparameter if the subnet exists, ``None`` - otherwise. - """ - if not self.subnet_exists(netuid, block): - return None - - result = self.query_subtensor(param_name, block, [netuid]) - if result is None or not hasattr(result, "value"): - return None - - return result.value - - def rho(self, netuid: int, block: Optional[int] = None) -> Optional[int]: - """ - Retrieves the 'Rho' hyperparameter for a specified subnet within the Bittensor network. 'Rho' represents the - global inflation rate, which directly influences the network's token emission rate and economic model. - - Note: - This is currently fixed such that the Bittensor blockchain emmits 7200 Tao per day. - - Args: - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number at which to query the parameter. - - Returns: - Optional[int]: The value of the 'Rho' hyperparameter if the subnet exists, ``None`` otherwise. - - Mathematical Context: - Rho (p) is calculated based on the network's target inflation and actual neuron staking. - It adjusts the emission rate of the TAO token to balance the network's economy and dynamics. - The formula for Rho is defined as: p = (Staking_Target / Staking_Actual) * Inflation_Target. - Here, Staking_Target and Staking_Actual represent the desired and actual total stakes in the network, - while Inflation_Target is the predefined inflation rate goal. - - 'Rho' is essential for understanding the network's economic dynamics, affecting the reward distribution - and incentive structures across the network's neurons. - """ - call = self._get_hyperparameter(param_name="Rho", netuid=netuid, block=block) - return None if call is None else int(call) - - def kappa(self, netuid: int, block: Optional[int] = None) -> Optional[float]: - """ - Retrieves the 'Kappa' hyperparameter for a specified subnet. 
'Kappa' is a critical parameter in - the Bittensor network that controls the distribution of stake weights among neurons, impacting their - rankings and incentive allocations. - - Args: - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number for the query. - - Returns: - Optional[float]: The value of the 'Kappa' hyperparameter if the subnet exists, None otherwise. - - Mathematical Context: - Kappa (Îș) is used in the calculation of neuron ranks, which determine their share of network incentives. - It is derived from the softmax function applied to the inter-neuronal weights set by each neuron. - The formula for Kappa is: Îș_i = exp(w_i) / ÎŁ(exp(w_j)), where w_i represents the weight set by neuron i, - and the denominator is the sum of exponential weights set by all neurons. - This mechanism ensures a normalized and probabilistic distribution of ranks based on relative weights. - - Understanding 'Kappa' is crucial for analyzing stake dynamics and the consensus mechanism within the network, - as it plays a significant role in neuron ranking and incentive allocation processes. - """ - call = self._get_hyperparameter(param_name="Kappa", netuid=netuid, block=block) - return None if call is None else U16_NORMALIZED_FLOAT(int(call)) - - def difficulty(self, netuid: int, block: Optional[int] = None) -> Optional[int]: - """ - Retrieves the 'Difficulty' hyperparameter for a specified subnet in the Bittensor network. - This parameter is instrumental in determining the computational challenge required for neurons - to participate in consensus and validation processes. - - Args: - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number for the query. - - Returns: - Optional[int]: The value of the 'Difficulty' hyperparameter if the subnet exists, ``None`` otherwise. 
- - The 'Difficulty' parameter directly impacts the network's security and integrity by setting the - computational effort required for validating transactions and participating in the network's consensus - mechanism. - """ - call = self._get_hyperparameter( - param_name="Difficulty", netuid=netuid, block=block - ) - if call is None: - return None - return int(call) - - def recycle(self, netuid: int, block: Optional[int] = None) -> Optional["Balance"]: - """ - Retrieves the 'Burn' hyperparameter for a specified subnet. The 'Burn' parameter represents the - amount of Tao that is effectively recycled within the Bittensor network. - - Args: - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number for the query. - - Returns: - Optional[Balance]: The value of the 'Burn' hyperparameter if the subnet exists, None otherwise. - - Understanding the 'Burn' rate is essential for analyzing the network registration usage, particularly - how it is correlated with user activity and the overall cost of participation in a given subnet. - """ - call = self._get_hyperparameter(param_name="Burn", netuid=netuid, block=block) - return None if call is None else Balance.from_rao(int(call)) - - # Returns network ImmunityPeriod hyper parameter. - def immunity_period( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Retrieves the 'ImmunityPeriod' hyperparameter for a specific subnet. This parameter defines the - duration during which new neurons are protected from certain network penalties or restrictions. - - Args: - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number for the query. - - Returns: - Optional[int]: The value of the 'ImmunityPeriod' hyperparameter if the subnet exists, ``None`` otherwise. 
- - The 'ImmunityPeriod' is a critical aspect of the network's governance system, ensuring that new - participants have a grace period to establish themselves and contribute to the network without facing - immediate punitive actions. - """ - call = self._get_hyperparameter( - param_name="ImmunityPeriod", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def validator_batch_size( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Returns network ValidatorBatchSize hyper parameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int]): The block number to retrieve the parameter from. If None, the latest block - is used. Default is ``None``. - - Returns: - Optional[int]: The value of the ValidatorBatchSize hyperparameter, or None if the subnetwork does not exist - or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="ValidatorBatchSize", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def validator_prune_len( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Returns network ValidatorPruneLen hyper parameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int]): The block number to retrieve the parameter from. If None, the latest block - is used. Default is ``None``. - - Returns: - Optional[int]: The value of the ValidatorPruneLen hyperparameter, or None if the subnetwork does not exist - or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="ValidatorPruneLen", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def validator_logits_divergence( - self, netuid: int, block: Optional[int] = None - ) -> Optional[float]: - """ - Returns network ValidatorLogitsDivergence hyper parameter. - - Args: - netuid (int): The unique identifier of the subnetwork. 
- block (Optional[int]): The block number to retrieve the parameter from. If None, the latest block - is used. Default is ``None``. - - Returns: - Optional[float]: The value of the ValidatorLogitsDivergence hyperparameter, or None if the subnetwork does - not exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="ValidatorLogitsDivergence", netuid=netuid, block=block - ) - return None if call is None else U16_NORMALIZED_FLOAT(int(call)) - - def validator_sequence_length( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Returns network ValidatorSequenceLength hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[int]: The value of the ValidatorSequenceLength hyperparameter, or ``None`` if the subnetwork does - not exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="ValidatorSequenceLength", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def validator_epochs_per_reset( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Returns network ValidatorEpochsPerReset hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[int]: The value of the ValidatorEpochsPerReset hyperparameter, or ``None`` if the subnetwork does - not exist or the parameter is not found. 
- """ - call = self._get_hyperparameter( - param_name="ValidatorEpochsPerReset", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def validator_epoch_length( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Returns network ValidatorEpochLen hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[int]: The value of the ValidatorEpochLen hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="ValidatorEpochLen", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def validator_exclude_quantile( - self, netuid: int, block: Optional[int] = None - ) -> Optional[float]: - """ - Returns network ValidatorExcludeQuantile hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest block is used. Default is ``None``. - - Returns: - Optional[float]: The value of the ValidatorExcludeQuantile hyperparameter, or ``None`` if the subnetwork does not exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="ValidatorExcludeQuantile", netuid=netuid, block=block - ) - return None if call is None else U16_NORMALIZED_FLOAT(int(call)) - - def max_allowed_validators( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Returns network ValidatorExcludeQuantile hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. 
- - Returns: - Optional[float]: The value of the ValidatorExcludeQuantile hyperparameter, or ``None`` if the subnetwork - does not exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="MaxAllowedValidators", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def min_allowed_weights( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Returns network MinAllowedWeights hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[int]: The value of the MinAllowedWeights hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="MinAllowedWeights", block=block, netuid=netuid - ) - return None if call is None else int(call) - - def max_weight_limit( - self, netuid: int, block: Optional[int] = None - ) -> Optional[float]: - """ - Returns network MaxWeightsLimit hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[float]: The value of the MaxWeightsLimit hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="MaxWeightsLimit", block=block, netuid=netuid - ) - return None if call is None else U16_NORMALIZED_FLOAT(int(call)) - - def adjustment_alpha( - self, netuid: int, block: Optional[int] = None - ) -> Optional[float]: - """ - Returns network AdjustmentAlpha hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. 
- block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[float]: The value of the AdjustmentAlpha hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="AdjustmentAlpha", block=block, netuid=netuid - ) - return None if call is None else U64_NORMALIZED_FLOAT(int(call)) - - def bonds_moving_avg( - self, netuid: int, block: Optional[int] = None - ) -> Optional[float]: - """ - Returns network BondsMovingAverage hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[float]: The value of the BondsMovingAverage hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="BondsMovingAverage", netuid=netuid, block=block - ) - return None if call is None else U64_NORMALIZED_FLOAT(int(call)) - - def scaling_law_power( - self, netuid: int, block: Optional[int] = None - ) -> Optional[float]: - """Returns network ScalingLawPower hyper parameter""" - call = self._get_hyperparameter( - param_name="ScalingLawPower", netuid=netuid, block=block - ) - return None if call is None else int(call) / 100.0 - - def synergy_scaling_law_power( - self, netuid: int, block: Optional[int] = None - ) -> Optional[float]: - """ - Returns network ScalingLawPower hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. 
- - Returns: - Optional[float]: The value of the ScalingLawPower hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="SynergyScalingLawPower", netuid=netuid, block=block - ) - return None if call is None else int(call) / 100.0 - - def subnetwork_n(self, netuid: int, block: Optional[int] = None) -> Optional[int]: - """ - Returns network SubnetworkN hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[int]: The value of the SubnetworkN hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="SubnetworkN", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def max_n(self, netuid: int, block: Optional[int] = None) -> Optional[int]: - """ - Returns network MaxAllowedUids hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[int]: The value of the MaxAllowedUids hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="MaxAllowedUids", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def blocks_since_epoch( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Returns network BlocksSinceEpoch hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. 
- - Returns: - Optional[int]: The value of the BlocksSinceEpoch hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter( - param_name="BlocksSinceEpoch", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def blocks_since_last_update(self, netuid: int, uid: int) -> Optional[int]: - """ - Returns the number of blocks since the last update for a specific UID in the subnetwork. - - Args: - netuid (int): The unique identifier of the subnetwork. - uid (int): The unique identifier of the neuron. - - Returns: - Optional[int]: The number of blocks since the last update, or ``None`` if the subnetwork or UID does not - exist. - """ - call = self._get_hyperparameter(param_name="LastUpdate", netuid=netuid) - return None if call is None else self.get_current_block() - int(call[uid]) - - def weights_rate_limit(self, netuid: int) -> Optional[int]: - """ - Returns network WeightsSetRateLimit hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - - Returns: - Optional[int]: The value of the WeightsSetRateLimit hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. - """ - call = self._get_hyperparameter(param_name="WeightsSetRateLimit", netuid=netuid) - return None if call is None else int(call) - - def tempo(self, netuid: int, block: Optional[int] = None) -> Optional[int]: - """ - Returns network Tempo hyperparameter. - - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[int]: The value of the Tempo hyperparameter, or ``None`` if the subnetwork does not - exist or the parameter is not found. 
- """ - call = self._get_hyperparameter(param_name="Tempo", netuid=netuid, block=block) - return None if call is None else int(call) - - ##################### - # Account functions # - ##################### - - def get_total_stake_for_hotkey( - self, ss58_address: str, block: Optional[int] = None - ) -> Optional["Balance"]: - """ - Returns the total stake held on a hotkey including delegative. - - Args: - ss58_address (str): The SS58 address of the hotkey. - block (Optional[int], optional): The block number to retrieve the stake from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[Balance]: The total stake held on the hotkey, or ``None`` if the hotkey does not - exist or the stake is not found. - """ - _result = self.query_subtensor("TotalHotkeyStake", block, [ss58_address]) - return ( - None - if getattr(_result, "value", None) is None - else Balance.from_rao(_result.value) - ) - - def get_total_stake_for_coldkey( - self, ss58_address: str, block: Optional[int] = None - ) -> Optional["Balance"]: - """ - Returns the total stake held on a coldkey. - - Args: - ss58_address (str): The SS58 address of the coldkey. - block (Optional[int], optional): The block number to retrieve the stake from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[Balance]: The total stake held on the coldkey, or ``None`` if the coldkey does not - exist or the stake is not found. - """ - _result = self.query_subtensor("TotalColdkeyStake", block, [ss58_address]) - return ( - None - if getattr(_result, "value", None) is None - else Balance.from_rao(_result.value) - ) - - def get_stake_for_coldkey_and_hotkey( - self, hotkey_ss58: str, coldkey_ss58: str, block: Optional[int] = None - ) -> Optional["Balance"]: - """ - Returns the stake under a coldkey - hotkey pairing. - - Args: - hotkey_ss58 (str): The SS58 address of the hotkey. - coldkey_ss58 (str): The SS58 address of the coldkey. 
- block (Optional[int], optional): The block number to retrieve the stake from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[Balance]: The stake under the coldkey - hotkey pairing, or ``None`` if the pairing does not - exist or the stake is not found. - """ - _result = self.query_subtensor("Stake", block, [hotkey_ss58, coldkey_ss58]) - return ( - None - if getattr(_result, "value", None) is None - else Balance.from_rao(_result.value) - ) - - def get_stake( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> List[Tuple[str, "Balance"]]: - """ - Returns a list of stake tuples (coldkey, balance) for each delegating coldkey including the owner. - - Args: - hotkey_ss58 (str): The SS58 address of the hotkey. - block (Optional[int], optional): The block number to retrieve the stakes from. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - List[Tuple[str, Balance]]: A list of tuples, each containing a coldkey SS58 address and the corresponding - balance staked by that coldkey. - """ - return [ - (r[0].value, Balance.from_rao(r[1].value)) - for r in self.query_map_subtensor("Stake", block, [hotkey_ss58]) - ] - - def does_hotkey_exist(self, hotkey_ss58: str, block: Optional[int] = None) -> bool: - """ - Returns true if the hotkey is known by the chain and there are accounts. - - Args: - hotkey_ss58 (str): The SS58 address of the hotkey. - block (Optional[int], optional): The block number to check the hotkey against. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - bool: ``True`` if the hotkey is known by the chain and there are accounts, ``False`` otherwise. 
- """ - _result = self.query_subtensor("Owner", block, [hotkey_ss58]) - return ( - False - if getattr(_result, "value", None) is None - else _result.value != "5C4hrfjw9DjXZTzV3MwzrrAr9P1MJhSrvWGWqi1eSuyUpnhM" - ) - - def get_hotkey_owner( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> Optional[str]: - """ - Returns the coldkey owner of the passed hotkey. - - Args: - hotkey_ss58 (str): The SS58 address of the hotkey. - block (Optional[int], optional): The block number to check the hotkey owner against. If ``None``, the latest - block is used. Default is ``None``. - - Returns: - Optional[str]: The SS58 address of the coldkey owner, or ``None`` if the hotkey does not exist or the owner - is not found. - """ - _result = self.query_subtensor("Owner", block, [hotkey_ss58]) - return ( - None - if getattr(_result, "value", None) is None - or not self.does_hotkey_exist(hotkey_ss58, block) - else _result.value - ) - - # TODO: check if someone still use this method. bittensor not. - def get_axon_info( - self, netuid: int, hotkey_ss58: str, block: Optional[int] = None - ) -> Optional[AxonInfo]: - """ - Returns the axon information for this hotkey account. - - Args: - netuid (int): The unique identifier of the subnetwork. - hotkey_ss58 (str): The SS58 address of the hotkey. - block (Optional[int], optional): The block number to retrieve the axon information from. If ``None``, the - latest block is used. Default is ``None``. - - Returns: - Optional[AxonInfo]: An AxonInfo object containing the axon information, or ``None`` if the axon information - is not found. 
- """ - result = self.query_subtensor("Axons", block, [netuid, hotkey_ss58]) - if result is not None and hasattr(result, "value"): - return AxonInfo( - ip=networking.int_to_ip(result.value["ip"]), - ip_type=result.value["ip_type"], - port=result.value["port"], - protocol=result.value["protocol"], - version=result.value["version"], - placeholder1=result.value["placeholder1"], - placeholder2=result.value["placeholder2"], - hotkey=hotkey_ss58, - coldkey="", - ) - return None - - # It is used in subtensor in neuron_info, and serving - def get_prometheus_info( - self, netuid: int, hotkey_ss58: str, block: Optional[int] = None - ) -> Optional[PrometheusInfo]: - """ - Returns the prometheus information for this hotkey account. - - Args: - netuid (int): The unique identifier of the subnetwork. - hotkey_ss58 (str): The SS58 address of the hotkey. - block (Optional[int], optional): The block number to retrieve the prometheus information from. If ``None``, - the latest block is used. Default is ``None``. - - Returns: - Optional[PrometheusInfo]: A PrometheusInfo object containing the prometheus information, or ``None`` if the - prometheus information is not found. - """ - result = self.query_subtensor("Prometheus", block, [netuid, hotkey_ss58]) - if result is not None and hasattr(result, "value"): - return PrometheusInfo( - ip=networking.int_to_ip(result.value["ip"]), - ip_type=result.value["ip_type"], - port=result.value["port"], - version=result.value["version"], - block=result.value["block"], - ) - return None - - ##################### - # Global Parameters # - ##################### - - @property - def block(self) -> int: - r"""Returns current chain block. - Returns: - block (int): - Current chain block. - """ - return self.get_current_block() - - def total_issuance(self, block: Optional[int] = None) -> Optional[Balance]: - """ - Retrieves the total issuance of the Bittensor network's native token (Tao) as of a specific - blockchain block. 
This represents the total amount of currency that has been issued or mined on the network. - - Args: - block (Optional[int], optional): The blockchain block number at which to perform the query. - - Returns: - Balance: The total issuance of TAO, represented as a Balance object. - - The total issuance is a key economic indicator in the Bittensor network, reflecting the overall supply - of the currency and providing insights into the network's economic health and inflationary trends. - """ - _result = self.query_subtensor("TotalIssuance", block) - return ( - None - if getattr(_result, "value", None) is None - else Balance.from_rao(_result.value) - ) - - def total_stake(self, block: Optional[int] = None) -> Optional[Balance]: - """ - Retrieves the total amount of TAO staked on the Bittensor network as of a specific blockchain block. - This represents the cumulative stake across all neurons in the network, indicating the overall level - of participation and investment by the network's participants. - - Args: - block (Optional[int], optional): The blockchain block number at which to perform the query. - - Returns: - Balance: The total amount of TAO staked on the network, represented as a Balance object. - - The total stake is an important metric for understanding the network's security, governance dynamics, - and the level of commitment by its participants. It is also a critical factor in the network's - consensus and incentive mechanisms. - """ - _result = self.query_subtensor("TotalStake", block) - return ( - None - if getattr(_result, "value", None) is None - else Balance.from_rao(_result.value) - ) - - def serving_rate_limit( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Retrieves the serving rate limit for a specific subnet within the Bittensor network. - This rate limit determines how often you can change your node's IP address on the blockchain. Expressed in - number of blocks. 
Applies to both subnet validator and subnet miner nodes. Used when you move your node to a new - machine. - - Args: - netuid (int): The unique identifier of the subnet. - block (Optional[int], optional): The blockchain block number at which to perform the query. - - Returns: - Optional[int]: The serving rate limit of the subnet if it exists, ``None`` otherwise. - - The serving rate limit is a crucial parameter for maintaining network efficiency and preventing - overuse of resources by individual neurons. It helps ensure a balanced distribution of service - requests across the network. - """ - call = self._get_hyperparameter( - param_name="ServingRateLimit", netuid=netuid, block=block - ) - return None if call is None else int(call) - - def tx_rate_limit(self, block: Optional[int] = None) -> Optional[int]: - """ - Retrieves the transaction rate limit for the Bittensor network as of a specific blockchain block. - This rate limit sets the maximum number of transactions that can be processed within a given time frame. - - Args: - block (Optional[int], optional): The blockchain block number at which to perform the query. - - Returns: - Optional[int]: The transaction rate limit of the network, None if not available. - - The transaction rate limit is an essential parameter for ensuring the stability and scalability - of the Bittensor network. It helps in managing network load and preventing congestion, thereby - maintaining efficient and timely transaction processing. - """ - _result = self.query_subtensor("TxRateLimit", block) - return getattr(_result, "value", None) - - ###################### - # Network Parameters # - ###################### - - def subnet_exists(self, netuid: int, block: Optional[int] = None) -> bool: - """ - Checks if a subnet with the specified unique identifier (netuid) exists within the Bittensor network. - - Args: - netuid (int): The unique identifier of the subnet. 
- block (Optional[int], optional): The blockchain block number at which to check the subnet's existence. - - Returns: - bool: ``True`` if the subnet exists, False otherwise. - - This function is critical for verifying the presence of specific subnets in the network, - enabling a deeper understanding of the network's structure and composition. - """ - _result = self.query_subtensor("NetworksAdded", block, [netuid]) - return getattr(_result, "value", False) - - def get_all_subnet_netuids(self, block: Optional[int] = None) -> List[int]: - """ - Retrieves the list of all subnet unique identifiers (netuids) currently present in the Bittensor network. - - Args: - block (Optional[int], optional): The blockchain block number at which to retrieve the subnet netuids. - - Returns: - List[int]: A list of subnet netuids. - - This function provides a comprehensive view of the subnets within the Bittensor network, - offering insights into its diversity and scale. - """ - result = self.query_map_subtensor("NetworksAdded", block) - return ( - [] - if result is None or not hasattr(result, "records") - else [netuid.value for netuid, exists in result if exists] - ) - - def get_total_subnets(self, block: Optional[int] = None) -> Optional[int]: - """ - Retrieves the total number of subnets within the Bittensor network as of a specific blockchain block. - - Args: - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - int: The total number of subnets in the network. - - Understanding the total number of subnets is essential for assessing the network's growth and - the extent of its decentralized infrastructure. - """ - _result = self.query_subtensor("TotalNetworks", block) - return getattr(_result, "value", None) - - def get_subnet_modality( - self, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Returns the NetworkModality hyperparameter for a specific subnetwork. 
- - Args: - netuid (int): The unique identifier of the subnetwork. - block (Optional[int], optional): The block number to retrieve the parameter from. If ``None``, the latest block is used. Default is ``None``. - - Returns: - Optional[int]: The value of the NetworkModality hyperparameter, or ``None`` if the subnetwork does not exist or the parameter is not found. - """ - _result = self.query_subtensor("NetworkModality", block, [netuid]) - return getattr(_result, "value", None) - - def get_subnet_connection_requirement( - self, netuid_0: int, netuid_1: int, block: Optional[int] = None - ) -> Optional[int]: - _result = self.query_subtensor("NetworkConnect", block, [netuid_0, netuid_1]) - return getattr(_result, "value", None) - - def get_emission_value_by_subnet( - self, netuid: int, block: Optional[int] = None - ) -> Optional[float]: - """ - Retrieves the emission value of a specific subnet within the Bittensor network. The emission value - represents the rate at which the subnet emits or distributes the network's native token (Tao). - - Args: - netuid (int): The unique identifier of the subnet. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Optional[float]: The emission value of the subnet, None if not available. - - The emission value is a critical economic parameter, influencing the incentive distribution and - reward mechanisms within the subnet. - """ - _result = self.query_subtensor("EmissionValues", block, [netuid]) - return ( - None - if getattr(_result, "value", None) is None - else Balance.from_rao(_result.value) - ) - - def get_subnet_connection_requirements( - self, netuid: int, block: Optional[int] = None - ) -> Dict[str, int]: - """ - Retrieves the connection requirements for a specific subnet within the Bittensor network. This - function provides details on the criteria that must be met for neurons to connect to the subnet. - - Args: - netuid (int): The network UID of the subnet to query. 
- block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Dict[str, int]: A dictionary detailing the connection requirements for the subnet. - - Understanding these requirements is crucial for neurons looking to participate in or interact - with specific subnets, ensuring compliance with their connection standards. - """ - result = self.query_map_subtensor("NetworkConnect", block, [netuid]) - return ( - {str(netuid.value): exists.value for netuid, exists in result.records} - if result and hasattr(result, "records") - else {} - ) - - def get_subnets(self, block: Optional[int] = None) -> List[int]: - """ - Retrieves a list of all subnets currently active within the Bittensor network. This function - provides an overview of the various subnets and their identifiers. - - Args: - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - List[int]: A list of network UIDs representing each active subnet. - - This function is valuable for understanding the network's structure and the diversity of subnets - available for neuron participation and collaboration. - """ - result = self.query_map_subtensor("NetworksAdded", block) - return ( - [network[0].value for network in result.records] - if result and hasattr(result, "records") - else [] - ) - - @networking.ensure_connected - def get_all_subnets_info(self, block: Optional[int] = None) -> List[SubnetInfo]: - """ - Retrieves detailed information about all subnets within the Bittensor network. This function - provides comprehensive data on each subnet, including its characteristics and operational parameters. - - Args: - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - List[SubnetInfo]: A list of SubnetInfo objects, each containing detailed information about a subnet. 
- - Gaining insights into the subnets' details assists in understanding the network's composition, - the roles of different subnets, and their unique features. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - block_hash = None if block is None else self.substrate.get_block_hash(block) - - return self.substrate.rpc_request( - method="subnetInfo_getSubnetsInfo", # custom rpc method - params=[block_hash] if block_hash else [], - ) - - json_body = make_substrate_call_with_retry() - - if not (result := json_body.get("result", None)): - return [] - - return SubnetInfo.list_from_vec_u8(result) - - @networking.ensure_connected - def get_subnet_info( - self, netuid: int, block: Optional[int] = None - ) -> Optional[SubnetInfo]: - """ - Retrieves detailed information about a specific subnet within the Bittensor network. This function - provides key data on the subnet, including its operational parameters and network status. - - Args: - netuid (int): The network UID of the subnet to query. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Optional[SubnetInfo]: Detailed information about the subnet, or ``None`` if not found. - - This function is essential for neurons and stakeholders interested in the specifics of a particular - subnet, including its governance, performance, and role within the broader network. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - block_hash = None if block is None else self.substrate.get_block_hash(block) - - return self.substrate.rpc_request( - method="subnetInfo_getSubnetInfo", # custom rpc method - params=[netuid, block_hash] if block_hash else [netuid], - ) - - json_body = make_substrate_call_with_retry() - - if not (result := json_body.get("result", None)): - return None - - return SubnetInfo.from_vec_u8(result) - - def get_subnet_hyperparameters( - self, netuid: int, block: Optional[int] = None - ) -> Optional[Union[List, SubnetHyperparameters]]: - """ - Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters - define the operational settings and rules governing the subnet's behavior. - - Args: - netuid (int): The network UID of the subnet to query. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Optional[SubnetHyperparameters]: The subnet's hyperparameters, or ``None`` if not available. - - Understanding the hyperparameters is crucial for comprehending how subnets are configured and - managed, and how they interact with the network's consensus and incentive mechanisms. - """ - hex_bytes_result = self.query_runtime_api( - runtime_api="SubnetInfoRuntimeApi", - method="get_subnet_hyperparams", - params=[netuid], - block=block, - ) - - if hex_bytes_result is None: - return [] - - if hex_bytes_result.startswith("0x"): - bytes_result = bytes.fromhex(hex_bytes_result[2:]) - else: - bytes_result = bytes.fromhex(hex_bytes_result) - - return SubnetHyperparameters.from_vec_u8(bytes_result) # type: ignore - - def get_subnet_owner( - self, netuid: int, block: Optional[int] = None - ) -> Optional[str]: - """ - Retrieves the owner's address of a specific subnet within the Bittensor network. The owner is - typically the entity responsible for the creation and maintenance of the subnet. 
- - Args: - netuid (int): The network UID of the subnet to query. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Optional[str]: The SS58 address of the subnet's owner, or ``None`` if not available. - - Knowing the subnet owner provides insights into the governance and operational control of the subnet, - which can be important for decision-making and collaboration within the network. - """ - _result = self.query_subtensor("SubnetOwner", block, [netuid]) - return getattr(_result, "value", None) - - ############## - # Nomination # - ############## - def is_hotkey_delegate(self, hotkey_ss58: str, block: Optional[int] = None) -> bool: - """ - Determines whether a given hotkey (public key) is a delegate on the Bittensor network. This function - checks if the neuron associated with the hotkey is part of the network's delegation system. - - Args: - hotkey_ss58 (str): The SS58 address of the neuron's hotkey. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - bool: ``True`` if the hotkey is a delegate, ``False`` otherwise. - - Being a delegate is a significant status within the Bittensor network, indicating a neuron's - involvement in consensus and governance processes. - """ - return hotkey_ss58 in [ - info.hotkey_ss58 for info in self.get_delegates(block=block) - ] - - def get_delegate_take( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> Optional[float]: - """ - Retrieves the delegate 'take' percentage for a neuron identified by its hotkey. The 'take' - represents the percentage of rewards that the delegate claims from its nominators' stakes. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Optional[float]: The delegate take percentage, None if not available. 
- - The delegate take is a critical parameter in the network's incentive structure, influencing - the distribution of rewards among neurons and their nominators. - """ - _result = self.query_subtensor("Delegates", block, [hotkey_ss58]) - return ( - None - if getattr(_result, "value", None) is None - else U16_NORMALIZED_FLOAT(_result.value) - ) - - def get_nominators_for_hotkey( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> Union[List[Tuple[str, Balance]], int]: - """ - Retrieves a list of nominators and their stakes for a neuron identified by its hotkey. - Nominators are neurons that stake their tokens on a delegate to support its operations. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Union[List[Tuple[str, Balance]], int]: A list of tuples containing each nominator's address and staked amount or 0. - - This function provides insights into the neuron's support network within the Bittensor ecosystem, - indicating its trust and collaboration relationships. - """ - result = self.query_map_subtensor("Stake", block, [hotkey_ss58]) - return ( - [(record[0].value, record[1].value) for record in result.records] - if result and hasattr(result, "records") - else 0 - ) - - @networking.ensure_connected - def get_delegate_by_hotkey( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> Optional[DelegateInfo]: - """ - Retrieves detailed information about a delegate neuron based on its hotkey. This function provides - a comprehensive view of the delegate's status, including its stakes, nominators, and reward distribution. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the delegate's hotkey. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Optional[DelegateInfo]: Detailed information about the delegate neuron, ``None`` if not found. 
- - This function is essential for understanding the roles and influence of delegate neurons within - the Bittensor network's consensus and governance structures. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(encoded_hotkey_: List[int]): - block_hash = None if block is None else self.substrate.get_block_hash(block) - - return self.substrate.rpc_request( - method="delegateInfo_getDelegate", # custom rpc method - params=( - [encoded_hotkey_, block_hash] if block_hash else [encoded_hotkey_] - ), - ) - - encoded_hotkey = ss58_to_vec_u8(hotkey_ss58) - json_body = make_substrate_call_with_retry(encoded_hotkey) - - if not (result := json_body.get("result", None)): - return None - - return DelegateInfo.from_vec_u8(result) - - @networking.ensure_connected - def get_delegates_lite(self, block: Optional[int] = None) -> List[DelegateInfoLite]: - """ - Retrieves a lighter list of all delegate neurons within the Bittensor network. This function provides an - overview of the neurons that are actively involved in the network's delegation system. - - Analyzing the delegate population offers insights into the network's governance dynamics and the distribution - of trust and responsibility among participating neurons. - - This is a lighter version of :func:`get_delegates`. - - Args: - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - List[DelegateInfoLite]: A list of ``DelegateInfoLite`` objects detailing each delegate's characteristics. 
- - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - block_hash = None if block is None else self.substrate.get_block_hash(block) - - return self.substrate.rpc_request( - method="delegateInfo_getDelegatesLite", # custom rpc method - params=[block_hash] if block_hash else [], - ) - - json_body = make_substrate_call_with_retry() - - if not (result := json_body.get("result", None)): - return [] - - return [DelegateInfoLite(**d) for d in result] - - @networking.ensure_connected - def get_delegates(self, block: Optional[int] = None) -> List[DelegateInfo]: - """ - Retrieves a list of all delegate neurons within the Bittensor network. This function provides an overview of the - neurons that are actively involved in the network's delegation system. - - Analyzing the delegate population offers insights into the network's governance dynamics and the distribution of - trust and responsibility among participating neurons. - - Args: - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - List[DelegateInfo]: A list of DelegateInfo objects detailing each delegate's characteristics. - - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - block_hash = None if block is None else self.substrate.get_block_hash(block) - - return self.substrate.rpc_request( - method="delegateInfo_getDelegates", # custom rpc method - params=[block_hash] if block_hash else [], - ) - - json_body = make_substrate_call_with_retry() - - if not (result := json_body.get("result", None)): - return [] - - return DelegateInfo.list_from_vec_u8(result) - - @networking.ensure_connected - def get_delegated( - self, coldkey_ss58: str, block: Optional[int] = None - ) -> List[Tuple[DelegateInfo, Balance]]: - """ - Retrieves a list of delegates and their associated stakes for a given coldkey. 
This function - identifies the delegates that a specific account has staked tokens on. - - Args: - coldkey_ss58 (str): The ``SS58`` address of the account's coldkey. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - List[Tuple[DelegateInfo, Balance]]: A list of tuples, each containing a delegate's information and staked - amount. - - This function is important for account holders to understand their stake allocations and their - involvement in the network's delegation and consensus mechanisms. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(encoded_coldkey_: List[int]): - block_hash = None if block is None else self.substrate.get_block_hash(block) - - return self.substrate.rpc_request( - method="delegateInfo_getDelegated", - params=( - [block_hash, encoded_coldkey_] if block_hash else [encoded_coldkey_] - ), - ) - - encoded_coldkey = ss58_to_vec_u8(coldkey_ss58) - json_body = make_substrate_call_with_retry(encoded_coldkey) - - if not (result := json_body.get("result", None)): - return [] - - return DelegateInfo.delegated_list_from_vec_u8(result) - - ############################ - # Child Hotkey Information # - ############################ - - def get_childkey_take( - self, hotkey: str, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Get the childkey take of a hotkey on a specific network. - Args: - - hotkey (str): The hotkey to search for. - - netuid (int): The netuid to search for. - - block (Optional[int]): Optional parameter specifying the block number. Defaults to None. - - Returns: - - Optional[int]: The value of the "ChildkeyTake" if found, or None if any error occurs. 
- """ - try: - childkey_take = self.query_subtensor( - name="ChildkeyTake", - block=block, - params=[hotkey, netuid], - ) - if childkey_take: - return int(childkey_take.value) - - except SubstrateRequestException as e: - print(f"Error querying ChildKeys: {e}") - return None - except Exception as e: - print(f"Unexpected error in get_children: {e}") - return None - return None - - @networking.ensure_connected - def get_children(self, hotkey, netuid) -> list[tuple[int, str]] | list[Any] | None: - """ - Get the children of a hotkey on a specific network. - Args: - hotkey (str): The hotkey to query. - netuid (int): The network ID. - Returns: - list or None: List of (proportion, child_address) tuples, or None if an error occurred. - """ - try: - children = self.substrate.query( - module="SubtensorModule", - storage_function="ChildKeys", - params=[hotkey, netuid], - ) - if children: - return format_children(children) - else: - return [] - except SubstrateRequestException as e: - print(f"Error querying ChildKeys: {e}") - return None - except Exception as e: - print(f"Unexpected error in get_children: {e}") - return None - - @networking.ensure_connected - def get_parents(self, child_hotkey, netuid): - """ - Get the parents of a child hotkey on a specific network. - Args: - child_hotkey (str): The child hotkey to query. - netuid (int): The network ID. - Returns: - list or None: List of (proportion, parent_address) tuples, or None if an error occurred. 
- """ - try: - parents = self.substrate.query( - module="SubtensorModule", - storage_function="ParentKeys", - params=[child_hotkey, netuid], - ) - if not parents: - print("No parents found.") - return [] - - formatted_parents = [ - format_parent(proportion, parent) - for proportion, parent in parents - if proportion != 0 - ] - return formatted_parents - except SubstrateRequestException as e: - print(f"Error querying ParentKeys: {e}") - except Exception as e: - print(f"Unexpected error in get_parents: {e}") - - return None - - ##################### - # Stake Information # - ##################### - - def get_stake_info_for_coldkey( - self, coldkey_ss58: str, block: Optional[int] = None - ) -> Optional[List[StakeInfo]]: - """ - Retrieves stake information associated with a specific coldkey. This function provides details - about the stakes held by an account, including the staked amounts and associated delegates. - - Args: - coldkey_ss58 (str): The ``SS58`` address of the account's coldkey. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - List[StakeInfo]: A list of StakeInfo objects detailing the stake allocations for the account. - - Stake information is vital for account holders to assess their investment and participation - in the network's delegation and consensus processes. 
- """ - encoded_coldkey = ss58_to_vec_u8(coldkey_ss58) - - hex_bytes_result = self.query_runtime_api( - runtime_api="StakeInfoRuntimeApi", - method="get_stake_info_for_coldkey", - params=[encoded_coldkey], # type: ignore - block=block, - ) - - if hex_bytes_result is None: - return None - - if hex_bytes_result.startswith("0x"): - bytes_result = bytes.fromhex(hex_bytes_result[2:]) - else: - bytes_result = bytes.fromhex(hex_bytes_result) - # TODO: review if this is the correct type / works - return StakeInfo.list_from_vec_u8(bytes_result) # type: ignore - - def get_stake_info_for_coldkeys( - self, coldkey_ss58_list: List[str], block: Optional[int] = None - ) -> Optional[Dict[str, List[StakeInfo]]]: - """ - Retrieves stake information for a list of coldkeys. This function aggregates stake data for multiple - accounts, providing a collective view of their stakes and delegations. - - Args: - coldkey_ss58_list (List[str]): A list of ``SS58`` addresses of the accounts' coldkeys. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Dict[str, List[StakeInfo]]: A dictionary mapping each coldkey to a list of its StakeInfo objects. - - This function is useful for analyzing the stake distribution and delegation patterns of multiple - accounts simultaneously, offering a broader perspective on network participation and investment strategies. 
- """ - # TODO: review - ss58_to_vec_u8 returns List[int] but the runtime api expects List[List[int]] - encoded_coldkeys = [ - ss58_to_vec_u8(coldkey_ss58) for coldkey_ss58 in coldkey_ss58_list - ] - - hex_bytes_result = self.query_runtime_api( - runtime_api="StakeInfoRuntimeApi", - method="get_stake_info_for_coldkeys", - params=[encoded_coldkeys], # type: ignore - block=block, - ) - - if hex_bytes_result is None: - return None - - if hex_bytes_result.startswith("0x"): - bytes_result = bytes.fromhex(hex_bytes_result[2:]) - else: - bytes_result = bytes.fromhex(hex_bytes_result) - - return StakeInfo.list_of_tuple_from_vec_u8(bytes_result) # type: ignore - - @networking.ensure_connected - def get_minimum_required_stake( - self, - ) -> Balance: - """ - Returns the minimum required stake for nominators in the Subtensor network. - - This method retries the substrate call up to three times with exponential backoff in case of failures. - - Returns: - Balance: The minimum required stake as a Balance object. - - Raises: - Exception: If the substrate call fails after the maximum number of retries. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - return self.substrate.query( - module="SubtensorModule", storage_function="NominatorMinRequiredStake" - ) - - result = make_substrate_call_with_retry() - return Balance.from_rao(result.decode()) - - ################################# - # Neuron information per subnet # - ################################# - - def is_hotkey_registered_any( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> bool: - """ - Checks if a neuron's hotkey is registered on any subnet within the Bittensor network. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - block (Optional[int]): The blockchain block number at which to perform the check. - - Returns: - bool: ``True`` if the hotkey is registered on any subnet, False otherwise. 
- - This function is essential for determining the network-wide presence and participation of a neuron. - """ - return len(self.get_netuids_for_hotkey(hotkey_ss58, block)) > 0 - - def is_hotkey_registered_on_subnet( - self, hotkey_ss58: str, netuid: int, block: Optional[int] = None - ) -> bool: - """ - Checks if a neuron's hotkey is registered on a specific subnet within the Bittensor network. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number at which to perform the check. - - Returns: - bool: ``True`` if the hotkey is registered on the specified subnet, False otherwise. - - This function helps in assessing the participation of a neuron in a particular subnet, - indicating its specific area of operation or influence within the network. - """ - return self.get_uid_for_hotkey_on_subnet(hotkey_ss58, netuid, block) is not None - - def is_hotkey_registered( - self, - hotkey_ss58: str, - netuid: Optional[int] = None, - block: Optional[int] = None, - ) -> bool: - """ - Determines whether a given hotkey (public key) is registered in the Bittensor network, either - globally across any subnet or specifically on a specified subnet. This function checks the registration - status of a neuron identified by its hotkey, which is crucial for validating its participation and - activities within the network. - - Args: - hotkey_ss58 (str): The SS58 address of the neuron's hotkey. - netuid (Optional[int]): The unique identifier of the subnet to check the registration. If ``None``, the - registration is checked across all subnets. - block (Optional[int]): The blockchain block number at which to perform the query. - - Returns: - bool: ``True`` if the hotkey is registered in the specified context (either any subnet or a specific - subnet), ``False`` otherwise. - - This function is important for verifying the active status of neurons in the Bittensor network. 
It aids - in understanding whether a neuron is eligible to participate in network processes such as consensus, - validation, and incentive distribution based on its registration status. - """ - if netuid is None: - return self.is_hotkey_registered_any(hotkey_ss58, block) - else: - return self.is_hotkey_registered_on_subnet(hotkey_ss58, netuid, block) - - def get_uid_for_hotkey_on_subnet( - self, hotkey_ss58: str, netuid: int, block: Optional[int] = None - ) -> Optional[int]: - """ - Retrieves the unique identifier (UID) for a neuron's hotkey on a specific subnet. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number for the query. - - Returns: - Optional[int]: The UID of the neuron if it is registered on the subnet, ``None`` otherwise. - - The UID is a critical identifier within the network, linking the neuron's hotkey to its - operational and governance activities on a particular subnet. - """ - _result = self.query_subtensor("Uids", block, [netuid, hotkey_ss58]) - return getattr(_result, "value", None) - - def get_all_uids_for_hotkey( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> List[int]: - """ - Retrieves all unique identifiers (UIDs) associated with a given hotkey across different subnets - within the Bittensor network. This function helps in identifying all the neuron instances that are - linked to a specific hotkey. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - block (Optional[int]): The blockchain block number at which to perform the query. - - Returns: - List[int]: A list of UIDs associated with the given hotkey across various subnets. - - This function is important for tracking a neuron's presence and activities across different - subnets within the Bittensor ecosystem. 
- """ - return [ - self.get_uid_for_hotkey_on_subnet(hotkey_ss58, netuid, block) or 0 - for netuid in self.get_netuids_for_hotkey(hotkey_ss58, block) - ] - - def get_netuids_for_hotkey( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> List[int]: - """ - Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. This function - identifies the specific subnets within the Bittensor network where the neuron associated with - the hotkey is active. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - block (Optional[int]): The blockchain block number at which to perform the query. - - Returns: - List[int]: A list of netuids where the neuron is a member. - """ - result = self.query_map_subtensor("IsNetworkMember", block, [hotkey_ss58]) - return ( - [record[0].value for record in result.records if record[1]] - if result and hasattr(result, "records") - else [] - ) - - def get_neuron_for_pubkey_and_subnet( - self, hotkey_ss58: str, netuid: int, block: Optional[int] = None - ) -> Optional[NeuronInfo]: - """ - Retrieves information about a neuron based on its public key (hotkey SS58 address) and the specific - subnet UID (netuid). This function provides detailed neuron information for a particular subnet within - the Bittensor network. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number at which to perform the query. - - Returns: - Optional[NeuronInfo]: Detailed information about the neuron if found, ``None`` otherwise. - - This function is crucial for accessing specific neuron data and understanding its status, stake, - and other attributes within a particular subnet of the Bittensor ecosystem. 
- """ - return self.neuron_for_uid( - self.get_uid_for_hotkey_on_subnet(hotkey_ss58, netuid, block=block), - netuid, - block=block, - ) - - def get_all_neurons_for_pubkey( - self, hotkey_ss58: str, block: Optional[int] = None - ) -> List[NeuronInfo]: - """ - Retrieves information about all neuron instances associated with a given public key (hotkey ``SS58`` - address) across different subnets of the Bittensor network. This function aggregates neuron data - from various subnets to provide a comprehensive view of a neuron's presence and status within the network. - - Args: - hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. - block (Optional[int]): The blockchain block number for the query. - - Returns: - List[NeuronInfo]: A list of NeuronInfo objects detailing the neuron's presence across various subnets. - - This function is valuable for analyzing a neuron's overall participation, influence, and - contributions across the Bittensor network. - """ - netuids = self.get_netuids_for_hotkey(hotkey_ss58, block) - uids = [self.get_uid_for_hotkey_on_subnet(hotkey_ss58, net) for net in netuids] - return [self.neuron_for_uid(uid, net) for uid, net in list(zip(uids, netuids))] - - def neuron_has_validator_permit( - self, uid: int, netuid: int, block: Optional[int] = None - ) -> Optional[bool]: - """ - Checks if a neuron, identified by its unique identifier (UID), has a validator permit on a specific - subnet within the Bittensor network. This function determines whether the neuron is authorized to - participate in validation processes on the subnet. - - Args: - uid (int): The unique identifier of the neuron. - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number for the query. - - Returns: - Optional[bool]: ``True`` if the neuron has a validator permit, False otherwise. 
- - This function is essential for understanding a neuron's role and capabilities within a specific - subnet, particularly regarding its involvement in network validation and governance. - """ - _result = self.query_subtensor("ValidatorPermit", block, [netuid, uid]) - return getattr(_result, "value", None) - - def neuron_for_wallet( - self, wallet: "bittensor.wallet", netuid: int, block: Optional[int] = None - ) -> Optional[NeuronInfo]: - """ - Retrieves information about a neuron associated with a given wallet on a specific subnet. - This function provides detailed data about the neuron's status, stake, and activities based on - the wallet's hotkey address. - - Args: - wallet (bittensor.wallet): The wallet associated with the neuron. - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number at which to perform the query. - - Returns: - Optional[NeuronInfo]: Detailed information about the neuron if found, ``None`` otherwise. - - This function is important for wallet owners to understand and manage their neuron's presence - and activities within a particular subnet of the Bittensor network. - """ - return self.get_neuron_for_pubkey_and_subnet( - wallet.hotkey.ss58_address, netuid=netuid, block=block - ) - - @networking.ensure_connected - def neuron_for_uid( - self, uid: Optional[int], netuid: int, block: Optional[int] = None - ) -> NeuronInfo: - """ - Retrieves detailed information about a specific neuron identified by its unique identifier (UID) - within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive - view of a neuron's attributes, including its stake, rank, and operational status. - - Args: - uid (int): The unique identifier of the neuron. - netuid (int): The unique identifier of the subnet. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - NeuronInfo: Detailed information about the neuron if found, ``None`` otherwise. 
- - This function is crucial for analyzing individual neurons' contributions and status within a specific - subnet, offering insights into their roles in the network's consensus and validation mechanisms. - """ - if uid is None: - return NeuronInfo.get_null_neuron() - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - block_hash = None if block is None else self.substrate.get_block_hash(block) - params = [netuid, uid] - if block_hash: - params = params + [block_hash] - return self.substrate.rpc_request( - method="neuronInfo_getNeuron", - params=params, # custom rpc method - ) - - json_body = make_substrate_call_with_retry() - - if not (result := json_body.get("result", None)): - return NeuronInfo.get_null_neuron() - - return NeuronInfo.from_vec_u8(result) - - def neurons(self, netuid: int, block: Optional[int] = None) -> List[NeuronInfo]: - """ - Retrieves a list of all neurons within a specified subnet of the Bittensor network. This function - provides a snapshot of the subnet's neuron population, including each neuron's attributes and network - interactions. - - Args: - netuid (int): The unique identifier of the subnet. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - List[NeuronInfo]: A list of NeuronInfo objects detailing each neuron's characteristics in the subnet. - - Understanding the distribution and status of neurons within a subnet is key to comprehending the - network's decentralized structure and the dynamics of its consensus and governance processes. 
- """ - neurons_lite = self.neurons_lite(netuid=netuid, block=block) - weights = self.weights(block=block, netuid=netuid) - bonds = self.bonds(block=block, netuid=netuid) - - weights_as_dict = {uid: w for uid, w in weights} - bonds_as_dict = {uid: b for uid, b in bonds} - - neurons = [ - NeuronInfo.from_weights_bonds_and_neuron_lite( - neuron_lite, weights_as_dict, bonds_as_dict - ) - for neuron_lite in neurons_lite - ] - - return neurons - - def neuron_for_uid_lite( - self, uid: int, netuid: int, block: Optional[int] = None - ) -> Optional[NeuronInfoLite]: - """ - Retrieves a lightweight version of information about a neuron in a specific subnet, identified by - its UID. The 'lite' version focuses on essential attributes such as stake and network activity. - - Args: - uid (int): The unique identifier of the neuron. - netuid (int): The unique identifier of the subnet. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - Optional[NeuronInfoLite]: A simplified version of neuron information if found, ``None`` otherwise. - - This function is useful for quick and efficient analyses of neuron status and activities within a - subnet without the need for comprehensive data retrieval. - """ - if uid is None: - return NeuronInfoLite.get_null_neuron() - - hex_bytes_result = self.query_runtime_api( - runtime_api="NeuronInfoRuntimeApi", - method="get_neuron_lite", - params={ - "netuid": netuid, - "uid": uid, - }, - block=block, - ) - - if hex_bytes_result is None: - return NeuronInfoLite.get_null_neuron() - - if hex_bytes_result.startswith("0x"): - bytes_result = bytes.fromhex(hex_bytes_result[2:]) - else: - bytes_result = bytes.fromhex(hex_bytes_result) - - return NeuronInfoLite.from_vec_u8(bytes_result) # type: ignore - - def neurons_lite( - self, netuid: int, block: Optional[int] = None - ) -> List[NeuronInfoLite]: - """ - Retrieves a list of neurons in a 'lite' format from a specific subnet of the Bittensor network. 
- This function provides a streamlined view of the neurons, focusing on key attributes such as stake - and network participation. - - Args: - netuid (int): The unique identifier of the subnet. - block (Optional[int], optional): The blockchain block number for the query. - - Returns: - List[NeuronInfoLite]: A list of simplified neuron information for the subnet. - - This function offers a quick overview of the neuron population within a subnet, facilitating - efficient analysis of the network's decentralized structure and neuron dynamics. - """ - hex_bytes_result = self.query_runtime_api( - runtime_api="NeuronInfoRuntimeApi", - method="get_neurons_lite", - params=[netuid], - block=block, - ) - - if hex_bytes_result is None: - return [] - - if hex_bytes_result.startswith("0x"): - bytes_result = bytes.fromhex(hex_bytes_result[2:]) - else: - bytes_result = bytes.fromhex(hex_bytes_result) - - return NeuronInfoLite.list_from_vec_u8(bytes_result) # type: ignore - - def metagraph( - self, - netuid: int, - lite: bool = True, - block: Optional[int] = None, - ) -> "bittensor.metagraph": # type: ignore - """ - Returns a synced metagraph for a specified subnet within the Bittensor network. The metagraph - represents the network's structure, including neuron connections and interactions. - - Args: - netuid (int): The network UID of the subnet to query. - lite (bool, default=True): If true, returns a metagraph using a lightweight sync (no weights, no bonds). - block (Optional[int]): Block number for synchronization, or ``None`` for the latest block. - - Returns: - bittensor.Metagraph: The metagraph representing the subnet's structure and neuron relationships. - - The metagraph is an essential tool for understanding the topology and dynamics of the Bittensor - network's decentralized architecture, particularly in relation to neuron interconnectivity and consensus - processes. 
- """ - metagraph_ = bittensor.metagraph( - network=self.network, netuid=netuid, lite=lite, sync=False - ) - metagraph_.sync(block=block, lite=lite, subtensor=self) - - return metagraph_ - - def incentive(self, netuid: int, block: Optional[int] = None) -> List[int]: - """ - Retrieves the list of incentives for neurons within a specific subnet of the Bittensor network. - This function provides insights into the reward distribution mechanisms and the incentives allocated - to each neuron based on their contributions and activities. - - Args: - netuid (int): The network UID of the subnet to query. - block (Optional[int]): The blockchain block number for the query. - - Returns: - List[int]: The list of incentives for neurons within the subnet, indexed by UID. - - Understanding the incentive structure is crucial for analyzing the network's economic model and - the motivational drivers for neuron participation and collaboration. - """ - i_map = [] - i_map_encoded = self.query_map_subtensor(name="Incentive", block=block) - if i_map_encoded.records: - for netuid_, incentives_map in i_map_encoded: - if netuid_ == netuid: - i_map = incentives_map.serialize() - break - - return i_map - - def weights( - self, netuid: int, block: Optional[int] = None - ) -> List[Tuple[int, List[Tuple[int, int]]]]: - """ - Retrieves the weight distribution set by neurons within a specific subnet of the Bittensor network. - This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the - network's trust and value assignment mechanisms. - - Args: - netuid (int): The network UID of the subnet to query. - block (Optional[int]): The blockchain block number for the query. - - Returns: - List[Tuple[int, List[Tuple[int, int]]]]: A list of tuples mapping each neuron's UID to its assigned weights. 
- - The weight distribution is a key factor in the network's consensus algorithm and the ranking of neurons, - influencing their influence and reward allocation within the subnet. - """ - w_map = [] - w_map_encoded = self.query_map_subtensor( - name="Weights", block=block, params=[netuid] - ) - if w_map_encoded.records: - for uid, w in w_map_encoded: - w_map.append((uid.serialize(), w.serialize())) - - return w_map - - def bonds( - self, netuid: int, block: Optional[int] = None - ) -> List[Tuple[int, List[Tuple[int, int]]]]: - """ - Retrieves the bond distribution set by neurons within a specific subnet of the Bittensor network. - Bonds represent the investments or commitments made by neurons in one another, indicating a level - of trust and perceived value. This bonding mechanism is integral to the network's market-based approach - to measuring and rewarding machine intelligence. - - Args: - netuid (int): The network UID of the subnet to query. - block (Optional[int]): The blockchain block number for the query. - - Returns: - List[Tuple[int, List[Tuple[int, int]]]]: A list of tuples mapping each neuron's UID to its bonds with other - neurons. - - Understanding bond distributions is crucial for analyzing the trust dynamics and market behavior - within the subnet. It reflects how neurons recognize and invest in each other's intelligence and - contributions, supporting diverse and niche systems within the Bittensor ecosystem. - """ - b_map = [] - b_map_encoded = self.query_map_subtensor( - name="Bonds", block=block, params=[netuid] - ) - if b_map_encoded.records: - for uid, b in b_map_encoded: - b_map.append((uid.serialize(), b.serialize())) - - return b_map - - def get_subnet_burn_cost(self, block: Optional[int] = None) -> Optional[str]: - """ - Retrieves the burn cost for registering a new subnet within the Bittensor network. This cost - represents the amount of Tao that needs to be locked or burned to establish a new subnet. 
- - Args: - block (Optional[int]): The blockchain block number for the query. - - Returns: - int: The burn cost for subnet registration. - - The subnet burn cost is an important economic parameter, reflecting the network's mechanisms for - controlling the proliferation of subnets and ensuring their commitment to the network's long-term viability. - """ - lock_cost = self.query_runtime_api( - runtime_api="SubnetRegistrationRuntimeApi", - method="get_network_registration_cost", - params=[], - block=block, - ) - - if lock_cost is None: - return None - - return lock_cost - - ############## - # Extrinsics # - ############## - - @networking.ensure_connected - def _do_delegation( - self, - wallet: "bittensor.wallet", - delegate_ss58: str, - amount: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - """ - Delegates a specified amount of stake to a delegate's hotkey. - - This method sends a transaction to add stake to a delegate's hotkey and retries the call up to three times - with exponential backoff in case of failures. - - Args: - wallet (bittensor.wallet): The wallet from which the stake will be delegated. - delegate_ss58 (str): The SS58 address of the delegate's hotkey. - amount (Balance): The amount of stake to be delegated. - wait_for_inclusion (bool, optional): Whether to wait for the transaction to be included in a block. Default is ``True``. - wait_for_finalization (bool, optional): Whether to wait for the transaction to be finalized. Default is ``False``. - - Returns: - bool: ``True`` if the delegation is successful, ``False`` otherwise. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="add_stake", - call_params={"hotkey": delegate_ss58, "amount_staked": amount.rao}, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - response.process_events() - if response.is_success: - return True - else: - raise StakeError(format_error_message(response.error_message)) - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def _do_undelegation( - self, - wallet: "bittensor.wallet", - delegate_ss58: str, - amount: "Balance", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - """ - Removes a specified amount of stake from a delegate's hotkey. - - This method sends a transaction to remove stake from a delegate's hotkey and retries the call up to three times - with exponential backoff in case of failures. - - Args: - wallet (bittensor.wallet): The wallet from which the stake will be removed. - delegate_ss58 (str): The SS58 address of the delegate's hotkey. - amount (Balance): The amount of stake to be removed. - wait_for_inclusion (bool, optional): Whether to wait for the transaction to be included in a block. Default is ``True``. - wait_for_finalization (bool, optional): Whether to wait for the transaction to be finalized. Default is ``False``. - - Returns: - bool: ``True`` if the undelegation is successful, ``False`` otherwise. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="remove_stake", - call_params={ - "hotkey": delegate_ss58, - "amount_unstaked": amount.rao, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - response.process_events() - if response.is_success: - return True - else: - raise StakeError(format_error_message(response.error_message)) - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def _do_nominate( - self, - wallet: "bittensor.wallet", - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - """ - Nominates the wallet's hotkey to become a delegate. - - This method sends a transaction to nominate the wallet's hotkey to become a delegate and retries the call up to - three times with exponential backoff in case of failures. - - Args: - wallet (bittensor.wallet): The wallet whose hotkey will be nominated. - wait_for_inclusion (bool, optional): Whether to wait for the transaction to be included in a block. Default is ``True``. - wait_for_finalization (bool, optional): Whether to wait for the transaction to be finalized. Default is ``False``. - - Returns: - bool: ``True`` if the nomination is successful, ``False`` otherwise. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="become_delegate", - call_params={"hotkey": wallet.hotkey.ss58_address}, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) # sign with coldkey - response = self.substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - response.process_events() - if response.is_success: - return True - else: - raise NominationError(format_error_message(response.error_message)) - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def _do_increase_take( - self, - wallet: "bittensor.wallet", - hotkey_ss58: str, - take: int, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - """ - Increases the take rate for a delegate's hotkey. - - This method sends a transaction to increase the take rate for a delegate's hotkey and retries the call up to - three times with exponential backoff in case of failures. - - Args: - wallet (bittensor.wallet): The wallet from which the transaction will be signed. - hotkey_ss58 (str): The SS58 address of the delegate's hotkey. - take (int): The new take rate to be set. - wait_for_inclusion (bool, optional): Whether to wait for the transaction to be included in a block. Default is ``True``. - wait_for_finalization (bool, optional): Whether to wait for the transaction to be finalized. Default is ``False``. - - Returns: - bool: ``True`` if the take rate increase is successful, ``False`` otherwise. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(): - with self.substrate as substrate: - call = substrate.compose_call( - call_module="SubtensorModule", - call_function="increase_take", - call_params={ - "hotkey": hotkey_ss58, - "take": take, - }, - ) - extrinsic = substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) # sign with coldkey - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - response.process_events() - if response.is_success: - return True - else: - raise TakeError(format_error_message(response.error_message)) - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def _do_decrease_take( - self, - wallet: "bittensor.wallet", - hotkey_ss58: str, - take: int, - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - ) -> bool: - """ - Decreases the take rate for a delegate's hotkey. - - This method sends a transaction to decrease the take rate for a delegate's hotkey and retries the call up to - three times with exponential backoff in case of failures. - - Args: - wallet (bittensor.wallet): The wallet from which the transaction will be signed. - hotkey_ss58 (str): The SS58 address of the delegate's hotkey. - take (int): The new take rate to be set. - wait_for_inclusion (bool, optional): Whether to wait for the transaction to be included in a block. Default is ``True``. - wait_for_finalization (bool, optional): Whether to wait for the transaction to be finalized. Default is ``False``. - - Returns: - bool: ``True`` if the take rate decrease is successful, ``False`` otherwise. 
- """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(): - with self.substrate as substrate: - call = substrate.compose_call( - call_module="SubtensorModule", - call_function="decrease_take", - call_params={ - "hotkey": hotkey_ss58, - "take": take, - }, - ) - extrinsic = substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) # sign with coldkey - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # We only wait here if we expect finalization. - if not wait_for_finalization and not wait_for_inclusion: - return True - response.process_events() - if response.is_success: - return True - else: - raise TakeError(format_error_message(response.error_message)) - - return make_substrate_call_with_retry() - - ########## - # Legacy # - ########## - - @networking.ensure_connected - def get_balance(self, address: str, block: Optional[int] = None) -> Balance: - """ - Retrieves the token balance of a specific address within the Bittensor network. This function queries - the blockchain to determine the amount of Tao held by a given account. - - Args: - address (str): The Substrate address in ``ss58`` format. - block (int, optional): The blockchain block number at which to perform the query. - - Returns: - Balance: The account balance at the specified block, represented as a Balance object. - - This function is important for monitoring account holdings and managing financial transactions - within the Bittensor ecosystem. It helps in assessing the economic status and capacity of network participants. 
- """ - try: - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - return self.substrate.query( - module="System", - storage_function="Account", - params=[address], - block_hash=( - None if block is None else self.substrate.get_block_hash(block) - ), - ) - - result = make_substrate_call_with_retry() - except RemainingScaleBytesNotEmptyException: - _logger.error( - "Received a corrupted message. This likely points to an error with the network or subnet." - ) - return Balance(1000) - return Balance(result.value["data"]["free"]) - - @networking.ensure_connected - def get_current_block(self) -> int: - """ - Returns the current block number on the Bittensor blockchain. This function provides the latest block - number, indicating the most recent state of the blockchain. - - Returns: - int: The current chain block number. - - Knowing the current block number is essential for querying real-time data and performing time-sensitive - operations on the blockchain. It serves as a reference point for network activities and data synchronization. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - return self.substrate.get_block_number(None) # type: ignore - - return make_substrate_call_with_retry() - - @networking.ensure_connected - def get_balances(self, block: Optional[int] = None) -> Dict[str, Balance]: - """ - Retrieves the token balances of all accounts within the Bittensor network as of a specific blockchain block. - This function provides a comprehensive view of the token distribution among different accounts. - - Args: - block (int, optional): The blockchain block number at which to perform the query. - - Returns: - Dict[str, Balance]: A dictionary mapping each account's ``ss58`` address to its balance. 
- - This function is valuable for analyzing the overall economic landscape of the Bittensor network, - including the distribution of financial resources and the financial status of network participants. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=_logger) - def make_substrate_call_with_retry(): - return self.substrate.query_map( - module="System", - storage_function="Account", - block_hash=( - None if block is None else self.substrate.get_block_hash(block) - ), - ) - - result = make_substrate_call_with_retry() - return_dict = {} - for r in result: - bal = Balance(int(r[1]["data"]["free"].value)) - return_dict[r[0].value] = bal - return return_dict - - # TODO: check with the team if this is used anywhere externally. not in bittensor - @staticmethod - def _null_neuron() -> NeuronInfo: - neuron = NeuronInfo( - uid=0, - netuid=0, - active=0, - stake=Balance(0), - rank=0, - emission=0, - incentive=0, - consensus=0, - trust=0, - validator_trust=0, - dividends=0, - last_update=0, - validator_permit=False, - weights=[], - bonds=[], - prometheus_info=None, - axon_info=None, - is_null=True, - coldkey="000000000000000000000000000000000000000000000000", - hotkey="000000000000000000000000000000000000000000000000", - ) # type: ignore - return neuron - - @networking.ensure_connected - def get_block_hash(self, block_id: int) -> str: - """ - Retrieves the hash of a specific block on the Bittensor blockchain. The block hash is a unique - identifier representing the cryptographic hash of the block's content, ensuring its integrity and - immutability. - - Args: - block_id (int): The block number for which the hash is to be retrieved. - - Returns: - str: The cryptographic hash of the specified block. - - The block hash is a fundamental aspect of blockchain technology, providing a secure reference to - each block's data. It is crucial for verifying transactions, ensuring data consistency, and - maintaining the trustworthiness of the blockchain. 
- """ - return self.substrate.get_block_hash(block_id=block_id) - - def get_error_info_by_index(self, error_index: int) -> Tuple[str, str]: - """ - Returns the error name and description from the Subtensor error list. - - Args: - error_index (int): The index of the error to retrieve. - - Returns: - Tuple[str, str]: A tuple containing the error name and description from substrate metadata. If the error index is not found, returns ("Unknown Error", "") and logs a warning. - """ - unknown_error = ("Unknown Error", "") - - if not self._subtensor_errors: - self._subtensor_errors = get_subtensor_errors(self.substrate) - - name, description = self._subtensor_errors.get(str(error_index), unknown_error) - - if name == unknown_error[0]: - _logger.warning( - f"Subtensor returned an error with an unknown index: {error_index}" - ) - - return name, description - - -# TODO: remove this after fully migrate `bittensor.subtensor` to `bittensor.Subtensor` in `bittensor/__init__.py` -subtensor = Subtensor diff --git a/bittensor/synapse.py b/bittensor/synapse.py deleted file mode 100644 index f08b5bcb38..0000000000 --- a/bittensor/synapse.py +++ /dev/null @@ -1,864 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - - -import base64 -import json -import sys -import warnings - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - field_validator, - model_validator, -) -import bittensor -from typing import Optional, Any, Dict, ClassVar, Tuple - - -def get_size(obj, seen=None) -> int: - """ - Recursively finds size of objects. - - This function traverses every item of a given object and sums their sizes to compute the total size. - - Args: - obj (any type): The object to get the size of. - seen (set): Set of object ids that have been calculated. - - Returns: - int: The total size of the object. - - """ - size = sys.getsizeof(obj) - if seen is None: - seen = set() - obj_id = id(obj) - if obj_id in seen: - return 0 - # Important mark as seen *before* entering recursion to gracefully handle - # self-referential objects - seen.add(obj_id) - if isinstance(obj, dict): - size += sum([get_size(v, seen) for v in obj.values()]) - size += sum([get_size(k, seen) for k in obj.keys()]) - elif hasattr(obj, "__dict__"): - size += get_size(obj.__dict__, seen) - elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)): - size += sum([get_size(i, seen) for i in obj]) - return size - - -def cast_int(raw: str) -> int: - """ - Converts a string to an integer, if the string is not ``None``. - - This function attempts to convert a string to an integer. If the string is ``None``, it simply returns ``None``. - - Args: - raw (str): The string to convert. 
- - Returns: - int or None: The converted integer, or ``None`` if the input was ``None``. - - """ - return int(raw) if raw is not None else raw # type: ignore - - -def cast_float(raw: str) -> float: - """ - Converts a string to a float, if the string is not ``None``. - - This function attempts to convert a string to a float. If the string is ``None``, it simply returns ``None``. - - Args: - raw (str): The string to convert. - - Returns: - float or None: The converted float, or ``None`` if the input was ``None``. - - """ - return float(raw) if raw is not None else raw # type: ignore - - -class TerminalInfo(BaseModel): - """ - TerminalInfo encapsulates detailed information about a network synapse (node) involved in a communication process. - - This class serves as a metadata carrier, - providing essential details about the state and configuration of a terminal during network interactions. This is a crucial class in the Bittensor framework. - - The TerminalInfo class contains information such as HTTP status codes and messages, processing times, - IP addresses, ports, Bittensor version numbers, and unique identifiers. These details are vital for - maintaining network reliability, security, and efficient data flow within the Bittensor network. - - This class includes Pydantic validators and root validators to enforce data integrity and format. It is - designed to be used natively within Synapses, so that you will not need to call this directly, but rather - is used as a helper class for Synapses. - - Args: - status_code (int): HTTP status code indicating the result of a network request. Essential for identifying the outcome of network interactions. - status_message (str): Descriptive message associated with the status code, providing additional context about the request's result. - process_time (float): Time taken by the terminal to process the call, important for performance monitoring and optimization. 
- ip (str): IP address of the terminal, crucial for network routing and data transmission. - port (int): Network port used by the terminal, key for establishing network connections. - version (int): Bittensor version running on the terminal, ensuring compatibility between different nodes in the network. - nonce (int): Unique, monotonically increasing number for each terminal, aiding in identifying and ordering network interactions. - uuid (str): Unique identifier for the terminal, fundamental for network security and identification. - hotkey (str): Encoded hotkey string of the terminal wallet, important for transaction and identity verification in the network. - signature (str): Digital signature verifying the tuple of nonce, axon_hotkey, dendrite_hotkey, and uuid, critical for ensuring data authenticity and security. - - Usage:: - - # Creating a TerminalInfo instance - terminal_info = TerminalInfo( - status_code=200, - status_message="Success", - process_time=0.1, - ip="198.123.23.1", - port=9282, - version=111, - nonce=111111, - uuid="5ecbd69c-1cec-11ee-b0dc-e29ce36fec1a", - hotkey="5EnjDGNqqWnuL2HCAdxeEtN2oqtXZw6BMBe936Kfy2PFz1J1", - signature="0x0813029319030129u4120u10841824y0182u091u230912u" - ) - - # Accessing TerminalInfo attributes - ip_address = terminal_info.ip - processing_duration = terminal_info.process_time - - # TerminalInfo can be used to monitor and verify network interactions, ensuring proper communication and security within the Bittensor network. - - TerminalInfo plays a pivotal role in providing transparency and control over network operations, making it an indispensable tool for developers and users interacting with the Bittensor ecosystem. 
- """ - - model_config = ConfigDict(validate_assignment=True) - - # The HTTP status code from: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status - status_code: Optional[int] = Field( - title="status_code", - description="The HTTP status code from: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status", - examples=[200], - default=None, - frozen=False, - ) - - # The HTTP status code from: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status - status_message: Optional[str] = Field( - title="status_message", - description="The status_message associated with the status_code", - examples=["Success"], - default=None, - frozen=False, - ) - - # Process time on this terminal side of call - process_time: Optional[float] = Field( - title="process_time", - description="Process time on this terminal side of call", - examples=[0.1], - default=None, - frozen=False, - ) - - # The terminal ip. - ip: Optional[str] = Field( - title="ip", - description="The ip of the axon receiving the request.", - examples=["198.123.23.1"], - default=None, - frozen=False, - ) - - # The host port of the terminal. - port: Optional[int] = Field( - title="port", - description="The port of the terminal.", - examples=["9282"], - default=None, - frozen=False, - ) - - # The bittensor version on the terminal as an int. - version: Optional[int] = Field( - title="version", - description="The bittensor version on the axon as str(int)", - examples=[111], - default=None, - frozen=False, - ) - - # A Unix timestamp to associate with the terminal - nonce: Optional[int] = Field( - title="nonce", - description="A Unix timestamp that prevents replay attacks", - examples=[111111], - default=None, - frozen=False, - ) - - # A unique identifier associated with the terminal, set on the axon side. 
- uuid: Optional[str] = Field( - title="uuid", - description="A unique identifier associated with the terminal", - examples=["5ecbd69c-1cec-11ee-b0dc-e29ce36fec1a"], - default=None, - frozen=False, - ) - - # The bittensor version on the terminal as an int. - hotkey: Optional[str] = Field( - title="hotkey", - description="The ss58 encoded hotkey string of the terminal wallet.", - examples=["5EnjDGNqqWnuL2HCAdxeEtN2oqtXZw6BMBe936Kfy2PFz1J1"], - default=None, - frozen=False, - ) - - # A signature verifying the tuple (axon_nonce, axon_hotkey, dendrite_hotkey, axon_uuid) - signature: Optional[str] = Field( - title="signature", - description="A signature verifying the tuple (nonce, axon_hotkey, dendrite_hotkey, uuid)", - examples=["0x0813029319030129u4120u10841824y0182u091u230912u"], - default=None, - frozen=False, - ) - - # Extract the process time on this terminal side of call as a float - _extract_process_time = field_validator("process_time", mode="before")(cast_float) - - # Extract the host port of the terminal as an int - _extract_port = field_validator("port", mode="before")(cast_int) - - # Extract the bittensor version on the terminal as an int. - _extract_version = field_validator("version", mode="before")(cast_int) - - # Extract the Unix timestamp associated with the terminal as an int - _extract_nonce = field_validator("nonce", mode="before")(cast_int) - - # Extract the HTTP status code as an int - _extract_status_code = field_validator("status_code", mode="before")(cast_int) - - -class Synapse(BaseModel): - """ - Represents a Synapse in the Bittensor network, serving as a communication schema between neurons (nodes). - - Synapses ensure the format and correctness of transmission tensors according to the Bittensor protocol. - Each Synapse type is tailored for a specific machine learning (ML) task, following unique compression and - communication processes. This helps maintain sanitized, correct, and useful information flow across the network. 
- - The Synapse class encompasses essential network properties such as HTTP route names, timeouts, request sizes, and - terminal information. It also includes methods for serialization, deserialization, attribute setting, and hash - computation, ensuring secure and efficient data exchange in the network. - - The class includes Pydantic validators and root validators to enforce data integrity and format. Additionally, - properties like ``is_success``, ``is_failure``, ``is_timeout``, etc., provide convenient status checks based on - dendrite responses. - - Think of Bittensor Synapses as glorified pydantic wrappers that have been designed to be used in a distributed - network. They provide a standardized way to communicate between neurons, and are the primary mechanism for - communication between neurons in Bittensor. - - Key Features: - - 1. HTTP Route Name (``name`` attribute): - Enables the identification and proper routing of requests within the network. Essential for users - defining custom routes for specific machine learning tasks. - - 2. Query Timeout (``timeout`` attribute): - Determines the maximum duration allowed for a query, ensuring timely responses and network - efficiency. Crucial for users to manage network latency and response times, particularly in - time-sensitive applications. - - 3. Request Sizes (``total_size``, ``header_size`` attributes): - Keeps track of the size of request bodies and headers, ensuring efficient data transmission without - overloading the network. Important for users to monitor and optimize the data payload, especially - in bandwidth-constrained environments. - - 4. Terminal Information (``dendrite``, ``axon`` attributes): - Stores information about the dendrite (receiving end) and axon (sending end), facilitating communication - between nodes. Users can access detailed information about the communication endpoints, aiding in - debugging and network analysis. - - 5. 
Body Hash Computation (``computed_body_hash``, ``required_hash_fields``): - Ensures data integrity and security by computing hashes of transmitted data. Provides users with a - mechanism to verify data integrity and detect any tampering during transmission. - It is recommended that names of fields in `required_hash_fields` are listed in the order they are - defined in the class. - - 6. Serialization and Deserialization Methods: - Facilitates the conversion of Synapse objects to and from a format suitable for network transmission. - Essential for users who need to customize data formats for specific machine learning models or tasks. - - 7. Status Check Properties (``is_success``, ``is_failure``, ``is_timeout``, etc.): - Provides quick and easy methods to check the status of a request, improving error handling and - response management. Users can efficiently handle different outcomes of network requests, enhancing - the robustness of their applications. - - Example usage:: - - # Creating a Synapse instance with default values - synapse = Synapse() - - # Setting properties and input - synapse.timeout = 15.0 - synapse.name = "MySynapse" - # Not setting fields that are not defined in your synapse class will result in an error, e.g.: - synapse.dummy_input = 1 # This will raise an error because dummy_input is not defined in the Synapse class - - # Get a dictionary of headers and body from the synapse instance - synapse_dict = synapse.model_dump_json() - - # Get a dictionary of headers from the synapse instance - headers = synapse.to_headers() - - # Reconstruct the synapse from headers using the classmethod 'from_headers' - synapse = Synapse.from_headers(headers) - - # Deserialize synapse after receiving it over the network, controlled by `deserialize` method - deserialized_synapse = synapse.deserialize() - - # Checking the status of the request - if synapse.is_success: - print("Request succeeded") - - # Checking and setting the status of the request - 
print(synapse.axon.status_code) - synapse.axon.status_code = 408 # Timeout - - Args: - name (str): HTTP route name, set on :func:`axon.attach`. - timeout (float): Total query length, set by the dendrite terminal. - total_size (int): Total size of request body in bytes. - header_size (int): Size of request header in bytes. - dendrite (TerminalInfo): Information about the dendrite terminal. - axon (TerminalInfo): Information about the axon terminal. - computed_body_hash (str): Computed hash of the request body. - required_hash_fields (List[str]): Fields required to compute the body hash. - - Methods: - deserialize: Custom deserialization logic for subclasses. - __setattr__: Override method to make ``required_hash_fields`` read-only. - get_total_size: Calculates and returns the total size of the object. - to_headers: Constructs a dictionary of headers from instance properties. - body_hash: Computes a SHA3-256 hash of the serialized body. - parse_headers_to_inputs: Parses headers to construct an inputs dictionary. - from_headers: Creates an instance from a headers dictionary. - - This class is a cornerstone in the Bittensor framework, providing the necessary tools for secure, efficient, and - standardized communication in a decentralized environment. - """ - - model_config = ConfigDict(validate_assignment=True) - _model_json_schema: ClassVar[Dict[str, Any]] - - def deserialize(self) -> "Synapse": - """ - Deserializes the Synapse object. - - This method is intended to be overridden by subclasses for custom deserialization logic. - In the context of the Synapse superclass, this method simply returns the instance itself. - When inheriting from this class, subclasses should provide their own implementation for - deserialization if specific deserialization behavior is desired. - - By default, if a subclass does not provide its own implementation of this method, the - Synapse's deserialize method will be used, returning the object instance as-is. 
- - In its default form, this method simply returns the instance of the Synapse itself without any modifications. Subclasses of Synapse can override this method to add specific deserialization behaviors, such as converting serialized data back into complex object types or performing additional data integrity checks. - - Example:: - - class CustomSynapse(Synapse): - additional_data: str - - def deserialize(self) -> "CustomSynapse": - # Custom deserialization logic - # For example, decoding a base64 encoded string in 'additional_data' - if self.additional_data: - self.additional_data = base64.b64decode(self.additional_data).decode('utf-8') - return self - - serialized_data = '{"additional_data": "SGVsbG8gV29ybGQ="}' # Base64 for 'Hello World' - custom_synapse = CustomSynapse.model_validate_json(serialized_data) - deserialized_synapse = custom_synapse.deserialize() - - # deserialized_synapse.additional_data would now be 'Hello World' - - Returns: - Synapse: The deserialized Synapse object. In this default implementation, it returns the object itself. - """ - return self - - @model_validator(mode="before") - def set_name_type(cls, values) -> dict: - values["name"] = cls.__name__ # type: ignore - return values - - # Defines the http route name which is set on axon.attach( callable( request: RequestName )) - name: Optional[str] = Field( - title="name", - description="Defines the http route name which is set on axon.attach( callable( request: RequestName ))", - examples=["Forward"], - frozen=False, - default=None, - repr=False, - ) - - # The call timeout, set by the dendrite terminal. - timeout: Optional[float] = Field( - title="timeout", - description="Defines the total query length.", - examples=[12.0], - default=12.0, - frozen=False, - repr=False, - ) - - # The call timeout, set by the dendrite terminal. 
- total_size: Optional[int] = Field( - title="total_size", - description="Total size of request body in bytes.", - examples=[1000], - default=0, - frozen=False, - repr=False, - ) - - # The call timeout, set by the dendrite terminal. - header_size: Optional[int] = Field( - title="header_size", - description="Size of request header in bytes.", - examples=[1000], - default=0, - frozen=False, - repr=False, - ) - - # The dendrite Terminal Information. - dendrite: Optional[TerminalInfo] = Field( - title="dendrite", - description="Dendrite Terminal Information", - examples=["bittensor.TerminalInfo"], - default=TerminalInfo(), - frozen=False, - repr=False, - ) - - # A axon terminal information - axon: Optional[TerminalInfo] = Field( - title="axon", - description="Axon Terminal Information", - examples=["bittensor.TerminalInfo"], - default=TerminalInfo(), - frozen=False, - repr=False, - ) - - computed_body_hash: Optional[str] = Field( - title="computed_body_hash", - description="The computed body hash of the request.", - examples=["0x0813029319030129u4120u10841824y0182u091u230912u"], - default="", - frozen=True, - repr=False, - ) - - required_hash_fields: ClassVar[Tuple[str, ...]] = () - - _extract_total_size = field_validator("total_size", mode="before")(cast_int) - - _extract_header_size = field_validator("header_size", mode="before")(cast_int) - - _extract_timeout = field_validator("timeout", mode="before")(cast_float) - - def __setattr__(self, name: str, value: Any): - """ - Override the :func:`__setattr__` method to make the ``required_hash_fields`` property read-only. - - This is a security mechanism such that the ``required_hash_fields`` property cannot be - overridden by the user or malicious code. - """ - if name == "body_hash": - raise AttributeError( - "body_hash property is read-only and cannot be overridden." - ) - super().__setattr__(name, value) - - def get_total_size(self) -> int: - """ - Get the total size of the current object. 
- - This method first calculates the size of the current object, then assigns it - to the instance variable :func:`self.total_size` and finally returns this value. - - Returns: - int: The total size of the current object. - """ - self.total_size = get_size(self) - return self.total_size - - @property - def is_success(self) -> bool: - """ - Checks if the dendrite's status code indicates success. - - This method returns ``True`` if the status code of the dendrite is ``200``, - which typically represents a successful HTTP request. - - Returns: - bool: ``True`` if dendrite's status code is ``200``, ``False`` otherwise. - """ - return self.dendrite is not None and self.dendrite.status_code == 200 - - @property - def is_failure(self) -> bool: - """ - Checks if the dendrite's status code indicates failure. - - This method returns ``True`` if the status code of the dendrite is not ``200``, - which would mean the HTTP request was not successful. - - Returns: - bool: ``True`` if dendrite's status code is not ``200``, ``False`` otherwise. - """ - return self.dendrite is not None and self.dendrite.status_code != 200 - - @property - def is_timeout(self) -> bool: - """ - Checks if the dendrite's status code indicates a timeout. - - This method returns ``True`` if the status code of the dendrite is ``408``, - which is the HTTP status code for a request timeout. - - Returns: - bool: ``True`` if dendrite's status code is ``408``, ``False`` otherwise. - """ - return self.dendrite is not None and self.dendrite.status_code == 408 - - @property - def is_blacklist(self) -> bool: - """ - Checks if the dendrite's status code indicates a blacklisted request. - - This method returns ``True`` if the status code of the dendrite is ``403``, - which is the HTTP status code for a forbidden request. - - Returns: - bool: ``True`` if dendrite's status code is ``403``, ``False`` otherwise. 
- """ - return self.dendrite is not None and self.dendrite.status_code == 403 - - @property - def failed_verification(self) -> bool: - """ - Checks if the dendrite's status code indicates failed verification. - - This method returns ``True`` if the status code of the dendrite is ``401``, - which is the HTTP status code for unauthorized access. - - Returns: - bool: ``True`` if dendrite's status code is ``401``, ``False`` otherwise. - """ - return self.dendrite is not None and self.dendrite.status_code == 401 - - @classmethod - def _get_cached_model_json_schema(cls) -> dict: - """ - Returns the JSON schema for the Synapse model. - - This method returns a cached version of the JSON schema for the Synapse model. - The schema is stored in the class variable ``_model_json_schema`` and is only - generated once to improve performance. - - Returns: - dict: The JSON schema for the Synapse model. - """ - if "_model_json_schema" not in cls.__dict__: - cls._model_json_schema = cls.model_json_schema() - return cls._model_json_schema - - def get_required_fields(self): - """ - Get the required fields from the model's JSON schema. - """ - schema = self._get_cached_model_json_schema() - return schema.get("required", []) - - def to_headers(self) -> dict: - """ - Converts the state of a Synapse instance into a dictionary of HTTP headers. - - This method is essential for - packaging Synapse data for network transmission in the Bittensor framework, ensuring that each key aspect of - the Synapse is represented in a format suitable for HTTP communication. - - Process: - - 1. Basic Information: It starts by including the ``name`` and ``timeout`` of the Synapse, which are fundamental for identifying the query and managing its lifespan on the network. - 2. Complex Objects: The method serializes the ``axon`` and ``dendrite`` objects, if present, into strings. This serialization is crucial for preserving the state and structure of these objects over the network. - 3. 
Encoding: Non-optional complex objects are serialized and encoded in base64, making them safe for HTTP transport. - 4. Size Metrics: The method calculates and adds the size of headers and the total object size, providing valuable information for network bandwidth management. - - Example Usage:: - - synapse = Synapse(name="ExampleSynapse", timeout=30) - headers = synapse.to_headers() - # headers now contains a dictionary representing the Synapse instance - - Returns: - dict: A dictionary containing key-value pairs representing the Synapse's properties, suitable for HTTP communication. - """ - # Initializing headers with 'name' and 'timeout' - headers = {"name": self.name, "timeout": str(self.timeout)} - - # Adding headers for 'axon' and 'dendrite' if they are not None - if self.axon: - headers.update( - { - f"bt_header_axon_{k}": str(v) - for k, v in self.axon.model_dump().items() - if v is not None - } - ) - if self.dendrite: - headers.update( - { - f"bt_header_dendrite_{k}": str(v) - for k, v in self.dendrite.model_dump().items() - if v is not None - } - ) - - # Getting the fields of the instance - instance_fields = self.model_dump() - - required = set(self.get_required_fields()) - # Iterating over the fields of the instance - for field, value in instance_fields.items(): - # If the object is not optional, serializing it, encoding it, and adding it to the headers - # Skipping the field if it's already in the headers or its value is None - if field in headers or value is None: - continue - - elif field in required: - try: - # create an empty (dummy) instance of type(value) to pass pydantic validation on the axon side - serialized_value = json.dumps(value.__class__.__call__()) - encoded_value = base64.b64encode(serialized_value.encode()).decode( - "utf-8" - ) - headers[f"bt_header_input_obj_{field}"] = encoded_value - except TypeError as e: - raise ValueError( - f"Error serializing {field} with value {value}. Objects must be json serializable." 
- ) from e - - # Adding the size of the headers and the total size to the headers - headers["header_size"] = str(sys.getsizeof(headers)) - headers["total_size"] = str(self.get_total_size()) - headers["computed_body_hash"] = self.body_hash - - return headers - - @property - def body_hash(self) -> str: - """ - Computes a SHA3-256 hash of the serialized body of the Synapse instance. - - This hash is used to - ensure the data integrity and security of the Synapse instance when it's transmitted across the - network. It is a crucial feature for verifying that the data received is the same as the data sent. - - Process: - - 1. Iterates over each required field as specified in ``required_hash_fields``. - 2. Concatenates the string representation of these fields. - 3. Applies SHA3-256 hashing to the concatenated string to produce a unique fingerprint of the data. - - Example:: - - synapse = Synapse(name="ExampleRoute", timeout=10) - hash_value = synapse.body_hash - # hash_value is the SHA3-256 hash of the serialized body of the Synapse instance - - Returns: - str: The SHA3-256 hash as a hexadecimal string, providing a fingerprint of the Synapse instance's data for integrity checks. - """ - hashes = [] - - hash_fields_field = self.model_fields.get("required_hash_fields") - instance_fields = None - if hash_fields_field: - warnings.warn( - "The 'required_hash_fields' field handling deprecated and will be removed. 
" - "Please update Synapse class definition to use 'required_hash_fields' class variable instead.", - DeprecationWarning, - ) - required_hash_fields = hash_fields_field.default - - if required_hash_fields: - instance_fields = self.model_dump() - # Preserve backward compatibility in which fields will added in .model_dump() order - # instead of the order one from `self.required_hash_fields` - required_hash_fields = [ - field for field in instance_fields if field in required_hash_fields - ] - - # Hack to cache the required hash fields names - if len(required_hash_fields) == len(required_hash_fields): - self.__class__.required_hash_fields = tuple(required_hash_fields) - else: - required_hash_fields = self.__class__.required_hash_fields - - if required_hash_fields: - instance_fields = instance_fields or self.model_dump() - for field in required_hash_fields: - hashes.append(bittensor.utils.hash(str(instance_fields[field]))) - - return bittensor.utils.hash("".join(hashes)) - - @classmethod - def parse_headers_to_inputs(cls, headers: dict) -> dict: - """ - Interprets and transforms a given dictionary of headers into a structured dictionary, facilitating the reconstruction of Synapse objects. - - This method is essential for parsing network-transmitted - data back into a Synapse instance, ensuring data consistency and integrity. - - Process: - - 1. Separates headers into categories based on prefixes (``axon``, ``dendrite``, etc.). - 2. Decodes and deserializes ``input_obj`` headers into their original objects. - 3. Assigns simple fields directly from the headers to the input dictionary. - - Example:: - - received_headers = { - 'bt_header_axon_address': '127.0.0.1', - 'bt_header_dendrite_port': '8080', - # Other headers... 
- } - inputs = Synapse.parse_headers_to_inputs(received_headers) - # inputs now contains a structured representation of Synapse properties based on the headers - - Note: - This is handled automatically when calling :func:`Synapse.from_headers(headers)` and does not need to be called directly. - - Args: - headers (dict): The headers dictionary to parse. - - Returns: - dict: A structured dictionary representing the inputs for constructing a Synapse instance. - """ - - # Initialize the input dictionary with empty sub-dictionaries for 'axon' and 'dendrite' - inputs_dict: Dict[str, Dict[str, str]] = {"axon": {}, "dendrite": {}} - - # Iterate over each item in the headers - for key, value in headers.items(): - # Handle 'axon' headers - if "bt_header_axon_" in key: - try: - new_key = key.split("bt_header_axon_")[1] - inputs_dict["axon"][new_key] = value - except Exception as e: - bittensor.logging.error( - f"Error while parsing 'axon' header {key}: {e}" - ) - continue - # Handle 'dendrite' headers - elif "bt_header_dendrite_" in key: - try: - new_key = key.split("bt_header_dendrite_")[1] - inputs_dict["dendrite"][new_key] = value - except Exception as e: - bittensor.logging.error( - f"Error while parsing 'dendrite' header {key}: {e}" - ) - continue - # Handle 'input_obj' headers - elif "bt_header_input_obj" in key: - try: - new_key = key.split("bt_header_input_obj_")[1] - # Skip if the key already exists in the dictionary - if new_key in inputs_dict: - continue - # Decode and load the serialized object - inputs_dict[new_key] = json.loads( - base64.b64decode(value.encode()).decode("utf-8") - ) - except json.JSONDecodeError as e: - bittensor.logging.error( - f"Error while json decoding 'input_obj' header {key}: {e}" - ) - continue - except Exception as e: - bittensor.logging.error( - f"Error while parsing 'input_obj' header {key}: {e}" - ) - continue - else: - pass # TODO: log unexpected keys - - # Assign the remaining known headers directly - inputs_dict["timeout"] = 
headers.get("timeout", None) - inputs_dict["name"] = headers.get("name", None) - inputs_dict["header_size"] = headers.get("header_size", None) - inputs_dict["total_size"] = headers.get("total_size", None) - inputs_dict["computed_body_hash"] = headers.get("computed_body_hash", None) - - return inputs_dict - - @classmethod - def from_headers(cls, headers: dict) -> "Synapse": - """ - Constructs a new Synapse instance from a given headers dictionary, enabling the re-creation of the Synapse's state as it was prior to network transmission. - - This method is a key part of the - deserialization process in the Bittensor network, allowing nodes to accurately reconstruct Synapse - objects from received data. - - Example:: - - received_headers = { - 'bt_header_axon_address': '127.0.0.1', - 'bt_header_dendrite_port': '8080', - # Other headers... - } - synapse = Synapse.from_headers(received_headers) - # synapse is a new Synapse instance reconstructed from the received headers - - Args: - headers (dict): The dictionary of headers containing serialized Synapse information. - - Returns: - Synapse: A new instance of Synapse, reconstructed from the parsed header information, replicating the original instance's state. 
- """ - - # Get the inputs dictionary from the headers - input_dict = cls.parse_headers_to_inputs(headers) - - # Use the dictionary unpacking operator to pass the inputs to the class constructor - synapse = cls(**input_dict) - - return synapse diff --git a/bittensor/tensor.py b/bittensor/tensor.py deleted file mode 100644 index ab46560d99..0000000000 --- a/bittensor/tensor.py +++ /dev/null @@ -1,250 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import numpy as np -import base64 -import msgpack -import msgpack_numpy -from typing import Optional, Union, List -from bittensor.utils.registration import torch, use_torch -from pydantic import ConfigDict, BaseModel, Field, field_validator - - -class DTypes(dict): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.torch: bool = False - self.update( - { - "float16": np.float16, - "float32": np.float32, - "float64": np.float64, - "uint8": np.uint8, - "int16": np.int16, - "int8": np.int8, - "int32": np.int32, - "int64": np.int64, - "bool": bool, - } - ) - - def __getitem__(self, key): - self._add_torch() - return super().__getitem__(key) - - def __contains__(self, key): - self._add_torch() - return super().__contains__(key) - - def _add_torch(self): - if self.torch is False: - torch_dtypes = { - "torch.float16": torch.float16, - "torch.float32": torch.float32, - "torch.float64": torch.float64, - "torch.uint8": torch.uint8, - "torch.int16": torch.int16, - "torch.int8": torch.int8, - "torch.int32": torch.int32, - "torch.int64": torch.int64, - "torch.bool": torch.bool, - } - self.update(torch_dtypes) - self.torch = True - - -dtypes = DTypes() - - -def cast_dtype(raw: Union[None, np.dtype, "torch.dtype", str]) -> Optional[str]: - """ - Casts the raw value to a string representing the - `numpy data type `_, or the - `torch data type `_ if using torch. - - Args: - raw (Union[None, numpy.dtype, torch.dtype, str]): The raw value to cast. - - Returns: - str: The string representing the numpy/torch data type. - - Raises: - Exception: If the raw value is of an invalid type. 
- """ - if not raw: - return None - if use_torch() and isinstance(raw, torch.dtype): - return dtypes[raw] - elif isinstance(raw, np.dtype): - return dtypes[raw] - elif isinstance(raw, str): - if use_torch(): - assert raw in dtypes, f"{raw} not a valid torch type in dict {dtypes}" - return raw - else: - assert raw in dtypes, f"{raw} not a valid numpy type in dict {dtypes}" - return raw - else: - raise Exception( - f"{raw} of type {type(raw)} does not have a valid type in Union[None, numpy.dtype, torch.dtype, str]" - ) - - -def cast_shape(raw: Union[None, List[int], str]) -> Optional[Union[str, list]]: - """ - Casts the raw value to a string representing the tensor shape. - - Args: - raw (Union[None, List[int], str]): The raw value to cast. - - Returns: - str: The string representing the tensor shape. - - Raises: - Exception: If the raw value is of an invalid type or if the list elements are not of type int. - """ - if not raw: - return None - elif isinstance(raw, list): - if len(raw) == 0 or isinstance(raw[0], int): - return raw - else: - raise Exception(f"{raw} list elements are not of type int") - elif isinstance(raw, str): - shape = list(map(int, raw.split("[")[1].split("]")[0].split(","))) - return shape - else: - raise Exception( - f"{raw} of type {type(raw)} does not have a valid type in Union[None, List[int], str]" - ) - - -class tensor: - def __new__(cls, tensor: Union[list, np.ndarray, "torch.Tensor"]): - if isinstance(tensor, list) or isinstance(tensor, np.ndarray): - tensor = torch.tensor(tensor) if use_torch() else np.array(tensor) - return Tensor.serialize(tensor_=tensor) - - -class Tensor(BaseModel): - """ - Represents a Tensor object. - - Args: - buffer (Optional[str]): Tensor buffer data. - dtype (str): Tensor data type. - shape (List[int]): Tensor shape. 
- """ - - model_config = ConfigDict(validate_assignment=True) - - def tensor(self) -> Union[np.ndarray, "torch.Tensor"]: - return self.deserialize() - - def tolist(self) -> List[object]: - return self.deserialize().tolist() - - def numpy(self) -> "numpy.ndarray": - return ( - self.deserialize().detach().numpy() if use_torch() else self.deserialize() - ) - - def deserialize(self) -> Union["np.ndarray", "torch.Tensor"]: - """ - Deserializes the Tensor object. - - Returns: - np.array or torch.Tensor: The deserialized tensor object. - - Raises: - Exception: If the deserialization process encounters an error. - """ - shape = tuple(self.shape) - buffer_bytes = base64.b64decode(self.buffer.encode("utf-8")) - numpy_object = msgpack.unpackb( - buffer_bytes, object_hook=msgpack_numpy.decode - ).copy() - if use_torch(): - torch_object = torch.as_tensor(numpy_object) - # Reshape does not work for (0) or [0] - if not (len(shape) == 1 and shape[0] == 0): - torch_object = torch_object.reshape(shape) - return torch_object.type(dtypes[self.dtype]) - else: - # Reshape does not work for (0) or [0] - if not (len(shape) == 1 and shape[0] == 0): - numpy_object = numpy_object.reshape(shape) - return numpy_object.astype(dtypes[self.dtype]) - - @staticmethod - def serialize(tensor_: Union["np.ndarray", "torch.Tensor"]) -> "Tensor": - """ - Serializes the given tensor. - - Args: - tensor_ (np.array or torch.Tensor): The tensor to serialize. - - Returns: - Tensor: The serialized tensor. - - Raises: - Exception: If the serialization process encounters an error. - """ - dtype = str(tensor_.dtype) - shape = list(tensor_.shape) - if len(shape) == 0: - shape = [0] - tensor__ = tensor_.cpu().detach().numpy().copy() if use_torch() else tensor_ - data_buffer = base64.b64encode( - msgpack.packb(tensor__, default=msgpack_numpy.encode) - ).decode("utf-8") - return Tensor(buffer=data_buffer, shape=shape, dtype=dtype) - - # Represents the tensor buffer data. 
- buffer: Optional[str] = Field( - default=None, - title="buffer", - description="Tensor buffer data. This field stores the serialized representation of the tensor data.", - examples=["0x321e13edqwds231231231232131"], - frozen=True, - repr=False, - ) - - # Represents the data type of the tensor. - dtype: str = Field( - title="dtype", - description="Tensor data type. This field specifies the data type of the tensor, such as numpy.float32 or torch.int64.", - examples=["np.float32"], - frozen=True, - repr=True, - ) - - # Represents the shape of the tensor. - shape: List[int] = Field( - title="shape", - description="Tensor shape. This field defines the dimensions of the tensor as a list of integers, such as [10, 10] for a 2D tensor with shape (10, 10).", - examples=[10, 10], - frozen=True, - repr=True, - ) - - # Extract the represented shape of the tensor. - _extract_shape = field_validator("shape", mode="before")(cast_shape) - - # Extract the represented data type of the tensor. - _extract_dtype = field_validator("dtype", mode="before")(cast_dtype) diff --git a/bittensor/threadpool.py b/bittensor/threadpool.py deleted file mode 100644 index 3e49786ff6..0000000000 --- a/bittensor/threadpool.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Implements `ThreadPoolExecutor `_.""" - -__author__ = "Brian Quinlan (brian@sweetapp.com)" - -import os -import sys -import time -import queue -import random -import weakref -import logging -import argparse -import bittensor -import itertools -import threading - -from typing import Callable -from concurrent.futures import _base - -from bittensor.btlogging.defines import BITTENSOR_LOGGER_NAME - -# Workers are created as daemon threads. This is done to allow the interpreter -# to exit when there are still idle threads in a ThreadPoolExecutor's thread -# pool (i.e. shutdown() was not called). 
However, allowing workers to die with -# the interpreter has two undesirable properties: -# - The workers would still be running during interpreter shutdown, -# meaning that they would fail in unpredictable ways. -# - The workers could be killed while evaluating a work item, which could -# be bad if the callable being evaluated has external side-effects e.g. -# writing to a file. -# -# To work around this problem, an exit handler is installed which tells the -# workers to exit when their work queues are empty and then waits until the -# threads finish. - -logger = logging.getLogger(BITTENSOR_LOGGER_NAME) - -_threads_queues = weakref.WeakKeyDictionary() -_shutdown = False - - -class _WorkItem(object): - def __init__(self, future, fn, start_time, args, kwargs): - self.future = future - self.fn = fn - self.start_time = start_time - self.args = args - self.kwargs = kwargs - - def run(self): - """Run the given work item""" - # Checks if future is canceled or if work item is stale - if (not self.future.set_running_or_notify_cancel()) or ( - time.time() - self.start_time > bittensor.__blocktime__ - ): - return - - try: - result = self.fn(*self.args, **self.kwargs) - except BaseException as exc: - self.future.set_exception(exc) - # Break a reference cycle with the exception 'exc' - self = None - else: - self.future.set_result(result) - - -NULL_ENTRY = (sys.maxsize, _WorkItem(None, None, time.time(), (), {})) - - -def _worker(executor_reference, work_queue, initializer, initargs): - if initializer is not None: - try: - initializer(*initargs) - except BaseException: - _base.LOGGER.critical("Exception in initializer:", exc_info=True) - executor = executor_reference() - if executor is not None: - executor._initializer_failed() - return - try: - while True: - work_item = work_queue.get(block=True) - priority = work_item[0] - item = work_item[1] - if priority == sys.maxsize: - del item - elif item is not None: - item.run() - # Delete references to object. 
See issue16284 - del item - continue - - executor = executor_reference() - # Exit if: - # - The interpreter is shutting down OR - # - The executor that owns the worker has been collected OR - # - The executor that owns the worker has been shutdown. - if _shutdown or executor is None or executor._shutdown: - # Flag the executor as shutting down as early as possible if it - # is not gc-ed yet. - if executor is not None: - executor._shutdown = True - # Notice other workers - work_queue.put(NULL_ENTRY) - return - del executor - except BaseException: - logger.error("work_item", work_item) - _base.LOGGER.critical("Exception in worker", exc_info=True) - - -class BrokenThreadPool(_base.BrokenExecutor): - """ - Raised when a worker thread in a `ThreadPoolExecutor `_ failed initializing. - """ - - -class PriorityThreadPoolExecutor(_base.Executor): - """Base threadpool executor with a priority queue""" - - # Used to assign unique thread names when thread_name_prefix is not supplied. - _counter = itertools.count().__next__ - - def __init__( - self, - maxsize=-1, - max_workers=None, - thread_name_prefix="", - initializer=None, - initargs=(), - ): - """Initializes a new `ThreadPoolExecutor `_ instance. - - Args: - max_workers: The maximum number of threads that can be used to - execute the given calls. - thread_name_prefix: An optional name prefix to give our threads. - initializer: An callable used to initialize worker threads. - initargs: A tuple of arguments to pass to the initializer. - """ - if max_workers is None: - # Use this number because ThreadPoolExecutor is often - # used to overlap I/O instead of CPU work. 
- max_workers = (os.cpu_count() or 1) * 5 - if max_workers <= 0: - raise ValueError("max_workers must be greater than 0") - - if initializer is not None and not callable(initializer): - raise TypeError("initializer must be a callable") - - self._max_workers = max_workers - self._work_queue = queue.PriorityQueue(maxsize=maxsize) - self._idle_semaphore = threading.Semaphore(0) - self._threads = set() - self._broken = False - self._shutdown = False - self._shutdown_lock = threading.Lock() - self._thread_name_prefix = thread_name_prefix or ( - "ThreadPoolExecutor-%d" % self._counter() - ) - self._initializer = initializer - self._initargs = initargs - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser, prefix: str = None): - """Accept specific arguments from parser""" - prefix_str = "" if prefix == None else prefix + "." - try: - default_max_workers = ( - os.getenv("BT_PRIORITY_MAX_WORKERS") - if os.getenv("BT_PRIORITY_MAX_WORKERS") != None - else 5 - ) - default_maxsize = ( - os.getenv("BT_PRIORITY_MAXSIZE") - if os.getenv("BT_PRIORITY_MAXSIZE") != None - else 10 - ) - parser.add_argument( - "--" + prefix_str + "priority.max_workers", - type=int, - help="""maximum number of threads in thread pool""", - default=default_max_workers, - ) - parser.add_argument( - "--" + prefix_str + "priority.maxsize", - type=int, - help="""maximum size of tasks in priority queue""", - default=default_maxsize, - ) - except argparse.ArgumentError: - # re-parsing arguments. - pass - - @classmethod - def config(cls) -> "bittensor.config": - """Get config from the argument parser. - - Return: :func:`bittensor.config` object. 
- """ - parser = argparse.ArgumentParser() - PriorityThreadPoolExecutor.add_args(parser) - return bittensor.config(parser, args=[]) - - @property - def is_empty(self): - return self._work_queue.empty() - - def submit(self, fn: Callable, *args, **kwargs) -> _base.Future: - with self._shutdown_lock: - if self._broken: - raise BrokenThreadPool(self._broken) - - if self._shutdown: - raise RuntimeError("cannot schedule new futures after shutdown") - if _shutdown: - raise RuntimeError( - "cannot schedule new futures after " "interpreter shutdown" - ) - - priority = kwargs.get("priority", random.randint(0, 1000000)) - if priority == 0: - priority = random.randint(1, 100) - epsilon = random.uniform(0, 0.01) * priority - start_time = time.time() - if "priority" in kwargs: - del kwargs["priority"] - - f = _base.Future() - w = _WorkItem(f, fn, start_time, args, kwargs) - self._work_queue.put((-float(priority + epsilon), w), block=False) - self._adjust_thread_count() - return f - - submit.__doc__ = _base.Executor.submit.__doc__ - - def _adjust_thread_count(self): - # if idle threads are available, don't spin new threads - if self._idle_semaphore.acquire(timeout=0): - return - - # When the executor gets lost, the weakref callback will wake up - # the worker threads. 
- def weakref_cb(_, q=self._work_queue): - q.put(NULL_ENTRY) - - num_threads = len(self._threads) - if num_threads < self._max_workers: - thread_name = "%s_%d" % (self._thread_name_prefix or self, num_threads) - t = threading.Thread( - name=thread_name, - target=_worker, - args=( - weakref.ref(self, weakref_cb), - self._work_queue, - self._initializer, - self._initargs, - ), - ) - t.daemon = True - t.start() - self._threads.add(t) - _threads_queues[t] = self._work_queue - - def _initializer_failed(self): - with self._shutdown_lock: - self._broken = ( - "A thread initializer failed, the thread pool " "is not usable anymore" - ) - # Drain work queue and mark pending futures failed - while True: - try: - work_item = self._work_queue.get_nowait() - except queue.Empty: - break - if work_item is not None: - work_item.future.set_exception(BrokenThreadPool(self._broken)) - - def shutdown(self, wait=True): - with self._shutdown_lock: - self._shutdown = True - self._work_queue.put(NULL_ENTRY) - - if wait: - for t in self._threads: - try: - t.join(timeout=2) - except Exception: - pass - - shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/bittensor/types.py b/bittensor/types.py deleted file mode 100644 index 8aa9b7cde4..0000000000 --- a/bittensor/types.py +++ /dev/null @@ -1,42 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -from typing import TypedDict - - -class AxonServeCallParams(TypedDict): - """ - Axon serve chain call parameters. - """ - - version: int - ip: int - port: int - ip_type: int - netuid: int - - -class PrometheusServeCallParams(TypedDict): - """ - Prometheus serve chain call parameters. - """ - - version: int - ip: int - port: int - ip_type: int - netuid: int diff --git a/bittensor/utils/__init__.py b/bittensor/utils/__init__.py deleted file mode 100644 index 700a656131..0000000000 --- a/bittensor/utils/__init__.py +++ /dev/null @@ -1,282 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import hashlib -from typing import Callable, List, Dict, Literal, Tuple - -import numpy as np -import scalecodec - -import bittensor -from .registration import torch, use_torch -from .version import version_checking, check_version, VersionCheckError -from .wallet_utils import * # noqa F401 - -RAOPERTAO = 1e9 -U16_MAX = 65535 -U64_MAX = 18446744073709551615 - - -def ss58_to_vec_u8(ss58_address: str) -> List[int]: - ss58_bytes: bytes = bittensor.utils.ss58_address_to_bytes(ss58_address) - encoded_address: List[int] = [int(byte) for byte in ss58_bytes] - return encoded_address - - -def _unbiased_topk( - values: Union[np.ndarray, "torch.Tensor"], - k: int, - dim=0, - sorted=True, - largest=True, - axis=0, - return_type: str = "numpy", -) -> Union[Tuple[np.ndarray, np.ndarray], Tuple["torch.Tensor", "torch.LongTensor"]]: - """Selects topk as in torch.topk but does not bias lower indices when values are equal. - Args: - values: (np.ndarray) if using numpy, (torch.Tensor) if using torch: - Values to index into. - k: (int): - Number to take. - dim: (int): - Dimension to index into (used by Torch) - sorted: (bool): - Whether to sort indices. - largest: (bool): - Whether to take the largest value. - axis: (int): - Axis along which to index into (used by Numpy) - return_type: (str): - Whether or use torch or numpy approach - - Return: - topk: (np.ndarray) if using numpy, (torch.Tensor) if using torch: - topk k values. - indices: (np.ndarray) if using numpy, (torch.LongTensor) if using torch: - indices of the topk values. 
- """ - if return_type == "torch": - permutation = torch.randperm(values.shape[dim]) - permuted_values = values[permutation] - topk, indices = torch.topk( - permuted_values, k, dim=dim, sorted=sorted, largest=largest - ) - return topk, permutation[indices] - else: - if dim != 0 and axis == 0: - # Ensures a seamless transition for calls made to this function that specified args by keyword - axis = dim - - permutation = np.random.permutation(values.shape[axis]) - permuted_values = np.take(values, permutation, axis=axis) - indices = np.argpartition(permuted_values, -k, axis=axis)[-k:] - if not sorted: - indices = np.sort(indices, axis=axis) - if not largest: - indices = indices[::-1] - topk = np.take(permuted_values, indices, axis=axis) - return topk, permutation[indices] - - -def unbiased_topk( - values: Union[np.ndarray, "torch.Tensor"], - k: int, - dim: int = 0, - sorted: bool = True, - largest: bool = True, - axis: int = 0, -) -> Union[Tuple[np.ndarray, np.ndarray], Tuple["torch.Tensor", "torch.LongTensor"]]: - """Selects topk as in torch.topk but does not bias lower indices when values are equal. - Args: - values: (np.ndarray) if using numpy, (torch.Tensor) if using torch: - Values to index into. - k: (int): - Number to take. - dim: (int): - Dimension to index into (used by Torch) - sorted: (bool): - Whether to sort indices. - largest: (bool): - Whether to take the largest value. - axis: (int): - Axis along which to index into (used by Numpy) - - Return: - topk: (np.ndarray) if using numpy, (torch.Tensor) if using torch: - topk k values. - indices: (np.ndarray) if using numpy, (torch.LongTensor) if using torch: - indices of the topk values. 
- """ - if use_torch(): - return _unbiased_topk( - values, k, dim, sorted, largest, axis, return_type="torch" - ) - else: - return _unbiased_topk( - values, k, dim, sorted, largest, axis, return_type="numpy" - ) - - -def strtobool_with_default( - default: bool, -) -> Callable[[str], Union[bool, Literal["==SUPRESS=="]]]: - """ - Creates a strtobool function with a default value. - - Args: - default(bool): The default value to return if the string is empty. - - Returns: - The strtobool function with the default value. - """ - return lambda x: strtobool(x) if x != "" else default - - -def strtobool(val: str) -> Union[bool, Literal["==SUPRESS=="]]: - """ - Converts a string to a boolean value. - - truth-y values are 'y', 'yes', 't', 'true', 'on', and '1'; - false-y values are 'n', 'no', 'f', 'false', 'off', and '0'. - - Raises ValueError if 'val' is anything else. - """ - val = val.lower() - if val in ("y", "yes", "t", "true", "on", "1"): - return True - elif val in ("n", "no", "f", "false", "off", "0"): - return False - else: - raise ValueError("invalid truth value %r" % (val,)) - - -def get_explorer_root_url_by_network_from_map( - network: str, network_map: Dict[str, Dict[str, str]] -) -> Optional[Dict[str, str]]: - r""" - Returns the explorer root url for the given network name from the given network map. - - Args: - network(str): The network to get the explorer url for. - network_map(Dict[str, str]): The network map to get the explorer url from. - - Returns: - The explorer url for the given network. - Or None if the network is not in the network map. 
- """ - explorer_urls: Optional[Dict[str, str]] = {} - for entity_nm, entity_network_map in network_map.items(): - if network in entity_network_map: - explorer_urls[entity_nm] = entity_network_map[network] - - return explorer_urls - - -def get_explorer_url_for_network( - network: str, block_hash: str, network_map: Dict[str, str] -) -> Optional[List[str]]: - r""" - Returns the explorer url for the given block hash and network. - - Args: - network(str): The network to get the explorer url for. - block_hash(str): The block hash to get the explorer url for. - network_map(Dict[str, Dict[str, str]]): The network maps to get the explorer urls from. - - Returns: - The explorer url for the given block hash and network. - Or None if the network is not known. - """ - - explorer_urls: Optional[Dict[str, str]] = {} - # Will be None if the network is not known. i.e. not in network_map - explorer_root_urls: Optional[Dict[str, str]] = ( - get_explorer_root_url_by_network_from_map(network, network_map) - ) - - if explorer_root_urls != {}: - # We are on a known network. 
- explorer_opentensor_url = "{root_url}/query/{block_hash}".format( - root_url=explorer_root_urls.get("opentensor"), block_hash=block_hash - ) - explorer_taostats_url = "{root_url}/extrinsic/{block_hash}".format( - root_url=explorer_root_urls.get("taostats"), block_hash=block_hash - ) - explorer_urls["opentensor"] = explorer_opentensor_url - explorer_urls["taostats"] = explorer_taostats_url - - return explorer_urls - - -def ss58_address_to_bytes(ss58_address: str) -> bytes: - """Converts a ss58 address to a bytes object.""" - account_id_hex: str = scalecodec.ss58_decode( - ss58_address, bittensor.__ss58_format__ - ) - return bytes.fromhex(account_id_hex) - - -def U16_NORMALIZED_FLOAT(x: int) -> float: - return float(x) / float(U16_MAX) - - -def U64_NORMALIZED_FLOAT(x: int) -> float: - return float(x) / float(U64_MAX) - - -def u8_key_to_ss58(u8_key: List[int]) -> str: - r""" - Converts a u8-encoded account key to an ss58 address. - - Args: - u8_key (List[int]): The u8-encoded account key. - """ - # First byte is length, then 32 bytes of key. - return scalecodec.ss58_encode(bytes(u8_key).hex(), bittensor.__ss58_format__) - - -def hash(content, encoding="utf-8"): - sha3 = hashlib.sha3_256() - - # Update the hash object with the concatenated string - sha3.update(content.encode(encoding)) - - # Produce the hash - return sha3.hexdigest() - - -def format_error_message(error_message: dict) -> str: - """ - Formats an error message from the Subtensor error information to using in extrinsics. - - Args: - error_message (dict): A dictionary containing the error information from Subtensor. - - Returns: - str: A formatted error message string. 
- """ - err_type = "UnknownType" - err_name = "UnknownError" - err_description = "Unknown Description" - - if isinstance(error_message, dict): - err_type = error_message.get("type", err_type) - err_name = error_message.get("name", err_name) - err_docs = error_message.get("docs", []) - err_description = err_docs[0] if len(err_docs) > 0 else err_description - return f"Subtensor returned `{err_name} ({err_type})` error. This means: `{err_description}`" diff --git a/bittensor/utils/_register_cuda.py b/bittensor/utils/_register_cuda.py deleted file mode 100644 index 05619416e4..0000000000 --- a/bittensor/utils/_register_cuda.py +++ /dev/null @@ -1,126 +0,0 @@ -import binascii -import hashlib -import math -from typing import Tuple - -import numpy as np -from Crypto.Hash import keccak - -from contextlib import redirect_stdout -import io - - -def solve_cuda( - nonce_start: np.int64, - update_interval: np.int64, - tpb: int, - block_and_hotkey_hash_bytes: bytes, - difficulty: int, - limit: int, - dev_id: int = 0, -) -> Tuple[np.int64, bytes]: - """ - Solves the PoW problem using CUDA. - Args: - nonce_start: int64 - Starting nonce. - update_interval: int64 - Number of nonces to solve before updating block information. - tpb: int - Threads per block. - block_and_hotkey_hash_bytes: bytes - Keccak(Bytes of the block hash + bytes of the hotkey) 64 bytes. - difficulty: int256 - Difficulty of the PoW problem. - limit: int256 - Upper limit of the nonce. - dev_id: int (default=0) - The CUDA device ID - Returns: - Tuple[int64, bytes] - Tuple of the nonce and the seal corresponding to the solution. - Returns -1 for nonce if no solution is found. 
- """ - - try: - import cubit - except ImportError: - raise ImportError("Please install cubit") - - upper = int(limit // difficulty) - - upper_bytes = upper.to_bytes(32, byteorder="little", signed=False) - - def _hex_bytes_to_u8_list(hex_bytes: bytes): - hex_chunks = [ - int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2) - ] - return hex_chunks - - def _create_seal_hash(block_and_hotkey_hash_hex: bytes, nonce: int) -> bytes: - nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little")) - pre_seal = nonce_bytes + block_and_hotkey_hash_hex - seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest() - kec = keccak.new(digest_bits=256) - seal = kec.update(seal_sh256).digest() - return seal - - def _seal_meets_difficulty(seal: bytes, difficulty: int): - seal_number = int.from_bytes(seal, "big") - product = seal_number * difficulty - limit = int(math.pow(2, 256)) - 1 - - return product < limit - - # Call cython function - # int blockSize, uint64 nonce_start, uint64 update_interval, const unsigned char[:] limit, - # const unsigned char[:] block_bytes, int dev_id - block_and_hotkey_hash_hex = binascii.hexlify(block_and_hotkey_hash_bytes)[:64] - - solution = cubit.solve_cuda( - tpb, - nonce_start, - update_interval, - upper_bytes, - block_and_hotkey_hash_hex, - dev_id, - ) # 0 is first GPU - seal = None - if solution != -1: - seal = _create_seal_hash(block_and_hotkey_hash_hex, solution) - if _seal_meets_difficulty(seal, difficulty): - return solution, seal - else: - return -1, b"\x00" * 32 - - return solution, seal - - -def reset_cuda(): - """ - Resets the CUDA environment. - """ - try: - import cubit - except ImportError: - raise ImportError("Please install cubit") - - cubit.reset_cuda() - - -def log_cuda_errors() -> str: - """ - Logs any CUDA errors. 
- """ - try: - import cubit - except ImportError: - raise ImportError("Please install cubit") - - f = io.StringIO() - with redirect_stdout(f): - cubit.log_cuda_errors() - - s = f.getvalue() - - return s diff --git a/bittensor/utils/axon_utils.py b/bittensor/utils/axon_utils.py deleted file mode 100644 index 5912f389a4..0000000000 --- a/bittensor/utils/axon_utils.py +++ /dev/null @@ -1,38 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- - -from typing import Optional - -from bittensor.constants import ALLOWED_DELTA, NANOSECONDS_IN_SECOND - - -def allowed_nonce_window_ns(current_time_ns: int, synapse_timeout: Optional[float]): - synapse_timeout_ns = (synapse_timeout or 0) * NANOSECONDS_IN_SECOND - allowed_window_ns = current_time_ns - ALLOWED_DELTA - synapse_timeout_ns - return allowed_window_ns - - -def calculate_diff_seconds( - current_time: int, synapse_timeout: Optional[float], synapse_nonce: int -): - synapse_timeout_ns = (synapse_timeout or 0) * NANOSECONDS_IN_SECOND - diff_seconds = (current_time - synapse_nonce) / NANOSECONDS_IN_SECOND - allowed_delta_seconds = (ALLOWED_DELTA + synapse_timeout_ns) / NANOSECONDS_IN_SECOND - return diff_seconds, allowed_delta_seconds diff --git a/bittensor/utils/balance.py b/bittensor/utils/balance.py deleted file mode 100644 index 63ca6cd5ba..0000000000 --- a/bittensor/utils/balance.py +++ /dev/null @@ -1,285 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021-2022 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -from typing import Union - -import bittensor - - -class Balance: - """ - Represents the bittensor balance of the wallet, stored as rao (int). - This class provides a way to interact with balances in two different units: rao and tao. - It provides methods to convert between these units, as well as to perform arithmetic and comparison operations. - - Attributes: - unit: A string representing the symbol for the tao unit. - rao_unit: A string representing the symbol for the rao unit. - rao: An integer that stores the balance in rao units. - tao: A float property that gives the balance in tao units. - """ - - unit: str = bittensor.__tao_symbol__ # This is the tao unit - rao_unit: str = bittensor.__rao_symbol__ # This is the rao unit - rao: int - tao: float - - def __init__(self, balance: Union[int, float]): - """ - Initialize a Balance object. If balance is an int, it's assumed to be in rao. - If balance is a float, it's assumed to be in tao. - - Args: - balance: The initial balance, in either rao (if an int) or tao (if a float). - """ - if isinstance(balance, int): - self.rao = balance - elif isinstance(balance, float): - # Assume tao value for the float - self.rao = int(balance * pow(10, 9)) - else: - raise TypeError("balance must be an int (rao) or a float (tao)") - - @property - def tao(self): - return self.rao / pow(10, 9) - - def __int__(self): - """ - Convert the Balance object to an int. The resulting value is in rao. - """ - return self.rao - - def __float__(self): - """ - Convert the Balance object to a float. The resulting value is in tao. - """ - return self.tao - - def __str__(self): - """ - Returns the Balance object as a string in the format "symbolvalue", where the value is in tao. 
- """ - return f"{self.unit}{float(self.tao):,.9f}" - - def __rich__(self): - return "[green]{}[/green][green]{}[/green][green].[/green][dim green]{}[/dim green]".format( - self.unit, - format(float(self.tao), "f").split(".")[0], - format(float(self.tao), "f").split(".")[1], - ) - - def __str_rao__(self): - return f"{self.rao_unit}{int(self.rao)}" - - def __rich_rao__(self): - return f"[green]{self.rao_unit}{int(self.rao)}[/green]" - - def __repr__(self): - return self.__str__() - - def __eq__(self, other: Union[int, float, "Balance"]): - if other is None: - return False - - if hasattr(other, "rao"): - return self.rao == other.rao - else: - try: - # Attempt to cast to int from rao - other_rao = int(other) - return self.rao == other_rao - except (TypeError, ValueError): - raise NotImplementedError("Unsupported type") - - def __ne__(self, other: Union[int, float, "Balance"]): - return not self == other - - def __gt__(self, other: Union[int, float, "Balance"]): - if hasattr(other, "rao"): - return self.rao > other.rao - else: - try: - # Attempt to cast to int from rao - other_rao = int(other) - return self.rao > other_rao - except ValueError: - raise NotImplementedError("Unsupported type") - - def __lt__(self, other: Union[int, float, "Balance"]): - if hasattr(other, "rao"): - return self.rao < other.rao - else: - try: - # Attempt to cast to int from rao - other_rao = int(other) - return self.rao < other_rao - except ValueError: - raise NotImplementedError("Unsupported type") - - def __le__(self, other: Union[int, float, "Balance"]): - try: - return self < other or self == other - except TypeError: - raise NotImplementedError("Unsupported type") - - def __ge__(self, other: Union[int, float, "Balance"]): - try: - return self > other or self == other - except TypeError: - raise NotImplementedError("Unsupported type") - - def __add__(self, other: Union[int, float, "Balance"]): - if hasattr(other, "rao"): - return Balance.from_rao(int(self.rao + other.rao)) - else: - try: 
- # Attempt to cast to int from rao - return Balance.from_rao(int(self.rao + other)) - except (ValueError, TypeError): - raise NotImplementedError("Unsupported type") - - def __radd__(self, other: Union[int, float, "Balance"]): - try: - return self + other - except TypeError: - raise NotImplementedError("Unsupported type") - - def __sub__(self, other: Union[int, float, "Balance"]): - try: - return self + -other - except TypeError: - raise NotImplementedError("Unsupported type") - - def __rsub__(self, other: Union[int, float, "Balance"]): - try: - return -self + other - except TypeError: - raise NotImplementedError("Unsupported type") - - def __mul__(self, other: Union[int, float, "Balance"]): - if hasattr(other, "rao"): - return Balance.from_rao(int(self.rao * other.rao)) - else: - try: - # Attempt to cast to int from rao - return Balance.from_rao(int(self.rao * other)) - except (ValueError, TypeError): - raise NotImplementedError("Unsupported type") - - def __rmul__(self, other: Union[int, float, "Balance"]): - return self * other - - def __truediv__(self, other: Union[int, float, "Balance"]): - if hasattr(other, "rao"): - return Balance.from_rao(int(self.rao / other.rao)) - else: - try: - # Attempt to cast to int from rao - return Balance.from_rao(int(self.rao / other)) - except (ValueError, TypeError): - raise NotImplementedError("Unsupported type") - - def __rtruediv__(self, other: Union[int, float, "Balance"]): - if hasattr(other, "rao"): - return Balance.from_rao(int(other.rao / self.rao)) - else: - try: - # Attempt to cast to int from rao - return Balance.from_rao(int(other / self.rao)) - except (ValueError, TypeError): - raise NotImplementedError("Unsupported type") - - def __floordiv__(self, other: Union[int, float, "Balance"]): - if hasattr(other, "rao"): - return Balance.from_rao(int(self.tao // other.tao)) - else: - try: - # Attempt to cast to int from rao - return Balance.from_rao(int(self.rao // other)) - except (ValueError, TypeError): - raise 
NotImplementedError("Unsupported type") - - def __rfloordiv__(self, other: Union[int, float, "Balance"]): - if hasattr(other, "rao"): - return Balance.from_rao(int(other.rao // self.rao)) - else: - try: - # Attempt to cast to int from rao - return Balance.from_rao(int(other // self.rao)) - except (ValueError, TypeError): - raise NotImplementedError("Unsupported type") - - def __int__(self) -> int: - return self.rao - - def __float__(self) -> float: - return self.tao - - def __nonzero__(self) -> bool: - return bool(self.rao) - - def __neg__(self): - return Balance.from_rao(-self.rao) - - def __pos__(self): - return Balance.from_rao(self.rao) - - def __abs__(self): - return Balance.from_rao(abs(self.rao)) - - @staticmethod - def from_float(amount: float): - """ - Given tao (float), return Balance object with rao(int) and tao(float), where rao = int(tao*pow(10,9)) - Args: - amount: The amount in tao. - - Returns: - A Balance object representing the given amount. - """ - rao = int(amount * pow(10, 9)) - return Balance(rao) - - @staticmethod - def from_tao(amount: float): - """ - Given tao (float), return Balance object with rao(int) and tao(float), where rao = int(tao*pow(10,9)) - - Args: - amount: The amount in tao. - - Returns: - A Balance object representing the given amount. - """ - rao = int(amount * pow(10, 9)) - return Balance(rao) - - @staticmethod - def from_rao(amount: int): - """ - Given rao (int), return Balance object with rao(int) and tao(float), where rao = int(tao*pow(10,9)) - - Args: - amount: The amount in rao. - - Returns: - A Balance object representing the given amount. 
- """ - return Balance(amount) diff --git a/bittensor/utils/formatting.py b/bittensor/utils/formatting.py deleted file mode 100644 index 22fbe74c12..0000000000 --- a/bittensor/utils/formatting.py +++ /dev/null @@ -1,123 +0,0 @@ -import math -from typing import List - - -def get_human_readable(num, suffix="H"): - for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: - if abs(num) < 1000.0: - return f"{num:3.1f}{unit}{suffix}" - num /= 1000.0 - return f"{num:.1f}Y{suffix}" - - -def millify(n: int): - millnames = ["", " K", " M", " B", " T"] - n = float(n) - millidx = max( - 0, - min( - len(millnames) - 1, int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3)) - ), - ) - - return "{:.2f}{}".format(n / 10 ** (3 * millidx), millnames[millidx]) - - -def convert_blocks_to_time(blocks: int, block_time: int = 12) -> tuple[int, int, int]: - """ - Converts number of blocks into number of hours, minutes, seconds. - :param blocks: number of blocks - :param block_time: time per block, by default this is 12 - :return: tuple containing number of hours, number of minutes, number of seconds - """ - seconds = blocks * block_time - hours = seconds // 3600 - minutes = (seconds % 3600) // 60 - remaining_seconds = seconds % 60 - return hours, minutes, remaining_seconds - - -def float_to_u16(value: float) -> int: - # Ensure the input is within the expected range - if value is None: - return 0 - if not (0 <= value <= 1): - raise ValueError("Input value must be between 0 and 1") - - # Calculate the u16 representation - u16_max = 65535 - return int(value * u16_max) - - -def u16_to_float(value: int) -> float: - # Ensure the input is within the expected range - if value is None: - return 0.0 - if not (0 <= value <= 65535): - raise ValueError("Input value must be between 0 and 65535") - - # Calculate the float representation - u16_max = 65535 - return value / u16_max - - -def float_to_u64(value: float) -> int: - if value == 0.0: - return 0 - # Ensure the input is within the expected range - if 
not (0 <= value <= 1): - raise ValueError("Input value must be between 0 and 1") - - # Convert the float to a u64 value, take the floor value - return int(math.floor((value * (2**64 - 1)))) - - -def u64_to_float(value: int) -> float: - u64_max = 2**64 - 1 - # Allow for a small margin of error (e.g., 1) to account for potential rounding issues - if not (0 <= value <= u64_max + 1): - raise ValueError( - f"Input value ({value}) must be between 0 and {u64_max} (2^64 - 1)" - ) - return min(value / u64_max, 1.0) # Ensure the result is never greater than 1.0 - - -def normalize_u64_values(values: List[int]) -> List[int]: - """ - Normalize a list of u64 values so that their sum equals u64::MAX (2^64 - 1). - """ - if not values: - raise ValueError("Input list cannot be empty") - - if any(v < 0 for v in values): - raise ValueError("Input values must be non-negative") - - total = sum(values) - if total == 0: - raise ValueError("Sum of input values cannot be zero") - - u64_max = 2**64 - 1 - normalized = [int((v / total) * u64_max) for v in values] - - # Adjust values to ensure sum is exactly u64::MAX - current_sum = sum(normalized) - diff = u64_max - current_sum - - for i in range(abs(diff)): - if diff > 0: - normalized[i % len(normalized)] += 1 - else: - normalized[i % len(normalized)] = max( - 0, normalized[i % len(normalized)] - 1 - ) - - # Final check and adjustment - final_sum = sum(normalized) - if final_sum > u64_max: - normalized[-1] -= final_sum - u64_max - - assert ( - sum(normalized) == u64_max - ), f"Sum of normalized values ({sum(normalized)}) is not equal to u64::MAX ({u64_max})" - - return normalized diff --git a/bittensor/utils/networking.py b/bittensor/utils/networking.py deleted file mode 100644 index f4b729fe97..0000000000 --- a/bittensor/utils/networking.py +++ /dev/null @@ -1,198 +0,0 @@ -"""Utils for handling local network with ip and ports.""" - -# The MIT License (MIT) -# Copyright © 2021-2022 Yuma Rao -# Copyright © 2022-2023 Opentensor Foundation -# 
Copyright © 2023 Opentensor Technologies - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import json -import os -import socket -import urllib -from functools import wraps - -import netaddr -import requests - -from bittensor.btlogging import logging - - -def int_to_ip(int_val: int) -> str: - r"""Maps an integer to a unique ip-string - Args: - int_val (:type:`int128`, `required`): - The integer representation of an ip. Must be in the range (0, 3.4028237e+38). - - Returns: - str_val (:tyep:`str`, `required): - The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 - - Raises: - netaddr.core.AddrFormatError (Exception): - Raised when the passed int_vals is not a valid ip int value. - """ - return str(netaddr.IPAddress(int_val)) - - -def ip_to_int(str_val: str) -> int: - r"""Maps an ip-string to a unique integer. - arg: - str_val (:tyep:`str`, `required): - The string representation of an ip. 
Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 - - Returns: - int_val (:type:`int128`, `required`): - The integer representation of an ip. Must be in the range (0, 3.4028237e+38). - - Raises: - netaddr.core.AddrFormatError (Exception): - Raised when the passed str_val is not a valid ip string value. - """ - return int(netaddr.IPAddress(str_val)) - - -def ip_version(str_val: str) -> int: - r"""Returns the ip version (IPV4 or IPV6). - arg: - str_val (:tyep:`str`, `required): - The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 - - Returns: - int_val (:type:`int128`, `required`): - The ip version (Either 4 or 6 for IPv4/IPv6) - - Raises: - netaddr.core.AddrFormatError (Exception): - Raised when the passed str_val is not a valid ip string value. - """ - return int(netaddr.IPAddress(str_val).version) - - -def ip__str__(ip_type: int, ip_str: str, port: int): - """Return a formatted ip string""" - return "/ipv%i/%s:%i" % (ip_type, ip_str, port) - - -class ExternalIPNotFound(Exception): - """Raised if we cannot attain your external ip from CURL/URLLIB/IPIFY/AWS""" - - -def get_external_ip() -> str: - r"""Checks CURL/URLLIB/IPIFY/AWS for your external ip. - Returns: - external_ip (:obj:`str` `required`): - Your routers external facing ip as a string. - - Raises: - ExternalIPNotFound (Exception): - Raised if all external ip attempts fail. - """ - # --- Try AWS - try: - external_ip = requests.get("https://checkip.amazonaws.com").text.strip() - assert isinstance(ip_to_int(external_ip), int) - return str(external_ip) - except Exception: - pass - - # --- Try ipconfig. - try: - process = os.popen("curl -s ifconfig.me") - external_ip = process.readline() - process.close() - assert isinstance(ip_to_int(external_ip), int) - return str(external_ip) - except Exception: - pass - - # --- Try ipinfo. 
- try: - process = os.popen("curl -s https://ipinfo.io") - external_ip = json.loads(process.read())["ip"] - process.close() - assert isinstance(ip_to_int(external_ip), int) - return str(external_ip) - except Exception: - pass - - # --- Try myip.dnsomatic - try: - process = os.popen("curl -s myip.dnsomatic.com") - external_ip = process.readline() - process.close() - assert isinstance(ip_to_int(external_ip), int) - return str(external_ip) - except Exception: - pass - - # --- Try urllib ipv6 - try: - external_ip = urllib.request.urlopen("https://ident.me").read().decode("utf8") - assert isinstance(ip_to_int(external_ip), int) - return str(external_ip) - except Exception: - pass - - # --- Try Wikipedia - try: - external_ip = requests.get("https://www.wikipedia.org").headers["X-Client-IP"] - assert isinstance(ip_to_int(external_ip), int) - return str(external_ip) - except Exception: - pass - - raise ExternalIPNotFound - - -def get_formatted_ws_endpoint_url(endpoint_url: str) -> str: - """ - Returns a formatted websocket endpoint url. - Note: The port (or lack thereof) is left unchanged - Args: - endpoint_url (str, `required`): - The endpoint url to format. - Returns: - formatted_endpoint_url (str, `required`): - The formatted endpoint url. 
In the form of ws:// or wss:// - """ - if endpoint_url[0:6] != "wss://" and endpoint_url[0:5] != "ws://": - endpoint_url = "ws://{}".format(endpoint_url) - - return endpoint_url - - -def ensure_connected(func): - """Decorator ensuring the function executes with an active substrate connection.""" - - @wraps(func) - def wrapper(self, *args, **kwargs): - # Check the socket state before method execution - if ( - # connection was closed correctly - self.substrate.websocket.sock is None - # connection has a broken pipe - or self.substrate.websocket.sock.getsockopt( - socket.SOL_SOCKET, socket.SO_ERROR - ) - != 0 - ): - logging.info("Reconnection substrate...") - self._get_substrate() - # Execute the method if the connection is active or after reconnecting - return func(self, *args, **kwargs) - - return wrapper diff --git a/bittensor/utils/registration.py b/bittensor/utils/registration.py deleted file mode 100644 index d606929dcb..0000000000 --- a/bittensor/utils/registration.py +++ /dev/null @@ -1,1170 +0,0 @@ -import binascii -import functools -import hashlib -import math -import multiprocessing -import multiprocessing.queues # this must be imported separately, or could break type annotations -import os -import random -import time -import typing -from dataclasses import dataclass -from datetime import timedelta -from queue import Empty, Full -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -import backoff -import numpy - -import bittensor -from Crypto.Hash import keccak -from rich import console as rich_console -from rich import status as rich_status - -from .formatting import get_human_readable, millify -from ._register_cuda import solve_cuda - - -def use_torch() -> bool: - """Force the use of torch over numpy for certain operations.""" - return True if os.getenv("USE_TORCH") == "1" else False - - -def legacy_torch_api_compat(func): - """ - Convert function operating on numpy Input&Output to legacy torch Input&Output API if `use_torch()` is True. 
- - Args: - func (function): - Function with numpy Input/Output to be decorated. - Returns: - decorated (function): - Decorated function. - """ - - @functools.wraps(func) - def decorated(*args, **kwargs): - if use_torch(): - # if argument is a Torch tensor, convert it to numpy - args = [ - arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg - for arg in args - ] - kwargs = { - key: value.cpu().numpy() if isinstance(value, torch.Tensor) else value - for key, value in kwargs.items() - } - ret = func(*args, **kwargs) - if use_torch(): - # if return value is a numpy array, convert it to Torch tensor - if isinstance(ret, numpy.ndarray): - ret = torch.from_numpy(ret) - return ret - - return decorated - - -@functools.cache -def _get_real_torch(): - try: - import torch as _real_torch - except ImportError: - _real_torch = None - return _real_torch - - -def log_no_torch_error(): - bittensor.logging.error( - "This command requires torch. You can install torch for bittensor" - ' with `pip install bittensor[torch]` or `pip install ".[torch]"`' - " if installing from source, and then run the command with USE_TORCH=1 {command}" - ) - - -class LazyLoadedTorch: - def __bool__(self): - return bool(_get_real_torch()) - - def __getattr__(self, name): - if real_torch := _get_real_torch(): - return getattr(real_torch, name) - else: - log_no_torch_error() - raise ImportError("torch not installed") - - -if typing.TYPE_CHECKING: - import torch -else: - torch = LazyLoadedTorch() - - -class CUDAException(Exception): - """An exception raised when an error occurs in the CUDA environment.""" - - pass - - -def _hex_bytes_to_u8_list(hex_bytes: bytes): - hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)] - return hex_chunks - - -def _create_seal_hash(block_and_hotkey_hash_bytes: bytes, nonce: int) -> bytes: - nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little")) - pre_seal = nonce_bytes + binascii.hexlify(block_and_hotkey_hash_bytes)[:64] - seal_sh256 = 
hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest() - kec = keccak.new(digest_bits=256) - seal = kec.update(seal_sh256).digest() - return seal - - -def _seal_meets_difficulty(seal: bytes, difficulty: int, limit: int): - seal_number = int.from_bytes(seal, "big") - product = seal_number * difficulty - return product < limit - - -@dataclass -class POWSolution: - """A solution to the registration PoW problem.""" - - nonce: int - block_number: int - difficulty: int - seal: bytes - - def is_stale(self, subtensor: "bittensor.subtensor") -> bool: - """Returns True if the POW is stale. - This means the block the POW is solved for is within 3 blocks of the current block. - """ - return self.block_number < subtensor.get_current_block() - 3 - - -class _SolverBase(multiprocessing.Process): - """ - A process that solves the registration PoW problem. - - Args: - proc_num: int - The number of the process being created. - num_proc: int - The total number of processes running. - update_interval: int - The number of nonces to try to solve before checking for a new block. - finished_queue: multiprocessing.Queue - The queue to put the process number when a process finishes each update_interval. - Used for calculating the average time per update_interval across all processes. - solution_queue: multiprocessing.Queue - The queue to put the solution the process has found during the pow solve. - newBlockEvent: multiprocessing.Event - The event to set by the main process when a new block is finalized in the network. - The solver process will check for the event after each update_interval. - The solver process will get the new block hash and difficulty and start solving for a new nonce. - stopEvent: multiprocessing.Event - The event to set by the main process when all the solver processes should stop. - The solver process will check for the event after each update_interval. - The solver process will stop when the event is set. 
- Used to stop the solver processes when a solution is found. - curr_block: multiprocessing.Array - The array containing this process's current block hash. - The main process will set the array to the new block hash when a new block is finalized in the network. - The solver process will get the new block hash from this array when newBlockEvent is set. - curr_block_num: multiprocessing.Value - The value containing this process's current block number. - The main process will set the value to the new block number when a new block is finalized in the network. - The solver process will get the new block number from this value when newBlockEvent is set. - curr_diff: multiprocessing.Array - The array containing this process's current difficulty. - The main process will set the array to the new difficulty when a new block is finalized in the network. - The solver process will get the new difficulty from this array when newBlockEvent is set. - check_block: multiprocessing.Lock - The lock to prevent this process from getting the new block data while the main process is updating the data. - limit: int - The limit of the pow solve for a valid solution. 
- """ - - proc_num: int - num_proc: int - update_interval: int - finished_queue: multiprocessing.Queue - solution_queue: multiprocessing.Queue - newBlockEvent: multiprocessing.Event - stopEvent: multiprocessing.Event - hotkey_bytes: bytes - curr_block: multiprocessing.Array - curr_block_num: multiprocessing.Value - curr_diff: multiprocessing.Array - check_block: multiprocessing.Lock - limit: int - - def __init__( - self, - proc_num, - num_proc, - update_interval, - finished_queue, - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - ): - multiprocessing.Process.__init__(self, daemon=True) - self.proc_num = proc_num - self.num_proc = num_proc - self.update_interval = update_interval - self.finished_queue = finished_queue - self.solution_queue = solution_queue - self.newBlockEvent = multiprocessing.Event() - self.newBlockEvent.clear() - self.curr_block = curr_block - self.curr_block_num = curr_block_num - self.curr_diff = curr_diff - self.check_block = check_block - self.stopEvent = stopEvent - self.limit = limit - - def run(self): - raise NotImplementedError("_SolverBase is an abstract class") - - @staticmethod - def create_shared_memory() -> ( - Tuple[multiprocessing.Array, multiprocessing.Value, multiprocessing.Array] - ): - """Creates shared memory for the solver processes to use.""" - curr_block = multiprocessing.Array("h", 32, lock=True) # byte array - curr_block_num = multiprocessing.Value("i", 0, lock=True) # int - curr_diff = multiprocessing.Array("Q", [0, 0], lock=True) # [high, low] - - return curr_block, curr_block_num, curr_diff - - -class _Solver(_SolverBase): - def run(self): - block_number: int - block_and_hotkey_hash_bytes: bytes - block_difficulty: int - nonce_limit = int(math.pow(2, 64)) - 1 - - # Start at random nonce - nonce_start = random.randint(0, nonce_limit) - nonce_end = nonce_start + self.update_interval - while not self.stopEvent.is_set(): - if self.newBlockEvent.is_set(): - with 
self.check_block: - block_number = self.curr_block_num.value - block_and_hotkey_hash_bytes = bytes(self.curr_block) - block_difficulty = _registration_diff_unpack(self.curr_diff) - - self.newBlockEvent.clear() - - # Do a block of nonces - solution = _solve_for_nonce_block( - nonce_start, - nonce_end, - block_and_hotkey_hash_bytes, - block_difficulty, - self.limit, - block_number, - ) - if solution is not None: - self.solution_queue.put(solution) - - try: - # Send time - self.finished_queue.put_nowait(self.proc_num) - except Full: - pass - - nonce_start = random.randint(0, nonce_limit) - nonce_start = nonce_start % nonce_limit - nonce_end = nonce_start + self.update_interval - - -class _CUDASolver(_SolverBase): - dev_id: int - tpb: int - - def __init__( - self, - proc_num, - num_proc, - update_interval, - finished_queue, - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - dev_id: int, - tpb: int, - ): - super().__init__( - proc_num, - num_proc, - update_interval, - finished_queue, - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - ) - self.dev_id = dev_id - self.tpb = tpb - - def run(self): - block_number: int = 0 # dummy value - block_and_hotkey_hash_bytes: bytes = b"0" * 32 # dummy value - block_difficulty: int = int(math.pow(2, 64)) - 1 # dummy value - nonce_limit = int(math.pow(2, 64)) - 1 # U64MAX - - # Start at random nonce - nonce_start = random.randint(0, nonce_limit) - while not self.stopEvent.is_set(): - if self.newBlockEvent.is_set(): - with self.check_block: - block_number = self.curr_block_num.value - block_and_hotkey_hash_bytes = bytes(self.curr_block) - block_difficulty = _registration_diff_unpack(self.curr_diff) - - self.newBlockEvent.clear() - - # Do a block of nonces - solution = _solve_for_nonce_block_cuda( - nonce_start, - self.update_interval, - block_and_hotkey_hash_bytes, - block_difficulty, - self.limit, - block_number, - self.dev_id, - 
self.tpb, - ) - if solution is not None: - self.solution_queue.put(solution) - - try: - # Signal that a nonce_block was finished using queue - # send our proc_num - self.finished_queue.put(self.proc_num) - except Full: - pass - - # increase nonce by number of nonces processed - nonce_start += self.update_interval * self.tpb - nonce_start = nonce_start % nonce_limit - - -def _solve_for_nonce_block_cuda( - nonce_start: int, - update_interval: int, - block_and_hotkey_hash_bytes: bytes, - difficulty: int, - limit: int, - block_number: int, - dev_id: int, - tpb: int, -) -> Optional[POWSolution]: - """Tries to solve the POW on a CUDA device for a block of nonces (nonce_start, nonce_start + update_interval * tpb""" - solution, seal = solve_cuda( - nonce_start, - update_interval, - tpb, - block_and_hotkey_hash_bytes, - difficulty, - limit, - dev_id, - ) - - if solution != -1: - # Check if solution is valid (i.e. not -1) - return POWSolution(solution, block_number, difficulty, seal) - - return None - - -def _solve_for_nonce_block( - nonce_start: int, - nonce_end: int, - block_and_hotkey_hash_bytes: bytes, - difficulty: int, - limit: int, - block_number: int, -) -> Optional[POWSolution]: - """Tries to solve the POW for a block of nonces (nonce_start, nonce_end)""" - for nonce in range(nonce_start, nonce_end): - # Create seal. - seal = _create_seal_hash(block_and_hotkey_hash_bytes, nonce) - - # Check if seal meets difficulty - if _seal_meets_difficulty(seal, difficulty, limit): - # Found a solution, save it. - return POWSolution(nonce, block_number, difficulty, seal) - - return None - - -def _registration_diff_unpack(packed_diff: multiprocessing.Array) -> int: - """Unpacks the packed two 32-bit integers into one 64-bit integer. Little endian.""" - return int(packed_diff[0] << 32 | packed_diff[1]) - - -def _registration_diff_pack(diff: int, packed_diff: multiprocessing.Array): - """Packs the difficulty into two 32-bit integers. 
Little endian.""" - packed_diff[0] = diff >> 32 - packed_diff[1] = diff & 0xFFFFFFFF # low 32 bits - - -def _hash_block_with_hotkey(block_bytes: bytes, hotkey_bytes: bytes) -> bytes: - """Hashes the block with the hotkey using Keccak-256 to get 32 bytes""" - kec = keccak.new(digest_bits=256) - kec = kec.update(bytearray(block_bytes + hotkey_bytes)) - block_and_hotkey_hash_bytes = kec.digest() - return block_and_hotkey_hash_bytes - - -def _update_curr_block( - curr_diff: multiprocessing.Array, - curr_block: multiprocessing.Array, - curr_block_num: multiprocessing.Value, - block_number: int, - block_bytes: bytes, - diff: int, - hotkey_bytes: bytes, - lock: multiprocessing.Lock, -): - with lock: - curr_block_num.value = block_number - # Hash the block with the hotkey - block_and_hotkey_hash_bytes = _hash_block_with_hotkey(block_bytes, hotkey_bytes) - for i in range(32): - curr_block[i] = block_and_hotkey_hash_bytes[i] - _registration_diff_pack(diff, curr_diff) - - -def get_cpu_count() -> int: - try: - return len(os.sched_getaffinity(0)) - except AttributeError: - # OSX does not have sched_getaffinity - return os.cpu_count() - - -@dataclass -class RegistrationStatistics: - """Statistics for a registration.""" - - time_spent_total: float - rounds_total: int - time_average: float - time_spent: float - hash_rate_perpetual: float - hash_rate: float - difficulty: int - block_number: int - block_hash: bytes - - -class RegistrationStatisticsLogger: - """Logs statistics for a registration.""" - - console: rich_console.Console - status: Optional[rich_status.Status] - - def __init__( - self, console: rich_console.Console, output_in_place: bool = True - ) -> None: - self.console = console - - if output_in_place: - self.status = self.console.status("Solving") - else: - self.status = None - - def start(self) -> None: - if self.status is not None: - self.status.start() - - def stop(self) -> None: - if self.status is not None: - self.status.stop() - - def get_status_message( - cls, 
stats: RegistrationStatistics, verbose: bool = False - ) -> str: - message = ( - "Solving\n" - + f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n" - + ( - f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n" - + f"Time Spent Average: {timedelta(seconds=stats.time_average)}\n" - if verbose - else "" - ) - + f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n" - + f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / " - + f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n" - + f"Block Number: [bold white]{stats.block_number}[/bold white]\n" - + f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n" - ) - return message - - def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None: - if self.status is not None: - self.status.update(self.get_status_message(stats, verbose=verbose)) - else: - self.console.log(self.get_status_message(stats, verbose=verbose)) - - -def _solve_for_difficulty_fast( - subtensor, - wallet: "bittensor.wallet", - netuid: int, - output_in_place: bool = True, - num_processes: Optional[int] = None, - update_interval: Optional[int] = None, - n_samples: int = 10, - alpha_: float = 0.80, - log_verbose: bool = False, -) -> Optional[POWSolution]: - """ - Solves the POW for registration using multiprocessing. - Args: - subtensor - Subtensor to connect to for block information and to submit. - wallet: - wallet to use for registration. - netuid: int - The netuid of the subnet to register to. - output_in_place: bool - If true, prints the status in place. Otherwise, prints the status on a new line. - num_processes: int - Number of processes to use. - update_interval: int - Number of nonces to solve before updating block information. 
- n_samples: int - The number of samples of the hash_rate to keep for the EWMA - alpha_: float - The alpha for the EWMA for the hash_rate calculation - log_verbose: bool - If true, prints more verbose logging of the registration metrics. - Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust. - Note: - - We can also modify the update interval to do smaller blocks of work, - while still updating the block information after a different number of nonces, - to increase the transparency of the process while still keeping the speed. - """ - if num_processes is None: - # get the number of allowed processes for this process - num_processes = min(1, get_cpu_count()) - - if update_interval is None: - update_interval = 50_000 - - limit = int(math.pow(2, 256)) - 1 - - curr_block, curr_block_num, curr_diff = _Solver.create_shared_memory() - - # Establish communication queues - ## See the _Solver class for more information on the queues. 
- stopEvent = multiprocessing.Event() - stopEvent.clear() - - solution_queue = multiprocessing.Queue() - finished_queues = [multiprocessing.Queue() for _ in range(num_processes)] - check_block = multiprocessing.Lock() - - hotkey_bytes = ( - wallet.coldkeypub.public_key if netuid == -1 else wallet.hotkey.public_key - ) - # Start consumers - solvers = [ - _Solver( - i, - num_processes, - update_interval, - finished_queues[i], - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - ) - for i in range(num_processes) - ] - - # Get first block - block_number, difficulty, block_hash = _get_block_with_retry( - subtensor=subtensor, netuid=netuid - ) - - block_bytes = bytes.fromhex(block_hash[2:]) - old_block_number = block_number - # Set to current block - _update_curr_block( - curr_diff, - curr_block, - curr_block_num, - block_number, - block_bytes, - difficulty, - hotkey_bytes, - check_block, - ) - - # Set new block events for each solver to start at the initial block - for worker in solvers: - worker.newBlockEvent.set() - - for worker in solvers: - worker.start() # start the solver processes - - start_time = time.time() # time that the registration started - time_last = start_time # time that the last work blocks completed - - curr_stats = RegistrationStatistics( - time_spent_total=0.0, - time_average=0.0, - rounds_total=0, - time_spent=0.0, - hash_rate_perpetual=0.0, - hash_rate=0.0, - difficulty=difficulty, - block_number=block_number, - block_hash=block_hash, - ) - - start_time_perpetual = time.time() - - console = bittensor.__console__ - logger = RegistrationStatisticsLogger(console, output_in_place) - logger.start() - - solution = None - - hash_rates = [0] * n_samples # The last n true hash_rates - weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha - - while netuid == -1 or not subtensor.is_hotkey_registered( - netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address - ): - # Wait until a solver finds 
a solution - try: - solution = solution_queue.get(block=True, timeout=0.25) - if solution is not None: - break - except Empty: - # No solution found, try again - pass - - # check for new block - old_block_number = _check_for_newest_block_and_update( - subtensor=subtensor, - netuid=netuid, - hotkey_bytes=hotkey_bytes, - old_block_number=old_block_number, - curr_diff=curr_diff, - curr_block=curr_block, - curr_block_num=curr_block_num, - curr_stats=curr_stats, - update_curr_block=_update_curr_block, - check_block=check_block, - solvers=solvers, - ) - - num_time = 0 - for finished_queue in finished_queues: - try: - proc_num = finished_queue.get(timeout=0.1) - num_time += 1 - - except Empty: - continue - - time_now = time.time() # get current time - time_since_last = time_now - time_last # get time since last work block(s) - if num_time > 0 and time_since_last > 0.0: - # create EWMA of the hash_rate to make measure more robust - - hash_rate_ = (num_time * update_interval) / time_since_last - hash_rates.append(hash_rate_) - hash_rates.pop(0) # remove the 0th data point - curr_stats.hash_rate = sum( - [hash_rates[i] * weights[i] for i in range(n_samples)] - ) / (sum(weights)) - - # update time last to now - time_last = time_now - - curr_stats.time_average = ( - curr_stats.time_average * curr_stats.rounds_total - + curr_stats.time_spent - ) / (curr_stats.rounds_total + num_time) - curr_stats.rounds_total += num_time - - # Update stats - curr_stats.time_spent = time_since_last - new_time_spent_total = time_now - start_time_perpetual - curr_stats.hash_rate_perpetual = ( - curr_stats.rounds_total * update_interval - ) / new_time_spent_total - curr_stats.time_spent_total = new_time_spent_total - - # Update the logger - logger.update(curr_stats, verbose=log_verbose) - - # exited while, solution contains the nonce or wallet is registered - stopEvent.set() # stop all other processes - logger.stop() - - # terminate and wait for all solvers to exit - 
_terminate_workers_and_wait_for_exit(solvers) - - return solution - - -@backoff.on_exception(backoff.constant, Exception, interval=1, max_tries=3) -def _get_block_with_retry( - subtensor: "bittensor.subtensor", netuid: int -) -> Tuple[int, int, bytes]: - """ - Gets the current block number, difficulty, and block hash from the substrate node. - - Args: - subtensor (:obj:`bittensor.subtensor`, `required`): - The subtensor object to use to get the block number, difficulty, and block hash. - - netuid (:obj:`int`, `required`): - The netuid of the network to get the block number, difficulty, and block hash from. - - Returns: - block_number (:obj:`int`): - The current block number. - - difficulty (:obj:`int`): - The current difficulty of the subnet. - - block_hash (:obj:`bytes`): - The current block hash. - - Raises: - Exception: If the block hash is None. - ValueError: If the difficulty is None. - """ - block_number = subtensor.get_current_block() - difficulty = 1_000_000 if netuid == -1 else subtensor.difficulty(netuid=netuid) - block_hash = subtensor.get_block_hash(block_number) - if block_hash is None: - raise Exception( - "Network error. Could not connect to substrate to get block hash" - ) - if difficulty is None: - raise ValueError("Chain error. 
Difficulty is None") - return block_number, difficulty, block_hash - - -class _UsingSpawnStartMethod: - def __init__(self, force: bool = False): - self._old_start_method = None - self._force = force - - def __enter__(self): - self._old_start_method = multiprocessing.get_start_method(allow_none=True) - if self._old_start_method is None: - self._old_start_method = "spawn" # default to spawn - - multiprocessing.set_start_method("spawn", force=self._force) - - def __exit__(self, *args): - # restore the old start method - multiprocessing.set_start_method(self._old_start_method, force=True) - - -def _check_for_newest_block_and_update( - subtensor: "bittensor.subtensor", - netuid: int, - old_block_number: int, - hotkey_bytes: bytes, - curr_diff: multiprocessing.Array, - curr_block: multiprocessing.Array, - curr_block_num: multiprocessing.Value, - update_curr_block: Callable, - check_block: "multiprocessing.Lock", - solvers: List[_Solver], - curr_stats: RegistrationStatistics, -) -> int: - """ - Checks for a new block and updates the current block information if a new block is found. - - Args: - subtensor (:obj:`bittensor.subtensor`, `required`): - The subtensor object to use for getting the current block. - netuid (:obj:`int`, `required`): - The netuid to use for retrieving the difficulty. - old_block_number (:obj:`int`, `required`): - The old block number to check against. - hotkey_bytes (:obj:`bytes`, `required`): - The bytes of the hotkey's pubkey. - curr_diff (:obj:`multiprocessing.Array`, `required`): - The current difficulty as a multiprocessing array. - curr_block (:obj:`multiprocessing.Array`, `required`): - Where the current block is stored as a multiprocessing array. - curr_block_num (:obj:`multiprocessing.Value`, `required`): - Where the current block number is stored as a multiprocessing value. - update_curr_block (:obj:`Callable`, `required`): - A function that updates the current block. 
- check_block (:obj:`multiprocessing.Lock`, `required`): - A mp lock that is used to check for a new block. - solvers (:obj:`List[_Solver]`, `required`): - A list of solvers to update the current block for. - curr_stats (:obj:`RegistrationStatistics`, `required`): - The current registration statistics to update. - - Returns: - (int) The current block number. - """ - block_number = subtensor.get_current_block() - if block_number != old_block_number: - old_block_number = block_number - # update block information - block_number, difficulty, block_hash = _get_block_with_retry( - subtensor=subtensor, netuid=netuid - ) - block_bytes = bytes.fromhex(block_hash[2:]) - - update_curr_block( - curr_diff, - curr_block, - curr_block_num, - block_number, - block_bytes, - difficulty, - hotkey_bytes, - check_block, - ) - # Set new block events for each solver - - for worker in solvers: - worker.newBlockEvent.set() - - # update stats - curr_stats.block_number = block_number - curr_stats.block_hash = block_hash - curr_stats.difficulty = difficulty - - return old_block_number - - -def _solve_for_difficulty_fast_cuda( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - netuid: int, - output_in_place: bool = True, - update_interval: int = 50_000, - tpb: int = 512, - dev_id: Union[List[int], int] = 0, - n_samples: int = 10, - alpha_: float = 0.80, - log_verbose: bool = False, -) -> Optional[POWSolution]: - """ - Solves the registration fast using CUDA - Args: - subtensor: bittensor.subtensor - The subtensor node to grab blocks - wallet: bittensor.wallet - The wallet to register - netuid: int - The netuid of the subnet to register to. - output_in_place: bool - If true, prints the output in place, otherwise prints to new lines - update_interval: int - The number of nonces to try before checking for more blocks - tpb: int - The number of threads per block. 
CUDA param that should match the GPU capability - dev_id: Union[List[int], int] - The CUDA device IDs to execute the registration on, either a single device or a list of devices - n_samples: int - The number of samples of the hash_rate to keep for the EWMA - alpha_: float - The alpha for the EWMA for the hash_rate calculation - log_verbose: bool - If true, prints more verbose logging of the registration metrics. - Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust. - """ - if isinstance(dev_id, int): - dev_id = [dev_id] - elif dev_id is None: - dev_id = [0] - - if update_interval is None: - update_interval = 50_000 - - if not torch.cuda.is_available(): - raise Exception("CUDA not available") - - limit = int(math.pow(2, 256)) - 1 - - # Set mp start to use spawn so CUDA doesn't complain - with _UsingSpawnStartMethod(force=True): - curr_block, curr_block_num, curr_diff = _CUDASolver.create_shared_memory() - - ## Create a worker per CUDA device - num_processes = len(dev_id) - - # Establish communication queues - stopEvent = multiprocessing.Event() - stopEvent.clear() - solution_queue = multiprocessing.Queue() - finished_queues = [multiprocessing.Queue() for _ in range(num_processes)] - check_block = multiprocessing.Lock() - - hotkey_bytes = wallet.hotkey.public_key - # Start workers - solvers = [ - _CUDASolver( - i, - num_processes, - update_interval, - finished_queues[i], - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - dev_id[i], - tpb, - ) - for i in range(num_processes) - ] - - # Get first block - block_number, difficulty, block_hash = _get_block_with_retry( - subtensor=subtensor, netuid=netuid - ) - - block_bytes = bytes.fromhex(block_hash[2:]) - old_block_number = block_number - - # Set to current block - _update_curr_block( - curr_diff, - curr_block, - curr_block_num, - block_number, - block_bytes, - difficulty, - hotkey_bytes, - check_block, 
- ) - - # Set new block events for each solver to start at the initial block - for worker in solvers: - worker.newBlockEvent.set() - - for worker in solvers: - worker.start() # start the solver processes - - start_time = time.time() # time that the registration started - time_last = start_time # time that the last work blocks completed - - curr_stats = RegistrationStatistics( - time_spent_total=0.0, - time_average=0.0, - rounds_total=0, - time_spent=0.0, - hash_rate_perpetual=0.0, - hash_rate=0.0, # EWMA hash_rate (H/s) - difficulty=difficulty, - block_number=block_number, - block_hash=block_hash, - ) - - start_time_perpetual = time.time() - - console = bittensor.__console__ - logger = RegistrationStatisticsLogger(console, output_in_place) - logger.start() - - hash_rates = [0] * n_samples # The last n true hash_rates - weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha - - solution = None - while netuid == -1 or not subtensor.is_hotkey_registered( - netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address - ): - # Wait until a solver finds a solution - try: - solution = solution_queue.get(block=True, timeout=0.15) - if solution is not None: - break - except Empty: - # No solution found, try again - pass - - # check for new block - old_block_number = _check_for_newest_block_and_update( - subtensor=subtensor, - netuid=netuid, - hotkey_bytes=hotkey_bytes, - curr_diff=curr_diff, - curr_block=curr_block, - curr_block_num=curr_block_num, - old_block_number=old_block_number, - curr_stats=curr_stats, - update_curr_block=_update_curr_block, - check_block=check_block, - solvers=solvers, - ) - - num_time = 0 - # Get times for each solver - for finished_queue in finished_queues: - try: - proc_num = finished_queue.get(timeout=0.1) - num_time += 1 - - except Empty: - continue - - time_now = time.time() # get current time - time_since_last = time_now - time_last # get time since last work block(s) - if num_time > 0 and time_since_last > 0.0: - # create EWMA of 
the hash_rate to make measure more robust - - hash_rate_ = (num_time * tpb * update_interval) / time_since_last - hash_rates.append(hash_rate_) - hash_rates.pop(0) # remove the 0th data point - curr_stats.hash_rate = sum( - [hash_rates[i] * weights[i] for i in range(n_samples)] - ) / (sum(weights)) - - # update time last to now - time_last = time_now - - curr_stats.time_average = ( - curr_stats.time_average * curr_stats.rounds_total - + curr_stats.time_spent - ) / (curr_stats.rounds_total + num_time) - curr_stats.rounds_total += num_time - - # Update stats - curr_stats.time_spent = time_since_last - new_time_spent_total = time_now - start_time_perpetual - curr_stats.hash_rate_perpetual = ( - curr_stats.rounds_total * (tpb * update_interval) - ) / new_time_spent_total - curr_stats.time_spent_total = new_time_spent_total - - # Update the logger - logger.update(curr_stats, verbose=log_verbose) - - # exited while, found_solution contains the nonce or wallet is registered - - stopEvent.set() # stop all other processes - logger.stop() - - # terminate and wait for all solvers to exit - _terminate_workers_and_wait_for_exit(solvers) - - return solution - - -def _terminate_workers_and_wait_for_exit( - workers: List[Union[multiprocessing.Process, multiprocessing.queues.Queue]], -) -> None: - for worker in workers: - if isinstance(worker, multiprocessing.queues.Queue): - worker.join_thread() - else: - worker.terminate() - worker.join() - worker.close() - - -def create_pow( - subtensor, - wallet, - netuid: int, - output_in_place: bool = True, - cuda: bool = False, - dev_id: Union[List[int], int] = 0, - tpb: int = 256, - num_processes: int = None, - update_interval: int = None, - log_verbose: bool = False, -) -> Optional[Dict[str, Any]]: - """ - Creates a proof of work for the given subtensor and wallet. - Args: - subtensor (:obj:`bittensor.subtensor.subtensor`, `required`): - The subtensor to create a proof of work for. 
- wallet (:obj:`bittensor.wallet.wallet`, `required`): - The wallet to create a proof of work for. - netuid (:obj:`int`, `required`): - The netuid for the subnet to create a proof of work for. - output_in_place (:obj:`bool`, `optional`, defaults to :obj:`True`): - If true, prints the progress of the proof of work to the console - in-place. Meaning the progress is printed on the same lines. - cuda (:obj:`bool`, `optional`, defaults to :obj:`False`): - If true, uses CUDA to solve the proof of work. - dev_id (:obj:`Union[List[int], int]`, `optional`, defaults to :obj:`0`): - The CUDA device id(s) to use. If cuda is true and dev_id is a list, - then multiple CUDA devices will be used to solve the proof of work. - tpb (:obj:`int`, `optional`, defaults to :obj:`256`): - The number of threads per block to use when solving the proof of work. - Should be a multiple of 32. - num_processes (:obj:`int`, `optional`, defaults to :obj:`None`): - The number of processes to use when solving the proof of work. - If None, then the number of processes is equal to the number of - CPU cores. - update_interval (:obj:`int`, `optional`, defaults to :obj:`None`): - The number of nonces to run before checking for a new block. - log_verbose (:obj:`bool`, `optional`, defaults to :obj:`False`): - If true, prints the progress of the proof of work more verbosely. - Returns: - :obj:`Optional[Dict[str, Any]]`: The proof of work solution or None if - the wallet is already registered or there is a different error. - - Raises: - :obj:`ValueError`: If the subnet does not exist. 
- """ - if netuid != -1: - if not subtensor.subnet_exists(netuid=netuid): - raise ValueError(f"Subnet {netuid} does not exist") - - if cuda: - solution: Optional[POWSolution] = _solve_for_difficulty_fast_cuda( - subtensor, - wallet, - netuid=netuid, - output_in_place=output_in_place, - dev_id=dev_id, - tpb=tpb, - update_interval=update_interval, - log_verbose=log_verbose, - ) - else: - solution: Optional[POWSolution] = _solve_for_difficulty_fast( - subtensor, - wallet, - netuid=netuid, - output_in_place=output_in_place, - num_processes=num_processes, - update_interval=update_interval, - log_verbose=log_verbose, - ) - - return solution diff --git a/bittensor/utils/subtensor.py b/bittensor/utils/subtensor.py deleted file mode 100644 index 0df9c64d82..0000000000 --- a/bittensor/utils/subtensor.py +++ /dev/null @@ -1,173 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -"""Module providing common helper functions for working with Subtensor.""" - -import json -import logging -import os -from typing import Dict, Optional, Union, Any, List, Tuple - -from substrateinterface.base import SubstrateInterface - -_logger = logging.getLogger("subtensor.errors_handler") - -_USER_HOME_DIR = os.path.expanduser("~") -_BT_DIR = os.path.join(_USER_HOME_DIR, ".bittensor") -_ERRORS_FILE_PATH = os.path.join(_BT_DIR, "subtensor_errors_map.json") -_ST_BUILD_ID = "subtensor_build_id" - -# Create directory if it doesn't exist -os.makedirs(_BT_DIR, exist_ok=True) - - -# Pallet's typing class `PalletMetadataV14` is defined only at -# https://github.com/polkascan/py-scale-codec/blob/master/scalecodec/type_registry/core.json#L1024 -# A class object is created dynamically at runtime. -# Circleci linter complains about string represented classes like 'PalletMetadataV14'. -def _get_errors_from_pallet(pallet) -> Optional[Dict[str, Dict[str, str]]]: - """Extracts and returns error information from the given pallet metadata. - - Args: - pallet (PalletMetadataV14): The pallet metadata containing error definitions. - - Returns: - dict[str, str]: A dictionary of errors indexed by their IDs. - - Raises: - ValueError: If the pallet does not contain error definitions or the list is empty. - """ - if not hasattr(pallet, "errors") or not pallet.errors: - _logger.warning( - "The pallet does not contain any error definitions or the list is empty." 
- ) - return None - - return { - str(error["index"]): { - "name": error["name"], - "description": " ".join(error["docs"]), - } - for error in pallet.errors - } - - -def _save_errors_to_cache(uniq_version: str, errors: Dict[str, Dict[str, str]]): - """Saves error details and unique version identifier to a JSON file. - - Args: - uniq_version (str): Unique version identifier for the Subtensor build. - errors (dict[str, str]): Error information to be cached. - """ - data = {_ST_BUILD_ID: uniq_version, "errors": errors} - try: - with open(_ERRORS_FILE_PATH, "w") as json_file: - json.dump(data, json_file, indent=4) - except IOError as e: - _logger.warning(f"Error saving to file: {e}") - - -def _get_errors_from_cache() -> Optional[Dict[str, Dict[str, Dict[str, str]]]]: - """Retrieves and returns the cached error information from a JSON file, if it exists. - - Returns: - A dictionary containing error information. - """ - if not os.path.exists(_ERRORS_FILE_PATH): - return None - - try: - with open(_ERRORS_FILE_PATH, "r") as json_file: - data = json.load(json_file) - except IOError as e: - _logger.warning(f"Error reading from file: {e}") - return None - - return data - - -def get_subtensor_errors( - substrate: SubstrateInterface, -) -> Union[Dict[str, Dict[str, str]], Dict[Any, Any]]: - """Fetches or retrieves cached Subtensor error definitions using metadata. - - Args: - substrate (SubstrateInterface): Instance of SubstrateInterface to access metadata. - - Returns: - dict[str, str]: A dictionary containing error information. - """ - if not substrate.metadata: - substrate.get_metadata() - - cached_errors_map = _get_errors_from_cache() - # TODO: Talk to the Nucleus team about a unique identification for each assembly (subtensor). 
Before that, use - # the metadata value for `subtensor_build_id` - subtensor_build_id = substrate.metadata[0].value - - if not cached_errors_map or subtensor_build_id != cached_errors_map.get( - _ST_BUILD_ID - ): - pallet = substrate.metadata.get_metadata_pallet("SubtensorModule") - subtensor_errors_map = _get_errors_from_pallet(pallet) - - if not subtensor_errors_map: - return {} - - _save_errors_to_cache( - uniq_version=substrate.metadata[0].value, errors=subtensor_errors_map - ) - _logger.info(f"File {_ERRORS_FILE_PATH} has been updated.") - return subtensor_errors_map - else: - return cached_errors_map.get("errors", {}) - - -def format_parent(proportion, parent) -> Tuple[str, str]: - """ - Formats raw parent data into a list of tuples. - Args: - parent: The raw parent data. - proportion: proportion of parent data. - Returns: - list: List of (proportion, child_address) tuples. - """ - int_proportion = ( - proportion.value if hasattr(proportion, "value") else int(proportion) - ) - return int_proportion, parent.value - - -def format_children(children) -> List[Tuple[int, str]]: - """ - Formats raw children data into a list of tuples. - Args: - children: The raw children data. - Returns: - list: List of (proportion, child_address) tuples. 
- """ - formatted_children = [] - for proportion, child in children: - # Convert U64 to int - int_proportion = ( - proportion.value if hasattr(proportion, "value") else int(proportion) - ) - if int_proportion > 0: - formatted_children.append((int_proportion, child.value)) - return formatted_children diff --git a/bittensor/utils/test_utils.py b/bittensor/utils/test_utils.py deleted file mode 100644 index fdaa1bda95..0000000000 --- a/bittensor/utils/test_utils.py +++ /dev/null @@ -1,22 +0,0 @@ -import socket -from random import randint -from typing import Set - -max_tries = 10 - - -def get_random_unused_port(allocated_ports: Set = set()): - def port_in_use(port: int) -> bool: - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - return s.connect_ex(("localhost", port)) == 0 - - tries = 0 - while tries < max_tries: - tries += 1 - port = randint(2**14, 2**16 - 1) - - if port not in allocated_ports and not port_in_use(port): - allocated_ports.add(port) - return port - - raise RuntimeError(f"Tried {max_tries} random ports and could not find an open one") diff --git a/bittensor/utils/version.py b/bittensor/utils/version.py deleted file mode 100644 index 2a5aa3cd57..0000000000 --- a/bittensor/utils/version.py +++ /dev/null @@ -1,103 +0,0 @@ -from typing import Optional -from pathlib import Path -import time -from packaging.version import Version - -import bittensor -import requests - -VERSION_CHECK_THRESHOLD = 86400 - - -class VersionCheckError(Exception): - pass - - -def _get_version_file_path() -> Path: - return Path.home() / ".bittensor" / ".last_known_version" - - -def _get_version_from_file(version_file: Path) -> Optional[str]: - try: - mtime = version_file.stat().st_mtime - bittensor.logging.debug(f"Found version file, last modified: {mtime}") - diff = time.time() - mtime - - if diff >= VERSION_CHECK_THRESHOLD: - bittensor.logging.debug("Version file expired") - return None - - return version_file.read_text() - except FileNotFoundError: - 
bittensor.logging.debug("No bitensor version file found") - return None - except OSError: - bittensor.logging.exception("Failed to read version file") - return None - - -def _get_version_from_pypi(timeout: int = 15) -> str: - bittensor.logging.debug( - f"Checking latest Bittensor version at: {bittensor.__pipaddress__}" - ) - try: - response = requests.get(bittensor.__pipaddress__, timeout=timeout) - latest_version = response.json()["info"]["version"] - return latest_version - except requests.exceptions.RequestException: - bittensor.logging.exception("Failed to get latest version from pypi") - raise - - -def get_and_save_latest_version(timeout: int = 15) -> str: - version_file = _get_version_file_path() - - if last_known_version := _get_version_from_file(version_file): - return last_known_version - - latest_version = _get_version_from_pypi(timeout) - - try: - version_file.write_text(latest_version) - except OSError: - bittensor.logging.exception("Failed to save latest version to file") - - return latest_version - - -def check_version(timeout: int = 15): - """ - Check if the current version of Bittensor is up to date with the latest version on PyPi. - Raises a VersionCheckError if the version check fails. - """ - - try: - latest_version = get_and_save_latest_version(timeout) - - if Version(latest_version) > Version(bittensor.__version__): - print( - "\u001b[33mBittensor Version: Current {}/Latest {}\nPlease update to the latest version at your earliest convenience. " - "Run the following command to upgrade:\n\n\u001b[0mpython -m pip install --upgrade bittensor".format( - bittensor.__version__, latest_version - ) - ) - except Exception as e: - raise VersionCheckError("Version check failed") from e - - -def version_checking(timeout: int = 15): - """ - Deprecated, kept for backwards compatibility. Use check_version() instead. 
- """ - - from warnings import warn - - warn( - "version_checking() is deprecated, please use check_version() instead", - DeprecationWarning, - ) - - try: - check_version(timeout) - except VersionCheckError: - bittensor.logging.exception("Version check failed") diff --git a/bittensor/utils/wallet_utils.py b/bittensor/utils/wallet_utils.py deleted file mode 100644 index 39218c33f0..0000000000 --- a/bittensor/utils/wallet_utils.py +++ /dev/null @@ -1,168 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -from substrateinterface.utils import ss58 -from typing import Union, Optional - -from .. 
import __ss58_format__ -from substrateinterface import Keypair as Keypair - - -def get_ss58_format(ss58_address: str) -> int: - """Returns the ss58 format of the given ss58 address.""" - return ss58.get_ss58_format(ss58_address) - - -def is_valid_ss58_address(address: str) -> bool: - """ - Checks if the given address is a valid ss58 address. - - Args: - address(str): The address to check. - - Returns: - True if the address is a valid ss58 address for Bittensor, False otherwise. - """ - try: - return ss58.is_valid_ss58_address( - address, valid_ss58_format=__ss58_format__ - ) or ss58.is_valid_ss58_address( - address, valid_ss58_format=42 - ) # Default substrate ss58 format (legacy) - except IndexError: - return False - - -def is_valid_ed25519_pubkey(public_key: Union[str, bytes]) -> bool: - """ - Checks if the given public_key is a valid ed25519 key. - - Args: - public_key(Union[str, bytes]): The public_key to check. - - Returns: - True if the public_key is a valid ed25519 key, False otherwise. - - """ - try: - if isinstance(public_key, str): - if len(public_key) != 64 and len(public_key) != 66: - raise ValueError("a public_key should be 64 or 66 characters") - elif isinstance(public_key, bytes): - if len(public_key) != 32: - raise ValueError("a public_key should be 32 bytes") - else: - raise ValueError("public_key must be a string or bytes") - - keypair = Keypair(public_key=public_key, ss58_format=__ss58_format__) - - ss58_addr = keypair.ss58_address - return ss58_addr is not None - - except (ValueError, IndexError): - return False - - -def is_valid_bittensor_address_or_public_key(address: Union[str, bytes]) -> bool: - """ - Checks if the given address is a valid destination address. - - Args: - address(Union[str, bytes]): The address to check. - - Returns: - True if the address is a valid destination address, False otherwise. 
- """ - if isinstance(address, str): - # Check if ed25519 - if address.startswith("0x"): - return is_valid_ed25519_pubkey(address) - else: - # Assume ss58 address - return is_valid_ss58_address(address) - elif isinstance(address, bytes): - # Check if ed25519 - return is_valid_ed25519_pubkey(address) - else: - # Invalid address type - return False - - -def create_identity_dict( - display: str = "", - legal: str = "", - web: str = "", - riot: str = "", - email: str = "", - pgp_fingerprint: Optional[str] = None, - image: str = "", - info: str = "", - twitter: str = "", -) -> dict: - """ - Creates a dictionary with structure for identity extrinsic. Must fit within 64 bits. - - Args: - display (str): String to be converted and stored under 'display'. - legal (str): String to be converted and stored under 'legal'. - web (str): String to be converted and stored under 'web'. - riot (str): String to be converted and stored under 'riot'. - email (str): String to be converted and stored under 'email'. - pgp_fingerprint (str): String to be converted and stored under 'pgp_fingerprint'. - image (str): String to be converted and stored under 'image'. - info (str): String to be converted and stored under 'info'. - twitter (str): String to be converted and stored under 'twitter'. - - Returns: - dict: A dictionary with the specified structure and byte string conversions. - - Raises: - ValueError: If pgp_fingerprint is not exactly 20 bytes long when encoded. 
- """ - if pgp_fingerprint and len(pgp_fingerprint.encode()) != 20: - raise ValueError("pgp_fingerprint must be exactly 20 bytes long when encoded") - - return { - "info": { - "additional": [[]], - "display": {f"Raw{len(display.encode())}": display.encode()}, - "legal": {f"Raw{len(legal.encode())}": legal.encode()}, - "web": {f"Raw{len(web.encode())}": web.encode()}, - "riot": {f"Raw{len(riot.encode())}": riot.encode()}, - "email": {f"Raw{len(email.encode())}": email.encode()}, - "pgp_fingerprint": pgp_fingerprint.encode() if pgp_fingerprint else None, - "image": {f"Raw{len(image.encode())}": image.encode()}, - "info": {f"Raw{len(info.encode())}": info.encode()}, - "twitter": {f"Raw{len(twitter.encode())}": twitter.encode()}, - } - } - - -def decode_hex_identity_dict(info_dictionary): - for key, value in info_dictionary.items(): - if isinstance(value, dict): - item = list(value.values())[0] - if isinstance(item, str) and item.startswith("0x"): - try: - info_dictionary[key] = bytes.fromhex(item[2:]).decode() - except UnicodeDecodeError: - print(f"Could not decode: {key}: {item}") - else: - info_dictionary[key] = item - return info_dictionary diff --git a/bittensor/utils/weight_utils.py b/bittensor/utils/weight_utils.py deleted file mode 100644 index de26d98c02..0000000000 --- a/bittensor/utils/weight_utils.py +++ /dev/null @@ -1,406 +0,0 @@ -""" -Conversion for weight between chain representation and np.array or torch.Tensor -""" - -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this 
permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import hashlib -import logging -from typing import Tuple, List, Union - -import numpy as np -from numpy.typing import NDArray -from scalecodec import ScaleBytes, U16, Vec -from substrateinterface import Keypair - -import bittensor -from bittensor.utils.registration import torch, use_torch, legacy_torch_api_compat - -U32_MAX = 4294967295 -U16_MAX = 65535 - - -@legacy_torch_api_compat -def normalize_max_weight( - x: Union[NDArray[np.float32], "torch.FloatTensor"], limit: float = 0.1 -) -> Union[NDArray[np.float32], "torch.FloatTensor"]: - r"""Normalizes the tensor x so that sum(x) = 1 and the max value is not greater than the limit. - Args: - x (:obj:`np.float32`): - Tensor to be max_value normalized. - limit: float: - Max value after normalization. - Returns: - y (:obj:`np.float32`): - Normalized x tensor. 
- """ - epsilon = 1e-7 # For numerical stability after normalization - - weights = x.copy() - values = np.sort(weights) - - if x.sum() == 0 or x.shape[0] * limit <= 1: - return np.ones_like(x) / x.shape[0] - else: - estimation = values / values.sum() - - if estimation.max() <= limit: - return weights / weights.sum() - - # Find the cumulative sum and sorted tensor - cumsum = np.cumsum(estimation, 0) - - # Determine the index of cutoff - estimation_sum = np.array( - [(len(values) - i - 1) * estimation[i] for i in range(len(values))] - ) - n_values = (estimation / (estimation_sum + cumsum + epsilon) < limit).sum() - - # Determine the cutoff based on the index - cutoff_scale = (limit * cumsum[n_values - 1] - epsilon) / ( - 1 - (limit * (len(estimation) - n_values)) - ) - cutoff = cutoff_scale * values.sum() - - # Applying the cutoff - weights[weights > cutoff] = cutoff - - y = weights / weights.sum() - - return y - - -def convert_weight_uids_and_vals_to_tensor( - n: int, uids: List[int], weights: List[int] -) -> Union[NDArray[np.float32], "torch.FloatTensor"]: - r"""Converts weights and uids from chain representation into a np.array (inverse operation from convert_weights_and_uids_for_emit) - Args: - n: int: - number of neurons on network. - uids (:obj:`List[int],`): - Tensor of uids as destinations for passed weights. - weights (:obj:`List[int],`): - Tensor of weights. - Returns: - row_weights ( np.float32 or torch.FloatTensor ): - Converted row weights. - """ - row_weights = ( - torch.zeros([n], dtype=torch.float32) - if use_torch() - else np.zeros([n], dtype=np.float32) - ) - for uid_j, wij in list(zip(uids, weights)): - row_weights[uid_j] = float( - wij - ) # assumes max-upscaled values (w_max = U16_MAX). 
- row_sum = row_weights.sum() - if row_sum > 0: - row_weights /= row_sum # normalize - return row_weights - - -def convert_root_weight_uids_and_vals_to_tensor( - n: int, uids: List[int], weights: List[int], subnets: List[int] -) -> Union[NDArray[np.float32], "torch.FloatTensor"]: - r"""Converts root weights and uids from chain representation into a np.array or torch FloatTensor (inverse operation from convert_weights_and_uids_for_emit) - Args: - n: int: - number of neurons on network. - uids (:obj:`List[int],`): - Tensor of uids as destinations for passed weights. - weights (:obj:`List[int],`): - Tensor of weights. - subnets (:obj:`List[int],`): - list of subnets on the network - Returns: - row_weights ( np.float32 ): - Converted row weights. - """ - - row_weights = ( - torch.zeros([n], dtype=torch.float32) - if use_torch() - else np.zeros([n], dtype=np.float32) - ) - for uid_j, wij in list(zip(uids, weights)): - if uid_j in subnets: - index_s = subnets.index(uid_j) - row_weights[index_s] = float( - wij - ) # assumes max-upscaled values (w_max = U16_MAX). - else: - logging.warning( - f"Incorrect Subnet uid {uid_j} in Subnets {subnets}. The subnet is unavailable at the moment." - ) - continue - row_sum = row_weights.sum() - if row_sum > 0: - row_weights /= row_sum # normalize - return row_weights - - -def convert_bond_uids_and_vals_to_tensor( - n: int, uids: List[int], bonds: List[int] -) -> Union[NDArray[np.int64], "torch.LongTensor"]: - r"""Converts bond and uids from chain representation into a np.array. - Args: - n: int: - number of neurons on network. - uids (:obj:`List[int],`): - Tensor of uids as destinations for passed bonds. - bonds (:obj:`List[int],`): - Tensor of bonds. - Returns: - row_bonds ( np.float32 ): - Converted row bonds. 
- """ - row_bonds = ( - torch.zeros([n], dtype=torch.int64) - if use_torch() - else np.zeros([n], dtype=np.int64) - ) - for uid_j, bij in list(zip(uids, bonds)): - row_bonds[uid_j] = int(bij) - return row_bonds - - -def convert_weights_and_uids_for_emit( - uids: Union[NDArray[np.int64], "torch.LongTensor"], - weights: Union[NDArray[np.float32], "torch.FloatTensor"], -) -> Tuple[List[int], List[int]]: - r"""Converts weights into integer u32 representation that sum to MAX_INT_WEIGHT. - Args: - uids (:obj:`np.int64,`): - Tensor of uids as destinations for passed weights. - weights (:obj:`np.float32,`): - Tensor of weights. - Returns: - weight_uids (List[int]): - Uids as a list. - weight_vals (List[int]): - Weights as a list. - """ - # Checks. - weights = weights.tolist() - uids = uids.tolist() - if min(weights) < 0: - raise ValueError( - "Passed weight is negative cannot exist on chain {}".format(weights) - ) - if min(uids) < 0: - raise ValueError("Passed uid is negative cannot exist on chain {}".format(uids)) - if len(uids) != len(weights): - raise ValueError( - "Passed weights and uids must have the same length, got {} and {}".format( - len(uids), len(weights) - ) - ) - if sum(weights) == 0: - return [], [] # Nothing to set on chain. - else: - max_weight = float(max(weights)) - weights = [ - float(value) / max_weight for value in weights - ] # max-upscale values (max_weight = 1). - - weight_vals = [] - weight_uids = [] - for i, (weight_i, uid_i) in enumerate(list(zip(weights, uids))): - uint16_val = round( - float(weight_i) * int(U16_MAX) - ) # convert to int representation. 
- - # Filter zeros - if uint16_val != 0: # Filter zeros - weight_vals.append(uint16_val) - weight_uids.append(uid_i) - - return weight_uids, weight_vals - - -def process_weights_for_netuid( - uids: Union[NDArray[np.int64], "torch.Tensor"], - weights: Union[NDArray[np.float32], "torch.Tensor"], - netuid: int, - subtensor: "bittensor.subtensor", - metagraph: "bittensor.metagraph" = None, - exclude_quantile: int = 0, -) -> Union[ - Tuple["torch.Tensor", "torch.FloatTensor"], - Tuple[NDArray[np.int64], NDArray[np.float32]], -]: - bittensor.logging.debug("process_weights_for_netuid()") - bittensor.logging.debug("weights", weights) - bittensor.logging.debug("netuid", netuid) - bittensor.logging.debug("subtensor", subtensor) - bittensor.logging.debug("metagraph", metagraph) - - # Get latest metagraph from chain if metagraph is None. - if metagraph is None: - metagraph = subtensor.metagraph(netuid) - - # Cast weights to floats. - if use_torch(): - if not isinstance(weights, torch.FloatTensor): - weights = weights.type(torch.float32) - else: - if not isinstance(weights, np.float32): - weights = weights.astype(np.float32) - - # Network configuration parameters from an subtensor. - # These parameters determine the range of acceptable weights for each neuron. - quantile = exclude_quantile / U16_MAX - min_allowed_weights = subtensor.min_allowed_weights(netuid=netuid) - max_weight_limit = subtensor.max_weight_limit(netuid=netuid) - bittensor.logging.debug("quantile", quantile) - bittensor.logging.debug("min_allowed_weights", min_allowed_weights) - bittensor.logging.debug("max_weight_limit", max_weight_limit) - - # Find all non zero weights. 
- non_zero_weight_idx = ( - torch.argwhere(weights > 0).squeeze(dim=1) - if use_torch() - else np.argwhere(weights > 0).squeeze(axis=1) - ) - non_zero_weight_uids = uids[non_zero_weight_idx] - non_zero_weights = weights[non_zero_weight_idx] - nzw_size = non_zero_weights.numel() if use_torch() else non_zero_weights.size - if nzw_size == 0 or metagraph.n < min_allowed_weights: - bittensor.logging.warning("No non-zero weights returning all ones.") - final_weights = ( - torch.ones((metagraph.n)).to(metagraph.n) / metagraph.n - if use_torch() - else np.ones((metagraph.n), dtype=np.int64) / metagraph.n - ) - bittensor.logging.debug("final_weights", final_weights) - final_weights_count = ( - torch.tensor(list(range(len(final_weights)))) - if use_torch() - else np.arange(len(final_weights)) - ) - return ( - (final_weights_count, final_weights) - if use_torch() - else (final_weights_count, final_weights) - ) - - elif nzw_size < min_allowed_weights: - bittensor.logging.warning( - "No non-zero weights less then min allowed weight, returning all ones." - ) - # ( const ): Should this be np.zeros( ( metagraph.n ) ) to reset everyone to build up weight? 
- weights = ( - torch.ones((metagraph.n)).to(metagraph.n) * 1e-5 - if use_torch() - else np.ones((metagraph.n), dtype=np.int64) * 1e-5 - ) # creating minimum even non-zero weights - weights[non_zero_weight_idx] += non_zero_weights - bittensor.logging.debug("final_weights", weights) - normalized_weights = bittensor.utils.weight_utils.normalize_max_weight( - x=weights, limit=max_weight_limit - ) - nw_arange = ( - torch.tensor(list(range(len(normalized_weights)))) - if use_torch() - else np.arange(len(normalized_weights)) - ) - return nw_arange, normalized_weights - - bittensor.logging.debug("non_zero_weights", non_zero_weights) - - # Compute the exclude quantile and find the weights in the lowest quantile - max_exclude = max(0, len(non_zero_weights) - min_allowed_weights) / len( - non_zero_weights - ) - exclude_quantile = min([quantile, max_exclude]) - lowest_quantile = ( - non_zero_weights.quantile(exclude_quantile) - if use_torch() - else np.quantile(non_zero_weights, exclude_quantile) - ) - bittensor.logging.debug("max_exclude", max_exclude) - bittensor.logging.debug("exclude_quantile", exclude_quantile) - bittensor.logging.debug("lowest_quantile", lowest_quantile) - - # Exclude all weights below the allowed quantile. - non_zero_weight_uids = non_zero_weight_uids[lowest_quantile <= non_zero_weights] - non_zero_weights = non_zero_weights[lowest_quantile <= non_zero_weights] - bittensor.logging.debug("non_zero_weight_uids", non_zero_weight_uids) - bittensor.logging.debug("non_zero_weights", non_zero_weights) - - # Normalize weights and return. 
- normalized_weights = bittensor.utils.weight_utils.normalize_max_weight( - x=non_zero_weights, limit=max_weight_limit - ) - bittensor.logging.debug("final_weights", normalized_weights) - - return non_zero_weight_uids, normalized_weights - - -def generate_weight_hash( - address: str, - netuid: int, - uids: List[int], - values: List[int], - version_key: int, - salt: List[int], -) -> str: - """ - Generate a valid commit hash from the provided weights. - - Args: - address (str): The account identifier. Wallet ss58_address. - netuid (int): The network unique identifier. - uids (List[int]): The list of UIDs. - salt (List[int]): The salt to add to hash. - values (List[int]): The list of weight values. - version_key (int): The version key. - - Returns: - str: The generated commit hash. - """ - # Encode data using SCALE codec - wallet_address = ScaleBytes(Keypair(ss58_address=address).public_key) - netuid = ScaleBytes(netuid.to_bytes(2, "little")) - - vec_uids = Vec(data=None, sub_type="U16") - vec_uids.value = [U16(ScaleBytes(uid.to_bytes(2, "little"))) for uid in uids] - uids = ScaleBytes(vec_uids.encode().data) - - vec_values = Vec(data=None, sub_type="U16") - vec_values.value = [ - U16(ScaleBytes(value.to_bytes(2, "little"))) for value in values - ] - values = ScaleBytes(vec_values.encode().data) - - version_key = ScaleBytes(version_key.to_bytes(8, "little")) - - vec_salt = Vec(data=None, sub_type="U16") - vec_salt.value = [U16(ScaleBytes(salts.to_bytes(2, "little"))) for salts in salt] - salt = ScaleBytes(vec_salt.encode().data) - - data = wallet_address + netuid + uids + values + salt + version_key - - # Generate Blake2b hash of the data tuple - blake2b_hash = hashlib.blake2b(data.data, digest_size=32) - - # Convert the hash to hex string and add "0x" prefix - commit_hash = "0x" + blake2b_hash.hexdigest() - - return commit_hash diff --git a/bittensor/wallet.py b/bittensor/wallet.py deleted file mode 100644 index 28da5d8654..0000000000 --- a/bittensor/wallet.py +++ 
/dev/null @@ -1,873 +0,0 @@ -"""Implementation of the wallet class, which manages balances with staking and transfer. Also manages hotkey and coldkey.""" - -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import argparse -import copy -import os -from typing import Dict, Optional, Tuple, Union, overload - -from substrateinterface import Keypair -from termcolor import colored - -import bittensor -from bittensor.utils import is_valid_bittensor_address_or_public_key - - -def display_mnemonic_msg(keypair: Keypair, key_type: str): - """ - Display the mnemonic and a warning message to keep the mnemonic safe. - - Args: - keypair (Keypair): Keypair object. - key_type (str): Type of the key (coldkey or hotkey). 
- """ - mnemonic = keypair.mnemonic - mnemonic_green = colored(mnemonic, "green") - print( - colored( - "\nIMPORTANT: Store this mnemonic in a secure (preferable offline place), as anyone " - "who has possession of this mnemonic can use it to regenerate the key and access your tokens. \n", - "red", - ) - ) - print("The mnemonic to the new {} is:\n\n{}\n".format(key_type, mnemonic_green)) - print( - "You can use the mnemonic to recreate the key in case it gets lost. The command to use to regenerate the key using this mnemonic is:" - ) - print("btcli w regen_{} --mnemonic {}".format(key_type, mnemonic)) - print("") - - -class wallet: - """ - The wallet class in the Bittensor framework handles wallet functionality, crucial for participating in the Bittensor network. - - It manages two types of keys: coldkey and hotkey, each serving different purposes in network operations. Each wallet contains a coldkey and a hotkey. - - The coldkey is the user's primary key for holding stake in their wallet and is the only way that users - can access Tao. Coldkeys can hold tokens and should be encrypted on your device. - - The coldkey is the primary key used for securing the wallet's stake in the Bittensor network (Tao) and - is critical for financial transactions like staking and unstaking tokens. It's recommended to keep the - coldkey encrypted and secure, as it holds the actual tokens. - - The hotkey, in contrast, is used for operational tasks like subscribing to and setting weights in the - network. It's linked to the coldkey through the metagraph and does not directly hold tokens, thereby - offering a safer way to interact with the network during regular operations. - - Args: - name (str): The name of the wallet, used to identify it among possibly multiple wallets. - path (str): File system path where wallet keys are stored. - hotkey_str (str): String identifier for the hotkey. 
- _hotkey, _coldkey, _coldkeypub (bittensor.Keypair): Internal representations of the hotkey and coldkey. - - Methods: - create_if_non_existent, create, recreate: Methods to handle the creation of wallet keys. - get_coldkey, get_hotkey, get_coldkeypub: Methods to retrieve specific keys. - set_coldkey, set_hotkey, set_coldkeypub: Methods to set or update keys. - hotkey_file, coldkey_file, coldkeypub_file: Properties that return respective key file objects. - regenerate_coldkey, regenerate_hotkey, regenerate_coldkeypub: Methods to regenerate keys from different sources. - config, help, add_args: Utility methods for configuration and assistance. - - The wallet class is a fundamental component for users to interact securely with the Bittensor network, facilitating both operational tasks and transactions involving value transfer across the network. - - Example Usage:: - - # Create a new wallet with default coldkey and hotkey names - my_wallet = wallet() - - # Access hotkey and coldkey - hotkey = my_wallet.get_hotkey() - coldkey = my_wallet.get_coldkey() - - # Set a new coldkey - my_wallet.new_coldkey(n_words=24) # number of seed words to use - - # Update wallet hotkey - my_wallet.set_hotkey(new_hotkey) - - # Print wallet details - print(my_wallet) - - # Access coldkey property, must use password to unlock - my_wallet.coldkey - """ - - @classmethod - def config(cls) -> "bittensor.config": - """ - Get config from the argument parser. - - Returns: - bittensor.config: Config object. - """ - parser = argparse.ArgumentParser() - cls.add_args(parser) - return bittensor.config(parser, args=[]) - - @classmethod - def help(cls): - """ - Print help to stdout. - """ - parser = argparse.ArgumentParser() - cls.add_args(parser) - print(cls.__new__.__doc__) - parser.print_help() - - @classmethod - def add_args(cls, parser: argparse.ArgumentParser, prefix: str = None): - """ - Accept specific arguments from parser. - - Args: - parser (argparse.ArgumentParser): Argument parser object. 
- prefix (str): Argument prefix. - """ - prefix_str = "" if prefix is None else prefix + "." - try: - default_name = os.getenv("BT_WALLET_NAME") or "default" - default_hotkey = os.getenv("BT_WALLET_NAME") or "default" - default_path = os.getenv("BT_WALLET_PATH") or "~/.bittensor/wallets/" - parser.add_argument( - "--" + prefix_str + "wallet.name", - required=False, - default=default_name, - help="The name of the wallet to unlock for running bittensor " - "(name mock is reserved for mocking this wallet)", - ) - parser.add_argument( - "--" + prefix_str + "wallet.hotkey", - required=False, - default=default_hotkey, - help="The name of the wallet's hotkey.", - ) - parser.add_argument( - "--" + prefix_str + "wallet.path", - required=False, - default=default_path, - help="The path to your bittensor wallets", - ) - except argparse.ArgumentError as e: - pass - - def __init__( - self, - name: str = None, - hotkey: str = None, - path: str = None, - config: "bittensor.config" = None, - ): - r""" - Initialize the bittensor wallet object containing a hot and coldkey. - - Args: - name (str, optional): The name of the wallet to unlock for running bittensor. Defaults to ``default``. - hotkey (str, optional): The name of hotkey used to running the miner. Defaults to ``default``. - path (str, optional): The path to your bittensor wallets. Defaults to ``~/.bittensor/wallets/``. - config (bittensor.config, optional): bittensor.wallet.config(). Defaults to ``None``. - """ - # Fill config from passed args using command line defaults. 
- if config is None: - config = wallet.config() - self.config = copy.deepcopy(config) - self.config.wallet.name = name or self.config.wallet.get( - "name", bittensor.defaults.wallet.name - ) - self.config.wallet.hotkey = hotkey or self.config.wallet.get( - "hotkey", bittensor.defaults.wallet.hotkey - ) - self.config.wallet.path = path or self.config.wallet.get( - "path", bittensor.defaults.wallet.path - ) - - self.name = self.config.wallet.name - self.path = self.config.wallet.path - self.hotkey_str = self.config.wallet.hotkey - - self._hotkey = None - self._coldkey = None - self._coldkeypub = None - - def __str__(self): - """ - Returns the string representation of the Wallet object. - - Returns: - str: The string representation. - """ - return "wallet({}, {}, {})".format(self.name, self.hotkey_str, self.path) - - def __repr__(self): - """ - Returns the string representation of the wallet object. - - Returns: - str: The string representation. - """ - return self.__str__() - - def create_if_non_existent( - self, coldkey_use_password: bool = True, hotkey_use_password: bool = False - ) -> "wallet": - """ - Checks for existing coldkeypub and hotkeys, and creates them if non-existent. - - Args: - coldkey_use_password (bool, optional): Whether to use a password for coldkey. Defaults to ``True``. - hotkey_use_password (bool, optional): Whether to use a password for hotkey. Defaults to ``False``. - - Returns: - wallet: The wallet object. - """ - return self.create(coldkey_use_password, hotkey_use_password) - - def create( - self, coldkey_use_password: bool = True, hotkey_use_password: bool = False - ) -> "wallet": - """ - Checks for existing coldkeypub and hotkeys, and creates them if non-existent. - - Args: - coldkey_use_password (bool, optional): Whether to use a password for coldkey. Defaults to ``True``. - hotkey_use_password (bool, optional): Whether to use a password for hotkey. Defaults to ``False``. - - Returns: - wallet: The wallet object. 
- """ - # ---- Setup Wallet. ---- - if ( - not self.coldkey_file.exists_on_device() - and not self.coldkeypub_file.exists_on_device() - ): - self.create_new_coldkey(n_words=12, use_password=coldkey_use_password) - if not self.hotkey_file.exists_on_device(): - self.create_new_hotkey(n_words=12, use_password=hotkey_use_password) - return self - - def recreate( - self, coldkey_use_password: bool = True, hotkey_use_password: bool = False - ) -> "wallet": - """ - Checks for existing coldkeypub and hotkeys and creates them if non-existent. - - Args: - coldkey_use_password (bool, optional): Whether to use a password for coldkey. Defaults to ``True``. - hotkey_use_password (bool, optional): Whether to use a password for hotkey. Defaults to ``False``. - - Returns: - wallet: The wallet object. - """ - # ---- Setup Wallet. ---- - self.create_new_coldkey(n_words=12, use_password=coldkey_use_password) - self.create_new_hotkey(n_words=12, use_password=hotkey_use_password) - return self - - @property - def hotkey_file(self) -> "bittensor.keyfile": - """ - Property that returns the hotkey file. - - Returns: - bittensor.keyfile: The hotkey file. - """ - wallet_path = os.path.expanduser(os.path.join(self.path, self.name)) - hotkey_path = os.path.join(wallet_path, "hotkeys", self.hotkey_str) - return bittensor.keyfile(path=hotkey_path) - - @property - def coldkey_file(self) -> "bittensor.keyfile": - """ - Property that returns the coldkey file. - - Returns: - bittensor.keyfile: The coldkey file. - """ - wallet_path = os.path.expanduser(os.path.join(self.path, self.name)) - coldkey_path = os.path.join(wallet_path, "coldkey") - return bittensor.keyfile(path=coldkey_path) - - @property - def coldkeypub_file(self) -> "bittensor.keyfile": - """ - Property that returns the coldkeypub file. - - Returns: - bittensor.keyfile: The coldkeypub file. 
- """ - wallet_path = os.path.expanduser(os.path.join(self.path, self.name)) - coldkeypub_path = os.path.join(wallet_path, "coldkeypub.txt") - return bittensor.keyfile(path=coldkeypub_path) - - def set_hotkey( - self, - keypair: "bittensor.Keypair", - encrypt: bool = False, - overwrite: bool = False, - ) -> "bittensor.keyfile": - """ - Sets the hotkey for the wallet. - - Args: - keypair (bittensor.Keypair): The hotkey keypair. - encrypt (bool, optional): Whether to encrypt the hotkey. Defaults to ``False``. - overwrite (bool, optional): Whether to overwrite an existing hotkey. Defaults to ``False``. - - Returns: - bittensor.keyfile: The hotkey file. - """ - self._hotkey = keypair - self.hotkey_file.set_keypair(keypair, encrypt=encrypt, overwrite=overwrite) - - def set_coldkeypub( - self, - keypair: "bittensor.Keypair", - encrypt: bool = False, - overwrite: bool = False, - ) -> "bittensor.keyfile": - """ - Sets the coldkeypub for the wallet. - - Args: - keypair (bittensor.Keypair): The coldkeypub keypair. - encrypt (bool, optional): Whether to encrypt the coldkeypub. Defaults to ``False``. - overwrite (bool, optional): Whether to overwrite an existing coldkeypub. Defaults to ``False``. - - Returns: - bittensor.keyfile: The coldkeypub file. - """ - self._coldkeypub = bittensor.Keypair(ss58_address=keypair.ss58_address) - self.coldkeypub_file.set_keypair( - self._coldkeypub, encrypt=encrypt, overwrite=overwrite - ) - - def set_coldkey( - self, - keypair: "bittensor.Keypair", - encrypt: bool = True, - overwrite: bool = False, - ) -> "bittensor.keyfile": - """ - Sets the coldkey for the wallet. - - Args: - keypair (bittensor.Keypair): The coldkey keypair. - encrypt (bool, optional): Whether to encrypt the coldkey. Defaults to ``True``. - overwrite (bool, optional): Whether to overwrite an existing coldkey. Defaults to ``False``. - - Returns: - bittensor.keyfile: The coldkey file. 
- """ - self._coldkey = keypair - self.coldkey_file.set_keypair( - self._coldkey, encrypt=encrypt, overwrite=overwrite - ) - - def get_coldkey(self, password: str = None) -> "bittensor.Keypair": - """ - Gets the coldkey from the wallet. - - Args: - password (str, optional): The password to decrypt the coldkey. Defaults to ``None``. - - Returns: - bittensor.Keypair: The coldkey keypair. - """ - return self.coldkey_file.get_keypair(password=password) - - def get_hotkey(self, password: str = None) -> "bittensor.Keypair": - """ - Gets the hotkey from the wallet. - - Args: - password (str, optional): The password to decrypt the hotkey. Defaults to ``None``. - - Returns: - bittensor.Keypair: The hotkey keypair. - """ - return self.hotkey_file.get_keypair(password=password) - - def get_coldkeypub(self, password: str = None) -> "bittensor.Keypair": - """ - Gets the coldkeypub from the wallet. - - Args: - password (str, optional): The password to decrypt the coldkeypub. Defaults to ``None``. - - Returns: - bittensor.Keypair: The coldkeypub keypair. - """ - return self.coldkeypub_file.get_keypair(password=password) - - @property - def hotkey(self) -> "bittensor.Keypair": - r"""Loads the hotkey from wallet.path/wallet.name/hotkeys/wallet.hotkey or raises an error. - - Returns: - hotkey (Keypair): - hotkey loaded from config arguments. - Raises: - KeyFileError: Raised if the file is corrupt of non-existent. - CryptoKeyError: Raised if the user enters an incorrec password for an encrypted keyfile. - """ - if self._hotkey == None: - self._hotkey = self.hotkey_file.keypair - return self._hotkey - - @property - def coldkey(self) -> "bittensor.Keypair": - r"""Loads the hotkey from wallet.path/wallet.name/coldkey or raises an error. - - Returns: - coldkey (Keypair): coldkey loaded from config arguments. - Raises: - KeyFileError: Raised if the file is corrupt of non-existent. - CryptoKeyError: Raised if the user enters an incorrec password for an encrypted keyfile. 
- """ - if self._coldkey == None: - self._coldkey = self.coldkey_file.keypair - return self._coldkey - - @property - def coldkeypub(self) -> "bittensor.Keypair": - r"""Loads the coldkeypub from wallet.path/wallet.name/coldkeypub.txt or raises an error. - - Returns: - coldkeypub (Keypair): coldkeypub loaded from config arguments. - Raises: - KeyFileError: Raised if the file is corrupt of non-existent. - CryptoKeyError: Raised if the user enters an incorrect password for an encrypted keyfile. - """ - if self._coldkeypub == None: - self._coldkeypub = self.coldkeypub_file.keypair - return self._coldkeypub - - def create_coldkey_from_uri( - self, - uri: str, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": - """Creates coldkey from suri string, optionally encrypts it with the user-provided password. - - Args: - uri: (str, required): - URI string to use i.e., ``/Alice`` or ``/Bob``. - use_password (bool, optional): - Is the created key password protected. - overwrite (bool, optional): - Determines if this operation overwrites the coldkey under the same path ``//coldkey``. - Returns: - wallet (bittensor.wallet): - This object with newly created coldkey. - """ - keypair = Keypair.create_from_uri(uri) - if not suppress: - display_mnemonic_msg(keypair, "coldkey") - self.set_coldkey(keypair, encrypt=use_password, overwrite=overwrite) - self.set_coldkeypub(keypair, overwrite=overwrite) - return self - - def create_hotkey_from_uri( - self, - uri: str, - use_password: bool = False, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": - """Creates hotkey from suri string, optionally encrypts it with the user-provided password. - - Args: - uri: (str, required): - URI string to use i.e., ``/Alice`` or ``/Bob`` - use_password (bool, optional): - Is the created key password protected. - overwrite (bool, optional): - Determines if this operation overwrites the hotkey under the same path ``//hotkeys/``. 
- Returns: - wallet (bittensor.wallet): - This object with newly created hotkey. - """ - keypair = Keypair.create_from_uri(uri) - if not suppress: - display_mnemonic_msg(keypair, "hotkey") - self.set_hotkey(keypair, encrypt=use_password, overwrite=overwrite) - return self - - def new_coldkey( - self, - n_words: int = 12, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": - """Creates a new coldkey, optionally encrypts it with the user-provided password and saves to disk. - - Args: - n_words: (int, optional): - Number of mnemonic words to use. - use_password (bool, optional): - Is the created key password protected. - overwrite (bool, optional): - Determines if this operation overwrites the coldkey under the same path ``//coldkey``. - Returns: - wallet (bittensor.wallet): - This object with newly created coldkey. - """ - self.create_new_coldkey(n_words, use_password, overwrite, suppress) - - def create_new_coldkey( - self, - n_words: int = 12, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": - """Creates a new coldkey, optionally encrypts it with the user-provided password and saves to disk. - - Args: - n_words: (int, optional): - Number of mnemonic words to use. - use_password (bool, optional): - Is the created key password protected. - overwrite (bool, optional): - Determines if this operation overwrites the coldkey under the same path ``//coldkey``. - Returns: - wallet (bittensor.wallet): - This object with newly created coldkey. 
- """ - mnemonic = Keypair.generate_mnemonic(n_words) - keypair = Keypair.create_from_mnemonic(mnemonic) - if not suppress: - display_mnemonic_msg(keypair, "coldkey") - self.set_coldkey(keypair, encrypt=use_password, overwrite=overwrite) - self.set_coldkeypub(keypair, overwrite=overwrite) - return self - - def new_hotkey( - self, - n_words: int = 12, - use_password: bool = False, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": - """Creates a new hotkey, optionally encrypts it with the user-provided password and saves to disk. - - Args: - n_words: (int, optional): - Number of mnemonic words to use. - use_password (bool, optional): - Is the created key password protected. - overwrite (bool, optional): - Determines if this operation overwrites the hotkey under the same path ``//hotkeys/``. - Returns: - wallet (bittensor.wallet): - This object with newly created hotkey. - """ - self.create_new_hotkey(n_words, use_password, overwrite, suppress) - - def create_new_hotkey( - self, - n_words: int = 12, - use_password: bool = False, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": - """Creates a new hotkey, optionally encrypts it with the user-provided password and saves to disk. - - Args: - n_words: (int, optional): - Number of mnemonic words to use. - use_password (bool, optional): - Is the created key password protected. - overwrite (bool, optional): - Will this operation overwrite the hotkey under the same path //hotkeys/ - Returns: - wallet (bittensor.wallet): - This object with newly created hotkey. 
- """ - mnemonic = Keypair.generate_mnemonic(n_words) - keypair = Keypair.create_from_mnemonic(mnemonic) - if not suppress: - display_mnemonic_msg(keypair, "hotkey") - self.set_hotkey(keypair, encrypt=use_password, overwrite=overwrite) - return self - - def regenerate_coldkeypub( - self, - ss58_address: Optional[str] = None, - public_key: Optional[Union[str, bytes]] = None, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": - """Regenerates the coldkeypub from the passed ``ss58_address`` or public_key and saves the file. Requires either ``ss58_address`` or public_key to be passed. - - Args: - ss58_address: (str, optional): - Address as ``ss58`` string. - public_key: (str | bytes, optional): - Public key as hex string or bytes. - overwrite (bool, optional) (default: False): - Determins if this operation overwrites the coldkeypub (if exists) under the same path ``//coldkeypub``. - Returns: - wallet (bittensor.wallet): - Newly re-generated wallet with coldkeypub. - - """ - if ss58_address is None and public_key is None: - raise ValueError("Either ss58_address or public_key must be passed") - - if not is_valid_bittensor_address_or_public_key( - ss58_address if ss58_address is not None else public_key - ): - raise ValueError( - f"Invalid {'ss58_address' if ss58_address is not None else 'public_key'}" - ) - - if ss58_address is not None: - ss58_format = bittensor.utils.get_ss58_format(ss58_address) - keypair = Keypair( - ss58_address=ss58_address, - public_key=public_key, - ss58_format=ss58_format, - ) - else: - keypair = Keypair( - ss58_address=ss58_address, - public_key=public_key, - ss58_format=bittensor.__ss58_format__, - ) - - # No need to encrypt the public key - self.set_coldkeypub(keypair, overwrite=overwrite) - - return self - - # Short name for regenerate_coldkeypub - regen_coldkeypub = regenerate_coldkeypub - - @overload - def regenerate_coldkey( - self, - mnemonic: Optional[Union[list, str]] = None, - use_password: bool = True, - overwrite: 
bool = False, - suppress: bool = False, - ) -> "wallet": ... - - @overload - def regenerate_coldkey( - self, - seed: Optional[str] = None, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": ... - - @overload - def regenerate_coldkey( - self, - json: Optional[Tuple[Union[str, Dict], str]] = None, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": ... - - def regenerate_coldkey( - self, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - **kwargs, - ) -> "wallet": - """Regenerates the coldkey from the passed mnemonic or seed, or JSON encrypts it with the user's password and saves the file. - - Args: - mnemonic: (Union[list, str], optional): - Key mnemonic as list of words or string space separated words. - seed: (str, optional): - Seed as hex string. - json: (Tuple[Union[str, Dict], str], optional): - Restore from encrypted JSON backup as ``(json_data: Union[str, Dict], passphrase: str)`` - use_password (bool, optional): - Is the created key password protected. - overwrite (bool, optional): - Determines if this operation overwrites the coldkey under the same path ``//coldkey``. - Returns: - wallet (bittensor.wallet): - This object with newly created coldkey. - - Note: - Uses priority order: ``mnemonic > seed > json``. - - """ - if len(kwargs) == 0: - raise ValueError("Must pass either mnemonic, seed, or json") - - # Get from kwargs - mnemonic = kwargs.get("mnemonic", None) - seed = kwargs.get("seed", None) - json = kwargs.get("json", None) - - if mnemonic is None and seed is None and json is None: - raise ValueError("Must pass either mnemonic, seed, or json") - if mnemonic is not None: - if isinstance(mnemonic, str): - mnemonic = mnemonic.split() - elif isinstance(mnemonic, list) and len(mnemonic) == 1: - mnemonic = mnemonic[0].split() - if len(mnemonic) not in [12, 15, 18, 21, 24]: - raise ValueError( - "Mnemonic has invalid size. 
This should be 12,15,18,21 or 24 words" - ) - keypair = Keypair.create_from_mnemonic( - " ".join(mnemonic), ss58_format=bittensor.__ss58_format__ - ) - if not suppress: - display_mnemonic_msg(keypair, "coldkey") - elif seed is not None: - keypair = Keypair.create_from_seed( - seed, ss58_format=bittensor.__ss58_format__ - ) - else: - # json is not None - if ( - not isinstance(json, tuple) - or len(json) != 2 - or not isinstance(json[0], (str, dict)) - or not isinstance(json[1], str) - ): - raise ValueError( - "json must be a tuple of (json_data: str | Dict, passphrase: str)" - ) - - json_data, passphrase = json - keypair = Keypair.create_from_encrypted_json( - json_data, passphrase, ss58_format=bittensor.__ss58_format__ - ) - - self.set_coldkey(keypair, encrypt=use_password, overwrite=overwrite) - self.set_coldkeypub(keypair, overwrite=overwrite) - return self - - # Short name for regenerate_coldkey - regen_coldkey = regenerate_coldkey - - @overload - def regenerate_hotkey( - self, - mnemonic: Optional[Union[list, str]] = None, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": ... - - @overload - def regenerate_hotkey( - self, - seed: Optional[str] = None, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": ... - - @overload - def regenerate_hotkey( - self, - json: Optional[Tuple[Union[str, Dict], str]] = None, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - ) -> "wallet": ... - - def regenerate_hotkey( - self, - use_password: bool = True, - overwrite: bool = False, - suppress: bool = False, - **kwargs, - ) -> "wallet": - """Regenerates the hotkey from passed mnemonic or seed, encrypts it with the user's password and saves the file. - - Args: - mnemonic: (Union[list, str], optional): - Key mnemonic as list of words or string space separated words. - seed: (str, optional): - Seed as hex string. 
- json: (Tuple[Union[str, Dict], str], optional): - Restore from encrypted JSON backup as ``(json_data: Union[str, Dict], passphrase: str)``. - use_password (bool, optional): - Is the created key password protected. - overwrite (bool, optional): - Determies if this operation overwrites the hotkey under the same path ``//hotkeys/``. - Returns: - wallet (bittensor.wallet): - This object with newly created hotkey. - """ - if len(kwargs) == 0: - raise ValueError("Must pass either mnemonic, seed, or json") - - # Get from kwargs - mnemonic = kwargs.get("mnemonic", None) - seed = kwargs.get("seed", None) - json = kwargs.get("json", None) - - if mnemonic is None and seed is None and json is None: - raise ValueError("Must pass either mnemonic, seed, or json") - if mnemonic is not None: - if isinstance(mnemonic, str): - mnemonic = mnemonic.split() - elif isinstance(mnemonic, list) and len(mnemonic) == 1: - mnemonic = mnemonic[0].split() - if len(mnemonic) not in [12, 15, 18, 21, 24]: - raise ValueError( - "Mnemonic has invalid size. 
This should be 12,15,18,21 or 24 words" - ) - keypair = Keypair.create_from_mnemonic( - " ".join(mnemonic), ss58_format=bittensor.__ss58_format__ - ) - if not suppress: - display_mnemonic_msg(keypair, "hotkey") - elif seed is not None: - keypair = Keypair.create_from_seed( - seed, ss58_format=bittensor.__ss58_format__ - ) - else: - # json is not None - if ( - not isinstance(json, tuple) - or len(json) != 2 - or not isinstance(json[0], (str, dict)) - or not isinstance(json[1], str) - ): - raise ValueError( - "json must be a tuple of (json_data: str | Dict, passphrase: str)" - ) - - json_data, passphrase = json - keypair = Keypair.create_from_encrypted_json( - json_data, passphrase, ss58_format=bittensor.__ss58_format__ - ) - - self.set_hotkey(keypair, encrypt=use_password, overwrite=overwrite) - return self - - # Short name for regenerate_hotkey - regen_hotkey = regenerate_hotkey diff --git a/contrib/CODE_REVIEW_DOCS.md b/contrib/CODE_REVIEW_DOCS.md deleted file mode 100644 index 9909606a89..0000000000 --- a/contrib/CODE_REVIEW_DOCS.md +++ /dev/null @@ -1,72 +0,0 @@ -# Code Review -### Conceptual Review - -A review can be a conceptual review, where the reviewer leaves a comment - * `Concept (N)ACK`, meaning "I do (not) agree with the general goal of this pull - request", - * `Approach (N)ACK`, meaning `Concept ACK`, but "I do (not) agree with the - approach of this change". - -A `NACK` needs to include a rationale why the change is not worthwhile. -NACKs without accompanying reasoning may be disregarded. -After conceptual agreement on the change, code review can be provided. A review -begins with `ACK BRANCH_COMMIT`, where `BRANCH_COMMIT` is the top of the PR -branch, followed by a description of how the reviewer did the review. 
The -following language is used within pull request comments: - - - "I have tested the code", involving change-specific manual testing in - addition to running the unit, functional, or fuzz tests, and in case it is - not obvious how the manual testing was done, it should be described; - - "I have not tested the code, but I have reviewed it and it looks - OK, I agree it can be merged"; - - A "nit" refers to a trivial, often non-blocking issue. - -### Code Review -Project maintainers reserve the right to weigh the opinions of peer reviewers -using common sense judgement and may also weigh based on merit. Reviewers that -have demonstrated a deeper commitment and understanding of the project over time -or who have clear domain expertise may naturally have more weight, as one would -expect in all walks of life. - -Where a patch set affects consensus-critical code, the bar will be much -higher in terms of discussion and peer review requirements, keeping in mind that -mistakes could be very costly to the wider community. This includes refactoring -of consensus-critical code. - -Where a patch set proposes to change the Bittensor consensus, it must have been -discussed extensively on the discord server and other channels, be accompanied by a widely -discussed BIP and have a generally widely perceived technical consensus of being -a worthwhile change based on the judgement of the maintainers. - -### Finding Reviewers - -As most reviewers are themselves developers with their own projects, the review -process can be quite lengthy, and some amount of patience is required. If you find -that you've been waiting for a pull request to be given attention for several -months, there may be a number of reasons for this, some of which you can do something -about: - - - It may be because of a feature freeze due to an upcoming release. During this time, - only bug fixes are taken into consideration. 
If your pull request is a new feature, - it will not be prioritized until after the release. Wait for the release. - - It may be because the changes you are suggesting do not appeal to people. Rather than - nits and critique, which require effort and means they care enough to spend time on your - contribution, thundering silence is a good sign of widespread (mild) dislike of a given change - (because people don't assume *others* won't actually like the proposal). Don't take - that personally, though! Instead, take another critical look at what you are suggesting - and see if it: changes too much, is too broad, doesn't adhere to the - [developer notes](DEVELOPMENT_WORKFLOW.md), is dangerous or insecure, is messily written, etc. - Identify and address any of the issues you find. Then ask e.g. on IRC if someone could give - their opinion on the concept itself. - - It may be because your code is too complex for all but a few people, and those people - may not have realized your pull request even exists. A great way to find people who - are qualified and care about the code you are touching is the - [Git Blame feature](https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/tracking-changes-in-a-file). Simply - look up who last modified the code you are changing and see if you can find - them and give them a nudge. Don't be incessant about the nudging, though. - - Finally, if all else fails, ask on IRC or elsewhere for someone to give your pull request - a look. If you think you've been waiting for an unreasonably long time (say, - more than a month) for no particular reason (a few lines changed, etc.), - this is totally fine. Try to return the favor when someone else is asking - for feedback on their code, and the universe balances out. - - Remember that the best thing you can do while waiting is give review to others! 
\ No newline at end of file diff --git a/contrib/CONTRIBUTING.md b/contrib/CONTRIBUTING.md deleted file mode 100644 index f9f4ed5f34..0000000000 --- a/contrib/CONTRIBUTING.md +++ /dev/null @@ -1,299 +0,0 @@ -# Contributing to Bittensor - -The following is a set of guidelines for contributing to Bittensor, which are hosted in the [Opentensor Organization](https://github.com/opentensor) on GitHub. These are mostly guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request. - -## Table Of Contents -1. [I don't want to read this whole thing, I just have a question!!!](#i-dont-want-to-read-this-whole-thing-i-just-have-a-question) -1. [What should I know before I get started?](#what-should-i-know-before-i-get-started) -1. [Getting Started](#getting-started) - 1. [Good First Issue Label](#good-first-issue-label) - 1. [Beginner and Help-wanted Issues Label](#beginner-and-help-wanted-issues-label) -1. [How Can I Contribute?](#how-can-i-contribute) - 1. [Code Contribution General Guideline](#code-contribution-general-guidelines) - 1. [Pull Request Philosophy](#pull-request-philosophy) - 1. [Pull Request Process](#pull-request-process) - 1. [Testing](#testing) - 1. [Addressing Feedback](#addressing-feedback) - 1. [Squashing Commits](#squashing-commits) - 1. [Refactoring](#refactoring) - 1. [Peer Review](#peer-review) - 1. [Reporting Bugs](#reporting-bugs) - 1. [Suggesting Features](#suggesting-enhancements) - - -## I don't want to read this whole thing I just have a question! - -> **Note:** Please don't file an issue to ask a question. You'll get faster results by using the resources below. - -We have an official Discord server where the community chimes in with helpful advice if you have questions. -This is the fastest way to get an answer and the core development team is active on Discord. - -* [Official Bittensor Discord](https://discord.gg/7wvFuPJZgq) - -## What should I know before I get started? 
-Bittensor is still in the Alpha stages, and as such you will likely run into some problems in deploying your model or installing Bittensor itself. If you run into an issue or end up resolving an issue yourself, feel free to create a pull request with a fix or with a fix to the documentation. The documentation repository can be found [here](https://github.com/opentensor/docs). - -Additionally, note that the core implementation of Bittensor consists of two separate repositories: [The core Bittensor code](https://github.com/opentensor/bittensor) and the Bittensor Blockchain [subtensor](https://github.com/opentensor/subtensor). - -Supplemental repository for the Bittensor subnet template can be found [here](https://github.com/opentensor/bittensor-subnet-template). This is a great first place to look for getting your hands dirty and started learning and building on Bittensor. See the subnet links [page](https://github.com/opentensor/bittensor-subnet-template/blob/main/subnet_links.json) for a list of all the repositories for the active registered subnets. - -## Getting Started -New contributors are very welcome and needed. -Reviewing and testing is highly valued and the most effective way you can contribute as a new contributor. It also will teach you much more about the code and process than opening pull requests. - -Before you start contributing, familiarize yourself with the Bittensor Core build system and tests. Refer to the documentation in the repository on how to build Bittensor core and how to run the unit tests, functional tests. - -There are many open issues of varying difficulty waiting to be fixed. If you're looking for somewhere to start contributing, check out the [good first issue](https://github.com/opentensor/bittensor/labels/good%20first%20issue) list or changes that are up for grabs. Some of them might no longer be applicable. So if you are interested, but unsure, you might want to leave a comment on the issue first. 
Also peruse the [issues](https://github.com/opentensor/bittensor/issues) tab for all open issues. - -### Good First Issue Label -The purpose of the good first issue label is to highlight which issues are suitable for a new contributor without a deep understanding of the codebase. - -However, good first issues can be solved by anyone. If they remain unsolved for a longer time, a frequent contributor might address them. - -You do not need to request permission to start working on an issue. However, you are encouraged to leave a comment if you are planning to work on it. This will help other contributors monitor which issues are actively being addressed and is also an effective way to request assistance if and when you need it. - -### Beginner and Help-wanted Issues Label -You can start by looking through these `beginner` and `help-wanted` issues: - -* [Beginner issues](https://github.com/opentensor/bittensor/labels/beginner) - issues which should only require a few lines of code, and a test or two. -* [Help wanted issues](https://github.com/opentensor/bittensor/labels/help%20wanted) - issues which should be a bit more involved than `beginner` issues. - -## Communication Channels -Most communication about Bittensor development happens on Discord channel. -Here's the link of Discord community. -[Bittensor Discord](https://discord.com/channels/799672011265015819/799672011814862902) - -And also here. -[Bittensor Community Discord](https://discord.com/channels/1120750674595024897/1120799375703162950) - -## How Can I Contribute? - -You can contribute to Bittensor in one of two main ways (as well as many others): -1. [Bug](#reporting-bugs) reporting and fixes -2. New features and Bittensor [enhancements](#suggesting-enhancements) - -> Please follow the Bittensor [style guide](./STYLE.md) regardless of your contribution type. - -Here is a high-level summary: -- Code consistency is crucial; adhere to established programming language conventions. 
-- Use `ruff format .` to format your Python code; it ensures readability and consistency. -- Write concise Git commit messages; summarize changes in ~50 characters. -- Follow these six commit rules: - - Atomic Commits: Focus on one task or fix per commit. - - Subject and Body Separation: Use a blank line to separate the subject from the body. - - Subject Line Length: Keep it under 50 characters for readability. - - Imperative Mood: Write subject line as if giving a command or instruction. - - Body Text Width: Wrap text manually at 72 characters. - - Body Content: Explain what changed and why, not how. -- Make use of your commit messages to simplify project understanding and maintenance. - -> For clear examples of each of the commit rules, see the style guide's [rules](./STYLE.md#the-six-rules-of-a-great-commit) section. - -### Code Contribution General Guidelines - -> Review the Bittensor [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before contributing. - -If you're looking to contribute to Bittensor but unsure where to start, please join our community [discord](https://discord.gg/bittensor), a developer-friendly Bittensor town square. Start with [#development](https://discord.com/channels/799672011265015819/799678806159392768) and [#bounties](https://discord.com/channels/799672011265015819/1095684873810890883) to see what issues are currently posted. For a greater understanding of Bittensor's usage and development, check the [Bittensor Documentation](https://bittensor.com/docs). - -#### Pull Request Philosophy - -Patchsets and enhancements should always be focused. A pull request could add a feature, fix a bug, or refactor code, but it should not contain a mixture of these. Please also avoid 'super' pull requests which attempt to do too much, are overly large, or overly complex as this makes review difficult. - -Specifically, pull requests must adhere to the following criteria: -- **Must** branch off from `staging`. 
Make sure that all your PRs are using `staging` branch as a base or will be closed. -- Contain fewer than 50 files. PRs with more than 50 files will be closed. -- Use the specific [template](./.github/pull_request_template.md) appropriate to your contribution. -- If a PR introduces a new feature, it *must* include corresponding tests. -- Other PRs (bug fixes, refactoring, etc.) should ideally also have tests, as they provide proof of concept and prevent regression. -- Categorize your PR properly by using GitHub labels. This aids in the review process by informing reviewers about the type of change at a glance. -- Make sure your code includes adequate comments. These should explain why certain decisions were made and how your changes work. -- If your changes are extensive, consider breaking your PR into smaller, related PRs. This makes your contributions easier to understand and review. -- Be active in the discussion about your PR. Respond promptly to comments and questions to help reviewers understand your changes and speed up the acceptance process. - -Generally, all pull requests must: - - - Have a clear use case, fix a demonstrable bug or serve the greater good of the project (e.g. refactoring for modularisation). - - Be well peer-reviewed. - - Follow code style guidelines. - - Not break the existing test suite. - - Where bugs are fixed, where possible, there should be unit tests demonstrating the bug and also proving the fix. - - Change relevant comments and documentation when behaviour of code changes. - -#### Pull Request Process - -Please follow these steps to have your contribution considered by the maintainers: - -*Before* creating the PR: -1. Read the [development workflow](./DEVELOPMENT_WORKFLOW.md) defined for this repository to understand our workflow. -2. Ensure your PR meets the criteria stated in the 'Pull Request Philosophy' section. -3. Include relevant tests for any fixed bugs or new features as stated in the [testing guide](./TESTING.md). -4. 
Follow all instructions in [the template](https://github.com/opentensor/bittensor/blob/master/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md) to create the PR. -5. Ensure your commit messages are clear and concise. Include the issue number if applicable. -6. If you have multiple commits, rebase them into a single commit using `git rebase -i`. -7. Explain what your changes do and why you think they should be merged in the PR description consistent with the [style guide](./STYLE.md). - -*After* creating the PR: -1. Verify that all [status checks](https://help.github.com/articles/about-status-checks/) are passing after you submit your pull request. -2. Label your PR using GitHub's labeling feature. The labels help categorize the PR and streamline the review process. -3. Document your code with comments that provide a clear understanding of your changes. Explain any non-obvious parts of your code or design decisions you've made. -4. If your PR has extensive changes, consider splitting it into smaller, related PRs. This reduces the cognitive load on the reviewers and speeds up the review process. - -Please be responsive and participate in the discussion on your PR! This aids in clarifying any confusion or concerns and leads to quicker resolution and merging of your PR. - -> Note: If your changes are not ready for merge but you want feedback, create a draft pull request. - -Following these criteria will aid in quicker review and potential merging of your PR. -While the prerequisites above must be satisfied prior to having your pull request reviewed, the reviewer(s) may ask you to complete additional design work, tests, or other changes before your pull request can be ultimately accepted. - -When you are ready to submit your changes, create a pull request: - -> **Always** follow the [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before submitting pull requests. 
- -After you submit a pull request, it will be reviewed by the maintainers. They may ask you to make changes. Please respond to any comments and push your changes as a new commit. - -> Note: Be sure to merge the latest from "upstream" before making a pull request: - -```bash -git remote add upstream https://github.com/opentensor/bittensor.git -git fetch upstream -git merge upstream/ -git push origin -``` - -#### Testing -Before making a PR for any code changes, please write adequate testing with unittest and/or pytest if it is warranted. This is **mandatory** for new features and enhancements. See the [testing guide](./TESTING.md) for more complete information. - -You may also like to view the [/tests](https://github.com/opentensor/bittensor/tree/master/tests) for starter examples. - -Here is a quick summary: -- **Running Tests**: Use `pytest` from the root directory of the Bittensor repository to run all tests. To run a specific test file or a specific test within a file, specify it directly (e.g., `pytest tests/test_wallet.py::test_create_new_coldkey`). -- **Writing Tests**: When writing tests, cover both the "happy path" and any potential error conditions. Use the `assert` statement to verify the expected behavior of a function. -- **Mocking**: Use the `unittest.mock` library to mock certain functions or objects when you need to isolate the functionality you're testing. This allows you to control the behavior of these functions or objects during testing. -- **Test Coverage**: Use the `pytest-cov` plugin to measure your test coverage. Aim for high coverage but also ensure your tests are meaningful and accurately represent the conditions under which your code will run. -- **Continuous Integration**: Bittensor uses GitHub Actions for continuous integration. Tests are automatically run every time you push changes to the repository. Check the "Actions" tab of the Bittensor GitHub repository to view the results. 
- -Remember, testing is crucial for maintaining code health, catching issues early, and facilitating the addition of new features or refactoring of existing code. - -#### Addressing Feedback - -After submitting your pull request, expect comments and reviews from other contributors. You can add more commits to your pull request by committing them locally and pushing to your fork. - -You are expected to reply to any review comments before your pull request is merged. You may update the code or reject the feedback if you do not agree with it, but you should express so in a reply. If there is outstanding feedback and you are not actively working on it, your pull request may be closed. - -#### Squashing Commits - -If your pull request contains fixup commits (commits that change the same line of code repeatedly) or too fine-grained commits, you may be asked to [squash](https://git-scm.com/docs/git-rebase#_interactive_mode) your commits before it will be reviewed. The basic squashing workflow is shown below. - - git checkout your_branch_name - git rebase -i HEAD~n - # n is normally the number of commits in the pull request. - # Set commits (except the one in the first line) from 'pick' to 'squash', save and quit. - # On the next screen, edit/refine commit messages. - # Save and quit. - git push -f # (force push to GitHub) - -Please update the resulting commit message, if needed. It should read as a coherent message. In most cases, this means not just listing the interim commits. - -If your change contains a merge commit, the above workflow may not work and you will need to remove the merge commit first. See the next section for details on how to rebase. - -Please refrain from creating several pull requests for the same change. Use the pull request that is already open (or was created earlier) to amend changes. This preserves the discussion and review that happened earlier for the respective change set. 
- -The length of time required for peer review is unpredictable and will vary from pull request to pull request. - -#### Refactoring - -Refactoring is a necessary part of any software project's evolution. The following guidelines cover refactoring pull requests for the Bittensor project. - -There are three categories of refactoring: code-only moves, code style fixes, and code refactoring. In general, refactoring pull requests should not mix these three kinds of activities in order to make refactoring pull requests easy to review and uncontroversial. In all cases, refactoring PRs must not change the behaviour of code within the pull request (bugs must be preserved as is). - -Project maintainers aim for a quick turnaround on refactoring pull requests, so where possible keep them short, uncomplex and easy to verify. - -Pull requests that refactor the code should not be made by new contributors. It requires a certain level of experience to know where the code belongs to and to understand the full ramification (including rebase effort of open pull requests). Trivial pull requests or pull requests that refactor the code with no clear benefits may be immediately closed by the maintainers to reduce unnecessary workload on reviewing. - -#### Peer Review - -Anyone may participate in peer review which is expressed by comments in the pull request. Typically reviewers will review the code for obvious errors, as well as test out the patch set and opine on the technical merits of the patch. Project maintainers take into account the peer review when determining if there is consensus to merge a pull request (remember that discussions may have taken place elsewhere, not just on GitHub). The following language is used within pull-request comments: - -- ACK means "I have tested the code and I agree it should be merged"; -- NACK means "I disagree this should be merged", and must be accompanied by sound technical justification. 
NACKs without accompanying reasoning may be disregarded; -- utACK means "I have not tested the code, but I have reviewed it and it looks OK, I agree it can be merged"; -- Concept ACK means "I agree in the general principle of this pull request"; -- Nit refers to trivial, often non-blocking issues. - -Reviewers should include the commit(s) they have reviewed in their comments. This can be done by copying the commit SHA1 hash. - -A pull request that changes consensus-critical code is considerably more involved than a pull request that adds a feature to the wallet, for example. Such patches must be reviewed and thoroughly tested by several reviewers who are knowledgeable about the changed subsystems. Where new features are proposed, it is helpful for reviewers to try out the patch set on a test network and indicate that they have done so in their review. Project maintainers will take this into consideration when merging changes. - -For a more detailed description of the review process, see the [Code Review Guidelines](CODE_REVIEW_DOCS.md). - -### Reporting Bugs - -This section guides you through submitting a bug report for Bittensor. Following these guidelines helps maintainers and the community understand your report :pencil:, reproduce the behavior :computer: :computer:, and find related reports :mag_right:. - -When you are creating a bug report, please [include as many details as possible](#how-do-i-submit-a-good-bug-report). - -> **Note:** If you find a **Closed** issue that seems like it is the same thing that you're experiencing, open a new issue and include a link to the original issue in the body of your new one. - -#### Before Submitting A Bug Report - -* **Check the [debugging guide](./DEBUGGING.md).** You might be able to find the cause of the problem and fix things yourself. Most importantly, check if you can reproduce the problem in the latest version of Bittensor by updating to the latest Master branch changes. 
-* **Check the [Discord Server](https://discord.gg/7wvFuPJZgq)** and ask in [#finney-issues](https://discord.com/channels/799672011265015819/1064247007688007800) or [#subnet-1-issues](https://discord.com/channels/799672011265015819/1096187495667998790). -* **Determine which repository the problem should be reported in**: if it has to do with your ML model, then it's likely [Bittensor](https://github.com/opentensor/bittensor). If you are having problems with your emissions or Blockchain, then it is in [subtensor](https://github.com/opentensor/subtensor) - -#### How Do I Submit A (Good) Bug Report? - -Bugs are tracked as [GitHub issues](https://guides.github.com/features/issues/). You can find Bittensor's issues [here](https://github.com/opentensor/bittensor/issues). After you've determined which repository ([Bittensor](https://github.com/opentensor/bittensor) or [subtensor](https://github.com/opentensor/subtensor)) your bug is related to, create an issue on that repository. - -Explain the problem and include additional details to help maintainers reproduce the problem: - -* **Use a clear and descriptive title** for the issue to identify the problem. -* **Describe the exact steps which reproduce the problem** in as many details as possible. For example, start by explaining how you started Bittensor, e.g. which command exactly you used in the terminal, or how you started Bittensor otherwise. When listing steps, **don't just say what you did, but explain how you did it**. For example, if you ran Bittensor with a set of custom configs, explain if you used a config file or command line arguments. -* **Provide specific examples to demonstrate the steps**. Include links to files or GitHub projects, or copy/pasteable snippets, which you use in those examples. If you're providing snippets in the issue, use [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines). 
-* **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior. -* **Explain which behavior you expected to see instead and why.** -* **Include screenshots and animated GIFs** which show you following the described steps and clearly demonstrate the problem. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. -* **If you're reporting that Bittensor crashed**, include a crash report with a stack trace from the operating system. On macOS, the crash report will be available in `Console.app` under "Diagnostic and usage information" > "User diagnostic reports". Include the crash report in the issue in a [code block](https://help.github.com/articles/markdown-basics/#multiple-lines), a [file attachment](https://help.github.com/articles/file-attachments-on-issues-and-pull-requests/), or put it in a [gist](https://gist.github.com/) and provide link to that gist. -* **If the problem is related to performance or memory**, include a CPU profile capture with your report, if you're using a GPU then include a GPU profile capture as well. Look into the [PyTorch Profiler](https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html) to look at memory usage of your model. -* **If the problem wasn't triggered by a specific action**, describe what you were doing before the problem happened and share more information using the guidelines below. - -Provide more context by answering these questions: - -* **Did the problem start happening recently** (e.g. after updating to a new version of Bittensor) or was this always a problem? 
-* If the problem started happening recently, **can you reproduce the problem in an older version of Bittensor?** -* **Can you reliably reproduce the issue?** If not, provide details about how often the problem happens and under which conditions it normally happens. - -Include details about your configuration and environment: - -* **Which version of Bittensor are you using?** You can get the version by checking for `__version__` in [`bittensor/bittensor/__init__.py`](https://github.com/opentensor/bittensor/blob/master/bittensor/__init__.py#L30). This is not sufficient. Also add the commit hash of the branch you are on. -* **What commit hash are you on?** You can get the exact commit hash by checking `git log` and pasting the full commit hash. -* **What's the name and version of the OS you're using?** -* **Are you running Bittensor in a virtual machine?** If so, which VM software are you using and which operating systems and versions are used for the host and the guest? -* **Are you running Bittensor in a dockerized container?** If so, have you made sure that your docker container contains your latest changes and is up to date with Master branch? -* **Are you using [local configuration files](https://opentensor.github.io/getting-started/configuration.html)** `config.yaml` to customize your Bittensor experiment? If so, provide the contents of that config file, preferably in a [code block](https://help.github.com/articles/markdown-basics/#multiple-lines) or with a link to a [gist](https://gist.github.com/). - -### Suggesting Enhancements and Features - -This section guides you through submitting an enhancement suggestion for Bittensor, including completely new features and minor improvements to existing functionality. Following these guidelines helps maintainers and the community understand your suggestion :pencil: and find related suggestions :mag_right:. 
- -When you are creating an enhancement suggestion, please [include as many details as possible](#how-do-i-submit-a-good-enhancement-suggestion). - -#### Before Submitting An Enhancement Suggestion - -* **Check the [debugging guide](./DEBUGGING.md)** for tips — you might discover that the enhancement is already available. Most importantly, check if you're using the latest version of Bittensor by pulling the latest changes from the Master branch and if you can get the desired behavior by changing [Bittensor's config settings](https://opentensor.github.io/getting-started/configuration.html). -* **Determine which repository the problem should be reported in**: if it has to do with your ML model, then it's likely [Bittensor](https://github.com/opentensor/bittensor). If you are having problems with your emissions or Blockchain, then it is in [subtensor](https://github.com/opentensor/subtensor). - -#### How Do I Submit A (Good) Feature Suggestion? - -Enhancement suggestions are tracked as [GitHub issues](https://guides.github.com/features/issues/). After you've determined which repository ([Bittensor](https://github.com/opentensor/bittensor) or [subtensor](https://github.com/opentensor/subtensor)) your enhancement suggestion is related to, create an issue on that repository and provide the following information: - -* **Use a clear and descriptive title** for the issue to identify the problem. -* **Provide a step-by-step description of the suggested enhancement** in as many details as possible. -* **Provide specific examples to demonstrate the steps**. Include copy/pasteable snippets which you use in those examples, as [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines). -* **Describe the current behavior** and **explain which behavior you expected to see instead** and why. -* **Include screenshots and animated GIFs** which help you demonstrate the steps or point out the part of Bittensor which the suggestion is related to. 
You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. -* **Explain why this enhancement would be useful** to most Bittensor users. -* **List some other text editors or applications where this enhancement exists.** -* **Specify which version of Bittensor you are using.** You can get the exact version by checking for `__version__` in [`bittensor/bittensor/__init__.py`](https://github.com/opentensor/bittensor/blob/master/bittensor/__init__.py#L30). -* **Specify the name and version of the OS you're using.** - -Thank you for considering contributing to Bittensor! Any help is greatly appreciated along this journey to incentivize open and permissionless intelligence. diff --git a/contrib/DEBUGGING.md b/contrib/DEBUGGING.md deleted file mode 100644 index 093e3432bf..0000000000 --- a/contrib/DEBUGGING.md +++ /dev/null @@ -1,161 +0,0 @@ -## Installation - -First, make sure you have Bittensor installed correctly. There are three ways to install Bittensor: - -1. Through the installer: - -```bash -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/opentensor/bittensor/master/scripts/install.sh)" -``` - -2. With pip: - -```bash -pip install bittensor -``` - -3. From source: - -```bash -git clone https://github.com/opentensor/bittensor.git -python3 -m pip install -e bittensor/ -``` - -You can test your installation by running: - -```bash -python3 -c "import bittensor; print(bittensor.__version__)" -``` -## Logging -Make good use of the `bittensor.logging` module. It can be your friend and will help you find things that are otherwise difficult to get visibility on. - -You can enable debug or trace modes by running: -``` -import bittensor -bittensor.trace() # lowest level of granularity, best for figuring out what went wrong. 
-bittensor.debug() # for most everything else that you don't want to see normally at runtime -``` -at the top of your script or source file to enable more verbose output logs. - -You can also write your own in the code simply: -```python -# Bittensor's wallet maintenance class. -wallet = bittensor.wallet() - -bittensor.logging.debug( f"wallet keypair: {wallet.hotkey}" ) - -... - -# Bittensor's chain state object. -metagraph = bittensor.metagraph(netuid=1) - -bittensor.logging.trace( f"metagraph created! netuid {metagraph.netuid}" ) -``` - - -## Querying the Network - -Ensure you can query the Bittensor network using the Python API. If something is broken with your installation or the chain, this won't work out of the box. Here's an example of how to do this: - -```python -import bittensor -bittensor.trace() - -# Attempt to query through the foundation endpoint. -print(bittensor.prompt("Heraclitus was a ")) -``` - -## Debugging Miners - - -First, try registering and running on a testnet: -```bash -btcli register --netuid --subtensor.chain_endpoint wss://test.finney.opentensor.ai:443 -``` - -If that works, then try to register a miner on mainnet: - -```bash -btcli register --netuid -``` - -See if you can observe your slot specified by UID: - -```bash -btcli overview --netuid -``` - -Here's an example of how to run a pre-configured miner: - -```bash -python3 bittensor/neurons/text_prompting/miners/GPT4ALL/neuron.py --netuid -``` - -## Debugging with the Bittensor Package - -The Bittensor package contains data structures for interacting with the Bittensor ecosystem, writing miners, validators, and querying the network. - -Try to use the Bittensor package to create a wallet, connect to the axon running on slot 10, and send a prompt to this endpoint and see where things are breaking along this typical codepath: - -```python -import bittensor - -# Bittensor's wallet maintenance class. -wallet = bittensor.wallet() - -# Bittensor's chain interface. 
-subtensor = bittensor.subtensor() - -# Bittensor's chain state object. -metagraph = bittensor.metagraph(netuid=1) - -# Instantiate a Bittensor endpoint. -axon = bittensor.axon(wallet=wallet, metagraph=metagraph) - -# Start servicing messages on the wire. -axon.start() - -# Register this axon on a subnetwork -subtensor.serve_axon(netuid=1, axon=axon) - -# Connect to the axon running on slot 10, use the wallet to sign messages. -dendrite = bittensor.text_prompting(keypair=wallet.hotkey, axon=metagraph.axons[10]) - -# Send a prompt to this endpoint -dendrite.forward(roles=['user'], messages=['Who is Rick James?']) -``` - -> NOTE: It may be helpful to throw in breakpoints such as with `pdb`. -```python -# some code ... -import pdb; pdb.set_trace() # breakpoint! -# more code ... - -``` -This will stop execution at the breakpoint you set and can operate on the stack directly in the terminal. - -## Searching for strings -Use `ag`. It's fast, convenient, and widely available on unix systems. Ag will highlight all occurnaces of a given pattern. - -```bash -apt-get install silversearcher-ag -``` - -Usage: -```bash -$ ag "query_subtensor" - ->>> bittensor/_subtensor/subtensor_mock.py ->>> 165: e.g. We mock `Subtensor.query_subtensor` instead of all query methods. ->>> 536: def query_subtensor( ->>> 1149: curr_total_hotkey_stake = self.query_subtensor( ->>> 1154: curr_total_coldkey_stake = self.query_subtensor( ->>> 1345: return self.query_subtensor(name=name, block=block, params=[netuid]).value ->>> ->>> bittensor/_subtensor/subtensor_impl.py ->>> 902: def query_subtensor( ->>> 1017: return self.query_subtensor("Rho", block, [netuid]).value -... -``` - -Remember, debugging involves a lot of trial and error. Don't be discouraged if things don't work right away. Keep trying different things, and don't hesitate to ask for help if you need it. 
diff --git a/contrib/DEVELOPMENT_WORKFLOW.md b/contrib/DEVELOPMENT_WORKFLOW.md deleted file mode 100644 index 91e781ffcc..0000000000 --- a/contrib/DEVELOPMENT_WORKFLOW.md +++ /dev/null @@ -1,159 +0,0 @@ -# Bittensor Development Workflow - -## Table of contents - -- [Bittensor Development Workflow](#bittensor-development-workflow) - - [Main Branches](#main-branches) - - [Development Model](#development-model) - - [Feature Branches](#feature-branches) - - [Release Branches](#release-branches) - - [Hotfix Branches](#hotfix-branches) - - [Git Operations](#git-operations) - - [Creating a Feature Branch](#creating-a-feature-branch) - - [Merging Feature Branch into Staging](#merging-feature-branch-into-staging) - - [Creating a Release Branch](#creating-a-release-branch) - - [Finishing a Release Branch](#finishing-a-release-branch) - - [Creating a Hotfix Branch](#creating-a-hotfix-branch) - - [Finishing a Hotfix Branch](#finishing-a-hotfix-branch) - - [Continuous Integration (CI) and Continuous Deployment (CD)](#continuous-integration-ci-and-continuous-deployment-cd) - - [Versioning and Release Notes](#versioning-and-release-notes) - - [Pending Tasks](#pending-tasks) - -## Main Branches - -Bittensor's codebase consists of two main branches: **master** and **staging**. - -**master** -- This is Bittensor's live production branch, which should only be updated by the core development team. This branch is protected, so refrain from pushing or merging into it unless authorized. - -**staging** -- This branch is continuously updated and is where you propose and merge changes. It's essentially Bittensor's active development branch. - -## Development Model - -### Feature Branches - -- Branch off from: `staging` -- Merge back into: `staging` -- Naming convention: `feature//` - -Feature branches are used to develop new features for upcoming or future releases. They exist as long as the feature is in development, but will eventually be merged into `staging` or discarded. 
Always delete your feature branch after merging to avoid unnecessary clutter. - -### Release Branches - -- Branch off from: `staging` -- Merge back into: `staging` and then `master` -- Naming convention: `release///` - -Release branches support the preparation of a new production release, allowing for minor bug fixes and preparation of metadata (version number, configuration, etc). All new features should be merged into `staging` and wait for the next big release. - -### Hotfix Branches - -General workflow: - -- Branch off from: `master` or `staging` -- Merge back into: `staging` then `master` -- Naming convention: `hotfix///` - -Hotfix branches are meant for quick fixes in the production environment. When a critical bug in a production version must be resolved immediately, a hotfix branch is created. - -## Git Operations - -#### Create a feature branch - -1. Branch from the **staging** branch. - 1. Command: `git checkout -b feature/my-feature staging` - -> Rebase frequently with the updated staging branch so you do not face big conflicts before submitting your pull request. Remember, syncing your changes with other developers could also help you avoid big conflicts. - -#### Merge feature branch into staging - -In other words, integrate your changes into a branch that will be tested and prepared for release. - -1. Switch branch to staging: `git checkout staging` -2. Merging feature branch into staging: `git merge --no-ff feature/my-feature` -3. Pushing changes to staging: `git push origin staging` -4. Delete feature branch: `git branch -d feature/my-feature` (alternatively, this can be navigated on the GitHub web UI) - -This operation is done by Github when merging a PR. - -So, what you have to keep in mind is: -- Open the PR against the `staging` branch. -- After merging a PR you should delete your feature branch. This will be strictly enforced. - -#### Creating a release branch - -1. 
Create branch from staging: `git checkout -b release/3.4.0/descriptive-message/creator's_name staging` -2. Updating version with major or minor: `./scripts/update_version.sh major|minor` -3. Commit file changes with new version: `git commit -a -m "Updated version to 3.4.0"` - - -#### Finishing a Release Branch - -This involves releasing stable code and generating a new version for bittensor. - -1. Switch branch to master: `git checkout master` -2. Merge release branch into master: `git merge --no-ff release/3.4.0/optional-descriptive-message` -3. Tag changeset: `git tag -a v3.4.0 -m "Releasing v3.4.0: some comment about it"` -4. Push changes to master: `git push origin master` -5. Push tags to origin: `git push origin --tags` - -To keep the changes made in the __release__ branch, we need to merge those back into `staging`: - -- Switch branch to staging: `git checkout staging`. -- Merging release branch into staging: `git merge --no-ff release/3.4.0/optional-descriptive-message` - -This step may well lead to a merge conflict (probably even, since we have changed the version number). If so, fix it and commit. - - -#### Creating a hotfix branch -1. Create branch from master: `git checkout -b hotfix/3.3.4/descriptive-message/creator's-name master` -2. Update patch version: `./scripts/update_version.sh patch` -3. Commit file changes with new version: `git commit -a -m "Updated version to 3.3.4"` -4. Fix the bug and commit the fix: `git commit -m "Fixed critical production issue X"` - -#### Finishing a Hotfix Branch - -Finishing a hotfix branch involves merging the bugfix into both `master` and `staging`. - -1. Switch branch to master: `git checkout master` -2. Merge hotfix into master: `git merge --no-ff hotfix/3.3.4/optional-descriptive-message` -3. Tag new version: `git tag -a v3.3.4 -m "Releasing v3.3.4: descriptive comment about the hotfix"` -4. Push changes to master: `git push origin master` -5. Push tags to origin: `git push origin --tags` -6. 
Switch branch to staging: `git checkout staging` -7. Merge hotfix into staging: `git merge --no-ff hotfix/3.3.4/descriptive-message/creator's-name` -8. Push changes to origin/staging: `git push origin staging` -9. Delete hotfix branch: `git branch -d hotfix/3.3.4/optional-descriptive-message` - -The one exception to the rule here is that, **when a release branch currently exists, the hotfix changes need to be merged into that release branch, instead of** `staging`. Back-merging the bugfix into the __release__ branch will eventually result in the bugfix being merged into `develop` too, when the release branch is finished. (If work in develop immediately requires this bugfix and cannot wait for the release branch to be finished, you may safely merge the bugfix into develop now already as well.) - -Finally, we remove the temporary branch: - -- `git branch -d hotfix/3.3.4/optional-descriptive-message` -## Continuous Integration (CI) and Continuous Deployment (CD) - -Continuous Integration (CI) is a software development practice where members of a team integrate their work frequently. Each integration is verified by an automated build and test process to detect integration errors as quickly as possible. - -Continuous Deployment (CD) is a software engineering approach in which software functionalities are delivered frequently through automated deployments. - -- **CircleCI job**: Create jobs in CircleCI to automate the merging of staging into master and release version (needed to release code) and building and testing Bittensor (needed to merge PRs). - -## Versioning and Release Notes - -Semantic versioning helps keep track of the different versions of the software. When code is merged into master, generate a new version. - -Release notes provide documentation for each version released to the users, highlighting the new features, improvements, and bug fixes. When merged into master, generate GitHub release and release notes. 
- -## Pending Tasks - -- Determine if master and staging are different -- Determine what is in staging that is not merged yet - - Document not released developments - - When merged into staging, generate information about what's merged into staging but not released. - - When merged into master, generate GitHub release and release notes. -- CircleCI jobs - - Merge staging into master and release version (needed to release code) - - Build and Test Bittensor (needed to merge PRs) - -This document can be improved as the Bittensor project continues to develop and change. diff --git a/contrib/RELEASE_GUIDELINES.md b/contrib/RELEASE_GUIDELINES.md deleted file mode 100644 index d6bda7c860..0000000000 --- a/contrib/RELEASE_GUIDELINES.md +++ /dev/null @@ -1,87 +0,0 @@ -# Release Guidelines - -The release manager in charge can release a Bittensor version using two scripts: - - [../scripts/release/versioning.sh](../scripts/release/versioning.sh) - - [../scripts/release/release.sh](../scripts/release/release.sh) - -The release manager will need the right permissions for: - - github.com - - pypi.org - - hub.docker.com - -If you are new in this role, ask for the proper setup you need to run this process manually. - -## Process of release - -1. Create a branch called `release/VERSION`, having VERSION with the version to release. -1. Make sure twine is installed: `pip install twine` -1. Within the release branch: - 1. Update the version executing:`./scripts/release/versioning.sh --update UPDATE_TYPE` - 1. **UPDATE_TYPE** could be *major*, *minor* or *patch*. - 1. Add release notes to CHANGELOG executing: `./scripts/release/add_notes_changelog.sh -A -V NEW_VERSION -P PREVIOUS_TAG -T GH_ACCESS_TOKEN` - 1. **NEW_VERSION**: e.g.: 3.6.4 - 1. **PREVIOUS_TAG**: e.g.: v3.6.3 - 1. **GH_ACCESS_TOKEN**: A github [personal access token](https://docs.github.com/en/enterprise-server@3.4/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) you need. - -1. 
Test the release branch and verify that it meets the requirements. -1. After merging the release branch; Run the release script - -## Versioning script usage - -Options: - - -U, --update: type of update. It could be major, minor, patch or rc (release candidate). - - -A, --apply: This specify to apply the release. Without this the versioning will just show a dry run with no changes. - -## Release script usage - -Options: - - -A, --apply: This specify to apply the release. Without this the release will just show a dry run with no changes. - - -T,--github-token: A github personal access token to interact with the Github API. - -### Github token - -Since you need to use a secret when releasing bittensor (github personal access token), I encourage you to use [pass](https://www.passwordstore.org/) or a similar tool that allows you to store the secret safely and not expose it in the history of the machine you use. - -So you can have: -``` -GITHUB_ACCESS_TOKEN=$(pass github/your_personal_token_with_permisions) -``` - -or -``` -GITHUB_ACCESS_TOKEN=$(whatever you need to get the token safely) -``` - -### Executions - -So, executing the script to release a minor version will be: - -``` -# For a dry run -./scripts/release/release.sh -``` - -``` -# Applying changes -./scripts/release/release.sh --apply --github-token $GITHUB_ACCESS_TOKEN` -``` - -## Checking release - -After the execution of the release script we would have generated: - - A new git tag in [github.com](https://github.com/opentensor/bittensor/tags) - - A new github release in [github.com](https://github.com/opentensor/bittensor/releases) - - A new pip package in [pypi.org](https://pypi.org/project/bittensor/#history) - - A new docker image in [hub.docker.com](https://hub.docker.com/r/opentensorfdn/bittensor/tags) - -## After release - -After a Bittensor release we have to -- Update [cubit](https://github.com/opentensor/cubit). - -### Updating cubit - -1. 
Updating the [Dockerfile](https://github.com/opentensor/cubit/blob/master/docker/Dockerfile) -1. Building its docker image (follow its README instructions) -1. Push it to hub.docker.com - 1. The generated name will be the same but with `-cubit` in its name \ No newline at end of file diff --git a/contrib/STYLE.md b/contrib/STYLE.md deleted file mode 100644 index 7804359d22..0000000000 --- a/contrib/STYLE.md +++ /dev/null @@ -1,350 +0,0 @@ -# Style Guide - -A project’s long-term success rests (among other things) on its maintainability, and a maintainer has few tools more powerful than his or her project’s log. It’s worth taking the time to learn how to care for one properly. What may be a hassle at first soon becomes habit, and eventually a source of pride and productivity for all involved. - -Most programming languages have well-established conventions as to what constitutes idiomatic style, i.e. naming, formatting and so on. There are variations on these conventions, of course, but most developers agree that picking one and sticking to it is far better than the chaos that ensues when everybody does their own thing. - -# Table of Contents -1. [Code Style](#code-style) -2. [Naming Conventions](#naming-conventions) -3. [Git Commit Style](#git-commit-style) -4. [The Six Rules of a Great Commit](#the-six-rules-of-a-great-commit) - - [1. Atomic Commits](#1-atomic-commits) - - [2. Separate Subject from Body with a Blank Line](#2-separate-subject-from-body-with-a-blank-line) - - [3. Limit the Subject Line to 50 Characters](#3-limit-the-subject-line-to-50-characters) - - [4. Use the Imperative Mood in the Subject Line](#4-use-the-imperative-mood-in-the-subject-line) - - [5. Wrap the Body at 72 Characters](#5-wrap-the-body-at-72-characters) - - [6. Use the Body to Explain What and Why vs. How](#6-use-the-body-to-explain-what-and-why-vs-how) -5. 
[Tools Worth Mentioning](#tools-worth-mentioning) - - [Using `--fixup`](#using---fixup) - - [Interactive Rebase](#interactive-rebase) -6. [Pull Request and Squashing Commits Caveats](#pull-request-and-squashing-commits-caveats) - - -### Code style - -#### General Style -Python's official style guide is PEP 8, which provides conventions for writing code for the main Python distribution. Here are some key points: - -- `Indentation:` Use 4 spaces per indentation level. - -- `Line Length:` Limit all lines to a maximum of 79 characters. - -- `Blank Lines:` Surround top-level function and class definitions with two blank lines. Method definitions inside a class are surrounded by a single blank line. - -- `Imports:` Imports should usually be on separate lines and should be grouped in the following order: - - - Standard library imports. - - Related third party imports. - - Local application/library specific imports. -- `Whitespace:` Avoid extraneous whitespace in the following situations: - - - Immediately inside parentheses, brackets or braces. - - Immediately before a comma, semicolon, or colon. - - Immediately before the open parenthesis that starts the argument list of a function call. -- `Comments:` Comments should be complete sentences and should be used to clarify code and are not a substitute for poorly written code. - -#### For Python - -- `List Comprehensions:` Use list comprehensions for concise and readable creation of lists. - -- `Generators:` Use generators when dealing with large amounts of data to save memory. - -- `Context Managers:` Use context managers (with statement) for resource management. - -- `String Formatting:` Use f-strings for formatting strings in Python 3.6 and above. - -- `Error Handling:` Use exceptions for error handling whenever possible. - -#### More details - -Use [`ruff` to format](https://docs.astral.sh/ruff/formatter/#the-ruff-formatter) your python code before committing for consistency across such a large pool of contributors. 
-Black code [style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#code-style) ensures consistent and opinionated code formatting. -Ruff automatically formats your Python code according to the Black style guide, enhancing code readability and maintainability. - -Key Features of ruff & Black code style: - - Consistency: ruff enforces a single, consistent coding style across your project, eliminating style debates and allowing developers to focus on code logic. - - Readability: By applying a standard formatting style, Black improves code readability, making it easier to understand and collaborate on projects. - - Automation: ruff automates the code formatting process, saving time and effort. It eliminates the need for manual formatting and reduces the likelihood of inconsistencies. - -### Naming Conventions - -- `Classes:` Class names should normally use the CapWords Convention. -- `Functions and Variables:` Function names should be lowercase, with words separated by underscores as necessary to improve readability. Variable names follow the same convention as function names. - -- `Constants:` Constants are usually defined on a module level and written in all capital letters with underscores separating words. - -- `Non-public Methods and Instance Variables:` Use a single leading underscore (_). This is a weak "internal use" indicator. - -- `Strongly "private" methods and variables:` Use a double leading underscore (__). This triggers name mangling in Python. - - -### Git commit style - -Here’s a model Git commit message when contributing: -``` -Summarize changes in around 50 characters or less - -More detailed explanatory text, if necessary. Wrap it to about 72 -characters or so. In some contexts, the first line is treated as the -subject of the commit and the rest of the text as the body. 
The -blank line separating the summary from the body is critical (unless -you omit the body entirely); various tools like `log`, `shortlog` -and `rebase` can get confused if you run the two together. - -Explain the problem that this commit is solving. Focus on why you -are making this change as opposed to how (the code explains that). -Are there side effects or other unintuitive consequences of this -change? Here's the place to explain them. - -Further paragraphs come after blank lines. - - - Bullet points are okay, too - - - Typically a hyphen or asterisk is used for the bullet, preceded - by a single space, with blank lines in between, but conventions - vary here - -If you use an issue tracker, put references to them at the bottom, -like this: - -Resolves: #123 -See also: #456, #789 -``` - - -## The six rules of a great commit. - -#### 1. Atomic Commits -An “atomic” change revolves around one task or one fix. - -Atomic Approach - - Commit each fix or task as a separate change - - Only commit when a block of work is complete - - Commit each layout change separately - - Joint commit for layout file, code behind file, and additional resources - -Benefits - -- Easy to roll back without affecting other changes -- Easy to make other changes on the fly -- Easy to merge features to other branches - -#### Avoid trivial commit messages - -Commit messages like "fix", "fix2", or "fix3" don't provide any context or clear understanding of what changes the commit introduces. Here are some examples of good vs. bad commit messages: - -**Bad Commit Message:** - - $ git commit -m "fix" - -**Good Commit Message:** - - $ git commit -m "Fix typo in README file" - -> **Caveat**: When working with new features, an atomic commit will often consist of multiple files, since a layout file, code behind file, and additional resources may have been added/modified. 
You don’t want to commit all of these separately, because if you had to roll back the application to a state before the feature was added, it would involve multiple commit entries, and that can get confusing - -#### 2. Separate subject from body with a blank line - -Not every commit requires both a subject and a body. Sometimes a single line is fine, especially when the change is so simple that no further context is necessary. - -For example: - - Fix typo in introduction to user guide - -Nothing more need be said; if the reader wonders what the typo was, she can simply take a look at the change itself, i.e. use git show or git diff or git log -p. - -If you’re committing something like this at the command line, it’s easy to use the -m option to git commit: - - $ git commit -m"Fix typo in introduction to user guide" - -However, when a commit merits a bit of explanation and context, you need to write a body. For example: - - Derezz the master control program - - MCP turned out to be evil and had become intent on world domination. - This commit throws Tron's disc into MCP (causing its deresolution) - and turns it back into a chess game. - -Commit messages with bodies are not so easy to write with the -m option. You’re better off writing the message in a proper text editor. [See Pro Git](https://git-scm.com/book/en/v2/Customizing-Git-Git-Configuration). - -In any case, the separation of subject from body pays off when browsing the log. Here’s the full log entry: - - $ git log - commit 42e769bdf4894310333942ffc5a15151222a87be - Author: Kevin Flynn - Date: Fri Jan 01 00:00:00 1982 -0200 - - Derezz the master control program - - MCP turned out to be evil and had become intent on world domination. - This commit throws Tron's disc into MCP (causing its deresolution) - and turns it back into a chess game. - - -#### 3. Limit the subject line to 50 characters -50 characters is not a hard limit, just a rule of thumb. 
Keeping subject lines at this length ensures that they are readable, and forces the author to think for a moment about the most concise way to explain what’s going on. - -GitHub’s UI is fully aware of these conventions. It will warn you if you go past the 50 character limit. Git will truncate any subject line longer than 72 characters with an ellipsis, thus keeping it to 50 is best practice. - -#### 4. Use the imperative mood in the subject line -Imperative mood just means “spoken or written as if giving a command or instruction”. A few examples: - - Clean your room - Close the door - Take out the trash - -Each of the seven rules you’re reading about right now are written in the imperative (“Wrap the body at 72 characters”, etc.). - -The imperative can sound a little rude; that’s why we don’t often use it. But it’s perfect for Git commit subject lines. One reason for this is that Git itself uses the imperative whenever it creates a commit on your behalf. - -For example, the default message created when using git merge reads: - - Merge branch 'myfeature' - -And when using git revert: - - Revert "Add the thing with the stuff" - - This reverts commit cc87791524aedd593cff5a74532befe7ab69ce9d. - -Or when clicking the “Merge” button on a GitHub pull request: - - Merge pull request #123 from someuser/somebranch - -So when you write your commit messages in the imperative, you’re following Git’s own built-in conventions. For example: - - Refactor subsystem X for readability - Update getting started documentation - Remove deprecated methods - Release version 1.0.0 - -Writing this way can be a little awkward at first. We’re more used to speaking in the indicative mood, which is all about reporting facts. 
That’s why commit messages often end up reading like this: - - Fixed bug with Y - Changing behavior of X - -And sometimes commit messages get written as a description of their contents: - - More fixes for broken stuff - Sweet new API methods - -To remove any confusion, here’s a simple rule to get it right every time. - -**A properly formed Git commit subject line should always be able to complete the following sentence:** - - If applied, this commit will - -For example: - - If applied, this commit will refactor subsystem X for readability - If applied, this commit will update getting started documentation - If applied, this commit will remove deprecated methods - If applied, this commit will release version 1.0.0 - If applied, this commit will merge pull request #123 from user/branch - -#### 5. Wrap the body at 72 characters -Git never wraps text automatically. When you write the body of a commit message, you must mind its right margin, and wrap text manually. - -The recommendation is to do this at 72 characters, so that Git has plenty of room to indent text while still keeping everything under 80 characters overall. - -A good text editor can help here. It’s easy to configure Vim, for example, to wrap text at 72 characters when you’re writing a Git commit. - -#### 6. Use the body to explain what and why vs. how -This [commit](https://github.com/bitcoin/bitcoin/commit/eb0b56b19017ab5c16c745e6da39c53126924ed6) from Bitcoin Core is a great example of explaining what changed and why: - -``` -commit eb0b56b19017ab5c16c745e6da39c53126924ed6 -Author: Pieter Wuille -Date: Fri Aug 1 22:57:55 2014 +0200 - - Simplify serialize.h's exception handling - - Remove the 'state' and 'exceptmask' from serialize.h's stream - implementations, as well as related methods. - - As exceptmask always included 'failbit', and setstate was always - called with bits = failbit, all it did was immediately raise an - exception. 
Get rid of those variables, and replace the setstate - with direct exception throwing (which also removes some dead - code). - - As a result, good() is never reached after a failure (there are - only 2 calls, one of which is in tests), and can just be replaced - by !eof(). - - fail(), clear(n) and exceptions() are just never called. Delete - them. -``` - -Take a look at the [full diff](https://github.com/bitcoin/bitcoin/commit/eb0b56b19017ab5c16c745e6da39c53126924ed6) and just think how much time the author is saving fellow and future committers by taking the time to provide this context here and now. If he didn’t, it would probably be lost forever. - -In most cases, you can leave out details about how a change has been made. Code is generally self-explanatory in this regard (and if the code is so complex that it needs to be explained in prose, that’s what source comments are for). Just focus on making clear the reasons why you made the change in the first place—the way things worked before the change (and what was wrong with that), the way they work now, and why you decided to solve it the way you did. - -The future maintainer that thanks you may be yourself! - - - -#### Tools worth mentioning - -##### Using `--fixup` - -If you've made a commit and then realize you've missed something or made a minor mistake, you can use the `--fixup` option. - -For example, suppose you've made a commit with a hash `9fceb02`. Later, you realize you've left a debug statement in your code. Instead of making a new commit titled "remove debug statement" or "fix", you can do the following: - - $ git commit --fixup 9fceb02 - -This will create a new commit to fix the issue, with a message like "fixup! The original commit message". - -##### Interactive Rebase - -Interactive rebase, or `rebase -i`, can be used to squash these fixup commits into the original commits they're fixing, which cleans up your commit history. 
You can use the `autosquash` option to automatically squash any commits marked as "fixup" into their target commits. - -For example: - - $ git rebase -i --autosquash HEAD~5 - -This command starts an interactive rebase for the last 5 commits (`HEAD~5`). Any commits marked as "fixup" will be automatically moved to squash with their target commits. - -The benefit of using `--fixup` and interactive rebase is that it keeps your commit history clean and readable. It groups fixes with the commits they are related to, rather than having a separate "fix" commit that might not make sense to other developers (or even to you) in the future. - - ---- - -#### Pull Request and Squashing Commits Caveats - -While atomic commits are great for development and for understanding the changes within the branch, the commit history can get messy when merging to the main branch. To keep a cleaner and more understandable commit history in our main branch, we encourage squashing all the commits of a PR into one when merging. - -This single commit should provide an overview of the changes that the PR introduced. It should follow the guidelines for atomic commits (an atomic commit is complete, self-contained, and understandable) but on the scale of the entire feature, task, or fix that the PR addresses. This approach combines the benefits of atomic commits during development with a clean commit history in our main branch. - -Here is how you can squash commits: - -```bash -git rebase -i HEAD~n -``` - -where `n` is the number of commits to squash. After running the command, replace `pick` with `squash` for the commits you want to squash into the previous commit. This will combine the commits and allow you to write a new commit message. - -In this context, an atomic commit message could look like: - -``` -Add feature X - -This commit introduces feature X which does A, B, and C. It adds -new files for layout, updates the code behind the file, and introduces -new resources. 
This change is important because it allows users to -perform task Y more efficiently. - -It includes: -- Creation of new layout file -- Updates in the code-behind file -- Addition of new resources - -Resolves: #123 -``` - -In your PRs, remember to detail what the PR is introducing or fixing. This will be helpful for reviewers to understand the context and the reason behind the changes. diff --git a/contrib/TESTING.md b/contrib/TESTING.md deleted file mode 100644 index 59dc1d81a3..0000000000 --- a/contrib/TESTING.md +++ /dev/null @@ -1,94 +0,0 @@ -# Testing Guide for Bittensor - -Testing is an essential part of software development that ensures the correctness and performance of your code. Bittensor uses a combination of unit tests and integration tests to verify the functionality of its components. This guide will walk you through how to run and write tests for Bittensor. - -## Running Tests - -Bittensor uses `pytest` for running its tests. To run all tests, navigate to the root directory of the Bittensor repository and run: - -```bash -pytest -``` - -This will automatically discover all test files (those that start with `test_`) and run them. - -If you want to run a specific test file, you can specify it directly. For example, to run the tests in `test_wallet.py`, you would use: - -```bash -pytest tests/test_wallet.py -``` - -Similarly, you can run a specific test within a file by appending `::` and the test name. For example: - -```bash -pytest tests/test_wallet.py::test_create_new_coldkey -``` - -## Writing Tests - -When writing tests for Bittensor, you should aim to cover both the "happy path" (where everything works as expected) and any potential error conditions. Here's a basic structure for a test file: - -```python -import pytest -import bittensor - -def test_some_functionality(): - # Setup any necessary objects or state. - wallet = bittensor.wallet() - - # Call the function you're testing. 
- result = wallet.create_new_coldkey() - - # Assert that the function behaved as expected. - assert result is not None -``` - -In this example, we're testing the `create_new_coldkey` function of the `wallet` object. We assert that the result is not `None`, which is the expected behavior. - -## Mocking - -In some cases, you may need to mock certain functions or objects to isolate the functionality you're testing. Bittensor uses the `unittest.mock` library for this. Here's a simple example from the axon unittest: - -```python -def test_axon_start(self): - mock_wallet = MagicMock( - spec=bittensor.Wallet, - coldkey=MagicMock(), - coldkeypub=MagicMock( - # mock ss58 address - ss58_address="5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" - ), - hotkey=MagicMock( - ss58_address="5CtstubuSoVLJGCXkiWRNKrrGg2DVBZ9qMs2qYTLsZR4q1Wg" - ), - ) - axon = bittensor.axon(wallet=mock_wallet, metagraph=None) - axon.start() - assert axon.server._state.stage == grpc._server._ServerStage.STARTED -``` - -In this example, we're mocking the `coldkey`, `coldkeypub` and `hotkey` for a wallet. This allows us to test how the axon code behaves when `bittensor.Wallet()` would normally be called, without actually calling the constructor. -## Test Coverage - -It's important to ensure that your tests cover as much of your code as possible. You can use the `pytest-cov` plugin to measure your test coverage. To use it, first install it with pip: - -```bash -pip install pytest-cov -``` - -Then, you can run your tests with coverage like this: - -```bash -pytest --cov=bittensor -``` - -This will output a coverage report showing the percentage of your code that's covered by tests. - -Remember, while high test coverage is a good goal, it's also important to write meaningful tests. A test isn't very useful if it doesn't accurately represent the conditions under which your code will run. - -## Continuous Integration - -Bittensor uses CircleCI for continuous integration. 
This means that every time you push changes to the repository, all tests are automatically run. If any tests fail, you'll be notified so you can fix the issue before merging your changes. - - -Remember, tests are an important part of maintaining the health of a codebase. They help catch issues early and make it easier to add new features or refactor existing code. Happy testing! \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 7e6933ed25..0000000000 --- a/docker-compose.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: "3.2" - -services: - dev: - container_name: node-bittensor - image: "bittensor/bittensor:latest" - ports: - - "8091:8091" - volumes: - - ~/.bittensor:/root/.bittensor \ No newline at end of file diff --git a/example.env b/example.env deleted file mode 100644 index 35d405fb58..0000000000 --- a/example.env +++ /dev/null @@ -1,5 +0,0 @@ -# To use legacy Torch-based of bittensor, you must set USE_TORCH=1 -USE_TORCH=0 -# If set to 0 (or anything else than 1), it will use current, numpy-based, bittensor interface. -# This is generally what you want unless you want legacy interoperability. -# Please note that the legacy interface is deprecated, and is not tested nearly as much. 
diff --git a/mypy.ini b/mypy.ini deleted file mode 100644 index d38bdc7172..0000000000 --- a/mypy.ini +++ /dev/null @@ -1,18 +0,0 @@ -[mypy] -ignore_missing_imports = True -ignore_errors = True - -[mypy-*.axon.*] -ignore_errors = False - -[mypy-*.dendrite.*] -ignore_errors = False - -[mypy-bittensor.metagraph.*] -ignore_errors = False - -[mypy-*.subtensor.*] -ignore_errors = False - -[mypy-*.synapse.*] -ignore_errors = False diff --git a/requirements/cubit.txt b/requirements/cubit.txt deleted file mode 100644 index 5af1316836..0000000000 --- a/requirements/cubit.txt +++ /dev/null @@ -1,3 +0,0 @@ -torch>=1.13.1 -cubit>=1.1.0 -cubit @ git+https://github.com/opentensor/cubit.git diff --git a/requirements/dev.txt b/requirements/dev.txt deleted file mode 100644 index a9e1a1bc4e..0000000000 --- a/requirements/dev.txt +++ /dev/null @@ -1,20 +0,0 @@ -black==24.3.0 -pytest==7.2.0 -pytest-asyncio==0.23.7 -pytest-mock==3.12.0 -pytest-split==0.8.0 -pytest-xdist==3.0.2 -pytest-rerunfailures==10.2 -coveralls==3.3.1 -pytest-cov==4.0.0 -ddt==1.6.0 -hypothesis==6.81.1 -flake8==7.0.0 -mypy==1.8.0 -types-retry==0.9.9.4 -freezegun==1.5.0 -torch>=1.13.1 -httpx==0.27.0 -ruff==0.4.7 -aioresponses==0.7.6 -factory-boy==3.3.0 diff --git a/requirements/prod.txt b/requirements/prod.txt deleted file mode 100644 index 8bb6acd0f4..0000000000 --- a/requirements/prod.txt +++ /dev/null @@ -1,35 +0,0 @@ -aiohttp~=3.9 -ansible~=8.5.0 -ansible_vault~=2.1 -backoff -certifi~=2024.7.4 -colorama~=0.4.6 -cryptography~=42.0.5 -ddt~=1.6.0 -eth-utils<2.3.0 -fuzzywuzzy>=0.18.0 -fastapi~=0.110.1 -munch~=2.5.0 -netaddr -numpy~=1.26 -msgpack-numpy-opentensor~=0.5.0 -nest_asyncio -packaging -pycryptodome>=3.18.0,<4.0.0 -pyyaml -password_strength -pydantic>=2.3, <3 -PyNaCl~=1.3 -python-Levenshtein -python-statemachine~=2.1 -retry -requests -rich -scalecodec==1.2.11 -setuptools~=70.0.0 -shtab~=1.6.5 -substrate-interface~=1.7.9 -termcolor -tqdm -uvicorn -wheel diff --git a/requirements/torch.txt 
b/requirements/torch.txt deleted file mode 100644 index 028dec0810..0000000000 --- a/requirements/torch.txt +++ /dev/null @@ -1 +0,0 @@ -torch>=1.13.1 diff --git a/scripts/check_compatibility.sh b/scripts/check_compatibility.sh deleted file mode 100755 index b9c89c24dd..0000000000 --- a/scripts/check_compatibility.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash - -if [ -z "$1" ]; then - echo "Please provide a Python version as an argument." - exit 1 -fi - -python_version="$1" -all_passed=true - -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -RED='\033[0;31m' -NC='\033[0m' # No Color - -check_compatibility() { - all_supported=0 - - while read -r requirement; do - # Skip lines starting with git+ - if [[ "$requirement" == git+* ]]; then - continue - fi - - package_name=$(echo "$requirement" | awk -F'[!=<>~]' '{print $1}' | awk -F'[' '{print $1}') # Strip off brackets - echo -n "Checking $package_name... " - - url="https://pypi.org/pypi/$package_name/json" - response=$(curl -s $url) - status_code=$(curl -s -o /dev/null -w "%{http_code}" $url) - - if [ "$status_code" != "200" ]; then - echo -e "${RED}Information not available for $package_name. 
Failure.${NC}" - all_supported=1 - continue - fi - - classifiers=$(echo "$response" | jq -r '.info.classifiers[]') - requires_python=$(echo "$response" | jq -r '.info.requires_python') - - base_version="Programming Language :: Python :: ${python_version%%.*}" - specific_version="Programming Language :: Python :: $python_version" - - if echo "$classifiers" | grep -q "$specific_version" || echo "$classifiers" | grep -q "$base_version"; then - echo -e "${GREEN}Supported${NC}" - elif [ "$requires_python" != "null" ]; then - if echo "$requires_python" | grep -Eq "==$python_version|>=$python_version|<=$python_version"; then - echo -e "${GREEN}Supported${NC}" - else - echo -e "${RED}Not compatible with Python $python_version due to constraint $requires_python.${NC}" - all_supported=1 - fi - else - echo -e "${YELLOW}Warning: Specific version not listed, assuming compatibility${NC}" - fi - done < requirements/prod.txt - - return $all_supported -} - -echo "Checking compatibility for Python $python_version..." -check_compatibility -if [ $? -eq 0 ]; then - echo -e "${GREEN}All requirements are compatible with Python $python_version.${NC}" -else - echo -e "${RED}All requirements are NOT compatible with Python $python_version.${NC}" - all_passed=false -fi - -echo "" -if $all_passed; then - echo -e "${GREEN}All tests passed.${NC}" -else - echo -e "${RED}All tests did not pass.${NC}" - exit 1 -fi diff --git a/scripts/check_pre_submit.sh b/scripts/check_pre_submit.sh deleted file mode 100755 index 4dbe7747f6..0000000000 --- a/scripts/check_pre_submit.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# ruff checks formating -echo ">>> Run the pre-submit format check with \`ruff format .\`." -ruff format . - -echo ">>> Run the pre-submit format check with \`mypy\`." - -# mypy checks python versions compatibility -versions=("3.9" "3.10" "3.11") -for version in "${versions[@]}"; do - echo "Running mypy for Python $version..." 
- mypy --ignore-missing-imports bittensor/ --python-version="$version" -done - -# flake8 checks errors count in bittensor folder -error_count=$(flake8 bittensor/ --count) -echo ">>> Flake8 found ${error_count} errors." diff --git a/scripts/check_requirements_changes.sh b/scripts/check_requirements_changes.sh deleted file mode 100755 index 5fcd27ea3f..0000000000 --- a/scripts/check_requirements_changes.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Check if requirements files have changed in the last commit -if git diff --name-only HEAD~1 | grep -E 'requirements/prod.txt|requirements/dev.txt'; then - echo "Requirements files have changed. Running compatibility checks..." - echo 'export REQUIREMENTS_CHANGED="true"' >> $BASH_ENV -else - echo "Requirements files have not changed. Skipping compatibility checks..." - echo 'export REQUIREMENTS_CHANGED="false"' >> $BASH_ENV -fi diff --git a/scripts/create_wallet.sh b/scripts/create_wallet.sh deleted file mode 100755 index d0ee08b69f..0000000000 --- a/scripts/create_wallet.sh +++ /dev/null @@ -1,13 +0,0 @@ -mkdir -p ~/.bittensor/wallets/default/hotkeys -rm ~/.bittensor/wallets/default/coldkeypub.txt -rm ~/.bittensor/wallets/default/hotkeys/default -touch ~/.bittensor/wallets/default/coldkeypub.txt -touch ~/.bittensor/wallets/default/hotkeys/default -echo "0x74acaa8d7829336dfff7569f19225818cc593335b9aafcde3f69db23c3538561" >> ~/.bittensor/wallets/default/coldkeypub.txt -echo '{"accountId": "0x9cf7085aa3304c21dc0f571c0134abb12f2e8e1bc9dbfc82440b8d6ba7908655", "publicKey": "0x9cf7085aa3304c21dc0f571c0134abb12f2e8e1bc9dbfc82440b8d6ba7908655", "secretPhrase": "document usage siren cross across crater shrug jump marine distance absurd caught", "secretSeed": "0x2465ae0757117bea271ad622e1cd0c4b319c96896a3c7d9469a68e63cf7f9646", "ss58Address": "5FcWiCiFoSspGGocSxzatNL5kT6cjxjXQ9LuAuYbvFNUqcfX"}' >> ~/.bittensor/wallets/default/hotkeys/default -chmod 0600 ~/.bittensor/wallets/default/coldkeypub.txt -chmod 0600 
~/.bittensor/wallets/default/hotkeys/default -echo "~/.bittensor/wallets/default/coldkeypub.txt" -cat ~/.bittensor/wallets/default/coldkeypub.txt -echo "~/.bittensor/wallets/default/hotkeys/default" -cat ~/.bittensor/wallets/default/hotkeys/default \ No newline at end of file diff --git a/scripts/environments/README.md b/scripts/environments/README.md deleted file mode 100644 index 0caa0d2ae4..0000000000 --- a/scripts/environments/README.md +++ /dev/null @@ -1,21 +0,0 @@ -## 04 Installation on Apple M chip -There are quite a few Python libraries that are not yet compatible with Apple M chipset architecture. The best way to use Bittensor on this hardware is through Conda and Miniforge. The Opentensor team has created a Conda environment that makes installing Bittensor on these systems very easy. - -> NOTE: This tutorial assumes you have installed conda on mac, if you have not done so already you can install it from [here](https://conda.io/projects/conda/en/latest/user-guide/install/macos.html). - -1. Create the conda environment from the `apple_m1_environment.yml` file here: - ```bash - conda env create -f apple_m1_environment.yml - ``` - -2. Activate the new environment: `conda activate bittensor`. -3. Verify that the new environment was installed correctly: - ```bash - conda env list - ``` - -4. 
Install bittensor (without dependencies): - ```bash - conda activate bittensor # activate the env - pip install --no-deps bittensor # install bittensor - ``` diff --git a/scripts/environments/apple_m1_environment.yml b/scripts/environments/apple_m1_environment.yml deleted file mode 100644 index 25824aa64e..0000000000 --- a/scripts/environments/apple_m1_environment.yml +++ /dev/null @@ -1,272 +0,0 @@ -name: bittensor -channels: - - conda-forge -dependencies: - - anyio=3.6.2=pyhd8ed1ab_0 - - appnope=0.1.3=pyhd8ed1ab_0 - - argon2-cffi=21.3.0=pyhd8ed1ab_0 - - argon2-cffi-bindings=21.2.0=py310h8e9501a_3 - - asttokens=2.2.1=pyhd8ed1ab_0 - - async-lru=2.0.2=pyhd8ed1ab_0 - - attrs=23.1.0=pyh71513ae_1 - - babel=2.12.1=pyhd8ed1ab_1 - - backcall=0.2.0=pyh9f0ad1d_0 - - backports=1.0=pyhd8ed1ab_3 - - backports.functools_lru_cache=1.6.4=pyhd8ed1ab_0 - - beautifulsoup4=4.12.2=pyha770c72_0 - - bleach=6.0.0=pyhd8ed1ab_0 - - brotli=1.0.9=h1a8c8d9_8 - - brotli-bin=1.0.9=h1a8c8d9_8 - - bzip2=1.0.8=h3422bc3_4 - - c-ares=1.18.1=h3422bc3_0 - - ca-certificates=2023.5.7=hf0a4a13_0 - - cffi=1.15.1=py310h2399d43_3 - - charset-normalizer=3.1.0=pyhd8ed1ab_0 - - comm=0.1.3=pyhd8ed1ab_0 - - debugpy=1.6.7=py310h0f1eb42_0 - - decorator=5.1.1=pyhd8ed1ab_0 - - defusedxml=0.7.1=pyhd8ed1ab_0 - - entrypoints=0.4=pyhd8ed1ab_0 - - executing=1.2.0=pyhd8ed1ab_0 - - flit-core=3.9.0=pyhd8ed1ab_0 - - gmp=6.2.1=h9f76cd9_0 - - grpcio=1.42.0=py310h00ca444_0 - - importlib-metadata=6.6.0=pyha770c72_0 - - importlib_metadata=6.6.0=hd8ed1ab_0 - - importlib_resources=5.12.0=pyhd8ed1ab_0 - - ipython=8.13.2=pyhd1c38e8_0 - - jedi=0.18.2=pyhd8ed1ab_0 - - json5=0.9.5=pyh9f0ad1d_0 - - jupyter-lsp=2.1.0=pyhd8ed1ab_0 - - jupyter_client=8.2.0=pyhd8ed1ab_0 - - jupyter_core=5.3.0=py310hbe9552e_0 - - jupyter_events=0.6.3=pyhd8ed1ab_0 - - jupyter_server=2.5.0=pyhd8ed1ab_0 - - jupyter_server_terminals=0.4.4=pyhd8ed1ab_1 - - jupyterlab=4.0.0=pyhd8ed1ab_1 - - jupyterlab_pygments=0.2.2=pyhd8ed1ab_0 - - 
jupyterlab_server=2.22.1=pyhd8ed1ab_0 - - libabseil=20230125.0=cxx17_hb7217d7_1 - - libbrotlicommon=1.0.9=h1a8c8d9_8 - - libbrotlidec=1.0.9=h1a8c8d9_8 - - libbrotlienc=1.0.9=h1a8c8d9_8 - - libcxx=16.0.3=h4653b0c_0 - - libffi=3.4.2=h3422bc3_5 - - libgrpc=1.54.1=h9dbdbd0_0 - - libsodium=1.0.18=h27ca646_1 - - libsqlite=3.41.2=hb31c410_1 - - libzlib=1.2.13=h03a7124_4 - - matplotlib-inline=0.1.6=pyhd8ed1ab_0 - - mistune=2.0.5=pyhd8ed1ab_0 - - nb_conda_kernels=2.3.1=py310hbe9552e_2 - - nbconvert-core=7.4.0=pyhd8ed1ab_0 - - nbformat=5.8.0=pyhd8ed1ab_0 - - ncurses=6.3=h07bb92c_1 - - nest-asyncio=1.5.6=pyhd8ed1ab_0 - - notebook-shim=0.2.3=pyhd8ed1ab_0 - - openssl=3.1.0=h53f4e23_3 - - packaging=23.1=pyhd8ed1ab_0 - - pandocfilters=1.5.0=pyhd8ed1ab_0 - - parso=0.8.3=pyhd8ed1ab_0 - - pexpect=4.8.0=pyh1a96a4e_2 - - pickleshare=0.7.5=py_1003 - - pip=23.1.2=pyhd8ed1ab_0 - - pkgutil-resolve-name=1.3.10=pyhd8ed1ab_0 - - prompt-toolkit=3.0.38=pyha770c72_0 - - prompt_toolkit=3.0.38=hd8ed1ab_0 - - ptyprocess=0.7.0=pyhd3deb0d_0 - - pure_eval=0.2.2=pyhd8ed1ab_0 - - pycparser=2.21=pyhd8ed1ab_0 - - pycryptodome=3.19.0=py310hd71b1c6_1 - - pygments=2.15.1=pyhd8ed1ab_0 - - python-levenshtein=0.12.2=py310he2143c4_1 - - pyobjc-core=9.1.1=py310h44ed3dd_0 - - pyobjc-framework-cocoa=9.1.1=py310h44ed3dd_0 - - pyrsistent=0.19.3=py310h8e9501a_0 - - pysocks=1.7.1=pyha2e5f31_6 - - pytest-asyncio=0.21.0=pyhd8ed1ab_0 - - python=3.10.10=h3ba56d0_0_cpython - - python-dateutil=2.8.2=pyhd8ed1ab_0 - - python-json-logger=2.0.7=pyhd8ed1ab_0 - - python_abi=3.10=3_cp310 - - pytz=2023.3=pyhd8ed1ab_0 - - pyzmq=25.0.2=py310hc407298_0 - - re2=2023.02.02=hb7217d7_0 - - readline=8.2=h92ec313_1 - - rfc3339-validator=0.1.4=pyhd8ed1ab_0 - - rfc3986-validator=0.1.1=pyh9f0ad1d_0 - - send2trash=1.8.2=pyhd1c38e8_0 - - setuptools=67.7.2=pyhd8ed1ab_0 - - six=1.16.0=pyh6c4a22f_0 - - sniffio=1.3.0=pyhd8ed1ab_0 - - stack_data=0.6.2=pyhd8ed1ab_0 - - terminado=0.17.1=pyhd1c38e8_0 - - tinycss2=1.2.1=pyhd8ed1ab_0 - - 
tk=8.6.12=he1e0b03_0 - - tomli=2.0.1=pyhd8ed1ab_0 - - traitlets=5.9.0=pyhd8ed1ab_0 - - typing_extensions=4.6.1=pyha770c72_0 - - tzdata=2023c=h71feb2d_0 - - uvicorn=0.22.0=py310hbe9552e_0 - - wcwidth=0.2.6=pyhd8ed1ab_0 - - xz=5.2.6=h57fd34a_0 - - yaml=0.2.5=h3422bc3_2 - - zeromq=4.3.4=hbdafb3b_1 - - zipp=3.15.0=pyhd8ed1ab_0 - - zlib=1.2.13=h03a7124_4 - - pip: - - addict==2.4.0 - - aiohttp==3.9.0 - - aiosignal==1.3.1 - - altair==4.2.2 - - ansible==6.7.0 - - ansible-core==2.13.7 - - ansible-vault==2.1.0 - - appdirs==1.4.4 - - argparse==1.4.0 - - arrow==1.2.3 - - async-timeout==4.0.2 - - backoff==2.1.0 - - blinker==1.6.2 - - cachetools==4.2.4 - - certifi==2024.2.2 - - cfgv==3.4.0 - - chardet==3.0.4 - - click==8.1.3 - - colorama==0.4.6 - - commonmark==0.9.1 - - cryptography==42.0.5 - - cytoolz==0.12.2 - - dataclasses-json==0.5.13 - - ddt==1.6.0 - - dill==0.3.6 - - distlib==0.3.7 - - docker-pycreds==0.4.0 - - ecdsa==0.18.0 - - eth-hash==0.5.2 - - eth-keys==0.4.0 - - eth-typing==3.4.0 - - eth-utils==2.2.0 - - exceptiongroup==1.1.2 - - fastapi==0.110.1 - - filelock==3.12.2 - - fqdn==1.5.1 - - frozenlist==1.4.0 - - fsspec==2023.6.0 - - fuzzywuzzy==0.18.0 - - gitdb==4.0.10 - - gitpython==3.1.32 - - google-api-core==1.34.0 - - google-api-python-client==2.7.0 - - google-auth==1.35.0 - - google-auth-httplib2==0.1.0 - - googleapis-common-protos==1.59.0 - - grpcio-tools==1.42.0 - - httplib2==0.22.0 - - huggingface-hub==0.16.4 - - hypothesis==6.47.4 - - identify==2.5.26 - - ipykernel==6.26.0 - - ipython-genutils==0.2.0 - - ipywidgets==8.0.6 - - isoduration==20.11.0 - - jinja2==3.1.2 - - joblib==1.2.0 - - jsonpointer==2.3 - - jupyter==1.0.0 - - jupyter-console==6.6.3 - - jupyterlab-widgets==3.0.7 - - markupsafe==2.0.1 - - marshmallow==3.19.0 - - marshmallow-enum==1.5.1 - - more-itertools==10.0.0 - - msgpack-numpy-opentensor==0.5.0 - - multidict==6.0.4 - - multiprocess==0.70.14 - - munch==2.5.0 - - mypy-extensions==1.0.0 - - nbclassic==1.0.0 - - nbclient==0.7.4 - - netaddr==0.8.0 - 
- networkx==3.1 - - nltk==3.8.1 - - nodeenv==1.8.0 - - notebook==6.5.4 - - numexpr==2.8.4 - - openapi-schema-pydantic==1.2.4 - - password-strength==0.0.3.post2 - - pathtools==0.1.2 - - pillow==10.1.0 - - platformdirs==3.10.0 - - plotly==5.14.1 - - pre-commit==3.3.2 - - prometheus-client==0.14.1 - - promise==2.3 - - py==1.11.0 - - py-bip39-bindings==0.1.11 - - py-ed25519-bindings==1.0.2 - - py-ed25519-zebra-bindings==1.0.1 - - py-sr25519-bindings==0.2.0 - - pyarrow==12.0.1 - - pyasn1==0.5.0 - - pyasn1-modules==0.3.0 - - pydantic==2.7.1 - - pydeck==0.8.1b0 - - pyinstrument==4.4.0 - - pympler==1.0.1 - - pynacl==1.5.0 - - pyparsing==3.1.1 - - python-statemachine==2.1.2 - - pytest==7.4.0 - - qqdm==0.0.7 - - qtconsole==5.4.3 - - qtpy==2.3.1 - - regex==2023.6.3 - - requests==2.31.0 - - resolvelib==0.8.1 - - responses==0.18.0 - - retry==0.9.2 - - rich==12.5.1 - - rsa==4.9 - - scalecodec==1.2.11 - - scikit-learn==1.2.2 - - scipy==1.10.1 - - sentencepiece==0.1.99 - - sentry-sdk==1.28.1 - - setproctitle==1.3.2 - - shortuuid==1.0.11 - - shtab==1.6.5 - - smmap==5.0.0 - - sortedcontainers==2.4.0 - - soupsieve==2.4.1 - - sqlalchemy==2.0.19 - - starlette==0.37.2 - - streamlit==1.22.0 - - substrate-interface==1.7.9 - - tenacity==8.2.2 - - termcolor==2.1.1 - - threadpoolctl==3.1.0 - - tokenizers==0.13.3 - - toml==0.10.2 - - toolz==0.12.0 - - torch==2.0.1 - - torchvision==0.15.2 - - tornado==6.3.3 - - tqdm==4.64.1 - - typing-extensions==4.8.0 - - typing-inspect==0.8.0 - - tzlocal==5.0.1 - - uri-template==1.2.0 - - uritemplate==3.0.1 - - urllib3==1.26.15 - - validators==0.20.0 - - virtualenv==20.24.3 - - wandb==0.15.10 - - webcolors==1.13 - - webencodings==0.5.1 - - websocket-client==1.6.1 - - wheel==0.37.1 - - widgetsnbextension==4.0.7 - - xxhash==3.2.0 - - yarl==1.9.2 -prefix: /opt/homebrew/Caskroom/miniforge/base/envs/bittensor diff --git a/scripts/install.sh b/scripts/install.sh deleted file mode 100755 index 5111d75afb..0000000000 --- a/scripts/install.sh +++ /dev/null @@ -1,298 
+0,0 @@ - -#!/bin/bash -set -u - -# enable command completion -set -o history -o histexpand - -python="python3" - -abort() { - printf "%s\n" "$1" - exit 1 -} - -getc() { - local save_state - save_state=$(/bin/stty -g) - /bin/stty raw -echo - IFS= read -r -n 1 -d '' "$@" - /bin/stty "$save_state" -} - -exit_on_error() { - exit_code=$1 - last_command=${@:2} - if [ $exit_code -ne 0 ]; then - >&2 echo "\"${last_command}\" command failed with exit code ${exit_code}." - exit $exit_code - fi -} - -wait_for_user() { - local c - echo - echo "Press RETURN to continue or any other key to abort" - getc c - # we test for \r and \n because some stuff does \r instead - if ! [[ "$c" == $'\r' || "$c" == $'\n' ]]; then - exit 1 - fi -} - -shell_join() { - local arg - printf "%s" "$1" - shift - for arg in "$@"; do - printf " " - printf "%s" "${arg// /\ }" - done -} - -# string formatters -if [[ -t 1 ]]; then - tty_escape() { printf "\033[%sm" "$1"; } -else - tty_escape() { :; } -fi -tty_mkbold() { tty_escape "1;$1"; } -tty_underline="$(tty_escape "4;39")" -tty_blue="$(tty_mkbold 34)" -tty_red="$(tty_mkbold 31)" -tty_bold="$(tty_mkbold 39)" -tty_reset="$(tty_escape 0)" - -ohai() { - printf "${tty_blue}==>${tty_bold} %s${tty_reset}\n" "$(shell_join "$@")" -} - -# Things can fail later if `pwd` doesn't exist. -# Also sudo prints a warning message for no good reason -cd "/usr" || exit 1 - -linux_install_pre() { - sudo apt-get update - sudo apt-get install --no-install-recommends --no-install-suggests -y apt-utils curl git cmake build-essential - exit_on_error $? -} - -linux_install_python() { - which $python - if [[ $? != 0 ]] ; then - ohai "Installing python" - sudo apt-get install --no-install-recommends --no-install-suggests -y $python - else - ohai "Updating python" - sudo apt-get install --only-upgrade $python - fi - exit_on_error $? - ohai "Installing python tools" - sudo apt-get install --no-install-recommends --no-install-suggests -y $python-pip $python-dev - exit_on_error $? 
-} - -linux_update_pip() { - PYTHONPATH=$(which $python) - ohai "You are using python@ $PYTHONPATH$" - ohai "Installing python tools" - $python -m pip install --upgrade pip -} - -linux_install_bittensor() { - ohai "Cloning bittensor@master into ~/.bittensor/bittensor" - mkdir -p ~/.bittensor/bittensor - git clone https://github.com/opentensor/bittensor.git ~/.bittensor/bittensor/ 2> /dev/null || (cd ~/.bittensor/bittensor/ ; git fetch origin master ; git checkout master ; git pull --ff-only ; git reset --hard ; git clean -xdf) - ohai "Installing bittensor" - $python -m pip install -e ~/.bittensor/bittensor/ - exit_on_error $? -} - -linux_increase_ulimit(){ - ohai "Increasing ulimit to 1,000,000" - prlimit --pid=$PPID --nofile=1000000 -} - - -mac_install_xcode() { - which -s xcode-select - if [[ $? != 0 ]] ; then - ohai "Installing xcode:" - xcode-select --install - exit_on_error $? - fi -} - -mac_install_brew() { - which -s brew - if [[ $? != 0 ]] ; then - ohai "Installing brew:" - ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" - else - ohai "Updating brew:" - brew update --verbose - fi - exit_on_error $? -} - -mac_install_cmake() { - which -s cmake - if [[ $? != 0 ]] ; then - ohai "Installing cmake:" - brew install cmake - else - ohai "Updating cmake:" - brew upgrade cmake - fi -} - -mac_install_python() { - which -s python3 - ohai "Installing python3" - brew list python@3 &>/dev/null || brew install python@3; - ohai "Updating python3" - brew upgrade python@3 - exit_on_error $? 
-} - -mac_update_pip() { - PYTHONPATH=$(which $python) - ohai "You are using python@ $PYTHONPATH$" - ohai "Installing python tools" - $python -m pip install --upgrade pip -} - -mac_install_bittensor() { - ohai "Cloning bittensor@text_prompting into ~/.bittensor/bittensor" - git clone https://github.com/opentensor/bittensor.git ~/.bittensor/bittensor/ 2> /dev/null || (cd ~/.bittensor/bittensor/ ; git fetch origin master ; git checkout master ; git pull --ff-only ; git reset --hard; git clean -xdf) - ohai "Installing bittensor" - $python -m pip install -e ~/.bittensor/bittensor/ - exit_on_error $? - deactivate -} - -# Do install. -OS="$(uname)" -if [[ "$OS" == "Linux" ]]; then - - which -s apt - if [[ $? == 0 ]] ; then - abort "This linux based install requires apt. To run with other distros (centos, arch, etc), you will need to manually install the requirements" - fi - echo """ - -██████╗░██╗████████╗████████╗███████╗███╗░░██╗░██████╗░█████╗░██████╗░ -██╔══██╗██║╚══██╔══╝╚══██╔══╝██╔════╝████╗░██║██╔════╝██╔══██╗██╔══██╗ -██████╩╝██║░░░██║░░░░░░██║░░░█████╗░░██╔██╗██║╚█████╗░██║░░██║██████╔╝ -██╔══██╗██║░░░██║░░░░░░██║░░░██╔══╝░░██║╚████║░╚═══██╗██║░░██║██╔══██╗ -██████╩╝██║░░░██║░░░░░░██║░░░███████╗██║░╚███║██████╔╝╚█████╔╝██║░░██║ -╚═════╝░╚═╝░░░╚═╝░░░░░░╚═╝░░░╚══════╝╚═╝░░╚══╝╚═════╝░░╚════╝░╚═╝░░╚═╝ - - - Mining a new element. - """ - ohai "This script will install:" - echo "git" - echo "curl" - echo "cmake" - echo "build-essential" - echo "python3" - echo "python3-pip" - echo "bittensor" - - wait_for_user - linux_install_pre - linux_install_python - linux_update_pip - linux_install_bittensor - - ohai "Would you like to increase the ulimit? 
This will allow your miner to run for a longer time" - wait_for_user - linux_increase_ulimit - echo "" - echo "" - echo "######################################################################" - echo "## ##" - echo "## BITTENSOR SETUP ##" - echo "## ##" - echo "######################################################################" - echo "" - echo "" - -elif [[ "$OS" == "Darwin" ]]; then - echo """ - -██████╗░██╗████████╗████████╗███████╗███╗░░██╗░██████╗░█████╗░██████╗░ -██╔══██╗██║╚══██╔══╝╚══██╔══╝██╔════╝████╗░██║██╔════╝██╔══██╗██╔══██╗ -██████╩╝██║░░░██║░░░░░░██║░░░█████╗░░██╔██╗██║╚█████╗░██║░░██║██████╔╝ -██╔══██╗██║░░░██║░░░░░░██║░░░██╔══╝░░██║╚████║░╚═══██╗██║░░██║██╔══██╗ -██████╩╝██║░░░██║░░░░░░██║░░░███████╗██║░╚███║██████╔╝╚█████╔╝██║░░██║ -╚═════╝░╚═╝░░░╚═╝░░░░░░╚═╝░░░╚══════╝╚═╝░░╚══╝╚═════╝░░╚════╝░╚═╝░░╚═╝ - - - Mining a new element. - """ - ohai "This script will install:" - echo "xcode" - echo "homebrew" - echo "git" - echo "cmake" - echo "python3" - echo "python3-pip" - echo "bittensor" - - wait_for_user - mac_install_brew - mac_install_cmake - mac_install_python - mac_update_pip - mac_install_bittensor - echo "" - echo "" - echo "######################################################################" - echo "## ##" - echo "## BITTENSOR SETUP ##" - echo "## ##" - echo "######################################################################" -else - abort "Bittensor is only supported on macOS and Linux" -fi - -# Use the shell's audible bell. -if [[ -t 1 ]]; then -printf "\a" -fi - -echo "" -echo "" -ohai "Welcome. Installation successful" -echo "" -echo "- 1. Create a wallet " -echo " $ btcli new_coldkey (for holding funds)" -echo " $ btcli new_hotkey (for running miners)" -echo "" -echo "- 2. Run a miner on the prompting network. 
" -echo " $ python3 ~/.bittensor/bittensor/neurons/text/prompting/miners/gpt4all/neuron.py" -echo "" -ohai "Extras:" -echo "" -echo "- Check your tao balance: " -echo " $ btcli wallet overview" -echo "" -echo "- Stake to your miners:" -echo " $ btcli stake add" -echo " $ btcli stake remove" -echo "" -echo "- Create/list/register wallets" -echo " $ btcli w new_coldkey" -echo " $ btcli w new_hotkey" -echo " $ btcli w list" -echo " $ btcli s register" -echo "" -echo "- Use the Python API" -echo " $ python3"echo " >> import bittensor" -echo "" -echo "- Join the discussion: " -echo " ${tty_underline}https://discord.gg/3rUr6EcvbB${tty_reset}" -echo "" - - - diff --git a/scripts/post_install_cli.py b/scripts/post_install_cli.py deleted file mode 100644 index bfaca34c37..0000000000 --- a/scripts/post_install_cli.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -import subprocess -import sys - - -def post_install(): - # Determine the shell type (bash, zsh, etc.) - shell = os.environ.get("SHELL") - if "bash" in shell: - shell_config = "~/.bashrc" - elif "zsh" in shell: - shell_config = "~/.zshrc" - else: - print("Unsupported shell for autocompletion.") - return - - # Generate the completion script - completion_script = subprocess.check_output( - [sys.executable, "-m", "bittensor.cli", "--print-completion", shell] - ).decode() - - # Append the completion script to the shell configuration file - with open(os.path.expanduser(shell_config), "a") as file: - file.write("\n# Bittensor CLI Autocompletion\n") - file.write(completion_script) - - -if __name__ == "__main__": - post_install() diff --git a/scripts/run.sh b/scripts/run.sh deleted file mode 100755 index b6502b5d37..0000000000 --- a/scripts/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -cd ~/.bittensor/bittensor - -UPSTREAM=${1:-'@{u}'} -LOCAL=$(git rev-parse @) -REMOTE=$(git rev-parse "$UPSTREAM") -BASE=$(git merge-base @ "$UPSTREAM") - -if [ $LOCAL = $REMOTE ]; then - echo "Up-to-date" -elif [ $LOCAL = $BASE ]; then - git 
pull origin master - pip install -e . -else - echo "Diverged" -fi - diff --git a/setup.py b/setup.py deleted file mode 100644 index 49c419724a..0000000000 --- a/setup.py +++ /dev/null @@ -1,98 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -from setuptools import setup, find_packages -from os import path -from io import open -import codecs -import re -import os -import pathlib - - -def read_requirements(path): - requirements = [] - git_requirements = [] - - with pathlib.Path(path).open() as requirements_txt: - for line in requirements_txt: - if line.startswith("git+"): - pkg_name = re.search(r"egg=([a-zA-Z0-9_-]+)", line.strip()).group(1) - requirements.append(pkg_name + " @ " + line.strip()) - else: - requirements.append(line.strip()) - - return requirements - - -requirements = read_requirements("requirements/prod.txt") -extra_requirements_dev = read_requirements("requirements/dev.txt") -extra_requirements_cubit = read_requirements("requirements/cubit.txt") -extra_requirements_torch = read_requirements("requirements/torch.txt") - -here = path.abspath(path.dirname(__file__)) - -with open(path.join(here, "README.md"), encoding="utf-8") as f: - long_description = f.read() - - -# loading version from setup.py -with codecs.open( - os.path.join(here, "bittensor/__init__.py"), encoding="utf-8" -) as init_file: - version_match = re.search( - r"^__version__ = ['\"]([^'\"]*)['\"]", init_file.read(), re.M - ) - version_string = version_match.group(1) - -setup( - name="bittensor", - version=version_string, - description="bittensor", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/opentensor/bittensor", - author="bittensor.com", - packages=find_packages(exclude=["tests", "tests.*"]), - include_package_data=True, - author_email="", - license="MIT", - python_requires=">=3.9", - install_requires=requirements, - extras_require={ - "dev": extra_requirements_dev, - "torch": extra_requirements_torch, - }, - scripts=["bin/btcli"], - classifiers=[ - "Development Status :: 3 - Alpha", - "Intended Audience :: Developers", - "Topic :: Software Development :: Build Tools", - # Pick your license as you wish - "License :: OSI Approved :: MIT License", - 
"Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Topic :: Scientific/Engineering", - "Topic :: Scientific/Engineering :: Mathematics", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "Topic :: Software Development", - "Topic :: Software Development :: Libraries", - "Topic :: Software Development :: Libraries :: Python Modules", - ], -) diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index 1c7bc4757e..0000000000 --- a/tests/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Yuma Rao -# Copyright © 2022-2023 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
diff --git a/tests/e2e_tests/__init__.py b/tests/e2e_tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py deleted file mode 100644 index 6f648be130..0000000000 --- a/tests/e2e_tests/conftest.py +++ /dev/null @@ -1,104 +0,0 @@ -import logging -import os -import re -import shlex -import signal -import socket -import subprocess -import time - -import pytest -from substrateinterface import SubstrateInterface - -from tests.e2e_tests.utils import ( - clone_or_update_templates, - install_templates, - uninstall_templates, -) - -logging.basicConfig(level=logging.INFO) - - -# Function to check if the process is running by port -def is_chain_running(port): - """Check if a node is running on the given port.""" - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - try: - # Attempt to connect to the given port on localhost - s.connect(("127.0.0.1", port)) - return True - except (ConnectionRefusedError, OSError): - # If the connection is refused or there's an OS error, the node is not running - return False - - -# Fixture for setting up and tearing down a localnet.sh chain between tests -@pytest.fixture(scope="function") -def local_chain(request): - param = request.param if hasattr(request, "param") else None - script_path = os.getenv("LOCALNET_SH_PATH") - - if not script_path: - logging.warning("LOCALNET_SH_PATH env variable is not set, e2e test skipped.") - pytest.skip("LOCALNET_SH_PATH environment variable is not set.") - - # Determine the port to check based on `param` - port = 9945 # Default port if `param` is None - - # Always perform template installation - logging.info("Downloading and installing neuron templates from GitHub") - templates_dir = clone_or_update_templates() - install_templates(templates_dir) - - already_running = False - if is_chain_running(port): - already_running = True - logging.info(f"Chain already running on port {port}, skipping start.") - else: - 
logging.info(f"Starting new chain on port {port}...") - # compile commands to send to process - cmds = shlex.split(f"{script_path} {param}") - # Start new node process - process = subprocess.Popen( - cmds, stdout=subprocess.PIPE, text=True, preexec_fn=os.setsid - ) - - # Wait for the node to start using the existing pattern match - pattern = re.compile(r"Imported #1") - timestamp = int(time.time()) - - def wait_for_node_start(process, pattern): - for line in process.stdout: - print(line.strip()) - if int(time.time()) - timestamp > 20 * 60: - pytest.fail("Subtensor not started in time") - if pattern.search(line): - print("Node started!") - break - - wait_for_node_start(process, pattern) - - # Continue with installing templates - logging.info("Downloading and installing neuron templates from GitHub") - templates_dir = clone_or_update_templates() - install_templates(templates_dir) - - # Run the test, passing in the substrate interface - yield SubstrateInterface(url=f"ws://127.0.0.1:{port}") - - if not already_running: - # Terminate the process group (includes all child processes) - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - - # Give some time for the process to terminate - time.sleep(1) - - # If the process is not terminated, send SIGKILL - if process.poll() is None: - os.killpg(os.getpgid(process.pid), signal.SIGKILL) - - # Ensure the process has terminated - process.wait() - - logging.info("Uninstalling neuron templates") - uninstall_templates(templates_dir) diff --git a/tests/e2e_tests/multistep/__init__.py b/tests/e2e_tests/multistep/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/multistep/test_axon.py b/tests/e2e_tests/multistep/test_axon.py deleted file mode 100644 index ebe95587ea..0000000000 --- a/tests/e2e_tests/multistep/test_axon.py +++ /dev/null @@ -1,112 +0,0 @@ -import asyncio -import sys - -import pytest - -import bittensor -from bittensor import logging -from bittensor.commands import ( - 
RegisterCommand, - RegisterSubnetworkCommand, -) -from bittensor.utils import networking -from tests.e2e_tests.utils import ( - setup_wallet, - template_path, - templates_repo, -) - -""" -Test the axon mechanism. - -Verify that: -* axon is registered on network as a miner -* ip -* type -* port - -are set correctly, and that the miner is currently running - -""" - - -@pytest.mark.asyncio -async def test_axon(local_chain): - logging.info("Testing test_axon") - netuid = 1 - # Register root as Alice - alice_keypair, exec_command, wallet = setup_wallet("//Alice") - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - # Verify subnet created successfully - assert local_chain.query( - "SubtensorModule", "NetworksAdded", [netuid] - ).serialize(), "Subnet wasn't created successfully" - - # Register a neuron to the subnet - exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - str(netuid), - ], - ) - - metagraph = bittensor.metagraph(netuid=netuid, network="ws://localhost:9945") - - # validate one miner with ip of none - old_axon = metagraph.axons[0] - - assert len(metagraph.axons) == 1, "Expected 1 axon, but got len(metagraph.axons)" - assert old_axon.hotkey == alice_keypair.ss58_address - assert old_axon.coldkey == alice_keypair.ss58_address - assert old_axon.ip == "0.0.0.0" - assert old_axon.port == 0 - assert old_axon.ip_type == 0 - - # register miner - # "python neurons/miner.py --netuid 1 --subtensor.chain_endpoint ws://localhost:9945 --wallet.name wallet.name --wallet.hotkey wallet.hotkey.ss58_address" - cmd = " ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/miner.py"', - "--no_prompt", - "--netuid", - str(netuid), - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - wallet.path, - "--wallet.name", - wallet.name, - "--wallet.hotkey", - "default", - ] - ) - - axon_process = await asyncio.create_subprocess_shell( - cmd, - 
stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Neuron Alice is now mining") - await asyncio.sleep( - 5 - ) # wait for 5 seconds for the metagraph to refresh with latest data - - # refresh metagraph - metagraph = bittensor.metagraph(netuid=netuid, network="ws://localhost:9945") - updated_axon = metagraph.axons[0] - external_ip = networking.get_external_ip() - - assert len(metagraph.axons) == 1 - assert updated_axon.ip == external_ip - assert updated_axon.ip_type == networking.ip_version(external_ip) - assert updated_axon.port == 8091 - assert updated_axon.hotkey == alice_keypair.ss58_address - assert updated_axon.coldkey == alice_keypair.ss58_address - logging.info("Passed test_axon") diff --git a/tests/e2e_tests/multistep/test_dendrite.py b/tests/e2e_tests/multistep/test_dendrite.py deleted file mode 100644 index c68ccda818..0000000000 --- a/tests/e2e_tests/multistep/test_dendrite.py +++ /dev/null @@ -1,164 +0,0 @@ -import asyncio -import sys - -import pytest - -import bittensor -from bittensor import logging -from bittensor.commands import ( - RegisterCommand, - RegisterSubnetworkCommand, - RootRegisterCommand, - RootSetBoostCommand, - StakeCommand, -) -from tests.e2e_tests.utils import ( - setup_wallet, - template_path, - templates_repo, - wait_epoch, -) - -""" -Test the dendrites mechanism. 
- -Verify that: -* dendrite is registered on network as a validator -* stake successfully -* validator permit is set - -""" - - -@pytest.mark.asyncio -async def test_dendrite(local_chain): - logging.info("Testing test_dendrite") - netuid = 1 - # Register root as Alice - the subnet owner - alice_keypair, exec_command, wallet = setup_wallet("//Alice") - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - # Verify subnet created successfully - assert local_chain.query( - "SubtensorModule", "NetworksAdded", [netuid] - ).serialize(), "Subnet wasn't created successfully" - - bob_keypair, exec_command, wallet_path = setup_wallet("//Bob") - - # Register a neuron to the subnet - exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - str(netuid), - ], - ) - - metagraph = bittensor.metagraph(netuid=netuid, network="ws://localhost:9945") - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - # assert one neuron is Bob - assert len(subtensor.neurons(netuid=netuid)) == 1 - neuron = metagraph.neurons[0] - assert neuron.hotkey == bob_keypair.ss58_address - assert neuron.coldkey == bob_keypair.ss58_address - - # assert stake is 0 - assert neuron.stake.tao == 0 - - # Stake to become to top neuron after the first epoch - exec_command( - StakeCommand, - [ - "stake", - "add", - "--amount", - "10000", - ], - ) - - # refresh metagraph - metagraph = bittensor.metagraph(netuid=netuid, network="ws://localhost:9945") - neuron = metagraph.neurons[0] - # assert stake is 10000 - assert ( - neuron.stake.tao == 10_000.0 - ), f"Expected 10_000.0 staked TAO, but got {neuron.stake.tao}" - - # assert neuron is not validator - assert neuron.active is True - assert neuron.validator_permit is False - assert neuron.validator_trust == 0.0 - assert neuron.pruning_score == 0 - - # register validator from template - cmd = " ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/validator.py"', - "--no_prompt", - "--netuid", - str(netuid), - 
"--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - wallet.path, - "--wallet.name", - wallet.name, - "--wallet.hotkey", - "default", - ] - ) - - # run validator in the background - dendrite_process = await asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Neuron Alice is now validating") - await asyncio.sleep( - 5 - ) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data - - # register validator with root network - exec_command( - RootRegisterCommand, - [ - "root", - "register", - "--netuid", - str(netuid), - ], - ) - - exec_command( - RootSetBoostCommand, - [ - "root", - "boost", - "--netuid", - str(netuid), - "--increase", - "1", - ], - ) - # get current block, wait until next epoch - await wait_epoch(subtensor, netuid=netuid) - - # refresh metagraph - metagraph = bittensor.metagraph(netuid=netuid, network="ws://localhost:9945") - - # refresh validator neuron - neuron = metagraph.neurons[0] - - assert len(metagraph.neurons) == 1 - assert neuron.active is True - assert neuron.validator_permit is True - assert neuron.hotkey == bob_keypair.ss58_address - assert neuron.coldkey == bob_keypair.ss58_address - logging.info("Passed test_dendrite") diff --git a/tests/e2e_tests/multistep/test_emissions.py b/tests/e2e_tests/multistep/test_emissions.py deleted file mode 100644 index a05ff478a4..0000000000 --- a/tests/e2e_tests/multistep/test_emissions.py +++ /dev/null @@ -1,283 +0,0 @@ -import asyncio -import sys - -import pytest - -import bittensor -from bittensor import logging -from bittensor.commands import ( - RegisterCommand, - RegisterSubnetworkCommand, - RootRegisterCommand, - RootSetBoostCommand, - RootSetWeightsCommand, - SetTakeCommand, - StakeCommand, - SubnetSudoCommand, -) -from tests.e2e_tests.utils import ( - setup_wallet, - template_path, - templates_repo, - wait_epoch, -) - -""" -Test the 
emissions mechanism. - -Verify that for the miner: -* trust -* rank -* consensus -* incentive -* emission -are updated with proper values after an epoch has passed. - -For the validator verify that: -* validator_permit -* validator_trust -* dividends -* stake -are updated with proper values after an epoch has passed. - -""" - - -@pytest.mark.asyncio -@pytest.mark.skip -async def test_emissions(local_chain): - logging.info("Testing test_emissions") - netuid = 1 - # Register root as Alice - the subnet owner and validator - alice_keypair, alice_exec_command, alice_wallet = setup_wallet("//Alice") - alice_exec_command(RegisterSubnetworkCommand, ["s", "create"]) - # Verify subnet created successfully - assert local_chain.query( - "SubtensorModule", "NetworksAdded", [netuid] - ).serialize(), "Subnet wasn't created successfully" - - # Register Bob as miner - bob_keypair, bob_exec_command, bob_wallet = setup_wallet("//Bob") - - # Register Alice as neuron to the subnet - alice_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - str(netuid), - ], - ) - - # Register Bob as neuron to the subnet - bob_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - str(netuid), - ], - ) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - # assert two neurons are in network - assert len(subtensor.neurons(netuid=netuid)) == 2 - - # Alice to stake to become to top neuron after the first epoch - alice_exec_command( - StakeCommand, - [ - "stake", - "add", - "--amount", - "10000", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - # register Alice as validator - cmd = " ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/validator.py"', - "--no_prompt", - "--netuid", - str(netuid), - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - alice_wallet.path, - "--wallet.name", - alice_wallet.name, - "--wallet.hotkey", 
- "default", - "--logging.trace", - ] - ) - # run validator in the background - - await asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Neuron Alice is now validating") - await asyncio.sleep(5) - - # register validator with root network - alice_exec_command( - RootRegisterCommand, - [ - "root", - "register", - "--netuid", - str(netuid), - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - await wait_epoch(subtensor, netuid=netuid) - - alice_exec_command( - RootSetBoostCommand, - [ - "root", - "boost", - "--netuid", - str(netuid), - "--increase", - "1000", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - # register Bob as miner - cmd = " ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/miner.py"', - "--no_prompt", - "--netuid", - str(netuid), - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - bob_wallet.path, - "--wallet.name", - bob_wallet.name, - "--wallet.hotkey", - "default", - "--logging.trace", - ] - ) - - await asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Neuron Bob is now mining") - await wait_epoch(subtensor) - - logging.warning("Setting root set weights") - alice_exec_command( - RootSetWeightsCommand, - [ - "root", - "weights", - "--netuid", - str(netuid), - "--weights", - "0.3", - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - # Set delegate take for Alice - alice_exec_command(SetTakeCommand, ["r", "set_take", "--take", "0.15"]) - - # Lower the rate limit - alice_exec_command( - SubnetSudoCommand, - [ - "sudo", - "set", - "hyperparameters", - "--netuid", - str(netuid), - "--wallet.name", - alice_wallet.name, - "--param", - 
"weights_rate_limit", - "--value", - "1", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - # wait epoch until for emissions to get distributed - await wait_epoch(subtensor) - - await asyncio.sleep( - 5 - ) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data - - # refresh metagraph - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - # get current emissions and validate that Alice has gotten tao - weights = [(0, [(0, 65535), (1, 65535)])] - assert ( - subtensor.weights(netuid=netuid) == weights - ), "Weights set vs weights in subtensor don't match" - - neurons = subtensor.neurons(netuid=netuid) - bob = neurons[1] - alice = neurons[0] - - assert bob.emission > 0 - assert bob.consensus == 1 - assert bob.incentive == 1 - assert bob.rank == 1 - assert bob.trust == 1 - - assert alice.emission > 0 - assert alice.bonds == [(1, 65535)] - assert alice.dividends == 1 - assert alice.stake.tao > 10000 # assert an increase in stake - assert alice.validator_permit is True - assert alice.validator_trust == 1 - - assert alice.weights == [(0, 65535), (1, 65535)] - - assert ( - subtensor.get_emission_value_by_subnet(netuid=netuid) > 0 - ), ( - "Emissions are not greated than 0" - ) # emission on this subnet is strictly greater than 0 - logging.info("Passed test_emissions") diff --git a/tests/e2e_tests/multistep/test_incentive.py b/tests/e2e_tests/multistep/test_incentive.py deleted file mode 100644 index c2f6baa664..0000000000 --- a/tests/e2e_tests/multistep/test_incentive.py +++ /dev/null @@ -1,247 +0,0 @@ -import asyncio -import sys - -import pytest - -import bittensor -from bittensor import logging -from bittensor.commands import ( - RegisterCommand, - RegisterSubnetworkCommand, - RootRegisterCommand, - RootSetBoostCommand, - StakeCommand, -) -from tests.e2e_tests.utils import ( - setup_wallet, - template_path, - templates_repo, - wait_epoch, -) - -""" -Test the incentive mechanism. 
- -Verify that for the miner: -* trust -* rank -* consensus -* incentive -are updated with proper values after an epoch has passed. - -For the validator verify that: -* validator_permit -* validator_trust -* dividends -* stake -are updated with proper values after an epoch has passed. - -""" - - -@pytest.mark.asyncio -async def test_incentive(local_chain): - logging.info("Testing test_incentive") - netuid = 1 - # Register root as Alice - the subnet owner and validator - alice_keypair, alice_exec_command, alice_wallet = setup_wallet("//Alice") - alice_exec_command(RegisterSubnetworkCommand, ["s", "create"]) - # Verify subnet created successfully - assert local_chain.query( - "SubtensorModule", "NetworksAdded", [netuid] - ).serialize(), "Subnet wasn't created successfully" - - # Register Bob as miner - bob_keypair, bob_exec_command, bob_wallet = setup_wallet("//Bob") - - # Register Alice as neuron to the subnet - alice_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - str(netuid), - ], - ) - - # Register Bob as neuron to the subnet - bob_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - str(netuid), - ], - ) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - # assert two neurons are in network - assert ( - len(subtensor.neurons(netuid=netuid)) == 2 - ), "Alice & Bob not registered in the subnet" - - # Alice to stake to become to top neuron after the first epoch - alice_exec_command( - StakeCommand, - [ - "stake", - "add", - "--amount", - "10000", - ], - ) - - # register Bob as miner - cmd = " ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/miner.py"', - "--no_prompt", - "--netuid", - str(netuid), - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - bob_wallet.path, - "--wallet.name", - bob_wallet.name, - "--wallet.hotkey", - "default", - "--logging.trace", - ] - ) - - miner_process = await 
asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Neuron Bob is now mining") - await asyncio.sleep( - 5 - ) # wait for 5 seconds for the metagraph to refresh with latest data - - # register Alice as validator - cmd = " ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/validator.py"', - "--no_prompt", - "--netuid", - str(netuid), - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - alice_wallet.path, - "--wallet.name", - alice_wallet.name, - "--wallet.hotkey", - "default", - "--logging.trace", - ] - ) - # run validator in the background - - validator_process = await asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Neuron Alice is now validating") - await asyncio.sleep( - 5 - ) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data - - # register validator with root network - alice_exec_command( - RootRegisterCommand, - [ - "root", - "register", - "--netuid", - str(netuid), - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - ], - ) - - alice_exec_command( - RootSetBoostCommand, - [ - "root", - "boost", - "--netuid", - str(netuid), - "--increase", - "100", - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - ], - ) - - # get latest metagraph - metagraph = bittensor.metagraph(netuid=netuid, network="ws://localhost:9945") - - # get current emissions - bob_neuron = metagraph.neurons[1] - assert bob_neuron.incentive == 0 - assert bob_neuron.consensus == 0 - assert bob_neuron.rank == 0 - assert bob_neuron.trust == 0 - - alice_neuron = metagraph.neurons[0] - assert alice_neuron.validator_permit is False - assert alice_neuron.dividends == 0 - assert 
alice_neuron.stake.tao == 10_000.0 - assert alice_neuron.validator_trust == 0 - - # wait until next epoch - await wait_epoch(subtensor) - - # for some reason the weights do not get set through the template. Set weight manually. - alice_wallet = bittensor.wallet() - alice_wallet._hotkey = alice_keypair - subtensor._do_set_weights( - wallet=alice_wallet, - uids=[1], - vals=[65535], - netuid=netuid, - version_key=0, - wait_for_inclusion=True, - wait_for_finalization=True, - ) - logging.info("Alice neuron set weights successfully") - - # wait epoch until weight go into effect - await wait_epoch(subtensor) - - # refresh metagraph - metagraph = bittensor.metagraph(netuid=netuid, network="ws://localhost:9945") - - # get current emissions and validate that Alice has gotten tao - bob_neuron = metagraph.neurons[1] - assert bob_neuron.incentive == 1 - assert bob_neuron.consensus == 1 - assert bob_neuron.rank == 1 - assert bob_neuron.trust == 1 - - alice_neuron = metagraph.neurons[0] - assert alice_neuron.validator_permit is True - assert alice_neuron.dividends == 1 - assert alice_neuron.stake.tao == 10_000.0 - assert alice_neuron.validator_trust == 1 - logging.info("Passed test_incentive") diff --git a/tests/e2e_tests/subcommands/__init__.py b/tests/e2e_tests/subcommands/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/subcommands/delegation/__init__.py b/tests/e2e_tests/subcommands/delegation/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/subcommands/delegation/test_set_delegate_take.py b/tests/e2e_tests/subcommands/delegation/test_set_delegate_take.py deleted file mode 100644 index ddad1dfcc1..0000000000 --- a/tests/e2e_tests/subcommands/delegation/test_set_delegate_take.py +++ /dev/null @@ -1,61 +0,0 @@ -from bittensor import logging -from bittensor.commands.delegates import NominateCommand, SetTakeCommand -from bittensor.commands.network import RegisterSubnetworkCommand -from 
bittensor.commands.register import RegisterCommand -from bittensor.commands.root import RootRegisterCommand -from tests.e2e_tests.utils import setup_wallet - - -def test_set_delegate_increase_take(local_chain): - logging.info("Testing test_set_delegate_increase_take") - # Register root as Alice - keypair, exec_command, wallet = setup_wallet("//Alice") - exec_command(RootRegisterCommand, ["root", "register"]) - - # Create subnet 1 and verify created successfully - assert not ( - local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() - ), "Subnet is already registered" - - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - assert local_chain.query( - "SubtensorModule", "NetworksAdded", [1] - ).serialize(), "Subnet wasn't registered" - - # Register and nominate Bob - keypair, exec_command, wallet = setup_wallet("//Bob") - assert ( - local_chain.query( - "SubtensorModule", "LastTxBlock", [keypair.ss58_address] - ).serialize() - == 0 - ) - - assert ( - local_chain.query( - "SubtensorModule", "LastTxBlockDelegateTake", [keypair.ss58_address] - ).serialize() - == 0 - ) - exec_command(RegisterCommand, ["s", "register", "--netuid", "1"]) - exec_command(NominateCommand, ["root", "nominate"]) - assert ( - local_chain.query( - "SubtensorModule", "LastTxBlock", [keypair.ss58_address] - ).serialize() - > 0 - ) - assert ( - local_chain.query( - "SubtensorModule", "LastTxBlockDelegateTake", [keypair.ss58_address] - ).serialize() - > 0 - ) - - # Set delegate take for Bob - exec_command(SetTakeCommand, ["r", "set_take", "--take", "0.15"]) - assert local_chain.query( - "SubtensorModule", "Delegates", [keypair.ss58_address] - ).value == int(0.15 * 65535), "Take value set incorrectly" - logging.info("Passed test_set_delegate_increase_take") diff --git a/tests/e2e_tests/subcommands/hyperparams/__init__.py b/tests/e2e_tests/subcommands/hyperparams/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/tests/e2e_tests/subcommands/hyperparams/test_liquid_alpha.py b/tests/e2e_tests/subcommands/hyperparams/test_liquid_alpha.py deleted file mode 100644 index 434184cb8d..0000000000 --- a/tests/e2e_tests/subcommands/hyperparams/test_liquid_alpha.py +++ /dev/null @@ -1,278 +0,0 @@ -import bittensor -from bittensor import logging -from bittensor.commands import ( - RegisterCommand, - RegisterSubnetworkCommand, - StakeCommand, - SubnetSudoCommand, -) -from tests.e2e_tests.utils import setup_wallet - -""" -Test the liquid alpha weights mechanism. - -Verify that: -* it can get enabled -* liquid alpha values cannot be set before the feature flag is set -* after feature flag, you can set alpha_high -* after feature flag, you can set alpha_low -""" - - -def test_liquid_alpha_enabled(local_chain, capsys): - logging.info("Testing test_liquid_alpha_enabled") - # Register root as Alice - keypair, exec_command, wallet = setup_wallet("//Alice") - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - # hyperparameter values - alpha_values = "6553, 53083" - - # Verify subnet 1 created successfully - assert local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() - - # Register a neuron to the subnet - exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - ], - ) - - # Stake to become to top neuron after the first epoch - exec_command( - StakeCommand, - [ - "stake", - "add", - "--amount", - "100000", - ], - ) - - # Assert liquid alpha disabled - subtensor = bittensor.subtensor(network="ws://localhost:9945") - assert ( - subtensor.get_subnet_hyperparameters(netuid=1).liquid_alpha_enabled is False - ), "Liquid alpha is enabled by default" - - # Attempt to set alpha high/low while disabled (should fail) - result = subtensor.set_hyperparameter( - wallet=wallet, - netuid=1, - parameter="alpha_values", - value=list(map(int, alpha_values.split(","))), - wait_for_inclusion=True, - wait_for_finalization=True, - ) - assert result is None - output 
= capsys.readouterr().out - assert ( - "❌ Failed: Subtensor returned `LiquidAlphaDisabled (Module)` error. This means: \n`Attempting to set alpha high/low while disabled`" - in output - ) - - # Enable Liquid Alpha - exec_command( - SubnetSudoCommand, - [ - "sudo", - "set", - "hyperparameters", - "--netuid", - "1", - "--wallet.name", - wallet.name, - "--param", - "liquid_alpha_enabled", - "--value", - "True", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - assert subtensor.get_subnet_hyperparameters( - netuid=1 - ).liquid_alpha_enabled, "Failed to enable liquid alpha" - - output = capsys.readouterr().out - assert "✅ Hyper parameter liquid_alpha_enabled changed to True" in output - - exec_command( - SubnetSudoCommand, - [ - "sudo", - "set", - "hyperparameters", - "--netuid", - "1", - "--wallet.name", - wallet.name, - "--param", - "alpha_values", - "--value", - "87, 54099", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - assert ( - subtensor.get_subnet_hyperparameters(netuid=1).alpha_high == 54099 - ), "Failed to set alpha high" - assert ( - subtensor.get_subnet_hyperparameters(netuid=1).alpha_low == 87 - ), "Failed to set alpha low" - - u16_max = 65535 - # Set alpha high too low - alpha_high_too_low = ( - u16_max * 4 // 5 - ) - 1 # One less than the minimum acceptable value - result = subtensor.set_hyperparameter( - wallet=wallet, - netuid=1, - parameter="alpha_values", - value=[6553, alpha_high_too_low], - wait_for_inclusion=True, - wait_for_finalization=True, - ) - assert result is None - output = capsys.readouterr().out - assert ( - "❌ Failed: Subtensor returned `AlphaHighTooLow (Module)` error. 
This means: \n`Alpha high is too low: alpha_high > 0.8`" - in output - ) - - alpha_high_too_high = u16_max + 1 # One more than the max acceptable value - try: - result = subtensor.set_hyperparameter( - wallet=wallet, - netuid=1, - parameter="alpha_values", - value=[6553, alpha_high_too_high], - wait_for_inclusion=True, - wait_for_finalization=True, - ) - assert result is None, "Expected not to be able to set alpha value above u16" - except Exception as e: - assert str(e) == "65536 out of range for u16", f"Unexpected error: {e}" - - # Set alpha low too low - alpha_low_too_low = 0 - result = subtensor.set_hyperparameter( - wallet=wallet, - netuid=1, - parameter="alpha_values", - value=[alpha_low_too_low, 53083], - wait_for_inclusion=True, - wait_for_finalization=True, - ) - assert result is None - output = capsys.readouterr().out - assert ( - "❌ Failed: Subtensor returned `AlphaLowOutOfRange (Module)` error. This means: \n`Alpha low is out of range: alpha_low > 0 && alpha_low < 0.8`" - in output - ) - - # Set alpha low too high - alpha_low_too_high = ( - u16_max * 4 // 5 - ) + 1 # One more than the maximum acceptable value - result = subtensor.set_hyperparameter( - wallet=wallet, - netuid=1, - parameter="alpha_values", - value=[alpha_low_too_high, 53083], - wait_for_inclusion=True, - wait_for_finalization=True, - ) - assert result is None - output = capsys.readouterr().out - assert ( - "❌ Failed: Subtensor returned `AlphaLowOutOfRange (Module)` error. 
This means: \n`Alpha low is out of range: alpha_low > 0 && alpha_low < 0.8`" - in output - ) - - exec_command( - SubnetSudoCommand, - [ - "sudo", - "set", - "hyperparameters", - "--netuid", - "1", - "--wallet.name", - wallet.name, - "--param", - "alpha_values", - "--value", - alpha_values, - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - assert ( - subtensor.get_subnet_hyperparameters(netuid=1).alpha_high == 53083 - ), "Failed to set alpha high" - assert ( - subtensor.get_subnet_hyperparameters(netuid=1).alpha_low == 6553 - ), "Failed to set alpha low" - - output = capsys.readouterr().out - assert "✅ Hyper parameter alpha_values changed to [6553.0, 53083.0]" in output - - # Disable Liquid Alpha - exec_command( - SubnetSudoCommand, - [ - "sudo", - "set", - "hyperparameters", - "--netuid", - "1", - "--wallet.name", - wallet.name, - "--param", - "liquid_alpha_enabled", - "--value", - "False", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - assert ( - subtensor.get_subnet_hyperparameters(netuid=1).liquid_alpha_enabled is False - ), "Failed to disable liquid alpha" - - output = capsys.readouterr().out - assert "✅ Hyper parameter liquid_alpha_enabled changed to False" in output - - result = subtensor.set_hyperparameter( - wallet=wallet, - netuid=1, - parameter="alpha_values", - value=list(map(int, alpha_values.split(","))), - wait_for_inclusion=True, - wait_for_finalization=True, - ) - assert result is None - output = capsys.readouterr().out - assert ( - "❌ Failed: Subtensor returned `LiquidAlphaDisabled (Module)` error. 
This means: \n`Attempting to set alpha high/low while disabled`" - in output - ) - logging.info("Passed test_liquid_alpha_enabled") diff --git a/tests/e2e_tests/subcommands/register/__init__.py b/tests/e2e_tests/subcommands/register/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/subcommands/register/test_swap_hotkey.py b/tests/e2e_tests/subcommands/register/test_swap_hotkey.py deleted file mode 100644 index 798aafc3f1..0000000000 --- a/tests/e2e_tests/subcommands/register/test_swap_hotkey.py +++ /dev/null @@ -1,566 +0,0 @@ -import asyncio -import sys -import uuid - -import pytest - -import bittensor -from bittensor import logging -from bittensor.commands import ( - ListCommand, - NewHotkeyCommand, - RegisterCommand, - RegisterSubnetworkCommand, - RootRegisterCommand, - StakeCommand, - SwapHotkeyCommand, -) -from tests.e2e_tests.utils import ( - setup_wallet, - template_path, - templates_repo, - wait_interval, -) - -""" -Test the swap_hotkey mechanism. 
- -Verify that: -* Alice - neuron is registered on network as a validator -* Bob - neuron is registered on network as a miner -* Swap hotkey of Alice via BTCLI -* verify that the hotkey is swapped -* verify that stake hotkey, delegates hotkey, UIDS and prometheus hotkey is swapped -""" - - -@pytest.mark.asyncio -async def test_swap_hotkey_validator_owner(local_chain): - logging.info("Testing swap hotkey of validator_owner") - # Register root as Alice - the subnet owner and validator - alice_keypair, alice_exec_command, alice_wallet = setup_wallet("//Alice") - alice_exec_command(RegisterSubnetworkCommand, ["s", "create"]) - # Verify subnet 1 created successfully - assert local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() - - # Register Bob as miner - bob_keypair, bob_exec_command, bob_wallet = setup_wallet("//Bob") - - alice_old_hotkey_address = alice_wallet.hotkey.ss58_address - - # Register Alice as neuron to the subnet - alice_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - ], - ) - - # Register Bob as neuron to the subnet - bob_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - ], - ) - - logging.info("Alice and bob registered to the subnet") - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - # assert two neurons are in network - assert ( - len(subtensor.neurons(netuid=1)) == 2 - ), "Alice and Bob neurons not found in the network" - - # register Bob as miner - cmd = " ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/miner.py"', - "--no_prompt", - "--netuid", - "1", - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - bob_wallet.path, - "--wallet.name", - bob_wallet.name, - "--wallet.hotkey", - "default", - "--logging.trace", - ] - ) - - await asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Bob 
neuron is now mining") - await asyncio.sleep( - 5 - ) # wait for 5 seconds for the metagraph to refresh with latest data - - # register Alice as validator - cmd = " ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/validator.py"', - "--no_prompt", - "--netuid", - "1", - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - alice_wallet.path, - "--wallet.name", - alice_wallet.name, - "--wallet.hotkey", - "default", - "--logging.trace", - ] - ) - # run validator in the background - - await asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Alice neuron is now validating") - await asyncio.sleep( - 5 - ) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data - - # register validator with root network - alice_exec_command( - RootRegisterCommand, - [ - "root", - "register", - "--netuid", - "1", - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - ], - ) - - # Alice to stake to become to top neuron after the first epoch - alice_exec_command( - StakeCommand, - [ - "stake", - "add", - "--amount", - "10000", - ], - ) - - # get latest metagraph - metagraph = bittensor.metagraph(netuid=1, network="ws://localhost:9945") - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - # assert alice has old hotkey - alice_neuron = metagraph.neurons[0] - - # get current number of hotkeys - wallet_tree = alice_exec_command(ListCommand, ["w", "list"], "get_tree") - num_hotkeys = len(wallet_tree.children[0].children) - - assert ( - alice_neuron.coldkey == "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" - ), "Alice coldkey not as expected" - assert ( - alice_neuron.hotkey == alice_old_hotkey_address - ), "Alice hotkey not as expected" - assert ( - 
alice_neuron.stake_dict["5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"].tao - == 10000.0 - ), "Alice tao not as expected" - assert alice_neuron.hotkey == alice_neuron.coldkey, "Coldkey and hotkey don't match" - assert ( - alice_neuron.hotkey == subtensor.get_all_subnets_info()[1].owner_ss58 - ), "Hotkey doesn't match owner address" - assert alice_neuron.coldkey == subtensor.get_hotkey_owner( - alice_old_hotkey_address - ), "Coldkey doesn't match hotkey owner" - assert ( - subtensor.is_hotkey_delegate(alice_neuron.hotkey) is True - ), "Alice is not a delegate" - assert ( - subtensor.is_hotkey_registered_on_subnet( - hotkey_ss58=alice_neuron.hotkey, netuid=1 - ) - is True - ), "Alice hotkey not registered on subnet" - assert ( - subtensor.get_uid_for_hotkey_on_subnet( - hotkey_ss58=alice_neuron.hotkey, netuid=1 - ) - == alice_neuron.uid - ), "Alice hotkey not regisred on netuid" - if num_hotkeys > 1: - logging.info(f"You have {num_hotkeys} hotkeys for Alice.") - - # generate new guid name for hotkey - new_hotkey_name = str(uuid.uuid4()) - - # create a new hotkey - alice_exec_command( - NewHotkeyCommand, - [ - "w", - "new_hotkey", - "--wallet.name", - alice_wallet.name, - "--wallet.hotkey", - new_hotkey_name, - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - logging.info("New hotkey is created") - # wait rate limit, until we are allowed to change hotkeys - rate_limit = subtensor.tx_rate_limit() - curr_block = subtensor.get_current_block() - await wait_interval(rate_limit + curr_block + 1, subtensor) - - # swap hotkey - alice_exec_command( - SwapHotkeyCommand, - [ - "w", - "swap_hotkey", - "--wallet.name", - alice_wallet.name, - "--wallet.hotkey", - alice_wallet.hotkey_str, - "--wallet.hotkey_b", - new_hotkey_name, - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - # get latest metagraph - metagraph = bittensor.metagraph(netuid=1, network="ws://localhost:9945") - subtensor = 
bittensor.subtensor(network="ws://localhost:9945") - - # assert Alice has new hotkey - alice_neuron = metagraph.neurons[0] - wallet_tree = alice_exec_command(ListCommand, ["w", "list"], "get_tree") - new_num_hotkeys = len(wallet_tree.children[0].children) - - assert ( - alice_neuron.coldkey == "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" - ), "Coldkey was changed" # cold key didnt change - assert ( - alice_neuron.hotkey != alice_old_hotkey_address - ), "Hotkey is not updated w.r.t old_hotkey_address" - assert ( - alice_neuron.hotkey != alice_neuron.coldkey - ), "Hotkey is not updated w.r.t coldkey" - assert ( - alice_neuron.coldkey == subtensor.get_all_subnets_info()[1].owner_ss58 - ) # new hotkey address is subnet owner - assert alice_neuron.coldkey != subtensor.get_hotkey_owner( - alice_old_hotkey_address - ) # old key is NOT owner - assert alice_neuron.coldkey == subtensor.get_hotkey_owner( - alice_neuron.hotkey - ) # new key is owner - assert ( - subtensor.is_hotkey_delegate(alice_neuron.hotkey) is True - ) # new key is delegate - assert ( # new key is registered on subnet - subtensor.is_hotkey_registered_on_subnet( - hotkey_ss58=alice_neuron.hotkey, netuid=1 - ) - is True - ) - assert ( # old key is NOT registered on subnet - subtensor.is_hotkey_registered_on_subnet( - hotkey_ss58=alice_old_hotkey_address, netuid=1 - ) - is False - ) - assert ( # uid is unchanged - subtensor.get_uid_for_hotkey_on_subnet( - hotkey_ss58=alice_neuron.hotkey, netuid=1 - ) - == alice_neuron.uid - ) - assert new_num_hotkeys == num_hotkeys + 1 - logging.info("Finished test_swap_hotkey_validator_owner") - - -""" -Test the swap_hotkey mechanism. 
- -Verify that: -* Alice - neuron is registered on network as a validator -* Bob - neuron is registered on network as a miner -* Swap hotkey of Bob via BTCLI -* verify that the hotkey is swapped -* verify that stake hotkey, delegates hotkey, UIDS and prometheus hotkey is swapped -""" - - -@pytest.mark.asyncio -async def test_swap_hotkey_miner(local_chain): - logging.info("Testing test_swap_hotkey_miner") - # Register root as Alice - the subnet owner and validator - alice_keypair, alice_exec_command, alice_wallet = setup_wallet("//Alice") - alice_exec_command(RegisterSubnetworkCommand, ["s", "create"]) - # Verify subnet 1 created successfully - assert local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() - - # Register Bob as miner - bob_keypair, bob_exec_command, bob_wallet = setup_wallet("//Bob") - - bob_old_hotkey_address = bob_wallet.hotkey.ss58_address - - # Register Alice as neuron to the subnet - alice_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - ], - ) - - # Register Bob as neuron to the subnet - bob_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - ], - ) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - # assert two neurons are in network - total_neurons = len(subtensor.neurons(netuid=1)) - assert total_neurons == 2, f"Expected 2 neurons, found {total_neurons}" - - # register Bob as miner - cmd = " ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/miner.py"', - "--no_prompt", - "--netuid", - "1", - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - bob_wallet.path, - "--wallet.name", - bob_wallet.name, - "--wallet.hotkey", - "default", - "--logging.trace", - ] - ) - - await asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Bob neuron is now mining") - # register Alice as validator - cmd = 
" ".join( - [ - f"{sys.executable}", - f'"{template_path}{templates_repo}/neurons/validator.py"', - "--no_prompt", - "--netuid", - "1", - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - alice_wallet.path, - "--wallet.name", - alice_wallet.name, - "--wallet.hotkey", - "default", - "--logging.trace", - ] - ) - # run validator in the background - - await asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - logging.info("Alice neuron is now validating") - await asyncio.sleep( - 5 - ) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data - - # register validator with root network - alice_exec_command( - RootRegisterCommand, - [ - "root", - "register", - "--netuid", - "1", - ], - ) - - # Alice to stake to become to top neuron after the first epoch - alice_exec_command( - StakeCommand, - [ - "stake", - "add", - "--amount", - "10000", - ], - ) - - # get latest metagraph - metagraph = bittensor.metagraph(netuid=1, network="ws://localhost:9945") - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - # assert bob has old hotkey - bob_neuron = metagraph.neurons[1] - - # get current number of hotkeys - wallet_tree = bob_exec_command(ListCommand, ["w", "list"], "get_tree") - num_hotkeys = len(wallet_tree.children[0].children) - - assert bob_neuron.coldkey == "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty" - assert bob_neuron.hotkey == bob_old_hotkey_address - assert bob_neuron.hotkey == bob_neuron.coldkey - assert bob_neuron.coldkey == subtensor.get_hotkey_owner(bob_old_hotkey_address) - assert subtensor.is_hotkey_delegate(bob_neuron.hotkey) is False - assert ( - subtensor.is_hotkey_registered_on_subnet( - hotkey_ss58=bob_neuron.hotkey, netuid=1 - ) - is True - ) - assert ( - subtensor.get_uid_for_hotkey_on_subnet(hotkey_ss58=bob_neuron.hotkey, netuid=1) - == bob_neuron.uid - ) - if num_hotkeys > 1: - 
logging.info(f"You have {num_hotkeys} hotkeys for Bob.") - - # generate new guid name for hotkey - new_hotkey_name = str(uuid.uuid4()) - - # create a new hotkey - bob_exec_command( - NewHotkeyCommand, - [ - "w", - "new_hotkey", - "--wallet.name", - bob_wallet.name, - "--wallet.hotkey", - new_hotkey_name, - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - # wait rate limit, until we are allowed to change hotkeys - rate_limit = subtensor.tx_rate_limit() - curr_block = subtensor.get_current_block() - await wait_interval(rate_limit + curr_block + 1, subtensor) - - # swap hotkey - bob_exec_command( - SwapHotkeyCommand, - [ - "w", - "swap_hotkey", - "--wallet.name", - bob_wallet.name, - "--wallet.hotkey", - bob_wallet.hotkey_str, - "--wallet.hotkey_b", - new_hotkey_name, - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - # get latest metagraph - metagraph = bittensor.metagraph(netuid=1, network="ws://localhost:9945") - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - # assert bob has new hotkey - bob_neuron = metagraph.neurons[1] - wallet_tree = bob_exec_command(ListCommand, ["w", "list"], "get_tree") - new_num_hotkeys = len(wallet_tree.children[0].children) - - assert ( - bob_neuron.coldkey == "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty" - ) # cold key didn't change - assert ( - bob_neuron.hotkey != bob_old_hotkey_address - ), "Old and New hotkeys are the same" - assert ( - bob_neuron.hotkey != bob_neuron.coldkey - ), "Hotkey is still the same as coldkey" - assert bob_neuron.coldkey == subtensor.get_hotkey_owner( - bob_neuron.hotkey - ), "Coldkey is not the owner of the new hotkey" # new key is owner - assert ( - subtensor.is_hotkey_delegate(bob_neuron.hotkey) is False - ) # new key is delegate ?? 
- assert ( # new key is registered on subnet - subtensor.is_hotkey_registered_on_subnet( - hotkey_ss58=bob_neuron.hotkey, netuid=1 - ) - is True - ) - assert ( # old key is NOT registered on subnet - subtensor.is_hotkey_registered_on_subnet( - hotkey_ss58=bob_old_hotkey_address, netuid=1 - ) - is False - ) - assert ( # uid is unchanged - subtensor.get_uid_for_hotkey_on_subnet(hotkey_ss58=bob_neuron.hotkey, netuid=1) - == bob_neuron.uid - ), "UID for Bob changed on the subnet" - assert new_num_hotkeys == num_hotkeys + 1, "Total hotkeys are not as expected" - logging.info("Passed test_swap_hotkey_miner") diff --git a/tests/e2e_tests/subcommands/root/__init__.py b/tests/e2e_tests/subcommands/root/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/subcommands/root/test_root_delegate_list.py b/tests/e2e_tests/subcommands/root/test_root_delegate_list.py deleted file mode 100644 index 998bc90574..0000000000 --- a/tests/e2e_tests/subcommands/root/test_root_delegate_list.py +++ /dev/null @@ -1,24 +0,0 @@ -from bittensor import logging -from bittensor.commands.delegates import ListDelegatesCommand - -from ...utils import setup_wallet - - -# delegate seems hard code the network config -def test_root_delegate_list(local_chain, capsys): - logging.info("Testing test_root_delegate_list") - alice_keypair, exec_command, wallet = setup_wallet("//Alice") - - # 1200 hardcoded block gap - exec_command( - ListDelegatesCommand, - ["root", "list_delegates"], - ) - - captured = capsys.readouterr() - lines = captured.out.splitlines() - - # the command print too many lines - # To:do - Find a better to validate list delegates - assert len(lines) > 200 - logging.info("Passed test_root_delegate_list") diff --git a/tests/e2e_tests/subcommands/root/test_root_register_add_member_senate.py b/tests/e2e_tests/subcommands/root/test_root_register_add_member_senate.py deleted file mode 100644 index 7d45e5abcb..0000000000 --- 
a/tests/e2e_tests/subcommands/root/test_root_register_add_member_senate.py +++ /dev/null @@ -1,120 +0,0 @@ -import bittensor -from bittensor import logging -from bittensor.commands import ( - NominateCommand, - RegisterCommand, - RegisterSubnetworkCommand, - RootRegisterCommand, - SetTakeCommand, - StakeCommand, -) -from bittensor.commands.senate import SenateCommand - -from ...utils import setup_wallet - - -def test_root_register_add_member_senate(local_chain, capsys): - logging.info("Testing test_root_register_add_member_senate") - # Register root as Alice - the subnet owner - alice_keypair, exec_command, wallet = setup_wallet("//Alice") - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - # Register a neuron to the subnet - exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - ], - ) - - # Stake to become to top neuron after the first epoch - exec_command( - StakeCommand, - [ - "stake", - "add", - "--amount", - "10000", - ], - ) - - exec_command(NominateCommand, ["root", "nominate"]) - - exec_command(SetTakeCommand, ["r", "set_take", "--take", "0.8"]) - - captured = capsys.readouterr() - # Verify subnet 1 created successfully - assert local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() - # Query local chain for senate members - members = local_chain.query("SenateMembers", "Members").serialize() - assert len(members) == 3, f"Expected 3 senate members, found {len(members)}" - - # Assert subtensor has 3 senate members - subtensor = bittensor.subtensor(network="ws://localhost:9945") - sub_senate = len(subtensor.get_senate_members()) - assert ( - sub_senate == 3 - ), f"Root senate expected 3 members but found {sub_senate} instead." 
- - # Execute command and capture output - exec_command( - SenateCommand, - ["root", "senate"], - ) - - captured = capsys.readouterr() - - # assert output is graph Titling "Senate" with names and addresses - assert "Senate" in captured.out - assert "NAME" in captured.out - assert "ADDRESS" in captured.out - assert "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" in captured.out - assert "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" in captured.out - assert "5HGjWAeFDfFCWPsjFQdVV2Msvz2XtMktvgocEZcCj68kUMaw" in captured.out - - exec_command( - RootRegisterCommand, - [ - "root", - "register", - "--wallet.hotkey", - "default", - "--wallet.name", - "default", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - # sudo_call_add_senate_member(local_chain, wallet) - - members = local_chain.query("SenateMembers", "Members").serialize() - assert len(members) == 4, f"Expected 4 senate members, found {len(members)}" - - # Assert subtensor has 4 senate members - subtensor = bittensor.subtensor(network="ws://localhost:9945") - sub_senate = len(subtensor.get_senate_members()) - assert ( - sub_senate == 4 - ), f"Root senate expected 3 members but found {sub_senate} instead." 
- - exec_command( - SenateCommand, - ["root", "senate"], - ) - - captured = capsys.readouterr() - - # assert output is graph Titling "Senate" with names and addresses - - assert "Senate" in captured.out - assert "NAME" in captured.out - assert "ADDRESS" in captured.out - assert "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" in captured.out - assert "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" in captured.out - assert "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" in captured.out - assert "5HGjWAeFDfFCWPsjFQdVV2Msvz2XtMktvgocEZcCj68kUMaw" in captured.out diff --git a/tests/e2e_tests/subcommands/root/test_root_senate_vote.py b/tests/e2e_tests/subcommands/root/test_root_senate_vote.py deleted file mode 100644 index e08df94072..0000000000 --- a/tests/e2e_tests/subcommands/root/test_root_senate_vote.py +++ /dev/null @@ -1,49 +0,0 @@ -from bittensor import logging -from bittensor.commands.root import RootRegisterCommand -from bittensor.commands.senate import VoteCommand - -from ...utils import ( - call_add_proposal, - setup_wallet, -) - - -def test_root_senate_vote(local_chain, capsys, monkeypatch): - logging.info("Testing test_root_senate_vote") - keypair, exec_command, wallet = setup_wallet("//Alice") - monkeypatch.setattr("rich.prompt.Confirm.ask", lambda self: True) - - exec_command( - RootRegisterCommand, - ["root", "register"], - ) - - members = local_chain.query("Triumvirate", "Members") - proposals = local_chain.query("Triumvirate", "Proposals").serialize() - - assert len(members) == 3, f"Expected 3 Triumvirate members, found {len(members)}" - assert ( - len(proposals) == 0 - ), f"Expected 0 initial Triumvirate proposals, found {len(proposals)}" - - call_add_proposal(local_chain, wallet) - - proposals = local_chain.query("Triumvirate", "Proposals").serialize() - - assert ( - len(proposals) == 1 - ), f"Expected 1 proposal in the Triumvirate after addition, found {len(proposals)}" - proposal_hash = proposals[0] - - exec_command( - VoteCommand, - 
["root", "senate_vote", "--proposal", proposal_hash], - ) - - voting = local_chain.query("Triumvirate", "Voting", [proposal_hash]).serialize() - - assert len(voting["ayes"]) == 1, f"Expected 1 ayes, found {len(voting['ayes'])}" - assert ( - voting["ayes"][0] == wallet.hotkey.ss58_address - ), "wallet hotkey address doesn't match 'ayes' address" - logging.info("Passed test_root_senate_vote") diff --git a/tests/e2e_tests/subcommands/root/test_root_view_proposal.py b/tests/e2e_tests/subcommands/root/test_root_view_proposal.py deleted file mode 100644 index 9de8296e52..0000000000 --- a/tests/e2e_tests/subcommands/root/test_root_view_proposal.py +++ /dev/null @@ -1,45 +0,0 @@ -import bittensor -from bittensor import logging -from bittensor.commands.senate import ProposalsCommand - -from ...utils import ( - call_add_proposal, - setup_wallet, -) - - -def test_root_view_proposal(local_chain, capsys): - logging.info("Testing test_root_view_proposal") - keypair, exec_command, wallet = setup_wallet("//Alice") - - proposals = local_chain.query("Triumvirate", "Proposals").serialize() - - assert len(proposals) == 0, "Proposals are not 0" - - call_add_proposal(local_chain, wallet) - - proposals = local_chain.query("Triumvirate", "Proposals").serialize() - - assert len(proposals) == 1, "Added proposal not found" - - exec_command( - ProposalsCommand, - ["root", "proposals"], - ) - - simulated_output = [ - "📡 Syncing with chain: local ...", - " Proposals Active Proposals: 1 Senate Size: 3 ", - "HASH C
", - "0x78b8a348690f565efe3730cd8189f7388c0a896b6fd090276639c9130c0eba47 r
", - " \x00) ", - " ", - ] - - captured = capsys.readouterr() - output = captured.out - - for expected_line in simulated_output: - assert ( - expected_line in output - ), f"Expected '{expected_line}' to be in the output" diff --git a/tests/e2e_tests/subcommands/stake/__init__.py b/tests/e2e_tests/subcommands/stake/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/subcommands/stake/test_childkeys.py b/tests/e2e_tests/subcommands/stake/test_childkeys.py deleted file mode 100644 index 080d01263d..0000000000 --- a/tests/e2e_tests/subcommands/stake/test_childkeys.py +++ /dev/null @@ -1,472 +0,0 @@ -import bittensor -import pytest -from bittensor.commands import ( - RegisterCommand, - StakeCommand, - RegisterSubnetworkCommand, - SetChildrenCommand, - RevokeChildrenCommand, - GetChildrenCommand, -) -from bittensor.commands.stake import SetChildKeyTakeCommand, GetChildKeyTakeCommand -from bittensor.extrinsics.staking import prepare_child_proportions -from tests.e2e_tests.utils import setup_wallet, wait_interval - - -@pytest.mark.asyncio -async def test_set_revoke_children_multiple(local_chain, capsys): - """ - Test the setting and revoking of children hotkeys for staking. - - This test case covers the following scenarios: - 1. Setting multiple children hotkeys with specified proportions - 2. Retrieving children information - 3. Revoking all children hotkeys - 4. Verifying the absence of children after revocation - - The test uses three wallets (Alice, Bob, and Eve) and performs operations - on a local blockchain. 
- - Args: - local_chain: A fixture providing access to the local blockchain - capsys: A pytest fixture for capturing stdout and stderr - - The test performs the following steps: - - Set up wallets for Alice, Bob, and Eve - - Create a subnet and register wallets - - Add stake to Alice's wallet - - Set Bob and Eve as children of Alice with specific proportions - - Verify the children are set correctly - - Get and verify children information - - Revoke all children - - Verify children are revoked - - Check that no children exist after revocation - - This test ensures the proper functioning of setting children hotkeys, - retrieving children information, and revoking children in the staking system. - """ - # Setup - alice_keypair, alice_exec_command, alice_wallet = setup_wallet("//Alice") - bob_keypair, bob_exec_command, bob_wallet = setup_wallet("//Bob") - eve_keypair, eve_exec_command, eve_wallet = setup_wallet("//Eve") - - alice_exec_command(RegisterSubnetworkCommand, ["s", "create"]) - assert local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() - - for exec_command in [alice_exec_command, bob_exec_command, eve_exec_command]: - exec_command(RegisterCommand, ["s", "register", "--netuid", "1"]) - - alice_exec_command(StakeCommand, ["stake", "add", "--amount", "100000"]) - - async def wait(): - # wait rate limit, until we are allowed to get children - - rate_limit = ( - subtensor.query_constant( - module_name="SubtensorModule", constant_name="InitialTempo" - ).value - * 2 - ) - curr_block = subtensor.get_current_block() - await wait_interval(rate_limit + curr_block + 1, subtensor) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - await wait() - - children_with_proportions = [ - [0.4, bob_keypair.ss58_address], - [0.2, eve_keypair.ss58_address], - ] - - # Test 1: Set multiple children - alice_exec_command( - SetChildrenCommand, - [ - "stake", - "set_children", - "--netuid", - "1", - "--children", - 
f"{children_with_proportions[0][1]},{children_with_proportions[1][1]}", - "--hotkey", - str(alice_keypair.ss58_address), - "--proportions", - f"{children_with_proportions[0][0]},{children_with_proportions[1][0]}", - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - await wait() - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - children_info = subtensor.get_children(hotkey=alice_keypair.ss58_address, netuid=1) - - assert len(children_info) == 2, "Failed to set children hotkeys" - - normalized_proportions = prepare_child_proportions(children_with_proportions) - assert ( - children_info[0][0] == normalized_proportions[0][0] - and children_info[1][0] == normalized_proportions[1][0] - ), "Incorrect proportions set" - - # Test 2: Get children information - alice_exec_command( - GetChildrenCommand, - [ - "stake", - "get_children", - "--netuid", - "1", - "--hotkey", - str(alice_keypair.ss58_address), - ], - ) - output = capsys.readouterr().out - assert "5FHne
 │ 40.000%" in output - assert "5HGjW
 │ 20.000%" in output - assert "Total │ 60.000%" in output - - await wait() - - # Test 3: Revoke all children - alice_exec_command( - RevokeChildrenCommand, - [ - "stake", - "revoke_children", - "--netuid", - "1", - "--hotkey", - str(alice_keypair.ss58_address), - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - await wait() - - assert ( - subtensor.get_children(netuid=1, hotkey=alice_keypair.ss58_address) == [] - ), "Failed to revoke children hotkeys" - - await wait() - # Test 4: Get children after revocation - alice_exec_command( - GetChildrenCommand, - [ - "stake", - "get_children", - "--netuid", - "1", - "--hotkey", - str(alice_keypair.ss58_address), - ], - ) - output = capsys.readouterr().out - assert "There are currently no child hotkeys on subnet" in output - - -@pytest.mark.asyncio -async def test_set_revoke_childkey_take(local_chain, capsys): - """ - Test the setting and retrieving of childkey take amounts for staking. - - This test case covers the following scenarios: - 1. Setting a childkey take amount for a specific hotkey - 2. Retrieving the childkey take amount - 3. Verifying the retrieved childkey take amount - - The test uses one wallet (Alice) and performs operations - on a local blockchain. - - Args: - local_chain: A fixture providing access to the local blockchain - capsys: A pytest fixture for capturing stdout and stderr - - The test performs the following steps: - - Set up wallets for Alice, Bob, and Eve - - Create a subnet and register wallets - - Set a childkey take amount for Alice - - Verify the setting operation was successful - - Retrieve the set childkey take amount - - Verify the retrieved amount is correct - - This test ensures the proper functioning of setting and retrieving - childkey take amounts in the staking system. 
- """ - # Setup - alice_keypair, alice_exec_command, alice_wallet = setup_wallet("//Alice") - - alice_exec_command(RegisterSubnetworkCommand, ["s", "create"]) - assert local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() - - for exec_command in [alice_exec_command]: - exec_command(RegisterCommand, ["s", "register", "--netuid", "1"]) - - # Test 1: Set multiple children - alice_exec_command( - SetChildKeyTakeCommand, - [ - "stake", - "set_childkey_take", - "--netuid", - "1", - "--hotkey", - str(alice_keypair.ss58_address), - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--take", - "0.12", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - output = capsys.readouterr().out - assert ( - "The childkey take for 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY is now \nset to 12.000%." - in output - ) - - # Test 1: Set multiple children - alice_exec_command( - GetChildKeyTakeCommand, - [ - "stake", - "get_childkey_take", - "--netuid", - "1", - "--hotkey", - str(alice_keypair.ss58_address), - ], - ) - - output = capsys.readouterr().out - assert ( - "The childkey take for 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY is \n12.000%." - in output - ) - - -@pytest.mark.asyncio -async def test_set_revoke_children_singular(local_chain, capsys): - """ - Test the setting and revoking of children hotkeys for staking. - - This test case covers the following scenarios: - 1. Setting multiple children hotkeys with specified proportions (set one at a time) - 2. Retrieving children information - 3. Revoking children hotkeys (one at a time) - 4. Verifying the absence of children after revocation - - The test uses three wallets (Alice, Bob, and Eve) and performs operations - on a local blockchain. 
- - Args: - local_chain: A fixture providing access to the local blockchain - capsys: A pytest fixture for capturing stdout and stderr - - The test performs the following steps: - - Set up wallets for Alice, Bob, and Eve - - Create a subnet and register wallets - - Add stake to Alice's wallet - - Set Bob and Eve as children of Alice with specific proportions - - Verify the children are set correctly - - Get and verify children information - - Revoke all children - - Verify children are revoked - - Check that no children exist after revocation - - This test ensures the proper functioning of setting children hotkeys, - retrieving children information, and revoking children in the staking system. - """ - # Setup - alice_keypair, alice_exec_command, alice_wallet = setup_wallet("//Alice") - bob_keypair, bob_exec_command, bob_wallet = setup_wallet("//Bob") - eve_keypair, eve_exec_command, eve_wallet = setup_wallet("//Eve") - - alice_exec_command(RegisterSubnetworkCommand, ["s", "create"]) - assert local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() - - for exec_command in [alice_exec_command, bob_exec_command, eve_exec_command]: - exec_command(RegisterCommand, ["s", "register", "--netuid", "1"]) - - alice_exec_command(StakeCommand, ["stake", "add", "--amount", "100000"]) - - async def wait(): - # wait rate limit, until we are allowed to get children - - rate_limit = ( - subtensor.query_constant( - module_name="SubtensorModule", constant_name="InitialTempo" - ).value - * 2 - ) - curr_block = subtensor.get_current_block() - await wait_interval(rate_limit + curr_block + 1, subtensor) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - await wait() - - children_with_proportions = [ - [0.6, bob_keypair.ss58_address], - [0.4, eve_keypair.ss58_address], - ] - - # Test 1: Set first children - alice_exec_command( - SetChildrenCommand, - [ - "stake", - "set_children", - "--netuid", - "1", - "--children", - f"{children_with_proportions[0][1]}", 
- "--hotkey", - str(alice_keypair.ss58_address), - "--proportions", - f"{children_with_proportions[0][0]}", - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - output = capsys.readouterr().out - assert "5FHne
 │ 60.000%" in output - - await wait() - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - children_info = subtensor.get_children(hotkey=alice_keypair.ss58_address, netuid=1) - - assert len(children_info) == 1, "Failed to set child hotkeys" - - # Test 2: Set second child - alice_exec_command( - SetChildrenCommand, - [ - "stake", - "set_children", - "--netuid", - "1", - "--children", - f"{children_with_proportions[1][1]}", - "--hotkey", - str(alice_keypair.ss58_address), - "--proportions", - f"{children_with_proportions[1][0]}", - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - await wait() - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - children_info = subtensor.get_children(hotkey=alice_keypair.ss58_address, netuid=1) - - assert len(children_info) == 1, "Failed to set child hotkey" - - # Test 2: Get children information - alice_exec_command( - GetChildrenCommand, - [ - "stake", - "get_children", - "--netuid", - "1", - "--hotkey", - str(alice_keypair.ss58_address), - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - ], - ) - output = capsys.readouterr().out - assert "5HGjW
 │ 40.000%" in output - - await wait() - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - children_info = subtensor.get_children(hotkey=alice_keypair.ss58_address, netuid=1) - assert len(children_info) == 1, "Failed to revoke child hotkey" - - # Test 4: Revoke second child - alice_exec_command( - RevokeChildrenCommand, - [ - "stake", - "revoke_children", - "--netuid", - "1", - "--hotkey", - str(alice_keypair.ss58_address), - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - await wait() - subtensor = bittensor.subtensor(network="ws://localhost:9945") - children_info = subtensor.get_children(hotkey=alice_keypair.ss58_address, netuid=1) - assert len(children_info) == 0, "Failed to revoke child hotkey" - - # Test 4: Get children after revocation - alice_exec_command( - GetChildrenCommand, - [ - "stake", - "get_children", - "--netuid", - "1", - "--hotkey", - str(alice_keypair.ss58_address), - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - ], - ) - output = capsys.readouterr().out - assert ( - "There are currently no child hotkeys on subnet 1 with Parent HotKey \n5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY." 
- in output - ) diff --git a/tests/e2e_tests/subcommands/stake/test_stake_add_remove.py b/tests/e2e_tests/subcommands/stake/test_stake_add_remove.py deleted file mode 100644 index 2598f1feaa..0000000000 --- a/tests/e2e_tests/subcommands/stake/test_stake_add_remove.py +++ /dev/null @@ -1,81 +0,0 @@ -from bittensor import logging -from bittensor.commands.network import RegisterSubnetworkCommand -from bittensor.commands.register import RegisterCommand -from bittensor.commands.stake import StakeCommand -from bittensor.commands.unstake import UnStakeCommand - -from ...utils import ( - setup_wallet, - sudo_call_set_network_limit, - sudo_call_set_target_stakes_per_interval, -) - - -def test_stake_add(local_chain): - logging.info("Testing test_stake_add") - alice_keypair, exec_command, wallet = setup_wallet("//Alice") - assert sudo_call_set_network_limit( - local_chain, wallet - ), "Unable to set network limit" - assert sudo_call_set_target_stakes_per_interval( - local_chain, wallet - ), "Unable to set target stakes per interval" - - assert not ( - local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() - ), "Subnet was found in netuid 1" - - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - assert local_chain.query( - "SubtensorModule", "NetworksAdded", [1] - ).serialize(), "Subnet 1 was successfully added" - - assert ( - local_chain.query( - "SubtensorModule", "LastTxBlock", [wallet.hotkey.ss58_address] - ).serialize() - == 0 - ), "LastTxBlock is not 0" - - assert ( - local_chain.query( - "SubtensorModule", "LastTxBlockDelegateTake", [wallet.hotkey.ss58_address] - ).serialize() - == 0 - ), "LastTxBlockDelegateTake is not 0" - - exec_command(RegisterCommand, ["s", "register", "--netuid", "1"]) - - assert ( - local_chain.query( - "SubtensorModule", "TotalHotkeyStake", [wallet.hotkey.ss58_address] - ).serialize() - == 0 - ), "TotalHotkeyStake is not 0" - - stake_amount = 2 - exec_command(StakeCommand, ["stake", "add", "--amount", 
str(stake_amount)]) - exact_stake = local_chain.query( - "SubtensorModule", "TotalHotkeyStake", [wallet.hotkey.ss58_address] - ).serialize() - withdraw_loss = 1_000_000 - stake_amount_in_rao = stake_amount * 1_000_000_000 - - assert ( - stake_amount_in_rao - withdraw_loss < exact_stake <= stake_amount_in_rao - ), f"Stake amount mismatch: expected {exact_stake} to be between {stake_amount_in_rao - withdraw_loss} and {stake_amount_in_rao}" - - # we can test remove after set the stake rate limit larger than 1 - remove_amount = 1 - - exec_command(UnStakeCommand, ["stake", "remove", "--amount", str(remove_amount)]) - total_hotkey_stake = local_chain.query( - "SubtensorModule", "TotalHotkeyStake", [wallet.hotkey.ss58_address] - ).serialize() - expected_stake = exact_stake - remove_amount * 1_000_000_000 - assert ( - total_hotkey_stake == expected_stake - ), f"Unstake amount mismatch: expected {expected_stake}, but got {total_hotkey_stake}" - - logging.info("Passed test_stake_add") diff --git a/tests/e2e_tests/subcommands/stake/test_stake_show.py b/tests/e2e_tests/subcommands/stake/test_stake_show.py deleted file mode 100644 index af155ffc2b..0000000000 --- a/tests/e2e_tests/subcommands/stake/test_stake_show.py +++ /dev/null @@ -1,37 +0,0 @@ -from bittensor import logging -from bittensor.commands.stake import StakeShow - -from ...utils import setup_wallet - - -def test_stake_show(local_chain, capsys): - logging.info("Testing test_stake_show") - keypair, exec_command, wallet = setup_wallet("//Alice") - - # Execute the command - exec_command(StakeShow, ["stake", "show"]) - captured = capsys.readouterr() - output = captured.out - - # Check the header line - assert "Coldkey" in output, "Output missing 'Coldkey'." - assert "Balance" in output, "Output missing 'Balance'." - assert "Account" in output, "Output missing 'Account'." - assert "Stake" in output, "Output missing 'Stake'." - assert "Rate" in output, "Output missing 'Rate'." 
- - # Check the first line of data - assert "default" in output, "Output missing 'default'." - assert "1000000.000000" in output.replace( - "τ", "" - ), "Output missing '1000000.000000'." - - # Check the second line of data - assert "0.000000" in output.replace("τ", ""), "Output missing '0.000000'." - assert "0/d" in output, "Output missing '0/d'." - - # Check the third line of data - - assert "1000000.00000" in output.replace("τ", ""), "Output missing '1000000.00000'." - assert "0.00000" in output.replace("τ", ""), "Output missing '0.00000'." - assert "0.00000/d" in output.replace("τ", ""), "Output missing '0.00000/d'." diff --git a/tests/e2e_tests/subcommands/subnet/__init__.py b/tests/e2e_tests/subcommands/subnet/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/subcommands/subnet/test_list.py b/tests/e2e_tests/subcommands/subnet/test_list.py deleted file mode 100644 index 74b79a2dfb..0000000000 --- a/tests/e2e_tests/subcommands/subnet/test_list.py +++ /dev/null @@ -1,29 +0,0 @@ -import bittensor -from bittensor.commands import RegisterSubnetworkCommand -from tests.e2e_tests.utils import setup_wallet - -""" -Test the list command before and after registering subnets. 
- -Verify that: -* list of subnets gets displayed -------------------------- -* Register a subnets -* Ensure is visible in list cmd -""" - - -def test_list_command(local_chain, capsys): - # Register root as Alice - keypair, exec_command, wallet = setup_wallet("//Alice") - - netuid = 0 - - assert local_chain.query("SubtensorModule", "NetworksAdded", [netuid]).serialize() - - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - netuid - 1 - - # Verify subnet 1 created successfully - assert local_chain.query("SubtensorModule", "NetworksAdded", [netuid]).serialize() diff --git a/tests/e2e_tests/subcommands/subnet/test_metagraph.py b/tests/e2e_tests/subcommands/subnet/test_metagraph.py deleted file mode 100644 index e8e18ef617..0000000000 --- a/tests/e2e_tests/subcommands/subnet/test_metagraph.py +++ /dev/null @@ -1,122 +0,0 @@ -import bittensor -from bittensor import logging -from bittensor.commands import ( - MetagraphCommand, - RegisterCommand, - RegisterSubnetworkCommand, -) -from tests.e2e_tests.utils import setup_wallet - -""" -Test the metagraph command before and after registering neurons. 
- -Verify that: -* Metagraph gets displayed -* Initially empty -------------------------- -* Register 2 neurons one by one -* Ensure both are visible in metagraph -""" - - -def test_metagraph_command(local_chain, capsys): - logging.info("Testing test_metagraph_command") - # Register root as Alice - keypair, exec_command, wallet = setup_wallet("//Alice") - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - # Verify subnet 1 created successfully - assert local_chain.query( - "SubtensorModule", "NetworksAdded", [1] - ).serialize(), "Subnet wasn't created successfully" - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - metagraph = subtensor.metagraph(netuid=1) - - # Assert metagraph is empty - assert len(metagraph.uids) == 0, "Metagraph is not empty" - - # Execute btcli metagraph command - exec_command(MetagraphCommand, ["subnet", "metagraph", "--netuid", "1"]) - - captured = capsys.readouterr() - - # Assert metagraph is printed for netuid 1 - - assert ( - "Metagraph: net: local:1" in captured.out - ), "Netuid 1 was not displayed in metagraph" - - # Register Bob as neuron to the subnet - bob_keypair, bob_exec_command, bob_wallet = setup_wallet("//Bob") - bob_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - ], - ) - - captured = capsys.readouterr() - - # Assert neuron was registered - - assert "✅ Registered" in captured.out, "Neuron was not registered" - - # Refresh the metagraph - metagraph = subtensor.metagraph(netuid=1) - - # Assert metagraph has registered neuron - assert len(metagraph.uids) == 1, "Metagraph doesn't have exactly 1 neuron" - assert ( - metagraph.hotkeys[0] == "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty" - ), "Neuron's hotkey in metagraph doesn't match" - # Execute btcli metagraph command - exec_command(MetagraphCommand, ["subnet", "metagraph", "--netuid", "1"]) - - captured = capsys.readouterr() - - # Assert the neuron is registered and displayed - assert ( - "Metagraph: net: 
local:1" and "N: 1/1" in captured.out - ), "Neuron isn't displayed in metagraph" - - # Register Dave as neuron to the subnet - dave_keypair, dave_exec_command, dave_wallet = setup_wallet("//Dave") - dave_exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - ], - ) - - captured = capsys.readouterr() - - # Assert neuron was registered - - assert "✅ Registered" in captured.out, "Neuron was not registered" - - # Refresh the metagraph - metagraph = subtensor.metagraph(netuid=1) - - # Assert metagraph has registered neuron - assert len(metagraph.uids) == 2 - assert ( - metagraph.hotkeys[1] == "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" - ), "Neuron's hotkey in metagraph doesn't match" - - # Execute btcli metagraph command - exec_command(MetagraphCommand, ["subnet", "metagraph", "--netuid", "1"]) - - captured = capsys.readouterr() - - # Assert the neuron is registered and displayed - assert "Metagraph: net: local:1" and "N: 2/2" in captured.out - - logging.info("Passed test_metagraph_command") diff --git a/tests/e2e_tests/subcommands/wallet/__init__.py b/tests/e2e_tests/subcommands/wallet/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/subcommands/wallet/test_faucet.py b/tests/e2e_tests/subcommands/wallet/test_faucet.py deleted file mode 100644 index 64ae2b7f86..0000000000 --- a/tests/e2e_tests/subcommands/wallet/test_faucet.py +++ /dev/null @@ -1,92 +0,0 @@ -import pytest - -import bittensor -from bittensor import logging -from bittensor.commands import ( - RegisterCommand, - RegisterSubnetworkCommand, - RunFaucetCommand, -) -from tests.e2e_tests.utils import ( - setup_wallet, -) - - -@pytest.mark.skip -@pytest.mark.parametrize("local_chain", [False], indirect=True) -def test_faucet(local_chain): - logging.info("Testing test_faucet") - # Register root as Alice - keypair, exec_command, wallet = setup_wallet("//Alice") - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - # Verify 
subnet 1 created successfully - assert local_chain.query( - "SubtensorModule", "NetworksAdded", [1] - ).serialize(), "Subnet wasn't created successfully" - - # Register a neuron to the subnet - exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--no_prompt", - ], - ) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - # verify current balance - wallet_balance = subtensor.get_balance(keypair.ss58_address) - assert wallet_balance.tao == 998999.0, "Balance wasn't as expected" - - # run faucet 3 times - for i in range(3): - logging.info(f"faucet run #:{i + 1}") - try: - exec_command( - RunFaucetCommand, - [ - "wallet", - "faucet", - "--wallet.name", - wallet.name, - "--wallet.hotkey", - "default", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - logging.info( - f"wallet balance is {subtensor.get_balance(keypair.ss58_address).tao} tao" - ) - except SystemExit as e: - logging.warning( - "Block not generated fast enough to be within 3 block seconds window." 
- ) - # Handle the SystemExit exception - assert e.code == 1 # Assert that the exit code is 1 - except Exception as e: - logging.warning(f"Unexpected exception occurred on faucet: {e}") - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - new_wallet_balance = subtensor.get_balance(keypair.ss58_address) - # verify balance increase - assert ( - wallet_balance.tao < new_wallet_balance.tao - ), "Old wallet balance is not less than the new wallet" - logging.info("Passed test_faucet") diff --git a/tests/e2e_tests/subcommands/wallet/test_list.py b/tests/e2e_tests/subcommands/wallet/test_list.py deleted file mode 100644 index 15f34514b0..0000000000 --- a/tests/e2e_tests/subcommands/wallet/test_list.py +++ /dev/null @@ -1,72 +0,0 @@ -from bittensor.commands.list import ListCommand -from bittensor.commands.wallets import WalletCreateCommand -from bittensor.subtensor import subtensor - -from ...utils import setup_wallet - - -def test_wallet_list(capsys): - """ - Test the listing of wallets in the Bittensor network. - - Steps: - 1. Set up a default wallet - 2. List existing wallets and verify the default setup - 3. Create a new wallet - 4. 
List wallets again and verify the new wallet is present - - Raises: - AssertionError: If any of the checks or verifications fail - """ - - wallet_path_name = "//Alice" - base_path = f"/tmp/btcli-e2e-wallet-list-{wallet_path_name.strip('/')}" - keypair, exec_command, wallet = setup_wallet(wallet_path_name) - - # List initial wallets - exec_command( - ListCommand, - [ - "wallet", - "list", - ], - ) - - captured = capsys.readouterr() - # Assert the default wallet is present in the display - assert "default" in captured.out - assert "└── default" in captured.out - - # Create a new wallet - exec_command( - WalletCreateCommand, - [ - "wallet", - "create", - "--wallet.name", - "new_wallet", - "--wallet.hotkey", - "new_hotkey", - "--no_password", - "--overwrite_coldkey", - "--overwrite_hotkey", - "--no_prompt", - "--wallet.path", - base_path, - ], - ) - - # List wallets again - exec_command( - ListCommand, - [ - "wallet", - "list", - ], - ) - - captured = capsys.readouterr() - - # Verify the new wallet is displayed - assert "new_wallet" in captured.out - assert "new_hotkey" in captured.out diff --git a/tests/e2e_tests/subcommands/wallet/test_transfer.py b/tests/e2e_tests/subcommands/wallet/test_transfer.py deleted file mode 100644 index 9d1bd2692c..0000000000 --- a/tests/e2e_tests/subcommands/wallet/test_transfer.py +++ /dev/null @@ -1,35 +0,0 @@ -from bittensor import logging -from bittensor.commands.transfer import TransferCommand - -from ...utils import setup_wallet - - -# Example test using the local_chain fixture -def test_transfer(local_chain): - logging.info("Testing test_transfer") - keypair, exec_command, wallet = setup_wallet("//Alice") - - acc_before = local_chain.query("System", "Account", [keypair.ss58_address]) - exec_command( - TransferCommand, - [ - "wallet", - "transfer", - "--amount", - "2", - "--dest", - "5GpzQgpiAKHMWNSH3RN4GLf96GVTDct9QxYEFAY7LWcVzTbx", - ], - ) - acc_after = local_chain.query("System", "Account", [keypair.ss58_address]) - - 
expected_transfer = 2_000_000_000 - tolerance = 200_000 # Tx fee tolerance - - actual_difference = ( - acc_before.value["data"]["free"] - acc_after.value["data"]["free"] - ) - assert ( - expected_transfer <= actual_difference <= expected_transfer + tolerance - ), f"Expected transfer with tolerance: {expected_transfer} <= {actual_difference} <= {expected_transfer + tolerance}" - logging.info("Passed test_transfer") diff --git a/tests/e2e_tests/subcommands/wallet/test_wallet_creations.py b/tests/e2e_tests/subcommands/wallet/test_wallet_creations.py deleted file mode 100644 index 78a235ad25..0000000000 --- a/tests/e2e_tests/subcommands/wallet/test_wallet_creations.py +++ /dev/null @@ -1,505 +0,0 @@ -import os -import re -import time -from typing import Dict, Optional, Tuple - -from bittensor import logging -from bittensor.commands.list import ListCommand -from bittensor.commands.wallets import ( - NewColdkeyCommand, - NewHotkeyCommand, - RegenColdkeyCommand, - RegenColdkeypubCommand, - RegenHotkeyCommand, - WalletCreateCommand, -) -from bittensor.subtensor import subtensor - -from ...utils import setup_wallet - -""" -Verify commands: - -* btcli w list -* btcli w create -* btcli w new_coldkey -* btcli w new_hotkey -* btcli w regen_coldkey -* btcli w regen_coldkeypub -* btcli w regen_hotkey -""" - - -def verify_wallet_dir( - base_path: str, - wallet_name: str, - hotkey_name: Optional[str] = None, - coldkeypub_name: Optional[str] = None, -) -> Tuple[bool, str]: - """ - Verifies the existence of wallet directory, coldkey, and optionally the hotkey. - - Args: - base_path (str): The base directory path where wallets are stored. - wallet_name (str): The name of the wallet directory to verify. - hotkey_name (str, optional): The name of the hotkey file to verify. If None, - only the wallet and coldkey file are checked. - coldkeypub_name (str, optional): The name of the coldkeypub file to verify. 
If None - only the wallet and coldkey is checked - - Returns: - tuple: Returns a tuple containing a boolean and a message. The boolean is True if - all checks pass, otherwise False. - """ - wallet_path = os.path.join(base_path, wallet_name) - - # Check if wallet directory exists - if not os.path.isdir(wallet_path): - return False, f"Wallet directory {wallet_name} not found in {base_path}" - - # Check if coldkey file exists - coldkey_path = os.path.join(wallet_path, "coldkey") - if not os.path.isfile(coldkey_path): - return False, f"Coldkey file not found in {wallet_name}" - - # Check if coldkeypub exists - if coldkeypub_name: - coldkeypub_path = os.path.join(wallet_path, coldkeypub_name) - if not os.path.isfile(coldkeypub_path): - return False, f"Coldkeypub file not found in {wallet_name}" - - # Check if hotkey directory and file exists - if hotkey_name: - hotkeys_path = os.path.join(wallet_path, "hotkeys") - if not os.path.isdir(hotkeys_path): - return False, f"Hotkeys directory not found in {wallet_name}" - - hotkey_file_path = os.path.join(hotkeys_path, hotkey_name) - if not os.path.isfile(hotkey_file_path): - return ( - False, - f"Hotkey file {hotkey_name} not found in {wallet_name}/hotkeys", - ) - - return True, f"Wallet {wallet_name} verified successfully" - - -def verify_key_pattern(output: str, wallet_name: str) -> Optional[str]: - """ - Verifies that a specific wallet key pattern exists in the output text. - - Args: - output (str): The string output where the wallet key should be verified. - wallet_name (str): The name of the wallet to search for in the output. - - Raises: - AssertionError: If the wallet key pattern is not found, or if the key does not - start with '5', or if the key is not exactly 48 characters long. 
- """ - split_output = output.splitlines() - pattern = rf"{wallet_name}\s*\((5[A-Za-z0-9]{{47}})\)" - found = False - - # Traverse each line to find instance of the pattern - for line in split_output: - match = re.search(pattern, line) - if match: - # Assert key starts with '5' - assert match.group(1).startswith( - "5" - ), f"{wallet_name} should start with '5'" - # Assert length of key is 48 characters - assert ( - len(match.group(1)) == 48 - ), f"Key for {wallet_name} should be 48 characters long" - found = True - return match.group(1) - - # If no match is found in any line, raise an assertion error - assert found, f"{wallet_name} not found in wallet list" - return None - - -def extract_ss58_address(output: str, wallet_name: str) -> str: - """ - Extracts the ss58 address from the given output for a specified wallet. - - Args: - output (str): The captured output. - wallet_name (str): The name of the wallet. - - Returns: - str: ss58 address. - """ - pattern = rf"{wallet_name}\s*\((5[A-Za-z0-9]{{47}})\)" - lines = output.splitlines() - for line in lines: - match = re.search(pattern, line) - if match: - return match.group(1) # Return the ss58 address - - raise ValueError(f"ss58 address not found for wallet {wallet_name}") - - -def extract_mnemonics_from_commands(output: str) -> Dict[str, Optional[str]]: - """ - Extracts mnemonics of coldkeys & hotkeys from the given output for a specified wallet. - - Args: - output (str): The captured output. - - Returns: - dict: A dictionary keys 'coldkey' and 'hotkey', each containing their mnemonics. 
- """ - mnemonics: Dict[str, Optional[str]] = {"coldkey": None, "hotkey": None} - lines = output.splitlines() - - # Regex pattern to capture the mnemonic - pattern = re.compile(r"btcli w regen_(coldkey|hotkey) --mnemonic ([a-z ]+)") - - for line in lines: - line = line.strip().lower() - match = pattern.search(line) - if match: - key_type = match.group(1) # 'coldkey' or 'hotkey' - mnemonic_phrase = match.group(2).strip() - mnemonics[key_type] = mnemonic_phrase - - return mnemonics - - -def test_wallet_creations(local_chain: subtensor, capsys): - """ - Test the creation and verification of wallet keys and directories in the Bittensor network. - - Steps: - 1. List existing wallets and verify the default setup. - 2. Create a new wallet with both coldkey and hotkey, verify their presence in the output, - and check their physical existence. - 3. Create a new coldkey and verify both its display in the command line output and its physical file. - 4. Create a new hotkey for an existing coldkey, verify its display in the command line output, - and check for both coldkey and hotkey files. 
- - Raises: - AssertionError: If any of the checks or verifications fail - """ - - logging.info("Testing test_wallet_creations (create, new_hotkey, new_coldkey)") - wallet_path_name = "//Alice" - base_path = f"/tmp/btcli-e2e-wallet-{wallet_path_name.strip('/')}" - keypair, exec_command, wallet = setup_wallet(wallet_path_name) - - exec_command( - ListCommand, - [ - "wallet", - "list", - ], - ) - - captured = capsys.readouterr() - # Assert the coldkey and hotkey are present in the display with keys - assert ( - "default" and "└── default" in captured.out - ), "Default wallet not found in wallet list" - wallet_status, message = verify_wallet_dir( - base_path, "default", hotkey_name="default" - ) - assert wallet_status, message - - # ----------------------------- - # Command 1: - # ----------------------------- - # Create a new wallet (coldkey + hotkey) - logging.info("Testing wallet create command") - exec_command( - WalletCreateCommand, - [ - "wallet", - "create", - "--wallet.name", - "new_wallet", - "--wallet.hotkey", - "new_hotkey", - "--no_password", - "--overwrite_coldkey", - "--overwrite_hotkey", - "--no_prompt", - "--wallet.path", - base_path, - ], - ) - - captured = capsys.readouterr() - - # List the wallets - exec_command( - ListCommand, - [ - "wallet", - "list", - ], - ) - - captured = capsys.readouterr() - - # Verify coldkey "new_wallet" is displayed with key - verify_key_pattern(captured.out, "new_wallet") - - # Verify hotkey "new_hotkey" is displayed with key - verify_key_pattern(captured.out, "new_hotkey") - - # Physically verify "new_wallet" and "new_hotkey" are present - wallet_status, message = verify_wallet_dir( - base_path, "new_wallet", hotkey_name="new_hotkey" - ) - assert wallet_status, message - - # ----------------------------- - # Command 2: - # ----------------------------- - # Create a new wallet (coldkey) - logging.info("Testing wallet new_coldkey command") - exec_command( - NewColdkeyCommand, - [ - "wallet", - "new_coldkey", - 
"--wallet.name", - "new_coldkey", - "--no_password", - "--no_prompt", - "--overwrite_coldkey", - "--wallet.path", - base_path, - ], - ) - - captured = capsys.readouterr() - - # List the wallets - exec_command( - ListCommand, - [ - "wallet", - "list", - ], - ) - - captured = capsys.readouterr() - - # Verify coldkey "new_coldkey" is displayed with key - verify_key_pattern(captured.out, "new_coldkey") - - # Physically verify "new_coldkey" is present - wallet_status, message = verify_wallet_dir(base_path, "new_coldkey") - assert wallet_status, message - - # ----------------------------- - # Command 3: - # ----------------------------- - # Create a new hotkey for alice_new_coldkey wallet - logging.info("Testing wallet new_hotkey command") - exec_command( - NewHotkeyCommand, - [ - "wallet", - "new_hotkey", - "--wallet.name", - "new_coldkey", - "--wallet.hotkey", - "new_hotkey", - "--no_prompt", - "--overwrite_hotkey", - "--wallet.path", - base_path, - ], - ) - - captured = capsys.readouterr() - - # List the wallets - exec_command( - ListCommand, - [ - "wallet", - "list", - ], - ) - captured = capsys.readouterr() - - # Verify hotkey "alice_new_hotkey" is displyed with key - verify_key_pattern(captured.out, "new_hotkey") - - # Physically verify "alice_new_coldkey" and "alice_new_hotkey" are present - wallet_status, message = verify_wallet_dir( - base_path, "new_coldkey", hotkey_name="new_hotkey" - ) - assert wallet_status, message - logging.info("Passed test_wallet_creations") - - -def test_wallet_regen(local_chain: subtensor, capsys): - """ - Test the regeneration of coldkeys, hotkeys, and coldkeypub files using mnemonics or ss58 address. - - Steps: - 1. List existing wallets and verify the default setup. - 2. Regenerate the coldkey using the mnemonics and verify using mod time. - 3. Regenerate the coldkeypub using ss58 address and verify using mod time - 4. Regenerate the hotkey using mnemonics and verify using mod time. 
- - Raises: - AssertionError: If any of the checks or verifications fail - """ - logging.info( - "Testing test_wallet_regen (regen_coldkey, regen_hotkey, regen_coldkeypub)" - ) - wallet_path_name = "//Bob" - base_path = f"/tmp/btcli-e2e-wallet-{wallet_path_name.strip('/')}" - keypair, exec_command, wallet = setup_wallet(wallet_path_name) - - # Create a new wallet (coldkey + hotkey) - exec_command( - WalletCreateCommand, - [ - "wallet", - "create", - "--wallet.name", - "new_wallet", - "--wallet.hotkey", - "new_hotkey", - "--no_password", - "--overwrite_coldkey", - "--overwrite_hotkey", - "--no_prompt", - "--wallet.path", - base_path, - ], - ) - - captured = capsys.readouterr() - mnemonics = extract_mnemonics_from_commands(captured.out) - - wallet_status, message = verify_wallet_dir( - base_path, - "new_wallet", - hotkey_name="new_hotkey", - coldkeypub_name="coldkeypub.txt", - ) - assert wallet_status, message # Ensure wallet exists - - # ----------------------------- - # Command 1: - # ----------------------------- - - logging.info("Testing w regen_coldkey") - coldkey_path = os.path.join(base_path, "new_wallet", "coldkey") - initial_coldkey_mod_time = os.path.getmtime(coldkey_path) - - exec_command( - RegenColdkeyCommand, - [ - "wallet", - "regen_coldkey", - "--wallet.name", - "new_wallet", - "--wallet.path", - base_path, - "--no_prompt", - "--overwrite_coldkey", - "--mnemonic", - mnemonics["coldkey"], - "--no_password", - ], - ) - - # Wait a bit to ensure file system updates modification time - time.sleep(1) - - new_coldkey_mod_time = os.path.getmtime(coldkey_path) - - assert ( - initial_coldkey_mod_time != new_coldkey_mod_time - ), "Coldkey file was not regenerated as expected" - - # ----------------------------- - # Command 2: - # ----------------------------- - - logging.info("Testing w regen_coldkeypub") - coldkeypub_path = os.path.join(base_path, "new_wallet", "coldkeypub.txt") - initial_coldkeypub_mod_time = os.path.getmtime(coldkeypub_path) - - # List the 
wallets - exec_command( - ListCommand, - [ - "wallet", - "list", - ], - ) - captured = capsys.readouterr() - ss58_address = extract_ss58_address(captured.out, "new_wallet") - - exec_command( - RegenColdkeypubCommand, - [ - "wallet", - "regen_coldkeypub", - "--wallet.name", - "new_wallet", - "--wallet.path", - base_path, - "--no_prompt", - "--overwrite_coldkeypub", - "--ss58_address", - ss58_address, - ], - ) - - # Wait a bit to ensure file system updates modification time - time.sleep(1) - - new_coldkeypub_mod_time = os.path.getmtime(coldkeypub_path) - - assert ( - initial_coldkeypub_mod_time != new_coldkeypub_mod_time - ), "Coldkeypub file was not regenerated as expected" - - # ----------------------------- - # Command 3: - # ----------------------------- - - logging.info("Testing w regen_hotkey") - hotkey_path = os.path.join(base_path, "new_wallet", "hotkeys", "new_hotkey") - initial_hotkey_mod_time = os.path.getmtime(hotkey_path) - - exec_command( - RegenHotkeyCommand, - [ - "wallet", - "regen_hotkey", - "--no_prompt", - "--overwrite_hotkey", - "--wallet.name", - "new_wallet", - "--wallet.hotkey", - "new_hotkey", - "--wallet.path", - base_path, - "--mnemonic", - mnemonics["hotkey"], - ], - ) - - # Wait a bit to ensure file system updates modification time - time.sleep(1) - - new_hotkey_mod_time = os.path.getmtime(hotkey_path) - - assert ( - initial_hotkey_mod_time != new_hotkey_mod_time - ), "Hotkey file was not regenerated as expected" - - logging.info("Passed test_wallet_regen") diff --git a/tests/e2e_tests/subcommands/weights/__init__.py b/tests/e2e_tests/subcommands/weights/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/e2e_tests/subcommands/weights/test_commit_weights.py b/tests/e2e_tests/subcommands/weights/test_commit_weights.py deleted file mode 100644 index c53746be81..0000000000 --- a/tests/e2e_tests/subcommands/weights/test_commit_weights.py +++ /dev/null @@ -1,246 +0,0 @@ -import asyncio -import re - -import 
numpy as np -import pytest - -import bittensor -import bittensor.utils.weight_utils as weight_utils -from bittensor import logging -from bittensor.commands import ( - CommitWeightCommand, - RegisterCommand, - RegisterSubnetworkCommand, - RevealWeightCommand, - StakeCommand, - SubnetSudoCommand, -) -from tests.e2e_tests.utils import setup_wallet, wait_interval - -""" -Test the Commit/Reveal weights mechanism. - -Verify that: -* Weights are commited -* weights are hashed with salt ---- after an epoch --- -* weights are un-hashed with salt -* weights are properly revealed - -""" - - -@pytest.mark.asyncio -async def test_commit_and_reveal_weights(local_chain): - logging.info("Testing test_commit_and_reveal_weights") - # Register root as Alice - keypair, exec_command, wallet = setup_wallet("//Alice") - - exec_command(RegisterSubnetworkCommand, ["s", "create"]) - - # define values - weights = 0.1 - uid = 0 - salt = "18, 179, 107, 0, 165, 211, 141, 197" - - # Verify subnet 1 created successfully - assert local_chain.query( - "SubtensorModule", "NetworksAdded", [1] - ).serialize(), "Subnet wasn't created successfully" - - # Register a neuron to the subnet - exec_command( - RegisterCommand, - [ - "s", - "register", - "--netuid", - "1", - ], - ) - - # Stake to become to top neuron after the first epoch - exec_command( - StakeCommand, - [ - "stake", - "add", - "--amount", - "100000", - ], - ) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - - # Enable Commit Reveal - exec_command( - SubnetSudoCommand, - [ - "sudo", - "set", - "hyperparameters", - "--netuid", - "1", - "--wallet.name", - wallet.name, - "--param", - "commit_reveal_weights_enabled", - "--value", - "True", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - assert subtensor.get_subnet_hyperparameters( - netuid=1 - ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" - - # Lower the 
interval - exec_command( - SubnetSudoCommand, - [ - "sudo", - "set", - "hyperparameters", - "--netuid", - "1", - "--wallet.name", - wallet.name, - "--param", - "commit_reveal_weights_interval", - "--value", - "370", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - assert ( - subtensor.get_subnet_hyperparameters(netuid=1).commit_reveal_weights_interval - == 370 - ), "Failed to set commit/reveal interval" - - # Lower the rate limit - exec_command( - SubnetSudoCommand, - [ - "sudo", - "set", - "hyperparameters", - "--netuid", - "1", - "--wallet.name", - wallet.name, - "--param", - "weights_rate_limit", - "--value", - "0", - "--wait_for_inclusion", - "True", - "--wait_for_finalization", - "True", - ], - ) - - subtensor = bittensor.subtensor(network="ws://localhost:9945") - assert ( - subtensor.get_subnet_hyperparameters(netuid=1).weights_rate_limit == 0 - ), "Failed to set commit/reveal rate limit" - - # Configure the CLI arguments for the CommitWeightCommand - exec_command( - CommitWeightCommand, - [ - "wt", - "commit", - "--no_prompt", - "--netuid", - "1", - "--uids", - str(uid), - "--weights", - str(weights), - "--salt", - str(salt), - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - "/tmp/btcli-wallet", - ], - ) - - weight_commits = subtensor.query_module( - module="SubtensorModule", - name="WeightCommits", - params=[1, wallet.hotkey.ss58_address], - ) - - # Assert that the committed weights are set correctly - assert weight_commits.value is not None, "Weight commit not found in storage" - commit_hash, commit_block = weight_commits.value - assert commit_block > 0, f"Invalid block number: {commit_block}" - - # Query the WeightCommitRevealInterval storage map - weight_commit_reveal_interval = subtensor.query_module( - module="SubtensorModule", name="WeightCommitRevealInterval", params=[1] - ) - interval 
= weight_commit_reveal_interval.value - assert interval > 0, "Invalid WeightCommitRevealInterval" - - # Wait until the reveal block range - await wait_interval(interval, subtensor) - - # Configure the CLI arguments for the RevealWeightCommand - exec_command( - RevealWeightCommand, - [ - "wt", - "reveal", - "--no_prompt", - "--netuid", - "1", - "--uids", - str(uid), - "--weights", - str(weights), - "--salt", - str(salt), - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - "/tmp/btcli-wallet", - ], - ) - - # Query the Weights storage map - revealed_weights = subtensor.query_module( - module="SubtensorModule", - name="Weights", - params=[1, uid], # netuid and uid - ) - - # Assert that the revealed weights are set correctly - assert revealed_weights.value is not None, "Weight reveal not found in storage" - - uid_list = [int(x) for x in re.split(r"[ ,]+", str(uid))] - uids = np.array(uid_list, dtype=np.int64) - weight_list = [float(x) for x in re.split(r"[ ,]+", str(weights))] - weights_array = np.array(weight_list, dtype=np.float32) - weight_uids, expected_weights = weight_utils.convert_weights_and_uids_for_emit( - uids, weights_array - ) - assert ( - expected_weights[0] == revealed_weights.value[0][1] - ), f"Incorrect revealed weights. 
Expected: {expected_weights[0]}, Actual: {revealed_weights.value[0][1]}" - logging.info("Passed test_commit_and_reveal_weights") diff --git a/tests/e2e_tests/utils.py b/tests/e2e_tests/utils.py deleted file mode 100644 index 5a4adc6c95..0000000000 --- a/tests/e2e_tests/utils.py +++ /dev/null @@ -1,214 +0,0 @@ -import logging -import asyncio -import os -import shutil -import subprocess -import sys -import time -from typing import List - -from substrateinterface import SubstrateInterface - -import bittensor -from bittensor import Keypair - -template_path = os.getcwd() + "/neurons/" -templates_repo = "templates repository" - - -def setup_wallet(uri: str): - keypair = Keypair.create_from_uri(uri) - wallet_path = "/tmp/btcli-e2e-wallet-{}".format(uri.strip("/")) - wallet = bittensor.wallet(path=wallet_path) - wallet.set_coldkey(keypair=keypair, encrypt=False, overwrite=True) - wallet.set_coldkeypub(keypair=keypair, encrypt=False, overwrite=True) - wallet.set_hotkey(keypair=keypair, encrypt=False, overwrite=True) - - def exec_command(command, extra_args: List[str], function: str = "run"): - parser = bittensor.cli.__create_parser__() - args = extra_args + [ - "--no_prompt", - "--subtensor.network", - "local", - "--subtensor.chain_endpoint", - "ws://localhost:9945", - "--wallet.path", - wallet_path, - ] - logging.info(f'executing command: {command} {" ".join(args)}') - config = bittensor.config( - parser=parser, - args=args, - ) - cli_instance = bittensor.cli(config) - # Dynamically call the specified function on the command - result = getattr(command, function)(cli_instance) - return result - - return keypair, exec_command, wallet - - -def sudo_call_set_network_limit( - substrate: SubstrateInterface, wallet: bittensor.wallet -) -> bool: - inner_call = substrate.compose_call( - call_module="AdminUtils", - call_function="sudo_set_network_rate_limit", - call_params={"rate_limit": 1}, - ) - call = substrate.compose_call( - call_module="Sudo", - call_function="sudo", - 
call_params={"call": inner_call}, - ) - - extrinsic = substrate.create_signed_extrinsic(call=call, keypair=wallet.coldkey) - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=True, - wait_for_finalization=True, - ) - - response.process_events() - return response.is_success - - -def sudo_call_set_target_stakes_per_interval( - substrate: SubstrateInterface, wallet: bittensor.wallet -) -> bool: - inner_call = substrate.compose_call( - call_module="AdminUtils", - call_function="sudo_set_target_stakes_per_interval", - call_params={"target_stakes_per_interval": 100}, - ) - call = substrate.compose_call( - call_module="Sudo", - call_function="sudo", - call_params={"call": inner_call}, - ) - - extrinsic = substrate.create_signed_extrinsic(call=call, keypair=wallet.coldkey) - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=True, - wait_for_finalization=True, - ) - - response.process_events() - return response.is_success - - -def call_add_proposal(substrate: SubstrateInterface, wallet: bittensor.wallet) -> bool: - proposal_call = substrate.compose_call( - call_module="System", - call_function="remark", - call_params={"remark": [0]}, - ) - call = substrate.compose_call( - call_module="Triumvirate", - call_function="propose", - call_params={ - "proposal": proposal_call, - "length_bound": 100_000, - "duration": 100_000_000, - }, - ) - - extrinsic = substrate.create_signed_extrinsic(call=call, keypair=wallet.coldkey) - response = substrate.submit_extrinsic( - extrinsic, - wait_for_inclusion=True, - wait_for_finalization=True, - ) - - response.process_events() - return response.is_success - - -async def wait_epoch(subtensor, netuid=1): - q_tempo = [ - v.value - for [k, v] in subtensor.query_map_subtensor("Tempo") - if k.value == netuid - ] - if len(q_tempo) == 0: - raise Exception("could not determine tempo") - tempo = q_tempo[0] - logging.info(f"tempo = {tempo}") - await wait_interval(tempo, subtensor, netuid) - - -async def 
wait_interval(tempo, subtensor, netuid=1): - interval = tempo + 1 - current_block = subtensor.get_current_block() - last_epoch = current_block - 1 - (current_block + netuid + 1) % interval - next_tempo_block_start = last_epoch + interval - last_reported = None - while current_block < next_tempo_block_start: - await asyncio.sleep( - 1 - ) # Wait for 1 second before checking the block number again - current_block = subtensor.get_current_block() - if last_reported is None or current_block - last_reported >= 10: - last_reported = current_block - print( - f"Current Block: {current_block} Next tempo for netuid {netuid} at: {next_tempo_block_start}" - ) - logging.info( - f"Current Block: {current_block} Next tempo for netuid {netuid} at: {next_tempo_block_start}" - ) - - -def clone_or_update_templates(): - specific_commit = None - install_dir = template_path - repo_mapping = { - templates_repo: "https://github.com/opentensor/bittensor-subnet-template.git", - } - os.makedirs(install_dir, exist_ok=True) - os.chdir(install_dir) - - for repo, git_link in repo_mapping.items(): - if not os.path.exists(repo): - print(f"\033[94mCloning {repo}...\033[0m") - subprocess.run(["git", "clone", git_link, repo], check=True) - else: - print(f"\033[94mUpdating {repo}...\033[0m") - os.chdir(repo) - subprocess.run(["git", "pull"], check=True) - os.chdir("..") - - # here for pulling specific commit versions of repo - if specific_commit: - os.chdir(templates_repo) - print( - f"\033[94mChecking out commit {specific_commit} in {templates_repo}...\033[0m" - ) - subprocess.run(["git", "checkout", specific_commit], check=True) - os.chdir("..") - - return install_dir + templates_repo + "/" - - -def install_templates(install_dir): - subprocess.check_call([sys.executable, "-m", "pip", "install", install_dir]) - - -def uninstall_templates(install_dir): - # uninstall templates - subprocess.check_call( - [sys.executable, "-m", "pip", "uninstall", "bittensor_subnet_template", "-y"] - ) - # delete 
everything in directory - shutil.rmtree(install_dir) - - -async def write_output_log_to_file(name, stream): - log_file = f"{name}.log" - with open(log_file, "a") as f: - while True: - line = await stream.readline() - if not line: - break - f.write(line.decode()) - f.flush() diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py deleted file mode 100644 index fc9e8ad9d2..0000000000 --- a/tests/helpers/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import os -from .helpers import ( - _get_mock_coldkey, - _get_mock_hotkey, - _get_mock_keypair, - _get_mock_wallet, - CLOSE_IN_VALUE, - MockConsole, - __mock_wallet_factory__, -) - - -def is_running_in_circleci(): - """Checks that tests are running in the app.circleci.com environment.""" - return os.getenv("CIRCLECI") == "true" diff --git a/tests/helpers/helpers.py b/tests/helpers/helpers.py deleted file mode 100644 index 482f59ce2d..0000000000 --- a/tests/helpers/helpers.py +++ /dev/null @@ -1,172 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -from typing import Union -from bittensor import Balance, NeuronInfo, AxonInfo, PrometheusInfo, __ss58_format__ -from bittensor.mock.wallet_mock import MockWallet as _MockWallet -from bittensor.mock.wallet_mock import get_mock_coldkey as _get_mock_coldkey -from bittensor.mock.wallet_mock import get_mock_hotkey as _get_mock_hotkey -from bittensor.mock.wallet_mock import get_mock_keypair as _get_mock_keypair -from bittensor.mock.wallet_mock import get_mock_wallet as _get_mock_wallet - -from rich.console import Console -from rich.text import Text - - -def __mock_wallet_factory__(*args, **kwargs) -> _MockWallet: - """Returns a mock wallet object.""" - - mock_wallet = _get_mock_wallet() - - return mock_wallet - - -class CLOSE_IN_VALUE: - value: Union[float, int, Balance] - tolerance: Union[float, int, Balance] - - def __init__( - self, - value: Union[float, int, Balance], - tolerance: Union[float, int, Balance] = 0.0, - ) -> None: - self.value = value - self.tolerance = tolerance - - def __eq__(self, __o: Union[float, int, Balance]) -> bool: - # True if __o \in [value - tolerance, value + tolerance] - # or if value \in [__o - tolerance, __o + tolerance] - return ( - (self.value - self.tolerance) <= __o - and __o <= (self.value + self.tolerance) - ) or ( - (__o - self.tolerance) <= self.value - and self.value <= (__o + self.tolerance) - ) - - -def get_mock_neuron(**kwargs) -> NeuronInfo: - """ - Returns a mock neuron with the given kwargs overriding the default values. 
- """ - - mock_neuron_d = dict( - { - "netuid": -1, # mock netuid - "axon_info": AxonInfo( - block=0, - version=1, - ip=0, - port=0, - ip_type=0, - protocol=0, - placeholder1=0, - placeholder2=0, - ), - "prometheus_info": PrometheusInfo( - block=0, version=1, ip=0, port=0, ip_type=0 - ), - "validator_permit": True, - "uid": 1, - "hotkey": "some_hotkey", - "coldkey": "some_coldkey", - "active": 0, - "last_update": 0, - "stake": {"some_coldkey": 1e12}, - "total_stake": 1e12, - "rank": 0.0, - "trust": 0.0, - "consensus": 0.0, - "validator_trust": 0.0, - "incentive": 0.0, - "dividends": 0.0, - "emission": 0.0, - "bonds": [], - "weights": [], - "stake_dict": {}, - "pruning_score": 0.0, - "is_null": False, - } - ) - - mock_neuron_d.update(kwargs) # update with kwargs - - if kwargs.get("stake") is None and kwargs.get("coldkey") is not None: - mock_neuron_d["stake"] = {kwargs.get("coldkey"): 1e12} - - if kwargs.get("total_stake") is None: - mock_neuron_d["total_stake"] = sum(mock_neuron_d["stake"].values()) - - mock_neuron = NeuronInfo._neuron_dict_to_namespace(mock_neuron_d) - - return mock_neuron - - -def get_mock_neuron_by_uid(uid: int, **kwargs) -> NeuronInfo: - return get_mock_neuron( - uid=uid, hotkey=_get_mock_hotkey(uid), coldkey=_get_mock_coldkey(uid), **kwargs - ) - - -class MockStatus: - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - pass - - def start(self): - pass - - def stop(self): - pass - - def update(self, *args, **kwargs): - MockConsole().print(*args, **kwargs) - - -class MockConsole: - """ - Mocks the console object for status and print. - Captures the last print output as a string. 
- """ - - captured_print = None - - def status(self, *args, **kwargs): - return MockStatus() - - def print(self, *args, **kwargs): - console = Console( - width=1000, no_color=True, markup=False - ) # set width to 1000 to avoid truncation - console.begin_capture() - console.print(*args, **kwargs) - self.captured_print = console.end_capture() - - def clear(self, *args, **kwargs): - pass - - @staticmethod - def remove_rich_syntax(text: str) -> str: - """ - Removes rich syntax from the given text. - Removes markup and ansi syntax. - """ - output_no_syntax = Text.from_ansi(Text.from_markup(text).plain).plain - - return output_no_syntax diff --git a/tests/integration_tests/__init__.py b/tests/integration_tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/integration_tests/test_cli.py b/tests/integration_tests/test_cli.py deleted file mode 100644 index 6fe1acf3bc..0000000000 --- a/tests/integration_tests/test_cli.py +++ /dev/null @@ -1,2752 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Yuma Rao -# Copyright © 2022-2023 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - - -import contextlib -from copy import deepcopy -import os -import random -import shutil -from types import SimpleNamespace -from typing import Dict -import unittest -from unittest.mock import MagicMock, patch - -import pytest - -import bittensor -from bittensor import Balance -from bittensor.commands.delegates import _get_coldkey_wallets_for_path -from bittensor.commands.identity import SetIdentityCommand -from bittensor.commands.wallets import _get_coldkey_ss58_addresses_for_path -from bittensor.mock import MockSubtensor -from bittensor.wallet import wallet as Wallet -from tests.helpers import ( - is_running_in_circleci, - MockConsole, - _get_mock_keypair, - _get_mock_wallet as generate_wallet, -) - - -_subtensor_mock: MockSubtensor = MockSubtensor() - - -def setUpModule(): - _subtensor_mock.reset() - - _subtensor_mock.create_subnet(netuid=1) - - _subtensor_mock.create_subnet(netuid=2) - - _subtensor_mock.create_subnet(netuid=3) - - # Set diff 0 - _subtensor_mock.set_difficulty(netuid=1, difficulty=0) - - _subtensor_mock.set_difficulty(netuid=2, difficulty=0) - - _subtensor_mock.set_difficulty(netuid=3, difficulty=0) - - -def return_mock_sub(*args, **kwargs): - return MockSubtensor - - -@patch("bittensor.subtensor", new_callable=return_mock_sub) -class TestCLIWithNetworkAndConfig(unittest.TestCase): - def setUp(self): - self._config = TestCLIWithNetworkAndConfig.construct_config() - - @property - def config(self): - copy_ = deepcopy(self._config) - return copy_ - - @staticmethod - def construct_config(): - parser = bittensor.cli.__create_parser__() - defaults = bittensor.config(parser=parser, args=[]) - # Parse commands and subcommands - for command in bittensor.ALL_COMMANDS: - if ( - command in 
bittensor.ALL_COMMANDS - and "commands" in bittensor.ALL_COMMANDS[command] - ): - for subcommand in bittensor.ALL_COMMANDS[command]["commands"]: - defaults.merge( - bittensor.config(parser=parser, args=[command, subcommand]) - ) - else: - defaults.merge(bittensor.config(parser=parser, args=[command])) - - defaults.netuid = 1 - # Always use mock subtensor. - defaults.subtensor.network = "finney" - # Skip version checking. - defaults.no_version_checking = True - - return defaults - - def test_overview(self, _): - if is_running_in_circleci(): - config = self.config - config.wallet.path = "/tmp/test_cli_test_overview" - config.wallet.name = "mock_wallet" - config.command = "wallet" - config.subcommand = "overview" - config.no_prompt = True - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - - mock_hotkeys = ["hk0", "hk1", "hk2", "hk3", "hk4"] - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - coldkeypub_file=MagicMock( - exists_on_device=MagicMock(return_value=True) # Wallet exists - ), - ) - for idx, hk in enumerate(mock_hotkeys) - ] - - mock_registrations = [ - (1, mock_wallets[0]), - (1, mock_wallets[1]), - # (1, mock_wallets[2]), Not registered on netuid 1 - (2, mock_wallets[0]), - # (2, mock_wallets[1]), Not registered on netuid 2 - (2, mock_wallets[2]), - (3, mock_wallets[0]), - (3, mock_wallets[1]), - (3, mock_wallets[2]), # All registered on netuid 3 (but hk3) - (3, mock_wallets[4]), # hk4 is only on netuid 3 - ] # hk3 is not registered on any network - - # Register each wallet to it's subnet. 
- print("Registering wallets to mock subtensor...") - - for netuid, wallet in mock_registrations: - _ = _subtensor_mock.force_register_neuron( - netuid=netuid, - coldkey=wallet.coldkey.ss58_address, - hotkey=wallet.hotkey.ss58_address, - ) - - def mock_get_wallet(*args, **kwargs): - hk = kwargs.get("hotkey") - name_ = kwargs.get("name") - - if not hk and kwargs.get("config"): - hk = kwargs.get("config").wallet.hotkey - if not name_ and kwargs.get("config"): - name_ = kwargs.get("config").wallet.name - - for wallet in mock_wallets: - if wallet.name == name_ and wallet.hotkey_str == hk: - return wallet - else: - for wallet in mock_wallets: - if wallet.name == name_: - return wallet - else: - return mock_wallets[0] - - mock_console = MockConsole() - with patch( - "bittensor.commands.overview.get_hotkey_wallets_for_wallet" - ) as mock_get_all_wallets: - mock_get_all_wallets.return_value = mock_wallets - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - with patch("bittensor.__console__", mock_console): - cli.run() - - # Check that the overview was printed. - self.assertIsNotNone(mock_console.captured_print) - - output_no_syntax = mock_console.remove_rich_syntax( - mock_console.captured_print - ) - - # Check that each subnet was printed. - self.assertIn("Subnet: 1", output_no_syntax) - self.assertIn("Subnet: 2", output_no_syntax) - self.assertIn("Subnet: 3", output_no_syntax) - - # Check that only registered hotkeys are printed once for each subnet. - for wallet in mock_wallets: - expected = [ - wallet.hotkey_str for _, wallet in mock_registrations - ].count(wallet.hotkey_str) - occurrences = output_no_syntax.count(wallet.hotkey_str) - self.assertEqual(occurrences, expected) - - # Check that unregistered hotkeys are not printed. 
- for wallet in mock_wallets: - if wallet not in [w for _, w in mock_registrations]: - self.assertNotIn(wallet.hotkey_str, output_no_syntax) - - def test_overview_not_in_first_subnet(self, _): - if is_running_in_circleci(): - config = self.config - config.wallet.path = "/tmp/test_cli_test_overview" - config.wallet.name = "mock_wallet" - config.command = "wallet" - config.subcommand = "overview" - config.no_prompt = True - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - - mock_hotkeys = ["hk0", "hk1", "hk2", "hk3", "hk4"] - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - coldkeypub_file=MagicMock( - exists_on_device=MagicMock(return_value=True) # Wallet exists - ), - ) - for idx, hk in enumerate(mock_hotkeys) - ] - - mock_registrations = [ - # No registrations in subnet 1 or 2 - (3, mock_wallets[4]) # hk4 is on netuid 3 - ] - - # Register each wallet to it's subnet - print("Registering mock wallets to subnets...") - - for netuid, wallet in mock_registrations: - print( - "Registering wallet {} to subnet {}".format( - wallet.hotkey_str, netuid - ) - ) - _ = _subtensor_mock.force_register_neuron( - netuid=netuid, - coldkey=wallet.coldkey.ss58_address, - hotkey=wallet.hotkey.ss58_address, - ) - - def mock_get_wallet(*args, **kwargs): - hk = kwargs.get("hotkey") - name_ = kwargs.get("name") - - if not hk and kwargs.get("config"): - hk = kwargs.get("config").wallet.hotkey - if not name_ and kwargs.get("config"): - name_ = kwargs.get("config").wallet.name - - for wallet in mock_wallets: - if wallet.name == name_ and wallet.hotkey_str == hk: - return wallet - else: - for wallet in mock_wallets: - if wallet.name == name_: - return wallet - else: - return mock_wallets[0] - - mock_console = MockConsole() - with patch( - 
"bittensor.commands.overview.get_hotkey_wallets_for_wallet" - ) as mock_get_all_wallets: - mock_get_all_wallets.return_value = mock_wallets - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - with patch("bittensor.__console__", mock_console): - cli.run() - - # Check that the overview was printed. - self.assertIsNotNone(mock_console.captured_print) - - output_no_syntax = mock_console.remove_rich_syntax( - mock_console.captured_print - ) - - # Check that each subnet was printed except subnet 1 and 2. - # Subnet 1 and 2 are not printed because no wallet is registered to them. - self.assertNotIn("Subnet: 1", output_no_syntax) - self.assertNotIn("Subnet: 2", output_no_syntax) - self.assertIn("Subnet: 3", output_no_syntax) - - # Check that only registered hotkeys are printed once for each subnet. - for wallet in mock_wallets: - expected = [ - wallet.hotkey_str for _, wallet in mock_registrations - ].count(wallet.hotkey_str) - occurrences = output_no_syntax.count(wallet.hotkey_str) - self.assertEqual(occurrences, expected) - - # Check that unregistered hotkeys are not printed. - for wallet in mock_wallets: - if wallet not in [w for _, w in mock_registrations]: - self.assertNotIn(wallet.hotkey_str, output_no_syntax) - - def test_overview_with_hotkeys_config(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - config.no_prompt = True - config.hotkeys = ["some_hotkey"] - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - cli.run() - - def test_overview_without_hotkeys_config(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - config.no_prompt = True - config.all = False - config.netuid = [] # Don't set, so it tries all networks. 
- - cli = bittensor.cli(config) - cli.run() - - def test_overview_with_sort_by_config(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - config.no_prompt = True - config.wallet.sort_by = "rank" - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - cli.run() - - def test_overview_with_sort_by_bad_column_name(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - config.no_prompt = True - config.wallet.sort_by = "totallynotmatchingcolumnname" - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - cli.run() - - def test_overview_without_sort_by_config(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - config.no_prompt = True - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - cli.run() - - def test_overview_with_sort_order_config(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - config.wallet.sort_order = "desc" # Set descending sort order - config.no_prompt = True - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - cli.run() - - def test_overview_with_sort_order_config_bad_sort_type(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - config.wallet.sort_order = "nowaythisshouldmatchanyorderingchoice" - config.no_prompt = True - config.all = False - config.netuid = [] # Don't set, so it tries all networks. 
- - cli = bittensor.cli(config) - cli.run() - - def test_overview_without_sort_order_config(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - # Don't specify sort_order in config - config.no_prompt = True - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - cli.run() - - def test_overview_with_width_config(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - config.width = 100 - config.no_prompt = True - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - cli.run() - - def test_overview_without_width_config(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - # Don't specify width in config - config.no_prompt = True - config.all = False - config.netuid = [] # Don't set, so it tries all networks. - - cli = bittensor.cli(config) - cli.run() - - def test_overview_all(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "overview" - config.no_prompt = True - config.netuid = [] # Don't set, so it tries all networks. 
- - config.all = True - cli = bittensor.cli(config) - cli.run() - - def test_unstake_with_specific_hotkeys(self, _): - config = self.config - config.command = "stake" - config.subcommand = "remove" - config.no_prompt = True - config.amount = 5.0 - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk0", "hk1", "hk2"] - config.all_hotkeys = False - # Notice no max_stake specified - - mock_stakes: Dict[str, Balance] = { - # All have more than 5.0 stake - "hk0": Balance.from_float(10.0), - "hk1": Balance.from_float(11.1), - "hk2": Balance.from_float(12.2), - } - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(config.hotkeys) - ] - - # Register mock wallets and give them stakes - - for wallet in mock_wallets: - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkey.ss58_address, - stake=mock_stakes[wallet.hotkey_str].rao, - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before unstaking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertEqual(stake.rao, mock_stakes[wallet.hotkey_str].rao) - - cli.run() - - # Check stakes after unstaking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - 
self.assertAlmostEqual( - stake.tao, - mock_stakes[wallet.hotkey_str].tao - config.amount, - places=4, - ) - - def test_unstake_with_all_hotkeys(self, _): - config = self.config - config.command = "stake" - config.subcommand = "remove" - config.no_prompt = True - config.amount = 5.0 - config.wallet.name = "fake_wallet" - # Notice wallet.hotkeys not specified - config.all_hotkeys = True - # Notice no max_stake specified - - mock_stakes: Dict[str, Balance] = { - # All have more than 5.0 stake - "hk0": Balance.from_float(10.0), - "hk1": Balance.from_float(11.1), - "hk2": Balance.from_float(12.2), - } - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(list(mock_stakes.keys())) - ] - - # Register mock wallets and give them stakes - - for wallet in mock_wallets: - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkey.ss58_address, - stake=mock_stakes[wallet.hotkey_str].rao, - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch( - "bittensor.commands.unstake.get_hotkey_wallets_for_wallet" - ) as mock_get_all_wallets: - mock_get_all_wallets.return_value = mock_wallets - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before unstaking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertEqual(stake.rao, mock_stakes[wallet.hotkey_str].rao) - - cli.run() - - # Check stakes after unstaking - 
for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertAlmostEqual( - stake.tao, - mock_stakes[wallet.hotkey_str].tao - config.amount, - places=4, - ) - - def test_unstake_with_exclude_hotkeys_from_all(self, _): - config = self.config - config.command = "stake" - config.subcommand = "remove" - config.no_prompt = True - config.amount = 5.0 - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk1"] # Exclude hk1 - config.all_hotkeys = True - - mock_stakes: Dict[str, Balance] = { - # All have more than 5.0 stake - "hk0": Balance.from_float(10.0), - "hk1": Balance.from_float(11.1), - "hk2": Balance.from_float(12.2), - } - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(list(mock_stakes.keys())) - ] - - # Register mock wallets and give them stakes - - for wallet in mock_wallets: - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkey.ss58_address, - stake=mock_stakes[wallet.hotkey_str].rao, - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch( - "bittensor.commands.unstake.get_hotkey_wallets_for_wallet" - ) as mock_get_all_wallets: - mock_get_all_wallets.return_value = mock_wallets - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before unstaking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - 
coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertEqual(stake.rao, mock_stakes[wallet.hotkey_str].rao) - - cli.run() - - # Check stakes after unstaking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - if wallet.hotkey_str == "hk1": - # hk1 should not have been unstaked - self.assertAlmostEqual( - stake.tao, mock_stakes[wallet.hotkey_str].tao, places=4 - ) - else: - self.assertAlmostEqual( - stake.tao, - mock_stakes[wallet.hotkey_str].tao - config.amount, - places=4, - ) - - def test_unstake_with_multiple_hotkeys_max_stake(self, _): - config = self.config - config.command = "stake" - config.subcommand = "remove" - config.no_prompt = True - # Notie amount is not specified - config.max_stake = 5.0 # The keys should have at most 5.0 tao staked after - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk0", "hk1", "hk2"] - config.all_hotkeys = False - - mock_stakes: Dict[str, Balance] = { - # All have more than 5.0 stake - "hk0": Balance.from_float(10.0), - "hk1": Balance.from_float(4.9), - "hk2": Balance.from_float(12.2), - } - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(list(mock_stakes.keys())) - ] - - # Register mock wallets and give them stakes - print("Registering mock wallets...") - - for wallet in mock_wallets: - print("Registering mock wallet {}".format(wallet.hotkey_str)) - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkey.ss58_address, - stake=mock_stakes[wallet.hotkey_str].rao, - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if 
wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch( - "bittensor.commands.unstake.get_hotkey_wallets_for_wallet" - ) as mock_get_all_wallets: - mock_get_all_wallets.return_value = mock_wallets - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before unstaking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertEqual(stake.rao, mock_stakes[wallet.hotkey_str].rao) - - cli.run() - - # Check stakes after unstaking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # All should have been unstaked below or equal to max_stake - self.assertLessEqual( - stake.tao, config.max_stake + 0.0001 - ) # Add a small buffer for fp errors - - if wallet.hotkey_str == "hk1": - # hk1 should not have been unstaked because it was already below max_stake - self.assertAlmostEqual( - stake.tao, mock_stakes[wallet.hotkey_str].tao, places=4 - ) - - def test_unstake_with_thresholds(self, _): - config = self.config - config.command = "stake" - config.subcommand = "remove" - config.no_prompt = True - # as the minimum required stake may change, this method allows us to dynamically - # update the amount in the mock without updating the tests - min_stake: Balance = _subtensor_mock.get_minimum_required_stake() - # Must be a float - config.amount = min_stake.tao # Unstake below the minimum required stake - wallet_names = ["w0", "w1", "w2"] - config.all_hotkeys = False - # Notice no max_stake specified - - mock_stakes: Dict[str, Balance] = { - "w0": 2 * min_stake - 1, # remaining stake will be below the threshold - "w1": 2 * min_stake - 2, - "w2": 2 * min_stake - 5, - } - - mock_wallets = [ - 
SimpleNamespace( - name=wallet_name, - coldkey=_get_mock_keypair(idx, self.id()), - coldkeypub=_get_mock_keypair(idx, self.id()), - hotkey_str="hk{}".format(idx), # doesn't matter - hotkey=_get_mock_keypair(idx + 100, self.id()), # doesn't matter - ) - for idx, wallet_name in enumerate(wallet_names) - ] - - delegate_hotkey = mock_wallets[0].hotkey.ss58_address - - # Register mock neuron, only for w0 - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=delegate_hotkey, - coldkey=mock_wallets[0].coldkey.ss58_address, - stake=mock_stakes["w0"], - ) - - # Become a delegate - _ = _subtensor_mock.nominate( - wallet=mock_wallets[0], - ) - - # Stake to the delegate with the other coldkeys - for wallet in mock_wallets[1:]: - # Give balance - _ = _subtensor_mock.force_set_balance( - ss58_address=wallet.coldkeypub.ss58_address, - balance=( - mock_stakes[wallet.name] + _subtensor_mock.get_existential_deposit() - ).tao - + 1.0, - ) - _ = _subtensor_mock.add_stake( - wallet=wallet, - hotkey_ss58=delegate_hotkey, - amount=mock_stakes[wallet.name], - ) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("config") and kwargs["config"].get("wallet"): - for wallet in mock_wallets: - if wallet.name == kwargs["config"].wallet.name: - return wallet - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - for wallet in mock_wallets: - # Check stakes before unstaking - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=delegate_hotkey, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertEqual(stake.rao, mock_stakes[wallet.name].rao) - - config.wallet.name = wallet.name - config.hotkey_ss58address = delegate_hotkey # Single unstake - - cli = bittensor.cli(config) - with patch.object(_subtensor_mock, "_do_unstake") as mock_unstake: - with patch( - "bittensor.__console__.print" - ) as mock_print: # Catch console print - cli.run() - - # Filter for console print calls - console_prints = [ - 
call[0][0] for call in mock_print.call_args_list - ] - minimum_print = filter( - lambda x: "less than minimum of" in x, console_prints - ) - - unstake_calls = mock_unstake.call_args_list - self.assertEqual(len(unstake_calls), 1) # Only one unstake call - - _, kwargs = unstake_calls[0] - # Verify delegate was unstaked - self.assertEqual(kwargs["hotkey_ss58"], delegate_hotkey) - self.assertEqual(kwargs["wallet"].name, wallet.name) - - if wallet.name == "w0": - # This wallet owns the delegate - # Should unstake specified amount - self.assertEqual( - kwargs["amount"], bittensor.Balance(config.amount) - ) - # No warning for w0 - self.assertRaises( - StopIteration, next, minimum_print - ) # No warning for w0 - else: - # Should unstake *all* the stake - staked = mock_stakes[wallet.name] - self.assertEqual(kwargs["amount"], staked) - - # Check warning was printed - _ = next( - minimum_print - ) # Doesn't raise, so the warning was printed - - def test_unstake_all(self, _): - config = self.config - config.command = "stake" - config.subcommand = "remove" - config.no_prompt = True - config.amount = 0.0 # 0 implies full unstake - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk0"] - config.all_hotkeys = False - - mock_stakes: Dict[str, Balance] = {"hk0": Balance.from_float(10.0)} - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(config.hotkeys) - ] - - # Register mock wallets and give them stakes - - for wallet in mock_wallets: - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkey.ss58_address, - stake=mock_stakes[wallet.hotkey_str].rao, - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if 
wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before unstaking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertEqual(stake.rao, mock_stakes[wallet.hotkey_str].rao) - - cli.run() - - # Check stakes after unstaking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # because the amount is less than the threshold, none of these should unstake - self.assertEqual(stake.tao, Balance.from_tao(0)) - - def test_stake_with_specific_hotkeys(self, _): - config = self.config - config.command = "stake" - config.subcommand = "add" - config.no_prompt = True - config.amount = 5.0 - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk0", "hk1", "hk2"] - config.all_hotkeys = False - # Notice no max_stake specified - - mock_balance = Balance.from_float(22.2) - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(config.hotkeys) - ] - - # Register mock wallets and give them balances - print("Registering mock wallets...") - - for wallet in mock_wallets: - print("Registering mock wallet {}".format(wallet.hotkey_str)) - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkey.ss58_address, - ) - - success, err = _subtensor_mock.force_set_balance( - ss58_address=mock_coldkey_kp.ss58_address, balance=mock_balance.rao - ) - - cli = bittensor.cli(config) - - def 
mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertEqual(stake.rao, 0) - - cli.run() - - # Check stakes after staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertAlmostEqual(stake.tao, config.amount, places=4) - - def test_stake_with_all_hotkeys(self, _): - config = self.config - config.command = "stake" - config.subcommand = "add" - config.no_prompt = True - config.amount = 5.0 - config.wallet.name = "fake_wallet" - # Notice wallet.hotkeys is not specified - config.all_hotkeys = True - # Notice no max_stake specified - - mock_hotkeys = ["hk0", "hk1", "hk2"] - - mock_balance = Balance.from_float(22.0) - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(mock_hotkeys) - ] - - # Register mock wallets and give them no stake - print("Registering mock wallets...") - - for wallet in mock_wallets: - print("Registering mock wallet {}".format(wallet.hotkey_str)) - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - ) - - # Set the coldkey balance - success, err = _subtensor_mock.force_set_balance( - ss58_address=mock_coldkey_kp.ss58_address, 
balance=mock_balance.rao - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - with patch( - "bittensor.commands.stake.get_hotkey_wallets_for_wallet" - ) as mock_get_hotkey_wallets_for_wallet: - mock_get_hotkey_wallets_for_wallet.return_value = mock_wallets - - # Check stakes before staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # Check that all stakes are 0 - self.assertEqual(stake.rao, 0) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - - self.assertAlmostEqual(balance.tao, mock_balance.tao, places=4) - - cli.run() - - # Check stakes after staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # Check that all stakes are 5.0 - self.assertAlmostEqual(stake.tao, config.amount, places=4) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - self.assertAlmostEqual( - balance.tao, - mock_balance.tao - (config.amount * len(mock_wallets)), - places=4, - ) - - def test_stake_with_exclude_hotkeys_from_all(self, _): - config = self.config - config.command = "stake" - config.subcommand = "add" - config.no_prompt = True - config.amount = 5.0 - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk1"] # exclude hk1 - config.all_hotkeys = True - # Notice no max_stake specified - - mock_hotkeys = ["hk0", "hk1", "hk2"] - - mock_balance = Balance.from_float(25.0) - - 
mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(mock_hotkeys) - ] - - # Register mock wallets and give them balances - print("Registering mock wallets...") - - for wallet in mock_wallets: - print("Registering mock wallet {}".format(wallet.hotkey_str)) - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - ) - - # Set the coldkey balance - _subtensor_mock.force_set_balance( - ss58_address=mock_coldkey_kp.ss58_address, balance=mock_balance.rao - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch( - "bittensor.commands.stake.get_hotkey_wallets_for_wallet" - ) as mock_get_all_wallets: - mock_get_all_wallets.return_value = mock_wallets - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # Check that all stakes are 0 - self.assertEqual(stake.rao, 0) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - - self.assertAlmostEqual(balance.tao, mock_balance.tao, places=4) - - cli.run() - - # Check stakes after staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - - if wallet.hotkey_str == "hk1": - # Check that 
hk1 stake is 0 - # We excluded it from staking - self.assertEqual(stake.tao, 0) - else: - # Check that all stakes are 5.0 - self.assertAlmostEqual(stake.tao, config.amount, places=4) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - self.assertAlmostEqual( - balance.tao, mock_balance.tao - (config.amount * 2), places=4 - ) - - def test_stake_with_multiple_hotkeys_max_stake(self, _): - config = self.config - config.command = "stake" - config.subcommand = "add" - config.no_prompt = True - # Notie amount is not specified - config.max_stake = 15.0 # The keys should have at most 15.0 tao staked after - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk0", "hk1", "hk2"] - config.all_hotkeys = False - - mock_balance = Balance.from_float(config.max_stake * 3) - - mock_stakes: Dict[str, Balance] = { - "hk0": Balance.from_float(0.0), - "hk1": Balance.from_float(config.max_stake * 2), - "hk2": Balance.from_float(0.0), - } - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(config.hotkeys) - ] - - # Register mock wallets and give them balances - print("Registering mock wallets...") - - for wallet in mock_wallets: - print("Registering mock wallet {}".format(wallet.hotkey_str)) - if wallet.hotkey_str == "hk1": - # Set the stake for hk1 - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - stake=mock_stakes[wallet.hotkey_str].rao, - ) - else: - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - ) - - _subtensor_mock.force_set_balance( - ss58_address=mock_coldkey_kp.ss58_address, balance=mock_balance.rao - ) - 
- cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # Check that all stakes are correct - if wallet.hotkey_str == "hk1": - self.assertAlmostEqual(stake.tao, config.max_stake * 2, places=4) - else: - self.assertEqual(stake.rao, 0) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - - self.assertAlmostEqual(balance.tao, mock_balance.tao, places=4) - - cli.run() - - # Check stakes after staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - - # Check that all stakes at least 15.0 - self.assertGreaterEqual(stake.tao + 0.1, config.max_stake) - - if wallet.hotkey_str == "hk1": - # Check that hk1 stake was not changed - # It had more than max_stake already - self.assertAlmostEqual( - stake.tao, mock_stakes[wallet.hotkey_str].tao, places=4 - ) - - # Check that the balance decreased - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - self.assertLessEqual(balance.tao, mock_balance.tao) - - def test_stake_with_multiple_hotkeys_max_stake_not_enough_balance(self, _): - config = self.config - config.command = "stake" - config.subcommand = "add" - config.no_prompt = True - # Notie amount is not specified - config.max_stake = 15.0 # The keys should have at most 15.0 tao staked after - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk0", 
"hk1", "hk2"] - config.all_hotkeys = False - - mock_balance = Balance.from_float(15.0 * 2) # Not enough for all hotkeys - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(config.hotkeys) - ] - - # Register mock wallets and give them balances - print("Registering mock wallets...") - - for wallet in mock_wallets: - print("Registering mock wallet {}".format(wallet.hotkey_str)) - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - ) - - _subtensor_mock.force_set_balance( - ss58_address=mock_coldkey_kp.ss58_address, balance=mock_balance.rao - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # Check that all stakes are 0 - self.assertEqual(stake.rao, 0) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - - self.assertAlmostEqual(balance.tao, mock_balance.tao, places=4) - - cli.run() - - # Check stakes after staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - - if wallet.hotkey_str == "hk2": - # Check that the stake is still 0 - self.assertEqual(stake.tao, 0) - - 
else: - # Check that all stakes are maximum of 15.0 - self.assertLessEqual(stake.tao, config.max_stake) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - self.assertLessEqual(balance.tao, mock_balance.tao) - - def test_stake_with_single_hotkey_max_stake(self, _): - config = self.config - config.command = "stake" - config.subcommand = "add" - config.no_prompt = True - # Notie amount is not specified - config.max_stake = 15.0 # The keys should have at most 15.0 tao staked after - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk0"] - config.all_hotkeys = False - - mock_balance = Balance.from_float(15.0 * 3) - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(config.hotkeys) - ] - - # Register mock wallets and give them balances - print("Registering mock wallets...") - - for wallet in mock_wallets: - print("Registering mock wallet {}".format(wallet.hotkey_str)) - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - ) - - _subtensor_mock.force_set_balance( - ss58_address=mock_coldkey_kp.ss58_address, balance=mock_balance.rao - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # Check 
that all stakes are 0 - self.assertEqual(stake.rao, 0) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - - self.assertAlmostEqual(balance.tao, mock_balance.tao, places=4) - - cli.run() - - # Check stakes after staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - - # Check that all stakes are maximum of 15.0 - self.assertLessEqual(stake.tao, config.max_stake) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - self.assertLessEqual(balance.tao, mock_balance.tao) - - def test_stake_with_single_hotkey_max_stake_not_enough_balance(self, _): - config = self.config - config.command = "stake" - config.subcommand = "add" - config.no_prompt = True - # Notie amount is not specified - config.max_stake = 15.0 # The keys should have at most 15.0 tao staked after - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk0"] - config.all_hotkeys = False - - mock_balance = Balance.from_float(1.0) # Not enough balance to do max - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(config.hotkeys) - ] - - # Register mock wallets and give them balances - print("Registering mock wallets...") - - for wallet in mock_wallets: - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - ) - - _subtensor_mock.force_set_balance( - ss58_address=mock_coldkey_kp.ss58_address, balance=mock_balance.rao - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for 
wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before staking - for wallet in mock_wallets: - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # Check that all stakes are 0 - self.assertEqual(stake.rao, 0) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - - self.assertAlmostEqual(balance.tao, mock_balance.tao, places=4) - - cli.run() - - wallet = mock_wallets[0] - - # Check did not stake - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - - # Check that stake is less than max_stake - 1.0 - self.assertLessEqual(stake.tao, config.max_stake - 1.0) - - # Check that the balance decreased by less than max_stake - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - self.assertGreaterEqual(balance.tao, mock_balance.tao - config.max_stake) - - def test_stake_with_single_hotkey_max_stake_enough_stake(self, _): - # tests max stake when stake >= max_stake already - config = self.config - config.command = "stake" - config.subcommand = "add" - config.no_prompt = True - # Notie amount is not specified - config.max_stake = 15.0 # The keys should have at most 15.0 tao staked after - config.wallet.name = "fake_wallet" - config.hotkeys = ["hk0"] - config.all_hotkeys = False - - mock_balance = Balance.from_float(config.max_stake * 3) - - mock_stakes: Dict[str, Balance] = { # has enough stake, more than max_stake - "hk0": Balance.from_float(config.max_stake * 2) - } - - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - - mock_wallets = [ - SimpleNamespace( - 
name=config.wallet.name, - coldkey=mock_coldkey_kp, - coldkeypub=mock_coldkey_kp, - hotkey_str=hk, - hotkey=_get_mock_keypair(idx + 100, self.id()), - ) - for idx, hk in enumerate(config.hotkeys) - ] - - # Register mock wallets and give them balances - print("Registering mock wallets...") - - for wallet in mock_wallets: - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=wallet.hotkey.ss58_address, - coldkey=wallet.coldkeypub.ss58_address, - stake=mock_stakes[wallet.hotkey_str].rao, # More than max_stake - ) - - success, err = _subtensor_mock.force_set_balance( - ss58_address=mock_coldkey_kp.ss58_address, balance=mock_balance.rao - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("hotkey"): - for wallet in mock_wallets: - if wallet.hotkey_str == kwargs.get("hotkey"): - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - # Check stakes before staking - wallet = mock_wallets[0] - - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - # Check that stake is correct - self.assertAlmostEqual( - stake.tao, mock_stakes[wallet.hotkey_str].tao, places=4 - ) - # Check that the stake is greater than or equal to max_stake - self.assertGreaterEqual(stake.tao, config.max_stake) - - # Check that the balance is correct - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - - self.assertAlmostEqual(balance.tao, mock_balance.tao, places=4) - - cli.run() - - wallet = mock_wallets[0] - - # Check did not stake, since stake >= max_stake - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=wallet.hotkey.ss58_address, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - - # Check that all stake is unchanged - self.assertAlmostEqual( - stake.tao, mock_stakes[wallet.hotkey_str].tao, 
places=4 - ) - - # Check that the balance is the same - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - self.assertAlmostEqual(balance.tao, mock_balance.tao, places=4) - - def test_stake_with_thresholds(self, _): - config = self.config - config.command = "stake" - config.subcommand = "add" - config.no_prompt = True - - min_stake: Balance = _subtensor_mock.get_minimum_required_stake() - # Must be a float - wallet_names = ["w0", "w1", "w2"] - config.all_hotkeys = False - # Notice no max_stake specified - - mock_stakes: Dict[str, Balance] = { - "w0": min_stake - 1, # new stake will be below the threshold - "w1": min_stake - 2, - "w2": min_stake - 5, - } - - mock_wallets = [ - SimpleNamespace( - name=wallet_name, - coldkey=_get_mock_keypair(idx, self.id()), - coldkeypub=_get_mock_keypair(idx, self.id()), - hotkey_str="hk{}".format(idx), # doesn't matter - hotkey=_get_mock_keypair(idx + 100, self.id()), # doesn't matter - ) - for idx, wallet_name in enumerate(wallet_names) - ] - - delegate_hotkey = mock_wallets[0].hotkey.ss58_address - - # Register mock neuron, only for w0 - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=delegate_hotkey, - coldkey=mock_wallets[0].coldkey.ss58_address, - balance=(mock_stakes["w0"] + _subtensor_mock.get_existential_deposit()).tao - + 1.0, - ) # No stake, but enough balance - - # Become a delegate - _ = _subtensor_mock.nominate( - wallet=mock_wallets[0], - ) - - # Give enough balance - for wallet in mock_wallets[1:]: - # Give balance - _ = _subtensor_mock.force_set_balance( - ss58_address=wallet.coldkeypub.ss58_address, - balance=( - mock_stakes[wallet.name] + _subtensor_mock.get_existential_deposit() - ).tao - + 1.0, - ) - - def mock_get_wallet(*args, **kwargs): - if kwargs.get("config") and kwargs["config"].get("wallet"): - for wallet in mock_wallets: - if wallet.name == kwargs["config"].wallet.name: - return wallet - - with patch("bittensor.wallet") as mock_create_wallet: - 
mock_create_wallet.side_effect = mock_get_wallet - - for wallet in mock_wallets: - # Check balances and stakes before staking - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=delegate_hotkey, - coldkey_ss58=wallet.coldkey.ss58_address, - ) - self.assertEqual(stake.rao, 0) # No stake - - balance = _subtensor_mock.get_balance( - address=wallet.coldkeypub.ss58_address - ) - self.assertGreaterEqual( - balance, mock_stakes[wallet.name] - ) # Enough balance - - config.wallet.name = wallet.name - config.wallet.hotkey = delegate_hotkey # Single stake - config.amount = mock_stakes[ - wallet.name - ].tao # Stake an amount below the threshold - - cli = bittensor.cli(config) - with patch.object(_subtensor_mock, "_do_stake") as mock_stake: - with patch( - "bittensor.__console__.print" - ) as mock_print: # Catch console print - cli.run() - - # Filter for console print calls - console_prints = [ - call[0][0] for call in mock_print.call_args_list - ] - minimum_print = filter( - lambda x: "below the minimum required" in x, console_prints - ) - - if wallet.name == "w0": - # This wallet owns the delegate - stake_calls = mock_stake.call_args_list - # Can stake below the threshold - self.assertEqual(len(stake_calls), 1) - - _, kwargs = stake_calls[0] - - # Should stake specified amount - self.assertEqual( - kwargs["amount"], bittensor.Balance(config.amount) - ) - # No error for w0 - self.assertRaises( - StopIteration, next, minimum_print - ) # No warning for w0 - else: - # Should not call stake - self.assertEqual(len(mock_stake.call_args_list), 0) - # Should print error - self.assertIsNotNone(next(minimum_print)) - - def test_nominate(self, _): - config = self.config - config.command = "root" - config.subcommand = "nominate" - config.no_prompt = True - config.wallet.name = "w0" - config.hotkey = "hk0" - - mock_balance = Balance.from_float(100.0) - - mock_wallet = SimpleNamespace( - name="w0", - coldkey=_get_mock_keypair(0, self.id()), - 
coldkeypub=_get_mock_keypair(0, self.id()), - hotkey_str="hk0", - hotkey=_get_mock_keypair(0 + 100, self.id()), - ) - - # Register mock wallet and give it a balance - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=mock_wallet.hotkey.ss58_address, - coldkey=mock_wallet.coldkey.ss58_address, - balance=mock_balance.rao, - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - hk = kwargs.get("hotkey") - name_ = kwargs.get("name") - - if not hk and kwargs.get("config"): - hk = kwargs.get("config").wallet.hotkey - if not name_ and kwargs.get("config"): - name_ = kwargs.get("config").wallet.name - - if mock_wallet.name == name_: - return mock_wallet - else: - raise ValueError("Mock wallet not found") - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - cli.run() - - # Check the nomination - is_delegate = _subtensor_mock.is_hotkey_delegate( - hotkey_ss58=mock_wallet.hotkey.ss58_address - ) - self.assertTrue(is_delegate) - - def test_delegate_stake(self, _): - config = self.config - config.command = "root" - config.subcommand = "delegate" - config.no_prompt = True - config.amount = 5.0 - config.wallet.name = "w1" - - mock_balances: Dict[str, Balance] = { - # All have more than 5.0 stake - "w0": { - "hk0": Balance.from_float(10.0), - }, - "w1": {"hk1": Balance.from_float(11.1)}, - } - - mock_stake = Balance.from_float(5.0) - - mock_wallets = [] - for idx, wallet_name in enumerate(list(mock_balances.keys())): - for idx_hk, hk in enumerate(list(mock_balances[wallet_name].keys())): - wallet = SimpleNamespace( - name=wallet_name, - coldkey=_get_mock_keypair(idx, self.id()), - coldkeypub=_get_mock_keypair(idx, self.id()), - hotkey_str=hk, - hotkey=_get_mock_keypair(idx * 100 + idx_hk, self.id()), - ) - mock_wallets.append(wallet) - - # Set hotkey to be the hotkey from the other wallet - config.delegate_ss58key: str = mock_wallets[0].hotkey.ss58_address - - # Register mock wallets 
and give them balance - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=mock_wallets[0].hotkey.ss58_address, - coldkey=mock_wallets[0].coldkey.ss58_address, - balance=mock_balances["w0"]["hk0"].rao, - stake=mock_stake.rao, # Needs set stake to be a validator - ) - - # Give w1 some balance - success, err = _subtensor_mock.force_set_balance( - ss58_address=mock_wallets[1].coldkey.ss58_address, - balance=mock_balances["w1"]["hk1"].rao, - ) - - # Make the first wallet a delegate - success = _subtensor_mock.nominate(wallet=mock_wallets[0]) - self.assertTrue(success) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - hk = kwargs.get("hotkey") - name_ = kwargs.get("name") - - if not hk and kwargs.get("config"): - hk = kwargs.get("config").wallet.hotkey - if not name_ and kwargs.get("config"): - name_ = kwargs.get("config").wallet.name - - for wallet in mock_wallets: - if wallet.name == name_ and wallet.hotkey_str == hk: - return wallet - else: - for wallet in mock_wallets: - if wallet.name == name_: - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - cli.run() - - # Check the stake - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=mock_wallets[0].hotkey.ss58_address, - coldkey_ss58=mock_wallets[1].coldkey.ss58_address, - ) - self.assertAlmostEqual(stake.tao, config.amount, places=4) - - def test_undelegate_stake(self, _): - config = self.config - config.command = "root" - config.subcommand = "undelegate" - config.no_prompt = True - config.amount = 5.0 - config.wallet.name = "w1" - - mock_balances: Dict[str, Balance] = { - # All have more than 5.0 stake - "w0": { - "hk0": Balance.from_float(10.0), - }, - "w1": {"hk1": Balance.from_float(11.1)}, - } - - mock_stake = Balance.from_float(5.0) - mock_delegated = Balance.from_float(6.0) - - mock_wallets = [] - for idx, wallet_name in 
enumerate(list(mock_balances.keys())): - for idx_hk, hk in enumerate(list(mock_balances[wallet_name].keys())): - wallet = SimpleNamespace( - name=wallet_name, - coldkey=_get_mock_keypair(idx, self.id()), - coldkeypub=_get_mock_keypair(idx, self.id()), - hotkey_str=hk, - hotkey=_get_mock_keypair(idx * 100 + idx_hk, self.id()), - ) - mock_wallets.append(wallet) - - # Set hotkey to be the hotkey from the other wallet - config.delegate_ss58key: str = mock_wallets[0].hotkey.ss58_address - - # Register mock wallets and give them balance - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=mock_wallets[0].hotkey.ss58_address, - coldkey=mock_wallets[0].coldkey.ss58_address, - balance=mock_balances["w0"]["hk0"].rao, - stake=mock_stake.rao, # Needs set stake to be a validator - ) - - # Give w1 some balance - success, err = _subtensor_mock.force_set_balance( - ss58_address=mock_wallets[1].coldkey.ss58_address, - balance=mock_balances["w1"]["hk1"].rao, - ) - - # Make the first wallet a delegate - success = _subtensor_mock.nominate(wallet=mock_wallets[0]) - self.assertTrue(success) - - # Stake to the delegate - success = _subtensor_mock.delegate( - wallet=mock_wallets[1], - delegate_ss58=mock_wallets[0].hotkey.ss58_address, - amount=mock_delegated, - prompt=False, - ) - self.assertTrue(success) - - # Verify the stake - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=mock_wallets[0].hotkey.ss58_address, - coldkey_ss58=mock_wallets[1].coldkey.ss58_address, - ) - self.assertAlmostEqual(stake.tao, mock_delegated.tao, places=4) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - hk = kwargs.get("hotkey") - name_ = kwargs.get("name") - - if not hk and kwargs.get("config"): - hk = kwargs.get("config").wallet.hotkey - if not name_ and kwargs.get("config"): - name_ = kwargs.get("config").wallet.name - - for wallet in mock_wallets: - if wallet.name == name_ and wallet.hotkey_str == hk: - return wallet - else: - for wallet in 
mock_wallets: - if wallet.name == name_: - return wallet - else: - return mock_wallets[0] - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - cli.run() - - # Check the stake - stake = _subtensor_mock.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=mock_wallets[0].hotkey.ss58_address, - coldkey_ss58=mock_wallets[1].coldkey.ss58_address, - ) - self.assertAlmostEqual( - stake.tao, mock_delegated.tao - config.amount, places=4 - ) - - def test_transfer(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "transfer" - config.no_prompt = True - config.amount = 3.2 - config.wallet.name = "w1" - - mock_balances: Dict[str, Balance] = { - "w0": Balance.from_float(10.0), - "w1": Balance.from_float(config.amount + 0.001), - } - - mock_wallets = [] - for idx, wallet_name in enumerate(list(mock_balances.keys())): - wallet = SimpleNamespace( - name=wallet_name, - coldkey=_get_mock_keypair(idx, self.id()), - coldkeypub=_get_mock_keypair(idx, self.id()), - ) - mock_wallets.append(wallet) - - # Set dest to w0 - config.dest = mock_wallets[0].coldkey.ss58_address - - # Give w0 and w1 balance - - for wallet in mock_wallets: - success, err = _subtensor_mock.force_set_balance( - ss58_address=wallet.coldkey.ss58_address, - balance=mock_balances[wallet.name].rao, - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - name_ = kwargs.get("name") - - if not name_ and kwargs.get("config"): - name_ = kwargs.get("config").wallet.name - - for wallet in mock_wallets: - if wallet.name == name_: - return wallet - else: - raise ValueError(f"No mock wallet found with name: {name_}") - - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - cli.run() - - # Check the balance of w0 - balance = _subtensor_mock.get_balance( - address=mock_wallets[0].coldkey.ss58_address - ) - self.assertAlmostEqual( - balance.tao, mock_balances["w0"].tao + 
config.amount, places=4 - ) - - # Check the balance of w1 - balance = _subtensor_mock.get_balance( - address=mock_wallets[1].coldkey.ss58_address - ) - self.assertAlmostEqual( - balance.tao, mock_balances["w1"].tao - config.amount, places=4 - ) # no fees - - def test_transfer_not_enough_balance(self, _): - config = self.config - config.command = "wallet" - config.subcommand = "transfer" - config.no_prompt = True - config.amount = 3.2 - config.wallet.name = "w1" - - mock_balances: Dict[str, Balance] = { - "w0": Balance.from_float(10.0), - "w1": Balance.from_float(config.amount - 0.1), # not enough balance - } - - mock_wallets = [] - for idx, wallet_name in enumerate(list(mock_balances.keys())): - wallet = SimpleNamespace( - name=wallet_name, - coldkey=_get_mock_keypair(idx, self.id()), - coldkeypub=_get_mock_keypair(idx, self.id()), - ) - mock_wallets.append(wallet) - - # Set dest to w0 - config.dest = mock_wallets[0].coldkey.ss58_address - - # Give w0 and w1 balance - - for wallet in mock_wallets: - success, err = _subtensor_mock.force_set_balance( - ss58_address=wallet.coldkey.ss58_address, - balance=mock_balances[wallet.name].rao, - ) - - cli = bittensor.cli(config) - - def mock_get_wallet(*args, **kwargs): - name_ = kwargs.get("name") - - if not name_ and kwargs.get("config"): - name_ = kwargs.get("config").wallet.name - - for wallet in mock_wallets: - if wallet.name == name_: - return wallet - else: - raise ValueError(f"No mock wallet found with name: {name_}") - - mock_console = MockConsole() - with patch("bittensor.wallet") as mock_create_wallet: - mock_create_wallet.side_effect = mock_get_wallet - - with patch("bittensor.__console__", mock_console): - cli.run() - - # Check that the overview was printed. 
- self.assertIsNotNone(mock_console.captured_print) - - output_no_syntax = mock_console.remove_rich_syntax( - mock_console.captured_print - ) - - self.assertIn("Not enough balance", output_no_syntax) - - # Check the balance of w0 - balance = _subtensor_mock.get_balance( - address=mock_wallets[0].coldkey.ss58_address - ) - self.assertAlmostEqual( - balance.tao, mock_balances["w0"].tao, places=4 - ) # did not transfer - - # Check the balance of w1 - balance = _subtensor_mock.get_balance( - address=mock_wallets[1].coldkey.ss58_address - ) - self.assertAlmostEqual( - balance.tao, mock_balances["w1"].tao, places=4 - ) # did not transfer - - def test_register(self, _): - config = self.config - config.command = "subnets" - config.subcommand = "register" - config.no_prompt = True - - mock_wallet = generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - # Give the wallet some balance for burning - success, err = _subtensor_mock.force_set_balance( - ss58_address=mock_wallet.coldkeypub.ss58_address, - balance=Balance.from_float(200.0), - ) - - with patch("bittensor.wallet", return_value=mock_wallet) as mock_create_wallet: - cli = bittensor.cli(config) - cli.run() - mock_create_wallet.assert_called_once() - - # Verify that the wallet was registered - subtensor = bittensor.subtensor(config) - registered = subtensor.is_hotkey_registered_on_subnet( - hotkey_ss58=mock_wallet.hotkey.ss58_address, netuid=1 - ) - - self.assertTrue(registered) - - def test_pow_register(self, _): - # Not the best way to do this, but I need to finish these tests, and unittest doesn't make this - # as simple as pytest - config = self.config - config.command = "subnets" - config.subcommand = "pow_register" - config.pow_register.num_processes = 1 - config.pow_register.update_interval = 50_000 - config.no_prompt = True - - mock_wallet = generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - class MockException(Exception): - pass - - with patch("bittensor.wallet", return_value=mock_wallet) as 
mock_create_wallet: - with patch( - "bittensor.extrinsics.registration.POWSolution.is_stale", - side_effect=MockException, - ) as mock_is_stale: - with pytest.raises(MockException): - cli = bittensor.cli(config) - cli.run() - mock_create_wallet.assert_called_once() - - self.assertEqual(mock_is_stale.call_count, 1) - - def test_stake(self, _): - amount_to_stake: Balance = Balance.from_tao(0.5) - config = self.config - config.no_prompt = True - config.command = "stake" - config.subcommand = "add" - config.amount = amount_to_stake.tao - config.stake_all = False - config.use_password = False - config.model = "core_server" - config.hotkey = "hk0" - - subtensor = bittensor.subtensor(config) - - mock_wallet = generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - # Register the hotkey and give it some balance - _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=mock_wallet.hotkey.ss58_address, - coldkey=mock_wallet.coldkey.ss58_address, - balance=( - amount_to_stake + Balance.from_tao(1.0) - ).rao, # 1.0 tao extra for fees, etc - ) - - with patch("bittensor.wallet", return_value=mock_wallet) as mock_create_wallet: - old_stake = subtensor.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=mock_wallet.hotkey.ss58_address, - coldkey_ss58=mock_wallet.coldkey.ss58_address, - ) - - cli = bittensor.cli(config) - cli.run() - mock_create_wallet.assert_called() - self.assertEqual(mock_create_wallet.call_count, 2) - - new_stake = subtensor.get_stake_for_coldkey_and_hotkey( - hotkey_ss58=mock_wallet.hotkey.ss58_address, - coldkey_ss58=mock_wallet.coldkey.ss58_address, - ) - - self.assertGreater(new_stake, old_stake) - - def test_metagraph(self, _): - config = self.config - config.wallet.name = "metagraph_testwallet" - config.command = "subnets" - config.subcommand = "metagraph" - config.no_prompt = True - - # Add some neurons to the metagraph - mock_nn = [] - - def register_mock_neuron(i: int) -> int: - mock_nn.append( - SimpleNamespace( - hotkey=_get_mock_keypair(i + 
100, self.id()).ss58_address, - coldkey=_get_mock_keypair(i, self.id()).ss58_address, - balance=Balance.from_rao(random.randint(0, 2**45)).rao, - stake=Balance.from_rao(random.randint(0, 2**45)).rao, - ) - ) - uid = _subtensor_mock.force_register_neuron( - netuid=config.netuid, - hotkey=mock_nn[i].hotkey, - coldkey=mock_nn[i].coldkey, - balance=mock_nn[i].balance, - stake=mock_nn[i].stake, - ) - return uid - - for i in range(5): - _ = register_mock_neuron(i) - - _subtensor_mock.neurons_lite(netuid=config.netuid) - - cli = bittensor.cli(config) - - mock_console = MockConsole() - with patch("bittensor.__console__", mock_console): - cli.run() - - # Check that the overview was printed. - self.assertIsNotNone(mock_console.captured_print) - - output_no_syntax = mock_console.remove_rich_syntax(mock_console.captured_print) - - self.assertIn("Metagraph", output_no_syntax) - nn = _subtensor_mock.neurons_lite(netuid=config.netuid) - self.assertIn( - str(len(nn) - 1), output_no_syntax - ) # Check that the number of neurons is output - # Check each uid is in the output - for neuron in nn: - self.assertIn(str(neuron.uid), output_no_syntax) - - def test_inspect(self, _): - config = self.config - config.wallet.name = "inspect_testwallet" - config.no_prompt = True - config.n_words = 12 - config.use_password = False - config.overwrite_coldkey = True - config.overwrite_hotkey = True - - # First create a new coldkey - config.command = "wallet" - config.subcommand = "new_coldkey" - cli = bittensor.cli(config) - cli.run() - - # Now let's give it a hotkey - config.command = "wallet" - config.subcommand = "new_hotkey" - cli.config = config - cli.run() - - # Now inspect it - config.command = "wallet" - cli.config.subcommand = "inspect" - cli.config = config - cli.run() - - config.command = "wallet" - cli.config.subcommand = "list" - cli.config = config - cli.run() - - # Run History Command to get list of transfers - config.command = "wallet" - cli.config.subcommand = "history" - cli.config 
= config - cli.run() - - -@patch("bittensor.subtensor", new_callable=return_mock_sub) -class TestCLIWithNetworkUsingArgs(unittest.TestCase): - """ - Test the CLI by passing args directly to the bittensor.cli factory - """ - - @unittest.mock.patch.object(MockSubtensor, "get_delegates") - def test_list_delegates(self, mocked_get_delegates, _): - # Call - cli = bittensor.cli(args=["root", "list_delegates"]) - cli.run() - - # Assertions - # make sure get_delegates called once without previous state (current only) - self.assertEqual(mocked_get_delegates.call_count, 2) - - def test_list_subnets(self, _): - cli = bittensor.cli( - args=[ - "subnets", - "list", - ] - ) - cli.run() - - def test_delegate(self, _): - """ - Test delegate add command - """ - mock_wallet = generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - delegate_wallet = generate_wallet(hotkey=_get_mock_keypair(100 + 1, self.id())) - - # register the wallet - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=mock_wallet.hotkey.ss58_address, - coldkey=mock_wallet.coldkey.ss58_address, - ) - - # register the delegate - _ = _subtensor_mock.force_register_neuron( - netuid=1, - hotkey=delegate_wallet.hotkey.ss58_address, - coldkey=delegate_wallet.coldkey.ss58_address, - ) - - # make the delegate a delegate - _subtensor_mock.nominate(delegate_wallet, wait_for_finalization=True) - self.assertTrue( - _subtensor_mock.is_hotkey_delegate(delegate_wallet.hotkey.ss58_address) - ) - - # Give the wallet some TAO - _, err = _subtensor_mock.force_set_balance( - ss58_address=mock_wallet.coldkey.ss58_address, - balance=Balance.from_tao(20.0), - ) - self.assertEqual(err, None) - - # Check balance - old_balance = _subtensor_mock.get_balance(mock_wallet.coldkey.ss58_address) - self.assertEqual(old_balance.tao, 20.0) - - # Check delegate stake - old_delegate_stake = _subtensor_mock.get_total_stake_for_hotkey( - delegate_wallet.hotkey.ss58_address - ) - - # Check wallet stake - old_wallet_stake = 
_subtensor_mock.get_total_stake_for_coldkey( - mock_wallet.coldkey.ss58_address - ) - - with patch( - "bittensor.wallet", return_value=mock_wallet - ): # Mock wallet creation. SHOULD NOT BE REGISTERED - cli = bittensor.cli( - args=[ - "root", - "delegate", - "--subtensor.network", - "mock", # Mock network - "--wallet.name", - "mock", - "--delegate_ss58key", - delegate_wallet.hotkey.ss58_address, - "--amount", - "10.0", # Delegate 10 TAO - "--no_prompt", - ] - ) - cli.run() - - # Check delegate stake - new_delegate_stake = _subtensor_mock.get_total_stake_for_hotkey( - delegate_wallet.hotkey.ss58_address - ) - - # Check wallet stake - new_wallet_stake = _subtensor_mock.get_total_stake_for_coldkey( - mock_wallet.coldkey.ss58_address - ) - - # Check that the delegate stake increased by 10 TAO - self.assertAlmostEqual( - new_delegate_stake.tao, old_delegate_stake.tao + 10.0, delta=1e-6 - ) - - # Check that the wallet stake increased by 10 TAO - self.assertAlmostEqual( - new_wallet_stake.tao, old_wallet_stake.tao + 10.0, delta=1e-6 - ) - - new_balance = _subtensor_mock.get_balance(mock_wallet.coldkey.ss58_address) - self.assertAlmostEqual(new_balance.tao, old_balance.tao - 10.0, delta=1e-6) - - -@pytest.fixture(scope="function") -def wallets_dir_path(tmp_path): - wallets_dir = tmp_path / "wallets" - wallets_dir.mkdir() - yield wallets_dir - - -@pytest.mark.parametrize( - "test_id, wallet_names, expected_wallet_count", - [ - ("happy_path_single_wallet", ["wallet1"], 1), # Single wallet - ( - "happy_path_multiple_wallets", - ["wallet1", "wallet2", "wallet3"], - 3, - ), # Multiple wallets - ("happy_path_no_wallets", [], 0), # No wallets - ], -) -def test_get_coldkey_wallets_for_path( - test_id, wallet_names, expected_wallet_count, wallets_dir_path -): - # Arrange: Create mock wallet directories - for name in wallet_names: - (wallets_dir_path / name).mkdir() - - # Act: Call the function with the test directory - wallets = _get_coldkey_wallets_for_path(str(wallets_dir_path)) 
- - # Assert: Check if the correct number of wallet objects are returned - assert len(wallets) == expected_wallet_count - for wallet in wallets: - assert isinstance( - wallet, Wallet - ), "The returned object should be an instance of bittensor.wallet" - - -@pytest.mark.parametrize( - "test_id, exception, mock_path, expected_result", - [ - ( - "error_case_invalid_path", - StopIteration, - "/invalid/path", - [], - ), # Invalid path causing StopIteration - ], -) -def test_get_coldkey_wallets_for_path_errors( - test_id, exception, mock_path, expected_result -): - # Arrange: Patch os.walk to raise an exception - with patch("os.walk", side_effect=exception): - # Act: Call the function with an invalid path - wallets = _get_coldkey_wallets_for_path(mock_path) - - # Assert: Check if an empty list is returned - assert ( - wallets == expected_result - ), "Function should return an empty list on error" - - -@pytest.mark.parametrize( - "test_id, display, legal, web, riot, email, pgp_fingerprint, image, info, twitter, expected_exception, expected_message", - [ - ( - "test-run-happy-path-1", - "Alice", - "Alice Doe", - "https://alice.example.com", - "@alice:matrix", - "alice@example.com", - "ABCD1234ABCD1234ABCD", - "https://alice.image", - "Alice in Wonderland", - "@liceTwitter", - None, - "", - ), - # Edge cases - ( - "test_run_edge_case_002", - "", - "", - "", - "", - "", - "", - "", - "", - "", - None, - "", - ), # Empty strings as input - # Error cases - # Each field has a maximum size of 64 bytes, PGP fingerprint has a maximum size of 20 bytes - ( - "test_run_error_case_003", - "A" * 65, - "B" * 65, - "C" * 65, - "D" * 65, - "E" * 65, - "F" * 21, - "G" * 65, - "H" * 65, - "I" * 65, - ValueError, - "Identity value `display` must be <= 64 raw bytes", - ), - ], -) -def test_set_identity_command( - test_id, - display, - legal, - web, - riot, - email, - pgp_fingerprint, - image, - info, - twitter, - expected_exception, - expected_message, -): - # Arrange - mock_cli = MagicMock() 
- mock_cli.config = MagicMock( - display=display, - legal=legal, - web=web, - riot=riot, - email=email, - pgp_fingerprint=pgp_fingerprint, - image=image, - info=info, - twitter=twitter, - ) - mock_subtensor = MagicMock() - mock_subtensor.update_identity = MagicMock() - mock_subtensor.query_identity = MagicMock(return_value={}) - mock_subtensor.close = MagicMock() - mock_wallet = MagicMock() - mock_wallet.hotkey.ss58_address = "fake_ss58_address" - mock_wallet.coldkey.ss58_address = "fake_coldkey_ss58_address" - mock_wallet.coldkey = MagicMock() - - with patch("bittensor.subtensor", return_value=mock_subtensor), patch( - "bittensor.wallet", return_value=mock_wallet - ), patch("bittensor.__console__", MagicMock()), patch( - "rich.prompt.Prompt.ask", side_effect=["y", "y"] - ), patch("sys.exit") as mock_exit: - # Act - if expected_exception: - with pytest.raises(expected_exception) as exc_info: - SetIdentityCommand._run(mock_cli, mock_subtensor) - # Assert - assert str(exc_info.value) == expected_message - else: - SetIdentityCommand._run(mock_cli, mock_subtensor) - # Assert - mock_subtensor.update_identity.assert_called_once() - assert mock_exit.call_count == 0 - - -@pytest.fixture -def setup_files(tmp_path): - def _setup_files(files): - for file_path, content in files.items(): - full_path = tmp_path / file_path - os.makedirs(full_path.parent, exist_ok=True) - with open(full_path, "w") as f: - f.write(content) - return tmp_path - - return _setup_files - - -@pytest.mark.parametrize( - "test_id, setup_data, expected", - [ - # Error cases - ( - "error_case_nonexistent_dir", - {"just_a_file.txt": ""}, - ([], []), - ), # Nonexistent dir - ], -) -def test_get_coldkey_ss58_addresses_for_path( - setup_files, test_id, setup_data, expected -): - path = setup_files(setup_data) - - # Arrange - # Setup done in setup_files fixture and parametrize - - # Act - result = _get_coldkey_ss58_addresses_for_path(str(path)) - - # Assert - assert ( - result == expected - ), f"Test ID: 
{test_id} failed. Expected {expected}, got {result}" - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/integration_tests/test_cli_no_network.py b/tests/integration_tests/test_cli_no_network.py deleted file mode 100644 index e3a3d6a49c..0000000000 --- a/tests/integration_tests/test_cli_no_network.py +++ /dev/null @@ -1,1533 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Yuma Rao -# Copyright © 2022-2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- - -import unittest -from unittest.mock import MagicMock, patch -from typing import Any, Optional -import pytest -from copy import deepcopy -import re - -from tests.helpers import _get_mock_coldkey, __mock_wallet_factory__, MockConsole - -import bittensor -from bittensor import Balance -from rich.table import Table - - -class MockException(Exception): - pass - - -mock_delegate_info = { - "hotkey_ss58": "", - "total_stake": bittensor.Balance.from_rao(0), - "nominators": [], - "owner_ss58": "", - "take": 0.18, - "validator_permits": [], - "registrations": [], - "return_per_1000": bittensor.Balance.from_rao(0), - "total_daily_return": bittensor.Balance.from_rao(0), -} - - -def return_mock_sub_1(*args, **kwargs): - return MagicMock( - return_value=MagicMock( - get_subnets=MagicMock(return_value=[1]), # Mock subnet 1 ONLY. - block=10_000, - get_delegates=MagicMock( - return_value=[bittensor.DelegateInfo(**mock_delegate_info)] - ), - ) - ) - - -def return_mock_wallet_factory(*args, **kwargs): - return MagicMock( - return_value=__mock_wallet_factory__(*args, **kwargs), - add_args=bittensor.wallet.add_args, - ) - - -@patch( - "bittensor.subtensor", - new_callable=return_mock_sub_1, -) -@patch("bittensor.wallet", new_callable=return_mock_wallet_factory) -class TestCLINoNetwork(unittest.TestCase): - def setUp(self): - self._config = TestCLINoNetwork.construct_config() - - def config(self): - copy_ = deepcopy(self._config) - return copy_ - - @staticmethod - def construct_config(): - parser = bittensor.cli.__create_parser__() - defaults = bittensor.config(parser=parser, args=["subnets", "metagraph"]) - - # Parse commands and subcommands - for command in bittensor.ALL_COMMANDS: - if ( - command in bittensor.ALL_COMMANDS - and "commands" in bittensor.ALL_COMMANDS[command] - ): - for subcommand in bittensor.ALL_COMMANDS[command]["commands"]: - defaults.merge( - bittensor.config(parser=parser, args=[command, subcommand]) - ) - else: - 
defaults.merge(bittensor.config(parser=parser, args=[command])) - - defaults.netuid = 1 - defaults.subtensor.network = "mock" - defaults.no_version_checking = True - - return defaults - - def test_check_configs(self, _, __): - config = self.config() - config.no_prompt = True - config.model = "core_server" - config.dest = "no_prompt" - config.amount = 1 - config.mnemonic = "this is a mnemonic" - config.seed = None - config.uids = [1, 2, 3] - config.weights = [0.25, 0.25, 0.25, 0.25] - config.no_version_checking = True - config.ss58_address = bittensor.Keypair.create_from_seed(b"0" * 32).ss58_address - config.public_key_hex = None - config.proposal_hash = "" - - cli_instance = bittensor.cli - - # Define the response function for rich.prompt.Prompt.ask - def ask_response(prompt: str) -> Any: - if "delegate index" in prompt: - return 0 - elif "wallet name" in prompt: - return "mock" - elif "hotkey" in prompt: - return "mock" - - # Patch the ask response - with patch("rich.prompt.Prompt.ask", ask_response): - # Loop through all commands and their subcommands - for command, command_data in bittensor.ALL_COMMANDS.items(): - config.command = command - if isinstance(command_data, dict): - for subcommand in command_data["commands"].keys(): - config.subcommand = subcommand - cli_instance.check_config(config) - else: - config.subcommand = None - cli_instance.check_config(config) - - def test_new_coldkey(self, _, __): - config = self.config() - config.wallet.name = "new_coldkey_testwallet" - - config.command = "wallet" - config.subcommand = "new_coldkey" - config.amount = 1 - config.dest = "no_prompt" - config.model = "core_server" - config.n_words = 12 - config.use_password = False - config.no_prompt = True - config.overwrite_coldkey = True - - cli = bittensor.cli(config) - cli.run() - - def test_new_hotkey(self, _, __): - config = self.config() - config.wallet.name = "new_hotkey_testwallet" - config.command = "wallet" - config.subcommand = "new_hotkey" - config.amount = 1 - 
config.dest = "no_prompt" - config.model = "core_server" - config.n_words = 12 - config.use_password = False - config.no_prompt = True - config.overwrite_hotkey = True - - cli = bittensor.cli(config) - cli.run() - - def test_regen_coldkey(self, _, __): - config = self.config() - config.wallet.name = "regen_coldkey_testwallet" - config.command = "wallet" - config.subcommand = "regen_coldkey" - config.amount = 1 - config.dest = "no_prompt" - config.model = "core_server" - config.mnemonic = "faculty decade seven jelly gospel axis next radio grain radio remain gentle" - config.seed = None - config.n_words = 12 - config.use_password = False - config.no_prompt = True - config.overwrite_coldkey = True - - cli = bittensor.cli(config) - cli.run() - - def test_regen_coldkeypub(self, _, __): - config = self.config() - config.wallet.name = "regen_coldkeypub_testwallet" - config.command = "wallet" - config.subcommand = "regen_coldkeypub" - config.ss58_address = "5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" - config.public_key = None - config.use_password = False - config.no_prompt = True - config.overwrite_coldkeypub = True - - cli = bittensor.cli(config) - cli.run() - - def test_regen_hotkey(self, _, __): - config = self.config() - config.wallet.name = "regen_hotkey_testwallet" - config.command = "wallet" - config.subcommand = "regen_hotkey" - config.amount = 1 - config.model = "core_server" - config.mnemonic = "faculty decade seven jelly gospel axis next radio grain radio remain gentle" - config.seed = None - config.n_words = 12 - config.use_password = False - config.no_prompt = True - config.overwrite_hotkey = True - - cli = bittensor.cli(config) - cli.run() - - def test_list(self, _, __): - # Mock IO for wallet - with patch( - "bittensor.wallet", - side_effect=[ - MagicMock( - coldkeypub_file=MagicMock( - exists_on_device=MagicMock(return_value=True), # Wallet exists - is_encrypted=MagicMock( - return_value=False # Wallet is not encrypted - ), - ), - 
coldkeypub=MagicMock( - ss58_address=bittensor.Keypair.create_from_mnemonic( - bittensor.Keypair.generate_mnemonic() - ).ss58_address - ), - ), - MagicMock( - hotkey_file=MagicMock( - exists_on_device=MagicMock(return_value=True), # Wallet exists - is_encrypted=MagicMock( - return_value=False # Wallet is not encrypted - ), - ), - hotkey=MagicMock( - ss58_address=bittensor.Keypair.create_from_mnemonic( - bittensor.Keypair.generate_mnemonic() - ).ss58_address - ), - ), - ], - ): - config = self.config() - config.wallet.path = "tmp/walletpath" - config.wallet.name = "mock_wallet" - config.no_prompt = True - config.command = "wallet" - config.subcommand = "list" - - cli = bittensor.cli(config) - with patch( - "os.walk", - side_effect=[ - iter([("/tmp/walletpath", ["mock_wallet"], [])]), # 1 wallet dir - iter( - [ - ("/tmp/walletpath/mock_wallet/hotkeys", [], ["hk0"]) - ] # 1 hotkey file - ), - ], - ): - cli.run() - - def test_list_no_wallet(self, _, __): - with patch( - "bittensor.wallet", - side_effect=[ - MagicMock( - coldkeypub_file=MagicMock( - exists_on_device=MagicMock(return_value=True) - ) - ) - ], - ): - config = self.config() - config.wallet.path = "/tmp/test_cli_test_list_no_wallet" - config.no_prompt = True - config.command = "wallet" - config.subcommand = "list" - - cli = bittensor.cli(config) - # This shouldn't raise an error anymore - cli.run() - - def test_btcli_help(self, _, __): - with pytest.raises(SystemExit) as pytest_wrapped_e: - with patch( - "argparse.ArgumentParser._print_message", return_value=None - ) as mock_print_message: - args = ["--help"] - bittensor.cli(args=args).run() - - mock_print_message.assert_called_once() - - call_args = mock_print_message.call_args - help_out = call_args[0][0] - - # Extract commands from the help text. 
- commands_section = re.search( - r"positional arguments:.*?{(.+?)}", help_out, re.DOTALL - ).group(1) - extracted_commands = [cmd.strip() for cmd in commands_section.split(",")] - - # Get expected commands - parser = bittensor.cli.__create_parser__() - expected_commands = [command for command in parser._actions[-1].choices] - - # Validate each expected command is in extracted commands - for command in expected_commands: - assert ( - command in extracted_commands - ), f"Command {command} not found in help output" - - # Check for duplicates - assert len(extracted_commands) == len( - set(extracted_commands) - ), "Duplicate commands found in help output" - - @patch("torch.cuda.is_available", return_value=True) - def test_register_cuda_use_cuda_flag(self, _, __, patched_sub): - base_args = [ - "subnets", - "pow_register", - "--wallet.path", - "tmp/walletpath", - "--wallet.name", - "mock", - "--wallet.hotkey", - "hk0", - "--no_prompt", - "--cuda.dev_id", - "0", - ] - - patched_sub.return_value = MagicMock( - get_subnets=MagicMock(return_value=[1]), - subnet_exists=MagicMock(return_value=True), - register=MagicMock(side_effect=MockException), - ) - - # Should be able to set true without argument - args = base_args + [ - "--pow_register.cuda.use_cuda", # should be True without any arugment - ] - with pytest.raises(MockException): - cli = bittensor.cli(args=args) - cli.run() - - self.assertEqual(cli.config.pow_register.cuda.get("use_cuda"), True) - - # Should be able to set to false with no argument - - args = base_args + [ - "--pow_register.cuda.no_cuda", - ] - with pytest.raises(MockException): - cli = bittensor.cli(args=args) - cli.run() - - self.assertEqual(cli.config.pow_register.cuda.get("use_cuda"), False) - - -def return_mock_sub_2(*args, **kwargs): - return MagicMock( - return_value=MagicMock( - get_subnet_burn_cost=MagicMock(return_value=0.1), - get_subnets=MagicMock(return_value=[1]), # Need to pass check config - get_delegates=MagicMock( - return_value=[ - 
bittensor.DelegateInfo( - hotkey_ss58="", - total_stake=Balance.from_rao(0), - nominators=[], - owner_ss58="", - take=0.18, - validator_permits=[], - registrations=[], - return_per_1000=Balance(0.0), - total_daily_return=Balance(0.0), - ) - ] - ), - block=10_000, - ), - add_args=bittensor.subtensor.add_args, - ) - - -@patch("bittensor.wallet", new_callable=return_mock_wallet_factory) -@patch("bittensor.subtensor", new_callable=return_mock_sub_2) -class TestEmptyArgs(unittest.TestCase): - """ - Test that the CLI doesn't crash when no args are passed - """ - - @patch("rich.prompt.PromptBase.ask", side_effect=MockException) - def test_command_no_args(self, _, __, patched_prompt_ask): - # Get argparser - parser = bittensor.cli.__create_parser__() - # Get all commands from argparser - commands = [ - command - for command in parser._actions[-1].choices # extract correct subparser keys - if len(command) > 1 # Skip singleton aliases - and command - not in [ - "subnet", - "sudos", - "stakes", - "roots", - "wallets", - "weight", - "st", - "wt", - "su", - ] # Skip duplicate aliases - ] - # Test that each command and its subcommands can be run with no args - for command in commands: - command_data = bittensor.ALL_COMMANDS.get(command) - - # If command is dictionary, it means it has subcommands - if isinstance(command_data, dict): - for subcommand in command_data["commands"].keys(): - try: - # Run each subcommand - bittensor.cli(args=[command, subcommand]).run() - except MockException: - pass # Expected exception - else: - try: - # If no subcommands, just run the command - bittensor.cli(args=[command]).run() - except MockException: - pass # Expected exception - - # Should not raise any other exceptions - - -mock_delegate_info = { - "hotkey_ss58": "", - "total_stake": bittensor.Balance.from_rao(0), - "nominators": [], - "owner_ss58": "", - "take": 0.18, - "validator_permits": [], - "registrations": [], - "return_per_1000": bittensor.Balance.from_rao(0), - "total_daily_return": 
bittensor.Balance.from_rao(0), -} - - -def return_mock_sub_3(*args, **kwargs): - return MagicMock( - return_value=MagicMock( - get_subnets=MagicMock(return_value=[1]), # Mock subnet 1 ONLY. - block=10_000, - get_delegates=MagicMock( - return_value=[bittensor.DelegateInfo(**mock_delegate_info)] - ), - ), - block=10_000, - ) - - -@patch("bittensor.subtensor", new_callable=return_mock_sub_3) -class TestCLIDefaultsNoNetwork(unittest.TestCase): - def test_inspect_prompt_wallet_name(self, _): - # Patch command to exit early - with patch("bittensor.commands.inspect.InspectCommand.run", return_value=None): - # Test prompt happens when no wallet name is passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=[ - "wallet", - "inspect", - # '--wallet.name', 'mock', - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called_once() - - # Test NO prompt happens when wallet name is passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=[ - "wallet", - "inspect", - "--wallet.name", - "coolwalletname", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - # Test NO prompt happens when wallet name 'default' is passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=[ - "wallet", - "inspect", - "--wallet.name", - "default", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - def test_overview_prompt_wallet_name(self, _): - # Patch command to exit early - with patch( - "bittensor.commands.overview.OverviewCommand.run", return_value=None - ): - # Test prompt happens when no wallet name is passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=[ - "wallet", - "overview", - # '--wallet.name', 'mock', - "--netuid", - "1", - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called_once() - - # Test NO prompt happens when wallet name 
is passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=[ - "wallet", - "overview", - "--wallet.name", - "coolwalletname", - "--netuid", - "1", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - # Test NO prompt happens when wallet name 'default' is passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=[ - "wallet", - "overview", - "--wallet.name", - "default", - "--netuid", - "1", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - def test_stake_prompt_wallet_name_and_hotkey_name(self, _): - base_args = [ - "stake", - "add", - "--all", - ] - # Patch command to exit early - with patch("bittensor.commands.stake.StakeCommand.run", return_value=None): - # Test prompt happens when - # - wallet name IS NOT passed, AND - # - hotkey name IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = ["mock", "mock_hotkey"] - - cli = bittensor.cli( - args=base_args - + [ - # '--wallet.name', 'mock', - #'--wallet.hotkey', 'mock_hotkey', - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 2, - msg="Prompt should have been called twice", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in [val for val in kwargs0.values()] - ] - # check that prompt was called for wallet name - self.assertTrue( - any( - filter( - lambda x: "wallet name" in x.lower(), combined_args_kwargs0 - ) - ), - msg=f"Prompt should have been called for wallet name: {combined_args_kwargs0}", - ) - - args1, kwargs1 = mock_ask_prompt.call_args_list[1] - combined_args_kwargs1 = [arg for arg in args1] + [ - val for val in kwargs1.values() - ] - # check that prompt was called for hotkey - - self.assertTrue( - any(filter(lambda x: "hotkey" in x.lower(), 
combined_args_kwargs1)), - msg=f"Prompt should have been called for hotkey: {combined_args_kwargs1}", - ) - - # Test prompt happens when - # - wallet name IS NOT passed, AND - # - hotkey name IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = ["mock", "mock_hotkey"] - - cli = bittensor.cli( - args=base_args - + [ - #'--wallet.name', 'mock', - "--wallet.hotkey", - "mock_hotkey", - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 1, - msg="Prompt should have been called ONCE", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for wallet name - self.assertTrue( - any( - filter( - lambda x: "wallet name" in x.lower(), combined_args_kwargs0 - ) - ), - msg=f"Prompt should have been called for wallet name: {combined_args_kwargs0}", - ) - - # Test prompt happens when - # - wallet name IS passed, AND - # - hotkey name IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = ["mock", "mock_hotkey"] - - cli = bittensor.cli( - args=base_args - + [ - "--wallet.name", - "mock", - #'--wallet.hotkey', 'mock_hotkey', - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 1, - msg="Prompt should have been called ONCE", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for hotkey - self.assertTrue( - any(filter(lambda x: "hotkey" in x.lower(), combined_args_kwargs0)), - msg=f"Prompt should have been called for hotkey {combined_args_kwargs0}", - ) - - # Test NO prompt happens when - # - wallet name IS passed, AND - # - hotkey name IS passed - with patch("rich.prompt.Prompt.ask") as 
mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--wallet.name", - "coolwalletname", - "--wallet.hotkey", - "coolwalletname_hotkey", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - # Test NO prompt happens when - # - wallet name 'default' IS passed, AND - # - hotkey name 'default' IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - def test_unstake_prompt_wallet_name_and_hotkey_name(self, _): - base_args = [ - "stake", - "remove", - "--all", - ] - # Patch command to exit early - with patch("bittensor.commands.unstake.UnStakeCommand.run", return_value=None): - # Test prompt happens when - # - wallet name IS NOT passed, AND - # - hotkey name IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = ["mock", "mock_hotkey"] - - cli = bittensor.cli( - args=base_args - + [ - # '--wallet.name', 'mock', - #'--wallet.hotkey', 'mock_hotkey', - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 2, - msg="Prompt should have been called twice", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for wallet name - self.assertTrue( - any( - filter( - lambda x: "wallet name" in x.lower(), combined_args_kwargs0 - ) - ), - msg=f"Prompt should have been called for wallet name: {combined_args_kwargs0}", - ) - - args1, kwargs1 = mock_ask_prompt.call_args_list[1] - combined_args_kwargs1 = [arg for arg in args1] + [ - val for val in kwargs1.values() - ] - # check that prompt was called for hotkey - self.assertTrue( - any(filter(lambda x: "hotkey" in x.lower(), 
combined_args_kwargs1)), - msg=f"Prompt should have been called for hotkey {combined_args_kwargs1}", - ) - - # Test prompt happens when - # - wallet name IS NOT passed, AND - # - hotkey name IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = ["mock", "mock_hotkey"] - - cli = bittensor.cli( - args=base_args - + [ - #'--wallet.name', 'mock', - "--wallet.hotkey", - "mock_hotkey", - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 1, - msg="Prompt should have been called ONCE", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for wallet name - self.assertTrue( - any( - filter( - lambda x: "wallet name" in x.lower(), combined_args_kwargs0 - ) - ), - msg=f"Prompt should have been called for wallet name: {combined_args_kwargs0}", - ) - - # Test prompt happens when - # - wallet name IS passed, AND - # - hotkey name IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = ["mock", "mock_hotkey"] - - cli = bittensor.cli( - args=base_args - + [ - "--wallet.name", - "mock", - #'--wallet.hotkey', 'mock_hotkey', - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 1, - msg="Prompt should have been called ONCE", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for hotkey - self.assertTrue( - any(filter(lambda x: "hotkey" in x.lower(), combined_args_kwargs0)), - msg=f"Prompt should have been called for hotkey {combined_args_kwargs0}", - ) - - # Test NO prompt happens when - # - wallet name IS passed, AND - # - hotkey name IS passed - with patch("rich.prompt.Prompt.ask") as 
mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--wallet.name", - "coolwalletname", - "--wallet.hotkey", - "coolwalletname_hotkey", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - # Test NO prompt happens when - # - wallet name 'default' IS passed, AND - # - hotkey name 'default' IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--wallet.name", - "default", - "--wallet.hotkey", - "default", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - def test_delegate_prompt_wallet_name(self, _): - base_args = [ - "root", - "delegate", - "--all", - "--delegate_ss58key", - _get_mock_coldkey(0), - ] - # Patch command to exit early - with patch( - "bittensor.commands.delegates.DelegateStakeCommand.run", return_value=None - ): - # Test prompt happens when - # - wallet name IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = ["mock"] - - cli = bittensor.cli( - args=base_args - + [ - # '--wallet.name', 'mock', - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 1, - msg="Prompt should have been called ONCE", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for wallet name - self.assertTrue( - any( - filter( - lambda x: "wallet name" in x.lower(), combined_args_kwargs0 - ) - ), - msg=f"Prompt should have been called for wallet name: {combined_args_kwargs0}", - ) - - # Test NO prompt happens when - # - wallet name IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--wallet.name", - "coolwalletname", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - def 
test_undelegate_prompt_wallet_name(self, _): - base_args = [ - "root", - "undelegate", - "--all", - "--delegate_ss58key", - _get_mock_coldkey(0), - ] - # Patch command to exit early - with patch( - "bittensor.commands.delegates.DelegateUnstakeCommand.run", return_value=None - ): - # Test prompt happens when - # - wallet name IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = ["mock"] - - cli = bittensor.cli( - args=base_args - + [ - # '--wallet.name', 'mock', - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 1, - msg="Prompt should have been called ONCE", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for wallet name - self.assertTrue( - any( - filter( - lambda x: "wallet name" in x.lower(), combined_args_kwargs0 - ) - ), - msg=f"Prompt should have been called for wallet name: {combined_args_kwargs0}", - ) - - # Test NO prompt happens when - # - wallet name IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--wallet.name", - "coolwalletname", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - def test_history_prompt_wallet_name(self, _): - base_args = [ - "wallet", - "history", - ] - # Patch command to exit early - with patch( - "bittensor.commands.wallets.GetWalletHistoryCommand.run", return_value=None - ): - # Test prompt happens when - # - wallet name IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = ["mock"] - - cli = bittensor.cli( - args=base_args - + [ - # '--wallet.name', 'mock', - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 1, - msg="Prompt should have been 
called ONCE", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for wallet name - self.assertTrue( - any( - filter( - lambda x: "wallet name" in x.lower(), combined_args_kwargs0 - ) - ), - msg=f"Prompt should have been called for wallet name: {combined_args_kwargs0}", - ) - - # Test NO prompt happens when - # - wallet name IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--wallet.name", - "coolwalletname", - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - def test_delegate_prompt_hotkey(self, _): - # Tests when - # - wallet name IS passed, AND - # - delegate hotkey IS NOT passed - base_args = [ - "root", - "delegate", - "--all", - "--wallet.name", - "mock", - ] - - delegate_ss58 = _get_mock_coldkey(0) - with patch("bittensor.commands.delegates.show_delegates"): - with patch( - "bittensor.subtensor.Subtensor.get_delegates", - return_value=[ - bittensor.DelegateInfo( - hotkey_ss58=delegate_ss58, # return delegate with mock coldkey - total_stake=bittensor.Balance.from_float(0.1), - nominators=[], - owner_ss58="", - take=0.18, - validator_permits=[], - registrations=[], - return_per_1000=bittensor.Balance.from_float(0.1), - total_daily_return=bittensor.Balance.from_float(0.1), - ) - ], - ): - # Patch command to exit early - with patch( - "bittensor.commands.delegates.DelegateStakeCommand.run", - return_value=None, - ): - # Test prompt happens when - # - delegate hotkey IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = [ - "0" - ] # select delegate with mock coldkey - - cli = bittensor.cli( - args=base_args - + [ - # '--delegate_ss58key', delegate_ss58, - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 1, - 
msg="Prompt should have been called ONCE", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for delegate hotkey - self.assertTrue( - any( - filter( - lambda x: "delegate" in x.lower(), - combined_args_kwargs0, - ) - ), - msg=f"Prompt should have been called for delegate: {combined_args_kwargs0}", - ) - - # Test NO prompt happens when - # - delegate hotkey IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--delegate_ss58key", - delegate_ss58, - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - def test_undelegate_prompt_hotkey(self, _): - # Tests when - # - wallet name IS passed, AND - # - delegate hotkey IS NOT passed - base_args = [ - "root", - "undelegate", - "--all", - "--wallet.name", - "mock", - ] - - delegate_ss58 = _get_mock_coldkey(0) - with patch("bittensor.commands.delegates.show_delegates"): - with patch( - "bittensor.subtensor.Subtensor.get_delegates", - return_value=[ - bittensor.DelegateInfo( - hotkey_ss58=delegate_ss58, # return delegate with mock coldkey - total_stake=bittensor.Balance.from_float(0.1), - nominators=[], - owner_ss58="", - take=0.18, - validator_permits=[], - registrations=[], - return_per_1000=bittensor.Balance.from_float(0.1), - total_daily_return=bittensor.Balance.from_float(0.1), - ) - ], - ): - # Patch command to exit early - with patch( - "bittensor.commands.delegates.DelegateUnstakeCommand.run", - return_value=None, - ): - # Test prompt happens when - # - delegate hotkey IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = [ - "0" - ] # select delegate with mock coldkey - - cli = bittensor.cli( - args=base_args - + [ - # '--delegate_ss58key', delegate_ss58, - ] - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - 
self.assertEqual( - mock_ask_prompt.call_count, - 1, - msg="Prompt should have been called ONCE", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was called for delegate hotkey - self.assertTrue( - any( - filter( - lambda x: "delegate" in x.lower(), - combined_args_kwargs0, - ) - ), - msg=f"Prompt should have been called for delegate: {combined_args_kwargs0}", - ) - - # Test NO prompt happens when - # - delegate hotkey IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--delegate_ss58key", - delegate_ss58, - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - def test_vote_command_prompt_proposal_hash(self, _): - """Test that the vote command prompts for proposal_hash when it is not passed""" - base_args = [ - "root", - "senate_vote", - "--wallet.name", - "mock", - "--wallet.hotkey", - "mock_hotkey", - ] - - mock_proposal_hash = "mock_proposal_hash" - - with patch("bittensor.subtensor.Subtensor.is_senate_member", return_value=True): - with patch( - "bittensor.subtensor.Subtensor.get_vote_data", - return_value={"index": 1}, - ): - # Patch command to exit early - with patch( - "bittensor.commands.senate.VoteCommand.run", - return_value=None, - ): - # Test prompt happens when - # - proposal_hash IS NOT passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - mock_ask_prompt.side_effect = [ - mock_proposal_hash # Proposal hash - ] - - cli = bittensor.cli( - args=base_args - # proposal_hash not added - ) - cli.run() - - # Prompt happened - mock_ask_prompt.assert_called() - self.assertEqual( - mock_ask_prompt.call_count, - 1, - msg="Prompt should have been called once", - ) - args0, kwargs0 = mock_ask_prompt.call_args_list[0] - combined_args_kwargs0 = [arg for arg in args0] + [ - val for val in kwargs0.values() - ] - # check that prompt was 
called for proposal_hash - self.assertTrue( - any( - filter( - lambda x: "proposal" in x.lower(), - combined_args_kwargs0, - ) - ), - msg=f"Prompt should have been called for proposal: {combined_args_kwargs0}", - ) - - # Test NO prompt happens when - # - proposal_hash IS passed - with patch("rich.prompt.Prompt.ask") as mock_ask_prompt: - cli = bittensor.cli( - args=base_args - + [ - "--proposal_hash", - mock_proposal_hash, - ] - ) - cli.run() - - # NO prompt happened - mock_ask_prompt.assert_not_called() - - @patch("bittensor.wallet", new_callable=return_mock_wallet_factory) - def test_commit_reveal_weights_enabled_parse_boolean_argument(self, mock_sub, __): - param = "commit_reveal_weights_enabled" - - def _test_value_parsing(parsed_value: bool, modified: str): - cli = bittensor.cli( - args=[ - "sudo", - "set", - "--netuid", - "1", - "--param", - param, - "--value", - modified, - "--wallet.name", - "mock", - ] - ) - cli.run() - - _, kwargs = mock_sub.call_args - passed_config = kwargs["config"] - self.assertEqual(passed_config.param, param, msg="Incorrect param") - self.assertEqual( - passed_config.value, - parsed_value, - msg=f"Boolean argument not correctly for {modified}", - ) - - for boolean_value in [True, False, 1, 0]: - as_str = str(boolean_value) - - _test_value_parsing(boolean_value, as_str) - _test_value_parsing(boolean_value, as_str.capitalize()) - _test_value_parsing(boolean_value, as_str.upper()) - _test_value_parsing(boolean_value, as_str.lower()) - - @patch("bittensor.wallet", new_callable=return_mock_wallet_factory) - def test_hyperparameter_allowed_values( - self, - mock_sub, - __, - ): - params = ["alpha_values"] - - def _test_value_parsing(param: str, value: str): - cli = bittensor.cli( - args=[ - "sudo", - "set", - "hyperparameters", - "--netuid", - "1", - "--param", - param, - "--value", - value, - "--wallet.name", - "mock", - ] - ) - should_raise_error = False - error_message = "" - - try: - alpha_low_str, alpha_high_str = 
value.strip("[]").split(",") - alpha_high = float(alpha_high_str) - alpha_low = float(alpha_low_str) - if alpha_high <= 52428 or alpha_high >= 65535: - should_raise_error = True - error_message = "between 52428 and 65535" - elif alpha_low < 0 or alpha_low > 52428: - should_raise_error = True - error_message = "between 0 and 52428" - except ValueError: - should_raise_error = True - error_message = "a number or a boolean" - except TypeError: - should_raise_error = True - error_message = "a number or a boolean" - - if isinstance(value, bool): - should_raise_error = True - error_message = "a number or a boolean" - - if should_raise_error: - with pytest.raises(ValueError) as exc_info: - cli.run() - assert ( - f"Hyperparameter {param} value is not within bounds. Value is {value} but must be {error_message}" - in str(exc_info.value) - ) - else: - cli.run() - _, kwargs = mock_sub.call_args - passed_config = kwargs["config"] - self.assertEqual(passed_config.param, param, msg="Incorrect param") - self.assertEqual( - passed_config.value, - value, - msg=f"Value argument not set correctly for {param}", - ) - - for param in params: - for value in [ - [0.8, 11], - [52429, 52428], - [52427, 53083], - [6553, 53083], - [-123, None], - [1, 0], - [True, "Some string"], - ]: - as_str = str(value).strip("[]") - _test_value_parsing(param, as_str) - - @patch("bittensor.wallet", new_callable=return_mock_wallet_factory) - def test_network_registration_allowed_parse_boolean_argument(self, mock_sub, __): - param = "network_registration_allowed" - - def _test_value_parsing(parsed_value: bool, modified: str): - cli = bittensor.cli( - args=[ - "sudo", - "set", - "--netuid", - "1", - "--param", - param, - "--value", - modified, - "--wallet.name", - "mock", - ] - ) - cli.run() - - _, kwargs = mock_sub.call_args - passed_config = kwargs["config"] - self.assertEqual(passed_config.param, param, msg="Incorrect param") - self.assertEqual( - passed_config.value, - parsed_value, - msg=f"Boolean argument 
not correctly for {modified}", - ) - - for boolean_value in [True, False, 1, 0]: - as_str = str(boolean_value) - - _test_value_parsing(boolean_value, as_str) - _test_value_parsing(boolean_value, as_str.capitalize()) - _test_value_parsing(boolean_value, as_str.upper()) - _test_value_parsing(boolean_value, as_str.lower()) - - @patch("bittensor.wallet", new_callable=return_mock_wallet_factory) - def test_network_pow_registration_allowed_parse_boolean_argument( - self, mock_sub, __ - ): - param = "network_pow_registration_allowed" - - def _test_value_parsing(parsed_value: bool, modified: str): - cli = bittensor.cli( - args=[ - "sudo", - "set", - "--netuid", - "1", - "--param", - param, - "--value", - modified, - "--wallet.name", - "mock", - ] - ) - cli.run() - - _, kwargs = mock_sub.call_args - passed_config = kwargs["config"] - self.assertEqual(passed_config.param, param, msg="Incorrect param") - self.assertEqual( - passed_config.value, - parsed_value, - msg=f"Boolean argument not correctly for {modified}", - ) - - for boolean_value in [True, False, 1, 0]: - as_str = str(boolean_value) - - _test_value_parsing(boolean_value, as_str) - _test_value_parsing(boolean_value, as_str.capitalize()) - _test_value_parsing(boolean_value, as_str.upper()) - _test_value_parsing(boolean_value, as_str.lower()) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/integration_tests/test_metagraph_integration.py b/tests/integration_tests/test_metagraph_integration.py deleted file mode 100644 index 5dbb9ddfc1..0000000000 --- a/tests/integration_tests/test_metagraph_integration.py +++ /dev/null @@ -1,114 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import bittensor -import torch -import os -from bittensor.mock import MockSubtensor -from bittensor.metagraph import METAGRAPH_STATE_DICT_NDARRAY_KEYS, get_save_dir - -_subtensor_mock: MockSubtensor = MockSubtensor() - - -def setUpModule(): - _subtensor_mock.reset() - - _subtensor_mock.create_subnet(netuid=3) - - # Set diff 0 - _subtensor_mock.set_difficulty(netuid=3, difficulty=0) - - -class TestMetagraph: - def setup_method(self): - self.sub = MockSubtensor() - self.metagraph = bittensor.metagraph(netuid=3, network="mock", sync=False) - - def test_print_empty(self): - print(self.metagraph) - - def test_lite_sync(self): - self.metagraph.sync(lite=True, subtensor=self.sub) - - def test_full_sync(self): - self.metagraph.sync(lite=False, subtensor=self.sub) - - def test_sync_block_0(self): - self.metagraph.sync(lite=True, block=0, subtensor=self.sub) - - def test_load_sync_save(self): - self.metagraph.sync(lite=True, subtensor=self.sub) - self.metagraph.save() - self.metagraph.load() - self.metagraph.save() - - def test_load_sync_save_from_torch(self): - self.metagraph.sync(lite=True, subtensor=self.sub) - - def deprecated_save_torch(metagraph): - save_directory = 
get_save_dir(metagraph.network, metagraph.netuid) - os.makedirs(save_directory, exist_ok=True) - graph_filename = save_directory + f"/block-{metagraph.block.item()}.pt" - state_dict = metagraph.state_dict() - for key in METAGRAPH_STATE_DICT_NDARRAY_KEYS: - state_dict[key] = torch.nn.Parameter( - torch.tensor(state_dict[key]), requires_grad=False - ) - torch.save(state_dict, graph_filename) - - deprecated_save_torch(self.metagraph) - self.metagraph.load() - - def test_state_dict(self): - self.metagraph.load() - state = self.metagraph.state_dict() - assert "version" in state - assert "n" in state - assert "block" in state - assert "stake" in state - assert "total_stake" in state - assert "ranks" in state - assert "trust" in state - assert "consensus" in state - assert "validator_trust" in state - assert "incentive" in state - assert "emission" in state - assert "dividends" in state - assert "active" in state - assert "last_update" in state - assert "validator_permit" in state - assert "weights" in state - assert "bonds" in state - assert "uids" in state - - def test_properties(self): - metagraph = self.metagraph - metagraph.hotkeys - metagraph.coldkeys - metagraph.addresses - metagraph.validator_trust - metagraph.S - metagraph.R - metagraph.I - metagraph.E - metagraph.C - metagraph.T - metagraph.Tv - metagraph.D - metagraph.B - metagraph.W diff --git a/tests/integration_tests/test_subtensor_integration.py b/tests/integration_tests/test_subtensor_integration.py deleted file mode 100644 index 407dee848c..0000000000 --- a/tests/integration_tests/test_subtensor_integration.py +++ /dev/null @@ -1,850 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import random -import unittest -from queue import Empty as QueueEmpty -from unittest.mock import MagicMock, patch - -import numpy as np -import pytest -from substrateinterface import Keypair - -import bittensor -from bittensor.mock import MockSubtensor -from bittensor.utils import weight_utils -from bittensor.utils.balance import Balance -from tests.helpers import ( - _get_mock_coldkey, - MockConsole, - _get_mock_keypair, - _get_mock_wallet, -) - - -class TestSubtensor(unittest.TestCase): - _mock_console_patcher = None - _mock_subtensor: MockSubtensor - subtensor: MockSubtensor - - def setUp(self): - self.wallet = _get_mock_wallet( - hotkey=_get_mock_keypair(0, self.id()), - coldkey=_get_mock_keypair(1, self.id()), - ) - self.balance = Balance.from_tao(1000) - self.mock_neuron = MagicMock() # NOTE: this might need more sophistication - self.subtensor = MockSubtensor() # own instance per test - - @classmethod - def setUpClass(cls) -> None: - # mock rich console status - mock_console = MockConsole() - cls._mock_console_patcher = patch("bittensor.__console__", mock_console) - cls._mock_console_patcher.start() - - # Keeps the same mock network for all tests. 
This stops the network from being re-setup for each test. - cls._mock_subtensor = MockSubtensor() - - cls._do_setup_subnet() - - @classmethod - def _do_setup_subnet(cls): - # reset the mock subtensor - cls._mock_subtensor.reset() - # Setup the mock subnet 3 - cls._mock_subtensor.create_subnet(netuid=3) - - @classmethod - def tearDownClass(cls) -> None: - cls._mock_console_patcher.stop() - - def test_network_overrides(self): - """Tests that the network overrides the chain_endpoint.""" - # Argument importance: chain_endpoint (arg) > network (arg) > config.subtensor.chain_endpoint > config.subtensor.network - config0 = bittensor.subtensor.config() - config0.subtensor.network = "finney" - config0.subtensor.chain_endpoint = "wss://finney.subtensor.io" # Should not match bittensor.__finney_entrypoint__ - assert config0.subtensor.chain_endpoint != bittensor.__finney_entrypoint__ - - config1 = bittensor.subtensor.config() - config1.subtensor.network = "local" - config1.subtensor.chain_endpoint = None - - # Mock network calls - with patch("substrateinterface.SubstrateInterface.connect_websocket"): - with patch("substrateinterface.SubstrateInterface.reload_type_registry"): - print(bittensor.subtensor, type(bittensor.subtensor)) - # Choose network arg over config - sub1 = bittensor.subtensor(config=config1, network="local") - self.assertEqual( - sub1.chain_endpoint, - bittensor.__local_entrypoint__, - msg="Explicit network arg should override config.network", - ) - - # Choose network config over chain_endpoint config - sub2 = bittensor.subtensor(config=config0) - self.assertNotEqual( - sub2.chain_endpoint, - bittensor.__finney_entrypoint__, # Here we expect the endpoint corresponding to the network "finney" - msg="config.network should override config.chain_endpoint", - ) - - sub3 = bittensor.subtensor(config=config1) - # Should pick local instead of finney (default) - assert sub3.network == "local" - assert sub3.chain_endpoint == bittensor.__local_entrypoint__ - - def 
test_get_current_block(self): - block = self.subtensor.get_current_block() - assert type(block) == int - - def test_do_block_step(self): - self.subtensor.do_block_step() - block = self.subtensor.get_current_block() - assert type(block) == int - - def test_do_block_step_query_previous_block(self): - self.subtensor.do_block_step() - block = self.subtensor.get_current_block() - self.subtensor.query_subtensor("NetworksAdded", block) - - def test_unstake(self): - self.subtensor._do_unstake = MagicMock(return_value=True) - - self.subtensor.substrate.get_payment_info = MagicMock( - return_value={"partialFee": 100} - ) - - self.subtensor.register = MagicMock(return_value=True) - self.subtensor.get_balance = MagicMock(return_value=self.balance) - - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=self.mock_neuron - ) - self.subtensor.get_stake_for_coldkey_and_hotkey = MagicMock( - return_value=Balance.from_tao(500) - ) - success = self.subtensor.unstake(self.wallet, amount=200) - self.assertTrue(success, msg="Unstake should succeed") - - def test_unstake_inclusion(self): - self.subtensor._do_unstake = MagicMock(return_value=True) - - self.subtensor.substrate.get_payment_info = MagicMock( - return_value={"partialFee": 100} - ) - - self.subtensor.register = MagicMock(return_value=True) - self.subtensor.get_balance = MagicMock(return_value=self.balance) - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=self.mock_neuron - ) - self.subtensor.get_stake_for_coldkey_and_hotkey = MagicMock( - return_value=Balance.from_tao(500) - ) - success = self.subtensor.unstake( - self.wallet, amount=200, wait_for_inclusion=True - ) - self.assertTrue(success, msg="Unstake should succeed") - - def test_unstake_failed(self): - self.subtensor._do_unstake = MagicMock(return_value=False) - - self.subtensor.register = MagicMock(return_value=True) - self.subtensor.get_balance = MagicMock(return_value=self.balance) - - 
self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=self.mock_neuron - ) - self.subtensor.get_stake_for_coldkey_and_hotkey = MagicMock( - return_value=Balance.from_tao(500) - ) - fail = self.subtensor.unstake(self.wallet, amount=200, wait_for_inclusion=True) - self.assertFalse(fail, msg="Unstake should fail") - - def test_stake(self): - self.subtensor._do_stake = MagicMock(return_value=True) - - self.subtensor.substrate.get_payment_info = MagicMock( - return_value={"partialFee": 100} - ) - - self.subtensor.register = MagicMock(return_value=True) - self.subtensor.get_balance = MagicMock(return_value=self.balance) - - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=self.mock_neuron - ) - self.subtensor.get_stake_for_coldkey_and_hotkey = MagicMock( - return_value=Balance.from_tao(500) - ) - self.subtensor.get_hotkey_owner = MagicMock( - return_value=self.wallet.coldkeypub.ss58_address - ) - success = self.subtensor.add_stake(self.wallet, amount=200) - self.assertTrue(success, msg="Stake should succeed") - - def test_stake_inclusion(self): - self.subtensor._do_stake = MagicMock(return_value=True) - - self.subtensor.substrate.get_payment_info = MagicMock( - return_value={"partialFee": 100} - ) - - self.subtensor.register = MagicMock(return_value=True) - self.subtensor.get_balance = MagicMock(return_value=self.balance) - - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=self.mock_neuron - ) - self.subtensor.get_stake_for_coldkey_and_hotkey = MagicMock( - return_value=Balance.from_tao(500) - ) - self.subtensor.get_hotkey_owner = MagicMock( - return_value=self.wallet.coldkeypub.ss58_address - ) - success = self.subtensor.add_stake( - self.wallet, amount=200, wait_for_inclusion=True - ) - self.assertTrue(success, msg="Stake should succeed") - - def test_stake_failed(self): - self.subtensor._do_stake = MagicMock(return_value=False) - - self.subtensor.substrate.get_payment_info = MagicMock( - 
return_value={"partialFee": 100} - ) - - self.subtensor.register = MagicMock(return_value=True) - self.subtensor.get_balance = MagicMock(return_value=Balance.from_rao(0)) - - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=self.mock_neuron - ) - self.subtensor.get_stake_for_coldkey_and_hotkey = MagicMock( - return_value=Balance.from_tao(500) - ) - self.subtensor.get_hotkey_owner = MagicMock( - return_value=self.wallet.coldkeypub.ss58_address - ) - fail = self.subtensor.add_stake( - self.wallet, amount=200, wait_for_inclusion=True - ) - self.assertFalse(fail, msg="Stake should fail") - - def test_transfer(self): - fake_coldkey = _get_mock_coldkey(1) - - self.subtensor._do_transfer = MagicMock(return_value=(True, "0x", None)) - self.subtensor.register = MagicMock(return_value=True) - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=self.mock_neuron - ) - self.subtensor.get_balance = MagicMock(return_value=self.balance) - success = self.subtensor.transfer( - self.wallet, - fake_coldkey, - amount=200, - ) - self.assertTrue(success, msg="Transfer should succeed") - - def test_transfer_inclusion(self): - fake_coldkey = _get_mock_coldkey(1) - self.subtensor._do_transfer = MagicMock(return_value=(True, "0x", None)) - self.subtensor.register = MagicMock(return_value=True) - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=self.mock_neuron - ) - self.subtensor.get_balance = MagicMock(return_value=self.balance) - - success = self.subtensor.transfer( - self.wallet, fake_coldkey, amount=200, wait_for_inclusion=True - ) - self.assertTrue(success, msg="Transfer should succeed") - - def test_transfer_failed(self): - fake_coldkey = _get_mock_coldkey(1) - self.subtensor._do_transfer = MagicMock( - return_value=(False, None, "Mock failure message") - ) - - fail = self.subtensor.transfer( - self.wallet, fake_coldkey, amount=200, wait_for_inclusion=True - ) - self.assertFalse(fail, msg="Transfer should 
fail") - - def test_transfer_invalid_dest(self): - fake_coldkey = _get_mock_coldkey(1) - - fail = self.subtensor.transfer( - self.wallet, - fake_coldkey[:-1], # invalid dest - amount=200, - wait_for_inclusion=True, - ) - self.assertFalse(fail, msg="Transfer should fail because of invalid dest") - - def test_transfer_dest_as_bytes(self): - fake_coldkey = _get_mock_coldkey(1) - self.subtensor._do_transfer = MagicMock(return_value=(True, "0x", None)) - - self.subtensor.register = MagicMock(return_value=True) - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=self.mock_neuron - ) - self.subtensor.get_balance = MagicMock(return_value=self.balance) - - dest_as_bytes: bytes = Keypair(fake_coldkey).public_key - success = self.subtensor.transfer( - self.wallet, - dest_as_bytes, # invalid dest - amount=200, - wait_for_inclusion=True, - ) - self.assertTrue(success, msg="Transfer should succeed") - - def test_set_weights(self): - chain_weights = [0] - - class success: - def __init__(self): - self.is_success = True - - def process_events(self): - return True - - self.subtensor.set_weights = MagicMock(return_value=True) - self.subtensor._do_set_weights = MagicMock(return_value=(True, None)) - - success = self.subtensor.set_weights( - wallet=self.wallet, - netuid=3, - uids=[1], - weights=chain_weights, - ) - assert success == True - - def test_set_weights_inclusion(self): - chain_weights = [0] - self.subtensor._do_set_weights = MagicMock(return_value=(True, None)) - self.subtensor.set_weights = MagicMock(return_value=True) - - success = self.subtensor.set_weights( - wallet=self.wallet, - netuid=1, - uids=[1], - weights=chain_weights, - wait_for_inclusion=True, - ) - assert success == True - - def test_set_weights_failed(self): - chain_weights = [0] - self.subtensor._do_set_weights = MagicMock( - return_value=(False, "Mock failure message") - ) - self.subtensor.set_weights = MagicMock(return_value=False) - - fail = self.subtensor.set_weights( - 
wallet=self.wallet, - netuid=3, - uids=[1], - weights=chain_weights, - wait_for_inclusion=True, - ) - assert fail == False - - def test_commit_weights(self): - weights = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) - uids = np.array([1, 2, 3, 4], dtype=np.int64) - salt = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int64) - weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - commit_hash = bittensor.utils.weight_utils.generate_weight_hash( - address=self.wallet.hotkey.ss58_address, - netuid=3, - uids=weight_uids, - values=weight_vals, - salt=salt.tolist(), - version_key=0, - ) - - self.subtensor.commit_weights = MagicMock( - return_value=(True, "Successfully committed weights.") - ) - self.subtensor._do_commit_weights = MagicMock(return_value=(True, None)) - - success, message = self.subtensor.commit_weights( - wallet=self.wallet, netuid=3, uids=uids, weights=weights, salt=salt - ) - assert success is True - assert message == "Successfully committed weights." - - def test_commit_weights_inclusion(self): - weights = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) - uids = np.array([1, 2, 3, 4], dtype=np.int64) - salt = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int64) - - weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - - commit_hash = bittensor.utils.weight_utils.generate_weight_hash( - address=self.wallet.hotkey.ss58_address, - netuid=1, - uids=weight_uids, - values=weight_vals, - salt=salt.tolist(), - version_key=0, - ) - - self.subtensor._do_commit_weights = MagicMock(return_value=(True, None)) - self.subtensor.commit_weights = MagicMock( - return_value=(True, "Successfully committed weights.") - ) - - success, message = self.subtensor.commit_weights( - wallet=self.wallet, - netuid=1, - uids=uids, - weights=weights, - salt=salt, - wait_for_inclusion=True, - ) - assert success is True - assert message == "Successfully committed weights." 
- - def test_commit_weights_failed(self): - weights = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) - uids = np.array([1, 2, 3, 4], dtype=np.int64) - salt = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int64) - - weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - - commit_hash = bittensor.utils.weight_utils.generate_weight_hash( - address=self.wallet.hotkey.ss58_address, - netuid=3, - uids=weight_uids, - values=weight_vals, - salt=salt.tolist(), - version_key=0, - ) - - self.subtensor._do_commit_weights = MagicMock( - return_value=(False, "Mock failure message") - ) - self.subtensor.commit_weights = MagicMock( - return_value=(False, "Mock failure message") - ) - - success, message = self.subtensor.commit_weights( - wallet=self.wallet, - netuid=3, - uids=uids, - weights=weights, - salt=salt, - wait_for_inclusion=True, - ) - assert success is False - assert message == "Mock failure message" - - def test_reveal_weights(self): - weights = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) - uids = np.array([1, 2, 3, 4], dtype=np.int64) - salt = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int64) - - self.subtensor.reveal_weights = MagicMock( - return_value=(True, "Successfully revealed weights.") - ) - self.subtensor._do_reveal_weights = MagicMock(return_value=(True, None)) - - success, message = self.subtensor.reveal_weights( - wallet=self.wallet, - netuid=3, - uids=uids, - weights=weights, - salt=salt, - version_key=0, - ) - assert success is True - assert message == "Successfully revealed weights." 
- - def test_reveal_weights_inclusion(self): - weights = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) - uids = np.array([1, 2, 3, 4], dtype=np.int64) - salt = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int64) - - self.subtensor._do_reveal_weights = MagicMock(return_value=(True, None)) - self.subtensor.reveal_weights = MagicMock( - return_value=(True, "Successfully revealed weights.") - ) - - success, message = self.subtensor.reveal_weights( - wallet=self.wallet, - netuid=1, - uids=uids, - weights=weights, - salt=salt, - version_key=0, - wait_for_inclusion=True, - ) - assert success is True - assert message == "Successfully revealed weights." - - def test_reveal_weights_failed(self): - weights = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) - uids = np.array([1, 2, 3, 4], dtype=np.int64) - salt = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int64) - - self.subtensor._do_reveal_weights = MagicMock( - return_value=(False, "Mock failure message") - ) - self.subtensor.reveal_weights = MagicMock( - return_value=(False, "Mock failure message") - ) - - success, message = self.subtensor.reveal_weights( - wallet=self.wallet, - netuid=3, - uids=uids, - weights=weights, - salt=salt, - version_key=0, - wait_for_inclusion=True, - ) - assert success is False - assert message == "Mock failure message" - - def test_commit_and_reveal_weights(self): - weights = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) - uids = np.array([1, 2, 3, 4], dtype=np.int64) - salt = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int64) - version_key = 0 - - # Mock the commit_weights and reveal_weights functions - self.subtensor.commit_weights = MagicMock( - return_value=(True, "Successfully committed weights.") - ) - self.subtensor._do_commit_weights = MagicMock(return_value=(True, None)) - self.subtensor.reveal_weights = MagicMock( - return_value=(True, "Successfully revealed weights.") - ) - self.subtensor._do_reveal_weights = MagicMock(return_value=(True, None)) - - # Commit weights - 
commit_success, commit_message = self.subtensor.commit_weights( - wallet=self.wallet, - netuid=3, - uids=uids, - weights=weights, - salt=salt, - ) - assert commit_success is True - assert commit_message == "Successfully committed weights." - - # Reveal weights - reveal_success, reveal_message = self.subtensor.reveal_weights( - wallet=self.wallet, - netuid=3, - uids=uids, - weights=weights, - salt=salt, - version_key=version_key, - ) - assert reveal_success is True - assert reveal_message == "Successfully revealed weights." - - def test_commit_and_reveal_weights_inclusion(self): - weights = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) - uids = np.array([1, 2, 3, 4], dtype=np.int64) - salt = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int64) - version_key = 0 - - # Mock the commit_weights and reveal_weights functions - self.subtensor.commit_weights = MagicMock( - return_value=(True, "Successfully committed weights.") - ) - self.subtensor._do_commit_weights = MagicMock(return_value=(True, None)) - self.subtensor.reveal_weights = MagicMock( - return_value=(True, "Successfully revealed weights.") - ) - self.subtensor._do_reveal_weights = MagicMock(return_value=(True, None)) - - # Commit weights with wait_for_inclusion - commit_success, commit_message = self.subtensor.commit_weights( - wallet=self.wallet, - netuid=1, - uids=uids, - weights=weights, - salt=salt, - wait_for_inclusion=True, - ) - assert commit_success is True - assert commit_message == "Successfully committed weights." - - # Reveal weights with wait_for_inclusion - reveal_success, reveal_message = self.subtensor.reveal_weights( - wallet=self.wallet, - netuid=1, - uids=uids, - weights=weights, - salt=salt, - version_key=version_key, - wait_for_inclusion=True, - ) - assert reveal_success is True - assert reveal_message == "Successfully revealed weights." 
- - def test_get_balance(self): - fake_coldkey = _get_mock_coldkey(0) - balance = self.subtensor.get_balance(address=fake_coldkey) - assert type(balance) == bittensor.utils.balance.Balance - - def test_get_balances(self): - balances = self.subtensor.get_balances() - assert type(balances) == dict - for i in balances: - assert type(balances[i]) == bittensor.utils.balance.Balance - - def test_get_uid_by_hotkey_on_subnet(self): - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - mock_hotkey_kp = _get_mock_keypair(100, self.id()) - - # Register on subnet 3 - mock_uid = self.subtensor.force_register_neuron( - netuid=3, - hotkey=mock_hotkey_kp.ss58_address, - coldkey=mock_coldkey_kp.ss58_address, - ) - - uid = self.subtensor.get_uid_for_hotkey_on_subnet( - mock_hotkey_kp.ss58_address, netuid=3 - ) - self.assertIsInstance( - uid, int, msg="get_uid_for_hotkey_on_subnet should return an int" - ) - self.assertEqual( - uid, - mock_uid, - msg="get_uid_for_hotkey_on_subnet should return the correct uid", - ) - - def test_is_hotkey_registered(self): - mock_coldkey_kp = _get_mock_keypair(0, self.id()) - mock_hotkey_kp = _get_mock_keypair(100, self.id()) - - # Register on subnet 3 - _ = self.subtensor.force_register_neuron( - netuid=3, - hotkey=mock_hotkey_kp.ss58_address, - coldkey=mock_coldkey_kp.ss58_address, - ) - - registered = self.subtensor.is_hotkey_registered( - mock_hotkey_kp.ss58_address, netuid=3 - ) - self.assertTrue(registered, msg="Hotkey should be registered") - - def test_is_hotkey_registered_not_registered(self): - mock_hotkey_kp = _get_mock_keypair(100, self.id()) - - # Do not register on subnet 3 - - registered = self.subtensor.is_hotkey_registered( - mock_hotkey_kp.ss58_address, netuid=3 - ) - self.assertFalse(registered, msg="Hotkey should not be registered") - - def test_registration_multiprocessed_already_registered(self): - workblocks_before_is_registered = random.randint(5, 10) - # return False each work block but return True after a random number of 
blocks - is_registered_return_values = ( - [False for _ in range(workblocks_before_is_registered)] - + [True] - + [True, False] - ) - # this should pass the initial False check in the subtensor class and then return True because the neuron is already registered - - mock_neuron = MagicMock() - mock_neuron.is_null = True - - # patch solution queue to return None - with patch( - "multiprocessing.queues.Queue.get", return_value=None - ) as mock_queue_get: - # patch time queue get to raise Empty exception - with patch( - "multiprocessing.queues.Queue.get_nowait", side_effect=QueueEmpty - ) as mock_queue_get_nowait: - wallet = _get_mock_wallet( - hotkey=_get_mock_keypair(0, self.id()), - coldkey=_get_mock_keypair(1, self.id()), - ) - self.subtensor.is_hotkey_registered = MagicMock( - side_effect=is_registered_return_values - ) - - self.subtensor.difficulty = MagicMock(return_value=1) - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - side_effect=mock_neuron - ) - self.subtensor._do_pow_register = MagicMock(return_value=(True, None)) - - with patch("bittensor.__console__.status") as mock_set_status: - # Need to patch the console status to avoid opening a parallel live display - mock_set_status.__enter__ = MagicMock(return_value=True) - mock_set_status.__exit__ = MagicMock(return_value=True) - - # should return True - assert self.subtensor.register( - wallet=wallet, netuid=3, num_processes=3, update_interval=5 - ) - - # calls until True and once again before exiting subtensor class - # This assertion is currently broken when difficulty is too low - assert ( - self.subtensor.is_hotkey_registered.call_count - == workblocks_before_is_registered + 2 - ) - - def test_registration_partly_failed(self): - do_pow_register_mock = MagicMock( - side_effect=[(False, "Failed"), (False, "Failed"), (True, None)] - ) - - def is_registered_side_effect(*args, **kwargs): - nonlocal do_pow_register_mock - return do_pow_register_mock.call_count < 3 - - current_block = [i for i in 
range(0, 100)] - - wallet = _get_mock_wallet( - hotkey=_get_mock_keypair(0, self.id()), - coldkey=_get_mock_keypair(1, self.id()), - ) - - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=bittensor.NeuronInfo.get_null_neuron() - ) - self.subtensor.is_hotkey_registered = MagicMock( - side_effect=is_registered_side_effect - ) - - self.subtensor.difficulty = MagicMock(return_value=1) - self.subtensor.get_current_block = MagicMock(side_effect=current_block) - self.subtensor._do_pow_register = do_pow_register_mock - - # should return True - self.assertTrue( - self.subtensor.register( - wallet=wallet, netuid=3, num_processes=3, update_interval=5 - ), - msg="Registration should succeed", - ) - - def test_registration_failed(self): - is_registered_return_values = [False for _ in range(100)] - current_block = [i for i in range(0, 100)] - mock_neuron = MagicMock() - mock_neuron.is_null = True - - with patch( - "bittensor.extrinsics.registration.create_pow", return_value=None - ) as mock_create_pow: - wallet = _get_mock_wallet( - hotkey=_get_mock_keypair(0, self.id()), - coldkey=_get_mock_keypair(1, self.id()), - ) - - self.subtensor.is_hotkey_registered = MagicMock( - side_effect=is_registered_return_values - ) - - self.subtensor.get_current_block = MagicMock(side_effect=current_block) - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=mock_neuron - ) - self.subtensor.substrate.get_block_hash = MagicMock( - return_value="0x" + "0" * 64 - ) - self.subtensor._do_pow_register = MagicMock(return_value=(False, "Failed")) - - # should return True - self.assertIsNot( - self.subtensor.register(wallet=wallet, netuid=3), - True, - msg="Registration should fail", - ) - self.assertEqual(mock_create_pow.call_count, 3) - - def test_registration_stale_then_continue(self): - # verify that after a stale solution, the solve will continue without exiting - - class ExitEarly(Exception): - pass - - mock_is_stale = MagicMock(side_effect=[True, 
False]) - - mock_do_pow_register = MagicMock(side_effect=ExitEarly()) - - mock_subtensor_self = MagicMock( - neuron_for_pubkey=MagicMock( - return_value=MagicMock(is_null=True) - ), # not registered - _do_pow_register=mock_do_pow_register, - substrate=MagicMock( - get_block_hash=MagicMock(return_value="0x" + "0" * 64), - ), - ) - - mock_wallet = MagicMock() - - mock_create_pow = MagicMock(return_value=MagicMock(is_stale=mock_is_stale)) - - with patch("bittensor.extrinsics.registration.create_pow", mock_create_pow): - # should create a pow and check if it is stale - # then should create a new pow and check if it is stale - # then should enter substrate and exit early because of test - self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( - return_value=bittensor.NeuronInfo.get_null_neuron() - ) - with pytest.raises(ExitEarly): - bittensor.subtensor.register(mock_subtensor_self, mock_wallet, netuid=3) - self.assertEqual( - mock_create_pow.call_count, 2, msg="must try another pow after stale" - ) - self.assertEqual(mock_is_stale.call_count, 2) - self.assertEqual( - mock_do_pow_register.call_count, - 1, - msg="only tries to submit once, then exits", - ) - - def test_defaults_to_finney(self): - sub = bittensor.subtensor() - assert sub.network == "finney" - assert sub.chain_endpoint == bittensor.__finney_entrypoint__ - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/pytest.ini b/tests/pytest.ini deleted file mode 100644 index 17ba4b865d..0000000000 --- a/tests/pytest.ini +++ /dev/null @@ -1,3 +0,0 @@ -[pytest] -filterwarnings = - ignore::DeprecationWarning:pkg_resources.*: \ No newline at end of file diff --git a/tests/unit_tests/__init__.py b/tests/unit_tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit_tests/conftest.py b/tests/unit_tests/conftest.py deleted file mode 100644 index 90b6f25748..0000000000 --- a/tests/unit_tests/conftest.py +++ /dev/null @@ -1,13 +0,0 @@ -import pytest -from 
aioresponses import aioresponses - - -@pytest.fixture -def force_legacy_torch_compat_api(monkeypatch): - monkeypatch.setenv("USE_TORCH", "1") - - -@pytest.fixture -def mock_aioresponse(): - with aioresponses() as m: - yield m diff --git a/tests/unit_tests/extrinsics/test_delegation.py b/tests/unit_tests/extrinsics/test_delegation.py deleted file mode 100644 index 42dcf4e706..0000000000 --- a/tests/unit_tests/extrinsics/test_delegation.py +++ /dev/null @@ -1,459 +0,0 @@ -import pytest -from unittest.mock import MagicMock, patch -from bittensor.subtensor import Subtensor -from bittensor.wallet import wallet as Wallet -from bittensor.utils.balance import Balance -from bittensor.extrinsics.delegation import ( - nominate_extrinsic, - delegate_extrinsic, - undelegate_extrinsic, -) -from bittensor.errors import ( - NominationError, - NotDelegateError, - NotRegisteredError, - StakeError, -) - - -@pytest.fixture -def mock_subtensor(): - mock = MagicMock(spec=Subtensor) - mock.network = "magic_mock" - return mock - - -@pytest.fixture -def mock_wallet(): - mock = MagicMock(spec=Wallet) - mock.hotkey.ss58_address = "fake_hotkey_address" - mock.coldkey.ss58_address = "fake_coldkey_address" - mock.coldkey = MagicMock() - mock.hotkey = MagicMock() - mock.name = "fake_wallet_name" - mock.hotkey_str = "fake_hotkey_str" - return mock - - -@pytest.mark.parametrize( - "already_delegate, nomination_success, raises_exception, expected_result", - [ - (False, True, None, True), # Successful nomination - (True, None, None, False), # Already a delegate - (False, None, NominationError, False), # Failure - Nomination error - (False, None, ValueError, False), # Failure - ValueError - ], - ids=[ - "success-nomination-done", - "failure-already-delegate", - "failure-nomination-error", - "failure-value-error", - ], -) -def test_nominate_extrinsic( - mock_subtensor, - mock_wallet, - already_delegate, - nomination_success, - raises_exception, - expected_result, -): - # Arrange - with patch.object( - 
mock_subtensor, "is_hotkey_delegate", return_value=already_delegate - ), patch.object( - mock_subtensor, "_do_nominate", return_value=nomination_success - ) as mock_nominate: - if raises_exception: - mock_subtensor._do_nominate.side_effect = raises_exception - - # Act - result = nominate_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - wait_for_finalization=False, - wait_for_inclusion=True, - ) - # Assert - assert result == expected_result - - if not already_delegate and nomination_success is not None: - mock_nominate.assert_called_once_with( - wallet=mock_wallet, wait_for_inclusion=True, wait_for_finalization=False - ) - - -@pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, is_delegate, prompt_response, stake_amount, balance_sufficient, transaction_success, raises_error, expected_result, delegate_called", - [ - (True, False, True, True, 100, True, True, None, True, True), # Success case - ( - False, - False, - True, - True, - 100, - True, - True, - None, - True, - True, - ), # Success case - no wait - ( - True, - False, - True, - True, - None, - True, - True, - None, - True, - True, - ), # Success case - all stake - ( - True, - False, - True, - True, - 0.000000100, - True, - True, - None, - True, - True, - ), # Success case - below cutoff threshold - ( - True, - False, - True, - True, - Balance.from_tao(1), - True, - True, - None, - True, - True, - ), # Success case - from Tao - ( - True, - False, - False, - None, - 100, - True, - False, - NotDelegateError, - False, - False, - ), # Not a delegate error - ( - True, - False, - True, - True, - 200, - False, - False, - None, - False, - False, - ), # Insufficient balance - ( - True, - False, - True, - False, - 100, - True, - True, - None, - False, - False, - ), # User declines prompt - ( - True, - False, - True, - True, - 100, - True, - False, - None, - False, - True, - ), # Transaction fails - ( - True, - False, - True, - True, - 100, - True, - False, - NotRegisteredError, - False, - 
True, - ), # Raises a NotRegisteredError - ( - True, - False, - True, - True, - 100, - True, - False, - StakeError, - False, - True, - ), # Raises a StakeError - ], - ids=[ - "success-delegate", - "success-no-wait", - "success-all-stake", - "success-below-existential-threshold", - "success-from-tao", - "failure-not-delegate", - "failure-low-balance", - "failure-prompt-declined", - "failure-transaction-failed", - "failure-NotRegisteredError", - "failure-StakeError", - ], -) -def test_delegate_extrinsic( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - is_delegate, - prompt_response, - stake_amount, - balance_sufficient, - transaction_success, - raises_error, - expected_result, - delegate_called, -): - # Arrange - wallet_balance = Balance.from_tao(500) - wallet_insufficient_balance = Balance.from_tao(0.002) - - with patch("rich.prompt.Confirm.ask", return_value=prompt_response), patch.object( - mock_subtensor, - "get_balance", - return_value=wallet_balance - if balance_sufficient - else wallet_insufficient_balance, - ), patch.object( - mock_subtensor, "is_hotkey_delegate", return_value=is_delegate - ), patch.object( - mock_subtensor, "_do_delegation", return_value=transaction_success - ) as mock_delegate: - if raises_error: - mock_delegate.side_effect = raises_error - - # Act - if raises_error == NotDelegateError: - with pytest.raises(raises_error): - result = delegate_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - delegate_ss58=mock_wallet.hotkey.ss58_address, - amount=stake_amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=True, - ) - else: - result = delegate_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - delegate_ss58=mock_wallet.hotkey.ss58_address, - amount=stake_amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=True, - ) - # Assert - assert result == expected_result - - if 
delegate_called: - if stake_amount is None: - called_stake_amount = wallet_balance - elif isinstance(stake_amount, Balance): - called_stake_amount = stake_amount - else: - called_stake_amount = Balance.from_tao(stake_amount) - - if called_stake_amount > Balance.from_rao(1000): - called_stake_amount -= Balance.from_rao(1000) - - mock_delegate.assert_called_once_with( - wallet=mock_wallet, - delegate_ss58=mock_wallet.hotkey.ss58_address, - amount=called_stake_amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - -@pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, is_delegate, prompt_response, unstake_amount, current_stake, transaction_success, raises_error, expected_result", - [ - (True, False, True, True, 50, 100, True, None, True), # Success case - (False, False, True, True, 50, 100, True, None, True), # Success case - no wait - ( - False, - False, - True, - True, - Balance.from_tao(1), - 100, - True, - None, - True, - ), # Success case - from tao - (True, False, True, True, None, 100, True, None, True), # Success - unstake all - ( - True, - False, - True, - True, - 1000, - 1000, - False, - None, - False, - ), # Failure - transaction fails - ( - True, - False, - False, - None, - 100, - 120, - True, - NotDelegateError, - False, - ), # Not a delegate - (True, False, True, False, 100, 111, True, None, False), # User declines prompt - ( - True, - False, - True, - True, - 100, - 90, - True, - None, - False, - ), # Insufficient stake to unstake - ( - True, - False, - True, - True, - 100, - 100, - False, - StakeError, - False, - ), # StakeError raised - ( - True, - False, - True, - True, - 100, - 100, - False, - NotRegisteredError, - False, - ), # NotRegisteredError raised - ], - ids=[ - "success-undelegate", - "success-undelegate-no-wait", - "success-from-tao", - "success-undelegate-all", - "failure-transaction-failed", - "failure-NotDelegateError", - "failure-prompt-declined", - 
"failure-insufficient-stake", - "failure--StakeError", - "failure-NotRegisteredError", - ], -) -def test_undelegate_extrinsic( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - is_delegate, - prompt_response, - unstake_amount, - current_stake, - transaction_success, - raises_error, - expected_result, -): - # Arrange - wallet_balance = Balance.from_tao(500) - - with patch("rich.prompt.Confirm.ask", return_value=prompt_response), patch.object( - mock_subtensor, "is_hotkey_delegate", return_value=is_delegate - ), patch.object( - mock_subtensor, "get_balance", return_value=wallet_balance - ), patch.object( - mock_subtensor, - "get_stake_for_coldkey_and_hotkey", - return_value=Balance.from_tao(current_stake), - ), patch.object( - mock_subtensor, "_do_undelegation", return_value=transaction_success - ) as mock_undelegate: - if raises_error: - mock_undelegate.side_effect = raises_error - - # Act - if raises_error == NotDelegateError: - with pytest.raises(raises_error): - result = undelegate_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - delegate_ss58=mock_wallet.hotkey.ss58_address, - amount=unstake_amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=True, - ) - else: - result = undelegate_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - delegate_ss58=mock_wallet.hotkey.ss58_address, - amount=unstake_amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=True, - ) - - # Assert - assert result == expected_result - - if expected_result and prompt_response: - if unstake_amount is None: - called_unstake_amount = Balance.from_tao(current_stake) - elif isinstance(unstake_amount, Balance): - called_unstake_amount = unstake_amount - else: - called_unstake_amount = Balance.from_tao(unstake_amount) - - mock_undelegate.assert_called_once_with( - wallet=mock_wallet, - delegate_ss58=mock_wallet.hotkey.ss58_address, - 
amount=called_unstake_amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) diff --git a/tests/unit_tests/extrinsics/test_init.py b/tests/unit_tests/extrinsics/test_init.py deleted file mode 100644 index 8e3caaf900..0000000000 --- a/tests/unit_tests/extrinsics/test_init.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Tests for bittensor/extrinsics/__ini__ module.""" - -from bittensor.utils import format_error_message - - -def test_format_error_message_with_right_error_message(): - # Prep - fake_error_message = { - "type": "SomeType", - "name": "SomeErrorName", - "docs": ["Some error description."], - } - - # Call - result = format_error_message(fake_error_message) - - # Assertions - - assert "SomeType" in result - assert "SomeErrorName" in result - assert "Some error description." in result - - -def test_format_error_message_with_empty_error_message(): - # Prep - fake_error_message = {} - - # Call - result = format_error_message(fake_error_message) - - # Assertions - - assert "UnknownType" in result - assert "UnknownError" in result - assert "Unknown Description" in result - - -def test_format_error_message_with_wrong_type_error_message(): - # Prep - fake_error_message = None - - # Call - result = format_error_message(fake_error_message) - - # Assertions - - assert "UnknownType" in result - assert "UnknownError" in result - assert "Unknown Description" in result diff --git a/tests/unit_tests/extrinsics/test_network.py b/tests/unit_tests/extrinsics/test_network.py deleted file mode 100644 index 67df030ffe..0000000000 --- a/tests/unit_tests/extrinsics/test_network.py +++ /dev/null @@ -1,157 +0,0 @@ -import pytest -from unittest.mock import MagicMock, patch -from bittensor.subtensor import Subtensor -from bittensor.wallet import wallet as Wallet -from bittensor.extrinsics.network import ( - set_hyperparameter_extrinsic, - register_subnetwork_extrinsic, -) - - -# Mock the bittensor and related modules to avoid real network calls and 
wallet operations -@pytest.fixture -def mock_subtensor(): - subtensor = MagicMock(spec=Subtensor) - subtensor.get_balance.return_value = 100 - subtensor.get_subnet_burn_cost.return_value = 10 - subtensor.substrate = MagicMock() - subtensor.substrate.get_block_hash = MagicMock(return_value="0x" + "0" * 64) - return subtensor - - -@pytest.fixture -def mock_wallet(): - wallet = MagicMock(spec=Wallet) - wallet.coldkeypub.ss58_address = "fake_address" - wallet.coldkey = MagicMock() - return wallet - - -@pytest.fixture -def mock_other_owner_wallet(): - wallet = MagicMock(spec=Wallet) - wallet.coldkeypub.ss58_address = "fake_other_owner" - return wallet - - -@pytest.mark.parametrize( - "test_id, wait_for_inclusion, wait_for_finalization, prompt, expected", - [ - ("happy-path-01", False, True, False, True), - ("happy-path-02", True, False, False, True), - ("happy-path-03", False, False, False, True), - ("happy-path-04", True, True, False, True), - ], -) -def test_register_subnetwork_extrinsic_happy_path( - mock_subtensor, - mock_wallet, - test_id, - wait_for_inclusion, - wait_for_finalization, - prompt, - expected, -): - # Arrange - mock_subtensor.substrate.submit_extrinsic.return_value.is_success = True - - # Act - result = register_subnetwork_extrinsic( - mock_subtensor, mock_wallet, wait_for_inclusion, wait_for_finalization, prompt - ) - - # Assert - assert result == expected - - -# Edge cases -@pytest.mark.parametrize( - "test_id, balance, burn_cost, prompt_input, expected", - [ - ("edge-case-01", 0, 10, False, False), # Balance is zero - ("edge-case-02", 10, 10, False, False), # Balance equals burn cost - ("edge-case-03", 9, 10, False, False), # Balance less than burn cost - ("edge-case-04", 100, 10, True, True), # User declines prompt - ], -) -def test_register_subnetwork_extrinsic_edge_cases( - mock_subtensor, - mock_wallet, - test_id, - balance, - burn_cost, - prompt_input, - expected, - monkeypatch, -): - # Arrange - mock_subtensor.get_balance.return_value = 
balance - mock_subtensor.get_subnet_burn_cost.return_value = burn_cost - monkeypatch.setattr("rich.prompt.Confirm.ask", lambda x: prompt_input) - - # Act - result = register_subnetwork_extrinsic(mock_subtensor, mock_wallet, prompt=True) - - # Assert - assert result == expected - - -@pytest.mark.parametrize( - "netuid, parameter, value, is_owner, wait_for_inclusion, wait_for_finalization, prompt, extrinsic_success, expected_result", - [ - # Success - no wait - (1, "serving_rate_limit", 49, True, False, False, False, True, True), - # Success - with wait - (1, "serving_rate_limit", 50, True, True, True, False, True, True), - # Failure - wallet doesn't own subnet - (1, "serving_rate_limit", 50, False, True, True, False, True, False), - # Failure - invalid hyperparameter - (1, None, 50, True, True, False, False, False, False), - ], - ids=[ - "success-no-wait", - "success-with-wait", - "failure-not-owner", - "failure-invalid-hyperparameter", - ], -) -def test_set_hyperparameter_extrinsic( - mock_subtensor, - mock_wallet, - mock_other_owner_wallet, - netuid, - parameter, - value, - is_owner, - wait_for_inclusion, - wait_for_finalization, - prompt, - extrinsic_success, - expected_result, -): - # Arrange - with patch.object( - mock_subtensor, - "get_subnet_owner", - return_value=mock_wallet.coldkeypub.ss58_address - if is_owner - else mock_other_owner_wallet.coldkeypub.ss58_address, - ), patch.object( - mock_subtensor.substrate, - "submit_extrinsic", - return_value=MagicMock(is_success=extrinsic_success), - ): - # Act - result = set_hyperparameter_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - netuid=netuid, - parameter=parameter, - value=value, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - # Assert - assert result == expected_result diff --git a/tests/unit_tests/extrinsics/test_prometheus.py b/tests/unit_tests/extrinsics/test_prometheus.py deleted file mode 100644 index 
7d9c975fbc..0000000000 --- a/tests/unit_tests/extrinsics/test_prometheus.py +++ /dev/null @@ -1,154 +0,0 @@ -import pytest -from unittest.mock import MagicMock, patch -import bittensor -from bittensor.subtensor import Subtensor -from bittensor.wallet import wallet as Wallet -from bittensor.extrinsics.prometheus import prometheus_extrinsic - - -# Mocking the bittensor and networking modules -@pytest.fixture -def mock_bittensor(): - with patch("bittensor.subtensor") as mock: - yield mock - - -@pytest.fixture -def mock_wallet(): - with patch("bittensor.wallet") as mock: - yield mock - - -@pytest.fixture -def mock_net(): - with patch("bittensor.utils.networking") as mock: - yield mock - - -@pytest.mark.parametrize( - "ip, port, netuid, wait_for_inclusion, wait_for_finalization, expected_result, test_id", - [ - (None, 9221, 0, False, True, True, "happy-path-default-ip"), - ("192.168.0.1", 9221, 0, False, True, True, "happy-path-custom-ip"), - (None, 9221, 0, True, False, True, "happy-path-wait-for-inclusion"), - (None, 9221, 0, False, False, True, "happy-path-no-waiting"), - ], -) -def test_prometheus_extrinsic_happy_path( - mock_bittensor, - mock_wallet, - mock_net, - ip, - port, - netuid, - wait_for_inclusion, - wait_for_finalization, - expected_result, - test_id, -): - # Arrange - subtensor = MagicMock(spec=Subtensor) - subtensor.network = "test_network" - wallet = MagicMock(spec=Wallet) - mock_net.get_external_ip.return_value = "192.168.0.1" - mock_net.ip_to_int.return_value = 3232235521 # IP in integer form - mock_net.ip_version.return_value = 4 - neuron = MagicMock() - neuron.is_null = False - neuron.prometheus_info.version = bittensor.__version_as_int__ - neuron.prometheus_info.ip = 3232235521 - neuron.prometheus_info.port = port - neuron.prometheus_info.ip_type = 4 - subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron - subtensor._do_serve_prometheus.return_value = (True, None) - - # Act - result = prometheus_extrinsic( - subtensor=subtensor, - 
wallet=wallet, - ip=ip, - port=port, - netuid=netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # Assert - assert result == expected_result, f"Test ID: {test_id}" - - -# Edge cases -@pytest.mark.parametrize( - "ip, port, netuid, test_id", - [ - ("0.0.0.0", 0, 0, "edge-case-min-values"), - ("255.255.255.255", 65535, 2147483647, "edge-case-max-values"), - ], -) -def test_prometheus_extrinsic_edge_cases( - mock_bittensor, mock_wallet, mock_net, ip, port, netuid, test_id -): - # Arrange - subtensor = MagicMock(spec=Subtensor) - subtensor.network = "test_network" - wallet = MagicMock(spec=Wallet) - mock_net.get_external_ip.return_value = ip - mock_net.ip_to_int.return_value = 3232235521 # IP in integer form - mock_net.ip_version.return_value = 4 - neuron = MagicMock() - neuron.is_null = True - subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron - subtensor._do_serve_prometheus.return_value = (True, None) - - # Act - result = prometheus_extrinsic( - subtensor=subtensor, - wallet=wallet, - ip=ip, - port=port, - netuid=netuid, - wait_for_inclusion=False, - wait_for_finalization=True, - ) - - # Assert - assert result == True, f"Test ID: {test_id}" - - -# Error cases -@pytest.mark.parametrize( - "ip, port, netuid, exception, test_id", - [ - ( - None, - 9221, - 0, - RuntimeError("Unable to attain your external ip."), - "error-case-no-external-ip", - ), - ], -) -def test_prometheus_extrinsic_error_cases( - mock_bittensor, mock_wallet, mock_net, ip, port, netuid, exception, test_id -): - # Arrange - subtensor = MagicMock(spec=Subtensor) - subtensor.network = "test_network" - wallet = MagicMock(spec=Wallet) - mock_net.get_external_ip.side_effect = exception - neuron = MagicMock() - neuron.is_null = True - subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron - subtensor._do_serve_prometheus.return_value = (True,) - - # Act & Assert - with pytest.raises(ValueError): - prometheus_extrinsic( - 
subtensor=subtensor, - wallet=wallet, - ip=ip, - port=port, - netuid=netuid, - wait_for_inclusion=False, - wait_for_finalization=True, - ) diff --git a/tests/unit_tests/extrinsics/test_registration.py b/tests/unit_tests/extrinsics/test_registration.py deleted file mode 100644 index 49805f0cf4..0000000000 --- a/tests/unit_tests/extrinsics/test_registration.py +++ /dev/null @@ -1,401 +0,0 @@ -import pytest -from unittest.mock import MagicMock, patch -from bittensor.subtensor import Subtensor -from bittensor.wallet import wallet as Wallet -from bittensor.utils.registration import POWSolution -from bittensor.extrinsics.registration import ( - MaxSuccessException, - MaxAttemptsException, - swap_hotkey_extrinsic, - burned_register_extrinsic, - register_extrinsic, -) - - -# Mocking external dependencies -@pytest.fixture -def mock_subtensor(): - mock = MagicMock(spec=Subtensor) - mock.network = "mock_network" - mock.substrate = MagicMock() - return mock - - -@pytest.fixture -def mock_wallet(): - mock = MagicMock(spec=Wallet) - mock.coldkeypub.ss58_address = "mock_address" - mock.coldkey = MagicMock() - mock.hotkey = MagicMock() - mock.hotkey.ss58_address = "fake_ss58_address" - return mock - - -@pytest.fixture -def mock_pow_solution(): - mock = MagicMock(spec=POWSolution) - mock.block_number = 123 - mock.nonce = 456 - mock.seal = [0, 1, 2, 3] - mock.is_stale.return_value = False - return mock - - -@pytest.fixture -def mock_new_wallet(): - mock = MagicMock(spec=Wallet) - mock.coldkeypub.ss58_address = "mock_address" - mock.coldkey = MagicMock() - mock.hotkey = MagicMock() - return mock - - -@pytest.mark.parametrize( - "wait_for_inclusion,wait_for_finalization,prompt,cuda,dev_id,tpb,num_processes,update_interval,log_verbose,expected", - [ - (False, True, False, False, 0, 256, None, None, False, True), - (True, False, False, True, [0], 256, 1, 100, True, False), - (False, False, False, True, 1, 512, 2, 200, False, False), - ], - ids=["happy-path-1", "happy-path-2", 
"happy-path-3"], -) -def test_run_faucet_extrinsic_happy_path( - mock_subtensor, - mock_wallet, - mock_pow_solution, - wait_for_inclusion, - wait_for_finalization, - prompt, - cuda, - dev_id, - tpb, - num_processes, - update_interval, - log_verbose, - expected, -): - with patch( - "bittensor.utils.registration._solve_for_difficulty_fast", - return_value=mock_pow_solution, - ) as mock_create_pow, patch("rich.prompt.Confirm.ask", return_value=True): - from bittensor.extrinsics.registration import run_faucet_extrinsic - - # Arrange - mock_subtensor.get_balance.return_value = 100 - mock_subtensor.substrate.submit_extrinsic.return_value.is_success = True - - # Act - result = run_faucet_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - cuda=cuda, - dev_id=dev_id, - tpb=tpb, - num_processes=num_processes, - update_interval=update_interval, - log_verbose=log_verbose, - ) - - # Assert - if isinstance(result, tuple): - assert result[0] == expected - if result[0] is True: - # Checks only if successful - mock_subtensor.substrate.submit_extrinsic.assert_called() - else: - assert result == expected - mock_subtensor.get_balance.assert_called_with("mock_address") - - -@pytest.mark.parametrize( - "cuda,torch_cuda_available,prompt_response,expected", - [ - ( - True, - False, - False, - False, - ), # ID: edge-case-1: CUDA required but not available, user declines prompt - ( - True, - False, - True, - False, - ), # ID: edge-case-2: CUDA required but not available, user accepts prompt but fails due to CUDA unavailability - ], - ids=["edge-case-1", "edge-case-2"], -) -def test_run_faucet_extrinsic_edge_cases( - mock_subtensor, mock_wallet, cuda, torch_cuda_available, prompt_response, expected -): - with patch("torch.cuda.is_available", return_value=torch_cuda_available), patch( - "rich.prompt.Confirm.ask", return_value=prompt_response - ): - from 
bittensor.extrinsics.registration import run_faucet_extrinsic - - # Act - result = run_faucet_extrinsic( - subtensor=mock_subtensor, wallet=mock_wallet, cuda=cuda - ) - - # Assert - assert result[0] == expected - - -@pytest.mark.parametrize( - "exception,expected", - [ - (KeyboardInterrupt, (True, "Done")), # ID: error-1: User interrupts the process - ( - MaxSuccessException, - (True, "Max successes reached: 3"), - ), # ID: error-2: Maximum successes reached - ( - MaxAttemptsException, - (False, "Max attempts reached: 3"), - ), # ID: error-3: Maximum attempts reached - ], - ids=["error-1", "error-2", "error-3"], -) -@pytest.mark.skip(reason="Waiting for fix to MaxAttemptedException") -def test_run_faucet_extrinsic_error_cases( - mock_subtensor, mock_wallet, mock_pow_solution, exception, expected -): - with patch( - "bittensor.utils.registration.create_pow", - side_effect=[mock_pow_solution, exception], - ): - from bittensor.extrinsics.registration import run_faucet_extrinsic - - # Act - result = run_faucet_extrinsic( - subtensor=mock_subtensor, wallet=mock_wallet, max_allowed_attempts=3 - ) - - # Assert - assert result == expected - - -@pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, prompt, swap_success, prompt_response, expected_result, test_id", - [ - # Happy paths - (False, True, False, True, None, True, "happy-path-finalization-true"), - (True, False, False, True, None, True, "happy-path-inclusion-true"), - (True, True, False, True, None, True, "edge-both-waits-true"), - # Error paths - (False, True, False, False, None, False, "swap_failed"), - (False, True, True, True, False, False, "error-prompt-declined"), - ], -) -def test_swap_hotkey_extrinsic( - mock_subtensor, - mock_wallet, - mock_new_wallet, - wait_for_inclusion, - wait_for_finalization, - prompt, - swap_success, - prompt_response, - expected_result, - test_id, -): - # Arrange - with patch.object( - mock_subtensor, - "_do_swap_hotkey", - return_value=(swap_success, "Mock error 
message"), - ): - with patch( - "rich.prompt.Confirm.ask", return_value=prompt_response - ) as mock_confirm: - # Act - result = swap_hotkey_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - new_wallet=mock_new_wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - # Assert - assert result == expected_result, f"Test failed for test_id: {test_id}" - - if prompt: - mock_confirm.assert_called_once() - else: - mock_confirm.assert_not_called() - - -@pytest.mark.parametrize( - "subnet_exists, neuron_is_null, recycle_success, prompt, prompt_response, is_registered, expected_result, test_id", - [ - # Happy paths - (True, False, None, False, None, None, True, "neuron-not-null"), - (True, True, True, True, True, True, True, "happy-path-wallet-registered"), - # Error paths - (False, True, False, False, None, None, False, "subnet-non-existence"), - (True, True, True, True, False, None, False, "prompt-declined"), - (True, True, False, True, True, False, False, "error-path-recycling-failed"), - (True, True, True, True, True, False, False, "error-path-not-registered"), - ], -) -def test_burned_register_extrinsic( - mock_subtensor, - mock_wallet, - subnet_exists, - neuron_is_null, - recycle_success, - prompt, - prompt_response, - is_registered, - expected_result, - test_id, -): - # Arrange - with patch.object( - mock_subtensor, "subnet_exists", return_value=subnet_exists - ), patch.object( - mock_subtensor, - "get_neuron_for_pubkey_and_subnet", - return_value=MagicMock(is_null=neuron_is_null), - ), patch.object( - mock_subtensor, - "_do_burned_register", - return_value=(recycle_success, "Mock error message"), - ), patch.object( - mock_subtensor, "is_hotkey_registered", return_value=is_registered - ), patch("rich.prompt.Confirm.ask", return_value=prompt_response) as mock_confirm: - # Act - result = burned_register_extrinsic( - subtensor=mock_subtensor, wallet=mock_wallet, netuid=123, prompt=True - ) - - # 
Assert - assert result == expected_result, f"Test failed for test_id: {test_id}" - - if prompt: - mock_confirm.assert_called_once() - else: - mock_confirm.assert_not_called() - - -@pytest.mark.parametrize( - "subnet_exists, neuron_is_null, prompt, prompt_response, cuda_available, expected_result, test_id", - [ - (False, True, True, True, True, False, "subnet-does-not-exist"), - (True, False, True, True, True, True, "neuron-already-registered"), - (True, True, True, False, True, False, "user-declines-prompt"), - (True, True, False, None, False, False, "cuda-unavailable"), - ], -) -def test_register_extrinsic_without_pow( - mock_subtensor, - mock_wallet, - subnet_exists, - neuron_is_null, - prompt, - prompt_response, - cuda_available, - expected_result, - test_id, -): - # Arrange - with patch.object( - mock_subtensor, "subnet_exists", return_value=subnet_exists - ), patch.object( - mock_subtensor, - "get_neuron_for_pubkey_and_subnet", - return_value=MagicMock(is_null=neuron_is_null), - ), patch("rich.prompt.Confirm.ask", return_value=prompt_response), patch( - "torch.cuda.is_available", return_value=cuda_available - ): - # Act - result = register_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - netuid=123, - wait_for_inclusion=True, - wait_for_finalization=True, - prompt=prompt, - max_allowed_attempts=3, - output_in_place=True, - cuda=True, - dev_id=0, - tpb=256, - num_processes=None, - update_interval=None, - log_verbose=False, - ) - - # Assert - assert result == expected_result, f"Test failed for test_id: {test_id}" - - -@pytest.mark.parametrize( - "pow_success, pow_stale, registration_success, cuda, hotkey_registered, expected_result, test_id", - [ - (True, False, True, False, False, True, "successful-with-valid-pow"), - (True, False, True, True, False, True, "successful-with-valid-cuda-pow"), - # Pow failed but key was registered already - (False, False, False, False, True, True, "hotkey-registered"), - # Pow was a success but registration failed 
with error 'key already registered' - (True, False, False, False, False, True, "registration-fail-key-registered"), - ], -) -def test_register_extrinsic_with_pow( - mock_subtensor, - mock_wallet, - mock_pow_solution, - pow_success, - pow_stale, - registration_success, - cuda, - hotkey_registered, - expected_result, - test_id, -): - # Arrange - with patch( - "bittensor.utils.registration._solve_for_difficulty_fast", - return_value=mock_pow_solution if pow_success else None, - ), patch( - "bittensor.utils.registration._solve_for_difficulty_fast_cuda", - return_value=mock_pow_solution if pow_success else None, - ), patch.object( - mock_subtensor, - "_do_pow_register", - return_value=(registration_success, "HotKeyAlreadyRegisteredInSubNet"), - ), patch("torch.cuda.is_available", return_value=cuda): - # Act - if pow_success: - mock_pow_solution.is_stale.return_value = pow_stale - - if not pow_success and hotkey_registered: - mock_subtensor.is_hotkey_registered = MagicMock( - return_value=hotkey_registered - ) - - result = register_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - netuid=123, - wait_for_inclusion=True, - wait_for_finalization=True, - prompt=False, - max_allowed_attempts=3, - output_in_place=True, - cuda=cuda, - dev_id=0, - tpb=256, - num_processes=None, - update_interval=None, - log_verbose=False, - ) - - # Assert - assert result == expected_result, f"Test failed for test_id: {test_id}" diff --git a/tests/unit_tests/extrinsics/test_root.py b/tests/unit_tests/extrinsics/test_root.py deleted file mode 100644 index b801f7b4e1..0000000000 --- a/tests/unit_tests/extrinsics/test_root.py +++ /dev/null @@ -1,308 +0,0 @@ -import pytest -from unittest.mock import MagicMock, patch -from bittensor.subtensor import Subtensor -from bittensor.extrinsics.root import ( - root_register_extrinsic, - set_root_weights_extrinsic, -) - - -@pytest.fixture -def mock_subtensor(): - mock = MagicMock(spec=Subtensor) - mock.network = "magic_mock" - return mock - - 
-@pytest.fixture -def mock_wallet(): - mock = MagicMock() - mock.hotkey.ss58_address = "fake_hotkey_address" - return mock - - -@pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, hotkey_registered, registration_success, prompt, user_response, expected_result", - [ - ( - False, - True, - [True, None], - True, - True, - True, - True, - ), # Already registered after attempt - ( - False, - True, - [False, True], - True, - True, - True, - True, - ), # Registration succeeds with user confirmation - (False, True, [False, False], False, False, None, None), # Registration fails - ( - False, - True, - [False, False], - True, - False, - None, - None, - ), # Registration succeeds but neuron not found - ( - False, - True, - [False, False], - True, - True, - False, - False, - ), # User declines registration - ], - ids=[ - "success-already-registered", - "success-registration-succeeds", - "failure-registration-failed", - "failure-neuron-not-found", - "failure-prompt-declined", - ], -) -def test_root_register_extrinsic( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - hotkey_registered, - registration_success, - prompt, - user_response, - expected_result, -): - # Arrange - mock_subtensor.is_hotkey_registered.side_effect = hotkey_registered - - with patch.object( - mock_subtensor, - "_do_root_register", - return_value=(registration_success, "Error registering"), - ) as mock_register, patch("rich.prompt.Confirm.ask", return_value=user_response): - # Act - result = root_register_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - # Assert - assert result == expected_result - - if not hotkey_registered[0] and user_response: - mock_register.assert_called_once() - - -@pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, netuids, weights, prompt, user_response, expected_success", - [ - (True, False, 
[1, 2], [0.5, 0.5], True, True, True), # Success - weights set - ( - False, - False, - [1, 2], - [0.5, 0.5], - False, - None, - True, - ), # Success - weights set no wait - ( - True, - False, - [1, 2], - [2000, 20], - True, - True, - True, - ), # Success - large value to be normalized - ( - True, - False, - [1, 2], - [2000, 0], - True, - True, - True, - ), # Success - single large value - ( - True, - False, - [1, 2], - [0.5, 0.5], - True, - False, - False, - ), # Failure - prompt declined - ( - True, - False, - [1, 2], - [0.5, 0.5], - False, - None, - False, - ), # Failure - setting weights failed - ( - True, - False, - [], - [], - None, - False, - False, - ), # Exception catched - ValueError 'min() arg is an empty sequence' - ], - ids=[ - "success-weights-set", - "success-not-wait", - "success-large-value", - "success-single-value", - "failure-user-declines", - "failure-setting-weights", - "failure-value-error-exception", - ], -) -def test_set_root_weights_extrinsic( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - netuids, - weights, - prompt, - user_response, - expected_success, -): - # Arrange - with patch.object( - mock_subtensor, - "_do_set_root_weights", - return_value=(expected_success, "Mock error"), - ), patch.object( - mock_subtensor, "min_allowed_weights", return_value=0 - ), patch.object(mock_subtensor, "max_weight_limit", return_value=1), patch( - "rich.prompt.Confirm.ask", return_value=user_response - ) as mock_confirm: - # Act - result = set_root_weights_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - netuids=netuids, - weights=weights, - version_key=0, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - # Assert - assert result == expected_success - if prompt: - mock_confirm.assert_called_once() - else: - mock_confirm.assert_not_called() - - -@pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, netuids, weights, prompt, 
user_response, expected_success", - [ - (True, False, [1, 2], [0.5, 0.5], True, True, True), # Success - weights set - ( - False, - False, - [1, 2], - [0.5, 0.5], - False, - None, - True, - ), # Success - weights set no wait - ( - True, - False, - [1, 2], - [2000, 20], - True, - True, - True, - ), # Success - large value to be normalized - ( - True, - False, - [1, 2], - [2000, 0], - True, - True, - True, - ), # Success - single large value - ( - True, - False, - [1, 2], - [0.5, 0.5], - True, - False, - False, - ), # Failure - prompt declined - ( - True, - False, - [1, 2], - [0.5, 0.5], - False, - None, - False, - ), # Failure - setting weights failed - ( - True, - False, - [], - [], - None, - False, - False, - ), # Exception catched - ValueError 'min() arg is an empty sequence' - ], - ids=[ - "success-weights-set", - "success-not-wait", - "success-large-value", - "success-single-value", - "failure-user-declines", - "failure-setting-weights", - "failure-value-error-exception", - ], -) -def test_set_root_weights_extrinsic_torch( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - netuids, - weights, - prompt, - user_response, - expected_success, - force_legacy_torch_compat_api, -): - test_set_root_weights_extrinsic( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - netuids, - weights, - prompt, - user_response, - expected_success, - ) diff --git a/tests/unit_tests/extrinsics/test_senate.py b/tests/unit_tests/extrinsics/test_senate.py deleted file mode 100644 index 66849efc5c..0000000000 --- a/tests/unit_tests/extrinsics/test_senate.py +++ /dev/null @@ -1,237 +0,0 @@ -import pytest -from unittest.mock import MagicMock, patch -from bittensor import subtensor, wallet -from bittensor.extrinsics.senate import ( - leave_senate_extrinsic, - register_senate_extrinsic, - vote_senate_extrinsic, -) - - -# Mocking external dependencies -@pytest.fixture -def mock_subtensor(): - mock = MagicMock(spec=subtensor) - 
mock.substrate = MagicMock() - return mock - - -@pytest.fixture -def mock_wallet(): - mock = MagicMock(spec=wallet) - mock.coldkey = MagicMock() - mock.hotkey = MagicMock() - mock.hotkey.ss58_address = "fake_hotkey_address" - mock.is_senate_member = None - return mock - - -# Parametrized test cases -@pytest.mark.parametrize( - "wait_for_inclusion,wait_for_finalization,prompt,response_success,is_registered,expected_result, test_id", - [ - # Happy path tests - (False, True, False, True, True, True, "happy-path-finalization-true"), - (True, False, False, True, True, True, "happy-path-inclusion-true"), - (False, False, False, True, True, True, "happy-path-no_wait"), - # Edge cases - (True, True, False, True, True, True, "edge-both-waits-true"), - # Error cases - (False, True, False, False, False, None, "error-finalization-failed"), - (True, False, False, False, False, None, "error-inclusion-failed"), - (False, True, True, True, False, False, "error-prompt-declined"), - ], -) -def test_register_senate_extrinsic( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - prompt, - response_success, - is_registered, - expected_result, - test_id, -): - # Arrange - with patch( - "bittensor.extrinsics.senate.Confirm.ask", return_value=not prompt - ), patch("bittensor.extrinsics.senate.time.sleep"), patch.object( - mock_subtensor.substrate, "compose_call" - ), patch.object(mock_subtensor.substrate, "create_signed_extrinsic"), patch.object( - mock_subtensor.substrate, - "submit_extrinsic", - return_value=MagicMock( - is_success=response_success, - process_events=MagicMock(), - error_message="error", - ), - ) as mock_submit_extrinsic, patch.object( - mock_wallet, "is_senate_member", return_value=is_registered - ): - # Act - result = register_senate_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - # Assert - assert result == 
expected_result, f"Test ID: {test_id}" - - -@pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, prompt, response_success, \ - vote, vote_in_ayes, vote_in_nays, expected_result, test_id", - [ - # Happy path tests - (False, True, False, True, True, True, False, True, "happy-finalization-aye"), - (True, False, False, True, False, False, True, True, "happy-inclusion-nay"), - (False, False, False, True, True, True, False, True, "happy-no-wait-aye"), - # Edge cases - (True, True, False, True, True, True, False, True, "edge-both-waits-true-aye"), - # Error cases - (True, False, False, False, True, False, False, None, "error-inclusion-failed"), - (True, False, True, True, True, True, False, False, "error-prompt-declined"), - ( - True, - False, - False, - True, - True, - False, - False, - None, - "error-no-vote-registered-aye", - ), - ( - False, - True, - False, - True, - False, - False, - False, - None, - "error-no-vote-registered-nay", - ), - ( - False, - True, - False, - False, - True, - False, - False, - None, - "error-finalization-failed", - ), - ], -) -def test_vote_senate_extrinsic( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - prompt, - vote, - response_success, - vote_in_ayes, - vote_in_nays, - expected_result, - test_id, -): - # Arrange - proposal_hash = "mock_hash" - proposal_idx = 123 - - with patch( - "bittensor.extrinsics.senate.Confirm.ask", return_value=not prompt - ), patch("bittensor.extrinsics.senate.time.sleep"), patch.object( - mock_subtensor.substrate, "compose_call" - ), patch.object(mock_subtensor.substrate, "create_signed_extrinsic"), patch.object( - mock_subtensor.substrate, - "submit_extrinsic", - return_value=MagicMock( - is_success=response_success, - process_events=MagicMock(), - error_message="error", - ), - ), patch.object( - mock_subtensor, - "get_vote_data", - return_value={ - "ayes": [mock_wallet.hotkey.ss58_address] if vote_in_ayes else [], - "nays": [mock_wallet.hotkey.ss58_address] 
if vote_in_nays else [], - }, - ): - # Act - result = vote_senate_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - proposal_hash=proposal_hash, - proposal_idx=proposal_idx, - vote=vote, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - # Assert - assert result == expected_result, f"Test ID: {test_id}" - - -# Parametrized test cases -@pytest.mark.parametrize( - "wait_for_inclusion,wait_for_finalization,prompt,response_success,is_registered,expected_result, test_id", - [ - # Happy path tests - (False, True, False, True, False, True, "happy-path-finalization-true"), - (True, False, False, True, False, True, "happy-path-inclusion-true"), - (False, False, False, True, False, True, "happy-path-no_wait"), - # Edge cases - (True, True, False, True, False, True, "edge-both-waits-true"), - # Error cases - (False, True, False, False, True, None, "error-finalization-failed"), - (True, False, False, False, True, None, "error-inclusion-failed"), - (False, True, True, True, False, False, "error-prompt-declined"), - ], -) -def test_leave_senate_extrinsic( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - prompt, - response_success, - is_registered, - expected_result, - test_id, -): - # Arrange - with patch( - "bittensor.extrinsics.senate.Confirm.ask", return_value=not prompt - ), patch("bittensor.extrinsics.senate.time.sleep"), patch.object( - mock_subtensor.substrate, "compose_call" - ), patch.object(mock_subtensor.substrate, "create_signed_extrinsic"), patch.object( - mock_subtensor.substrate, - "submit_extrinsic", - return_value=MagicMock( - is_success=response_success, - process_events=MagicMock(), - error_message="error", - ), - ), patch.object(mock_wallet, "is_senate_member", return_value=is_registered): - # Act - result = leave_senate_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - wait_for_inclusion=wait_for_inclusion, - 
wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - # Assert - assert result == expected_result, f"Test ID: {test_id}" diff --git a/tests/unit_tests/extrinsics/test_serving.py b/tests/unit_tests/extrinsics/test_serving.py deleted file mode 100644 index 7aa3ebf5b4..0000000000 --- a/tests/unit_tests/extrinsics/test_serving.py +++ /dev/null @@ -1,374 +0,0 @@ -import pytest - -from unittest.mock import MagicMock, patch -from bittensor.subtensor import Subtensor -from bittensor.wallet import wallet as Wallet -from bittensor.axon import axon as Axon -from bittensor.extrinsics.serving import ( - serve_extrinsic, - publish_metadata, - serve_axon_extrinsic, -) - - -@pytest.fixture -def mock_subtensor(): - mock_subtensor = MagicMock(spec=Subtensor) - mock_subtensor.network = "test_network" - mock_subtensor.substrate = MagicMock() - return mock_subtensor - - -@pytest.fixture -def mock_wallet(): - wallet = MagicMock(spec=Wallet) - wallet.hotkey.ss58_address = "hotkey_address" - wallet.coldkeypub.ss58_address = "coldkey_address" - return wallet - - -@pytest.fixture -def mock_axon(mock_wallet): - axon = MagicMock(spec=Axon) - axon.wallet = mock_wallet() - axon.external_port = 9221 - return axon - - -@pytest.mark.parametrize( - "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,", - [ - ( - "192.168.1.1", - 9221, - 1, - 0, - 0, - 0, - False, - True, - False, - True, - "happy-path-no-wait", - ), - ( - "192.168.1.2", - 9222, - 2, - 1, - 1, - 1, - True, - False, - False, - True, - "happy-path-wait-for-inclusion", - ), - ( - "192.168.1.3", - 9223, - 3, - 2, - 2, - 2, - False, - True, - True, - True, - "happy-path-wait-for-finalization-and-prompt", - ), - ], - ids=[ - "happy-path-no-wait", - "happy-path-wait-for-inclusion", - "happy-path-wait-for-finalization-and-prompt", - ], -) -def test_serve_extrinsic_happy_path( - mock_subtensor, - mock_wallet, - ip, - port, - protocol, - netuid, - placeholder1, - 
placeholder2, - wait_for_inclusion, - wait_for_finalization, - prompt, - expected, - test_id, -): - # Arrange - mock_subtensor._do_serve_axon.return_value = (True, "") - with patch("bittensor.extrinsics.serving.Confirm.ask", return_value=True): - # Act - result = serve_extrinsic( - mock_subtensor, - mock_wallet, - ip, - port, - protocol, - netuid, - placeholder1, - placeholder2, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) - - # Assert - assert result == expected, f"Test ID: {test_id}" - - -# Various edge cases -@pytest.mark.parametrize( - "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,", - [ - ( - "192.168.1.4", - 9224, - 4, - 3, - 3, - 3, - True, - True, - False, - True, - "edge_case_max_values", - ), - ], - ids=["edge-case-max-values"], -) -def test_serve_extrinsic_edge_cases( - mock_subtensor, - mock_wallet, - ip, - port, - protocol, - netuid, - placeholder1, - placeholder2, - wait_for_inclusion, - wait_for_finalization, - prompt, - expected, - test_id, -): - # Arrange - mock_subtensor._do_serve_axon.return_value = (True, "") - with patch("bittensor.extrinsics.serving.Confirm.ask", return_value=True): - # Act - result = serve_extrinsic( - mock_subtensor, - mock_wallet, - ip, - port, - protocol, - netuid, - placeholder1, - placeholder2, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) - - # Assert - assert result == expected, f"Test ID: {test_id}" - - -# Various error cases -@pytest.mark.parametrize( - "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected_error_message,test_id,", - [ - ( - "192.168.1.5", - 9225, - 5, - 4, - 4, - 4, - True, - True, - False, - False, - "error-case-failed-serve", - ), - ], - ids=["error-case-failed-serve"], -) -def test_serve_extrinsic_error_cases( - mock_subtensor, - mock_wallet, - ip, - port, - protocol, - netuid, - placeholder1, - placeholder2, - wait_for_inclusion, - 
wait_for_finalization, - prompt, - expected_error_message, - test_id, -): - # Arrange - mock_subtensor._do_serve_axon.return_value = (False, "Error serving axon") - with patch("bittensor.extrinsics.serving.Confirm.ask", return_value=True): - # Act - result = serve_extrinsic( - mock_subtensor, - mock_wallet, - ip, - port, - protocol, - netuid, - placeholder1, - placeholder2, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) - - # Assert - assert result == expected_error_message, f"Test ID: {test_id}" - - -@pytest.mark.parametrize( - "netuid, wait_for_inclusion, wait_for_finalization, prompt, external_ip, external_ip_success, serve_success, expected_result, test_id", - [ - # Happy path test - (1, False, True, False, "192.168.1.1", True, True, True, "happy-ext-ip"), - (1, False, True, True, None, True, True, True, "happy-net-external-ip"), - # Edge cases - (1, True, True, False, "192.168.1.1", True, True, True, "edge-case-wait"), - # Error cases - (1, False, True, False, None, False, True, False, "error-fetching-external-ip"), - ( - 1, - False, - True, - False, - "192.168.1.1", - True, - False, - False, - "error-serving-axon", - ), - ], - ids=[ - "happy-axon-external-ip", - "happy-net-external-ip", - "edge-case-wait", - "error-fetching-external-ip", - "error-serving-axon", - ], -) -def test_serve_axon_extrinsic( - mock_subtensor, - mock_axon, - netuid, - wait_for_inclusion, - wait_for_finalization, - prompt, - external_ip, - external_ip_success, - serve_success, - expected_result, - test_id, -): - mock_axon.external_ip = external_ip - # Arrange - with patch( - "bittensor.utils.networking.get_external_ip", - side_effect=Exception("Failed to fetch IP") - if not external_ip_success - else MagicMock(return_value="192.168.1.1"), - ), patch.object(mock_subtensor, "serve", return_value=serve_success): - # Act - if not external_ip_success: - with pytest.raises(RuntimeError): - result = serve_axon_extrinsic( - mock_subtensor, - netuid, - mock_axon, - 
wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - else: - result = serve_axon_extrinsic( - mock_subtensor, - netuid, - mock_axon, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - # Assert - assert result == expected_result, f"Test ID: {test_id}" - - -@pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, net_uid, type_u, data, response_success, expected_result, test_id", - [ - ( - True, - True, - 1, - "Sha256", - b"mock_bytes_data", - True, - True, - "happy-path-wait", - ), - ( - False, - False, - 1, - "Sha256", - b"mock_bytes_data", - True, - True, - "happy-path-no-wait", - ), - ], - ids=["happy-path-wait", "happy-path-no-wait"], -) -def test_publish_metadata( - mock_subtensor, - mock_wallet, - wait_for_inclusion, - wait_for_finalization, - net_uid, - type_u, - data, - response_success, - expected_result, - test_id, -): - # Arrange - with patch.object(mock_subtensor.substrate, "compose_call"), patch.object( - mock_subtensor.substrate, "create_signed_extrinsic" - ), patch.object( - mock_subtensor.substrate, - "submit_extrinsic", - return_value=MagicMock( - is_success=response_success, - process_events=MagicMock(), - error_message="error", - ), - ): - # Act - result = publish_metadata( - subtensor=mock_subtensor, - wallet=mock_wallet, - netuid=net_uid, - data_type=type_u, - data=data, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # Assert - assert result == expected_result, f"Test ID: {test_id}" diff --git a/tests/unit_tests/extrinsics/test_set_weights.py b/tests/unit_tests/extrinsics/test_set_weights.py deleted file mode 100644 index 68ce7acae9..0000000000 --- a/tests/unit_tests/extrinsics/test_set_weights.py +++ /dev/null @@ -1,102 +0,0 @@ -import torch -import pytest -from unittest.mock import MagicMock, patch -from bittensor import subtensor, wallet -from 
bittensor.extrinsics.set_weights import set_weights_extrinsic - - -@pytest.fixture -def mock_subtensor(): - mock = MagicMock(spec=subtensor) - mock.network = "mock_network" - return mock - - -@pytest.fixture -def mock_wallet(): - mock = MagicMock(spec=wallet) - return mock - - -@pytest.mark.parametrize( - "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, prompt, user_accepts, expected_success, expected_message", - [ - ( - [1, 2], - [0.5, 0.5], - 0, - True, - False, - True, - True, - True, - "Successfully set weights and Finalized.", - ), - ( - [1, 2], - [0.5, 0.4], - 0, - False, - False, - False, - True, - True, - "Not waiting for finalization or inclusion.", - ), - ([1, 2], [0.5, 0.5], 0, True, False, True, True, False, "Mock error message"), - ([1, 2], [0.5, 0.5], 0, True, True, True, False, False, "Prompt refused."), - ], - ids=[ - "happy-flow", - "not-waiting-finalization-inclusion", - "error-flow", - "prompt-refused", - ], -) -def test_set_weights_extrinsic( - mock_subtensor, - mock_wallet, - uids, - weights, - version_key, - wait_for_inclusion, - wait_for_finalization, - prompt, - user_accepts, - expected_success, - expected_message, -): - uids_tensor = torch.tensor(uids, dtype=torch.int64) - weights_tensor = torch.tensor(weights, dtype=torch.float32) - with patch( - "bittensor.utils.weight_utils.convert_weights_and_uids_for_emit", - return_value=(uids_tensor, weights_tensor), - ), patch("rich.prompt.Confirm.ask", return_value=user_accepts), patch.object( - mock_subtensor, - "_do_set_weights", - return_value=(expected_success, "Mock error message"), - ) as mock_do_set_weights: - result, message = set_weights_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - netuid=123, - uids=uids, - weights=weights, - version_key=version_key, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - assert result == expected_success, f"Test {expected_message} failed." 
- assert message == expected_message, f"Test {expected_message} failed." - if user_accepts is not False: - mock_do_set_weights.assert_called_once_with( - wallet=mock_wallet, - netuid=123, - uids=uids_tensor, - vals=weights_tensor, - version_key=version_key, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) diff --git a/tests/unit_tests/extrinsics/test_staking.py b/tests/unit_tests/extrinsics/test_staking.py deleted file mode 100644 index c3b888520b..0000000000 --- a/tests/unit_tests/extrinsics/test_staking.py +++ /dev/null @@ -1,551 +0,0 @@ -import pytest -from unittest.mock import patch, MagicMock -import bittensor -from bittensor.utils.balance import Balance -from bittensor.extrinsics.staking import ( - add_stake_extrinsic, - add_stake_multiple_extrinsic, -) -from bittensor.errors import NotDelegateError - - -# Mocking external dependencies -@pytest.fixture -def mock_subtensor(): - mock = MagicMock(spec=bittensor.subtensor) - mock.network = "mock_network" - return mock - - -@pytest.fixture -def mock_wallet(): - mock = MagicMock(spec=bittensor.wallet) - mock.hotkey.ss58_address = "5FHneW46..." - mock.coldkeypub.ss58_address = "5Gv8YYFu8..." - mock.hotkey_str = "mock_hotkey_str" - mock.name = "mock_wallet" - return mock - - -@pytest.fixture -def mock_other_owner_wallet(): - mock = MagicMock(spec=bittensor.wallet) - mock.hotkey.ss58_address = "11HneC46..." - mock.coldkeypub.ss58_address = "6Gv9ZZFu8..." 
- mock.hotkey_str = "mock_hotkey_str_other_owner" - mock.name = "mock_wallet_other_owner" - return mock - - -# Parametrized test cases -@pytest.mark.parametrize( - "hotkey_ss58, hotkey_owner, hotkey_delegate, amount, wait_for_inclusion, wait_for_finalization, prompt, user_accepts, expected_success, exception", - [ - # Simple staking to own hotkey, float - (None, True, None, 10.0, True, False, False, None, True, None), - # Simple staking to own hotkey, int - (None, True, None, 10, True, False, False, None, True, None), - # Not waiting for inclusion & finalization, own hotkey - ("5FHneW46...", True, None, 10.0, False, False, False, None, True, None), - # Prompt refused - (None, True, None, 10.0, True, False, True, False, False, None), - # Stake all - (None, True, None, None, True, False, False, None, True, None), - # Insufficient balance - (None, True, None, 110, True, False, False, None, False, None), - # No deduction scenario - (None, True, None, 0.000000100, True, False, False, None, True, None), - # Not owner but Delegate - ("5FHneW46...", False, True, 10.0, True, False, False, None, True, None), - # Not owner but Delegate and prompt refused - ("5FHneW46...", False, True, 10.0, True, False, True, False, False, None), - # Not owner and not delegate - ( - "5FHneW46...", - False, - False, - 10.0, - True, - False, - False, - None, - False, - NotDelegateError, - ), - # Staking failed - (None, True, None, 10.0, True, False, False, None, False, None), - ], - ids=[ - "success-own-hotkey-float", - "success-own-hotkey-int", - "success-own-hotkey-no-wait", - "prompt-refused", - "success-staking-all", - "failure-insufficient-balance", - "success-no-deduction", - "success-delegate", - "failure-delegate-prompt-refused", - "failure-not-delegate", - "failure-staking", - ], -) -def test_add_stake_extrinsic( - mock_subtensor, - mock_wallet, - mock_other_owner_wallet, - hotkey_ss58, - hotkey_owner, - hotkey_delegate, - amount, - wait_for_inclusion, - wait_for_finalization, - 
prompt, - user_accepts, - expected_success, - exception, -): - # Arrange - if not amount: - staking_balance = amount if amount else Balance.from_tao(100) - else: - staking_balance = ( - Balance.from_tao(amount) - if not isinstance(amount, bittensor.Balance) - else amount - ) - - with patch.object( - mock_subtensor, "_do_stake", return_value=expected_success - ) as mock_add_stake, patch.object( - mock_subtensor, "get_balance", return_value=Balance.from_tao(100) - ), patch.object( - mock_subtensor, - "get_stake_for_coldkey_and_hotkey", - return_value=Balance.from_tao(50), - ), patch.object( - mock_subtensor, - "get_hotkey_owner", - return_value=mock_wallet.coldkeypub.ss58_address - if hotkey_owner - else mock_other_owner_wallet.coldkeypub.ss58_address, - ), patch.object( - mock_subtensor, "is_hotkey_delegate", return_value=hotkey_delegate - ), patch.object(mock_subtensor, "get_delegate_take", return_value=0.01), patch( - "rich.prompt.Confirm.ask", return_value=user_accepts - ) as mock_confirm, patch.object( - mock_subtensor, - "get_minimum_required_stake", - return_value=bittensor.Balance.from_tao(0.01), - ), patch.object( - mock_subtensor, - "get_existential_deposit", - return_value=bittensor.Balance.from_rao(100_000), - ): - mock_balance = mock_subtensor.get_balance() - existential_deposit = mock_subtensor.get_existential_deposit() - if staking_balance > mock_balance - existential_deposit: - staking_balance = mock_balance - existential_deposit - - # Act - if not hotkey_owner and not hotkey_delegate: - with pytest.raises(exception): - result = add_stake_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - hotkey_ss58=hotkey_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - else: - result = add_stake_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - hotkey_ss58=hotkey_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - 
wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - # Assert - assert ( - result == expected_success - ), f"Expected {expected_success}, but got {result}" - - if prompt: - mock_confirm.assert_called_once() - - if expected_success: - if not hotkey_ss58: - hotkey_ss58 = mock_wallet.hotkey.ss58_address - - mock_add_stake.assert_called_once_with( - wallet=mock_wallet, - hotkey_ss58=hotkey_ss58, - amount=staking_balance, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - -# Parametrized test cases -@pytest.mark.parametrize( - "hotkey_ss58s, amounts, hotkey_owner, hotkey_delegates ,wallet_balance, wait_for_inclusion, wait_for_finalization, prompt, prompt_response, stake_responses, expected_success, stake_attempted, exception, exception_msg", - [ - # Successful stake - ( - ["5FHneW46...", "11HneC46..."], - [10.0, 20.0], - [True, True], - [None, None], - 100.0, - True, - False, - False, - None, - [True, True], - True, - 2, - None, - None, - ), - # Successful stake with prompt - ( - ["5FHneW46...", "11HneC46..."], - [10.0, 20.0], - [True, True], - [None, None], - 100.0, - True, - True, - True, - True, - [True, True], - True, - 2, - None, - None, - ), - # Successful stake, no deduction scenario - ( - ["5FHneW46...", "11HneC46..."], - [0.000000100, 0.000000100], - [True, True], - [None, None], - 100.0, - True, - False, - False, - None, - [True, True], - True, - 2, - None, - None, - ), - # Successful stake, not waiting for finalization & inclusion - ( - ["5FHneW46...", "11HneC46..."], - [10.0, 20.0], - [True, True], - [None, None], - 100.0, - False, - False, - False, - None, - [True, True], - True, - 2, - None, - None, - ), - # Successful stake, one key is a delegate - ( - ["5FHneW46...", "11HneC46..."], - [10.0, 20.0], - [True, False], - [True, True], - 100.0, - True, - False, - False, - None, - [True, True], - True, - 2, - None, - None, - ), - # Partial successful stake, one key is not a delegate - ( - 
["5FHneW46...", "11HneC46..."], - [10.0, 20.0], - [True, False], - [True, False], - 100.0, - True, - False, - False, - None, - [True, False], - True, - 1, - None, - None, - ), - # Successful, staking all tao to first wallet, not waiting for finalization + inclusion - ( - ["5FHneW46...", "11HneC46..."], - None, - [True, True], - [None, None], - 100.0, - False, - False, - False, - None, - [True, False], - True, - 1, - None, - None, - ), - # Successful, staking all tao to first wallet - ( - ["5FHneW46...", "11HneC46..."], - None, - [True, True], - [None, None], - 100.0, - True, - False, - False, - None, - [True, False], - True, - 1, - None, - None, - ), - # Success, staking 0 tao - ( - ["5FHneW46...", "11HneC46..."], - [0.0, 0.0], - [True, True], - [None, None], - 100.0, - True, - False, - False, - None, - [None, None], - True, - 0, - None, - None, - ), - # Complete failure to stake for both keys - ( - ["5FHneW46...", "11HneC46..."], - [10.0, 20.0], - [True, True], - [None, None], - 100.0, - True, - False, - False, - None, - [False, False], - False, - 2, - None, - None, - ), - # Complete failure, both keys are not delegates - ( - ["5FHneW46...", "11HneC46..."], - [10.0, 20.0], - [False, False], - [False, False], - 100.0, - True, - False, - False, - None, - [False, False], - False, - 0, - None, - None, - ), - # Unsuccessful stake with prompt declined both times - ( - ["5FHneW46...", "11HneC46..."], - [10.0, 20.0], - [True, True], - [None, None], - 100.0, - True, - True, - True, - False, - [None, None], - False, - 0, - None, - None, - ), - # Exception, TypeError for incorrect hotkey_ss58s - ( - [123, "11HneC46..."], - [10.0, 20.0], - [False, False], - [False, False], - 100.0, - True, - False, - False, - None, - [None, None], - None, - 0, - TypeError, - "hotkey_ss58s must be a list of str", - ), - # Exception, ValueError for mismatch between hotkeys and amounts - ( - ["5FHneW46...", "11HneC46..."], - [10.0], - [False, False], - [False, False], - 100.0, - True, - False, - 
False, - None, - [None, None], - None, - 0, - ValueError, - "amounts must be a list of the same length as hotkey_ss58s", - ), - # Exception, TypeError for incorrect amounts - ( - ["5FHneW46...", "11HneC46..."], - ["abc", 12], - [False, False], - [False, False], - 100.0, - True, - False, - False, - None, - [None, None], - None, - 0, - TypeError, - "amounts must be a [list of bittensor.Balance or float] or None", - ), - ], - ids=[ - "success-basic-path", - "success-with-prompt", - "success-no-deduction", - "success-no-wait", - "success-one-delegate", - "partial-success-one-not-delegate", - "success-all-tao-no-wait", - "success-all-tao", - "success-0-tao", - "failure-both-keys", - "failure-both-not-delegates", - "failure-prompt-declined", - "failure-type-error-hotkeys", - "failure-value-error-amount", - "failure-type-error-amount", - ], -) -def test_add_stake_multiple_extrinsic( - mock_subtensor, - mock_wallet, - mock_other_owner_wallet, - hotkey_ss58s, - amounts, - hotkey_owner, - hotkey_delegates, - wallet_balance, - wait_for_inclusion, - wait_for_finalization, - prompt, - prompt_response, - stake_responses, - expected_success, - stake_attempted, - exception, - exception_msg, -): - # Arrange - def hotkey_delegate_side_effect(hotkey_ss58): - index = hotkey_ss58s.index(hotkey_ss58) - return hotkey_delegates[index] - - def owner_side_effect(hotkey_ss58): - index = hotkey_ss58s.index(hotkey_ss58) - return ( - mock_wallet.coldkeypub.ss58_address - if hotkey_owner[index] - else mock_other_owner_wallet.coldkeypub.ss58_address - ) - - def stake_side_effect(hotkey_ss58, *args, **kwargs): - index = hotkey_ss58s.index(hotkey_ss58) - return stake_responses[index] - - with patch.object( - mock_subtensor, "get_balance", return_value=Balance.from_tao(wallet_balance) - ), patch.object( - mock_subtensor, "is_hotkey_delegate", side_effect=hotkey_delegate_side_effect - ), patch.object( - mock_subtensor, "get_hotkey_owner", side_effect=owner_side_effect - ), patch.object( - 
mock_subtensor, "_do_stake", side_effect=stake_side_effect - ) as mock_do_stake, patch.object( - mock_subtensor, "tx_rate_limit", return_value=0 - ), patch("rich.prompt.Confirm.ask", return_value=prompt_response) as mock_confirm: - # Act - if exception: - with pytest.raises(exception) as exc_info: - result = add_stake_multiple_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - hotkey_ss58s=hotkey_ss58s, - amounts=amounts, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - # Assert - assert str(exc_info.value) == exception_msg - - # Act - else: - result = add_stake_multiple_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - hotkey_ss58s=hotkey_ss58s, - amounts=amounts, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - # Assert - assert ( - result == expected_success - ), f"Expected {expected_success}, but got {result}" - if prompt: - assert mock_confirm.called - assert mock_do_stake.call_count == stake_attempted diff --git a/tests/unit_tests/extrinsics/test_unstaking.py b/tests/unit_tests/extrinsics/test_unstaking.py deleted file mode 100644 index 0fa6ba84c4..0000000000 --- a/tests/unit_tests/extrinsics/test_unstaking.py +++ /dev/null @@ -1,332 +0,0 @@ -import bittensor -import pytest - -from unittest.mock import patch, MagicMock - -from bittensor.utils.balance import Balance -from bittensor.extrinsics.unstaking import unstake_extrinsic, unstake_multiple_extrinsic - - -@pytest.fixture -def mock_subtensor(): - mock = MagicMock(spec=bittensor.subtensor) - mock.network = "mock_network" - return mock - - -@pytest.fixture -def mock_wallet(): - mock = MagicMock(spec=bittensor.wallet) - mock.hotkey.ss58_address = "5FHneW46..." - mock.coldkeypub.ss58_address = "5Gv8YYFu8..." 
- mock.hotkey_str = "mock_hotkey_str" - return mock - - -def mock_get_minimum_required_stake(): - # Valid minimum threshold as of 2024/05/01 - return Balance.from_rao(100_000_000) - - -@pytest.mark.parametrize( - "hotkey_ss58, amount, wait_for_inclusion, wait_for_finalization, prompt, user_accepts, expected_success, unstake_attempted", - [ - # Successful unstake without waiting for inclusion or finalization - (None, 10.0, False, False, False, None, True, True), - # Successful unstake with prompt accepted - ("5FHneW46...", 10.0, True, True, True, True, True, True), - # Prompt declined - ("5FHneW46...", 10.0, True, True, True, False, False, False), - # Not enough stake to unstake - ("5FHneW46...", 1000.0, True, True, False, None, False, False), - # Successful - unstake threshold not reached - (None, 0.01, True, True, False, None, True, True), - # Successful unstaking all - (None, None, False, False, False, None, True, True), - # Failure - unstaking failed - (None, 10.0, False, False, False, None, False, True), - ], - ids=[ - "successful-no-wait", - "successful-with-prompt", - "failure-prompt-declined", - "failure-not-enough-stake", - "success-threshold-not-reached", - "success-unstake-all", - "failure-unstake-failed", - ], -) -def test_unstake_extrinsic( - mock_subtensor, - mock_wallet, - hotkey_ss58, - amount, - wait_for_inclusion, - wait_for_finalization, - prompt, - user_accepts, - expected_success, - unstake_attempted, -): - mock_current_stake = Balance.from_tao(50) - mock_current_balance = Balance.from_tao(100) - - with patch.object( - mock_subtensor, "_do_unstake", return_value=(expected_success) - ), patch.object( - mock_subtensor, "get_balance", return_value=mock_current_balance - ), patch.object( - mock_subtensor, - "get_minimum_required_stake", - side_effect=mock_get_minimum_required_stake, - ), patch.object( - mock_subtensor, - "get_stake_for_coldkey_and_hotkey", - return_value=mock_current_stake, - ), patch("rich.prompt.Confirm.ask", 
return_value=user_accepts) as mock_confirm: - result = unstake_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - hotkey_ss58=hotkey_ss58, - amount=amount, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - assert ( - result == expected_success - ), f"Expected result {expected_success}, but got {result}" - - if prompt: - mock_confirm.assert_called_once() - - if unstake_attempted: - mock_subtensor._do_unstake.assert_called_once_with( - wallet=mock_wallet, - hotkey_ss58=hotkey_ss58 or mock_wallet.hotkey.ss58_address, - amount=bittensor.Balance.from_tao(amount) - if amount - else mock_current_stake, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - else: - mock_subtensor._do_unstake.assert_not_called() - - -@pytest.mark.parametrize( - # TODO: Write dynamic test to test for amount = None with multiple hotkeys - "hotkey_ss58s, amounts, wallet_balance, wait_for_inclusion, wait_for_finalization, prompt, prompt_response, unstake_responses, expected_success, unstake_attempted, exception, exception_msg", - [ - # Successful unstake - no wait - ( - ["5FHneW46...", "5FHneW47..."], - [10.0, 20.0], - 100, - False, - False, - True, - True, - [True, True], - True, - 2, - None, - None, - ), - # Partial-success unstake - one unstake fails - ( - ["5FHneW46...", "5FHneW47..."], - [10.0, 20.0], - 100, - True, - False, - True, - True, - [True, False], - True, - 2, - None, - None, - ), - # Success, based on no hotkeys - func to be confirmed - ([], [], 100, True, True, False, None, [None], True, 0, None, None), - # Unsuccessful unstake - not enough stake - ( - ["5FHneW46..."], - [1000.0], - 100, - True, - True, - False, - True, - [None], - False, - 0, - None, - None, - ), - # Successful unstake - new stake below threshold - ( - ["5FHneW46..."], - [ - 100 - mock_get_minimum_required_stake() + 0.01 - ], # New stake just below threshold - 100, - True, - True, - False, - True, 
- [True], - True, # Sucessful unstake - 1, - None, - None, - ), - # Unsuccessful unstake with prompt declined both times - ( - ["5FHneW46...", "5FHneW48..."], - [10.0, 10.0], - 100, - True, - True, - True, - False, - [None, None], - False, - 0, - None, - None, - ), - # Exception, TypeError for incorrect hotkey_ss58s - ( - ["5FHneW46...", 123], - [10.0, 20.0], - 100, - True, - False, - False, - None, - [None, None], - None, - 0, - TypeError, - "hotkey_ss58s must be a list of str", - ), - # Exception, ValueError for mismatch between hotkeys and amounts - ( - ["5FHneW46...", "5FHneW48..."], - [10.0], - 100, - True, - False, - False, - None, - [None, None], - None, - 0, - ValueError, - "amounts must be a list of the same length as hotkey_ss58s", - ), - # Exception, TypeError for incorrect amounts - ( - ["5FHneW46...", "5FHneW48..."], - [10.0, "tao"], - 100, - True, - False, - False, - None, - [None, None], - None, - 0, - TypeError, - "amounts must be a [list of bittensor.Balance or float] or None", - ), - ], - ids=[ - "success-no-wait", - "partial-success-one-fail", - "success-no-hotkey", - "failure-not-enough-stake", - "success-threshold-not-reached", - "failure-prompt-declined", - "failure-type-error-hotkeys", - "failure-value-error-amounts", - "failure-type-error-amounts", - ], -) -def test_unstake_multiple_extrinsic( - mock_subtensor, - mock_wallet, - hotkey_ss58s, - amounts, - wallet_balance, - wait_for_inclusion, - wait_for_finalization, - prompt, - prompt_response, - unstake_responses, - expected_success, - unstake_attempted, - exception, - exception_msg, -): - # Arrange - mock_current_stake = Balance.from_tao(100) - amounts_in_balances = [ - Balance.from_tao(amount) if isinstance(amount, float) else amount - for amount in amounts - ] - - def unstake_side_effect(hotkey_ss58, *args, **kwargs): - index = hotkey_ss58s.index(hotkey_ss58) - return unstake_responses[index] - - with patch.object( - mock_subtensor, "_do_unstake", side_effect=unstake_side_effect - ) as 
mock_unstake, patch.object( - mock_subtensor, - "get_minimum_required_stake", - side_effect=mock_get_minimum_required_stake, - ), patch.object( - mock_subtensor, "get_balance", return_value=Balance.from_tao(wallet_balance) - ), patch.object(mock_subtensor, "tx_rate_limit", return_value=0), patch.object( - mock_subtensor, - "get_stake_for_coldkey_and_hotkey", - return_value=mock_current_stake, - ), patch("rich.prompt.Confirm.ask", return_value=prompt_response) as mock_confirm: - # Act - if exception: - with pytest.raises(exception) as exc_info: - result = unstake_multiple_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - hotkey_ss58s=hotkey_ss58s, - amounts=amounts, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - # Assert - assert str(exc_info.value) == exception_msg - - # Act - else: - result = unstake_multiple_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - hotkey_ss58s=hotkey_ss58s, - amounts=amounts_in_balances, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - - # Assert - assert ( - result == expected_success - ), f"Expected {expected_success}, but got {result}" - if prompt: - assert mock_confirm.called - assert mock_unstake.call_count == unstake_attempted diff --git a/tests/unit_tests/factories/__init__.py b/tests/unit_tests/factories/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit_tests/factories/neuron_factory.py b/tests/unit_tests/factories/neuron_factory.py deleted file mode 100644 index 4ad70c5dca..0000000000 --- a/tests/unit_tests/factories/neuron_factory.py +++ /dev/null @@ -1,63 +0,0 @@ -import factory - -from bittensor.chain_data import AxonInfo, NeuronInfoLite, PrometheusInfo -from bittensor.utils.balance import Balance - - -class BalanceFactory(factory.Factory): - class Meta: - model = Balance - - balance = factory.Faker("pyfloat", left_digits=3, 
right_digits=6, positive=True) - - -class PrometheusInfoFactory(factory.Factory): - class Meta: - model = PrometheusInfo - - block = factory.Faker("random_int", min=0, max=100) - version = factory.Faker("random_int", min=0, max=100) - ip = factory.Faker("ipv4") - port = factory.Faker("random_int", min=0, max=100) - ip_type = factory.Faker("random_int", min=0, max=100) - - -class AxonInfoFactory(factory.Factory): - class Meta: - model = AxonInfo - - version = factory.Faker("random_int", min=0, max=100) - ip = factory.Faker("ipv4") - port = factory.Faker("random_int", min=0, max=100) - ip_type = factory.Faker("random_int", min=0, max=100) - hotkey = factory.Faker("uuid4") - coldkey = factory.Faker("uuid4") - - -class NeuronInfoLiteFactory(factory.Factory): - class Meta: - model = NeuronInfoLite - - hotkey = factory.Faker("uuid4") - coldkey = factory.Faker("uuid4") - uid = factory.Sequence(lambda n: n) - netuid = factory.Sequence(lambda n: n) - active = factory.Faker("random_int", min=0, max=1) - stake = factory.SubFactory(BalanceFactory) - stake_dict = factory.Dict({"balance": 10}) - total_stake = factory.SubFactory(BalanceFactory) - rank = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) - emission = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) - incentive = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) - consensus = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) - trust = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) - validator_trust = factory.Faker( - "pyfloat", left_digits=3, right_digits=6, positive=True - ) - dividends = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) - last_update = factory.Faker("unix_time") - validator_permit = factory.Faker("boolean") - prometheus_info = factory.SubFactory(PrometheusInfoFactory) - axon_info = factory.SubFactory(AxonInfoFactory) - pruning_score = factory.Faker("random_int", min=0, 
max=100) - is_null = factory.Faker("boolean") diff --git a/tests/unit_tests/test_axon.py b/tests/unit_tests/test_axon.py deleted file mode 100644 index 7ba433a151..0000000000 --- a/tests/unit_tests/test_axon.py +++ /dev/null @@ -1,781 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- - -# Standard Lib -import re -import time -from dataclasses import dataclass - -from typing import Any, Optional -from unittest import IsolatedAsyncioTestCase -from unittest.mock import AsyncMock, MagicMock, patch - -# Third Party -import fastapi -import netaddr -import pydantic -import pytest -from starlette.requests import Request -from fastapi.testclient import TestClient - -# Bittensor -import bittensor -from bittensor import Synapse, RunException, StreamingSynapse -from bittensor.axon import AxonMiddleware -from bittensor.axon import axon as Axon -from bittensor.utils.axon_utils import allowed_nonce_window_ns, calculate_diff_seconds -from bittensor.constants import ALLOWED_DELTA, NANOSECONDS_IN_SECOND - - -def test_attach(): - # Create a mock AxonServer instance - server = bittensor.axon() - - # Define the Synapse type - class Synapse(bittensor.Synapse): - pass - - # Define the functions with the correct signatures - def forward_fn(synapse: Synapse) -> Any: - pass - - def blacklist_fn(synapse: Synapse) -> bool: - return True - - def priority_fn(synapse: Synapse) -> float: - return 1.0 - - def verify_fn(synapse: Synapse) -> None: - pass - - # Test attaching with correct signatures - server.attach(forward_fn, blacklist_fn, priority_fn, verify_fn) - - # Define functions with incorrect signatures - def wrong_blacklist_fn(synapse: Synapse) -> int: - return 1 - - def wrong_priority_fn(synapse: Synapse) -> int: - return 1 - - def wrong_verify_fn(synapse: Synapse) -> bool: - return True - - # Test attaching with incorrect signatures - with pytest.raises(AssertionError): - server.attach(forward_fn, wrong_blacklist_fn, priority_fn, verify_fn) - - with pytest.raises(AssertionError): - server.attach(forward_fn, blacklist_fn, wrong_priority_fn, verify_fn) - - with pytest.raises(AssertionError): - server.attach(forward_fn, blacklist_fn, priority_fn, wrong_verify_fn) - - -def test_attach(): - # Create a mock AxonServer instance - server = bittensor.axon() - - # Define the 
Synapse type - class Synapse: - pass - - # Define a class that inherits from Synapse - class InheritedSynapse(bittensor.Synapse): - pass - - # Define a function with the correct signature - def forward_fn(synapse: InheritedSynapse) -> Any: - pass - - # Test attaching with correct signature and inherited class - server.attach(forward_fn) - - # Define a class that does not inherit from Synapse - class NonInheritedSynapse: - pass - - # Define a function with an argument of a class not inheriting from Synapse - def wrong_forward_fn(synapse: NonInheritedSynapse) -> Any: - pass - - # Test attaching with incorrect class inheritance - with pytest.raises(AssertionError): - server.attach(wrong_forward_fn) - - -def test_log_and_handle_error(): - from bittensor.axon import log_and_handle_error - - synapse = SynapseMock() - - synapse = log_and_handle_error(synapse, Exception("Error"), 500, 100) - assert synapse.axon.status_code == 500 - assert re.match(r"Internal Server Error #[\da-f\-]+", synapse.axon.status_message) - assert synapse.axon.process_time is not None - - -def test_create_error_response(): - from bittensor.axon import create_error_response - - synapse = SynapseMock() - synapse.axon.status_code = 500 - synapse.axon.status_message = "Error" - - response = create_error_response(synapse) - assert response.status_code == 500 - assert response.body == b'{"message":"Error"}' - - -# Fixtures -@pytest.fixture -def middleware(): - # Mock AxonMiddleware instance with empty axon object - axon = AxonMock() - return AxonMiddleware(None, axon) - - -@pytest.fixture -def mock_request(): - request = AsyncMock(spec=Request) - request.body = AsyncMock(return_value=b'{"field1": "value1", "field2": "value2"}') - request.url.path = "/test_endpoint" - request.headers = {"computed_body_hash": "correct_hash"} - return request - - -@pytest.fixture -def axon_instance(): - axon = Axon() - axon.required_hash_fields = {"test_endpoint": ["field1", "field2"]} - axon.forward_class_types = { - 
"test_endpoint": MagicMock(return_value=MagicMock(body_hash="correct_hash")) - } - return axon - - -# Mocks -@dataclass -class MockWallet: - hotkey: Any - coldkey: Any = None - coldkeypub: Any = None - - -class MockHotkey: - def __init__(self, ss58_address): - self.ss58_address = ss58_address - - def sign(self, *args, **kwargs): - return f"Signed: {args!r} {kwargs!r}".encode() - - -class MockInfo: - def to_string(self): - return "MockInfoString" - - -class AxonMock: - def __init__(self): - self.status_code = None - self.forward_class_types = {} - self.blacklist_fns = {} - self.priority_fns = {} - self.forward_fns = {} - self.verify_fns = {} - self.thread_pool = bittensor.PriorityThreadPoolExecutor(max_workers=1) - - -class SynapseMock(bittensor.Synapse): - pass - - -def verify_fn_pass(synapse): - pass - - -def verify_fn_fail(synapse): - raise Exception("Verification failed") - - -def blacklist_fn_pass(synapse): - return False, "" - - -def blacklist_fn_fail(synapse): - return True, "" - - -def priority_fn_pass(synapse) -> float: - return 0.0 - - -def priority_fn_timeout(synapse) -> float: - return 2.0 - - -@pytest.mark.asyncio -async def test_verify_pass(middleware): - synapse = SynapseMock() - middleware.axon.verify_fns = {"SynapseMock": verify_fn_pass} - await middleware.verify(synapse) - assert synapse.axon.status_code != 401 - - -@pytest.mark.asyncio -async def test_verify_fail(middleware): - synapse = SynapseMock() - middleware.axon.verify_fns = {"SynapseMock": verify_fn_fail} - with pytest.raises(Exception): - await middleware.verify(synapse) - assert synapse.axon.status_code == 401 - - -@pytest.mark.asyncio -async def test_blacklist_pass(middleware): - synapse = SynapseMock() - middleware.axon.blacklist_fns = {"SynapseMock": blacklist_fn_pass} - await middleware.blacklist(synapse) - assert synapse.axon.status_code != 403 - - -@pytest.mark.asyncio -async def test_blacklist_fail(middleware): - synapse = SynapseMock() - middleware.axon.blacklist_fns = 
{"SynapseMock": blacklist_fn_fail} - with pytest.raises(Exception): - await middleware.blacklist(synapse) - assert synapse.axon.status_code == 403 - - -@pytest.mark.asyncio -async def test_priority_pass(middleware): - synapse = SynapseMock() - middleware.axon.priority_fns = {"SynapseMock": priority_fn_pass} - await middleware.priority(synapse) - assert synapse.axon.status_code != 408 - - -@pytest.mark.parametrize( - "body, expected", - [ - ( - b'{"field1": "value1", "field2": "value2"}', - {"field1": "value1", "field2": "value2"}, - ), - ( - b'{"field1": "different_value", "field2": "another_value"}', - {"field1": "different_value", "field2": "another_value"}, - ), - ], -) -@pytest.mark.asyncio -async def test_verify_body_integrity_happy_path( - mock_request, axon_instance, body, expected -): - # Arrange - mock_request.body.return_value = body - - # Act - result = await axon_instance.verify_body_integrity(mock_request) - - # Assert - assert result == expected, "The parsed body should match the expected dictionary." - - -@pytest.mark.parametrize( - "body, expected_exception_message", - [ - (b"", "Expecting value: line 1 column 1 (char 0)"), # Empty body - (b"not_json", "Expecting value: line 1 column 1 (char 0)"), # Non-JSON body - ], - ids=["empty_body", "non_json_body"], -) -@pytest.mark.asyncio -async def test_verify_body_integrity_edge_cases( - mock_request, axon_instance, body, expected_exception_message -): - # Arrange - mock_request.body.return_value = body - - # Act & Assert - with pytest.raises(Exception) as exc_info: - await axon_instance.verify_body_integrity(mock_request) - assert expected_exception_message in str( - exc_info.value - ), "Expected specific exception message." 
- - -@pytest.mark.parametrize( - "computed_hash, expected_error", - [ - ("incorrect_hash", ValueError), - ], -) -@pytest.mark.asyncio -async def test_verify_body_integrity_error_cases( - mock_request, axon_instance, computed_hash, expected_error -): - # Arrange - mock_request.headers["computed_body_hash"] = computed_hash - - # Act & Assert - with pytest.raises(expected_error) as exc_info: - await axon_instance.verify_body_integrity(mock_request) - assert "Hash mismatch" in str(exc_info.value), "Expected a hash mismatch error." - - -@pytest.mark.parametrize( - "info_return, expected_output, test_id", - [ - (MockInfo(), "MockInfoString", "happy_path_basic"), - (MockInfo(), "MockInfoString", "edge_case_empty_string"), - ], -) -def test_to_string(info_return, expected_output, test_id): - # Arrange - axon = Axon() - with patch.object(axon, "info", return_value=info_return): - # Act - output = axon.to_string() - - # Assert - assert output == expected_output, f"Test ID: {test_id}" - - -@pytest.mark.parametrize( - "ip, port, expected_ip_type, test_id", - [ - # Happy path - ( - "127.0.0.1", - 8080, - 4, - "valid_ipv4", - ), - ( - "2001:0db8:85a3:0000:0000:8a2e:0370:7334", - 3030, - 6, - "valid_ipv6", - ), - ], -) -def test_valid_ipv4_and_ipv6_address(ip, port, expected_ip_type, test_id): - # Arrange - axon = Axon() - axon.ip = ip - axon.external_ip = ip - axon.port = port - - # Act - ip_type = axon.info().ip_type - - # Assert - assert ip_type == expected_ip_type, f"Test ID: {test_id}" - - -@pytest.mark.parametrize( - "ip, port, expected_exception", - [ - ( - "This Is not a valid address", - 65534, - netaddr.core.AddrFormatError, - ), - ], - ids=["failed to detect a valid IP " "address from %r"], -) -def test_invalid_ip_address(ip, port, expected_exception): - # Assert - with pytest.raises(expected_exception): - Axon(ip=ip, external_ip=ip, port=port).info() - - -@pytest.mark.parametrize( - "ip, port, ss58_address, started, forward_fns, expected_str, test_id", - [ - # Happy 
path - ( - "127.0.0.1", - 8080, - "5G9RtsTbiYJYQYJzUfTCs...", - True, - {"fn1": None}, - "Axon(127.0.0.1, 8080, 5G9RtsTbiYJYQYJzUfTCs..., started, ['fn1'])", - "happy_path_started_with_forward_fn", - ), - ( - "192.168.1.1", - 3030, - "5HqUkGuo62b5...", - False, - {}, - "Axon(192.168.1.1, 3030, 5HqUkGuo62b5..., stopped, [])", - "happy_path_stopped_no_forward_fn", - ), - # Edge cases - ("", 0, "", False, {}, "Axon(, 0, , stopped, [])", "edge_empty_values"), - ( - "255.255.255.255", - 65535, - "5G9RtsTbiYJYQYJzUfTCs...", - True, - {"fn1": None, "fn2": None}, - "Axon(255.255.255.255, 65535, 5G9RtsTbiYJYQYJzUfTCs..., started, ['fn1', 'fn2'])", - "edge_max_values", - ), - ], -) -def test_axon_str_representation( - ip, port, ss58_address, started, forward_fns, expected_str, test_id -): - # Arrange - hotkey = MockHotkey(ss58_address) - wallet = MockWallet(hotkey) - axon = Axon() - axon.ip = ip - axon.port = port - axon.wallet = wallet - axon.started = started - axon.forward_fns = forward_fns - - # Act - result_dunder_str = axon.__str__() - result_dunder_repr = axon.__repr__() - - # Assert - assert result_dunder_str == expected_str, f"Test ID: {test_id}" - assert result_dunder_repr == expected_str, f"Test ID: {test_id}" - - -class TestAxonMiddleware(IsolatedAsyncioTestCase): - def setUp(self): - # Create a mock app - self.mock_app = MagicMock() - # Create a mock axon - self.mock_axon = MagicMock() - self.mock_axon.uuid = "1234" - self.mock_axon.forward_class_types = { - "request_name": bittensor.Synapse, - } - self.mock_axon.wallet.hotkey.sign.return_value = bytes.fromhex("aabbccdd") - # Create an instance of AxonMiddleware - self.axon_middleware = AxonMiddleware(self.mock_app, self.mock_axon) - return self.axon_middleware - - @pytest.mark.asyncio - async def test_preprocess(self): - # Mock the request - request = MagicMock(spec=Request) - request.url.path = "/request_name" - request.client.port = "5000" - request.client.host = "192.168.0.1" - request.headers = {} - - 
synapse = await self.axon_middleware.preprocess(request) - - # Check if the preprocess function fills the axon information into the synapse - assert synapse.axon.version == str(bittensor.__version_as_int__) - assert synapse.axon.uuid == "1234" - assert synapse.axon.nonce is not None - assert synapse.axon.status_message is None - assert synapse.axon.status_code == 100 - assert synapse.axon.signature == "0xaabbccdd" - - # Check if the preprocess function fills the dendrite information into the synapse - assert synapse.dendrite.port == "5000" - assert synapse.dendrite.ip == "192.168.0.1" - - # Check if the preprocess function sets the request name correctly - assert synapse.name == "request_name" - - -class SynapseHTTPClient(TestClient): - def post_synapse(self, synapse: Synapse): - return self.post( - f"/{synapse.__class__.__name__}", - json=synapse.model_dump(), - headers={"computed_body_hash": synapse.body_hash}, - ) - - -@pytest.mark.asyncio -class TestAxonHTTPAPIResponses: - @pytest.fixture - def axon(self): - return Axon( - ip="192.0.2.1", - external_ip="192.0.2.1", - wallet=MockWallet(MockHotkey("A"), MockHotkey("B"), MockHotkey("PUB")), - ) - - @pytest.fixture - def no_verify_axon(self, axon): - axon.default_verify = self.no_verify_fn - return axon - - @pytest.fixture - def http_client(self, axon): - return SynapseHTTPClient(axon.app) - - async def no_verify_fn(self, synapse): - return - - class NonDeterministicHeaders(pydantic.BaseModel): - """ - Helper class to verify headers. - - Size headers are non-determistic as for example, header_size depends on non-deterministic - processing-time value. 
- """ - - bt_header_axon_process_time: float = pydantic.Field(gt=0, lt=30) - timeout: float = pydantic.Field(gt=0, lt=30) - header_size: int = pydantic.Field(None, gt=10, lt=400) - total_size: int = pydantic.Field(gt=100, lt=10000) - content_length: Optional[int] = pydantic.Field( - None, alias="content-length", gt=100, lt=10000 - ) - - def assert_headers(self, response, expected_headers): - expected_headers = { - "bt_header_axon_status_code": "200", - "bt_header_axon_status_message": "Success", - **expected_headers, - } - headers = dict(response.headers) - non_deterministic_headers_names = { - field.alias or field_name - for field_name, field in self.NonDeterministicHeaders.model_fields.items() - } - non_deterministic_headers = { - field: headers.pop(field, None) for field in non_deterministic_headers_names - } - assert headers == expected_headers - self.NonDeterministicHeaders.model_validate(non_deterministic_headers) - - async def test_unknown_path(self, http_client): - response = http_client.get("/no_such_path") - assert (response.status_code, response.json()) == ( - 404, - { - "message": "Synapse name 'no_such_path' not found. 
Available synapses ['Synapse']" - }, - ) - - async def test_ping__no_dendrite(self, http_client): - response = http_client.post_synapse(bittensor.Synapse()) - assert (response.status_code, response.json()) == ( - 401, - { - "message": "Not Verified with error: No SS58 formatted address or public key provided" - }, - ) - - async def test_ping__without_verification(self, http_client, axon): - axon.verify_fns["Synapse"] = self.no_verify_fn - request_synapse = Synapse() - response = http_client.post_synapse(request_synapse) - assert response.status_code == 200 - response_synapse = Synapse(**response.json()) - assert response_synapse.axon.status_code == 200 - self.assert_headers( - response, - { - "computed_body_hash": "a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a", - "content-type": "application/json", - "name": "Synapse", - }, - ) - - @pytest.fixture - def custom_synapse_cls(self): - class CustomSynapse(Synapse): - pass - - return CustomSynapse - - @pytest.fixture - def streaming_synapse_cls(self): - class CustomStreamingSynapse(StreamingSynapse): - async def process_streaming_response(self, response): - pass - - def extract_response_json(self, response) -> dict: - return {} - - return CustomStreamingSynapse - - async def test_synapse__explicitly_set_status_code( - self, http_client, axon, custom_synapse_cls, no_verify_axon - ): - error_message = "Essential resource for CustomSynapse not found" - - async def forward_fn(synapse: custom_synapse_cls): - synapse.axon.status_code = 404 - synapse.axon.status_message = error_message - return synapse - - axon.attach(forward_fn) - - response = http_client.post_synapse(custom_synapse_cls()) - assert response.status_code == 404 - response_synapse = custom_synapse_cls(**response.json()) - assert ( - response_synapse.axon.status_code, - response_synapse.axon.status_message, - ) == (404, error_message) - - async def test_synapse__exception_with_set_status_code( - self, http_client, axon, custom_synapse_cls, 
no_verify_axon - ): - error_message = "Conflicting request" - - async def forward_fn(synapse: custom_synapse_cls): - synapse.axon.status_code = 409 - raise RunException(message=error_message, synapse=synapse) - - axon.attach(forward_fn) - - response = http_client.post_synapse(custom_synapse_cls()) - assert response.status_code == 409 - assert response.json() == {"message": error_message} - - async def test_synapse__internal_error( - self, http_client, axon, custom_synapse_cls, no_verify_axon - ): - async def forward_fn(synapse: custom_synapse_cls): - raise ValueError("error with potentially sensitive information") - - axon.attach(forward_fn) - - response = http_client.post_synapse(custom_synapse_cls()) - assert response.status_code == 500 - response_data = response.json() - assert sorted(response_data.keys()) == ["message"] - assert re.match(r"Internal Server Error #[\da-f\-]+", response_data["message"]) - - -def test_allowed_nonce_window_ns(): - mock_synapse = SynapseMock() - current_time = time.time_ns() - allowed_window_ns = allowed_nonce_window_ns(current_time, mock_synapse.timeout) - expected_window_ns = ( - current_time - ALLOWED_DELTA - (mock_synapse.timeout * NANOSECONDS_IN_SECOND) - ) - assert ( - allowed_window_ns < current_time - ), "Allowed window should be less than the current time" - assert ( - allowed_window_ns == expected_window_ns - ), f"Expected {expected_window_ns} but got {allowed_window_ns}" - - -@pytest.mark.parametrize("nonce_offset_seconds", [1, 3, 5, 10]) -def test_nonce_diff_seconds(nonce_offset_seconds): - mock_synapse = SynapseMock() - current_time_ns = time.time_ns() - synapse_nonce = current_time_ns - (nonce_offset_seconds * NANOSECONDS_IN_SECOND) - diff_seconds, allowed_delta_seconds = calculate_diff_seconds( - current_time_ns, mock_synapse.timeout, synapse_nonce - ) - - expected_diff_seconds = nonce_offset_seconds # Because we subtracted nonce_offset_seconds from current_time_ns - expected_allowed_delta_seconds = ( - ALLOWED_DELTA + 
(mock_synapse.timeout * NANOSECONDS_IN_SECOND) - ) / NANOSECONDS_IN_SECOND - - assert ( - diff_seconds == expected_diff_seconds - ), f"Expected {expected_diff_seconds} but got {diff_seconds}" - assert ( - allowed_delta_seconds == expected_allowed_delta_seconds - ), f"Expected {expected_allowed_delta_seconds} but got {allowed_delta_seconds}" - - -# Mimicking axon default_verify nonce verification -# True: Nonce is fresh, False: Nonce is old -def is_nonce_within_allowed_window(synapse_nonce, allowed_window_ns): - return not (synapse_nonce <= allowed_window_ns) - - -# Test assuming synapse timeout is the default 12 seconds -@pytest.mark.parametrize( - "nonce_offset_seconds, expected_result", - [(1, True), (3, True), (5, True), (15, True), (18, False), (19, False)], -) -def test_nonce_within_allowed_window(nonce_offset_seconds, expected_result): - mock_synapse = SynapseMock() - current_time_ns = time.time_ns() - synapse_nonce = current_time_ns - (nonce_offset_seconds * NANOSECONDS_IN_SECOND) - allowed_window_ns = allowed_nonce_window_ns(current_time_ns, mock_synapse.timeout) - - result = is_nonce_within_allowed_window(synapse_nonce, allowed_window_ns) - - assert result == expected_result, f"Expected {expected_result} but got {result}" - - @pytest.mark.parametrize( - "forward_fn_return_annotation", - [ - None, - fastapi.Response, - bittensor.StreamingSynapse, - ], - ) - async def test_streaming_synapse( - self, - http_client, - axon, - streaming_synapse_cls, - no_verify_axon, - forward_fn_return_annotation, - ): - tokens = [f"data{i}\n" for i in range(10)] - - async def streamer(send): - for token in tokens: - await send( - { - "type": "http.response.body", - "body": token.encode(), - "more_body": True, - } - ) - await send({"type": "http.response.body", "body": b"", "more_body": False}) - - async def forward_fn(synapse: streaming_synapse_cls): - return synapse.create_streaming_response(token_streamer=streamer) - - if forward_fn_return_annotation is not None: - 
forward_fn.__annotations__["return"] = forward_fn_return_annotation - - axon.attach(forward_fn) - - response = http_client.post_synapse(streaming_synapse_cls()) - assert (response.status_code, response.text) == (200, "".join(tokens)) - self.assert_headers( - response, - { - "content-type": "text/event-stream", - "name": "CustomStreamingSynapse", - "computed_body_hash": "a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a", - }, - ) diff --git a/tests/unit_tests/test_chain_data.py b/tests/unit_tests/test_chain_data.py deleted file mode 100644 index a6474bbee9..0000000000 --- a/tests/unit_tests/test_chain_data.py +++ /dev/null @@ -1,621 +0,0 @@ -import pytest -import bittensor -import torch -from bittensor.chain_data import AxonInfo, ChainDataType, DelegateInfo, NeuronInfo - -SS58_FORMAT = bittensor.__ss58_format__ -RAOPERTAO = 10**18 - - -@pytest.mark.parametrize( - "ip, expected, test_case", - [ - ("0.0.0.0", False, "ID_is_serving_false"), - ("127.0.0.1", True, "ID_is_serving_true"), - ], -) -def test_is_serving(ip, expected, test_case): - # Arrange - axon_info = AxonInfo( - version=1, ip=ip, port=8080, ip_type=4, hotkey="", coldkey="cold" - ) - - # Act - result = axon_info.is_serving - - # Assert - assert result == expected, f"Test case: {test_case}" - - -@pytest.mark.parametrize( - "ip_type, ip, port, expected, test_case", - [ - (4, "127.0.0.1", 8080, "/ipv4/127.0.0.1:8080", "ID_ip_str_ipv4"), - (6, "::1", 8080, "/ipv6/::1:8080", "ID_ip_str_ipv6"), - ], -) -def test_ip_str(ip_type, ip, port, expected, test_case): - # Arrange - axon_info = AxonInfo( - version=1, ip=ip, port=port, ip_type=ip_type, hotkey="hot", coldkey="cold" - ) - - # Act - result = axon_info.ip_str() - - # Assert - assert result == expected, f"Test case: {test_case}" - - -@pytest.mark.parametrize( - "other, expected, test_case", - [ - (None, False, "ID_eq_none"), - ( - AxonInfo( - version=1, - ip="127.0.0.1", - port=8080, - ip_type=4, - hotkey="hot", - coldkey="cold", - ), - True, - 
"ID_eq_equal", - ), - ( - AxonInfo( - version=2, - ip="127.0.0.1", - port=8080, - ip_type=4, - hotkey="hot", - coldkey="cold", - ), - False, - "ID_eq_diff_version", - ), - ], -) -def test_eq(other, expected, test_case): - # Arrange - axon_info = AxonInfo( - version=1, ip="127.0.0.1", port=8080, ip_type=4, hotkey="hot", coldkey="cold" - ) - - # Act - result = axon_info == other - - # Assert - assert result == expected, f"Test case: {test_case}" - - -@pytest.mark.parametrize( - "axon_info, expected, test_case", - [ - ( - AxonInfo( - version=1, - ip="127.0.0.1", - port=8080, - ip_type=4, - hotkey="hot", - coldkey="cold", - ), - '{"version": 1, "ip": "127.0.0.1", "port": 8080, "ip_type": 4, "hotkey": "hot", "coldkey": "cold", "protocol": 4, "placeholder1": 0, "placeholder2": 0}', - "ID_to_string", - ), - ], -) -def test_to_string(axon_info, expected, test_case): - # Act - result = axon_info.to_string() - - # Assert - assert result == expected, f"Test case: {test_case}" - - -# Test AxonInfo.from_string method -@pytest.mark.parametrize( - "string, expected, test_case", - [ - ( - '{"version": 1, "ip": "127.0.0.1", "port": 8080, "ip_type": 4, "hotkey": "hot", "coldkey": "cold"}', - AxonInfo( - version=1, - ip="127.0.0.1", - port=8080, - ip_type=4, - hotkey="hot", - coldkey="cold", - ), - "ID_from_string_valid", - ), - ("invalid_json", AxonInfo(0, "", 0, 0, "", ""), "ID_from_string_invalid_json"), - ], -) -def test_from_string(string, expected, test_case): - # Act - result = AxonInfo.from_string(string) - - # Assert - assert result == expected, f"Test case: {test_case}" - - -# Test AxonInfo.from_neuron_info method -@pytest.mark.parametrize( - "neuron_info, expected, test_case", - [ - ( - { - "axon_info": { - "version": 1, - "ip": 2130706433, - "port": 8080, - "ip_type": 4, - }, - "hotkey": "hot", - "coldkey": "cold", - }, - AxonInfo( - version=1, - ip="127.0.0.1", - port=8080, - ip_type=4, - hotkey="hot", - coldkey="cold", - ), - "ID_from_neuron_info", - ), - ], -) -def 
test_from_neuron_info(neuron_info, expected, test_case): - # Act - result = AxonInfo.from_neuron_info(neuron_info) - - # Assert - assert result == expected, f"Test case: {test_case}" - - -# Test AxonInfo.to_parameter_dict method -@pytest.mark.parametrize( - "axon_info, test_case", - [ - ( - AxonInfo( - version=1, - ip="127.0.0.1", - port=8080, - ip_type=4, - hotkey="hot", - coldkey="cold", - ), - "ID_to_parameter_dict", - ), - ], -) -def test_to_parameter_dict(axon_info, test_case): - # Act - result = axon_info.to_parameter_dict() - - # Assert - assert isinstance(result, dict) - for key, value in axon_info.__dict__.items(): - assert key in result - assert result[key] == value, f"Test case: {test_case}" - - -@pytest.mark.parametrize( - "axon_info, test_case", - [ - ( - AxonInfo( - version=1, - ip="127.0.0.1", - port=8080, - ip_type=4, - hotkey="hot", - coldkey="cold", - ), - "ID_to_parameter_dict", - ), - ], -) -def test_to_parameter_dict_torch( - axon_info, - test_case, - force_legacy_torch_compat_api, -): - result = axon_info.to_parameter_dict() - - # Assert - assert isinstance(result, torch.nn.ParameterDict) - for key, value in axon_info.__dict__.items(): - assert key in result - assert result[key] == value, f"Test case: {test_case}" - - -@pytest.mark.parametrize( - "parameter_dict, expected, test_case", - [ - ( - { - "version": 1, - "ip": "127.0.0.1", - "port": 8080, - "ip_type": 4, - "hotkey": "hot", - "coldkey": "cold", - }, - AxonInfo( - version=1, - ip="127.0.0.1", - port=8080, - ip_type=4, - hotkey="hot", - coldkey="cold", - ), - "ID_from_parameter_dict", - ), - ], -) -def test_from_parameter_dict(parameter_dict, expected, test_case): - # Act - result = AxonInfo.from_parameter_dict(parameter_dict) - - # Assert - assert result == expected, f"Test case: {test_case}" - - -@pytest.mark.parametrize( - "parameter_dict, expected, test_case", - [ - ( - torch.nn.ParameterDict( - { - "version": 1, - "ip": "127.0.0.1", - "port": 8080, - "ip_type": 4, - "hotkey": 
"hot", - "coldkey": "cold", - } - ), - AxonInfo( - version=1, - ip="127.0.0.1", - port=8080, - ip_type=4, - hotkey="hot", - coldkey="cold", - ), - "ID_from_parameter_dict", - ), - ], -) -def test_from_parameter_dict_torch( - parameter_dict, expected, test_case, force_legacy_torch_compat_api -): - # Act - result = AxonInfo.from_parameter_dict(parameter_dict) - - # Assert - assert result == expected, f"Test case: {test_case}" - - -def create_neuron_info_decoded( - hotkey, - coldkey, - stake, - weights, - bonds, - rank, - emission, - incentive, - consensus, - trust, - validator_trust, - dividends, - uid, - netuid, - active, - last_update, - validator_permit, - pruning_score, - prometheus_info, - axon_info, -): - return { - "hotkey": hotkey, - "coldkey": coldkey, - "stake": stake, - "weights": weights, - "bonds": bonds, - "rank": rank, - "emission": emission, - "incentive": incentive, - "consensus": consensus, - "trust": trust, - "validator_trust": validator_trust, - "dividends": dividends, - "uid": uid, - "netuid": netuid, - "active": active, - "last_update": last_update, - "validator_permit": validator_permit, - "pruning_score": pruning_score, - "prometheus_info": prometheus_info, - "axon_info": axon_info, - } - - -@pytest.mark.parametrize( - "test_id, neuron_info_decoded,", - [ - ( - "happy-path-1", - create_neuron_info_decoded( - hotkey=b"\x01" * 32, - coldkey=b"\x02" * 32, - stake=[(b"\x02" * 32, 1000)], - weights=[(1, 2)], - bonds=[(3, 4)], - rank=100, - emission=1000, - incentive=200, - consensus=300, - trust=400, - validator_trust=500, - dividends=600, - uid=1, - netuid=2, - active=True, - last_update=1000, - validator_permit=100, - pruning_score=1000, - prometheus_info={ - "version": 1, - "ip": 2130706433, - "port": 8080, - "ip_type": 4, - "block": 100, - }, - axon_info={ - "version": 1, - "ip": 2130706433, - "port": 8080, - "ip_type": 4, - }, - ), - ), - ], -) -def test_fix_decoded_values_happy_path(test_id, neuron_info_decoded): - # Act - result = 
NeuronInfo.fix_decoded_values(neuron_info_decoded) - - # Assert - assert result.hotkey == neuron_info_decoded["hotkey"], f"Test case: {test_id}" - assert result.coldkey == neuron_info_decoded["coldkey"], f"Test case: {test_id}" - assert result.stake == neuron_info_decoded["stake"], f"Test case: {test_id}" - assert result.weights == neuron_info_decoded["weights"], f"Test case: {test_id}" - assert result.bonds == neuron_info_decoded["bonds"], f"Test case: {test_id}" - assert result.rank == neuron_info_decoded["rank"], f"Test case: {test_id}" - assert result.emission == neuron_info_decoded["emission"], f"Test case: {test_id}" - assert result.incentive == neuron_info_decoded["incentive"], f"Test case: {test_id}" - assert result.consensus == neuron_info_decoded["consensus"], f"Test case: {test_id}" - assert result.trust == neuron_info_decoded["trust"], f"Test case: {test_id}" - assert ( - result.validator_trust == neuron_info_decoded["validator_trust"] - ), f"Test case: {test_id}" - assert result.dividends == neuron_info_decoded["dividends"], f"Test case: {test_id}" - assert result.uid == neuron_info_decoded["uid"], f"Test case: {test_id}" - assert result.netuid == neuron_info_decoded["netuid"], f"Test case: {test_id}" - assert result.active == neuron_info_decoded["active"], f"Test case: {test_id}" - assert ( - result.last_update == neuron_info_decoded["last_update"] - ), f"Test case: {test_id}" - - -@pytest.mark.parametrize( - "test_id, neuron_info_decoded", - [ - ( - "edge-1", - create_neuron_info_decoded( - hotkey=b"\x01" * 32, - coldkey=b"\x02" * 32, - stake=[], - weights=[(1, 2)], - bonds=[(3, 4)], - rank=100, - emission=1000, - incentive=200, - consensus=300, - trust=400, - validator_trust=500, - dividends=600, - uid=1, - netuid=2, - active=True, - last_update=1000, - validator_permit=100, - pruning_score=1000, - prometheus_info={ - "version": 1, - "ip": 2130706433, - "port": 8080, - "ip_type": 4, - "block": 100, - }, - axon_info={ - "version": 1, - "ip": 
2130706433, - "port": 8080, - "ip_type": 4, - }, - ), - ), - ], -) -def test_fix_decoded_values_edge_cases(test_id, neuron_info_decoded): - # Act - result = NeuronInfo.fix_decoded_values(neuron_info_decoded) - - # Assert - assert result.stake == 0, f"Test case: {test_id}" - assert result.weights == neuron_info_decoded["weights"], f"Test case: {test_id}" - - -@pytest.mark.parametrize( - "test_id, neuron_info_decoded, expected_exception", - [ - ( - "error-1", - create_neuron_info_decoded( - hotkey="not_bytes", - coldkey=b"\x02" * 32, - stake=[(b"\x02" * 32, 1000)], - weights=[(1, 2)], - bonds=[(3, 4)], - rank=100, - emission=1000, - incentive=200, - consensus=300, - trust=400, - validator_trust=500, - dividends=600, - uid=1, - netuid=2, - active=True, - last_update=1000, - validator_permit=100, - pruning_score=1000, - prometheus_info={}, - axon_info={}, - ), - ValueError, - ), - ], -) -def test_fix_decoded_values_error_cases( - test_id, neuron_info_decoded, expected_exception -): - # Arrange - # (Omitted since all input values are provided via test parameters) - - # Act / Assert - with pytest.raises(expected_exception): - NeuronInfo.fix_decoded_values(neuron_info_decoded), f"Test case: {test_id}" - - -@pytest.fixture -def mock_from_scale_encoding(mocker): - return mocker.patch("bittensor.chain_data.from_scale_encoding") - - -@pytest.fixture -def mock_fix_decoded_values(mocker): - return mocker.patch( - "bittensor.DelegateInfo.fix_decoded_values", side_effect=lambda x: x - ) - - -@pytest.mark.parametrize( - "test_id, vec_u8, expected", - [ - ( - "happy-path-1", - [1, 2, 3], - [ - DelegateInfo( - hotkey_ss58="hotkey", - total_stake=1000, - nominators=[ - "nominator1", - "nominator2", - ], - owner_ss58="owner", - take=10.1, - validator_permits=[1, 2, 3], - registrations=[4, 5, 6], - return_per_1000=100, - total_daily_return=1000, - ) - ], - ), - ( - "happy-path-2", - [4, 5, 6], - [ - DelegateInfo( - hotkey_ss58="hotkey", - total_stake=1000, - nominators=[ - 
"nominator1", - "nominator2", - ], - owner_ss58="owner", - take=2.1, - validator_permits=[1, 2, 3], - registrations=[4, 5, 6], - return_per_1000=100, - total_daily_return=1000, - ) - ], - ), - ], -) -def test_list_from_vec_u8_happy_path( - mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected -): - # Arrange - mock_from_scale_encoding.return_value = expected - - # Act - result = DelegateInfo.list_from_vec_u8(vec_u8) - - # Assert - mock_from_scale_encoding.assert_called_once_with( - vec_u8, ChainDataType.DelegateInfo, is_vec=True - ) - assert result == expected, f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, vec_u8, expected", - [ - ("edge_empty_list", [], []), - ], -) -def test_list_from_vec_u8_edge_cases( - mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected -): - # Arrange - mock_from_scale_encoding.return_value = None - - # Act - result = DelegateInfo.list_from_vec_u8(vec_u8) - - # Assert - mock_from_scale_encoding.assert_called_once_with( - vec_u8, ChainDataType.DelegateInfo, is_vec=True - ) - assert result == expected, f"Failed {test_id}" - - -@pytest.mark.parametrize( - "vec_u8, expected_exception", - [ - ("not_a_list", TypeError), - ], -) -def test_list_from_vec_u8_error_cases( - vec_u8, - expected_exception, -): - # No Arrange section needed as input values are provided via test parameters - - # Act & Assert - with pytest.raises(expected_exception): - _ = DelegateInfo.list_from_vec_u8(vec_u8) diff --git a/tests/unit_tests/test_dendrite.py b/tests/unit_tests/test_dendrite.py deleted file mode 100644 index 0146bb7782..0000000000 --- a/tests/unit_tests/test_dendrite.py +++ /dev/null @@ -1,415 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Yuma Rao -# Copyright © 2022-2023 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the 
“Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -# Standard Lib -import asyncio -import typing -from unittest.mock import MagicMock, Mock - -# Third Party -import aiohttp -import pytest - -# Application -import bittensor -from bittensor.constants import DENDRITE_ERROR_MAPPING, DENDRITE_DEFAULT_ERROR -from bittensor.dendrite import dendrite as Dendrite -from bittensor.synapse import TerminalInfo -from tests.helpers import _get_mock_wallet - - -class SynapseDummy(bittensor.Synapse): - input: int - output: typing.Optional[int] = None - - -def dummy(synapse: SynapseDummy) -> SynapseDummy: - synapse.output = synapse.input + 1 - return synapse - - -@pytest.fixture -def setup_dendrite(): - user_wallet = ( - _get_mock_wallet() - ) # assuming bittensor.wallet() returns a wallet object - dendrite_obj = bittensor.dendrite(user_wallet) - return dendrite_obj - - -@pytest.fixture -def dendrite_obj(setup_dendrite): - return setup_dendrite - - -@pytest.fixture -def axon_info(): - return bittensor.AxonInfo( - version=1, - ip="127.0.0.1", - port=666, - ip_type=4, - hotkey="hot", - coldkey="cold", - ) - - 
-@pytest.fixture(scope="session") -def setup_axon(): - axon = bittensor.axon() - axon.attach(forward_fn=dummy) - axon.start() - yield axon - del axon - - -def test_init(setup_dendrite): - dendrite_obj = setup_dendrite - assert isinstance(dendrite_obj, bittensor.dendrite) - assert dendrite_obj.keypair == setup_dendrite.keypair - - -def test_str(dendrite_obj): - expected_string = "dendrite({})".format(dendrite_obj.keypair.ss58_address) - assert str(dendrite_obj) == expected_string - - -def test_repr(dendrite_obj): - expected_string = "dendrite({})".format(dendrite_obj.keypair.ss58_address) - assert repr(dendrite_obj) == expected_string - - -def test_close(dendrite_obj, setup_axon): - axon = setup_axon - # Query the axon to open a session - dendrite_obj.query(axon, SynapseDummy(input=1)) - # Session should be automatically closed after query - assert dendrite_obj._session is None - - -@pytest.mark.asyncio -async def test_aclose(dendrite_obj, setup_axon): - axon = setup_axon - # Use context manager to open an async session - async with dendrite_obj: - resp = await dendrite_obj([axon], SynapseDummy(input=1), deserialize=False) - # Close should automatically be called on the session after context manager scope - assert dendrite_obj._session is None - - -class AsyncMock(Mock): - def __call__(self, *args, **kwargs): - sup = super(AsyncMock, self) - - async def coro(): - return sup.__call__(*args, **kwargs) - - return coro() - - def __await__(self): - return self().__await__() - - -def test_dendrite_create_wallet(): - d = bittensor.dendrite(_get_mock_wallet()) - d = bittensor.dendrite(_get_mock_wallet().hotkey) - d = bittensor.dendrite(_get_mock_wallet().coldkeypub) - assert d.__str__() == d.__repr__() - - -@pytest.mark.asyncio -async def test_forward_many(): - n = 10 - d = bittensor.dendrite(wallet=_get_mock_wallet()) - d.call = AsyncMock() - axons = [MagicMock() for _ in range(n)] - - resps = await d(axons) - assert len(resps) == n - resp = await d(axons[0]) - assert 
len([resp]) == 1 - - resps = await d.forward(axons) - assert len(resps) == n - resp = await d.forward(axons[0]) - assert len([resp]) == 1 - - -def test_pre_process_synapse(): - d = bittensor.dendrite(wallet=_get_mock_wallet()) - s = bittensor.Synapse() - synapse = d.preprocess_synapse_for_request( - target_axon_info=bittensor.axon(wallet=_get_mock_wallet()).info(), - synapse=s, - timeout=12, - ) - assert synapse.timeout == 12 - assert synapse.dendrite - assert synapse.axon - assert synapse.dendrite.ip - assert synapse.dendrite.version - assert synapse.dendrite.nonce - assert synapse.dendrite.uuid - assert synapse.dendrite.hotkey - assert synapse.axon.ip - assert synapse.axon.port - assert synapse.axon.hotkey - assert synapse.dendrite.signature - - -# Helper functions for casting, assuming they exist and work correctly. -def cast_int(value: typing.Any) -> int: - return int(value) - - -def cast_float(value: typing.Any) -> float: - return float(value) - - -# Happy path tests -@pytest.mark.parametrize( - "status_code, status_message, process_time, ip, port, version, nonce, uuid, hotkey, signature, expected", - [ - ( - 200, - "Success", - 0.1, - "198.123.23.1", - 9282, - 111, - 111111, - "5ecbd69c-1cec-11ee-b0dc-e29ce36fec1a", - "5EnjDGNqqWnuL2HCAdxeEtN2oqtXZw6BMBe936Kfy2PFz1J1", - "0x0813029319030129u4120u10841824y0182u091u230912u", - True, - ), - # Add more test cases with different combinations of realistic values - ], - ids=["basic-success"], -) -def test_terminal_info_happy_path( - status_code, - status_message, - process_time, - ip, - port, - version, - nonce, - uuid, - hotkey, - signature, - expected, -): - # Act - terminal_info = TerminalInfo( - status_code=status_code, - status_message=status_message, - process_time=process_time, - ip=ip, - port=port, - version=version, - nonce=nonce, - uuid=uuid, - hotkey=hotkey, - signature=signature, - ) - - # Assert - assert isinstance(terminal_info, TerminalInfo) == expected - assert terminal_info.status_code == 
status_code - assert terminal_info.status_message == status_message - assert terminal_info.process_time == process_time - assert terminal_info.ip == ip - assert terminal_info.port == port - assert terminal_info.version == version - assert terminal_info.nonce == nonce - assert terminal_info.uuid == uuid - assert terminal_info.hotkey == hotkey - assert terminal_info.signature == signature - - -# Edge cases -@pytest.mark.parametrize( - "status_code, process_time, port, version, nonce, expected_exception", - [ - ("not-an-int", 0.1, 9282, 111, 111111, ValueError), # status_code not an int - (200, "not-a-float", 9282, 111, 111111, ValueError), # process_time not a float - (200, 0.1, "not-an-int", 111, 111111, ValueError), # port not an int - # Add more edge cases as needed - ], - ids=["status_code-not-int", "process_time-not-float", "port-not-int"], -) -def test_terminal_info_edge_cases( - status_code, process_time, port, version, nonce, expected_exception -): - # Act & Assert - with pytest.raises(expected_exception): - TerminalInfo( - status_code=status_code, - process_time=process_time, - port=port, - version=version, - nonce=nonce, - ) - - -# Error case -@pytest.mark.parametrize( - "status_code, process_time, port, ip, version, nonce, expected_exception", - [ - (None, 0.1, 9282, 111, TerminalInfo(), 111111, TypeError), - ], - ids=[ - "int() argument must be a string, a bytes-like object or a real number, not 'TerminalInfo'" - ], -) -def test_terminal_info_error_cases( - status_code, process_time, port, ip, version, nonce, expected_exception -): - # Act & Assert - with pytest.raises(expected_exception): - TerminalInfo( - status_code=status_code, - process_time=process_time, - port=port, - ip=ip, - version=version, - nonce=nonce, - ) - - -@pytest.mark.asyncio -async def test_dendrite__call__success_response( - axon_info, dendrite_obj, mock_aioresponse -): - input_synapse = SynapseDummy(input=1) - expected_synapse = SynapseDummy( - **( - input_synapse.model_dump() - | 
dict( - output=2, - axon=TerminalInfo( - status_code=200, - status_message="Success", - process_time=0.1, - ), - ) - ) - ) - mock_aioresponse.post( - f"http://127.0.0.1:666/SynapseDummy", - body=expected_synapse.json(), - ) - synapse = await dendrite_obj.call(axon_info, synapse=input_synapse) - - assert synapse.input == 1 - assert synapse.output == 2 - assert synapse.dendrite.status_code == 200 - assert synapse.dendrite.status_message == "Success" - assert synapse.dendrite.process_time >= 0 - - -@pytest.mark.asyncio -async def test_dendrite__call__handles_http_error_response( - axon_info, dendrite_obj, mock_aioresponse -): - status_code = 414 - message = "Custom Error" - - mock_aioresponse.post( - f"http://127.0.0.1:666/SynapseDummy", - status=status_code, - payload={"message": message}, - ) - synapse = await dendrite_obj.call(axon_info, synapse=SynapseDummy(input=1)) - - assert synapse.axon.status_code == synapse.dendrite.status_code == status_code - assert synapse.axon.status_message == synapse.dendrite.status_message == message - - -@pytest.mark.parametrize( - "exception, expected_status_code, expected_message, synapse_timeout, synapse_ip, synapse_port, request_name", - [ - ( - aiohttp.ClientConnectorError(Mock(), Mock()), - DENDRITE_ERROR_MAPPING[aiohttp.ClientConnectorError][0], - f"{DENDRITE_ERROR_MAPPING[aiohttp.ClientConnectorError][1]} at 127.0.0.1:8080/test_request", - None, - "127.0.0.1", - "8080", - "test_request_client_connector_error", - ), - ( - asyncio.TimeoutError(), - DENDRITE_ERROR_MAPPING[asyncio.TimeoutError][0], - f"{DENDRITE_ERROR_MAPPING[asyncio.TimeoutError][1]} after 5 seconds", - 5, - None, - None, - "test_request_timeout", - ), - ( - aiohttp.ClientResponseError(Mock(), Mock(), status=404), - "404", - f"{DENDRITE_ERROR_MAPPING[aiohttp.ClientResponseError][1]}: 404, message=''", - None, - None, - None, - "test_request_client_response_error", - ), - ( - Exception("Unknown error"), - DENDRITE_DEFAULT_ERROR[0], - 
f"{DENDRITE_DEFAULT_ERROR[1]}: Unknown error", - None, - None, - None, - "test_request_unknown_error", - ), - ], - ids=[ - "ClientConnectorError", - "TimeoutError", - "ClientResponseError", - "GenericException", - ], -) -def test_process_error_message( - exception, - expected_status_code, - expected_message, - synapse_timeout, - synapse_ip, - synapse_port, - request_name, -): - # Arrange - dendrite = Dendrite() - synapse = Mock() - - synapse.timeout = synapse_timeout - synapse.axon.ip = synapse_ip - synapse.axon.port = synapse_port - - # Act - result = dendrite.process_error_message(synapse, request_name, exception) - - # Assert - assert result.dendrite.status_code == expected_status_code - assert expected_message in result.dendrite.status_message diff --git a/tests/unit_tests/test_keyfile.py b/tests/unit_tests/test_keyfile.py deleted file mode 100644 index 0f3b69cacf..0000000000 --- a/tests/unit_tests/test_keyfile.py +++ /dev/null @@ -1,643 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import os -import json -import time -import pytest -import shutil -import bittensor -import unittest.mock as mock -from scalecodec import ScaleBytes -from substrateinterface import Keypair, KeypairType -from substrateinterface.constants import DEV_PHRASE -from substrateinterface.exceptions import ConfigurationError -from bip39 import bip39_validate - -from bittensor import get_coldkey_password_from_environment - - -def test_generate_mnemonic(): - """ - Test the generation of a mnemonic and its validation. - """ - mnemonic = Keypair.generate_mnemonic() - assert bip39_validate(mnemonic) == True - - -def test_invalid_mnemonic(): - """ - Test the validation of an invalid mnemonic. - """ - mnemonic = "This is an invalid mnemonic" - assert bip39_validate(mnemonic) == False - - -def test_create_sr25519_keypair(): - """ - Test the creation of a sr25519 keypair from a mnemonic and verify the SS58 address. - """ - mnemonic = "old leopard transfer rib spatial phone calm indicate online fire caution review" - keypair = Keypair.create_from_mnemonic(mnemonic, ss58_format=0) - assert keypair.ss58_address == "16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2" - - -def test_only_provide_ss58_address(): - """ - Test the creation of a keypair with only the SS58 address provided. - """ - keypair = Keypair(ss58_address="16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2") - - assert ( - f"0x{keypair.public_key.hex()}" - == "0xe4359ad3e2716c539a1d663ebd0a51bdc5c98a12e663bb4c4402db47828c9446" - ) - - -def test_only_provide_public_key(): - """ - Test the creation of a keypair with only the public key provided. 
- """ - keypair = Keypair( - public_key="0xe4359ad3e2716c539a1d663ebd0a51bdc5c98a12e663bb4c4402db47828c9446", - ss58_format=0, - ) - - assert keypair.ss58_address == "16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2" - - -def test_provide_no_ss58_address_and_public_key(): - """ - Test the creation of a keypair without providing SS58 address and public key. - """ - with pytest.raises(ValueError): - Keypair() - - -def test_incorrect_private_key_length_sr25519(): - """ - Test the creation of a keypair with an incorrect private key length for sr25519. - """ - with pytest.raises(ValueError): - Keypair( - private_key="0x23", - ss58_address="16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2", - ) - - -def test_incorrect_public_key(): - """ - Test the creation of a keypair with an incorrect public key. - """ - with pytest.raises(ValueError): - Keypair(public_key="0x23") - - -def test_sign_and_verify(): - """ - Test the signing and verification of a message using a keypair. - """ - mnemonic = Keypair.generate_mnemonic() - keypair = Keypair.create_from_mnemonic(mnemonic) - signature = keypair.sign("Test1231223123123") - assert keypair.verify("Test1231223123123", signature) == True - - -def test_sign_and_verify_hex_data(): - """ - Test the signing and verification of hex data using a keypair. - """ - mnemonic = Keypair.generate_mnemonic() - keypair = Keypair.create_from_mnemonic(mnemonic) - signature = keypair.sign("0x1234") - assert keypair.verify("0x1234", signature) == True - - -def test_sign_and_verify_scale_bytes(): - """ - Test the signing and verification of ScaleBytes data using a keypair. - """ - mnemonic = Keypair.generate_mnemonic() - keypair = Keypair.create_from_mnemonic(mnemonic) - data = ScaleBytes("0x1234") - signature = keypair.sign(data) - assert keypair.verify(data, signature) == True - - -def test_sign_missing_private_key(): - """ - Test signing a message with a keypair that is missing the private key. 
- """ - keypair = Keypair(ss58_address="5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY") - with pytest.raises(ConfigurationError): - keypair.sign("0x1234") - - -def test_sign_unsupported_crypto_type(): - """ - Test signing a message with an unsupported crypto type. - """ - keypair = Keypair.create_from_private_key( - ss58_address="16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2", - private_key="0x1f1995bdf3a17b60626a26cfe6f564b337d46056b7a1281b64c649d592ccda0a9cffd34d9fb01cae1fba61aeed184c817442a2186d5172416729a4b54dd4b84e", - crypto_type=3, - ) - with pytest.raises(ConfigurationError): - keypair.sign("0x1234") - - -def test_verify_unsupported_crypto_type(): - """ - Test verifying a signature with an unsupported crypto type. - """ - keypair = Keypair.create_from_private_key( - ss58_address="16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2", - private_key="0x1f1995bdf3a17b60626a26cfe6f564b337d46056b7a1281b64c649d592ccda0a9cffd34d9fb01cae1fba61aeed184c817442a2186d5172416729a4b54dd4b84e", - crypto_type=3, - ) - with pytest.raises(ConfigurationError): - keypair.verify("0x1234", "0x1234") - - -def test_sign_and_verify_incorrect_signature(): - """ - Test verifying an incorrect signature for a signed message. - """ - mnemonic = Keypair.generate_mnemonic() - keypair = Keypair.create_from_mnemonic(mnemonic) - signature = "0x4c291bfb0bb9c1274e86d4b666d13b2ac99a0bacc04a4846fb8ea50bda114677f83c1f164af58fc184451e5140cc8160c4de626163b11451d3bbb208a1889f8a" - assert keypair.verify("Test1231223123123", signature) == False - - -def test_sign_and_verify_invalid_signature(): - """ - Test verifying an invalid signature format for a signed message. - """ - mnemonic = Keypair.generate_mnemonic() - keypair = Keypair.create_from_mnemonic(mnemonic) - signature = "Test" - with pytest.raises(TypeError): - keypair.verify("Test1231223123123", signature) - - -def test_sign_and_verify_invalid_message(): - """ - Test verifying a signature against an incorrect message. 
- """ - mnemonic = Keypair.generate_mnemonic() - keypair = Keypair.create_from_mnemonic(mnemonic) - signature = keypair.sign("Test1231223123123") - assert keypair.verify("OtherMessage", signature) == False - - -def test_create_ed25519_keypair(): - """ - Test the creation of an ed25519 keypair from a mnemonic and verify the SS58 address. - """ - mnemonic = "old leopard transfer rib spatial phone calm indicate online fire caution review" - keypair = Keypair.create_from_mnemonic( - mnemonic, ss58_format=0, crypto_type=KeypairType.ED25519 - ) - assert keypair.ss58_address == "16dYRUXznyhvWHS1ktUENGfNAEjCawyDzHRtN9AdFnJRc38h" - - -def test_sign_and_verify_ed25519(): - """ - Test the signing and verification of a message using an ed25519 keypair. - """ - mnemonic = Keypair.generate_mnemonic() - keypair = Keypair.create_from_mnemonic(mnemonic, crypto_type=KeypairType.ED25519) - signature = keypair.sign("Test1231223123123") - assert keypair.verify("Test1231223123123", signature) == True - - -def test_sign_and_verify_invalid_signature_ed25519(): - """ - Test verifying an incorrect signature for a message signed with an ed25519 keypair. - """ - mnemonic = Keypair.generate_mnemonic() - keypair = Keypair.create_from_mnemonic(mnemonic, crypto_type=KeypairType.ED25519) - signature = "0x4c291bfb0bb9c1274e86d4b666d13b2ac99a0bacc04a4846fb8ea50bda114677f83c1f164af58fc184451e5140cc8160c4de626163b11451d3bbb208a1889f8a" - assert keypair.verify("Test1231223123123", signature) == False - - -def test_unsupport_crypto_type(): - """ - Test creating a keypair with an unsupported crypto type. - """ - with pytest.raises(ValueError): - Keypair.create_from_seed( - seed_hex="0xda3cf5b1e9144931?a0f0db65664aab662673b099415a7f8121b7245fb0be4143", - crypto_type=2, - ) - - -def test_create_keypair_from_private_key(): - """ - Test creating a keypair from a private key and verify the public key. 
- """ - keypair = Keypair.create_from_private_key( - ss58_address="16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2", - private_key="0x1f1995bdf3a17b60626a26cfe6f564b337d46056b7a1281b64c649d592ccda0a9cffd34d9fb01cae1fba61aeed184c817442a2186d5172416729a4b54dd4b84e", - ) - assert ( - f"0x{keypair.public_key.hex()}" - == "0xe4359ad3e2716c539a1d663ebd0a51bdc5c98a12e663bb4c4402db47828c9446" - ) - - -def test_hdkd_hard_path(): - """ - Test hierarchical deterministic key derivation with a hard derivation path. - """ - mnemonic = "old leopard transfer rib spatial phone calm indicate online fire caution review" - derivation_address = "5FEiH8iuDUw271xbqWTWuB6WrDjv5dnCeDX1CyHubAniXDNN" - derivation_path = "//Alice" - derived_keypair = Keypair.create_from_uri(mnemonic + derivation_path) - assert derivation_address == derived_keypair.ss58_address - - -def test_hdkd_soft_path(): - """ - Test hierarchical deterministic key derivation with a soft derivation path. - """ - derivation_address = "5GNXbA46ma5dg19GXdiKi5JH3mnkZ8Yea3bBtZAvj7t99P9i" - mnemonic = "old leopard transfer rib spatial phone calm indicate online fire caution review" - derived_keypair = Keypair.create_from_uri(f"{mnemonic}/Alice") - assert derivation_address == derived_keypair.ss58_address - - -def test_hdkd_default_to_dev_mnemonic(): - """ - Test hierarchical deterministic key derivation with a default development mnemonic. - """ - derivation_address = "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" - derivation_path = "//Alice" - derived_keypair = Keypair.create_from_uri(derivation_path) - assert derivation_address == derived_keypair.ss58_address - - -def test_hdkd_nested_hard_soft_path(): - """ - Test hierarchical deterministic key derivation with a nested hard and soft derivation path. 
- """ - derivation_address = "5CJGwWiKXSE16WJaxBdPZhWqUYkotgenLUALv7ZvqQ4TXeqf" - derivation_path = "//Bob/test" - derived_keypair = Keypair.create_from_uri(derivation_path) - assert derivation_address == derived_keypair.ss58_address - - -def test_hdkd_nested_soft_hard_path(): - """ - Test hierarchical deterministic key derivation with a nested soft and hard derivation path. - """ - derivation_address = "5Cwc8tShrshDJUp1P1M21dKUTcYQpV9GcfSa4hUBNmMdV3Cx" - derivation_path = "/Bob//test" - derived_keypair = Keypair.create_from_uri(derivation_path) - assert derivation_address == derived_keypair.ss58_address - - -def test_hdkd_path_gt_32_bytes(): - """ - Test hierarchical deterministic key derivation with a derivation path longer than 32 bytes. - """ - derivation_address = "5GR5pfZeNs1uQiSWVxZaQiZou3wdZiX894eqgvfNfHbEh7W2" - derivation_path = "//PathNameLongerThan32BytesWhichShouldBeHashed" - derived_keypair = Keypair.create_from_uri(derivation_path) - assert derivation_address == derived_keypair.ss58_address - - -def test_hdkd_unsupported_password(): - """ - Test hierarchical deterministic key derivation with an unsupported password. - """ - - with pytest.raises(NotImplementedError): - Keypair.create_from_uri(f"{DEV_PHRASE}///test") - - -def create_keyfile(root_path): - """ - Creates a keyfile object with two keypairs: alice and bob. - - Args: - root_path (str): The root path for the keyfile. - - Returns: - bittensor.keyfile: The created keyfile object. 
- """ - keyfile = bittensor.keyfile(path=os.path.join(root_path, "keyfile")) - - mnemonic = bittensor.Keypair.generate_mnemonic(12) - alice = bittensor.Keypair.create_from_mnemonic(mnemonic) - keyfile.set_keypair( - alice, encrypt=True, overwrite=True, password="thisisafakepassword" - ) - - bob = bittensor.Keypair.create_from_uri("/Bob") - keyfile.set_keypair( - bob, encrypt=True, overwrite=True, password="thisisafakepassword" - ) - - return keyfile - - -@pytest.fixture(scope="session") -def keyfile_setup_teardown(): - root_path = f"/tmp/pytest{time.time()}" - os.makedirs(root_path, exist_ok=True) - - create_keyfile(root_path) - - yield root_path - - shutil.rmtree(root_path) - - -def test_create(keyfile_setup_teardown): - """ - Test case for creating a keyfile and performing various operations on it. - """ - root_path = keyfile_setup_teardown - keyfile = bittensor.keyfile(path=os.path.join(root_path, "keyfile")) - - mnemonic = bittensor.Keypair.generate_mnemonic(12) - alice = bittensor.Keypair.create_from_mnemonic(mnemonic) - keyfile.set_keypair( - alice, encrypt=True, overwrite=True, password="thisisafakepassword" - ) - assert keyfile.is_readable() - assert keyfile.is_writable() - assert keyfile.is_encrypted() - keyfile.decrypt(password="thisisafakepassword") - assert not keyfile.is_encrypted() - keyfile.encrypt(password="thisisafakepassword") - assert keyfile.is_encrypted() - str(keyfile) - keyfile.decrypt(password="thisisafakepassword") - assert not keyfile.is_encrypted() - str(keyfile) - - assert ( - keyfile.get_keypair(password="thisisafakepassword").ss58_address - == alice.ss58_address - ) - assert ( - keyfile.get_keypair(password="thisisafakepassword").private_key - == alice.private_key - ) - assert ( - keyfile.get_keypair(password="thisisafakepassword").public_key - == alice.public_key - ) - - bob = bittensor.Keypair.create_from_uri("/Bob") - keyfile.set_keypair( - bob, encrypt=True, overwrite=True, password="thisisafakepassword" - ) - assert ( - 
keyfile.get_keypair(password="thisisafakepassword").ss58_address - == bob.ss58_address - ) - assert ( - keyfile.get_keypair(password="thisisafakepassword").public_key == bob.public_key - ) - - repr(keyfile) - - -def test_legacy_coldkey(keyfile_setup_teardown): - """ - Test case for legacy cold keyfile. - """ - root_path = keyfile_setup_teardown - legacy_filename = os.path.join(root_path, "coldlegacy_keyfile") - keyfile = bittensor.keyfile(path=legacy_filename) - keyfile.make_dirs() - keyfile_data = b"0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f" - with open(legacy_filename, "wb") as keyfile_obj: - keyfile_obj.write(keyfile_data) - assert keyfile.keyfile_data == keyfile_data - keyfile.encrypt(password="this is the fake password") - keyfile.decrypt(password="this is the fake password") - expected_decryption = { - "accountId": "0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f", - "publicKey": "0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f", - "privateKey": None, - "secretPhrase": None, - "secretSeed": None, - "ss58Address": "5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm", - } - for key, value in expected_decryption.items(): - value_str = f'"{value}"' if value is not None else "null" - assert f'"{key}": {value_str}'.encode() in keyfile.keyfile_data - - assert ( - keyfile.get_keypair().ss58_address - == "5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" - ) - assert ( - f"0x{keyfile.get_keypair().public_key.hex()}" - == "0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f" - ) - - -def test_validate_password(): - """ - Test case for the validate_password function. - - This function tests the behavior of the validate_password function from the bittensor.keyfile module. - It checks various scenarios to ensure that the function correctly validates passwords. 
- """ - from bittensor.keyfile import validate_password - - assert validate_password(None) == False - assert validate_password("passw0rd") == False - assert validate_password("123456789") == False - with mock.patch("getpass.getpass", return_value="biTTensor"): - assert validate_password("biTTensor") == True - with mock.patch("getpass.getpass", return_value="biTTenso"): - assert validate_password("biTTensor") == False - - -def test_decrypt_keyfile_data_legacy(): - """ - Test case for decrypting legacy keyfile data. - - This test case verifies that the `decrypt_keyfile_data` function correctly decrypts - encrypted data using a legacy encryption scheme. - - The test generates a key using a password and encrypts a sample data. Then, it decrypts - the encrypted data using the same password and asserts that the decrypted data matches - the original data. - """ - import base64 - - from cryptography.fernet import Fernet - from cryptography.hazmat.backends import default_backend - from cryptography.hazmat.primitives import hashes - from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC - - from bittensor.keyfile import decrypt_keyfile_data - - __SALT = b"Iguesscyborgslikemyselfhaveatendencytobeparanoidaboutourorigins" - - def __generate_key(password): - kdf = PBKDF2HMAC( - algorithm=hashes.SHA256(), - salt=__SALT, - length=32, - iterations=10000000, - backend=default_backend(), - ) - key = base64.urlsafe_b64encode(kdf.derive(password.encode())) - return key - - pw = "fakepasssword238947239" - data = b"encrypt me!" - key = __generate_key(pw) - cipher_suite = Fernet(key) - encrypted_data = cipher_suite.encrypt(data) - - decrypted_data = decrypt_keyfile_data(encrypted_data, pw) - assert decrypted_data == data - - -def test_user_interface(): - """ - Test the user interface for asking password to encrypt. - - This test case uses the `ask_password_to_encrypt` function from the `bittensor.keyfile` module. 
- It mocks the `getpass.getpass` function to simulate user input of passwords. - The expected result is that the `ask_password_to_encrypt` function returns the correct password. - """ - from bittensor.keyfile import ask_password_to_encrypt - - with mock.patch( - "getpass.getpass", - side_effect=["pass", "password", "asdury3294y", "asdury3294y"], - ): - assert ask_password_to_encrypt() == "asdury3294y" - - -def test_overwriting(keyfile_setup_teardown): - """ - Test case for overwriting a keypair in the keyfile. - """ - root_path = keyfile_setup_teardown - keyfile = bittensor.keyfile(path=os.path.join(root_path, "keyfile")) - alice = bittensor.Keypair.create_from_uri("/Alice") - keyfile.set_keypair( - alice, encrypt=True, overwrite=True, password="thisisafakepassword" - ) - bob = bittensor.Keypair.create_from_uri("/Bob") - - with pytest.raises(bittensor.KeyFileError) as pytest_wrapped_e: - with mock.patch("builtins.input", return_value="n"): - keyfile.set_keypair( - bob, encrypt=True, overwrite=False, password="thisisafakepassword" - ) - - -def test_serialized_keypair_to_keyfile_data(keyfile_setup_teardown): - """ - Test case for serializing a keypair to keyfile data. - - This test case verifies that the `serialized_keypair_to_keyfile_data` function correctly - serializes a keypair to keyfile data. It then deserializes the keyfile data and asserts - that the deserialized keypair matches the original keypair. 
- """ - from bittensor.keyfile import serialized_keypair_to_keyfile_data - - root_path = keyfile_setup_teardown - keyfile = bittensor.keyfile(path=os.path.join(root_path, "keyfile")) - - mnemonic = bittensor.Keypair.generate_mnemonic(12) - keypair = bittensor.Keypair.create_from_mnemonic(mnemonic) - - keyfile.set_keypair( - keypair, encrypt=True, overwrite=True, password="thisisafakepassword" - ) - keypair_data = serialized_keypair_to_keyfile_data(keypair) - decoded_keypair_data = json.loads(keypair_data.decode()) - - assert decoded_keypair_data["secretPhrase"] == keypair.mnemonic - assert decoded_keypair_data["ss58Address"] == keypair.ss58_address - assert decoded_keypair_data["publicKey"] == f"0x{keypair.public_key.hex()}" - assert decoded_keypair_data["accountId"] == f"0x{keypair.public_key.hex()}" - - -def test_deserialize_keypair_from_keyfile_data(keyfile_setup_teardown): - """ - Test case for deserializing a keypair from keyfile data. - - This test case verifies that the `deserialize_keypair_from_keyfile_data` function correctly - deserializes keyfile data to a keypair. It first serializes a keypair to keyfile data and - then deserializes the keyfile data to a keypair. It then asserts that the deserialized keypair - matches the original keypair. 
- """ - from bittensor.keyfile import serialized_keypair_to_keyfile_data - from bittensor.keyfile import deserialize_keypair_from_keyfile_data - - root_path = keyfile_setup_teardown - keyfile = bittensor.keyfile(path=os.path.join(root_path, "keyfile")) - - mnemonic = bittensor.Keypair.generate_mnemonic(12) - keypair = bittensor.Keypair.create_from_mnemonic(mnemonic) - - keyfile.set_keypair( - keypair, encrypt=True, overwrite=True, password="thisisafakepassword" - ) - keypair_data = serialized_keypair_to_keyfile_data(keypair) - deserialized_keypair = deserialize_keypair_from_keyfile_data(keypair_data) - - assert deserialized_keypair.ss58_address == keypair.ss58_address - assert deserialized_keypair.public_key == keypair.public_key - assert deserialized_keypair.private_key == keypair.private_key - - -def test_get_coldkey_password_from_environment(monkeypatch): - password_by_wallet = { - "WALLET": "password", - "my_wallet": "password2", - "my-wallet": "password2", - } - - monkeypatch.setenv("bt_cold_pw_wallet", password_by_wallet["WALLET"]) - monkeypatch.setenv("BT_COLD_PW_My_Wallet", password_by_wallet["my_wallet"]) - - for wallet, password in password_by_wallet.items(): - assert get_coldkey_password_from_environment(wallet) == password - - assert get_coldkey_password_from_environment("non_existent_wallet") is None - - -def test_keyfile_error_incorrect_password(keyfile_setup_teardown): - """ - Test case for attempting to decrypt a keyfile with an incorrect password. 
- """ - root_path = keyfile_setup_teardown - keyfile = bittensor.keyfile(path=os.path.join(root_path, "keyfile")) - - # Ensure the keyfile is encrypted - assert keyfile.is_encrypted() - - # Attempt to decrypt with an incorrect password - with pytest.raises(bittensor.KeyFileError) as excinfo: - keyfile.get_keypair(password="incorrect_password") - - assert "Invalid password" in str(excinfo.value) diff --git a/tests/unit_tests/test_logging.py b/tests/unit_tests/test_logging.py deleted file mode 100644 index 1822fc86ef..0000000000 --- a/tests/unit_tests/test_logging.py +++ /dev/null @@ -1,170 +0,0 @@ -import pytest -import multiprocessing -import logging as stdlogging -from unittest.mock import MagicMock, patch -from bittensor.btlogging import LoggingMachine -from bittensor.btlogging.defines import DEFAULT_LOG_FILE_NAME, BITTENSOR_LOGGER_NAME -from bittensor.btlogging.loggingmachine import LoggingConfig - - -@pytest.fixture(autouse=True, scope="session") -def disable_stdout_streaming(): - # Backup original handlers - original_handlers = stdlogging.root.handlers[:] - - # Remove all handlers that stream to stdout - stdlogging.root.handlers = [ - h - for h in stdlogging.root.handlers - if not isinstance(h, stdlogging.StreamHandler) - ] - - yield # Yield control to the test or fixture setup - - # Restore original handlers after the test - stdlogging.root.handlers = original_handlers - - -@pytest.fixture -def mock_config(tmp_path): - # Using pytest's tmp_path fixture to generate a temporary directory - log_dir = tmp_path / "logs" - log_dir.mkdir() # Create the temporary directory - log_file_path = log_dir / DEFAULT_LOG_FILE_NAME - - mock_config = LoggingConfig( - debug=False, trace=False, record_log=True, logging_dir=str(log_dir) - ) - - yield mock_config, log_file_path - # Cleanup: No need to explicitly delete the log file or directory, tmp_path does it automatically - - -@pytest.fixture -def logging_machine(mock_config): - config, _ = mock_config - logging_machine = 
LoggingMachine(config=config) - yield logging_machine - - -def test_initialization(logging_machine, mock_config): - """ - Test initialization of LoggingMachine. - """ - config, log_file_path = mock_config # Unpack to get the log_file_path - - assert logging_machine.get_queue() is not None - assert isinstance(logging_machine.get_queue(), multiprocessing.queues.Queue) - assert logging_machine.get_config() == config - - # Ensure that handlers are set up correctly - assert any( - isinstance(handler, stdlogging.StreamHandler) - for handler in logging_machine._handlers - ) - if config.record_log and config.logging_dir: - assert any( - isinstance(handler, stdlogging.FileHandler) - for handler in logging_machine._handlers - ) - assert log_file_path.exists() # Check if log file is created - - -def test_state_transitions(logging_machine, mock_config): - """ - Test state transitions and the associated logging level changes. - """ - config, log_file_path = mock_config - with patch("bittensor.btlogging.loggingmachine.all_loggers") as mocked_all_loggers: - # mock the main bittensor logger, identified by its `name` field - mocked_bt_logger = MagicMock() - mocked_bt_logger.name = BITTENSOR_LOGGER_NAME - # third party loggers are treated differently and silenced under default - # logging settings - mocked_third_party_logger = MagicMock() - logging_machine._logger = mocked_bt_logger - mocked_all_loggers.return_value = [mocked_third_party_logger, mocked_bt_logger] - - # Enable/Disable Debug - # from default - assert logging_machine.current_state_value == "Default" - logging_machine.enable_debug() - assert logging_machine.current_state_value == "Debug" - # check log levels - mocked_bt_logger.setLevel.assert_called_with(stdlogging.DEBUG) - mocked_third_party_logger.setLevel.assert_called_with(stdlogging.DEBUG) - - logging_machine.disable_debug() - - # Enable/Disable Trace - assert logging_machine.current_state_value == "Default" - logging_machine.enable_trace() - assert 
logging_machine.current_state_value == "Trace" - # check log levels - mocked_bt_logger.setLevel.assert_called_with(stdlogging.TRACE) - mocked_third_party_logger.setLevel.assert_called_with(stdlogging.TRACE) - logging_machine.disable_trace() - assert logging_machine.current_state_value == "Default" - - # Enable Default - logging_machine.enable_debug() - assert logging_machine.current_state_value == "Debug" - logging_machine.enable_default() - assert logging_machine.current_state_value == "Default" - # main logger set to INFO - mocked_bt_logger.setLevel.assert_called_with(stdlogging.INFO) - # 3rd party loggers should be disabled by setting to CRITICAL - mocked_third_party_logger.setLevel.assert_called_with(stdlogging.CRITICAL) - - # Disable Logging - # from default - logging_machine.disable_logging() - assert logging_machine.current_state_value == "Disabled" - mocked_bt_logger.setLevel.assert_called_with(stdlogging.CRITICAL) - mocked_third_party_logger.setLevel.assert_called_with(stdlogging.CRITICAL) - - -def test_enable_file_logging_with_new_config(tmp_path): - """ - Test enabling file logging by setting a new config. - """ - log_dir = tmp_path / "logs" - log_dir.mkdir() # Create the temporary directory - log_file_path = log_dir / DEFAULT_LOG_FILE_NAME - - # check no file handler is created - config = LoggingConfig(debug=False, trace=False, record_log=True, logging_dir=None) - lm = LoggingMachine(config) - assert not any( - isinstance(handler, stdlogging.FileHandler) for handler in lm._handlers - ) - - # check file handler now exists - new_config = LoggingConfig( - debug=False, trace=False, record_log=True, logging_dir=str(log_dir) - ) - lm.set_config(new_config) - assert any(isinstance(handler, stdlogging.FileHandler) for handler in lm._handlers) - - -def test_all_log_levels_output(logging_machine, caplog): - """ - Test that all log levels are captured. 
- """ - logging_machine.set_trace() - - logging_machine.trace("Test trace") - logging_machine.debug("Test debug") - logging_machine.info("Test info") - logging_machine.success("Test success") - logging_machine.warning("Test warning") - logging_machine.error("Test error") - logging_machine.critical("Test critical") - - assert "Test trace" in caplog.text - assert "Test debug" in caplog.text - assert "Test info" in caplog.text - assert "Test success" in caplog.text - assert "Test warning" in caplog.text - assert "Test error" in caplog.text - assert "Test critical" in caplog.text diff --git a/tests/unit_tests/test_metagraph.py b/tests/unit_tests/test_metagraph.py deleted file mode 100644 index 40303297a5..0000000000 --- a/tests/unit_tests/test_metagraph.py +++ /dev/null @@ -1,206 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -from unittest.mock import Mock -import pytest -import numpy as np -import bittensor - -from bittensor.metagraph import metagraph as Metagraph -from unittest.mock import MagicMock - - -@pytest.fixture -def mock_environment(): - # Create a Mock for subtensor - subtensor = Mock() - - # Create a list of Mock Neurons - neurons = [ - Mock( - uid=i, - trust=i + 0.5, - consensus=i + 0.1, - incentive=i + 0.2, - dividends=i + 0.3, - rank=i + 0.4, - emission=i + 0.5, - active=i, - last_update=i, - validator_permit=i % 2 == 0, - validator_trust=i + 0.6, - total_stake=Mock(tao=i + 0.7), - stake=i + 0.8, - axon_info=f"axon_info_{i}", - weights=[(j, j + 0.1) for j in range(5)], - bonds=[(j, j + 0.2) for j in range(5)], - ) - for i in range(10) - ] - - return subtensor, neurons - - -def test_set_metagraph_attributes(mock_environment): - subtensor, neurons = mock_environment - metagraph = bittensor.metagraph(1, sync=False) - metagraph.neurons = neurons - metagraph._set_metagraph_attributes(block=5, subtensor=subtensor) - - # Check the attributes are set as expected - assert metagraph.n.item() == len(neurons) - assert metagraph.block.item() == 5 - assert ( - np.array_equal( - metagraph.uids, - np.array([neuron.uid for neuron in neurons], dtype=np.int64), - ) - is True - ) - - assert ( - np.array_equal( - metagraph.trust, - np.array([neuron.trust for neuron in neurons], dtype=np.float32), - ) - is True - ) - - assert ( - np.array_equal( - metagraph.consensus, - np.array([neuron.consensus for neuron in neurons], dtype=np.float32), - ) - is True - ) - # Similarly for other attributes... 
- - # Test the axons - assert metagraph.axons == [n.axon_info for n in neurons] - - -def test_process_weights_or_bonds(mock_environment): - _, neurons = mock_environment - metagraph = bittensor.metagraph(1, sync=False) - metagraph.neurons = neurons - - # Test weights processing - weights = metagraph._process_weights_or_bonds( - data=[neuron.weights for neuron in neurons], attribute="weights" - ) - assert weights.shape[0] == len( - neurons - ) # Number of rows should be equal to number of neurons - assert weights.shape[1] == len( - neurons - ) # Number of columns should be equal to number of neurons - # TODO: Add more checks to ensure the weights have been processed correctly - - # Test bonds processing - bonds = metagraph._process_weights_or_bonds( - data=[neuron.bonds for neuron in neurons], attribute="bonds" - ) - assert bonds.shape[0] == len( - neurons - ) # Number of rows should be equal to number of neurons - assert bonds.shape[1] == len( - neurons - ) # Number of columns should be equal to number of neurons - - # TODO: Add more checks to ensure the bonds have been processed correctly - - -def test_process_weights_or_bonds_torch( - mock_environment, force_legacy_torch_compat_api -): - _, neurons = mock_environment - metagraph = bittensor.metagraph(1, sync=False) - metagraph.neurons = neurons - - # Test weights processing - weights = metagraph._process_weights_or_bonds( - data=[neuron.weights for neuron in neurons], attribute="weights" - ) - assert weights.shape[0] == len( - neurons - ) # Number of rows should be equal to number of neurons - assert weights.shape[1] == len( - neurons - ) # Number of columns should be equal to number of neurons - # TODO: Add more checks to ensure the weights have been processed correctly - - # Test bonds processing - bonds = metagraph._process_weights_or_bonds( - data=[neuron.bonds for neuron in neurons], attribute="bonds" - ) - assert bonds.shape[0] == len( - neurons - ) # Number of rows should be equal to number of neurons - 
assert bonds.shape[1] == len( - neurons - ) # Number of columns should be equal to number of neurons - - -# Mocking the bittensor.subtensor class for testing purposes -@pytest.fixture -def mock_subtensor(): - subtensor = MagicMock() - subtensor.chain_endpoint = bittensor.__finney_entrypoint__ - subtensor.network = "finney" - subtensor.get_current_block.return_value = 601 - return subtensor - - -# Mocking the metagraph instance for testing purposes -@pytest.fixture -def metagraph_instance(): - metagraph = Metagraph(netuid=1337, sync=False) - metagraph._assign_neurons = MagicMock() - metagraph._set_metagraph_attributes = MagicMock() - metagraph._set_weights_and_bonds = MagicMock() - return metagraph - - -@pytest.fixture -def loguru_sink(): - class LogSink: - def __init__(self): - self.messages = [] - - def write(self, message): - # Assuming `message` is an object, you might need to adjust how you extract the text - self.messages.append(str(message)) - - def __contains__(self, item): - return any(item in message for message in self.messages) - - return LogSink() - - -@pytest.mark.parametrize( - "block, test_id", - [ - (300, "warning_case_block_greater_than_300"), - ], -) -def test_sync_warning_cases(block, test_id, metagraph_instance, mock_subtensor, caplog): - metagraph_instance.sync(block=block, lite=True, subtensor=mock_subtensor) - - expected_message = "Attempting to sync longer than 300 blocks ago on a non-archive node. Please use the 'archive' network for subtensor and retry." - assert ( - expected_message in caplog.text - ), f"Test ID: {test_id} - Expected warning message not found in Loguru sink." 
diff --git a/tests/unit_tests/test_overview.py b/tests/unit_tests/test_overview.py deleted file mode 100644 index 638ab4df4c..0000000000 --- a/tests/unit_tests/test_overview.py +++ /dev/null @@ -1,266 +0,0 @@ -# Standard Lib -from copy import deepcopy -from unittest.mock import MagicMock, patch - -# Pytest -import pytest - -# Bittensor -import bittensor -from bittensor.commands.overview import OverviewCommand -from tests.unit_tests.factories.neuron_factory import NeuronInfoLiteFactory - - -@pytest.fixture -def mock_subtensor(): - mock = MagicMock() - mock.get_balance = MagicMock(return_value=100) - return mock - - -def fake_config(**kwargs): - config = deepcopy(construct_config()) - for key, value in kwargs.items(): - setattr(config, key, value) - return config - - -def construct_config(): - parser = bittensor.cli.__create_parser__() - defaults = bittensor.config(parser=parser, args=[]) - # Parse commands and subcommands - for command in bittensor.ALL_COMMANDS: - if ( - command in bittensor.ALL_COMMANDS - and "commands" in bittensor.ALL_COMMANDS[command] - ): - for subcommand in bittensor.ALL_COMMANDS[command]["commands"]: - defaults.merge( - bittensor.config(parser=parser, args=[command, subcommand]) - ) - else: - defaults.merge(bittensor.config(parser=parser, args=[command])) - - defaults.netuid = 1 - # Always use mock subtensor. - defaults.subtensor.network = "finney" - # Skip version checking. 
- defaults.no_version_checking = True - - return defaults - - -@pytest.fixture -def mock_wallet(): - mock = MagicMock() - mock.coldkeypub_file.exists_on_device = MagicMock(return_value=True) - mock.coldkeypub_file.is_encrypted = MagicMock(return_value=False) - mock.coldkeypub.ss58_address = "fake_address" - return mock - - -class MockHotkey: - def __init__(self, hotkey_str): - self.hotkey_str = hotkey_str - - -class MockCli: - def __init__(self, config): - self.config = config - - -@pytest.mark.parametrize( - "config_all, exists_on_device, is_encrypted, expected_balance, test_id", - [ - (True, True, False, 100, "happy_path_all_wallets"), - (False, True, False, 100, "happy_path_single_wallet"), - (True, False, False, 0, "edge_case_no_wallets_found"), - (True, True, True, 0, "edge_case_encrypted_wallet"), - ], -) -def test_get_total_balance( - mock_subtensor, - mock_wallet, - config_all, - exists_on_device, - is_encrypted, - expected_balance, - test_id, -): - # Arrange - cli = MockCli(fake_config(all=config_all)) - mock_wallet.coldkeypub_file.exists_on_device.return_value = exists_on_device - mock_wallet.coldkeypub_file.is_encrypted.return_value = is_encrypted - - with patch( - "bittensor.wallet", return_value=mock_wallet - ) as mock_wallet_constructor, patch( - "bittensor.commands.overview.get_coldkey_wallets_for_path", - return_value=[mock_wallet] if config_all else [], - ), patch( - "bittensor.commands.overview.get_all_wallets_for_path", - return_value=[mock_wallet], - ), patch( - "bittensor.commands.overview.get_hotkey_wallets_for_wallet", - return_value=[mock_wallet], - ): - # Act - result_hotkeys, result_balance = OverviewCommand._get_total_balance( - 0, mock_subtensor, cli - ) - - # Assert - assert result_balance == expected_balance, f"Test ID: {test_id}" - assert all( - isinstance(hotkey, MagicMock) for hotkey in result_hotkeys - ), f"Test ID: {test_id}" - - -@pytest.mark.parametrize( - "config, all_hotkeys, expected_result, test_id", - [ - # Happy path tests 
- ( - {"all_hotkeys": False, "hotkeys": ["abc123", "xyz456"]}, - [MockHotkey("abc123"), MockHotkey("xyz456"), MockHotkey("mno567")], - ["abc123", "xyz456"], - "test_happy_path_included", - ), - ( - {"all_hotkeys": True, "hotkeys": ["abc123", "xyz456"]}, - [MockHotkey("abc123"), MockHotkey("xyz456"), MockHotkey("mno567")], - ["mno567"], - "test_happy_path_excluded", - ), - # Edge cases - ( - {"all_hotkeys": False, "hotkeys": []}, - [MockHotkey("abc123"), MockHotkey("xyz456")], - [], - "test_edge_no_hotkeys_specified", - ), - ( - {"all_hotkeys": True, "hotkeys": []}, - [MockHotkey("abc123"), MockHotkey("xyz456")], - ["abc123", "xyz456"], - "test_edge_all_hotkeys_excluded", - ), - ( - {"all_hotkeys": False, "hotkeys": ["abc123", "xyz456"]}, - [], - [], - "test_edge_no_hotkeys_available", - ), - ( - {"all_hotkeys": True, "hotkeys": ["abc123", "xyz456"]}, - [], - [], - "test_edge_no_hotkeys_available_excluded", - ), - ], -) -def test_get_hotkeys(config, all_hotkeys, expected_result, test_id): - # Arrange - cli = MockCli( - fake_config( - hotkeys=config.get("hotkeys"), all_hotkeys=config.get("all_hotkeys") - ) - ) - - # Act - result = OverviewCommand._get_hotkeys(cli, all_hotkeys) - - # Assert - assert [ - hotkey.hotkey_str for hotkey in result - ] == expected_result, f"Failed {test_id}" - - -def test_get_hotkeys_error(): - # Arrange - cli = MockCli(fake_config(hotkeys=["abc123", "xyz456"], all_hotkeys=False)) - all_hotkeys = None - - # Act - with pytest.raises(TypeError): - OverviewCommand._get_hotkeys(cli, all_hotkeys) - - -@pytest.fixture -def neuron_info(): - return [ - (1, [NeuronInfoLiteFactory(netuid=1)], None), - (2, [NeuronInfoLiteFactory(netuid=2)], None), - ] - - -@pytest.fixture -def neurons_dict(): - return { - "1": [NeuronInfoLiteFactory(netuid=1)], - "2": [NeuronInfoLiteFactory(netuid=2)], - } - - -@pytest.fixture -def netuids_list(): - return [1, 2] - - -# Test cases -@pytest.mark.parametrize( - "test_id, results, expected_neurons, expected_netuids", - [ 
- # Test ID: 01 - Happy path, all neurons processed correctly - ( - "01", - [ - (1, [NeuronInfoLiteFactory(netuid=1)], None), - (2, [NeuronInfoLiteFactory(netuid=2)], None), - ], - { - "1": [NeuronInfoLiteFactory(netuid=1)], - "2": [NeuronInfoLiteFactory(netuid=2)], - }, - [1, 2], - ), - # Test ID: 02 - Error message present, should skip processing for that netuid - ( - "02", - [ - (1, [NeuronInfoLiteFactory(netuid=1)], None), - (2, [], "Error fetching data"), - ], - {"1": [NeuronInfoLiteFactory()]}, - [1], - ), - # Test ID: 03 - No neurons found for a netuid, should remove the netuid - ( - "03", - [(1, [NeuronInfoLiteFactory()], None), (2, [], None)], - {"1": [NeuronInfoLiteFactory()]}, - [1], - ), - # Test ID: 04 - Mixed conditions - ( - "04", - [ - (1, [NeuronInfoLiteFactory(netuid=1)], None), - (2, [], None), - ], - {"1": [NeuronInfoLiteFactory()]}, - [1], - ), - ], -) -def test_process_neuron_results( - test_id, results, expected_neurons, expected_netuids, neurons_dict, netuids_list -): - # Act - actual_neurons = OverviewCommand._process_neuron_results( - results, neurons_dict, netuids_list - ) - - # Assert - assert actual_neurons.keys() == expected_neurons.keys(), f"Failed test {test_id}" - assert netuids_list == expected_netuids, f"Failed test {test_id}" diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py deleted file mode 100644 index b8dfc3e81b..0000000000 --- a/tests/unit_tests/test_subtensor.py +++ /dev/null @@ -1,2353 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following 
conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -# Standard Lib -import argparse -import unittest.mock as mock -from unittest.mock import MagicMock - -# 3rd Party -import pytest - -# Application -import bittensor -from bittensor.subtensor import ( - Subtensor, - _logger, - Balance, -) -from bittensor.chain_data import SubnetHyperparameters -from bittensor.commands.utils import normalize_hyperparameters -from bittensor import subtensor_module -from bittensor.utils.balance import Balance - -U16_MAX = 65535 -U64_MAX = 18446744073709551615 - - -def test_serve_axon_with_external_ip_set(): - internal_ip: str = "192.0.2.146" - external_ip: str = "2001:0db8:85a3:0000:0000:8a2e:0370:7334" - - mock_serve_axon = MagicMock(return_value=True) - - mock_subtensor = MagicMock(spec=bittensor.subtensor, serve_axon=mock_serve_axon) - - mock_add_insecure_port = mock.MagicMock(return_value=None) - mock_wallet = MagicMock( - spec=bittensor.wallet, - coldkey=MagicMock(), - coldkeypub=MagicMock( - # mock ss58 address - ss58_address="5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" - ), - hotkey=MagicMock( - ss58_address="5CtstubuSoVLJGCXkiWRNKrrGg2DVBZ9qMs2qYTLsZR4q1Wg" - ), - ) - - mock_config = bittensor.axon.config() - mock_axon_with_external_ip_set = bittensor.axon( - wallet=mock_wallet, - ip=internal_ip, - external_ip=external_ip, - config=mock_config, - ) - - mock_subtensor.serve_axon( - netuid=-1, - 
axon=mock_axon_with_external_ip_set, - ) - - mock_serve_axon.assert_called_once() - - # verify that the axon is served to the network with the external ip - _, kwargs = mock_serve_axon.call_args - axon_info = kwargs["axon"].info() - assert axon_info.ip == external_ip - - -def test_serve_axon_with_external_port_set(): - external_ip: str = "2001:0db8:85a3:0000:0000:8a2e:0370:7334" - - internal_port: int = 1234 - external_port: int = 5678 - - mock_serve = MagicMock(return_value=True) - - mock_serve_axon = MagicMock(return_value=True) - - mock_subtensor = MagicMock( - spec=bittensor.subtensor, - serve=mock_serve, - serve_axon=mock_serve_axon, - ) - - mock_wallet = MagicMock( - spec=bittensor.wallet, - coldkey=MagicMock(), - coldkeypub=MagicMock( - # mock ss58 address - ss58_address="5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" - ), - hotkey=MagicMock( - ss58_address="5CtstubuSoVLJGCXkiWRNKrrGg2DVBZ9qMs2qYTLsZR4q1Wg" - ), - ) - - mock_config = bittensor.axon.config() - - mock_axon_with_external_port_set = bittensor.axon( - wallet=mock_wallet, - port=internal_port, - external_port=external_port, - config=mock_config, - ) - - with mock.patch( - "bittensor.utils.networking.get_external_ip", return_value=external_ip - ): - # mock the get_external_ip function to return the external ip - mock_subtensor.serve_axon( - netuid=-1, - axon=mock_axon_with_external_port_set, - ) - - mock_serve_axon.assert_called_once() - # verify that the axon is served to the network with the external port - _, kwargs = mock_serve_axon.call_args - axon_info = kwargs["axon"].info() - assert axon_info.port == external_port - - -class ExitEarly(Exception): - """Mock exception to exit early from the called code""" - - pass - - -def test_stake_multiple(): - mock_amount: bittensor.Balance = bittensor.Balance.from_tao(1.0) - - mock_wallet = MagicMock( - spec=bittensor.wallet, - coldkey=MagicMock(), - coldkeypub=MagicMock( - # mock ss58 address - 
ss58_address="5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" - ), - hotkey=MagicMock( - ss58_address="5CtstubuSoVLJGCXkiWRNKrrGg2DVBZ9qMs2qYTLsZR4q1Wg" - ), - ) - - mock_hotkey_ss58s = ["5CtstubuSoVLJGCXkiWRNKrrGg2DVBZ9qMs2qYTLsZR4q1Wg"] - - mock_amounts = [mock_amount] # more than 1000 RAO - - mock_neuron = MagicMock( - is_null=False, - ) - - mock_do_stake = MagicMock(side_effect=ExitEarly) - - mock_subtensor = MagicMock( - spec=bittensor.subtensor, - network="mock_net", - get_balance=MagicMock( - return_value=bittensor.Balance.from_tao(mock_amount.tao + 20.0) - ), # enough balance to stake - get_neuron_for_pubkey_and_subnet=MagicMock(return_value=mock_neuron), - _do_stake=mock_do_stake, - ) - - with pytest.raises(ExitEarly): - bittensor.subtensor.add_stake_multiple( - mock_subtensor, - wallet=mock_wallet, - hotkey_ss58s=mock_hotkey_ss58s, - amounts=mock_amounts, - ) - - mock_do_stake.assert_called_once() - # args, kwargs - _, kwargs = mock_do_stake.call_args - - assert kwargs["amount"] == pytest.approx( - mock_amount.rao, rel=1e9 - ) # delta of 1.0 TAO - - -@pytest.mark.parametrize( - "test_id, expected_output", - [ - # Happy path test - ( - "happy_path_default", - "Create and return a new object. 
See help(type) for accurate signature.", - ), - ], -) -def test_help(test_id, expected_output, capsys): - # Act - Subtensor.help() - - # Assert - captured = capsys.readouterr() - assert expected_output in captured.out, f"Test case {test_id} failed" - - -@pytest.fixture -def parser(): - return argparse.ArgumentParser() - - -# Mocking argparse.ArgumentParser.add_argument method to simulate ArgumentError -def test_argument_error_handling(monkeypatch, parser): - def mock_add_argument(*args, **kwargs): - raise argparse.ArgumentError(None, "message") - - monkeypatch.setattr(argparse.ArgumentParser, "add_argument", mock_add_argument) - # No exception should be raised - Subtensor.add_args(parser) - - -@pytest.mark.parametrize( - "network, expected_network, expected_endpoint", - [ - # Happy path tests - ("finney", "finney", bittensor.__finney_entrypoint__), - ("local", "local", bittensor.__local_entrypoint__), - ("test", "test", bittensor.__finney_test_entrypoint__), - ("archive", "archive", bittensor.__archive_entrypoint__), - # Endpoint override tests - ( - bittensor.__finney_entrypoint__, - "finney", - bittensor.__finney_entrypoint__, - ), - ( - "entrypoint-finney.opentensor.ai", - "finney", - bittensor.__finney_entrypoint__, - ), - ( - bittensor.__finney_test_entrypoint__, - "test", - bittensor.__finney_test_entrypoint__, - ), - ( - "test.finney.opentensor.ai", - "test", - bittensor.__finney_test_entrypoint__, - ), - ( - bittensor.__archive_entrypoint__, - "archive", - bittensor.__archive_entrypoint__, - ), - ( - "archive.chain.opentensor.ai", - "archive", - bittensor.__archive_entrypoint__, - ), - ("127.0.0.1", "local", "127.0.0.1"), - ("localhost", "local", "localhost"), - # Edge cases - (None, None, None), - ("unknown", "unknown network", "unknown"), - ], -) -def test_determine_chain_endpoint_and_network( - network, expected_network, expected_endpoint -): - # Act - result_network, result_endpoint = Subtensor.determine_chain_endpoint_and_network( - network - ) - - # 
Assert - assert result_network == expected_network - assert result_endpoint == expected_endpoint - - -# Subtensor().get_error_info_by_index tests -@pytest.fixture -def substrate(): - class MockSubstrate: - pass - - return MockSubstrate() - - -@pytest.fixture -def subtensor(substrate): - mock.patch.object( - subtensor_module, - "get_subtensor_errors", - return_value={ - "1": ("ErrorOne", "Description one"), - "2": ("ErrorTwo", "Description two"), - }, - ).start() - return Subtensor() - - -def test_get_error_info_by_index_known_error(subtensor): - name, description = subtensor.get_error_info_by_index(1) - assert name == "ErrorOne" - assert description == "Description one" - - -@pytest.fixture -def mock_logger(): - with mock.patch.object(_logger, "warning") as mock_warning: - yield mock_warning - - -def test_get_error_info_by_index_unknown_error(subtensor, mock_logger): - fake_index = 999 - name, description = subtensor.get_error_info_by_index(fake_index) - assert name == "Unknown Error" - assert description == "" - mock_logger.assert_called_once_with( - f"Subtensor returned an error with an unknown index: {fake_index}" - ) - - -# Subtensor()._get_hyperparameter tests -def test_hyperparameter_subnet_does_not_exist(subtensor, mocker): - """Tests when the subnet does not exist.""" - subtensor.subnet_exists = mocker.MagicMock(return_value=False) - assert subtensor._get_hyperparameter("Difficulty", 1, None) is None - subtensor.subnet_exists.assert_called_once_with(1, None) - - -def test_hyperparameter_result_is_none(subtensor, mocker): - """Tests when query_subtensor returns None.""" - subtensor.subnet_exists = mocker.MagicMock(return_value=True) - subtensor.query_subtensor = mocker.MagicMock(return_value=None) - assert subtensor._get_hyperparameter("Difficulty", 1, None) is None - subtensor.subnet_exists.assert_called_once_with(1, None) - subtensor.query_subtensor.assert_called_once_with("Difficulty", None, [1]) - - -def test_hyperparameter_result_has_no_value(subtensor, 
mocker): - """Test when the result has no 'value' attribute.""" - - subtensor.subnet_exists = mocker.MagicMock(return_value=True) - subtensor.query_subtensor = mocker.MagicMock(return_value=None) - assert subtensor._get_hyperparameter("Difficulty", 1, None) is None - subtensor.subnet_exists.assert_called_once_with(1, None) - subtensor.query_subtensor.assert_called_once_with("Difficulty", None, [1]) - - -def test_hyperparameter_success_int(subtensor, mocker): - """Test when query_subtensor returns an integer value.""" - subtensor.subnet_exists = mocker.MagicMock(return_value=True) - subtensor.query_subtensor = mocker.MagicMock( - return_value=mocker.MagicMock(value=100) - ) - assert subtensor._get_hyperparameter("Difficulty", 1, None) == 100 - subtensor.subnet_exists.assert_called_once_with(1, None) - subtensor.query_subtensor.assert_called_once_with("Difficulty", None, [1]) - - -def test_hyperparameter_success_float(subtensor, mocker): - """Test when query_subtensor returns a float value.""" - subtensor.subnet_exists = mocker.MagicMock(return_value=True) - subtensor.query_subtensor = mocker.MagicMock( - return_value=mocker.MagicMock(value=0.5) - ) - assert subtensor._get_hyperparameter("Difficulty", 1, None) == 0.5 - subtensor.subnet_exists.assert_called_once_with(1, None) - subtensor.query_subtensor.assert_called_once_with("Difficulty", None, [1]) - - -# Tests Hyper parameter calls -@pytest.mark.parametrize( - "method, param_name, value, expected_result_type", - [ - ("rho", "Rho", 1, int), - ("kappa", "Kappa", 1.0, float), - ("difficulty", "Difficulty", 1, int), - ("recycle", "Burn", 1, Balance), - ("immunity_period", "ImmunityPeriod", 1, int), - ("validator_batch_size", "ValidatorBatchSize", 1, int), - ("validator_prune_len", "ValidatorPruneLen", 1, int), - ("validator_logits_divergence", "ValidatorLogitsDivergence", 1.0, float), - ("validator_sequence_length", "ValidatorSequenceLength", 1, int), - ("validator_epochs_per_reset", "ValidatorEpochsPerReset", 1, 
int), - ("validator_epoch_length", "ValidatorEpochLen", 1, int), - ("validator_exclude_quantile", "ValidatorExcludeQuantile", 1.0, float), - ("max_allowed_validators", "MaxAllowedValidators", 1, int), - ("min_allowed_weights", "MinAllowedWeights", 1, int), - ("max_weight_limit", "MaxWeightsLimit", 1, float), - ("adjustment_alpha", "AdjustmentAlpha", 1, float), - ("bonds_moving_avg", "BondsMovingAverage", 1, float), - ("scaling_law_power", "ScalingLawPower", 1, float), - ("synergy_scaling_law_power", "SynergyScalingLawPower", 1, float), - ("subnetwork_n", "SubnetworkN", 1, int), - ("max_n", "MaxAllowedUids", 1, int), - ("blocks_since_epoch", "BlocksSinceEpoch", 1, int), - ("tempo", "Tempo", 1, int), - ], -) -def test_hyper_parameter_success_calls( - subtensor, mocker, method, param_name, value, expected_result_type -): - """ - Tests various hyperparameter methods to ensure they correctly fetch their respective hyperparameters and return the - expected values. - """ - # Prep - subtensor._get_hyperparameter = mocker.MagicMock(return_value=value) - - spy_u16_normalized_float = mocker.spy(subtensor_module, "U16_NORMALIZED_FLOAT") - spy_u64_normalized_float = mocker.spy(subtensor_module, "U64_NORMALIZED_FLOAT") - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - subtensor_method = getattr(subtensor, method) - result = subtensor_method(netuid=7, block=707) - - # Assertions - subtensor._get_hyperparameter.assert_called_once_with( - block=707, netuid=7, param_name=param_name - ) - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert isinstance(result, expected_result_type) - - # Special cases - if method in [ - "kappa", - "validator_logits_divergence", - "validator_exclude_quantile", - "max_weight_limit", - ]: - spy_u16_normalized_float.assert_called_once() - - if method in ["adjustment_alpha", "bonds_moving_avg"]: - spy_u64_normalized_float.assert_called_once() - - if method in ["recycle"]: - 
spy_balance_from_rao.assert_called_once() - - -def test_blocks_since_last_update_success_calls(subtensor, mocker): - """Tests the weights_rate_limit method to ensure it correctly fetches the LastUpdate hyperparameter.""" - # Prep - uid = 7 - mocked_current_block = 2 - mocked_result = {uid: 1} - subtensor._get_hyperparameter = mocker.MagicMock(return_value=mocked_result) - subtensor.get_current_block = mocker.MagicMock(return_value=mocked_current_block) - - # Call - result = subtensor.blocks_since_last_update(netuid=7, uid=uid) - - # Assertions - subtensor.get_current_block.assert_called_once() - subtensor._get_hyperparameter.assert_called_once_with( - param_name="LastUpdate", netuid=7 - ) - assert result == 1 - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert isinstance(result, int) - - -def test_weights_rate_limit_success_calls(subtensor, mocker): - """Tests the weights_rate_limit method to ensure it correctly fetches the WeightsSetRateLimit hyperparameter.""" - # Prep - subtensor._get_hyperparameter = mocker.MagicMock(return_value=5) - - # Call - result = subtensor.weights_rate_limit(netuid=7) - - # Assertions - subtensor._get_hyperparameter.assert_called_once_with( - param_name="WeightsSetRateLimit", netuid=7 - ) - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert isinstance(result, int) - - -@pytest.fixture -def sample_hyperparameters(): - return MagicMock(spec=SubnetHyperparameters) - - -def get_normalized_value(normalized_data, param_name): - return next( - ( - norm_value - for p_name, _, norm_value in normalized_data - if p_name == param_name - ), - None, - ) - - -@pytest.mark.parametrize( - "param_name, max_value, mid_value, zero_value, is_balance", - [ - ("adjustment_alpha", U64_MAX, U64_MAX / 2, 0, False), - ("max_weight_limit", U16_MAX, U16_MAX / 2, 0, False), - ("difficulty", U64_MAX, U64_MAX / 2, 0, False), - ("min_difficulty", 
U64_MAX, U64_MAX / 2, 0, False), - ("max_difficulty", U64_MAX, U64_MAX / 2, 0, False), - ("bonds_moving_avg", U64_MAX, U64_MAX / 2, 0, False), - ("min_burn", 10000000000, 5000000000, 0, True), # These are in rao - ("max_burn", 20000000000, 10000000000, 0, True), - ], - ids=[ - "adjustment-alpha", - "max_weight_limit", - "difficulty", - "min_difficulty", - "max_difficulty", - "bonds_moving_avg", - "min_burn", - "max_burn", - ], -) -def test_hyperparameter_normalization( - sample_hyperparameters, param_name, max_value, mid_value, zero_value, is_balance -): - setattr(sample_hyperparameters, param_name, mid_value) - normalized = normalize_hyperparameters(sample_hyperparameters) - norm_value = get_normalized_value(normalized, param_name) - - # Mid-value test - if is_balance: - numeric_value = float(str(norm_value).lstrip(bittensor.__tao_symbol__)) - expected_tao = mid_value / 1e9 - assert ( - numeric_value == expected_tao - ), f"Mismatch in tao value for {param_name} at mid value" - else: - assert float(norm_value) == 0.5, f"Failed mid-point test for {param_name}" - - # Max-value test - setattr(sample_hyperparameters, param_name, max_value) - normalized = normalize_hyperparameters(sample_hyperparameters) - norm_value = get_normalized_value(normalized, param_name) - - if is_balance: - numeric_value = float(str(norm_value).lstrip(bittensor.__tao_symbol__)) - expected_tao = max_value / 1e9 - assert ( - numeric_value == expected_tao - ), f"Mismatch in tao value for {param_name} at max value" - else: - assert float(norm_value) == 1.0, f"Failed max value test for {param_name}" - - # Zero-value test - setattr(sample_hyperparameters, param_name, zero_value) - normalized = normalize_hyperparameters(sample_hyperparameters) - norm_value = get_normalized_value(normalized, param_name) - - if is_balance: - numeric_value = float(str(norm_value).lstrip(bittensor.__tao_symbol__)) - expected_tao = zero_value / 1e9 - assert ( - numeric_value == expected_tao - ), f"Mismatch in tao value 
for {param_name} at zero value" - else: - assert float(norm_value) == 0.0, f"Failed zero value test for {param_name}" - - -########################### -# Account functions tests # -########################### - - -# `get_total_stake_for_hotkey` tests -def test_get_total_stake_for_hotkey_success(subtensor, mocker): - """Tests successful retrieval of total stake for hotkey.""" - # Prep - subtensor.query_subtensor = mocker.MagicMock(return_value=mocker.MagicMock(value=1)) - fake_ss58_address = "12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU" - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_total_stake_for_hotkey(ss58_address=fake_ss58_address) - - # Assertions - subtensor.query_subtensor.assert_called_once_with( - "TotalHotkeyStake", None, [fake_ss58_address] - ) - spy_balance_from_rao.assert_called_once() - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert isinstance(result, Balance) - - -def test_get_total_stake_for_hotkey_not_result(subtensor, mocker): - """Tests retrieval of total stake for hotkey when no result is returned.""" - # Prep - subtensor.query_subtensor = mocker.MagicMock(return_value=None) - fake_ss58_address = "12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU" - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_total_stake_for_hotkey(ss58_address=fake_ss58_address) - - # Assertions - subtensor.query_subtensor.assert_called_once_with( - "TotalHotkeyStake", None, [fake_ss58_address] - ) - spy_balance_from_rao.assert_not_called() - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert isinstance(result, type(None)) - - -def test_get_total_stake_for_hotkey_not_value(subtensor, mocker): - """Tests retrieval of total stake for hotkey when no value attribute is present.""" - # Prep - subtensor.query_subtensor = mocker.MagicMock(return_value=object) - 
fake_ss58_address = "12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU" - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_total_stake_for_hotkey(ss58_address=fake_ss58_address) - - # Assertions - subtensor.query_subtensor.assert_called_once_with( - "TotalHotkeyStake", None, [fake_ss58_address] - ) - spy_balance_from_rao.assert_not_called() - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert isinstance(subtensor.query_subtensor.return_value, object) - assert not hasattr(result, "value") - - -# `get_total_stake_for_coldkey` tests -def test_get_total_stake_for_coldkey_success(subtensor, mocker): - """Tests successful retrieval of total stake for coldkey.""" - # Prep - subtensor.query_subtensor = mocker.MagicMock(return_value=mocker.MagicMock(value=1)) - fake_ss58_address = "12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU" - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_total_stake_for_coldkey(ss58_address=fake_ss58_address) - - # Assertions - subtensor.query_subtensor.assert_called_once_with( - "TotalColdkeyStake", None, [fake_ss58_address] - ) - spy_balance_from_rao.assert_called_once() - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert isinstance(result, Balance) - - -def test_get_total_stake_for_coldkey_not_result(subtensor, mocker): - """Tests retrieval of total stake for coldkey when no result is returned.""" - # Prep - subtensor.query_subtensor = mocker.MagicMock(return_value=None) - fake_ss58_address = "12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU" - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_total_stake_for_coldkey(ss58_address=fake_ss58_address) - - # Assertions - subtensor.query_subtensor.assert_called_once_with( - "TotalColdkeyStake", None, [fake_ss58_address] - ) - 
spy_balance_from_rao.assert_not_called() - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert isinstance(result, type(None)) - - -def test_get_total_stake_for_coldkey_not_value(subtensor, mocker): - """Tests retrieval of total stake for coldkey when no value attribute is present.""" - # Prep - subtensor.query_subtensor = mocker.MagicMock(return_value=object) - fake_ss58_address = "12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU" - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_total_stake_for_coldkey(ss58_address=fake_ss58_address) - - # Assertions - subtensor.query_subtensor.assert_called_once_with( - "TotalColdkeyStake", None, [fake_ss58_address] - ) - spy_balance_from_rao.assert_not_called() - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert isinstance(subtensor.query_subtensor.return_value, object) - assert not hasattr(result, "value") - - -# `get_stake` tests -def test_get_stake_returns_correct_data(mocker, subtensor): - """Tests that get_stake returns correct data.""" - # Prep - hotkey_ss58 = "test_hotkey" - block = 123 - expected_query_result = [ - (mocker.MagicMock(value="coldkey1"), mocker.MagicMock(value=100)), - (mocker.MagicMock(value="coldkey2"), mocker.MagicMock(value=200)), - ] - mocker.patch.object( - subtensor, "query_map_subtensor", return_value=expected_query_result - ) - - # Call - result = subtensor.get_stake(hotkey_ss58, block) - - # Assertion - assert result == [ - ("coldkey1", Balance.from_rao(100)), - ("coldkey2", Balance.from_rao(200)), - ] - subtensor.query_map_subtensor.assert_called_once_with("Stake", block, [hotkey_ss58]) - - -def test_get_stake_no_block(mocker, subtensor): - """Tests get_stake with no block specified.""" - # Prep - hotkey_ss58 = "test_hotkey" - expected_query_result = [ - (MagicMock(value="coldkey1"), MagicMock(value=100)), - ] - mocker.patch.object( - 
subtensor, "query_map_subtensor", return_value=expected_query_result - ) - - # Call - result = subtensor.get_stake(hotkey_ss58) - - # Assertion - assert result == [("coldkey1", Balance.from_rao(100))] - subtensor.query_map_subtensor.assert_called_once_with("Stake", None, [hotkey_ss58]) - - -def test_get_stake_empty_result(mocker, subtensor): - """Tests get_stake with an empty result.""" - # Prep - hotkey_ss58 = "test_hotkey" - block = 123 - expected_query_result = [] - mocker.patch.object( - subtensor, "query_map_subtensor", return_value=expected_query_result - ) - - # Call - result = subtensor.get_stake(hotkey_ss58, block) - - # Assertion - assert result == [] - subtensor.query_map_subtensor.assert_called_once_with("Stake", block, [hotkey_ss58]) - - -# `does_hotkey_exist` tests -def test_does_hotkey_exist_true(mocker, subtensor): - """Test does_hotkey_exist returns True when hotkey exists and is valid.""" - # Prep - hotkey_ss58 = "test_hotkey" - block = 123 - mock_result = mocker.MagicMock(value="valid_coldkey") - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.does_hotkey_exist(hotkey_ss58, block) - - # Assertions - assert result is True - subtensor.query_subtensor.assert_called_once_with("Owner", block, [hotkey_ss58]) - - -def test_does_hotkey_exist_false_special_value(mocker, subtensor): - """Test does_hotkey_exist returns False when result value is the special value.""" - # Prep - hotkey_ss58 = "test_hotkey" - block = 123 - special_value = "5C4hrfjw9DjXZTzV3MwzrrAr9P1MJhSrvWGWqi1eSuyUpnhM" - mock_result = MagicMock(value=special_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.does_hotkey_exist(hotkey_ss58, block) - - # Assertions - assert result is False - subtensor.query_subtensor.assert_called_once_with("Owner", block, [hotkey_ss58]) - - -def test_does_hotkey_exist_false_no_value(mocker, subtensor): - """Test does_hotkey_exist 
returns False when result has no value attribute.""" - # Prep - hotkey_ss58 = "test_hotkey" - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.does_hotkey_exist(hotkey_ss58, block) - - # Assertions - assert result is False - subtensor.query_subtensor.assert_called_once_with("Owner", block, [hotkey_ss58]) - - -def test_does_hotkey_exist_false_no_result(mocker, subtensor): - """Test does_hotkey_exist returns False when query_subtensor returns None.""" - # Prep - hotkey_ss58 = "test_hotkey" - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - - # Call - result = subtensor.does_hotkey_exist(hotkey_ss58, block) - - # Assertions - assert result is False - subtensor.query_subtensor.assert_called_once_with("Owner", block, [hotkey_ss58]) - - -def test_does_hotkey_exist_no_block(mocker, subtensor): - """Test does_hotkey_exist with no block specified.""" - # Prep - hotkey_ss58 = "test_hotkey" - mock_result = mocker.MagicMock(value="valid_coldkey") - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.does_hotkey_exist(hotkey_ss58) - - # Assertions - assert result is True - subtensor.query_subtensor.assert_called_once_with("Owner", None, [hotkey_ss58]) - - -# `get_hotkey_owner` tests -def test_get_hotkey_owner_exists(mocker, subtensor): - """Test get_hotkey_owner when the hotkey exists.""" - # Prep - hotkey_ss58 = "test_hotkey" - block = 123 - expected_owner = "coldkey_owner" - mock_result = mocker.MagicMock(value=expected_owner) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - mocker.patch.object(subtensor, "does_hotkey_exist", return_value=True) - - # Call - result = subtensor.get_hotkey_owner(hotkey_ss58, block) - - # Assertions - assert result == expected_owner - subtensor.query_subtensor.assert_called_once_with("Owner", 
block, [hotkey_ss58]) - subtensor.does_hotkey_exist.assert_called_once_with(hotkey_ss58, block) - - -def test_get_hotkey_owner_does_not_exist(mocker, subtensor): - """Test get_hotkey_owner when the hotkey does not exist.""" - # Prep - hotkey_ss58 = "test_hotkey" - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - mocker.patch.object(subtensor, "does_hotkey_exist", return_value=False) - - # Call - result = subtensor.get_hotkey_owner(hotkey_ss58, block) - - # Assertions - assert result is None - subtensor.query_subtensor.assert_called_once_with("Owner", block, [hotkey_ss58]) - subtensor.does_hotkey_exist.assert_not_called() - - -def test_get_hotkey_owner_no_block(mocker, subtensor): - """Test get_hotkey_owner with no block specified.""" - # Prep - hotkey_ss58 = "test_hotkey" - expected_owner = "coldkey_owner" - mock_result = mocker.MagicMock(value=expected_owner) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - mocker.patch.object(subtensor, "does_hotkey_exist", return_value=True) - - # Call - result = subtensor.get_hotkey_owner(hotkey_ss58) - - # Assertions - assert result == expected_owner - subtensor.query_subtensor.assert_called_once_with("Owner", None, [hotkey_ss58]) - subtensor.does_hotkey_exist.assert_called_once_with(hotkey_ss58, None) - - -def test_get_hotkey_owner_no_value_attribute(mocker, subtensor): - """Test get_hotkey_owner when the result has no value attribute.""" - # Prep - hotkey_ss58 = "test_hotkey" - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - mocker.patch.object(subtensor, "does_hotkey_exist", return_value=True) - - # Call - result = subtensor.get_hotkey_owner(hotkey_ss58, block) - - # Assertions - assert result is None - subtensor.query_subtensor.assert_called_once_with("Owner", block, [hotkey_ss58]) - subtensor.does_hotkey_exist.assert_not_called() - - -# `get_axon_info` 
tests -def test_get_axon_info_success(mocker, subtensor): - """Test get_axon_info returns correct data when axon information is found.""" - # Prep - netuid = 1 - hotkey_ss58 = "test_hotkey" - block = 123 - mock_result = mocker.MagicMock( - value={ - "ip": "192.168.1.1", - "ip_type": 4, - "port": 8080, - "protocol": "tcp", - "version": "1.0", - "placeholder1": "data1", - "placeholder2": "data2", - } - ) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_axon_info(netuid, hotkey_ss58, block) - - # Asserts - assert result is not None - assert result.ip == "192.168.1.1" - assert result.ip_type == 4 - assert result.port == 8080 - assert result.protocol == "tcp" - assert result.version == "1.0" - assert result.placeholder1 == "data1" - assert result.placeholder2 == "data2" - assert result.hotkey == hotkey_ss58 - assert result.coldkey == "" - subtensor.query_subtensor.assert_called_once_with( - "Axons", block, [netuid, hotkey_ss58] - ) - - -def test_get_axon_info_no_data(mocker, subtensor): - """Test get_axon_info returns None when no axon information is found.""" - # Prep - netuid = 1 - hotkey_ss58 = "test_hotkey" - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - - # Call - result = subtensor.get_axon_info(netuid, hotkey_ss58, block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with( - "Axons", block, [netuid, hotkey_ss58] - ) - - -def test_get_axon_info_no_value_attribute(mocker, subtensor): - """Test get_axon_info returns None when result has no value attribute.""" - # Prep - netuid = 1 - hotkey_ss58 = "test_hotkey" - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_axon_info(netuid, hotkey_ss58, block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with( - 
"Axons", block, [netuid, hotkey_ss58] - ) - - -def test_get_axon_info_no_block(mocker, subtensor): - """Test get_axon_info with no block specified.""" - # Prep - netuid = 1 - hotkey_ss58 = "test_hotkey" - mock_result = mocker.MagicMock( - value={ - "ip": 3232235777, # 192.168.1.1 - "ip_type": 4, - "port": 8080, - "protocol": "tcp", - "version": "1.0", - "placeholder1": "data1", - "placeholder2": "data2", - } - ) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_axon_info(netuid, hotkey_ss58) - - # Asserts - assert result is not None - assert result.ip == "192.168.1.1" - assert result.ip_type == 4 - assert result.port == 8080 - assert result.protocol == "tcp" - assert result.version == "1.0" - assert result.placeholder1 == "data1" - assert result.placeholder2 == "data2" - assert result.hotkey == hotkey_ss58 - assert result.coldkey == "" - subtensor.query_subtensor.assert_called_once_with( - "Axons", None, [netuid, hotkey_ss58] - ) - - -# get_prometheus_info tests -def test_get_prometheus_info_success(mocker, subtensor): - """Test get_prometheus_info returns correct data when information is found.""" - # Prep - netuid = 1 - hotkey_ss58 = "test_hotkey" - block = 123 - mock_result = mocker.MagicMock( - value={ - "ip": 3232235777, # 192.168.1.1 - "ip_type": 4, - "port": 9090, - "version": "1.0", - "block": 1000, - } - ) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_prometheus_info(netuid, hotkey_ss58, block) - - # Asserts - assert result is not None - assert result.ip == "192.168.1.1" - assert result.ip_type == 4 - assert result.port == 9090 - assert result.version == "1.0" - assert result.block == 1000 - subtensor.query_subtensor.assert_called_once_with( - "Prometheus", block, [netuid, hotkey_ss58] - ) - - -def test_get_prometheus_info_no_data(mocker, subtensor): - """Test get_prometheus_info returns None when no information is found.""" - 
# Prep - netuid = 1 - hotkey_ss58 = "test_hotkey" - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - - # Call - result = subtensor.get_prometheus_info(netuid, hotkey_ss58, block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with( - "Prometheus", block, [netuid, hotkey_ss58] - ) - - -def test_get_prometheus_info_no_value_attribute(mocker, subtensor): - """Test get_prometheus_info returns None when result has no value attribute.""" - # Prep - netuid = 1 - hotkey_ss58 = "test_hotkey" - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_prometheus_info(netuid, hotkey_ss58, block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with( - "Prometheus", block, [netuid, hotkey_ss58] - ) - - -def test_get_prometheus_info_no_block(mocker, subtensor): - """Test get_prometheus_info with no block specified.""" - # Prep - netuid = 1 - hotkey_ss58 = "test_hotkey" - mock_result = MagicMock( - value={ - "ip": "192.168.1.1", - "ip_type": 4, - "port": 9090, - "version": "1.0", - "block": 1000, - } - ) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_prometheus_info(netuid, hotkey_ss58) - - # Asserts - assert result is not None - assert result.ip == "192.168.1.1" - assert result.ip_type == 4 - assert result.port == 9090 - assert result.version == "1.0" - assert result.block == 1000 - subtensor.query_subtensor.assert_called_once_with( - "Prometheus", None, [netuid, hotkey_ss58] - ) - - -########################### -# Global Parameters tests # -########################### - - -# `block` property test -def test_block_property(mocker, subtensor): - """Test block property returns the correct block number.""" - expected_block = 123 - mocker.patch.object(subtensor, "get_current_block", 
return_value=expected_block) - - result = subtensor.block - - assert result == expected_block - subtensor.get_current_block.assert_called_once() - - -# `total_issuance` tests -def test_total_issuance_success(mocker, subtensor): - """Test total_issuance returns correct data when issuance information is found.""" - # Prep - block = 123 - issuance_value = 1000 - mock_result = mocker.MagicMock(value=issuance_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.total_issuance(block) - - # Asserts - assert result is not None - subtensor.query_subtensor.assert_called_once_with("TotalIssuance", block) - spy_balance_from_rao.assert_called_once_with( - subtensor.query_subtensor.return_value.value - ) - - -def test_total_issuance_no_data(mocker, subtensor): - """Test total_issuance returns None when no issuance information is found.""" - # Prep - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.total_issuance(block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("TotalIssuance", block) - spy_balance_from_rao.assert_not_called() - - -def test_total_issuance_no_value_attribute(mocker, subtensor): - """Test total_issuance returns None when result has no value attribute.""" - # Prep - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.total_issuance(block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("TotalIssuance", block) - spy_balance_from_rao.assert_not_called() - - -def test_total_issuance_no_block(mocker, subtensor): - """Test total_issuance with no block 
specified.""" - # Prep - issuance_value = 1000 - mock_result = mocker.MagicMock(value=issuance_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.total_issuance() - - # Asserts - assert result is not None - subtensor.query_subtensor.assert_called_once_with("TotalIssuance", None) - spy_balance_from_rao.assert_called_once_with( - subtensor.query_subtensor.return_value.value - ) - - -# `total_stake` method tests -def test_total_stake_success(mocker, subtensor): - """Test total_stake returns correct data when stake information is found.""" - # Prep - block = 123 - stake_value = 5000 - mock_result = mocker.MagicMock(value=stake_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.total_stake(block) - - # Asserts - assert result is not None - subtensor.query_subtensor.assert_called_once_with("TotalStake", block) - spy_balance_from_rao.assert_called_once_with( - subtensor.query_subtensor.return_value.value - ) - - -def test_total_stake_no_data(mocker, subtensor): - """Test total_stake returns None when no stake information is found.""" - # Prep - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.total_stake(block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("TotalStake", block) - spy_balance_from_rao.assert_not_called() - - -def test_total_stake_no_value_attribute(mocker, subtensor): - """Test total_stake returns None when result has no value attribute.""" - # Prep - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_balance_from_rao = mocker.spy(Balance, 
"from_rao") - - # Call - result = subtensor.total_stake(block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("TotalStake", block) - spy_balance_from_rao.assert_not_called() - - -def test_total_stake_no_block(mocker, subtensor): - """Test total_stake with no block specified.""" - # Prep - stake_value = 5000 - mock_result = mocker.MagicMock(value=stake_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.total_stake() - - # Asserts - assert result is not None - subtensor.query_subtensor.assert_called_once_with("TotalStake", None) - ( - spy_balance_from_rao.assert_called_once_with( - subtensor.query_subtensor.return_value.value - ), - ) - - -# `serving_rate_limit` method tests -def test_serving_rate_limit_success(mocker, subtensor): - """Test serving_rate_limit returns correct data when rate limit information is found.""" - # Prep - netuid = 1 - block = 123 - rate_limit_value = "10" - mocker.patch.object(subtensor, "_get_hyperparameter", return_value=rate_limit_value) - - # Call - result = subtensor.serving_rate_limit(netuid, block) - - # Asserts - assert result is not None - assert result == int(rate_limit_value) - subtensor._get_hyperparameter.assert_called_once_with( - param_name="ServingRateLimit", netuid=netuid, block=block - ) - - -def test_serving_rate_limit_no_data(mocker, subtensor): - """Test serving_rate_limit returns None when no rate limit information is found.""" - # Prep - netuid = 1 - block = 123 - mocker.patch.object(subtensor, "_get_hyperparameter", return_value=None) - - # Call - result = subtensor.serving_rate_limit(netuid, block) - - # Asserts - assert result is None - subtensor._get_hyperparameter.assert_called_once_with( - param_name="ServingRateLimit", netuid=netuid, block=block - ) - - -def test_serving_rate_limit_no_block(mocker, subtensor): - """Test serving_rate_limit with no 
block specified.""" - # Prep - netuid = 1 - rate_limit_value = "10" - mocker.patch.object(subtensor, "_get_hyperparameter", return_value=rate_limit_value) - - # Call - result = subtensor.serving_rate_limit(netuid) - - # Asserts - assert result is not None - assert result == int(rate_limit_value) - subtensor._get_hyperparameter.assert_called_once_with( - param_name="ServingRateLimit", netuid=netuid, block=None - ) - - -# `tx_rate_limit` tests -def test_tx_rate_limit_success(mocker, subtensor): - """Test tx_rate_limit returns correct data when rate limit information is found.""" - # Prep - block = 123 - rate_limit_value = 100 - mock_result = mocker.MagicMock(value=rate_limit_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.tx_rate_limit(block) - - # Asserts - assert result is not None - assert result == rate_limit_value - subtensor.query_subtensor.assert_called_once_with("TxRateLimit", block) - - -def test_tx_rate_limit_no_data(mocker, subtensor): - """Test tx_rate_limit returns None when no rate limit information is found.""" - # Prep - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - - # Call - result = subtensor.tx_rate_limit(block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("TxRateLimit", block) - - -def test_tx_rate_limit_no_value_attribute(mocker, subtensor): - """Test tx_rate_limit returns None when result has no value attribute.""" - # Prep - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.tx_rate_limit(block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("TxRateLimit", block) - - -def test_tx_rate_limit_no_block(mocker, subtensor): - """Test tx_rate_limit with no block specified.""" - # Prep - rate_limit_value = 100 - mock_result = 
mocker.MagicMock(value=rate_limit_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.tx_rate_limit() - - # Asserts - assert result is not None - assert result == rate_limit_value - subtensor.query_subtensor.assert_called_once_with("TxRateLimit", None) - - -############################ -# Network Parameters tests # -############################ - - -# `subnet_exists` tests -def test_subnet_exists_success(mocker, subtensor): - """Test subnet_exists returns True when subnet exists.""" - # Prep - netuid = 1 - block = 123 - mock_result = mocker.MagicMock(value=True) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.subnet_exists(netuid, block) - - # Asserts - assert result is True - subtensor.query_subtensor.assert_called_once_with("NetworksAdded", block, [netuid]) - - -def test_subnet_exists_no_data(mocker, subtensor): - """Test subnet_exists returns False when no subnet information is found.""" - # Prep - netuid = 1 - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - - # Call - result = subtensor.subnet_exists(netuid, block) - - # Asserts - assert result is False - subtensor.query_subtensor.assert_called_once_with("NetworksAdded", block, [netuid]) - - -def test_subnet_exists_no_value_attribute(mocker, subtensor): - """Test subnet_exists returns False when result has no value attribute.""" - # Prep - netuid = 1 - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.subnet_exists(netuid, block) - - # Asserts - assert result is False - subtensor.query_subtensor.assert_called_once_with("NetworksAdded", block, [netuid]) - - -def test_subnet_exists_no_block(mocker, subtensor): - """Test subnet_exists with no block specified.""" - # Prep - netuid = 1 - mock_result = 
mocker.MagicMock(value=True) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.subnet_exists(netuid) - - # Asserts - assert result is True - subtensor.query_subtensor.assert_called_once_with("NetworksAdded", None, [netuid]) - - -# `get_all_subnet_netuids` tests -def test_get_all_subnet_netuids_success(mocker, subtensor): - """Test get_all_subnet_netuids returns correct list when netuid information is found.""" - # Prep - block = 123 - mock_netuid1 = mocker.MagicMock(value=1) - mock_netuid2 = mocker.MagicMock(value=2) - mock_result = mocker.MagicMock() - mock_result.records = True - mock_result.__iter__.return_value = [(mock_netuid1, True), (mock_netuid2, True)] - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_all_subnet_netuids(block) - - # Asserts - assert result == [1, 2] - subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", block) - - -def test_get_all_subnet_netuids_no_data(mocker, subtensor): - """Test get_all_subnet_netuids returns empty list when no netuid information is found.""" - # Prep - block = 123 - mocker.patch.object(subtensor, "query_map_subtensor", return_value=None) - - # Call - result = subtensor.get_all_subnet_netuids(block) - - # Asserts - assert result == [] - subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", block) - - -def test_get_all_subnet_netuids_no_records_attribute(mocker, subtensor): - """Test get_all_subnet_netuids returns empty list when result has no records attribute.""" - # Prep - block = 123 - mock_result = mocker.MagicMock() - del mock_result.records - mock_result.__iter__.return_value = [] - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_all_subnet_netuids(block) - - # Asserts - assert result == [] - subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", block) - - -def 
test_get_all_subnet_netuids_no_block(mocker, subtensor): - """Test get_all_subnet_netuids with no block specified.""" - # Prep - mock_netuid1 = mocker.MagicMock(value=1) - mock_netuid2 = mocker.MagicMock(value=2) - mock_result = mocker.MagicMock() - mock_result.records = True - mock_result.__iter__.return_value = [(mock_netuid1, True), (mock_netuid2, True)] - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_all_subnet_netuids() - - # Asserts - assert result == [1, 2] - subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", None) - - -# `get_total_subnets` tests -def test_get_total_subnets_success(mocker, subtensor): - """Test get_total_subnets returns correct data when total subnet information is found.""" - # Prep - block = 123 - total_subnets_value = 10 - mock_result = mocker.MagicMock(value=total_subnets_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_total_subnets(block) - - # Asserts - assert result is not None - assert result == total_subnets_value - subtensor.query_subtensor.assert_called_once_with("TotalNetworks", block) - - -def test_get_total_subnets_no_data(mocker, subtensor): - """Test get_total_subnets returns None when no total subnet information is found.""" - # Prep - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - - # Call - result = subtensor.get_total_subnets(block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("TotalNetworks", block) - - -def test_get_total_subnets_no_value_attribute(mocker, subtensor): - """Test get_total_subnets returns None when result has no value attribute.""" - # Prep - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value # Simulating a missing value attribute - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = 
subtensor.get_total_subnets(block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("TotalNetworks", block) - - -def test_get_total_subnets_no_block(mocker, subtensor): - """Test get_total_subnets with no block specified.""" - # Prep - total_subnets_value = 10 - mock_result = mocker.MagicMock(value=total_subnets_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_total_subnets() - - # Asserts - assert result is not None - assert result == total_subnets_value - subtensor.query_subtensor.assert_called_once_with("TotalNetworks", None) - - -# `get_subnet_modality` tests -def test_get_subnet_modality_success(mocker, subtensor): - """Test get_subnet_modality returns correct data when modality information is found.""" - # Prep - netuid = 1 - block = 123 - modality_value = 42 - mock_result = mocker.MagicMock(value=modality_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnet_modality(netuid, block) - - # Asserts - assert result is not None - assert result == modality_value - subtensor.query_subtensor.assert_called_once_with( - "NetworkModality", block, [netuid] - ) - - -def test_get_subnet_modality_no_data(mocker, subtensor): - """Test get_subnet_modality returns None when no modality information is found.""" - # Prep - netuid = 1 - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - - # Call - result = subtensor.get_subnet_modality(netuid, block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with( - "NetworkModality", block, [netuid] - ) - - -def test_get_subnet_modality_no_value_attribute(mocker, subtensor): - """Test get_subnet_modality returns None when result has no value attribute.""" - # Prep - netuid = 1 - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value # Simulating a missing value 
attribute - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnet_modality(netuid, block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with( - "NetworkModality", block, [netuid] - ) - - -def test_get_subnet_modality_no_block_specified(mocker, subtensor): - """Test get_subnet_modality with no block specified.""" - # Prep - netuid = 1 - modality_value = 42 - mock_result = mocker.MagicMock(value=modality_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnet_modality(netuid) - - # Asserts - assert result is not None - assert result == modality_value - subtensor.query_subtensor.assert_called_once_with("NetworkModality", None, [netuid]) - - -# `get_emission_value_by_subnet` tests -def test_get_emission_value_by_subnet_success(mocker, subtensor): - """Test get_emission_value_by_subnet returns correct data when emission value is found.""" - # Prep - netuid = 1 - block = 123 - emission_value = 1000 - mock_result = mocker.MagicMock(value=emission_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_emission_value_by_subnet(netuid, block) - - # Asserts - assert result is not None - subtensor.query_subtensor.assert_called_once_with("EmissionValues", block, [netuid]) - spy_balance_from_rao.assert_called_once_with(emission_value) - assert result == Balance.from_rao(emission_value) - - -def test_get_emission_value_by_subnet_no_data(mocker, subtensor): - """Test get_emission_value_by_subnet returns None when no emission value is found.""" - # Prep - netuid = 1 - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_emission_value_by_subnet(netuid, block) - - # 
Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("EmissionValues", block, [netuid]) - spy_balance_from_rao.assert_not_called() - - -def test_get_emission_value_by_subnet_no_value_attribute(mocker, subtensor): - """Test get_emission_value_by_subnet returns None when result has no value attribute.""" - # Prep - netuid = 1 - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value # Simulating a missing value attribute - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_emission_value_by_subnet(netuid, block) - - # Asserts - assert result is None - subtensor.query_subtensor.assert_called_once_with("EmissionValues", block, [netuid]) - spy_balance_from_rao.assert_not_called() - - -def test_get_emission_value_by_subnet_no_block_specified(mocker, subtensor): - """Test get_emission_value_by_subnet with no block specified.""" - # Prep - netuid = 1 - emission_value = 1000 - mock_result = mocker.MagicMock(value=emission_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_balance_from_rao = mocker.spy(Balance, "from_rao") - - # Call - result = subtensor.get_emission_value_by_subnet(netuid) - - # Asserts - assert result is not None - subtensor.query_subtensor.assert_called_once_with("EmissionValues", None, [netuid]) - spy_balance_from_rao.assert_called_once_with(emission_value) - assert result == Balance.from_rao(emission_value) - - -# `get_subnet_connection_requirements` tests -def test_get_subnet_connection_requirements_success(mocker, subtensor): - """Test get_subnet_connection_requirements returns correct data when requirements are found.""" - # Prep - netuid = 1 - block = 123 - mock_tuple1 = (mocker.MagicMock(value="requirement1"), mocker.MagicMock(value=10)) - mock_tuple2 = (mocker.MagicMock(value="requirement2"), mocker.MagicMock(value=20)) - mock_result = 
mocker.MagicMock() - mock_result.records = [mock_tuple1, mock_tuple2] - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnet_connection_requirements(netuid, block) - - # Asserts - assert result == {"requirement1": 10, "requirement2": 20} - subtensor.query_map_subtensor.assert_called_once_with( - "NetworkConnect", block, [netuid] - ) - - -def test_get_subnet_connection_requirements_no_data(mocker, subtensor): - """Test get_subnet_connection_requirements returns empty dict when no data is found.""" - # Prep - netuid = 1 - block = 123 - mock_result = mocker.MagicMock() - mock_result.records = [] - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnet_connection_requirements(netuid, block) - - # Asserts - assert result == {} - subtensor.query_map_subtensor.assert_called_once_with( - "NetworkConnect", block, [netuid] - ) - - -def test_get_subnet_connection_requirements_no_records_attribute(mocker, subtensor): - """Test get_subnet_connection_requirements returns empty dict when result has no records attribute.""" - # Prep - netuid = 1 - block = 123 - mock_result = mocker.MagicMock() - del mock_result.records # Simulating a missing records attribute - - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnet_connection_requirements(netuid, block) - - # Asserts - assert result == {} - subtensor.query_map_subtensor.assert_called_once_with( - "NetworkConnect", block, [netuid] - ) - - -def test_get_subnet_connection_requirements_no_block_specified(mocker, subtensor): - """Test get_subnet_connection_requirements with no block specified.""" - # Prep - netuid = 1 - mock_tuple1 = (mocker.MagicMock(value="requirement1"), mocker.MagicMock(value=10)) - mock_tuple2 = (mocker.MagicMock(value="requirement2"), mocker.MagicMock(value=20)) - mock_result = mocker.MagicMock() - 
mock_result.records = [mock_tuple1, mock_tuple2] - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnet_connection_requirements(netuid) - - # Asserts - assert result == {"requirement1": 10, "requirement2": 20} - subtensor.query_map_subtensor.assert_called_once_with( - "NetworkConnect", None, [netuid] - ) - - -# `get_subnets` tests -def test_get_subnets_success(mocker, subtensor): - """Test get_subnets returns correct list when subnet information is found.""" - # Prep - block = 123 - mock_netuid1 = mocker.MagicMock(value=1) - mock_netuid2 = mocker.MagicMock(value=2) - mock_result = mocker.MagicMock() - mock_result.records = [(mock_netuid1, True), (mock_netuid2, True)] - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnets(block) - - # Asserts - assert result == [1, 2] - subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", block) - - -def test_get_subnets_no_data(mocker, subtensor): - """Test get_subnets returns empty list when no subnet information is found.""" - # Prep - block = 123 - mock_result = mocker.MagicMock() - mock_result.records = [] - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnets(block) - - # Asserts - assert result == [] - subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", block) - - -def test_get_subnets_no_records_attribute(mocker, subtensor): - """Test get_subnets returns empty list when result has no records attribute.""" - # Prep - block = 123 - mock_result = mocker.MagicMock() - del mock_result.records # Simulating a missing records attribute - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnets(block) - - # Asserts - assert result == [] - subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", block) - - -def 
test_get_subnets_no_block_specified(mocker, subtensor): - """Test get_subnets with no block specified.""" - # Prep - mock_netuid1 = mocker.MagicMock(value=1) - mock_netuid2 = mocker.MagicMock(value=2) - mock_result = mocker.MagicMock() - mock_result.records = [(mock_netuid1, True), (mock_netuid2, True)] - mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnets() - - # Asserts - assert result == [1, 2] - subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", None) - - -# `get_all_subnets_info` tests -def test_get_all_subnets_info_success(mocker, subtensor): - """Test get_all_subnets_info returns correct data when subnet information is found.""" - # Prep - block = 123 - subnet_data = [1, 2, 3] # Mocked response data - mocker.patch.object( - subtensor.substrate, "get_block_hash", return_value="mock_block_hash" - ) - mock_response = {"result": subnet_data} - mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response) - mocker.patch.object( - subtensor_module.SubnetInfo, - "list_from_vec_u8", - return_value="list_from_vec_u80", - ) - - # Call - result = subtensor.get_all_subnets_info(block) - - # Asserts - subtensor.substrate.get_block_hash.assert_called_once_with(block) - subtensor.substrate.rpc_request.assert_called_once_with( - method="subnetInfo_getSubnetsInfo", params=["mock_block_hash"] - ) - subtensor_module.SubnetInfo.list_from_vec_u8.assert_called_once_with(subnet_data) - - -@pytest.mark.parametrize("result_", [[], None]) -def test_get_all_subnets_info_no_data(mocker, subtensor, result_): - """Test get_all_subnets_info returns empty list when no subnet information is found.""" - # Prep - block = 123 - mocker.patch.object( - subtensor.substrate, "get_block_hash", return_value="mock_block_hash" - ) - mock_response = {"result": result_} - mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response) - 
mocker.patch.object(subtensor_module.SubnetInfo, "list_from_vec_u8") - - # Call - result = subtensor.get_all_subnets_info(block) - - # Asserts - assert result == [] - subtensor.substrate.get_block_hash.assert_called_once_with(block) - subtensor.substrate.rpc_request.assert_called_once_with( - method="subnetInfo_getSubnetsInfo", params=["mock_block_hash"] - ) - subtensor_module.SubnetInfo.list_from_vec_u8.assert_not_called() - - -def test_get_all_subnets_info_retry(mocker, subtensor): - """Test get_all_subnets_info retries on failure.""" - # Prep - block = 123 - subnet_data = [1, 2, 3] - mocker.patch.object( - subtensor.substrate, "get_block_hash", return_value="mock_block_hash" - ) - mock_response = {"result": subnet_data} - mock_rpc_request = mocker.patch.object( - subtensor.substrate, - "rpc_request", - side_effect=[Exception, Exception, mock_response], - ) - mocker.patch.object( - subtensor_module.SubnetInfo, "list_from_vec_u8", return_value=["some_data"] - ) - - # Call - result = subtensor.get_all_subnets_info(block) - - # Asserts - subtensor.substrate.get_block_hash.assert_called_with(block) - assert mock_rpc_request.call_count == 3 - subtensor_module.SubnetInfo.list_from_vec_u8.assert_called_once_with(subnet_data) - assert result == ["some_data"] - - -# `get_subnet_info` tests -def test_get_subnet_info_success(mocker, subtensor): - """Test get_subnet_info returns correct data when subnet information is found.""" - # Prep - netuid = 1 - block = 123 - subnet_data = [1, 2, 3] - mocker.patch.object( - subtensor.substrate, "get_block_hash", return_value="mock_block_hash" - ) - mock_response = {"result": subnet_data} - mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response) - mocker.patch.object( - subtensor_module.SubnetInfo, "from_vec_u8", return_value=["from_vec_u8"] - ) - - # Call - result = subtensor.get_subnet_info(netuid, block) - - # Asserts - subtensor.substrate.get_block_hash.assert_called_once_with(block) - 
subtensor.substrate.rpc_request.assert_called_once_with( - method="subnetInfo_getSubnetInfo", params=[netuid, "mock_block_hash"] - ) - subtensor_module.SubnetInfo.from_vec_u8.assert_called_once_with(subnet_data) - - -@pytest.mark.parametrize("result_", [None, {}]) -def test_get_subnet_info_no_data(mocker, subtensor, result_): - """Test get_subnet_info returns None when no subnet information is found.""" - # Prep - netuid = 1 - block = 123 - mocker.patch.object( - subtensor.substrate, "get_block_hash", return_value="mock_block_hash" - ) - mock_response = {"result": result_} - mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response) - mocker.patch.object(subtensor_module.SubnetInfo, "from_vec_u8") - - # Call - result = subtensor.get_subnet_info(netuid, block) - - # Asserts - assert result is None - subtensor.substrate.get_block_hash.assert_called_once_with(block) - subtensor.substrate.rpc_request.assert_called_once_with( - method="subnetInfo_getSubnetInfo", params=[netuid, "mock_block_hash"] - ) - subtensor_module.SubnetInfo.from_vec_u8.assert_not_called() - - -def test_get_subnet_info_retry(mocker, subtensor): - """Test get_subnet_info retries on failure.""" - # Prep - netuid = 1 - block = 123 - subnet_data = [1, 2, 3] - mocker.patch.object( - subtensor.substrate, "get_block_hash", return_value="mock_block_hash" - ) - mock_response = {"result": subnet_data} - mock_rpc_request = mocker.patch.object( - subtensor.substrate, - "rpc_request", - side_effect=[Exception, Exception, mock_response], - ) - mocker.patch.object( - subtensor_module.SubnetInfo, "from_vec_u8", return_value=["from_vec_u8"] - ) - - # Call - result = subtensor.get_subnet_info(netuid, block) - - # Asserts - subtensor.substrate.get_block_hash.assert_called_with(block) - assert mock_rpc_request.call_count == 3 - subtensor_module.SubnetInfo.from_vec_u8.assert_called_once_with(subnet_data) - - -# `get_subnet_hyperparameters` tests -def 
test_get_subnet_hyperparameters_success(mocker, subtensor): - """Test get_subnet_hyperparameters returns correct data when hyperparameters are found.""" - # Prep - netuid = 1 - block = 123 - hex_bytes_result = "0x010203" - bytes_result = bytes.fromhex(hex_bytes_result[2:]) - mocker.patch.object(subtensor, "query_runtime_api", return_value=hex_bytes_result) - mocker.patch.object( - subtensor_module.SubnetHyperparameters, - "from_vec_u8", - return_value=["from_vec_u8"], - ) - - # Call - result = subtensor.get_subnet_hyperparameters(netuid, block) - - # Asserts - subtensor.query_runtime_api.assert_called_once_with( - runtime_api="SubnetInfoRuntimeApi", - method="get_subnet_hyperparams", - params=[netuid], - block=block, - ) - subtensor_module.SubnetHyperparameters.from_vec_u8.assert_called_once_with( - bytes_result - ) - - -def test_get_subnet_hyperparameters_no_data(mocker, subtensor): - """Test get_subnet_hyperparameters returns empty list when no data is found.""" - # Prep - netuid = 1 - block = 123 - mocker.patch.object(subtensor, "query_runtime_api", return_value=None) - mocker.patch.object(subtensor_module.SubnetHyperparameters, "from_vec_u8") - - # Call - result = subtensor.get_subnet_hyperparameters(netuid, block) - - # Asserts - assert result == [] - subtensor.query_runtime_api.assert_called_once_with( - runtime_api="SubnetInfoRuntimeApi", - method="get_subnet_hyperparams", - params=[netuid], - block=block, - ) - subtensor_module.SubnetHyperparameters.from_vec_u8.assert_not_called() - - -def test_get_subnet_hyperparameters_hex_without_prefix(mocker, subtensor): - """Test get_subnet_hyperparameters correctly processes hex string without '0x' prefix.""" - # Prep - netuid = 1 - block = 123 - hex_bytes_result = "010203" - bytes_result = bytes.fromhex(hex_bytes_result) - mocker.patch.object(subtensor, "query_runtime_api", return_value=hex_bytes_result) - mocker.patch.object(subtensor_module.SubnetHyperparameters, "from_vec_u8") - - # Call - result = 
subtensor.get_subnet_hyperparameters(netuid, block) - - # Asserts - subtensor.query_runtime_api.assert_called_once_with( - runtime_api="SubnetInfoRuntimeApi", - method="get_subnet_hyperparams", - params=[netuid], - block=block, - ) - subtensor_module.SubnetHyperparameters.from_vec_u8.assert_called_once_with( - bytes_result - ) - - -# `get_subnet_owner` tests -def test_get_subnet_owner_success(mocker, subtensor): - """Test get_subnet_owner returns correct data when owner information is found.""" - # Prep - netuid = 1 - block = 123 - owner_address = "5F3sa2TJAWMqDhXG6jhV4N8ko9rXPM6twz9mG9m3rrgq3xiJ" - mock_result = mocker.MagicMock(value=owner_address) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnet_owner(netuid, block) - - # Asserts - subtensor.query_subtensor.assert_called_once_with("SubnetOwner", block, [netuid]) - assert result == owner_address - - -def test_get_subnet_owner_no_data(mocker, subtensor): - """Test get_subnet_owner returns None when no owner information is found.""" - # Prep - netuid = 1 - block = 123 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - - # Call - result = subtensor.get_subnet_owner(netuid, block) - - # Asserts - subtensor.query_subtensor.assert_called_once_with("SubnetOwner", block, [netuid]) - assert result is None - - -def test_get_subnet_owner_no_value_attribute(mocker, subtensor): - """Test get_subnet_owner returns None when result has no value attribute.""" - # Prep - netuid = 1 - block = 123 - mock_result = mocker.MagicMock() - del mock_result.value # Simulating a missing value attribute - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - - # Call - result = subtensor.get_subnet_owner(netuid, block) - - # Asserts - subtensor.query_subtensor.assert_called_once_with("SubnetOwner", block, [netuid]) - assert result is None - - -#################### -# Nomination tests # -#################### - - -# 
`is_hotkey_delegate` tests -def test_is_hotkey_delegate_success(mocker, subtensor): - """Test is_hotkey_delegate returns True when hotkey is a delegate.""" - # Prep - hotkey_ss58 = "hotkey_ss58" - block = 123 - mock_delegates = [ - mocker.MagicMock(hotkey_ss58=hotkey_ss58), - mocker.MagicMock(hotkey_ss58="hotkey_ss583"), - ] - mocker.patch.object(subtensor, "get_delegates", return_value=mock_delegates) - - # Call - result = subtensor.is_hotkey_delegate(hotkey_ss58, block) - - # Asserts - subtensor.get_delegates.assert_called_once_with(block=block) - assert result is True - - -def test_is_hotkey_delegate_not_found(mocker, subtensor): - """Test is_hotkey_delegate returns False when hotkey is not a delegate.""" - # Prep - hotkey_ss58 = "hotkey_ss58" - block = 123 - mock_delegates = [mocker.MagicMock(hotkey_ss58="hotkey_ss583")] - mocker.patch.object(subtensor, "get_delegates", return_value=mock_delegates) - - # Call - result = subtensor.is_hotkey_delegate(hotkey_ss58, block) - - # Asserts - subtensor.get_delegates.assert_called_once_with(block=block) - assert result is False - - -# `get_delegate_take` tests -def test_get_delegate_take_success(mocker, subtensor): - """Test get_delegate_take returns correct data when delegate take is found.""" - # Prep - hotkey_ss58 = "hotkey_ss58" - block = 123 - delegate_take_value = 32768 - mock_result = mocker.MagicMock(value=delegate_take_value) - mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) - spy_u16_normalized_float = mocker.spy(subtensor_module, "U16_NORMALIZED_FLOAT") - - # Call - subtensor.get_delegate_take(hotkey_ss58, block) - - # Asserts - subtensor.query_subtensor.assert_called_once_with("Delegates", block, [hotkey_ss58]) - spy_u16_normalized_float.assert_called_once_with(delegate_take_value) - - -def test_get_delegate_take_no_data(mocker, subtensor): - """Test get_delegate_take returns None when no delegate take is found.""" - # Prep - hotkey_ss58 = "hotkey_ss58" - block = 123 - 
delegate_take_value = 32768 - mocker.patch.object(subtensor, "query_subtensor", return_value=None) - spy_u16_normalized_float = mocker.spy(subtensor_module, "U16_NORMALIZED_FLOAT") - - # Call - result = subtensor.get_delegate_take(hotkey_ss58, block) - - # Asserts - subtensor.query_subtensor.assert_called_once_with("Delegates", block, [hotkey_ss58]) - spy_u16_normalized_float.assert_not_called() - assert result is None - - -def test_get_remaining_arbitration_period(subtensor, mocker): - """Tests successful retrieval of total stake for hotkey.""" - # Prep - subtensor.query_subtensor = mocker.MagicMock(return_value=mocker.MagicMock(value=0)) - fake_ss58_address = "12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU" - - # Call - result = subtensor.get_remaining_arbitration_period(coldkey_ss58=fake_ss58_address) - - # Assertions - subtensor.query_subtensor.assert_called_once_with( - name="ColdkeyArbitrationBlock", block=None, params=[fake_ss58_address] - ) - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert result == 0 - - -def test_get_remaining_arbitration_period_happy(subtensor, mocker): - """Tests successful retrieval of total stake for hotkey.""" - # Prep - subtensor.query_subtensor = mocker.MagicMock( - return_value=mocker.MagicMock(value=2000) - ) - fake_ss58_address = "12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU" - - # Call - result = subtensor.get_remaining_arbitration_period( - coldkey_ss58=fake_ss58_address, block=200 - ) - - # Assertions - subtensor.query_subtensor.assert_called_once_with( - name="ColdkeyArbitrationBlock", block=200, params=[fake_ss58_address] - ) - # if we change the methods logic in the future we have to be make sure the returned type is correct - assert result == 1800 # 2000 - 200 - - -def test_connect_without_substrate(mocker): - """Ensure re-connection is called when using an alive substrate.""" - # Prep - fake_substrate = mocker.MagicMock() - 
fake_substrate.websocket.sock.getsockopt.return_value = 1 - mocker.patch.object( - subtensor_module, "SubstrateInterface", return_value=fake_substrate - ) - fake_subtensor = Subtensor() - spy_get_substrate = mocker.spy(Subtensor, "_get_substrate") - - # Call - _ = fake_subtensor.block - - # Assertions - assert spy_get_substrate.call_count == 1 - - -def test_connect_with_substrate(mocker): - """Ensure re-connection is non called when using an alive substrate.""" - # Prep - fake_substrate = mocker.MagicMock() - fake_substrate.websocket.sock.getsockopt.return_value = 0 - mocker.patch.object( - subtensor_module, "SubstrateInterface", return_value=fake_substrate - ) - fake_subtensor = Subtensor() - spy_get_substrate = mocker.spy(Subtensor, "_get_substrate") - - # Call - _ = fake_subtensor.block - - # Assertions - assert spy_get_substrate.call_count == 0 diff --git a/tests/unit_tests/test_synapse.py b/tests/unit_tests/test_synapse.py deleted file mode 100644 index b0ce4f1325..0000000000 --- a/tests/unit_tests/test_synapse.py +++ /dev/null @@ -1,266 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. -import json -import base64 -import pytest -import bittensor -from typing import Optional, ClassVar - - -def test_parse_headers_to_inputs(): - class Test(bittensor.Synapse): - key1: list[int] - - # Define a mock headers dictionary to use for testing - headers = { - "bt_header_axon_nonce": "111", - "bt_header_dendrite_ip": "12.1.1.2", - "bt_header_input_obj_key1": base64.b64encode( - json.dumps([1, 2, 3, 4]).encode("utf-8") - ).decode("utf-8"), - "timeout": "12", - "name": "Test", - "header_size": "111", - "total_size": "111", - "computed_body_hash": "0xabcdef", - } - print(headers) - - # Run the function to test - inputs_dict = Test.parse_headers_to_inputs(headers) - print(inputs_dict) - # Check the resulting dictionary - assert inputs_dict == { - "axon": {"nonce": "111"}, - "dendrite": {"ip": "12.1.1.2"}, - "key1": [1, 2, 3, 4], - "timeout": "12", - "name": "Test", - "header_size": "111", - "total_size": "111", - "computed_body_hash": "0xabcdef", - } - - -def test_from_headers(): - class Test(bittensor.Synapse): - key1: list[int] - - # Define a mock headers dictionary to use for testing - headers = { - "bt_header_axon_nonce": "111", - "bt_header_dendrite_ip": "12.1.1.2", - "bt_header_input_obj_key1": base64.b64encode( - json.dumps([1, 2, 3, 4]).encode("utf-8") - ).decode("utf-8"), - "timeout": "12", - "name": "Test", - "header_size": "111", - "total_size": "111", - "computed_body_hash": "0xabcdef", - } - - # Run the function to test - synapse = Test.from_headers(headers) - - # Check that the resulting object is an instance of YourClass - assert isinstance(synapse, Test) - - # Check the properties of the resulting object - # Replace with actual checks based on the structure of your class - assert 
synapse.axon.nonce == 111 - assert synapse.dendrite.ip == "12.1.1.2" - assert synapse.key1 == [1, 2, 3, 4] - assert synapse.timeout == 12 - assert synapse.name == "Test" - assert synapse.header_size == 111 - assert synapse.total_size == 111 - - -def test_synapse_create(): - # Create an instance of Synapse - synapse = bittensor.Synapse() - - # Ensure the instance created is of type Synapse - assert isinstance(synapse, bittensor.Synapse) - - # Check default properties of a newly created Synapse - assert synapse.name == "Synapse" - assert synapse.timeout == 12.0 - assert synapse.header_size == 0 - assert synapse.total_size == 0 - - # Convert the Synapse instance to a headers dictionary - headers = synapse.to_headers() - - # Ensure the headers is a dictionary and contains the expected keys - assert isinstance(headers, dict) - assert "timeout" in headers - assert "name" in headers - assert "header_size" in headers - assert "total_size" in headers - - # Ensure the 'name' and 'timeout' values match the Synapse's properties - assert headers["name"] == "Synapse" - assert headers["timeout"] == "12.0" - - # Create a new Synapse from the headers and check its 'timeout' property - next_synapse = synapse.from_headers(synapse.to_headers()) - assert next_synapse.timeout == 12.0 - - -def test_custom_synapse(): - # Define a custom Synapse subclass - class Test(bittensor.Synapse): - a: int # Carried through because required. 
- b: int = None # Not carried through headers - c: Optional[int] # Required, carried through headers, cannot be None - d: Optional[list[int]] # Required, carried though headers, cannot be None - e: list[int] # Carried through headers - f: Optional[int] = ( - None # Not Required, Not carried through headers, can be None - ) - g: Optional[list[int]] = ( - None # Not Required, Not carried though headers, can be None - ) - - # Create an instance of the custom Synapse subclass - synapse = Test( - a=1, - c=3, - d=[1, 2, 3, 4], - e=[1, 2, 3, 4], - ) - - # Ensure the instance created is of type Test and has the expected properties - assert isinstance(synapse, Test) - assert synapse.name == "Test" - assert synapse.a == 1 - assert synapse.b is None - assert synapse.c == 3 - assert synapse.d == [1, 2, 3, 4] - assert synapse.e == [1, 2, 3, 4] - assert synapse.f is None - assert synapse.g is None - - # Convert the Test instance to a headers dictionary - headers = synapse.to_headers() - - # Ensure the headers contains 'a' but not 'b' - assert "bt_header_input_obj_a" in headers - assert "bt_header_input_obj_b" not in headers - - # Create a new Test from the headers and check its properties - next_synapse = synapse.from_headers(synapse.to_headers()) - assert next_synapse.a == 0 # Default value is 0 - assert next_synapse.b is None - assert next_synapse.c == 0 # Default is 0 - assert next_synapse.d == [] # Default is [] - assert next_synapse.e == [] # Empty list is default for list types - assert next_synapse.f is None - assert next_synapse.g is None - - -def test_body_hash_override(): - # Create a Synapse instance - synapse_instance = bittensor.Synapse() - - # Try to set the body_hash property and expect an AttributeError - with pytest.raises( - AttributeError, - match="body_hash property is read-only and cannot be overridden.", - ): - synapse_instance.body_hash = [] - - -def test_default_instance_fields_dict_consistency(): - synapse_instance = bittensor.Synapse() - assert 
synapse_instance.model_dump() == { - "name": "Synapse", - "timeout": 12.0, - "total_size": 0, - "header_size": 0, - "dendrite": { - "status_code": None, - "status_message": None, - "process_time": None, - "ip": None, - "port": None, - "version": None, - "nonce": None, - "uuid": None, - "hotkey": None, - "signature": None, - }, - "axon": { - "status_code": None, - "status_message": None, - "process_time": None, - "ip": None, - "port": None, - "version": None, - "nonce": None, - "uuid": None, - "hotkey": None, - "signature": None, - }, - "computed_body_hash": "", - } - - -class LegacyHashedSynapse(bittensor.Synapse): - """Legacy Synapse subclass that serialized `required_hash_fields`.""" - - a: int - b: int - c: Optional[int] = None - d: Optional[list[str]] = None - required_hash_fields: Optional[list[str]] = ["b", "a", "d"] - - -class HashedSynapse(bittensor.Synapse): - a: int - b: int - c: Optional[int] = None - d: Optional[list[str]] = None - required_hash_fields: ClassVar[tuple[str, ...]] = ("a", "b", "d") - - -@pytest.mark.parametrize("synapse_cls", [LegacyHashedSynapse, HashedSynapse]) -def test_synapse_body_hash(synapse_cls): - synapse_instance = synapse_cls(a=1, b=2, d=["foobar"]) - assert ( - synapse_instance.body_hash - == "ae06397d08f30f75c91395c59f05c62ac3b62b88250eb78b109213258e6ced0c" - ) - - # Extra non-hashed values should not influence the body hash - synapse_instance_slightly_different = synapse_cls(d=["foobar"], c=3, a=1, b=2) - assert synapse_instance.body_hash == synapse_instance_slightly_different.body_hash - - # Even if someone tries to override the required_hash_fields, it should still be the same - synapse_instance_try_override_hash_fields = synapse_cls( - a=1, b=2, d=["foobar"], required_hash_fields=["a"] - ) - assert ( - synapse_instance.body_hash - == synapse_instance_try_override_hash_fields.body_hash - ) - - # Different hashed values should result in different body hashes - synapse_different = synapse_cls(a=1, b=2) - assert 
synapse_instance.body_hash != synapse_different.body_hash diff --git a/tests/unit_tests/test_tensor.py b/tests/unit_tests/test_tensor.py deleted file mode 100644 index 9939b397e7..0000000000 --- a/tests/unit_tests/test_tensor.py +++ /dev/null @@ -1,243 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
-import pytest -import numpy as np -import bittensor -import numpy -import torch - - -# This is a fixture that creates an example tensor for testing -@pytest.fixture -def example_tensor(): - # Create a tensor from a list using PyTorch - data = np.array([1, 2, 3, 4]) - - # Serialize the tensor into a Tensor instance and return it - return bittensor.tensor(data) - - -@pytest.fixture -def example_tensor_torch(force_legacy_torch_compat_api): - # Create a tensor from a list using PyTorch - data = torch.tensor([1, 2, 3, 4]) - - # Serialize the tensor into a Tensor instance and return it - return bittensor.tensor(data) - - -def test_deserialize(example_tensor): - # Deserialize the tensor from the Tensor instance - tensor = example_tensor.deserialize() - - # Check that the result is a np.array with the correct values - assert isinstance(tensor, np.ndarray) - assert tensor.tolist() == [1, 2, 3, 4] - - -def test_deserialize_torch(example_tensor_torch, force_legacy_torch_compat_api): - tensor = example_tensor_torch.deserialize() - # Check that the result is a PyTorch tensor with the correct values - assert isinstance(tensor, torch.Tensor) - assert tensor.tolist() == [1, 2, 3, 4] - - -def test_serialize(example_tensor): - # Check that the serialized tensor is an instance of Tensor - assert isinstance(example_tensor, bittensor.Tensor) - - # Check that the Tensor instance has the correct buffer, dtype, and shape - assert example_tensor.buffer == example_tensor.buffer - assert example_tensor.dtype == example_tensor.dtype - assert example_tensor.shape == example_tensor.shape - - assert isinstance(example_tensor.tolist(), list) - - # Check that the Tensor instance has the correct buffer, dtype, and shape - assert example_tensor.buffer == example_tensor.buffer - assert example_tensor.dtype == example_tensor.dtype - assert example_tensor.shape == example_tensor.shape - - assert isinstance(example_tensor.numpy(), numpy.ndarray) - - # Check that the Tensor instance has the correct 
buffer, dtype, and shape - assert example_tensor.buffer == example_tensor.buffer - assert example_tensor.dtype == example_tensor.dtype - assert example_tensor.shape == example_tensor.shape - - assert isinstance(example_tensor.tensor(), np.ndarray) - - # Check that the Tensor instance has the correct buffer, dtype, and shape - assert example_tensor.buffer == example_tensor.buffer - assert example_tensor.dtype == example_tensor.dtype - assert example_tensor.shape == example_tensor.shape - - -def test_serialize_torch(example_tensor_torch, force_legacy_torch_compat_api): - # Check that the serialized tensor is an instance of Tensor - assert isinstance(example_tensor_torch, bittensor.Tensor) - - # Check that the Tensor instance has the correct buffer, dtype, and shape - assert example_tensor_torch.buffer == example_tensor_torch.buffer - assert example_tensor_torch.dtype == example_tensor_torch.dtype - assert example_tensor_torch.shape == example_tensor_torch.shape - - assert isinstance(example_tensor_torch.tolist(), list) - - # Check that the Tensor instance has the correct buffer, dtype, and shape - assert example_tensor_torch.buffer == example_tensor_torch.buffer - assert example_tensor_torch.dtype == example_tensor_torch.dtype - assert example_tensor_torch.shape == example_tensor_torch.shape - - assert isinstance(example_tensor_torch.numpy(), numpy.ndarray) - - # Check that the Tensor instance has the correct buffer, dtype, and shape - assert example_tensor_torch.buffer == example_tensor_torch.buffer - assert example_tensor_torch.dtype == example_tensor_torch.dtype - assert example_tensor_torch.shape == example_tensor_torch.shape - - assert isinstance(example_tensor_torch.tensor(), torch.Tensor) - - # Check that the Tensor instance has the correct buffer, dtype, and shape - assert example_tensor_torch.buffer == example_tensor_torch.buffer - assert example_tensor_torch.dtype == example_tensor_torch.dtype - assert example_tensor_torch.shape == 
example_tensor_torch.shape - - -def test_buffer_field(): - # Create a Tensor instance with a specified buffer, dtype, and shape - tensor = bittensor.Tensor( - buffer="0x321e13edqwds231231231232131", dtype="float32", shape=[3, 3] - ) - - # Check that the buffer field matches the provided value - assert tensor.buffer == "0x321e13edqwds231231231232131" - - -def test_buffer_field_torch(force_legacy_torch_compat_api): - # Create a Tensor instance with a specified buffer, dtype, and shape - tensor = bittensor.Tensor( - buffer="0x321e13edqwds231231231232131", dtype="torch.float32", shape=[3, 3] - ) - - # Check that the buffer field matches the provided value - assert tensor.buffer == "0x321e13edqwds231231231232131" - - -def test_dtype_field(): - # Create a Tensor instance with a specified buffer, dtype, and shape - tensor = bittensor.Tensor( - buffer="0x321e13edqwds231231231232131", dtype="float32", shape=[3, 3] - ) - - # Check that the dtype field matches the provided value - assert tensor.dtype == "float32" - - -def test_dtype_field_torch(force_legacy_torch_compat_api): - tensor = bittensor.Tensor( - buffer="0x321e13edqwds231231231232131", dtype="torch.float32", shape=[3, 3] - ) - assert tensor.dtype == "torch.float32" - - -def test_shape_field(): - # Create a Tensor instance with a specified buffer, dtype, and shape - tensor = bittensor.Tensor( - buffer="0x321e13edqwds231231231232131", dtype="float32", shape=[3, 3] - ) - - # Check that the shape field matches the provided value - assert tensor.shape == [3, 3] - - -def test_shape_field_torch(force_legacy_torch_compat_api): - tensor = bittensor.Tensor( - buffer="0x321e13edqwds231231231232131", dtype="torch.float32", shape=[3, 3] - ) - assert tensor.shape == [3, 3] - - -def test_serialize_all_types(): - bittensor.tensor(np.array([1], dtype=np.float16)) - bittensor.tensor(np.array([1], dtype=np.float32)) - bittensor.tensor(np.array([1], dtype=np.float64)) - bittensor.tensor(np.array([1], dtype=np.uint8)) - 
bittensor.tensor(np.array([1], dtype=np.int32)) - bittensor.tensor(np.array([1], dtype=np.int64)) - bittensor.tensor(np.array([1], dtype=bool)) - - -def test_serialize_all_types_torch(force_legacy_torch_compat_api): - bittensor.tensor(torch.tensor([1], dtype=torch.float16)) - bittensor.tensor(torch.tensor([1], dtype=torch.float32)) - bittensor.tensor(torch.tensor([1], dtype=torch.float64)) - bittensor.tensor(torch.tensor([1], dtype=torch.uint8)) - bittensor.tensor(torch.tensor([1], dtype=torch.int32)) - bittensor.tensor(torch.tensor([1], dtype=torch.int64)) - bittensor.tensor(torch.tensor([1], dtype=torch.bool)) - - -def test_serialize_all_types_equality(): - rng = np.random.default_rng() - - tensor = rng.standard_normal((100,), dtype=np.float32) - assert np.all(bittensor.tensor(tensor).tensor() == tensor) - - tensor = rng.standard_normal((100,), dtype=np.float64) - assert np.all(bittensor.tensor(tensor).tensor() == tensor) - - tensor = np.random.randint(255, 256, (1000,), dtype=np.uint8) - assert np.all(bittensor.tensor(tensor).tensor() == tensor) - - tensor = np.random.randint(2_147_483_646, 2_147_483_647, (1000,), dtype=np.int32) - assert np.all(bittensor.tensor(tensor).tensor() == tensor) - - tensor = np.random.randint( - 9_223_372_036_854_775_806, 9_223_372_036_854_775_807, (1000,), dtype=np.int64 - ) - assert np.all(bittensor.tensor(tensor).tensor() == tensor) - - tensor = rng.standard_normal((100,), dtype=np.float32) < 0.5 - assert np.all(bittensor.tensor(tensor).tensor() == tensor) - - -def test_serialize_all_types_equality_torch(force_legacy_torch_compat_api): - torchtensor = torch.randn([100], dtype=torch.float16) - assert torch.all(bittensor.tensor(torchtensor).tensor() == torchtensor) - - torchtensor = torch.randn([100], dtype=torch.float32) - assert torch.all(bittensor.tensor(torchtensor).tensor() == torchtensor) - - torchtensor = torch.randn([100], dtype=torch.float64) - assert torch.all(bittensor.tensor(torchtensor).tensor() == torchtensor) - - 
torchtensor = torch.randint(255, 256, (1000,), dtype=torch.uint8) - assert torch.all(bittensor.tensor(torchtensor).tensor() == torchtensor) - - torchtensor = torch.randint( - 2_147_483_646, 2_147_483_647, (1000,), dtype=torch.int32 - ) - assert torch.all(bittensor.tensor(torchtensor).tensor() == torchtensor) - - torchtensor = torch.randint( - 9_223_372_036_854_775_806, 9_223_372_036_854_775_807, (1000,), dtype=torch.int64 - ) - assert torch.all(bittensor.tensor(torchtensor).tensor() == torchtensor) - - torchtensor = torch.randn([100], dtype=torch.float32) < 0.5 - assert torch.all(bittensor.tensor(torchtensor).tensor() == torchtensor) diff --git a/tests/unit_tests/test_wallet.py b/tests/unit_tests/test_wallet.py deleted file mode 100644 index 0d0466e344..0000000000 --- a/tests/unit_tests/test_wallet.py +++ /dev/null @@ -1,517 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import json -import time -import pytest -import random -import re -import bittensor -from bittensor.errors import KeyFileError -from rich.prompt import Confirm -from ansible_vault import Vault -from unittest.mock import patch - - -def legacy_encrypt_keyfile_data(keyfile_data: bytes, password: str = None) -> bytes: - console = bittensor.__console__ - with console.status(":locked_with_key: Encrypting key..."): - vault = Vault(password) - return vault.vault.encrypt(keyfile_data) - - -def create_wallet(default_updated_password): - # create an nacl wallet - wallet = bittensor.wallet( - name=f"mock-{str(time.time())}", - path="/tmp/tests_wallets/do_not_use", - ) - with patch.object( - bittensor, - "ask_password_to_encrypt", - return_value=default_updated_password, - ): - wallet.create() - assert "NaCl" in str(wallet.coldkey_file) - - return wallet - - -def create_legacy_wallet(default_legacy_password=None, legacy_password=None): - def _legacy_encrypt_keyfile_data(*args, **kwargs): - args = { - k: v - for k, v in zip( - legacy_encrypt_keyfile_data.__code__.co_varnames[: len(args)], - args, - ) - } - kwargs = {**args, **kwargs} - kwargs["password"] = legacy_password - return legacy_encrypt_keyfile_data(**kwargs) - - legacy_wallet = bittensor.wallet( - name=f"mock-legacy-{str(time.time())}", - path="/tmp/tests_wallets/do_not_use", - ) - legacy_password = ( - default_legacy_password if legacy_password == None else legacy_password - ) - - # create a legacy ansible wallet - with patch.object( - bittensor, - "encrypt_keyfile_data", - new=_legacy_encrypt_keyfile_data, - # new = TestWalletUpdate.legacy_encrypt_keyfile_data, - ): - legacy_wallet.create() - assert "Ansible" in str(legacy_wallet.coldkey_file) - - return legacy_wallet - - -@pytest.fixture -def wallet_update_setup(): - # Setup the default passwords and wallets - default_updated_password = "nacl_password" - default_legacy_password = "ansible_password" - empty_wallet = bittensor.wallet( - 
name=f"mock-empty-{str(time.time())}", - path="/tmp/tests_wallets/do_not_use", - ) - legacy_wallet = create_legacy_wallet( - default_legacy_password=default_legacy_password - ) - wallet = create_wallet(default_updated_password) - - return { - "default_updated_password": default_updated_password, - "default_legacy_password": default_legacy_password, - "empty_wallet": empty_wallet, - "legacy_wallet": legacy_wallet, - "wallet": wallet, - } - - -def test_encrypt_and_decrypt(): - """Test message can be encrypted and decrypted successfully with ansible/nacl.""" - json_data = { - "address": "This is the address.", - "id": "This is the id.", - "key": "This is the key.", - } - message = json.dumps(json_data).encode() - - # encrypt and decrypt with nacl - encrypted_message = bittensor.encrypt_keyfile_data(message, "password") - decrypted_message = bittensor.decrypt_keyfile_data(encrypted_message, "password") - assert decrypted_message == message - assert bittensor.keyfile_data_is_encrypted(encrypted_message) - assert not bittensor.keyfile_data_is_encrypted(decrypted_message) - assert not bittensor.keyfile_data_is_encrypted_ansible(decrypted_message) - assert bittensor.keyfile_data_is_encrypted_nacl(encrypted_message) - - # encrypt and decrypt with legacy ansible - encrypted_message = legacy_encrypt_keyfile_data(message, "password") - decrypted_message = bittensor.decrypt_keyfile_data(encrypted_message, "password") - assert decrypted_message == message - assert bittensor.keyfile_data_is_encrypted(encrypted_message) - assert not bittensor.keyfile_data_is_encrypted(decrypted_message) - assert not bittensor.keyfile_data_is_encrypted_nacl(decrypted_message) - assert bittensor.keyfile_data_is_encrypted_ansible(encrypted_message) - - -def test_check_and_update_encryption_not_updated(wallet_update_setup): - """Test for a few cases where wallet should not be updated. - 1. When the wallet is already updated. - 2. When it is the hotkey. - 3. When the wallet is empty. - 4. 
When the wallet is legacy but no prompt to ask for password. - 5. When the password is wrong. - """ - wallet = wallet_update_setup["wallet"] - empty_wallet = wallet_update_setup["empty_wallet"] - legacy_wallet = wallet_update_setup["legacy_wallet"] - default_legacy_password = wallet_update_setup["default_legacy_password"] - # test the checking with no rewriting needs to be done. - with patch("bittensor.encrypt_keyfile_data") as encrypt: - # self.wallet is already the most updated with nacl encryption. - assert wallet.coldkey_file.check_and_update_encryption() - - # hotkey_file is not encrypted, thus do not need to be updated. - assert not wallet.hotkey_file.check_and_update_encryption() - - # empty_wallet has not been created, thus do not need to be updated. - assert not empty_wallet.coldkey_file.check_and_update_encryption() - - # legacy wallet cannot be updated without asking for password form prompt. - assert not legacy_wallet.coldkey_file.check_and_update_encryption( - no_prompt=True - ) - - # Wrong password - legacy_wallet = create_legacy_wallet( - default_legacy_password=default_legacy_password - ) - with patch("getpass.getpass", return_value="wrong_password"), patch.object( - Confirm, "ask", return_value=False - ): - assert not legacy_wallet.coldkey_file.check_and_update_encryption() - - # no renewal has been done in this test. - assert not encrypt.called - - -def test_check_and_update_excryption(wallet_update_setup, legacy_wallet=None): - """Test for the alignment of the updated VS old wallet. - 1. Same coldkey_file data. - 2. Same coldkey path. - 3. Same hotkey_file data. - 4. Same hotkey path. - 5. same password. - - Read the updated wallet in 2 ways. - 1. Directly as the output of check_and_update_encryption() - 2. 
Read from file using the same coldkey and hotkey name - """ - default_legacy_password = wallet_update_setup["default_legacy_password"] - - def check_new_coldkey_file(keyfile): - new_keyfile_data = keyfile._read_keyfile_data_from_file() - new_decrypted_keyfile_data = bittensor.decrypt_keyfile_data( - new_keyfile_data, legacy_password - ) - new_path = legacy_wallet.coldkey_file.path - - assert old_coldkey_file_data != None - assert new_keyfile_data != None - assert not old_coldkey_file_data == new_keyfile_data - assert bittensor.keyfile_data_is_encrypted_ansible(old_coldkey_file_data) - assert bittensor.keyfile_data_is_encrypted_nacl(new_keyfile_data) - assert not bittensor.keyfile_data_is_encrypted_nacl(old_coldkey_file_data) - assert not bittensor.keyfile_data_is_encrypted_ansible(new_keyfile_data) - assert old_decrypted_coldkey_file_data == new_decrypted_keyfile_data - assert new_path == old_coldkey_path - - def check_new_hotkey_file(keyfile): - new_keyfile_data = keyfile._read_keyfile_data_from_file() - new_path = legacy_wallet.hotkey_file.path - - assert old_hotkey_file_data == new_keyfile_data - assert new_path == old_hotkey_path - assert not bittensor.keyfile_data_is_encrypted(new_keyfile_data) - - if legacy_wallet == None: - legacy_password = f"PASSword-{random.randint(0, 10000)}" - legacy_wallet = create_legacy_wallet(legacy_password=legacy_password) - - else: - legacy_password = default_legacy_password - - # get old cold keyfile data - old_coldkey_file_data = legacy_wallet.coldkey_file._read_keyfile_data_from_file() - old_decrypted_coldkey_file_data = bittensor.decrypt_keyfile_data( - old_coldkey_file_data, legacy_password - ) - old_coldkey_path = legacy_wallet.coldkey_file.path - - # get old hot keyfile data - old_hotkey_file_data = legacy_wallet.hotkey_file._read_keyfile_data_from_file() - old_hotkey_path = legacy_wallet.hotkey_file.path - - # update legacy_wallet from ansible to nacl - with patch("getpass.getpass", return_value=legacy_password), 
patch.object( - Confirm, "ask", return_value=True - ): - legacy_wallet.coldkey_file.check_and_update_encryption() - - # get new keyfile data from the same legacy wallet - check_new_coldkey_file(legacy_wallet.coldkey_file) - check_new_hotkey_file(legacy_wallet.hotkey_file) - - # get new keyfile data from wallet name - updated_legacy_wallet = bittensor.wallet( - name=legacy_wallet.name, - hotkey=legacy_wallet.hotkey_str, - path="/tmp/tests_wallets/do_not_use", - ) - check_new_coldkey_file(updated_legacy_wallet.coldkey_file) - check_new_hotkey_file(updated_legacy_wallet.hotkey_file) - - # def test_password_retain(self): - # [tick] test the same password works - # [tick] try to read using the same hotkey/coldkey name - # [tick] test the same keyfile data could be retained - # [tick] test what if a wrong password was inserted - # [no need] try to read from the new file path - # [tick] test the old and new encrypted is not the same - # [tick] test that the hotkeys are not affected - - -@pytest.fixture -def mock_wallet(): - wallet = bittensor.wallet( - name=f"mock-{str(time.time())}", - hotkey=f"mock-{str(time.time())}", - path="/tmp/tests_wallets/do_not_use", - ) - wallet.create_new_coldkey(use_password=False, overwrite=True, suppress=True) - wallet.create_new_hotkey(use_password=False, overwrite=True, suppress=True) - - return wallet - - -def test_regen_coldkeypub_from_ss58_addr(mock_wallet): - """Test the `regenerate_coldkeypub` method of the wallet class, which regenerates the cold key pair from an SS58 address. - It checks whether the `set_coldkeypub` method is called with the expected arguments, and verifies that the generated key pair's SS58 address matches the input SS58 address. - It also tests the behavior when an invalid SS58 address is provided, raising a `ValueError` as expected. 
- """ - ss58_address = "5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" - with patch.object(mock_wallet, "set_coldkeypub") as mock_set_coldkeypub: - mock_wallet.regenerate_coldkeypub( - ss58_address=ss58_address, overwrite=True, suppress=True - ) - - mock_set_coldkeypub.assert_called_once() - keypair: bittensor.Keypair = mock_set_coldkeypub.call_args_list[0][0][0] - assert keypair.ss58_address == ss58_address - - ss58_address_bad = ( - "5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zx" # 1 character short - ) - with pytest.raises(ValueError): - mock_wallet.regenerate_coldkeypub( - ss58_address=ss58_address_bad, overwrite=True, suppress=True - ) - - -def test_regen_coldkeypub_from_hex_pubkey_str(mock_wallet): - """Test the `regenerate_coldkeypub` method of the wallet class, which regenerates the cold key pair from a hex public key string. - It checks whether the `set_coldkeypub` method is called with the expected arguments, and verifies that the generated key pair's public key matches the input public key. - It also tests the behavior when an invalid public key string is provided, raising a `ValueError` as expected. 
- """ - pubkey_str = "0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f" - with patch.object(mock_wallet, "set_coldkeypub") as mock_set_coldkeypub: - mock_wallet.regenerate_coldkeypub( - public_key=pubkey_str, overwrite=True, suppress=True - ) - - mock_set_coldkeypub.assert_called_once() - keypair: bittensor.Keypair = mock_set_coldkeypub.call_args_list[0][0][0] - assert "0x" + keypair.public_key.hex() == pubkey_str - - pubkey_str_bad = "0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512" # 1 character short - with pytest.raises(ValueError): - mock_wallet.regenerate_coldkeypub( - ss58_address=pubkey_str_bad, overwrite=True, suppress=True - ) - - -def test_regen_coldkeypub_from_hex_pubkey_bytes(mock_wallet): - """Test the `regenerate_coldkeypub` method of the wallet class, which regenerates the cold key pair from a hex public key byte string. - It checks whether the `set_coldkeypub` method is called with the expected arguments, and verifies that the generated key pair's public key matches the input public key. - """ - pubkey_str = "0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f" - pubkey_bytes = bytes.fromhex(pubkey_str[2:]) # Remove 0x from beginning - with patch.object(mock_wallet, "set_coldkeypub") as mock_set_coldkeypub: - mock_wallet.regenerate_coldkeypub( - public_key=pubkey_bytes, overwrite=True, suppress=True - ) - - mock_set_coldkeypub.assert_called_once() - keypair: bittensor.Keypair = mock_set_coldkeypub.call_args_list[0][0][0] - assert keypair.public_key == pubkey_bytes - - -def test_regen_coldkeypub_no_pubkey(mock_wallet): - """Test the `regenerate_coldkeypub` method of the wallet class when no public key is provided. - It verifies that a `ValueError` is raised when neither a public key nor an SS58 address is provided. 
- """ - with pytest.raises(ValueError): - # Must provide either public_key or ss58_address - mock_wallet.regenerate_coldkeypub( - ss58_address=None, public_key=None, overwrite=True, suppress=True - ) - - -def test_regen_coldkey_from_hex_seed_str(mock_wallet): - """Test the `regenerate_coldkey` method of the wallet class, which regenerates the cold key pair from a hex seed string. - It checks whether the `set_coldkey` method is called with the expected arguments, and verifies that the generated key pair's seed and SS58 address match the input seed and the expected SS58 address. - It also tests the behavior when an invalid seed string is provided, raising a `ValueError` as expected. - """ - ss58_addr = "5D5cwd8DX6ij7nouVcoxDuWtJfiR1BnzCkiBVTt7DU8ft5Ta" - seed_str = "0x659c024d5be809000d0d93fe378cfde020846150b01c49a201fc2a02041f7636" - with patch.object(mock_wallet, "set_coldkey") as mock_set_coldkey: - mock_wallet.regenerate_coldkey(seed=seed_str, overwrite=True, suppress=True) - - mock_set_coldkey.assert_called_once() - keypair: bittensor.Keypair = mock_set_coldkey.call_args_list[0][0][0] - seed_hex = ( - keypair.seed_hex - if isinstance(keypair.seed_hex, str) - else keypair.seed_hex.hex() - ) - - assert re.match( - rf"(0x|){seed_str[2:]}", seed_hex - ), "The seed_hex does not match the expected pattern" - assert ( - keypair.ss58_address == ss58_addr - ) # Check that the ss58 address is correct - - seed_str_bad = "0x659c024d5be809000d0d93fe378cfde020846150b01c49a201fc2a02041f763" # 1 character short - with pytest.raises(ValueError): - mock_wallet.regenerate_coldkey(seed=seed_str_bad, overwrite=True, suppress=True) - - -def test_regen_hotkey_from_hex_seed_str(mock_wallet): - """Test the `regenerate_coldkey` method of the wallet class, which regenerates the cold key pair from a hex seed string. 
- It checks whether the `set_coldkey` method is called with the expected arguments, and verifies that the generated key pair's seed and SS58 address match the input seed and the expected SS58 address. - It also tests the behavior when an invalid seed string is provided, raising a `ValueError` as expected. - """ - ss58_addr = "5D5cwd8DX6ij7nouVcoxDuWtJfiR1BnzCkiBVTt7DU8ft5Ta" - seed_str = "0x659c024d5be809000d0d93fe378cfde020846150b01c49a201fc2a02041f7636" - with patch.object(mock_wallet, "set_hotkey") as mock_set_hotkey: - mock_wallet.regenerate_hotkey(seed=seed_str, overwrite=True, suppress=True) - - mock_set_hotkey.assert_called_once() - keypair: bittensor.Keypair = mock_set_hotkey.call_args_list[0][0][0] - - seed_hex = ( - keypair.seed_hex - if isinstance(keypair.seed_hex, str) - else keypair.seed_hex.hex() - ) - - pattern = rf"(0x|){seed_str[2:]}" - assert re.match( - pattern, seed_hex - ), f"The seed_hex '{seed_hex}' does not match the expected pattern '{pattern}'" - assert ( - keypair.ss58_address == ss58_addr - ) # Check that the ss58 address is correct - - seed_str_bad = "0x659c024d5be809000d0d93fe378cfde020846150b01c49a201fc2a02041f763" # 1 character short - with pytest.raises(ValueError): - mock_wallet.regenerate_hotkey(seed=seed_str_bad, overwrite=True, suppress=True) - - -@pytest.mark.parametrize( - "mnemonic, expected_exception", - [ - # Input is in a string format - ( - "fiscal prevent noise record smile believe quote front weasel book axis legal", - None, - ), - # Input is in a list format (acquired by encapsulating mnemonic arg in a string "" in the cli) - ( - [ - "fiscal prevent noise record smile believe quote front weasel book axis legal" - ], - None, - ), - # Input is in a full list format (aquired by pasting mnemonic arg simply w/o quotes in cli) - ( - [ - "fiscal", - "prevent", - "noise", - "record", - "smile", - "believe", - "quote", - "front", - "weasel", - "book", - "axis", - "legal", - ], - None, - ), - # Incomplete mnemonic - ("word1 
word2 word3", ValueError), - # No mnemonic added - (None, ValueError), - ], - ids=[ - "string-format", - "list-format-thru-string", - "list-format", - "incomplete-mnemonic", - "no-mnemonic", - ], -) -def test_regen_coldkey_mnemonic(mock_wallet, mnemonic, expected_exception): - """Test the `regenerate_coldkey` method of the wallet class, which regenerates the cold key pair from a mnemonic. - We test different input formats of mnemonics and check if the function works as expected. - """ - with patch.object(mock_wallet, "set_coldkey") as mock_set_coldkey, patch.object( - mock_wallet, "set_coldkeypub" - ) as mock_set_coldkeypub: - if expected_exception: - with pytest.raises(expected_exception): - mock_wallet.regenerate_coldkey( - mnemonic=mnemonic, overwrite=True, suppress=True - ) - else: - mock_wallet.regenerate_coldkey(mnemonic=mnemonic) - mock_set_coldkey.assert_called_once() - mock_set_coldkeypub.assert_called_once() - - -@pytest.mark.parametrize( - "overwrite, user_input, expected_exception", - [ - (True, None, None), # Test with overwrite=True, no user input needed - (False, "n", True), # Test with overwrite=False and user says no, KeyFileError - (False, "y", None), # Test with overwrite=False and user says yes - ], -) -def test_regen_coldkey_overwrite_functionality( - mock_wallet, overwrite, user_input, expected_exception -): - """Test the `regenerate_coldkey` method of the wallet class, emphasizing on the overwrite functionality""" - ss58_addr = "5D5cwd8DX6ij7nouVcoxDuWtJfiR1BnzCkiBVTt7DU8ft5Ta" - seed_str = "0x659c024d5be809000d0d93fe378cfde020846150b01c49a201fc2a02041f7636" - - with patch.object(mock_wallet, "set_coldkey") as mock_set_coldkey, patch( - "builtins.input", return_value=user_input - ): - if expected_exception: - with pytest.raises(KeyFileError): - mock_wallet.regenerate_coldkey( - seed=seed_str, overwrite=overwrite, suppress=True - ) - else: - mock_wallet.regenerate_coldkey( - seed=seed_str, overwrite=overwrite, suppress=True - ) - 
mock_set_coldkey.assert_called_once() - keypair = mock_set_coldkey.call_args_list[0][0][0] - seed_hex = ( - keypair.seed_hex - if isinstance(keypair.seed_hex, str) - else keypair.seed_hex.hex() - ) - assert re.match( - rf"(0x|){seed_str[2:]}", seed_hex - ), "The seed_hex does not match the expected pattern" - assert ( - keypair.ss58_address == ss58_addr - ), "The SS58 address does not match the expected address" diff --git a/tests/unit_tests/utils/__init__.py b/tests/unit_tests/utils/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit_tests/utils/test_balance.py b/tests/unit_tests/utils/test_balance.py deleted file mode 100644 index b99bc111f2..0000000000 --- a/tests/unit_tests/utils/test_balance.py +++ /dev/null @@ -1,509 +0,0 @@ -import pytest -from hypothesis import given -from hypothesis import strategies as st -from typing import Union - -from bittensor import Balance -from tests.helpers import CLOSE_IN_VALUE - -""" -Test the Balance class -""" -valid_tao_numbers_strategy = st.one_of( - st.integers(max_value=21_000_000, min_value=-21_000_000), - st.floats( - allow_infinity=False, - allow_nan=False, - allow_subnormal=False, - max_value=21_000_000.00, - min_value=-21_000_000.00, - ), -) - - -def remove_zero_filter(x): - """Remove zero and rounded to zero from the list of valid numbers""" - return int(x * pow(10, 9)) != 0 - - -@given(balance=valid_tao_numbers_strategy) -def test_balance_init(balance: Union[int, float]): - """ - Test the initialization of the Balance object. - """ - balance_ = Balance(balance) - if isinstance(balance, int): - assert balance_.rao == balance - elif isinstance(balance, float): - assert balance_.tao == CLOSE_IN_VALUE(balance, 0.00001) - - -@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) -def test_balance_add(balance: Union[int, float], balance2: Union[int, float]): - """ - Test the addition of two Balance objects. 
- """ - balance_ = Balance(balance) - balance2_ = Balance(balance2) - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - if isinstance(balance2, int): - rao2_ = balance2 - elif isinstance(balance2, float): - rao2_ = int(balance2 * pow(10, 9)) - - sum_ = balance_ + balance2_ - assert isinstance(sum_, Balance) - assert CLOSE_IN_VALUE(sum_.rao, 5) == rao_ + rao2_ - - -@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) -def test_balance_add_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the addition of a Balance object and a non-Balance object. - """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - # convert balance2 to rao. Assume balance2 was rao - rao2_ = int(balance2) - - sum_ = balance_ + balance2_ - assert isinstance(sum_, Balance) - assert CLOSE_IN_VALUE(sum_.rao, 5) == rao_ + rao2_ - - -@given(balance=valid_tao_numbers_strategy) -def test_balance_eq_other_not_balance(balance: Union[int, float]): - """ - Test the equality of a Balance object and a non-Balance object. - """ - balance_ = Balance(balance) - rao2_: int - # convert balance2 to rao. This assumes balance2 is a rao value - rao2_ = int(balance_.rao) - - assert CLOSE_IN_VALUE(rao2_, 5) == balance_ - - -@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) -def test_balance_radd_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the right addition (radd) of a Balance object and a non-Balance object. 
- """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - # assume balance2 is a rao value - rao2_ = int(balance2) - - sum_ = balance2_ + balance_ # This is an radd - assert isinstance(sum_, Balance) - assert CLOSE_IN_VALUE(sum_.rao, 5) == rao2_ + rao_ - - -@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) -def test_balance_sub(balance: Union[int, float], balance2: Union[int, float]): - """ - Test the subtraction of two Balance objects. - """ - balance_ = Balance(balance) - balance2_ = Balance(balance2) - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - if isinstance(balance2, int): - rao2_ = balance2 - elif isinstance(balance2, float): - rao2_ = int(balance2 * pow(10, 9)) - - diff_ = balance_ - balance2_ - assert isinstance(diff_, Balance) - assert CLOSE_IN_VALUE(diff_.rao, 5) == rao_ - rao2_ - - -@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) -def test_balance_sub_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the subtraction of a Balance object and a non-Balance object. - """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - # assume balance2 is a rao value - rao2_ = int(balance2) - - diff_ = balance_ - balance2_ - assert isinstance(diff_, Balance) - assert CLOSE_IN_VALUE(diff_.rao, 5) == rao_ - rao2_ - - -@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) -def test_balance_rsub_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the right subtraction (rsub) of a Balance object and a non-Balance object. 
- """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - # assume balance2 is a rao value - rao2_ = int(balance2) - - diff_ = balance2_ - balance_ # This is an rsub - assert isinstance(diff_, Balance) - assert CLOSE_IN_VALUE(diff_.rao, 5) == rao2_ - rao_ - - -@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) -def test_balance_mul(balance: Union[int, float], balance2: Union[int, float]): - """ - Test the multiplication of two Balance objects. - """ - balance_ = Balance(balance) - balance2_ = Balance(balance2) - rao_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - if isinstance(balance2, int): - rao2_ = balance2 - elif isinstance(balance2, float): - rao2_ = int(balance2 * pow(10, 9)) - - prod_ = balance_ * balance2_ - assert isinstance(prod_, Balance) - - assert ( - prod_.rao == pytest.approx(rao_ * rao2_, 9) - ), f"{balance_} * {balance2_} == {prod_.rao} != {rao_} * {balance2} == {rao_ * balance2}" - - -@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) -def test_balance_mul_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the multiplication of a Balance object and a non-Balance object. 
- """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - - prod_ = balance_ * balance2_ - assert isinstance(prod_, Balance) - - assert ( - abs(prod_.rao - int(rao_ * balance2)) <= 20 - ), f"{prod_.rao} != {int(rao_ * balance2)}" - assert prod_.rao == pytest.approx(int(rao_ * balance2)) - - -@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) -def test_balance_rmul_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the right multiplication (rmul) of a Balance object and a non-Balance object. - """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - - prod_ = balance2_ * balance_ # This is an rmul - assert isinstance(prod_, Balance) - - assert ( - abs(prod_.rao - int(balance2 * rao_)) <= 20 - ), f"{prod_.rao} != {int(balance2 * rao_)}" - assert prod_.rao == pytest.approx(int(balance2 * rao_)) - - -@given( - balance=valid_tao_numbers_strategy, - balance2=valid_tao_numbers_strategy.filter(remove_zero_filter), -) # Avoid zero division -def test_balance_truediv(balance: Union[int, float], balance2: Union[int, float]): - """ - Test the true division (/) of two Balance objects. 
- """ - balance_ = Balance(balance) - balance2_ = Balance(balance2) - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - if isinstance(balance2, int): - rao2_ = balance2 - elif isinstance(balance2, float): - rao2_ = int(balance2 * pow(10, 9)) - - quot_ = balance_ / balance2_ - assert isinstance(quot_, Balance) - assert ( - abs(quot_.rao - int(rao_ / rao2_)) <= 2 - ), f"{quot_.rao} != {int(rao_ / rao2_)}" - assert quot_.rao == pytest.approx(int(rao_ / rao2_)) - - -@given( - balance=valid_tao_numbers_strategy, - balance2=valid_tao_numbers_strategy.filter(remove_zero_filter), -) -def test_balance_truediv_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the true division (/) of a Balance object and a non-Balance object. - """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - # assume balance2 is a rao value - rao2_ = balance2 - - quot_ = balance_ / balance2_ - assert quot_.rao == pytest.approx(int(rao_ / rao2_)) - assert ( - abs(quot_.rao - int(rao_ / rao2_)) <= 10 - ), f"{quot_.rao} != {int(rao_ / rao2_)}" - - -@given( - balance=valid_tao_numbers_strategy.filter(remove_zero_filter), - balance2=valid_tao_numbers_strategy, -) # This is a filter to avoid division by zero -def test_balance_rtruediv_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the right true division (rtruediv) of a Balance object and a non-Balance object. 
- """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - # assume balance2 is a rao value - rao2_ = balance2 - - quot_ = balance2_ / balance_ # This is an rtruediv - assert isinstance(quot_, Balance) - expected_value = int(rao2_ / rao_) - assert ( - abs(quot_.rao - expected_value) <= 5 - ), f"{balance2_} / {balance_} = {quot_.rao} != {expected_value}" - assert quot_.rao == pytest.approx(expected_value) - - -@given( - balance=valid_tao_numbers_strategy, - balance2=valid_tao_numbers_strategy.filter(remove_zero_filter), -) # Avoid zero division -def test_balance_floordiv(balance: Union[int, float], balance2: Union[int, float]): - """ - Test the floor division (//) of two Balance objects. - """ - balance_ = Balance(balance) - balance2_ = Balance(balance2) - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - if isinstance(balance2, int): - rao2_ = balance2 - elif isinstance(balance2, float): - rao2_ = int(balance2 * pow(10, 9)) - - quot_ = balance_ // balance2_ - assert isinstance(quot_, Balance) - assert CLOSE_IN_VALUE(quot_.rao, 5) == rao_ // rao2_ - - -@given( - balance=valid_tao_numbers_strategy, - balance2=valid_tao_numbers_strategy.filter(remove_zero_filter), -) -def test_balance_floordiv_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the floor division (//) of a Balance object and a non-Balance object. 
- """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - # assume balance2 is a rao value - rao2_ = balance2 - - quot_ = balance_ // balance2_ - assert isinstance(quot_, Balance) - expected_value = rao_ // rao2_ - assert ( - abs(quot_.rao - expected_value) <= 5 - ), f"{balance_} // {balance2_} = {quot_.rao} != {expected_value}" - assert quot_.rao == pytest.approx(rao_ // rao2_) - - -@given( - balance=valid_tao_numbers_strategy.filter(remove_zero_filter), - balance2=valid_tao_numbers_strategy, -) # This is a filter to avoid division by zero -def test_balance_rfloordiv_other_not_balance( - balance: Union[int, float], balance2: Union[int, float] -): - """ - Test the right floor division (rfloordiv) of a Balance object and a non-Balance object. - """ - balance_ = Balance(balance) - balance2_ = balance2 - rao_: int - rao2_: int - if isinstance(balance, int): - rao_ = balance - elif isinstance(balance, float): - rao_ = int(balance * pow(10, 9)) - # assume balance2 is a rao value - rao2_ = balance2 - - quot_ = balance2_ // balance_ # This is an rfloordiv - assert isinstance(quot_, Balance) - expected_value = rao2_ // rao_ - assert quot_.rao == pytest.approx(rao2_ // rao_) - assert abs(quot_.rao - expected_value) <= 5 - - -@given(balance=valid_tao_numbers_strategy) -def test_balance_not_eq_none(balance: Union[int, float]): - """ - Test the inequality (!=) of a Balance object and None. - """ - balance_ = Balance(balance) - assert not balance_ == None - - -@given(balance=valid_tao_numbers_strategy) -def test_balance_neq_none(balance: Union[int, float]): - """ - Test the inequality (!=) of a Balance object and None. - """ - balance_ = Balance(balance) - assert balance_ != None - - -def test_balance_init_from_invalid_value(): - """ - Test the initialization of a Balance object with an invalid value. 
- """ - with pytest.raises(TypeError): - Balance("invalid not a number") - - -@given(balance=valid_tao_numbers_strategy) -def test_balance_add_invalid_type(balance: Union[int, float]): - """ - Test the addition of a Balance object with an invalid type. - """ - balance_ = Balance(balance) - with pytest.raises(NotImplementedError): - _ = balance_ + "" - - -@given(balance=valid_tao_numbers_strategy) -def test_balance_sub_invalid_type(balance: Union[int, float]): - """ - Test the subtraction of a Balance object with an invalid type. - """ - balance_ = Balance(balance) - with pytest.raises(NotImplementedError): - _ = balance_ - "" - - -@given(balance=valid_tao_numbers_strategy) -def test_balance_div_invalid_type(balance: Union[int, float]): - """ - Test the division of a Balance object with an invalid type. - """ - balance_ = Balance(balance) - with pytest.raises(NotImplementedError): - _ = balance_ / "" - - -@given(balance=valid_tao_numbers_strategy) -def test_balance_mul_invalid_type(balance: Union[int, float]): - """ - Test the multiplication of a Balance object with an invalid type. - """ - balance_ = Balance(balance) - with pytest.raises(NotImplementedError): - _ = balance_ * "" - - -@given(balance=valid_tao_numbers_strategy) -def test_balance_eq_invalid_type(balance: Union[int, float]): - """ - Test the equality of a Balance object with an invalid type. 
- """ - balance_ = Balance(balance) - with pytest.raises(NotImplementedError): - balance_ == "" diff --git a/tests/unit_tests/utils/test_networking.py b/tests/unit_tests/utils/test_networking.py deleted file mode 100644 index 2037718578..0000000000 --- a/tests/unit_tests/utils/test_networking.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -import urllib -import pytest -import requests -import unittest.mock as mock -from bittensor import utils -from unittest.mock import MagicMock - - -# Test conversion functions for IPv4 -def test_int_to_ip_zero(): - """Test converting integer to IPv4 address for 0.""" - assert utils.networking.int_to_ip(0) == "0.0.0.0" - assert utils.networking.ip_to_int("0.0.0.0") == 0 - assert utils.networking.ip__str__(4, "0.0.0.0", 8888) == "/ipv4/0.0.0.0:8888" - - -def test_int_to_ip_range(): - """Test converting integer to IPv4 addresses in a range.""" - for i in range(10): - assert utils.networking.int_to_ip(i) == f"0.0.0.{i}" - assert utils.networking.ip_to_int(f"0.0.0.{i}") == i - assert ( - utils.networking.ip__str__(4, f"0.0.0.{i}", 8888) == f"/ipv4/0.0.0.{i}:8888" - ) - - -def test_int_to_ip4_max(): - """Test converting integer to maximum IPv4 address.""" - assert utils.networking.int_to_ip(4294967295) == "255.255.255.255" - assert utils.networking.ip_to_int("255.255.255.255") == 4294967295 - assert ( - utils.networking.ip__str__(4, "255.255.255.255", 8888) - == "/ipv4/255.255.255.255:8888" - ) - - -# Test conversion functions for IPv6 -def test_int_to_ip6_zero(): - """Test converting integer to IPv6 address for 0.""" - assert utils.networking.int_to_ip(4294967296) == "::1:0:0" - assert utils.networking.ip_to_int("::1:0:0") == 4294967296 - assert utils.networking.ip__str__(6, "::1:0:0", 8888) == "/ipv6/::1:0:0:8888" - - -def test_int_to_ip6_range(): - """Test converting integer to IPv6 addresses in a range.""" - for i in range(10): - assert utils.networking.int_to_ip(4294967296 + i) == f"::1:0:{i}" - assert 
utils.networking.ip_to_int(f"::1:0:{i}") == 4294967296 + i - assert ( - utils.networking.ip__str__(6, f"::1:0:{i}", 8888) == f"/ipv6/::1:0:{i}:8888" - ) - - -def test_int_to_ip6_max(): - """Test converting integer to maximum IPv6 address.""" - max_val = 340282366920938463463374607431768211455 - assert ( - utils.networking.int_to_ip(max_val) == "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" - ) - assert ( - utils.networking.ip_to_int("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") == max_val - ) - assert ( - utils.networking.ip__str__(6, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", 8888) - == "/ipv6/ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff:8888" - ) - - -def test_int_to_ip6_overflow(): - """Test handling overflow when converting integer to IPv6 address.""" - overflow = 340282366920938463463374607431768211455 + 1 - with pytest.raises(Exception): - utils.networking.int_to_ip(overflow) - - -def test_int_to_ip6_underflow(): - """Test handling underflow when converting integer to IPv6 address.""" - underflow = -1 - with pytest.raises(Exception): - utils.networking.int_to_ip(underflow) - - -# Test getting external IP address -def test_get_external_ip(): - """Test getting the external IP address.""" - assert utils.networking.get_external_ip() - - -def test_get_external_ip_os_broken(): - """Test getting the external IP address when os.popen is broken.""" - - class FakeReadline: - def readline(self): - return 1 - - def mock_call(): - return FakeReadline() - - with mock.patch.object(os, "popen", new=mock_call): - assert utils.networking.get_external_ip() - - -def test_get_external_ip_os_request_urllib_broken(): - """Test getting the external IP address when os.popen and requests.get/urllib.request are broken.""" - - class FakeReadline: - def readline(self): - return 1 - - def mock_call(): - return FakeReadline() - - class FakeResponse: - def text(self): - return 1 - - def mock_call_two(): - return FakeResponse() - - class FakeRequest: - def urlopen(self): - return 1 - - with 
mock.patch.object(os, "popen", new=mock_call): - with mock.patch.object(requests, "get", new=mock_call_two): - urllib.request = MagicMock(return_value=FakeRequest()) - with pytest.raises(Exception): - assert utils.networking.get_external_ip() - - -# Test formatting WebSocket endpoint URL -@pytest.mark.parametrize( - "url, expected", - [ - ("wss://exampleendpoint:9944", "wss://exampleendpoint:9944"), - ("ws://exampleendpoint:9944", "ws://exampleendpoint:9944"), - ( - "exampleendpoint:9944", - "ws://exampleendpoint:9944", - ), # should add ws:// not wss:// - ( - "ws://exampleendpoint", - "ws://exampleendpoint", - ), # should not add port if not specified - ( - "wss://exampleendpoint", - "wss://exampleendpoint", - ), # should not add port if not specified - ( - "exampleendpoint", - "ws://exampleendpoint", - ), # should not add port if not specified - ( - "exampleendpointwithws://:9944", - "ws://exampleendpointwithws://:9944", - ), # should only care about the front - ( - "exampleendpointwithwss://:9944", - "ws://exampleendpointwithwss://:9944", - ), # should only care about the front - ], -) -def test_format(url: str, expected: str): - """Test formatting WebSocket endpoint URL.""" - assert utils.networking.get_formatted_ws_endpoint_url(url) == expected diff --git a/tests/unit_tests/utils/test_registration.py b/tests/unit_tests/utils/test_registration.py deleted file mode 100644 index d0c4fc743b..0000000000 --- a/tests/unit_tests/utils/test_registration.py +++ /dev/null @@ -1,45 +0,0 @@ -import pytest - -from bittensor.utils.registration import LazyLoadedTorch - - -class MockBittensorLogging: - def __init__(self): - self.messages = [] - - def error(self, message): - self.messages.append(message) - - -@pytest.fixture -def mock_bittensor_logging(monkeypatch): - mock_logger = MockBittensorLogging() - monkeypatch.setattr("bittensor.logging", mock_logger) - return mock_logger - - -def test_lazy_loaded_torch__torch_installed(monkeypatch, mock_bittensor_logging): - import 
torch - - lazy_torch = LazyLoadedTorch() - - assert bool(torch) is True - - assert lazy_torch.nn is torch.nn - with pytest.raises(AttributeError): - lazy_torch.no_such_thing - - -def test_lazy_loaded_torch__no_torch(monkeypatch, mock_bittensor_logging): - monkeypatch.setattr("bittensor.utils.registration._get_real_torch", lambda: None) - - torch = LazyLoadedTorch() - - assert not torch - - with pytest.raises(ImportError): - torch.some_attribute - - # Check if the error message is logged correctly - assert len(mock_bittensor_logging.messages) == 1 - assert "This command requires torch." in mock_bittensor_logging.messages[0] diff --git a/tests/unit_tests/utils/test_subtensor.py b/tests/unit_tests/utils/test_subtensor.py deleted file mode 100644 index 1c1220bcea..0000000000 --- a/tests/unit_tests/utils/test_subtensor.py +++ /dev/null @@ -1,99 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2022 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import json - -import pytest - -import bittensor.utils.subtensor as st_utils - - -class MockPallet: - def __init__(self, errors): - self.errors = errors - - -@pytest.fixture -def pallet_with_errors(): - """Provide a mock pallet with sample errors.""" - return MockPallet( - [ - {"index": 1, "name": "ErrorOne", "docs": ["Description one."]}, - { - "index": 2, - "name": "ErrorTwo", - "docs": ["Description two.", "Continued."], - }, - ] - ) - - -@pytest.fixture -def empty_pallet(): - """Provide a mock pallet with no errors.""" - return MockPallet([]) - - -def test_get_errors_from_pallet_with_errors(pallet_with_errors): - """Ensure errors are correctly parsed from pallet.""" - expected = { - "1": {"name": "ErrorOne", "description": "Description one."}, - "2": {"name": "ErrorTwo", "description": "Description two. Continued."}, - } - assert st_utils._get_errors_from_pallet(pallet_with_errors) == expected - - -def test_get_errors_from_pallet_empty(empty_pallet): - """Test behavior with an empty list of errors.""" - assert st_utils._get_errors_from_pallet(empty_pallet) is None - - -def test_save_errors_to_cache(tmp_path): - """Ensure that errors are correctly saved to a file.""" - test_file = tmp_path / "subtensor_errors_map.json" - errors = {"1": {"name": "ErrorOne", "description": "Description one."}} - st_utils._ERRORS_FILE_PATH = test_file - st_utils._save_errors_to_cache("0x123", errors) - - with open(test_file, "r") as file: - data = json.load(file) - assert data["subtensor_build_id"] == "0x123" - assert data["errors"] == errors - - -def test_get_errors_from_cache(tmp_path): - """Test retrieval of errors from cache.""" - test_file = tmp_path / "subtensor_errors_map.json" - errors = {"1": {"name": "ErrorOne", "description": "Description one."}} - - st_utils._ERRORS_FILE_PATH = test_file - with open(test_file, "w") as file: - json.dump({"subtensor_build_id": "0x123", "errors": errors}, file) - assert st_utils._get_errors_from_cache() == { - "subtensor_build_id": 
"0x123", - "errors": errors, - } - - -def test_get_errors_no_cache(mocker, empty_pallet): - """Test get_errors function when no cache is available.""" - mocker.patch("bittensor.utils.subtensor._get_errors_from_cache", return_value=None) - mocker.patch("bittensor.utils.subtensor.SubstrateInterface") - substrate_mock = mocker.MagicMock() - substrate_mock.metadata.get_metadata_pallet.return_value = empty_pallet - substrate_mock.metadata[0].value = "0x123" - assert st_utils.get_subtensor_errors(substrate_mock) == {} diff --git a/tests/unit_tests/utils/test_utils.py b/tests/unit_tests/utils/test_utils.py deleted file mode 100644 index 3c077aba78..0000000000 --- a/tests/unit_tests/utils/test_utils.py +++ /dev/null @@ -1,328 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import logging - -import numpy as np -import bittensor.utils.weight_utils as weight_utils -import pytest - - -def test_convert_weight_and_uids(): - uids = np.arange(10) - weights = np.random.rand(10) - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # min weight < 0 - weights[5] = -1 - with pytest.raises(ValueError) as pytest_wrapped_e: - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # min uid < 0 - weights[5] = 0 - uids[3] = -1 - with pytest.raises(ValueError) as pytest_wrapped_e: - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # len(uids) != len(weights) - uids[3] = 3 - with pytest.raises(ValueError) as pytest_wrapped_e: - weight_utils.convert_weights_and_uids_for_emit(uids, weights[1:]) - - # sum(weights) == 0 - weights = np.zeros(10) - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # test for overflow and underflow - for _ in range(5): - uids = np.arange(10) - weights = np.random.rand(10) - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - -def test_normalize_with_max_weight(): - weights = np.random.rand(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.01) - assert wn.max() <= 0.01 - - weights = np.zeros(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.01) - assert wn.max() <= 0.01 - - weights = np.random.rand(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.02) - assert wn.max() <= 0.02 - - weights = np.zeros(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.02) - assert wn.max() <= 0.02 - - weights = np.random.rand(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.03) - assert wn.max() <= 0.03 - - weights = np.zeros(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.03) - assert wn.max() <= 0.03 - - # Check for Limit - limit = 0.001 - weights = np.random.rand(2000) - w = weights / weights.sum() - wn = weight_utils.normalize_max_weight(weights, limit=limit) - assert (w.max() >= 
limit and np.abs(limit - wn.max()) < 0.001) or ( - w.max() < limit and wn.max() < limit - ) - - # Check for Zeros - limit = 0.01 - weights = np.zeros(2000) - wn = weight_utils.normalize_max_weight(weights, limit=limit) - assert wn.max() == 1 / 2000 - - # Check for Ordering after normalization - weights = np.random.rand(100) - wn = weight_utils.normalize_max_weight(weights, limit=1) - assert np.array_equal(wn, weights / weights.sum()) - - # Check for epsilon changes - epsilon = 0.01 - weights = np.sort(np.random.rand(100)) - x = weights / weights.sum() - limit = x[-10] - change = epsilon * limit - y = weight_utils.normalize_max_weight(x, limit=limit - change) - z = weight_utils.normalize_max_weight(x, limit=limit + change) - assert np.abs(y - z).sum() < epsilon - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, expected", - [ - ("happy-path-1", 3, [0, 1, 2], [15, 5, 80], np.array([0.15, 0.05, 0.8])), - ("happy-path-2", 4, [1, 3], [50, 50], np.array([0.0, 0.5, 0.0, 0.5])), - ], -) -def test_convert_weight_uids_and_vals_to_tensor_happy_path( - test_id, n, uids, weights, expected -): - # Act - result = weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) - - # Assert - assert np.allclose(result, expected), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, expected", - [ - ("edge_case_empty", 5, [], [], np.zeros(5)), - ("edge_case_single", 1, [0], [100], np.array([1.0])), - ("edge_case_all_zeros", 4, [0, 1, 2, 3], [0, 0, 0, 0], np.zeros(4)), - ], -) -def test_convert_weight_uids_and_vals_to_tensor_edge_cases( - test_id, n, uids, weights, expected -): - # Act - result = weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) - - # Assert - assert np.allclose(result, expected), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, exception", - [ - ("error-case-mismatched-lengths", 3, [0, 1, 3, 4, 5], [10, 20, 30], IndexError), - ("error-case-negative-n", -1, [0, 1], [10, 
20], ValueError), - ("error-case-invalid-uids", 3, [0, 3], [10, 20], IndexError), - ], -) -def test_convert_weight_uids_and_vals_to_tensor_error_cases( - test_id, n, uids, weights, exception -): - # Act / Assert - with pytest.raises(exception): - weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, subnets, expected", - [ - ( - "happy-path-1", - 3, - [0, 1, 2], - [15, 5, 80], - [0, 1, 2], - np.array([0.15, 0.05, 0.8]), - ), - ( - "happy-path-2", - 3, - [0, 2], - [300, 300], - [0, 1, 2], - np.array([0.5, 0.0, 0.5]), - ), - ], -) -def test_convert_root_weight_uids_and_vals_to_tensor_happy_paths( - test_id, n, uids, weights, subnets, expected -): - # Act - result = weight_utils.convert_root_weight_uids_and_vals_to_tensor( - n, uids, weights, subnets - ) - - # Assert - assert np.allclose(result, expected, atol=1e-4), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, subnets, expected", - [ - ( - "edge-1", - 1, - [0], - [0], - [0], - np.array([0.0]), - ), # Single neuron with zero weight - ( - "edge-2", - 2, - [0, 1], - [0, 0], - [0, 1], - np.array([0.0, 0.0]), - ), # All zero weights - ], -) -def test_convert_root_weight_uids_and_vals_to_tensor_edge_cases( - test_id, n, uids, weights, subnets, expected -): - # Act - result = weight_utils.convert_root_weight_uids_and_vals_to_tensor( - n, uids, weights, subnets - ) - - # Assert - assert np.allclose(result, expected, atol=1e-4), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, subnets, exception", - [ - # uid not in subnets - ( - "error-1", - 3, - [1, 3], - [100, 200], - [1, 2], - "The subnet is unavailable at the moment.", - ), - # More uids than subnets - ( - "error-2", - 3, - [1, 2, 3], - [100, 200], - [1], - "The subnet is unavailable at the moment.", - ), - ], -) -def test_convert_root_weight_uids_and_vals_to_tensor_error_cases( - test_id, n, uids, weights, subnets, exception, 
caplog -): - with caplog.at_level(logging.WARNING): - weight_utils.convert_root_weight_uids_and_vals_to_tensor( - n, uids, weights, subnets - ) - - assert any( - exception in record.message and record.levelname == "WARNING" - for record in caplog.records - ) - - -@pytest.mark.parametrize( - "test_id, n, uids, bonds, expected_output", - [ - ( - "happy-path-1", - 5, - [1, 3, 4], - [10, 20, 30], - np.array([0, 10, 0, 20, 30], dtype=np.int64), - ), - ( - "happy-path-2", - 3, - [0, 1, 2], - [7, 8, 9], - np.array([7, 8, 9], dtype=np.int64), - ), - ("happy-path-3", 4, [2], [15], np.array([0, 0, 15, 0], dtype=np.int64)), - ], -) -def test_happy_path(test_id, n, uids, bonds, expected_output): - # Act - result = weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) - - # Assert - assert np.array_equal(result, expected_output), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, bonds, expected_output", - [ - ("edge-1", 1, [0], [0], np.array([0], dtype=np.int64)), # Single element - ( - "edge-2", - 10, - [], - [], - np.zeros(10, dtype=np.int64), - ), # Empty uids and bonds - ], -) -def test_edge_cases(test_id, n, uids, bonds, expected_output): - # Act - result = weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) - - # Assert - assert np.array_equal(result, expected_output), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, bonds, exception", - [ - ("error-1", 5, [1, 3, 6], [10, 20, 30], IndexError), # uid out of bounds - ("error-2", -1, [0], [10], ValueError), # Negative number of neurons - ], -) -def test_error_cases(test_id, n, uids, bonds, exception): - # Act / Assert - with pytest.raises(exception): - weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) diff --git a/tests/unit_tests/utils/test_version.py b/tests/unit_tests/utils/test_version.py deleted file mode 100644 index f9760933f3..0000000000 --- a/tests/unit_tests/utils/test_version.py +++ /dev/null @@ -1,168 +0,0 @@ -# The MIT License 
(MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -from pathlib import Path -import pytest -from freezegun import freeze_time -from datetime import datetime, timedelta, timezone - -from bittensor.utils.version import ( - VERSION_CHECK_THRESHOLD, - VersionCheckError, - get_and_save_latest_version, - check_version, - version_checking, -) -from unittest.mock import MagicMock -from pytest_mock import MockerFixture - - -@pytest.fixture -def pypi_version(): - return "6.9.3" - - -@pytest.fixture -def mock_get_version_from_pypi(mocker: MockerFixture, pypi_version: str): - return mocker.patch( - "bittensor.utils.version._get_version_from_pypi", - return_value=pypi_version, - autospec=True, - ) - - -@pytest.fixture -def version_file_path(mocker: MockerFixture, tmp_path: Path): - file_path = tmp_path / ".version" - - mocker.patch( - "bittensor.utils.version._get_version_file_path", return_value=file_path - ) - return file_path - - -def test_get_and_save_latest_version_no_file( - mock_get_version_from_pypi: MagicMock, version_file_path: Path, pypi_version: str -): - assert not version_file_path.exists() - - assert get_and_save_latest_version() == pypi_version - - mock_get_version_from_pypi.assert_called_once() - assert version_file_path.exists() - assert version_file_path.read_text() == pypi_version - - -@pytest.mark.parametrize("elapsed", [0, VERSION_CHECK_THRESHOLD - 5]) -def test_get_and_save_latest_version_file_fresh_check( - mock_get_version_from_pypi: MagicMock, version_file_path: Path, elapsed: int -): - now = datetime.now(timezone.utc) - - version_file_path.write_text("6.9.5") - - with freeze_time(now + timedelta(seconds=elapsed)): - assert get_and_save_latest_version() == "6.9.5" - - mock_get_version_from_pypi.assert_not_called() - - -def test_get_and_save_latest_version_file_expired_check( - mock_get_version_from_pypi: MagicMock, version_file_path: Path, pypi_version: str -): - now = datetime.now(timezone.utc) - - version_file_path.write_text("6.9.5") - - with freeze_time(now + 
timedelta(seconds=VERSION_CHECK_THRESHOLD + 1)): - assert get_and_save_latest_version() == pypi_version - - mock_get_version_from_pypi.assert_called_once() - assert version_file_path.read_text() == pypi_version - - -@pytest.mark.parametrize( - ("current_version", "latest_version"), - [ - ("6.9.3", "6.9.4"), - ("6.9.3a1", "6.9.3a2"), - ("6.9.3a1", "6.9.3b1"), - ("6.9.3", "6.10"), - ("6.9.3", "7.0"), - ("6.0.15", "6.1.0"), - ], -) -def test_check_version_newer_available( - mocker: MockerFixture, current_version: str, latest_version: str, capsys -): - mocker.patch("bittensor.utils.version.bittensor.__version__", current_version) - mocker.patch( - "bittensor.utils.version.get_and_save_latest_version", - return_value=latest_version, - ) - - check_version() - - captured = capsys.readouterr() - - assert "update" in captured.out - assert current_version in captured.out - assert latest_version in captured.out - - -@pytest.mark.parametrize( - ("current_version", "latest_version"), - [ - ("6.9.3", "6.9.3"), - ("6.9.3", "6.9.2"), - ("6.9.3b", "6.9.3a"), - ], -) -def test_check_version_up_to_date( - mocker: MockerFixture, current_version: str, latest_version: str, capsys -): - mocker.patch("bittensor.utils.version.bittensor.__version__", current_version) - mocker.patch( - "bittensor.utils.version.get_and_save_latest_version", - return_value=latest_version, - ) - - check_version() - - captured = capsys.readouterr() - - assert captured.out == "" - - -def test_version_checking(mocker: MockerFixture): - mock = mocker.patch("bittensor.utils.version.check_version") - - version_checking() - - mock.assert_called_once() - - -def test_version_checking_exception(mocker: MockerFixture): - mock = mocker.patch( - "bittensor.utils.version.check_version", side_effect=VersionCheckError - ) - - version_checking() - - mock.assert_called_once() diff --git a/tests/unit_tests/utils/test_weight_utils.py b/tests/unit_tests/utils/test_weight_utils.py deleted file mode 100644 index 
66f3c8127a..0000000000 --- a/tests/unit_tests/utils/test_weight_utils.py +++ /dev/null @@ -1,534 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies Inc - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import logging -import numpy as np -import bittensor.utils.weight_utils as weight_utils -import pytest - -from bittensor.utils import torch - - -def test_convert_weight_and_uids(): - uids = np.arange(10) - weights = np.random.rand(10) - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # min weight < 0 - weights[5] = -1 - with pytest.raises(ValueError) as pytest_wrapped_e: - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # min uid < 0 - weights[5] = 0 - uids[3] = -1 - with pytest.raises(ValueError) as pytest_wrapped_e: - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # len(uids) != len(weights) - uids[3] = 3 - with pytest.raises(ValueError) as pytest_wrapped_e: - weight_utils.convert_weights_and_uids_for_emit(uids, weights[1:]) - - # sum(weights) == 0 - weights = np.zeros(10) - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # test for overflow and underflow - for _ in range(5): - uids = np.arange(10) - weights = np.random.rand(10) - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - -def test_convert_weight_and_uids_torch(force_legacy_torch_compat_api): - uids = torch.tensor(list(range(10))) - weights = torch.rand(10) - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # min weight < 0 - weights[5] = -1 - with pytest.raises(ValueError) as pytest_wrapped_e: - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - # min uid < 0 - weights[5] = 0 - uids[3] = -1 - with pytest.raises(ValueError) as pytest_wrapped_e: - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - # len(uids) != len(weights) - uids[3] = 3 - with pytest.raises(ValueError) as pytest_wrapped_e: - weight_utils.convert_weights_and_uids_for_emit(uids, weights[1:]) - - # sum(weights) == 0 - weights = torch.zeros(10) - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - # test for overflow and underflow - for _ in range(5): - uids = torch.tensor(list(range(10))) - 
weights = torch.rand(10) - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - - -def test_normalize_with_max_weight(): - weights = np.random.rand(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.01) - assert wn.max() <= 0.01 - - weights = np.zeros(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.01) - assert wn.max() <= 0.01 - - weights = np.random.rand(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.02) - assert wn.max() <= 0.02 - - weights = np.zeros(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.02) - assert wn.max() <= 0.02 - - weights = np.random.rand(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.03) - assert wn.max() <= 0.03 - - weights = np.zeros(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.03) - assert wn.max() <= 0.03 - - # Check for Limit - limit = 0.001 - weights = np.random.rand(2000) - w = weights / weights.sum() - wn = weight_utils.normalize_max_weight(weights, limit=limit) - assert abs((w.max() >= limit and (limit - wn.max())) < 0.001) or ( - w.max() < limit and wn.max() < limit - ) - - # Check for Zeros - limit = 0.01 - weights = np.zeros(2000) - wn = weight_utils.normalize_max_weight(weights, limit=limit) - assert wn.max() == 1 / 2000 - - # Check for Ordering after normalization - weights = np.random.rand(100) - wn = weight_utils.normalize_max_weight(weights, limit=1) - assert np.array_equal(wn, weights / weights.sum()) - - # Check for epsilon changes - epsilon = 0.01 - weights = np.sort(np.random.rand(100)) - x = weights / weights.sum() - limit = x[-10] - change = epsilon * limit - y = weight_utils.normalize_max_weight(x, limit=limit - change) - z = weight_utils.normalize_max_weight(x, limit=limit + change) - assert np.abs(y - z).sum() < epsilon - - -def test_normalize_with_max_weight__legacy_torch_api_compat( - force_legacy_torch_compat_api, -): - weights = torch.rand(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.01) 
- assert wn.max() <= 0.01 - - weights = torch.zeros(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.01) - assert wn.max() <= 0.01 - - weights = torch.rand(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.02) - assert wn.max() <= 0.02 - - weights = torch.zeros(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.02) - assert wn.max() <= 0.02 - - weights = torch.rand(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.03) - assert wn.max() <= 0.03 - - weights = torch.zeros(1000) - wn = weight_utils.normalize_max_weight(weights, limit=0.03) - assert wn.max() <= 0.03 - - # Check for Limit - limit = 0.001 - weights = torch.rand(2000) - w = weights / weights.sum() - wn = weight_utils.normalize_max_weight(weights, limit=limit) - assert (w.max() >= limit and (limit - wn.max()).abs() < 0.001) or ( - w.max() < limit and wn.max() < limit - ) - - # Check for Zeros - limit = 0.01 - weights = torch.zeros(2000) - wn = weight_utils.normalize_max_weight(weights, limit=limit) - assert wn.max() == 1 / 2000 - - # Check for Ordering after normalization - weights = torch.rand(100) - wn = weight_utils.normalize_max_weight(weights, limit=1) - assert torch.isclose(wn, weights / weights.sum(), atol=1e-08, rtol=0).all() - - # Check for epsilon changes - epsilon = 0.01 - weights, _ = torch.sort(torch.rand(100)) - x = weights / weights.sum() - limit = x[-10] - change = epsilon * limit - y = weight_utils.normalize_max_weight(x, limit=limit - change) - z = weight_utils.normalize_max_weight(x, limit=limit + change) - assert (y - z).abs().sum() < epsilon - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, expected", - [ - ("happy-path-1", 3, [0, 1, 2], [15, 5, 80], np.array([0.15, 0.05, 0.8])), - ("happy-path-2", 4, [1, 3], [50, 50], np.array([0.0, 0.5, 0.0, 0.5])), - ], -) -def test_convert_weight_uids_and_vals_to_tensor_happy_path( - test_id, n, uids, weights, expected -): - # Act - result = 
weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) - - # Assert - assert np.allclose(result, expected), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, subnets, expected", - [ - ( - "happy-path-1", - 3, - [0, 1, 2], - [15, 5, 80], - [0, 1, 2], - torch.tensor([0.15, 0.05, 0.8]), - ), - ( - "happy-path-2", - 3, - [0, 2], - [300, 300], - [0, 1, 2], - torch.tensor([0.5, 0.0, 0.5]), - ), - ], -) -def test_convert_weight_uids_and_vals_to_tensor_happy_path_torch( - test_id, n, uids, weights, subnets, expected, force_legacy_torch_compat_api -): - # Act - result = weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) - - # Assert - assert torch.allclose(result, expected), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, expected", - [ - ("edge_case_empty", 5, [], [], np.zeros(5)), - ("edge_case_single", 1, [0], [100], np.array([1.0])), - ("edge_case_all_zeros", 4, [0, 1, 2, 3], [0, 0, 0, 0], np.zeros(4)), - ], -) -def test_convert_weight_uids_and_vals_to_tensor_edge_cases( - test_id, n, uids, weights, expected -): - # Act - result = weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) - - # Assert - assert np.allclose(result, expected), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, exception", - [ - ("error-case-mismatched-lengths", 3, [0, 1, 3, 4, 5], [10, 20, 30], IndexError), - ("error-case-negative-n", -1, [0, 1], [10, 20], ValueError), - ("error-case-invalid-uids", 3, [0, 3], [10, 20], IndexError), - ], -) -def test_convert_weight_uids_and_vals_to_tensor_error_cases( - test_id, n, uids, weights, exception -): - # Act / Assert - with pytest.raises(exception): - weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, subnets, expected", - [ - ( - "happy-path-1", - 3, - [0, 1, 2], - [15, 5, 80], - [0, 1, 2], - np.array([0.15, 0.05, 0.8]), - ), - ( - 
"happy-path-2", - 3, - [0, 2], - [300, 300], - [0, 1, 2], - np.array([0.5, 0.0, 0.5]), - ), - ], -) -def test_convert_root_weight_uids_and_vals_to_tensor_happy_paths( - test_id, n, uids, weights, subnets, expected -): - # Act - result = weight_utils.convert_root_weight_uids_and_vals_to_tensor( - n, uids, weights, subnets - ) - - # Assert - assert np.allclose(result, expected, atol=1e-4), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, subnets, expected", - [ - ( - "edge-1", - 1, - [0], - [0], - [0], - torch.tensor([0.0]), - ), # Single neuron with zero weight - ( - "edge-2", - 2, - [0, 1], - [0, 0], - [0, 1], - torch.tensor([0.0, 0.0]), - ), # All zero weights - ], -) -def test_convert_root_weight_uids_and_vals_to_tensor_edge_cases( - test_id, n, uids, weights, subnets, expected, force_legacy_torch_compat_api -): - # Act - result = weight_utils.convert_root_weight_uids_and_vals_to_tensor( - n, uids, weights, subnets - ) - - # Assert - assert torch.allclose(result, expected, atol=1e-4), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, subnets, expected", - [ - ( - "edge-1", - 1, - [0], - [0], - [0], - np.array([0.0]), - ), # Single neuron with zero weight - ( - "edge-2", - 2, - [0, 1], - [0, 0], - [0, 1], - np.array([0.0, 0.0]), - ), # All zero weights - ], -) -def test_convert_root_weight_uids_and_vals_to_tensor_edge_cases( - test_id, n, uids, weights, subnets, expected -): - # Act - result = weight_utils.convert_root_weight_uids_and_vals_to_tensor( - n, uids, weights, subnets - ) - - # Assert - assert np.allclose(result, expected, atol=1e-4), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, weights, subnets, exception", - [ - # uid not in subnets - ( - "error-1", - 3, - [1, 3], - [100, 200], - [1, 2], - "The subnet is unavailable at the moment.", - ), - # More uids than subnets - ( - "error-2", - 3, - [1, 2, 3], - [100, 200], - [1], - "The subnet is unavailable at the moment.", 
- ), - ], -) -def test_convert_root_weight_uids_and_vals_to_tensor_error_cases( - test_id, n, uids, weights, subnets, exception, caplog -): - with caplog.at_level(logging.WARNING): - weight_utils.convert_root_weight_uids_and_vals_to_tensor( - n, uids, weights, subnets - ) - - assert any( - exception in record.message and record.levelname == "WARNING" - for record in caplog.records - ) - - -@pytest.mark.parametrize( - "test_id, n, uids, bonds, expected_output", - [ - ( - "happy-path-1", - 5, - [1, 3, 4], - [10, 20, 30], - np.array([0, 10, 0, 20, 30], dtype=np.int64), - ), - ( - "happy-path-2", - 3, - [0, 1, 2], - [7, 8, 9], - np.array([7, 8, 9], dtype=np.int64), - ), - ("happy-path-3", 4, [2], [15], np.array([0, 0, 15, 0], dtype=np.int64)), - ], -) -def test_happy_path(test_id, n, uids, bonds, expected_output): - # Act - result = weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) - - # Assert - assert np.array_equal(result, expected_output), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, bonds, expected_output", - [ - ( - "happy-path-1", - 5, - [1, 3, 4], - [10, 20, 30], - torch.tensor([0, 10, 0, 20, 30], dtype=torch.int64), - ), - ( - "happy-path-2", - 3, - [0, 1, 2], - [7, 8, 9], - torch.tensor([7, 8, 9], dtype=torch.int64), - ), - ("happy-path-3", 4, [2], [15], torch.tensor([0, 0, 15, 0], dtype=torch.int64)), - ], -) -def test_happy_path_torch( - test_id, n, uids, bonds, expected_output, force_legacy_torch_compat_api -): - # Act - result = weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) - - # Assert - assert torch.equal(result, expected_output), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, bonds, expected_output", - [ - ("edge-1", 1, [0], [0], np.array([0], dtype=np.int64)), # Single element - ( - "edge-2", - 10, - [], - [], - np.zeros(10, dtype=np.int64), - ), # Empty uids and bonds - ], -) -def test_edge_cases(test_id, n, uids, bonds, expected_output): - # Act - result = 
weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) - - # Assert - assert np.array_equal(result, expected_output), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, bonds, expected_output", - [ - ("edge-1", 1, [0], [0], torch.tensor([0], dtype=torch.int64)), # Single element - ( - "edge-2", - 10, - [], - [], - torch.zeros(10, dtype=torch.int64), - ), # Empty uids and bonds - ], -) -def test_edge_cases_torch( - test_id, n, uids, bonds, expected_output, force_legacy_torch_compat_api -): - # Act - result = weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) - - # Assert - assert torch.equal(result, expected_output), f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, n, uids, bonds, exception", - [ - ("error-1", 5, [1, 3, 6], [10, 20, 30], IndexError), # uid out of bounds - ("error-2", -1, [0], [10], ValueError), # Negative number of neurons - ], -) -def test_error_cases(test_id, n, uids, bonds, exception): - # Act / Assert - with pytest.raises(exception): - weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) From bee344d5b63465dd20c045883e3f8472b4ed1600 Mon Sep 17 00:00:00 2001 From: Roman Date: Tue, 24 Sep 2024 14:57:23 -0700 Subject: [PATCH 02/11] add btsdk stuff --- .circleci/check_pr_status.sh | 26 + .circleci/config.yml | 359 +++ .coveragerc | 7 + .dockerignore | 21 + .flake8 | 4 + .github/ISSUE_TEMPLATE/bug_report.yaml | 59 + .github/ISSUE_TEMPLATE/feature_request.yaml | 38 + .github/PULL_REQUEST_TEMPLATE/bug_fix.md | 59 + .../PULL_REQUEST_TEMPLATE/feature_change.md | 54 + .../performance_improvement.md | 55 + .github/auto_assign.yml | 7 + .github/dependabot.yml | 8 + .github/pull_request_template.md | 10 + .github/workflows/auto-assign.yml | 15 + .github/workflows/docker_release.yml | 51 + .github/workflows/e2e-subtensor-tests.yaml | 105 + .github/workflows/release.yml | 72 + .gitignore | 216 ++ .test_durations | 268 +++ Dockerfile | 40 + LICENSE | 16 + Makefile | 26 + README.md | 247 
++ VERSION | 1 + bittensor/__init__.py | 53 + bittensor/__main__.py | 21 + bittensor/core/__init__.py | 0 bittensor/core/axon.py | 1521 ++++++++++++ bittensor/core/chain_data/__init__.py | 22 + bittensor/core/chain_data/axon_info.py | 163 ++ bittensor/core/chain_data/delegate_info.py | 105 + .../core/chain_data/delegate_info_lite.py | 29 + bittensor/core/chain_data/ip_info.py | 81 + bittensor/core/chain_data/neuron_info.py | 176 ++ bittensor/core/chain_data/neuron_info_lite.py | 171 ++ bittensor/core/chain_data/prometheus_info.py | 31 + .../core/chain_data/proposal_vote_data.py | 21 + .../chain_data/scheduled_coldkey_swap_info.py | 65 + bittensor/core/chain_data/stake_info.py | 79 + .../core/chain_data/subnet_hyperparameters.py | 112 + bittensor/core/chain_data/subnet_info.py | 103 + bittensor/core/chain_data/utils.py | 291 +++ bittensor/core/config.py | 396 ++++ bittensor/core/dendrite.py | 832 +++++++ bittensor/core/errors.py | 129 ++ bittensor/core/extrinsics/__init__.py | 16 + bittensor/core/extrinsics/commit_weights.py | 274 +++ bittensor/core/extrinsics/prometheus.py | 187 ++ bittensor/core/extrinsics/serving.py | 319 +++ bittensor/core/extrinsics/set_weights.py | 194 ++ bittensor/core/extrinsics/transfer.py | 215 ++ bittensor/core/extrinsics/utils.py | 49 + bittensor/core/metagraph.py | 1299 +++++++++++ bittensor/core/settings.py | 241 ++ bittensor/core/stream.py | 158 ++ bittensor/core/subtensor.py | 1733 ++++++++++++++ bittensor/core/synapse.py | 852 +++++++ bittensor/core/tensor.py | 249 ++ bittensor/core/threadpool.py | 295 +++ bittensor/core/types.py | 38 + bittensor/utils/__init__.py | 279 +++ bittensor/utils/axon_utils.py | 58 + bittensor/utils/balance.py | 268 +++ bittensor/utils/btlogging/__init__.py | 27 + bittensor/utils/btlogging/defines.py | 28 + bittensor/utils/btlogging/format.py | 222 ++ bittensor/utils/btlogging/helpers.py | 88 + bittensor/utils/btlogging/loggingmachine.py | 534 +++++ bittensor/utils/deprecated.py | 150 ++ 
bittensor/utils/mock/__init__.py | 18 + bittensor/utils/mock/subtensor_mock.py | 908 ++++++++ bittensor/utils/networking.py | 199 ++ bittensor/utils/registration.py | 99 + bittensor/utils/subnets.py | 77 + bittensor/utils/version.py | 134 ++ bittensor/utils/weight_utils.py | 414 ++++ contrib/CODE_REVIEW_DOCS.md | 72 + contrib/CONTRIBUTING.md | 299 +++ contrib/DEBUGGING.md | 161 ++ contrib/DEVELOPMENT_WORKFLOW.md | 159 ++ contrib/RELEASE_GUIDELINES.md | 87 + contrib/STYLE.md | 350 +++ contrib/TESTING.md | 94 + docker-compose.yml | 10 + example.env | 5 + mypy.ini | 18 + requirements/btcli.txt | 1 + requirements/cubit.txt | 3 + requirements/dev.txt | 19 + requirements/prod.txt | 23 + requirements/torch.txt | 1 + scripts/check_compatibility.sh | 76 + scripts/check_pre_submit.sh | 18 + scripts/check_requirements_changes.sh | 10 + scripts/create_wallet.sh | 13 + scripts/environments/README.md | 21 + scripts/environments/apple_m1_environment.yml | 272 +++ scripts/install.sh | 298 +++ scripts/post_install_cli.py | 29 + setup.py | 99 + tests/__init__.py | 18 + tests/e2e_tests/__init__.py | 0 tests/e2e_tests/conftest.py | 84 + tests/e2e_tests/test_axon.py | 128 + tests/e2e_tests/test_commit_weights.py | 165 ++ tests/e2e_tests/test_dendrite.py | 136 ++ tests/e2e_tests/test_incentive.py | 184 ++ tests/e2e_tests/test_liquid_alpha.py | 186 ++ tests/e2e_tests/test_metagraph.py | 177 ++ tests/e2e_tests/test_subtensor_functions.py | 152 ++ tests/e2e_tests/test_transfer.py | 52 + tests/e2e_tests/utils/chain_interactions.py | 186 ++ tests/e2e_tests/utils/test_utils.py | 83 + tests/helpers/__init__.py | 34 + tests/helpers/helpers.py | 170 ++ tests/integration_tests/__init__.py | 16 + .../test_metagraph_integration.py | 110 + .../test_subtensor_integration.py | 250 ++ tests/pytest.ini | 3 + tests/unit_tests/__init__.py | 0 tests/unit_tests/conftest.py | 13 + .../extrinsics/test_commit_weights.py | 133 ++ tests/unit_tests/extrinsics/test_init.py | 114 + 
.../unit_tests/extrinsics/test_prometheus.py | 167 ++ tests/unit_tests/extrinsics/test_serving.py | 401 ++++ .../unit_tests/extrinsics/test_set_weights.py | 278 +++ tests/unit_tests/extrinsics/test_transfer.py | 142 ++ tests/unit_tests/factories/__init__.py | 0 tests/unit_tests/factories/neuron_factory.py | 63 + tests/unit_tests/test_axon.py | 781 +++++++ tests/unit_tests/test_chain_data.py | 479 ++++ tests/unit_tests/test_dendrite.py | 416 ++++ tests/unit_tests/test_deprecated.py | 51 + tests/unit_tests/test_logging.py | 199 ++ tests/unit_tests/test_metagraph.py | 176 ++ tests/unit_tests/test_subnets.py | 82 + tests/unit_tests/test_subtensor.py | 2053 +++++++++++++++++ tests/unit_tests/test_synapse.py | 269 +++ tests/unit_tests/test_tensor.py | 245 ++ tests/unit_tests/utils/__init__.py | 0 tests/unit_tests/utils/test_balance.py | 520 +++++ tests/unit_tests/utils/test_init.py | 27 + tests/unit_tests/utils/test_networking.py | 167 ++ tests/unit_tests/utils/test_registration.py | 62 + tests/unit_tests/utils/test_utils.py | 169 ++ tests/unit_tests/utils/test_version.py | 171 ++ tests/unit_tests/utils/test_weight_utils.py | 681 ++++++ 147 files changed, 27918 insertions(+) create mode 100755 .circleci/check_pr_status.sh create mode 100644 .circleci/config.yml create mode 100644 .coveragerc create mode 100644 .dockerignore create mode 100644 .flake8 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yaml create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yaml create mode 100644 .github/PULL_REQUEST_TEMPLATE/bug_fix.md create mode 100644 .github/PULL_REQUEST_TEMPLATE/feature_change.md create mode 100644 .github/PULL_REQUEST_TEMPLATE/performance_improvement.md create mode 100644 .github/auto_assign.yml create mode 100644 .github/dependabot.yml create mode 100644 .github/pull_request_template.md create mode 100644 .github/workflows/auto-assign.yml create mode 100644 .github/workflows/docker_release.yml create mode 100644 .github/workflows/e2e-subtensor-tests.yaml 
create mode 100644 .github/workflows/release.yml create mode 100644 .gitignore create mode 100644 .test_durations create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 VERSION create mode 100644 bittensor/__init__.py create mode 100644 bittensor/__main__.py create mode 100644 bittensor/core/__init__.py create mode 100644 bittensor/core/axon.py create mode 100644 bittensor/core/chain_data/__init__.py create mode 100644 bittensor/core/chain_data/axon_info.py create mode 100644 bittensor/core/chain_data/delegate_info.py create mode 100644 bittensor/core/chain_data/delegate_info_lite.py create mode 100644 bittensor/core/chain_data/ip_info.py create mode 100644 bittensor/core/chain_data/neuron_info.py create mode 100644 bittensor/core/chain_data/neuron_info_lite.py create mode 100644 bittensor/core/chain_data/prometheus_info.py create mode 100644 bittensor/core/chain_data/proposal_vote_data.py create mode 100644 bittensor/core/chain_data/scheduled_coldkey_swap_info.py create mode 100644 bittensor/core/chain_data/stake_info.py create mode 100644 bittensor/core/chain_data/subnet_hyperparameters.py create mode 100644 bittensor/core/chain_data/subnet_info.py create mode 100644 bittensor/core/chain_data/utils.py create mode 100644 bittensor/core/config.py create mode 100644 bittensor/core/dendrite.py create mode 100644 bittensor/core/errors.py create mode 100644 bittensor/core/extrinsics/__init__.py create mode 100644 bittensor/core/extrinsics/commit_weights.py create mode 100644 bittensor/core/extrinsics/prometheus.py create mode 100644 bittensor/core/extrinsics/serving.py create mode 100644 bittensor/core/extrinsics/set_weights.py create mode 100644 bittensor/core/extrinsics/transfer.py create mode 100644 bittensor/core/extrinsics/utils.py create mode 100644 bittensor/core/metagraph.py create mode 100644 bittensor/core/settings.py create mode 100644 bittensor/core/stream.py create mode 100644 
bittensor/core/subtensor.py create mode 100644 bittensor/core/synapse.py create mode 100644 bittensor/core/tensor.py create mode 100644 bittensor/core/threadpool.py create mode 100644 bittensor/core/types.py create mode 100644 bittensor/utils/__init__.py create mode 100644 bittensor/utils/axon_utils.py create mode 100644 bittensor/utils/balance.py create mode 100644 bittensor/utils/btlogging/__init__.py create mode 100644 bittensor/utils/btlogging/defines.py create mode 100644 bittensor/utils/btlogging/format.py create mode 100644 bittensor/utils/btlogging/helpers.py create mode 100644 bittensor/utils/btlogging/loggingmachine.py create mode 100644 bittensor/utils/deprecated.py create mode 100644 bittensor/utils/mock/__init__.py create mode 100644 bittensor/utils/mock/subtensor_mock.py create mode 100644 bittensor/utils/networking.py create mode 100644 bittensor/utils/registration.py create mode 100644 bittensor/utils/subnets.py create mode 100644 bittensor/utils/version.py create mode 100644 bittensor/utils/weight_utils.py create mode 100644 contrib/CODE_REVIEW_DOCS.md create mode 100644 contrib/CONTRIBUTING.md create mode 100644 contrib/DEBUGGING.md create mode 100644 contrib/DEVELOPMENT_WORKFLOW.md create mode 100644 contrib/RELEASE_GUIDELINES.md create mode 100644 contrib/STYLE.md create mode 100644 contrib/TESTING.md create mode 100644 docker-compose.yml create mode 100644 example.env create mode 100644 mypy.ini create mode 100644 requirements/btcli.txt create mode 100644 requirements/cubit.txt create mode 100644 requirements/dev.txt create mode 100644 requirements/prod.txt create mode 100644 requirements/torch.txt create mode 100755 scripts/check_compatibility.sh create mode 100755 scripts/check_pre_submit.sh create mode 100755 scripts/check_requirements_changes.sh create mode 100755 scripts/create_wallet.sh create mode 100644 scripts/environments/README.md create mode 100644 scripts/environments/apple_m1_environment.yml create mode 100755 scripts/install.sh 
create mode 100644 scripts/post_install_cli.py create mode 100644 setup.py create mode 100644 tests/__init__.py create mode 100644 tests/e2e_tests/__init__.py create mode 100644 tests/e2e_tests/conftest.py create mode 100644 tests/e2e_tests/test_axon.py create mode 100644 tests/e2e_tests/test_commit_weights.py create mode 100644 tests/e2e_tests/test_dendrite.py create mode 100644 tests/e2e_tests/test_incentive.py create mode 100644 tests/e2e_tests/test_liquid_alpha.py create mode 100644 tests/e2e_tests/test_metagraph.py create mode 100644 tests/e2e_tests/test_subtensor_functions.py create mode 100644 tests/e2e_tests/test_transfer.py create mode 100644 tests/e2e_tests/utils/chain_interactions.py create mode 100644 tests/e2e_tests/utils/test_utils.py create mode 100644 tests/helpers/__init__.py create mode 100644 tests/helpers/helpers.py create mode 100644 tests/integration_tests/__init__.py create mode 100644 tests/integration_tests/test_metagraph_integration.py create mode 100644 tests/integration_tests/test_subtensor_integration.py create mode 100644 tests/pytest.ini create mode 100644 tests/unit_tests/__init__.py create mode 100644 tests/unit_tests/conftest.py create mode 100644 tests/unit_tests/extrinsics/test_commit_weights.py create mode 100644 tests/unit_tests/extrinsics/test_init.py create mode 100644 tests/unit_tests/extrinsics/test_prometheus.py create mode 100644 tests/unit_tests/extrinsics/test_serving.py create mode 100644 tests/unit_tests/extrinsics/test_set_weights.py create mode 100644 tests/unit_tests/extrinsics/test_transfer.py create mode 100644 tests/unit_tests/factories/__init__.py create mode 100644 tests/unit_tests/factories/neuron_factory.py create mode 100644 tests/unit_tests/test_axon.py create mode 100644 tests/unit_tests/test_chain_data.py create mode 100644 tests/unit_tests/test_dendrite.py create mode 100644 tests/unit_tests/test_deprecated.py create mode 100644 tests/unit_tests/test_logging.py create mode 100644 
tests/unit_tests/test_metagraph.py create mode 100644 tests/unit_tests/test_subnets.py create mode 100644 tests/unit_tests/test_subtensor.py create mode 100644 tests/unit_tests/test_synapse.py create mode 100644 tests/unit_tests/test_tensor.py create mode 100644 tests/unit_tests/utils/__init__.py create mode 100644 tests/unit_tests/utils/test_balance.py create mode 100644 tests/unit_tests/utils/test_init.py create mode 100644 tests/unit_tests/utils/test_networking.py create mode 100644 tests/unit_tests/utils/test_registration.py create mode 100644 tests/unit_tests/utils/test_utils.py create mode 100644 tests/unit_tests/utils/test_version.py create mode 100644 tests/unit_tests/utils/test_weight_utils.py diff --git a/.circleci/check_pr_status.sh b/.circleci/check_pr_status.sh new file mode 100755 index 0000000000..4b31a29698 --- /dev/null +++ b/.circleci/check_pr_status.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Extract the repository owner +REPO_OWNER=$(echo $CIRCLE_PULL_REQUEST | awk -F'/' '{print $(NF-3)}') + +# Extract the repository name +REPO_NAME=$(echo $CIRCLE_PULL_REQUEST | awk -F'/' '{print $(NF-2)}') + +# Extract the pull request number +PR_NUMBER=$(echo $CIRCLE_PULL_REQUEST | awk -F'/' '{print $NF}') + + +PR_DETAILS=$(curl -s \ + "https://api.github.com/repos/$REPO_OWNER/$REPO_NAME/pulls/$PR_NUMBER") + + +IS_DRAFT=$(echo "$PR_DETAILS" | jq -r .draft) +echo $IS_DRAFT + +if [ "$IS_DRAFT" == "true" ]; then + echo "This PR is a draft. Skipping the workflow." + exit 1 +else + echo "This PR is not a draft. Proceeding with the workflow." 
+ exit 0 +fi diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000..90f49d54eb --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,359 @@ +version: 2.1 + +orbs: + python: circleci/python@2.1.1 + python-lib: dialogue/python-lib@0.1.55 + +jobs: + check-if-pr-is-draft: + docker: + - image: cimg/python:3.10 + steps: + - checkout + - run: + name: Install jq + command: sudo apt-get update && sudo apt-get install -y jq + - run: + name: Check if PR is a draft + command: .circleci/check_pr_status.sh + + ruff: + resource_class: small + parameters: + python-version: + type: string + docker: + - image: cimg/python:<< parameters.python-version >> + + steps: + - checkout + + - restore_cache: + name: Restore cached ruff venv + keys: + - v2-pypi-py-ruff-<< parameters.python-version >> + + - run: + name: Update & Activate ruff venv + command: | + python -m venv .venv + . .venv/bin/activate + python -m pip install --upgrade pip + pip install ruff -c requirements/dev.txt + + - save_cache: + name: Save cached ruff venv + paths: + - ".venv/" + key: v2-pypi-py-ruff-<< parameters.python-version >> + + - run: + name: Ruff format check + command: | + . .venv/bin/activate + ruff format --diff . + + check_compatibility: + parameters: + python_version: + type: string + docker: + - image: cimg/python:3.10 + steps: + - checkout + - run: + name: Check if requirements files have changed + command: ./scripts/check_requirements_changes.sh + - run: + name: Install dependencies and Check compatibility + command: | + if [ "$REQUIREMENTS_CHANGED" == "true" ]; then + sudo apt-get update + sudo apt-get install -y jq curl + ./scripts/check_compatibility.sh << parameters.python_version >> + else + echo "Skipping compatibility checks..." 
+ fi + + build-and-test: + resource_class: medium + parallelism: 2 + parameters: + python-version: + type: string + docker: + - image: cimg/python:<< parameters.python-version >> + + steps: + - checkout + + - restore_cache: + name: Restore cached venv + keys: + - v2-pypi-py<< parameters.python-version >>-{{ checksum "requirements/prod.txt" }}+{{ checksum "requirements/dev.txt" }} + - v2-pypi-py<< parameters.python-version >> + + - run: + name: Update & Activate venv + command: | + python -m venv .venv + . .venv/bin/activate + python -m pip install --upgrade pip + python -m pip install '.[dev]' + + - save_cache: + name: Save cached venv + paths: + - "env/" + key: v2-pypi-py<< parameters.python-version >>-{{ checksum "requirements/prod.txt" }}+{{ checksum "requirements/dev.txt" }} + + - run: + name: Install Bittensor + command: | + . .venv/bin/activate + pip install -e '.[dev]' + + - run: + name: Instantiate Mock Wallet + command: | + . .venv/bin/activate + ./scripts/create_wallet.sh + + - run: + name: Unit Tests + no_output_timeout: 20m + command: | + . .venv/bin/activate + export PYTHONUNBUFFERED=1 + pytest -n2 --reruns 3 --durations=0 --verbose --junitxml=test-results/unit_tests.xml \ + --cov=. --cov-append --cov-config .coveragerc \ + --splits $CIRCLE_NODE_TOTAL --group $((CIRCLE_NODE_INDEX + 1)) \ + --splitting-algorithm duration_based_chunks --store-durations --durations-path .test_durations \ + tests/unit_tests/ + + - run: + name: Integration Tests + no_output_timeout: 30m + command: | + . .venv/bin/activate + export PYTHONUNBUFFERED=1 + pytest -n2 --reruns 3 --reruns-delay 15 --durations=0 --verbose --junitxml=test-results/integration_tests.xml \ + --cov=. 
--cov-append --cov-config .coveragerc \ + --splits $CIRCLE_NODE_TOTAL --group $((CIRCLE_NODE_INDEX + 1)) \ + --splitting-algorithm duration_based_chunks --store-durations --durations-path .test_durations \ + tests/integration_tests/ + + - store_test_results: + path: test-results + - store_artifacts: + path: test-results + + + #- when: + #condition: + #equal: ["3.10.5", << parameters.python-version >> ] + #steps: + #- run: + #name: Upload Coverage + #command: | + #. .venv/bin/activate && coveralls + #env: + #CI_NAME: circleci + #CI_BUILD_NUMBER: $CIRCLE_BUILD_NUM + #CI_BUILD_URL: $CIRCLE_BUILD_URL + #CI_BRANCH: $CIRCLE_BRANCH + #CI_JOB_ID: $CIRCLE_NODE_INDEX + #COVERALLS_PARALLEL: true + + + lint-and-type-check: + resource_class: medium + parallelism: 2 + parameters: + python-version: + type: string + docker: + - image: cimg/python:<< parameters.python-version >> + + steps: + - checkout + + - restore_cache: + name: Restore cached venv + keys: + - v2-pypi-py<< parameters.python-version >>-{{ checksum "requirements/prod.txt" }}+{{ checksum "requirements/dev.txt" }} + - v2-pypi-py<< parameters.python-version >> + + - run: + name: Update & Activate venv + command: | + python -m venv .venv + . .venv/bin/activate + python -m pip install --upgrade pip + python -m pip install '.[dev]' + pip install flake8 + + - save_cache: + name: Save cached venv + paths: + - "env/" + key: v2-pypi-py<< parameters.python-version >>-{{ checksum "requirements/prod.txt" }}+{{ checksum "requirements/dev.txt" }} + + - run: + name: Install Bittensor + command: | + . .venv/bin/activate + pip install -e '.[dev]' + + - run: + name: Lint with flake8 + command: | + . .venv/bin/activate + python -m flake8 bittensor/ --count + + - run: + name: Type check with mypy + command: | + . 
.venv/bin/activate + python -m mypy --ignore-missing-imports bittensor/ + + unit-tests-all-python-versions: + docker: + - image: cimg/python:3.10 + steps: + - run: + name: Placeholder command + command: echo "Success, only runs if all python versions ran" + + coveralls: + docker: + - image: cimg/python:3.10 + steps: + - run: + name: Combine Coverage + command: | + pip3 install --upgrade coveralls + coveralls --finish --rcfile .coveragerc || echo "Failed to upload coverage" + + check-version-updated: + docker: + - image: cimg/python:3.10 + steps: + - checkout + + - run: + name: Version is updated + command: | + [[ $(git diff-tree --no-commit-id --name-only -r HEAD..master | grep bittensor/__init__.py | wc -l) == 1 ]] && echo "bittensor/__init__.py has changed" + [[ $(git diff-tree --no-commit-id --name-only -r HEAD..master | grep VERSION | wc -l) == 1 ]] && echo "VERSION has changed" + + check-changelog-updated: + docker: + - image: cimg/python:3.10 + steps: + - checkout + - run: + name: File CHANGELOG.md is updated + command: | + [[ $(git diff-tree --no-commit-id --name-only -r HEAD..master | grep CHANGELOG.md | wc -l) == 1 ]] && echo "CHANGELOG.md has changed" + + check-version-not-released: + docker: + - image: cimg/python:3.10 + steps: + - checkout + - run: + name: Git tag does not exist for the current version + command: | + [[ $(git tag | grep `cat VERSION` | wc -l) == 0 ]] && echo "VERSION is not a tag" + - run: + name: Pypi package 'bittensor' does not exist for the current version + command: | + [[ $(pip index versions bittensor | grep `cat VERSION` | wc -l) == 0 ]] && echo "Pypi package 'bittensor' does not exist" + - run: + name: Docker image 'opentensorfdn/bittensor' does not exist for the current version + command: | + [[ $(docker manifest inspect opentensorfdn/bittensor:`cat VERSION` > /dev/null 2> /dev/null ; echo $?) 
== 1 ]] && echo "Docker image 'opentensorfdn/bittensor:`cat VERSION`' does not exist in dockerhub" + + release-dry-run: + docker: + - image: cimg/python:3.10 + steps: + - checkout + - setup_remote_docker: + version: 20.10.14 + docker_layer_caching: true + - run: + name: Executing release script + command: | + ./scripts/release/release.sh --github-token ${GH_API_ACCESS_TOKEN} + +workflows: + compatibility_checks: + jobs: + - check_compatibility: + python_version: "3.9" + name: check-compatibility-3.9 + - check_compatibility: + python_version: "3.10" + name: check-compatibility-3.10 + - check_compatibility: + python_version: "3.11" + name: check-compatibility-3.11 + + pr-requirements: + jobs: + - check-if-pr-is-draft + - ruff: + python-version: "3.9.13" + requires: + - check-if-pr-is-draft + - build-and-test: + matrix: + parameters: + python-version: ["3.9.13", "3.10.6", "3.11.4"] + requires: + - check-if-pr-is-draft + - unit-tests-all-python-versions: + requires: + - build-and-test + - lint-and-type-check: + matrix: + parameters: + python-version: ["3.9.13", "3.10.6", "3.11.4"] + requires: + - check-if-pr-is-draft + #- coveralls: + #requires: + #- build-and-test + + release-branches-requirements: + jobs: + - check-version-updated: + filters: + branches: + only: + - /^(release|hotfix)/.*/ + - check-changelog-updated: + filters: + branches: + only: + - /^(release|hotfix)/.*/ + - release-dry-run: + filters: + branches: + only: + - /^(release|hotfix)/.*/ + + release-requirements: + jobs: + - check-version-not-released: + filters: + branches: + only: + - master + - release-dry-run: + filters: + branches: + only: + - master diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000000..b0e422abef --- /dev/null +++ b/.coveragerc @@ -0,0 +1,7 @@ +[run] +omit = + ./nuclei/* + ./routers/* + ./setup.py + ./tests/* + ./env/* diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..eabfb03301 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,21 
@@ +**/data/ +**/*.log +**/*.png +**/*.pstats +**/*.ipynb +**/bittensor.egg-info/* +**/lib/* +**/build/* +**/dist/* +**/runs/* +**/env/* +**/venv/* +**/tmp/* +**/test_results/* +**/__pycache__/* +**/.circleci +**/.git +**/.github +**/.hypothesis +**/.vscode +**/.gitignore diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000..6b2eaa0333 --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +max-line-length = 120 +exclude = .git,__pycache__, __init__.py, docs/source/conf.py,old,build,dist,venv,.venv,.tox +select = E9,F63,F7,F82,F401 diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 0000000000..5e875de9a0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,59 @@ +name: Bug report +description: Create a report to help us improve +labels: [bug] +assignees: [] + +body: + - type: textarea + id: bug-description + attributes: + label: Describe the bug + description: A clear and concise description of what the bug is. + validations: + required: true + + - type: textarea + id: reproduce + attributes: + label: To Reproduce + description: Steps to reproduce the behavior. + placeholder: | + 1. Go to '...' + 2. Run command '...' + 3. Scroll down to '....' + 4. See error + validations: + required: true + + - type: textarea + id: expected-behavior + attributes: + label: Expected behavior + description: A clear and concise description of what you expected to happen. + validations: + required: true + + - type: textarea + id: screenshots + attributes: + label: Screenshots + description: If applicable, add screenshots to help explain your problem. + validations: + required: false + + - type: input + id: environment + attributes: + label: Environment + description: Please specify your OS and Distro, and Bittensor Version. + placeholder: "OS and Distro: [e.g. Linux Ubuntu, Linux Fedora, etc.], Bittensor Version [e.g. 
22]" + validations: + required: true + + - type: textarea + id: additional-context + attributes: + label: Additional context + description: Add any other context about the problem here. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml new file mode 100644 index 0000000000..b9cd275add --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -0,0 +1,38 @@ +name: Feature request +description: Suggest an idea for this project +labels: [feature] +assignees: [] + +body: + - type: textarea + id: problem-description + attributes: + label: Is your feature request related to a problem? Please describe. + description: A clear and concise description of what the problem is. + placeholder: "Ex. I'm always frustrated when [...]" + validations: + required: true + + - type: textarea + id: solution + attributes: + label: Describe the solution you'd like + description: A clear and concise description of what you want to happen. + validations: + required: true + + - type: textarea + id: alternatives + attributes: + label: Describe alternatives you've considered + description: A clear and concise description of any alternative solutions or features you've considered. + validations: + required: false + + - type: textarea + id: additional-context + attributes: + label: Additional context + description: Add any other context or screenshots about the feature request here. 
+ validations: + required: false diff --git a/.github/PULL_REQUEST_TEMPLATE/bug_fix.md b/.github/PULL_REQUEST_TEMPLATE/bug_fix.md new file mode 100644 index 0000000000..8bf781b532 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/bug_fix.md @@ -0,0 +1,59 @@ + + +### Bug + + + +### Description of the Change + + + +### Alternate Designs + + + +### Possible Drawbacks + + + +### Verification Process + + + +### Release Notes + + \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE/feature_change.md b/.github/PULL_REQUEST_TEMPLATE/feature_change.md new file mode 100644 index 0000000000..0b29a822b3 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/feature_change.md @@ -0,0 +1,54 @@ +### Requirements for Adding, Changing, or Removing a Feature + +* Fill out the template below. Any pull request that does not include enough information to be reviewed in a timely manner may be closed at the maintainers' discretion. +* The pull request must contribute a change that has been endorsed by the maintainer team. See details in the template below. +* The pull request must update the test suite to exercise the updated functionality. +* After you create the pull request, all status checks must be pass before a maintainer reviews your contribution. + +### Description of the Change + + + +### Alternate Designs + + + +### Possible Drawbacks + + + +### Verification Process + + + +### Release Notes + + \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE/performance_improvement.md b/.github/PULL_REQUEST_TEMPLATE/performance_improvement.md new file mode 100644 index 0000000000..96e18c9d29 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/performance_improvement.md @@ -0,0 +1,55 @@ +### Requirements for Contributing a Performance Improvement + +* Fill out the template below. Any pull request that does not include enough information to be reviewed in a timely manner may be closed at the maintainers' discretion. 
+* The pull request must only affect performance of existing functionality +* After you create the pull request, all status checks must be pass before a maintainer reviews your contribution. + +### Description of the Change + + + +### Quantitative Performance Benefits + + + +### Possible Drawbacks + + + +### Verification Process + + + +### Applicable Issues + + + +### Release Notes + + \ No newline at end of file diff --git a/.github/auto_assign.yml b/.github/auto_assign.yml new file mode 100644 index 0000000000..900e2ceb85 --- /dev/null +++ b/.github/auto_assign.yml @@ -0,0 +1,7 @@ +addReviewers: true + +# A list of team slugs to add as assignees +reviewers: + - opentensor/cortex + +numberOfReviewers: 0 \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..adff4d0aab --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,8 @@ +version: 2 +updates: + - package-ecosystem: "pip" + directory: "" + file: "requirements/prod.txt" + schedule: + interval: "daily" + open-pull-requests-limit: 0 # Only security updates will be opened as PRs diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..4a5da46aee --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,10 @@ +Welcome! + +Due to [GitHub limitations](https://github.com/orgs/community/discussions/4620), +please switch to **Preview** for links to render properly. + +Please choose the right template for your pull request: + +- 🐛 Are you fixing a bug? [Bug fix](?template=bug_fix.md) +- 📈 Are you improving performance? [Performance improvement](?template=performance_improvement.md) +- đŸ’» Are you changing functionality? 
[Feature change](?template=feature_change.md) diff --git a/.github/workflows/auto-assign.yml b/.github/workflows/auto-assign.yml new file mode 100644 index 0000000000..3a952f91b8 --- /dev/null +++ b/.github/workflows/auto-assign.yml @@ -0,0 +1,15 @@ +name: Auto Assign Cortex to Pull Requests + +on: + pull_request: + types: [opened, reopened] + +jobs: + auto-assign: + runs-on: ubuntu-latest + steps: + - name: Auto-assign Cortex Team + uses: kentaro-m/auto-assign-action@v1.2.4 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + configuration-path: .github/auto_assign.yml \ No newline at end of file diff --git a/.github/workflows/docker_release.yml b/.github/workflows/docker_release.yml new file mode 100644 index 0000000000..dbb6c3bab8 --- /dev/null +++ b/.github/workflows/docker_release.yml @@ -0,0 +1,51 @@ +name: Build and Push Docker Image +# https://github.com/sigstore/cosign +on: + workflow_dispatch: + inputs: + tag: + description: 'Docker image tag' + required: true + default: 'latest' + +jobs: + build-and-push: + runs-on: ubuntu-latest + + permissions: + contents: read + id-token: write + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Install cosign + uses: sigstore/cosign-installer@v3 + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build and push Docker image + uses: docker/build-push-action@v4 + with: + context: . 
+ push: true + tags: | + opentensorfdn/bittensor:${{ github.event.inputs.tag }} + opentensorfdn/bittensor:latest + provenance: false + + - name: Sign the images with GitHub OIDC Token + env: + DIGEST: ${{ steps.build.outputs.digest }} + TAGS: ${{ steps.build.outputs.tags }} + run: | + echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST} \ No newline at end of file diff --git a/.github/workflows/e2e-subtensor-tests.yaml b/.github/workflows/e2e-subtensor-tests.yaml new file mode 100644 index 0000000000..0bc467a94d --- /dev/null +++ b/.github/workflows/e2e-subtensor-tests.yaml @@ -0,0 +1,105 @@ +name: E2E Subtensor Tests + +concurrency: + group: e2e-subtensor-${{ github.ref }} + cancel-in-progress: true + +on: + push: + branches: [main, development, staging] + + pull_request: + branches: [main, development, staging] + types: [ opened, synchronize, reopened, ready_for_review ] + + workflow_dispatch: + inputs: + verbose: + description: "Output more information when triggered manually" + required: false + default: "" + +env: + CARGO_TERM_COLOR: always + VERBOSE: ${{ github.event.inputs.verbose }} + +# job to run tests in parallel +jobs: + # Job to find all test files + find-tests: + runs-on: ubuntu-latest + if: ${{ github.event_name != 'pull_request' || github.event.pull_request.draft == false }} + outputs: + test-files: ${{ steps.get-tests.outputs.test-files }} + steps: + - name: Check-out repository under $GITHUB_WORKSPACE + uses: actions/checkout@v2 + + - name: Find test files + id: get-tests + run: | + test_files=$(find tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. 
!= ""))') + echo "::set-output name=test-files::$test_files" + shell: bash + + # Job to run tests in parallel + run: + needs: find-tests + runs-on: SubtensorCI + timeout-minutes: 45 + strategy: + fail-fast: false # Allow other matrix jobs to run even if this job fails + max-parallel: 8 # Set the maximum number of parallel jobs + matrix: + rust-branch: + - nightly-2024-03-05 + rust-target: + - x86_64-unknown-linux-gnu + os: + - ubuntu-latest + test-file: ${{ fromJson(needs.find-tests.outputs.test-files) }} + env: + RELEASE_NAME: development + RUSTV: ${{ matrix.rust-branch }} + RUST_BACKTRACE: full + RUST_BIN_DIR: target/${{ matrix.rust-target }} + TARGET: ${{ matrix.rust-target }} + steps: + - name: Check-out repository under $GITHUB_WORKSPACE + uses: actions/checkout@v2 + + - name: Install dependencies + run: | + sudo apt-get update && + sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler + + - name: Install Rust ${{ matrix.rust-branch }} + uses: actions-rs/toolchain@v1.0.6 + with: + toolchain: ${{ matrix.rust-branch }} + components: rustfmt + profile: minimal + + - name: Add wasm32-unknown-unknown target + run: | + rustup target add wasm32-unknown-unknown --toolchain stable-x86_64-unknown-linux-gnu + rustup component add rust-src --toolchain stable-x86_64-unknown-linux-gnu + + - name: Clone subtensor repo + run: git clone https://github.com/opentensor/subtensor.git + + - name: Setup subtensor repo + working-directory: ${{ github.workspace }}/subtensor + run: git checkout testnet + + - name: Run tests + run: | + python3 -m pip install -e .[dev] pytest + LOCALNET_SH_PATH="${{ github.workspace }}/subtensor/scripts/localnet.sh" pytest ${{ matrix.test-file }} -s + + - name: Retry failed tests + if: failure() + run: | + sleep 10 + python3 -m pip install -e .[dev] pytest + LOCALNET_SH_PATH="${{ github.workspace }}/subtensor/scripts/localnet.sh" pytest ${{ matrix.test-file }} -s diff --git a/.github/workflows/release.yml 
b/.github/workflows/release.yml new file mode 100644 index 0000000000..2cdfe5dfa0 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,72 @@ +name: Build and Publish Python Package + +on: + workflow_dispatch: + inputs: + version: + description: 'Version to release' + required: true + type: string + +jobs: + build: + name: Build Python distribution + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build wheel twine + + - name: Build package + run: python setup.py sdist bdist_wheel + + - name: Check if package version already exists + run: | + PACKAGE_NAME=$(python setup.py --name) + PACKAGE_VERSION=${{ github.event.inputs.version }} + if twine check dist/*; then + if pip install $PACKAGE_NAME==$PACKAGE_VERSION; then + echo "Error: Version $PACKAGE_VERSION of $PACKAGE_NAME already exists on PyPI" + exit 1 + else + echo "Version $PACKAGE_VERSION of $PACKAGE_NAME does not exist on PyPI. Proceeding with upload." + fi + else + echo "Error: Twine check failed." + exit 1 + fi + + - name: Upload artifact + uses: actions/upload-artifact@v3 + with: + name: dist + path: dist/ + + approve-and-publish: + needs: build + runs-on: ubuntu-latest + environment: release + permissions: + contents: read + id-token: write + + steps: + - name: Download artifact + uses: actions/download-artifact@v3 + with: + name: dist + path: dist/ + + - name: Publish package distributions to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + verbose: true + print-hash: true \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..5cc3b79913 --- /dev/null +++ b/.gitignore @@ -0,0 +1,216 @@ +# Byte-compiled / optimized / DLL files +**/__pycache__/ +*.py[cod] +*$py.class +*.pyc + +# Remove notebooks. 
+*.ipynb + +# weigths and biases +wandb/ + +*.csv +*.torch +*.pt +*.log + +# runs/data/models/logs/~ +data/ +**/data/ + +# C extensions +*.so + +# IDE +*.idea/ + +# VSCODE +.vscode/ + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ +# Generated by Cargo +# will have compiled files and executables +**/target/ +# These are backup files generated by rustfmt +**/*.rs.bk + +.DS_Store + +# The cache for docker container dependency +.cargo + +# The cache for chain data in container +.local + +# State folder for all neurons. +**/data/* +!data/.gitkeep + +# misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +# PIPY Stuff +bittensor.egg-info +bittensor*.egg +bdist.* + +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +**/build/* +**/dist/* +**/runs/* +**/env/* +**/data/* +**/.data/* +**/tmp/* + +**/.bash_history +**/*.xml +**/*.pstats +**/*.png + +# Replicate library +**/.replicate +replicate.yaml +**/run.sh + +# Notebooks +*.ipynb + +tests/zombienet/bin/**/* \ No newline at end of file diff --git a/.test_durations b/.test_durations new file mode 100644 index 0000000000..8cb7d74bff --- /dev/null +++ b/.test_durations @@ -0,0 +1,268 @@ +{ + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_delegate_stake": 32.565206749999994, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_inspect": 2.0870491260000037, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_metagraph": 17.437785333, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_neuron_run_reregister_false": 35.75446520799999, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_nominate": 
38.171487959, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview": 54.78253583300001, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_all": 303.709275458, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_no_wallet": 33.569985001, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_not_in_first_subnet": 7.832046707999993, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_hotkeys_config": 1.235335959000004, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_sort_by_bad_column_name": 34.20312183400001, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_sort_by_config": 1.4365408759999951, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_sort_order_config": 1.4505757079999952, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_sort_order_config_bad_sort_type": 34.18927604199999, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_with_width_config": 1.6561556670000002, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_without_hotkeys_config": 1.2479347909999987, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_without_sort_by_config": 34.193473041, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_without_sort_order_config": 1.436726291999996, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_overview_without_width_config": 1.449721043000011, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_recycle_register": 48.5383515, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_register": 6.655044251, + 
"tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_set_weights": 0.006143250000008038, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake": 44.89659891599999, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_all_hotkeys": 31.83300620899999, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_exclude_hotkeys_from_all": 0.0015482090000062954, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_multiple_hotkeys_max_stake": 0.0011364169999907858, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_multiple_hotkeys_max_stake_not_enough_balance": 0.0009022089999959348, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_single_hotkey_max_stake": 0.0009031669999970404, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_single_hotkey_max_stake_enough_stake": 0.0012163340000057588, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_single_hotkey_max_stake_not_enough_balance": 0.0009654589999996688, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_stake_with_specific_hotkeys": 357.5746072910001, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_transfer": 16.976931332999996, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_transfer_not_enough_balance": 22.429711792000006, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_undelegate_stake": 27.56590779199999, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_unstake_with_all_hotkeys": 38.311913373, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_unstake_with_exclude_hotkeys_from_all": 0.0018990010000123903, + 
"tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_unstake_with_multiple_hotkeys_max_stake": 0.0010086670000006848, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkAndConfig::test_unstake_with_specific_hotkeys": 0.0012716660000009483, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_delegate": 0.0012134169999740152, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_list_delegates": 12.917025874999979, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_list_subnets": 0.32005762600000764, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_run_reregister_false": 2.500768667000017, + "tests/integration_tests/test_cli.py::TestCLIWithNetworkUsingArgs::test_run_synapse_all": 8.177792832999984, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_btcli_help": 0.05371037599999795, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_check_configs": 0.5839849989999948, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_list": 0.015767583999995338, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_list_no_wallet": 0.004536540000003697, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_new_coldkey": 0.005761207000013258, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_new_hotkey": 0.003966625999993312, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_regen_coldkey": 0.00497241600000109, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_regen_coldkeypub": 0.00346216599999849, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_regen_hotkey": 0.004310167000014076, + "tests/integration_tests/test_cli_no_network.py::TestCLINoNetwork::test_register_cuda_use_cuda_flag": 2.813618584000004, + 
"tests/integration_tests/test_dataset.py::test_change_data_size": 9.975283208999997, + "tests/integration_tests/test_dataset.py::test_construct_text_corpus": 5.504439667999989, + "tests/integration_tests/test_dataset.py::test_fail_IPFS_server": 2.991185999999985, + "tests/integration_tests/test_dataset.py::test_mock": 0.11688258300000598, + "tests/integration_tests/test_dataset.py::test_mock_function": 0.11185374999999453, + "tests/integration_tests/test_dataset.py::test_next": 5.809825165999982, + "tests/integration_tests/test_dataset.py::test_text_dataset": 0.003949084000012704, + "tests/integration_tests/test_dendrite.py::test_dendrite_backoff": 0.3834034169999967, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor": 0.005605251000005751, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_endpoint_len_error": 0.0010508339999972804, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_endpoint_type_error": 0.0009945420000008198, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_input_len_error": 0.0010635420000113527, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_mismatch_len_error": 0.0009768319999921005, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_shape_error": 0.0010397920000002614, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_tensor_type_error": 0.0020723339999904056, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text": 0.005868083999999385, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_endpoints_tensor": 0.04405566500001612, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_list_string": 0.01698745900000631, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_multiple_endpoints_tensor": 0.01505404200000271, + 
"tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_multiple_endpoints_tensor_list": 0.01597050000000877, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_non_list": 0.0058105829999988146, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_singular": 0.016635499999992476, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_singular_no_batch_size": 0.01967587499999013, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_singular_string": 0.02379695900000911, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_tensor_list": 0.00768116700000121, + "tests/integration_tests/test_dendrite.py::test_dendrite_forward_text_tensor_list_singular": 0.007751000000013164, + "tests/integration_tests/test_dendrite.py::test_dendrite_to_df": 0.6830525419999987, + "tests/integration_tests/test_dendrite.py::test_failing_synapse": 0.652249334000004, + "tests/integration_tests/test_dendrite.py::test_successful_synapse": 0.5847192090000135, + "tests/integration_tests/test_ipfs.py::test_ipfs_init": 0.005554707999998243, + "tests/integration_tests/test_ipfs.py::test_retrieve_directory": 0.20729179199999237, + "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_create": 0.08020704100000131, + "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_decrypt_keyfile_data_legacy": 3.0671192910000045, + "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_keyfile_mock": 0.018454082999994625, + "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_keyfile_mock_func": 0.019594999999995366, + "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_legacy_coldkey": 0.030612376000000552, + "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_overwriting": 0.031093917000006854, + "tests/integration_tests/test_keyfile.py::TestKeyFiles::test_user_interface": 0.017205207999992922, + 
"tests/integration_tests/test_keyfile.py::TestKeyFiles::test_validate_password": 0.01777775099999701, + "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_full_sync": 3.6405804169999954, + "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_lite_sync": 3.6356975829999953, + "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_load_sync_save": 3.243659209999997, + "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_parameters": 3.0838419149999936, + "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_print_empty": 2.6707623749999954, + "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_properties": 3.287473416999994, + "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_state_dict": 3.296576874000003, + "tests/integration_tests/test_metagraph_integration.py::TestMetagraph::test_sync_block_0": 4.055834208, + "tests/integration_tests/test_priority_thread_pool.py::test_priority_thread_pool": 0.002472417000006999, + "tests/integration_tests/test_prometheus.py::TestPrometheus::test_init_prometheus_failed": 1.491444625000014, + "tests/integration_tests/test_prometheus.py::TestPrometheus::test_init_prometheus_success": 1.6381353319999903, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_get_balance": 2.5954937909999956, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_get_balances": 1.9654992910000004, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_get_current_block": 0.3812910839999972, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_get_uid_by_hotkey_on_subnet": 0.6584294999999969, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_hotkey_register": 0.46409241699998915, + 
"tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_hotkey_register_failed": 0.3542701670000099, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_network_overrides": 0.953627209000004, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_registration_failed": 1.788183917000012, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_registration_multiprocessed_already_registered": 0.9777173749999974, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_registration_partly_failed": 1.5698486670000023, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_registration_stale_then_continue": 0.781868541999998, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_set_weights": 0.6006925410000008, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_set_weights_failed": 0.3889112079999961, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_set_weights_inclusion": 0.4296055830000114, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_stake": 0.1843938319999836, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_stake_failed": 0.3917970010000005, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_stake_inclusion": 0.38589883299999883, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer": 2.0724527499999965, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer_dest_as_bytes": 1.2727416259999842, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer_failed": 1.2812408760000125, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer_inclusion": 1.2405266240000117, + 
"tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_transfer_invalid_dest": 0.4117500419999942, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_unstake": 0.4006357079999958, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_unstake_failed": 0.4873798340000093, + "tests/integration_tests/test_subtensor_integration.py::TestSubtensor::test_unstake_inclusion": 0.3860250829999927, + "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_ip_not_set_dont_use_internal_ip": 0.006879416000003857, + "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_ip_port_set_full_address_internal": 0.004500209000006805, + "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_ip_set_full_address_internal": 0.08792841500000037, + "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_port_not_set_use_internal_port": 0.004651376000000873, + "tests/unit_tests/bittensor_tests/test_axon.py::TestExternalAxon::test_external_port_set_full_address_internal": 0.00591749999999891, + "tests/unit_tests/bittensor_tests/test_axon.py::test_axon_is_destroyed": 0.040033332000000144, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_causal_lm_next_shape_error": 0.0009744579999990677, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_causal_lm_shape_error": 0.001580541999999241, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_deserialization_error": 0.0005970819999987498, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_grads_shape_error": 0.001092959000000171, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_invalid_request": 0.0007582499999996273, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_last_hidden_shape_error": 0.0008626240000007002, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_exception": 
0.0010987509999997869, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_success_causal_lm": 0.0032578749999991885, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_success_causal_lm_next": 0.002431750000001287, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_success_hidden": 0.001287251000000822, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_success_text_priority": 0.0034178330000074197, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_response_timeout": 0.0009528730000010199, + "tests/unit_tests/bittensor_tests/test_axon.py::test_backward_seq_2_seq_shape_error": 0.0010720409999995795, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_batch_shape_error": 0.0007811660000003329, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causal_lm_next_state_exception": 0.0009985000000014566, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causal_lm_state_exception": 0.002173708000000829, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causallm_shape_error": 0.0006132079999998652, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causallm_success": 0.019581957999998956, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causallmnext_shape_error": 0.0007552919999991303, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_causallmnext_success": 0.022651415999999536, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_deserialization_empty": 0.0009227910000007, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_deserialization_error": 0.0008193749999989564, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_empty_request": 0.0011124170000007538, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_joint_faulty_synapse": 0.01353250000000017, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_joint_missing_synapse": 
0.013988917000000711, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_joint_success": 0.0509341249999995, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_last_hidden_shape_error": 0.0008222500000005795, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_last_hidden_state_exception": 0.0009832080000000687, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_last_hidden_success": 0.0017997490000007943, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_not_implemented": 0.001580126000000348, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_priority_2nd_request_timeout": 2.009712416999996, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_priority_timeout": 27.006205707000003, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_response_deserialization_error": 0.0009404579999996443, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_seq_2_seq_shape_error": 0.0009308739999998039, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_seq_2_seq_state_exception": 0.0013031659999995782, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_seq_2_seq_success": 0.0018539589999990724, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_seq_shape_error": 0.0008392500000002912, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_tensor_success_priority": 0.07963441700000029, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_timeout": 0.0021218760000003556, + "tests/unit_tests/bittensor_tests/test_axon.py::test_forward_unknown_error": 0.000990500999999533, + "tests/unit_tests/bittensor_tests/test_axon.py::test_grpc_backward_fails": 0.006330292000001236, + "tests/unit_tests/bittensor_tests/test_axon.py::test_grpc_backward_works": 0.012263416000003247, + "tests/unit_tests/bittensor_tests/test_axon.py::test_grpc_forward_fails": 0.004834957999989342, + 
"tests/unit_tests/bittensor_tests/test_axon.py::test_grpc_forward_works": 0.015886249999994106, + "tests/unit_tests/bittensor_tests/test_axon.py::test_sign_v2": 0.0025120420000011023, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_add": 0.18219254200000012, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_add_invalid_type": 0.12365654300000006, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_add_other_not_balance": 0.14650508300000098, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_div_invalid_type": 0.12069516600000174, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_eq_invalid_type": 0.1321914169999996, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_eq_other_not_balance": 0.13415275000000015, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_floordiv": 0.2226764569999995, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_floordiv_other_not_balance": 0.23913508399999994, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_init": 0.12514987600000005, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_init_from_invalid_value": 0.0004109170000008433, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_mul": 0.19085399900000066, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_mul_invalid_type": 0.16508675100000048, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_mul_other_not_balance": 0.2507777079999993, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_neq_none": 0.12535729200000034, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_not_eq_none": 0.14622908400000068, + 
"tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_radd_other_not_balance": 0.1727647920000006, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_rfloordiv_other_not_balance": 0.21285375000000073, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_rmul_other_not_balance": 0.17940537499999998, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_rsub_other_not_balance": 0.19510154200000063, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_rtruediv_other_not_balance": 0.32300358299999843, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_sub": 0.20487529099999868, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_sub_invalid_type": 0.13107362499999908, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_sub_other_not_balance": 0.20876896000000222, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_truediv": 0.20615204100000106, + "tests/unit_tests/bittensor_tests/test_balance.py::TestBalance::test_balance_truediv_other_not_balance": 0.20203299999999835, + "tests/unit_tests/bittensor_tests/test_config.py::test_loaded_config": 0.000341875000000158, + "tests/unit_tests/bittensor_tests/test_config.py::test_prefix": 1.4881067080000019, + "tests/unit_tests/bittensor_tests/test_config.py::test_strict": 0.003527500000000572, + "tests/unit_tests/bittensor_tests/test_config.py::test_to_defaults": 0.0006572089999998809, + "tests/unit_tests/bittensor_tests/test_endpoint.py::test_create_endpoint": 0.0035975830000012365, + "tests/unit_tests/bittensor_tests/test_endpoint.py::test_endpoint_fails_checks": 0.0009294989999997227, + "tests/unit_tests/bittensor_tests/test_endpoint.py::test_endpoint_to_tensor": 0.0014645410000007075, + "tests/unit_tests/bittensor_tests/test_endpoint.py::test_thrash_equality_of_endpoint": 0.5774439579999999, 
+ "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_axon_receptor_forward_works": 0.0101347909999987, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_backward": 0.01403204099999833, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_backward_large": 0.0014666259999991382, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_backward_multiple": 0.0015117080000006666, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_backward_no_grad": 0.001954291000000552, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_call_time": 0.029393998999999837, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_del": 0.0004828739999975795, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_causal_lm_next_shape_error": 0.00045083400000045515, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_causal_lm_shape_error": 0.0004375410000001523, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_last_hidden_shape_error": 0.00042408300000218446, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_seq_2_seq_shape_error": 0.000591667000000129, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_tensor_pass_through_text_causal_lm": 0.0019801239999992504, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_tensor_pass_through_text_causal_lm_next": 0.0015587079999992426, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_tensor_pass_through_text_last_hidden": 0.0014038749999993883, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_tensor_pass_through_text_seq_2_seq": 0.0012167919999974686, + 
"tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_text_causal_lm": 0.0020301259999992993, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_text_causal_lm_next": 0.0013322070000008068, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_text_last_hidden": 0.0011474169999985406, + "tests/unit_tests/bittensor_tests/test_forward_backward.py::test_dendrite_forward_text_seq_2_seq": 0.0011787070000028876, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_create_ed25519_keypair": 0.001834499999999295, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_create_keypair_from_private_key": 0.0005444169999986315, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_create_sr25519_keypair": 0.0015333330000011358, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_generate_mnemonic": 0.0003291669999967439, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_default_to_dev_mnemonic": 0.0019820840000015494, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_hard_path": 0.0019323339999992584, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_nested_hard_soft_path": 0.0018494169999989651, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_nested_soft_hard_path": 0.0020734170000000773, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_path_gt_32_bytes": 0.001790332999998867, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_soft_path": 0.0016932490000005629, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_hdkd_unsupported_password": 0.00044658299999866813, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_incorrect_private_key_length_sr25519": 0.00047804200000101105, + 
"tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_incorrect_public_key": 0.0003666670000015415, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_invalid_mnemic": 0.0004930830000002828, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_only_provide_public_key": 0.00045920699999868475, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_only_provide_ss58_address": 0.000522709000001953, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_provide_no_ss58_address_and_public_key": 0.0005050830000019602, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify": 0.0016591679999979903, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_ed25519": 0.0016544579999990816, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_hex_data": 0.001937792000001437, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_incorrect_signature": 0.001960749000000206, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_invalid_message": 0.00183941700000112, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_invalid_signature": 0.0016063319999997105, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_invalid_signature_ed25519": 0.001609873999999678, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_and_verify_scale_bytes": 0.00196662400000136, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_missing_private_key": 0.0006992090000004225, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_sign_unsupported_crypto_type": 0.0004697499999988253, + 
"tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_unsupport_crypto_type": 0.0004740830000002916, + "tests/unit_tests/bittensor_tests/test_keypair.py::KeyPairTestCase::test_verify_unsupported_crypto_type": 0.0007947079999990336, + "tests/unit_tests/bittensor_tests/test_metagraph.py::TestMetagraph::test_from_neurons": 0.8742741239999994, + "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreServer::test_coreserver_reregister_flag_false_exit": 0.006013750000001039, + "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreServer::test_coreserver_reregister_flag_true": 0.006052874999999958, + "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreServer::test_model_output_check": 9.921326915999998, + "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreServer::test_set_fine_tuning_params": 6.299140666000003, + "tests/unit_tests/bittensor_tests/test_neuron.py::TestCoreValidator::test_corevalidator_reregister_flag_false_exit": 0.008880706999999433 +} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..5a355b5cf6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,40 @@ +# syntax=docker/dockerfile:1 +FROM python:3.11.8-bookworm + +LABEL bittensor.image.authors="bittensor.com" \ + bittensor.image.vendor="Bittensor" \ + bittensor.image.title="bittensor/bittensor" \ + bittensor.image.description="Bittensor: Incentivized Peer to Peer Neural Networks" \ + bittensor.image.source="https://github.com/opentensor/bittensor.git" \ + bittensor.image.revision="${VCS_REF}" \ + bittensor.image.created="${BUILD_DATE}" \ + bittensor.image.documentation="https://app.gitbook.com/@opentensor/s/bittensor/" +ARG DEBIAN_FRONTEND=noninteractive + +# Update the base image +RUN apt update && apt upgrade -y +# Install bittensor +## Install dependencies +RUN apt install -y curl sudo nano git htop netcat-openbsd wget unzip tmux apt-utils cmake build-essential +## Upgrade pip +RUN pip3 install --upgrade pip + +# Install 
nvm and pm2 +RUN curl -o install_nvm.sh https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.1/install.sh && \ + echo 'fabc489b39a5e9c999c7cab4d281cdbbcbad10ec2f8b9a7f7144ad701b6bfdc7 install_nvm.sh' | sha256sum --check && \ + bash install_nvm.sh + +RUN bash -c "source $HOME/.nvm/nvm.sh && \ + # use node 16 + nvm install 16 && \ + # install pm2 + npm install --location=global pm2" + +RUN mkdir -p /root/.bittensor/bittensor +COPY . /root/.bittensor/bittensor +RUN cd /root/.bittensor/bittensor && python3 -m pip install . + +# Increase ulimit to 1,000,000 +RUN prlimit --pid=$PPID --nofile=1000000 + +EXPOSE 8091 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..8d10866d56 --- /dev/null +++ b/LICENSE @@ -0,0 +1,16 @@ +The MIT License (MIT) +Copyright © 2021 Yuma Rao + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the “Software”), to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of +the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..344c3e4184 --- /dev/null +++ b/Makefile @@ -0,0 +1,26 @@ +SHELL:=/bin/bash + +init-venv: + python3 -m venv venv && source ./venv/bin/activate + +clean-venv: + source ./venv/bin/activate && \ + pip freeze > make_venv_to_uninstall.txt && \ + pip uninstall -r make_venv_to_uninstall.txt && \ + rm make_venv_to_uninstall.txt + +clean: + rm -rf dist/ && \ + rm -rf build/ && \ + rm -rf bittensor.egg-info/ && \ + rm -rf .pytest_cache/ && \ + rm -rf lib/ + +install: + python3 -m pip install . + +install-dev: + python3 -m pip install '.[dev]' + +install-cubit: + python3 -m pip install '.[cubit]' \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000000..08f060aaf6 --- /dev/null +++ b/README.md @@ -0,0 +1,247 @@ +
+ +# **Bittensor SDK** +[![Discord Chat](https://img.shields.io/discord/308323056592486420.svg)](https://discord.gg/bittensor) +[![PyPI version](https://badge.fury.io/py/bittensor.svg)](https://badge.fury.io/py/bittensor) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) + +--- + +## Internet-scale Neural Networks + +[Discord](https://discord.gg/qasY3HA9F9) ‱ [Network](https://taostats.io/) ‱ [Research](https://bittensor.com/whitepaper) + +
+ +- [Overview of Bittensor](#overview-of-bittensor) +- [The Bittensor SDK](#the-bittensor-sdk) +- [Is Bittensor a blockchain or an AI platform?](#is-bittensor-a-blockchain-or-an-ai-platform) +- [Subnets](#subnets) +- [Subnet validators and subnet miners](#subnet-validators-and-subnet-miners) +- [Yuma Consensus](#yuma-consensus) +- [Release Notes](#release-notes) +- [Install Bittensor SDK](#install-bittensor-sdk) +- [Upgrade](#upgrade) +- [Install on macOS and Linux](#install-on-macos-and-linux) + - [Install using a Bash command](#install-using-a-bash-command) + - [Install using `pip3 install`](#install-using-pip3-install) + - [Install from source](#install-from-source) + - [Verify using Python interpreter](#verify-using-python-interpreter) + - [Verify by listing axon information](#verify-by-listing-axon-information) +- [Release Guidelines](#release-guidelines) +- [Contributions](#contributions) +- [License](#license) +- [Acknowledgments](#acknowledgments) + +--- + +## Overview of Bittensor + +Welcome! Bittensor is an open source platform on which you can produce competitive digital commodities. These digital commodities can be machine intelligence, storage space, compute power, protein folding, financial markets prediction, and many more. You are rewarded in **TAO** when you produce best digital commodities. + +## The Bittensor SDK + +The Opentensor Foundation (OTF) provides all the open source tools, including this Bittensor SDK, the codebase and the documentation, with step-by-step tutorials and guides, to enable you to participate in the Bittensor ecosystem. + +- **Developer documentation**: https://docs.bittensor.com. +- **A Beginner's Q and A on Bittensor**: https://docs.bittensor.com/questions-and-answers. +- **Bittensor whitepaper**: https://bittensor.com/whitepaper. 
+ +This Bittensor SDK contains ready-to-use Python packages for interacting with the Bittensor ecosystem, writing subnet incentive mechanisms, subnet miners, subnet validators and querying the subtensor (the blockchain part of the Bittensor network). + +--- + +## Is Bittensor a blockchain or an AI platform? + +In Bittensor there is one blockchain, and many platforms that are connected to this one blockchain. We call these platforms as **subnets**, and this one blockchain **subtensor**. So, a subnet can be AI-related or it can be something else. The Bittensor network has a number of distinct subnets. All these subnets interact with subtensor blockchain. If you are thinking, "So, subnets are not part of the blockchain but only interact with it?" then the answer is "yes, exactly." + +## Subnets + +Each category of the digital commodity is produced in a distinct subnet. Applications are built on these specific subnets. End-users of these applications would be served by these applications. + +## Subnet validators and subnet miners + +Subnets, which exist outside the blockchain and are connected to it, are off-chain competitions where only the best producers are rewarded. A subnet consists of off-chain **subnet validators** who initiate the competition for a specific digital commodity, and off-chain **subnet miners** who compete and respond by producing the best quality digital commodity. + +## Yuma Consensus + +Scores are assigned to the top-performing subnet miners and subnet validators. The on-chain Yuma Consensus determines the TAO rewards for these top performers. The Bittensor blockchain, the subtensor, runs on decentralized validation nodes, just like any blockchain. + +**This SDK repo is for Bittensor platform only** +This Bittensor SDK codebase is for the Bittensor platform only, designed to help developers create subnets and build tools on Bittensor. For subnets and applications, refer to subnet-specific websites, which are maintained by subnet owners. 
+ +## Release Notes + +See [Bittensor SDK Release Notes](https://docs.bittensor.com/bittensor-rel-notes). + +--- + +## Install Bittensor SDK + +Before you can start developing, you must install Bittensor SDK and then create Bittensor wallet. + +## Upgrade + +If you already installed Bittensor SDK, make sure you upgrade to the latest version. Run the below command: + +```bash +python3 -m pip install --upgrade bittensor +``` + +--- + +## Install on macOS and Linux + +You can install Bittensor SDK on your local machine in either of the following ways. **Make sure you verify your installation after you install**: +- [Install using a Bash command](#install-using-a-bash-command). +- [Install using `pip3 install`](#install-using-pip3-install) +- [Install from source](#install-from-source) + +### Install using a Bash command + +This is the most straightforward method. It is recommended for a beginner as it will pre-install requirements like Python, if they are not already present on your machine. Copy and paste the following `bash` command into your terminal: + +```bash +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/opentensor/bittensor/master/scripts/install.sh)" +``` + +**For Ubuntu-Linux users** +If you are using Ubuntu-Linux, the script will prompt for `sudo` access to install all required apt-get packages. + +### Install using `pip3 install` + +```bash +python3 -m venv bt_venv +source bt_venv/bin/activate +pip install bittensor +``` + +### Install from source + +1. Create and activate a virtual environment + + - Create Python virtual environment. Follow [this guide on python.org](https://docs.python.org/3/library/venv.html#creating-virtual-environments). + + - Activate the new environment. Follow [this guide on python.org](https://docs.python.org/3/library/venv.html#how-venvs-work) + +2. Clone the Bittensor SDK repo + +```bash +git clone https://github.com/opentensor/bittensor.git +``` + +3. 
Install + +You can install using any of the below options: + +- **Install only SDK**: Run the below command to install Bittensor SDK in the above virtual environment. + + ```python + pip install bittensor + ``` + +- **Install SDK with `btcli`**: Install Bittensor SDK with `btcli`. The `btcli` will be installed as an independent tool and its Python package is `bittensor-cli`. + + ```python + pip install bittensor[btcli] + ``` + +- **Install SDK with `torch`**: Install Bittensor SDK with [`torch`](https://pytorch.org/docs/stable/torch.html). + + ```python + pip install bittensor[torch] + ``` + +- **Install SDK with `cubit`**: Install Bittensor SDK with [`cubit`](https://pytorch.org/docs/stable/torch.html). + + ```python + pip install bittensor[cubit] + ``` + + +--- + +## Install on Windows + +To install and run Bittensor SDK on Windows you must install [**WSL 2** (Windows Subsystem for Linux)](https://learn.microsoft.com/en-us/windows/wsl/about) on Windows and select [Ubuntu Linux distribution](https://github.com/ubuntu/WSL/blob/main/docs/guides/install-ubuntu-wsl2.md). + +After you installed the above, follow the same installation steps described above in [Install on macOS and Linux](#install-on-macos-and-linux). + +**ALERT**: **Limited support on Windows** +While wallet transactions like delegating, transfer, registering, staking can be performed on a Windows machine using WSL 2, the mining and validating operations are not recommended and are not supported on Windows machines. + +--- + +## Verify the installation + +You can verify your installation in either of the below ways: + +### Verify using `btsdk` version + +```bash +python3 -m bittensor +``` + +The above command will show you the version of the `btsdk` you just installed. + +### Verify using Python interpreter + +1. Launch the Python interpreter on your terminal. + + ```bash + python3 + ``` +2. Enter the following two lines in the Python interpreter. 
+ + ```python + import bittensor as bt + print( bt.__version__ ) + ``` + The Python interpreter output will look like below: + + ```python + Python 3.11.6 (main, Oct 2 2023, 13:45:54) [Clang 15.0.0 (clang-1500.0.40.1)] on darwin + Type "help", "copyright", "credits" or "license" for more information. + >>> import bittensor as bt + >>> print( bt.__version__ ) + + ``` +You will see the version number you installed in place of ``. + +### Verify by listing axon information + +You can also verify the Bittensor SDK installation by listing the axon information for the neurons. Enter the following lines in the Python interpreter. + +```python +import bittensor +metagraph = bittensor.Metagraph(1) +metagraph.axons[:10] +``` +The Python interpreter output will look like below. + +```bash +[AxonInfo( /ipv4/3.139.80.241:11055, 5GqDsK6SAPyQtG243hbaKTsoeumjQQLhUu8GyrXikPTmxjn7, 5D7u5BTqF3j1XHnizp9oR67GFRr8fBEFhbdnuVQEx91vpfB5, 600 ), AxonInfo( /ipv4/8.222.132.190:5108, 5CwqDkDt1uk2Bngvf8avrapUshGmiUvYZjYa7bfA9Gv9kn1i, 5HQ9eTDorvovKTxBc9RUD22FZHZzpy1KRfaxCnRsT9QhuvR6, 600 ), AxonInfo( /ipv4/34.90.71.181:8091, 5HEo565WAy4Dbq3Sv271SAi7syBSofyfhhwRNjFNSM2gP9M2, 5ChuGqW2cxc5AZJ29z6vyTkTncg75L9ovfp8QN8eB8niSD75, 601 ), AxonInfo( /ipv4/64.247.206.79:8091, 5HK5tp6t2S59DywmHRWPBVJeJ86T61KjurYqeooqj8sREpeN, 5E7W9QXNoW7se7B11vWRMKRCSWkkAu9EYotG5Ci2f9cqV8jn, 601 ), AxonInfo( /ipv4/51.91.30.166:40203, 5EXYcaCdnvnMZbozeknFWbj6aKXojfBi9jUpJYHea68j4q1a, 5CsxoeDvWsQFZJnDCyzxaNKgA8pBJGUJyE1DThH8xU25qUMg, 601 ), AxonInfo( /ipv4/149.137.225.62:8091, 5F4tQyWrhfGVcNhoqeiNsR6KjD4wMZ2kfhLj4oHYuyHbZAc3, 5Ccmf1dJKzGtXX7h17eN72MVMRsFwvYjPVmkXPUaapczECf6, 600 ), AxonInfo( /ipv4/38.147.83.11:8091, 5Hddm3iBFD2GLT5ik7LZnT3XJUnRnN8PoeCFgGQgawUVKNm8, 5DCQw11aUW7bozAKkB8tB5bHqAjiu4F6mVLZBdgJnk8dzUoV, 610 ), AxonInfo( /ipv4/38.147.83.30:41422, 5HNQURvmjjYhTSksi8Wfsw676b4owGwfLR2BFAQzG7H3HhYf, 5EZUTdAbXyLmrs3oiPvfCM19nG6oRs4X7zpgxG5oL1iK4MAh, 610 ), AxonInfo( /ipv4/54.227.25.215:10022, 
5DxrZuW8kmkZPKGKp1RBVovaP5zHtPLDHYc5Yu82Z1fWqK5u, 5FhXUSmSZ2ec7ozRSA8Bg3ywmGwrjoLLzsXjNcwmZme2GcSC, 601 ), AxonInfo( /ipv4/52.8.243.76:40033, 5EnZN591jjsKKbt3yBtfGKWHxhxRH9cJonqTKRT5yTRUyNon, 5ChzhHyGmWwEdHjuvAxoUifHEZ6xpUjR67fDd4a42UrPysyB, 601 )] +>>> +``` + +--- + +## Release Guidelines +Instructions for the release manager: [RELEASE_GUIDELINES.md](./contrib/RELEASE_GUIDELINES.md) document. + +## Contributions +Ready to contribute? Read the [contributing guide](./contrib/CONTRIBUTING.md) before making a pull request. + +## License +The MIT License (MIT) +Copyright © 2024 The Opentensor Foundation + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +## Acknowledgments +**learning-at-home/hivemind** diff --git a/VERSION b/VERSION new file mode 100644 index 0000000000..8b23b8d47c --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +7.3.0 \ No newline at end of file diff --git a/bittensor/__init__.py b/bittensor/__init__.py new file mode 100644 index 0000000000..5ddba2abe4 --- /dev/null +++ b/bittensor/__init__.py @@ -0,0 +1,53 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import warnings + +from .core.settings import __version__, version_split, DEFAULTS +from .utils.btlogging import logging +from .utils.deprecated import * + + +# Logging helpers. +def trace(on: bool = True): + """ + Enables or disables trace logging. + + Args: + on (bool): If True, enables trace logging. If False, disables trace logging. + """ + logging.set_trace(on) + + +def debug(on: bool = True): + """ + Enables or disables debug logging. 
+ + Args: + on (bool): If True, enables debug logging. If False, disables debug logging. + """ + logging.set_debug(on) + + +def __getattr__(name): + if name == "version_split": + warnings.warn( + "version_split is deprecated and will be removed in future versions. Use __version__ instead.", + DeprecationWarning, + ) + return version_split + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/bittensor/__main__.py b/bittensor/__main__.py new file mode 100644 index 0000000000..05d664c9de --- /dev/null +++ b/bittensor/__main__.py @@ -0,0 +1,21 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +from bittensor import __version__ + +if __name__ == "__main__": + print(f"Bittensor SDK version: {__version__}") diff --git a/bittensor/core/__init__.py b/bittensor/core/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/bittensor/core/axon.py b/bittensor/core/axon.py new file mode 100644 index 0000000000..cd32ba4212 --- /dev/null +++ b/bittensor/core/axon.py @@ -0,0 +1,1521 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+"""Create and initialize Axon, which services the forward and backward requests from other neurons.""" + +import argparse +import asyncio +import contextlib +import copy +import inspect +import json +import threading +import time +import traceback +import typing +import uuid +import warnings +from inspect import signature, Signature, Parameter +from typing import Any, Awaitable, Callable, Optional + +import uvicorn +from bittensor_wallet import Wallet +from fastapi import APIRouter, Depends, FastAPI +from fastapi.responses import JSONResponse +from fastapi.routing import serialize_response +from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint +from starlette.requests import Request +from starlette.responses import Response +from substrateinterface import Keypair + +from bittensor.core.chain_data import AxonInfo +from bittensor.core.config import Config +from bittensor.core.errors import ( + BlacklistedException, + InvalidRequestNameError, + NotVerifiedException, + PostProcessException, + PriorityException, + SynapseDendriteNoneException, + SynapseException, + SynapseParsingError, + UnknownSynapseError, +) +from bittensor.core.settings import DEFAULTS, version_as_int +from bittensor.core.stream import StreamingSynapse +from bittensor.core.synapse import Synapse, TerminalInfo +from bittensor.core.threadpool import PriorityThreadPoolExecutor +from bittensor.utils import networking +from bittensor.utils.axon_utils import allowed_nonce_window_ns, calculate_diff_seconds +from bittensor.utils.btlogging import logging + +# Just for annotation checker +if typing.TYPE_CHECKING: + from bittensor.core.subtensor import Subtensor + +# Latest version with old style nonce structure (this in not a current SDK version) +V_7_2_0 = 7002000 + + +class FastAPIThreadedServer(uvicorn.Server): + """ + The ``FastAPIThreadedServer`` class is a specialized server implementation for the Axon server in the Bittensor network. 
+ + It extends the functionality of :func:`uvicorn.Server` to run the FastAPI application in a separate thread, allowing the Axon server to handle HTTP requests concurrently and non-blocking. + + This class is designed to facilitate the integration of FastAPI with the Axon's asynchronous architecture, ensuring efficient and scalable handling of network requests. + + Importance and Functionality + Threaded Execution + The class allows the FastAPI application to run in a separate thread, enabling concurrent handling of HTTP requests which is crucial for the performance and scalability of the Axon server. + + Seamless Integration + By running FastAPI in a threaded manner, this class ensures seamless integration of FastAPI's capabilities with the Axon server's asynchronous and multi-threaded architecture. + + Controlled Server Management + The methods start and stop provide controlled management of the server's lifecycle, ensuring that the server can be started and stopped as needed, which is vital for maintaining the Axon server's reliability and availability. + + Signal Handling + Overriding the default signal handlers prevents potential conflicts with the Axon server's main application flow, ensuring stable operation in various network conditions. + + Use Cases + Starting the Server + When the Axon server is initialized, it can use this class to start the FastAPI application in a separate thread, enabling it to begin handling HTTP requests immediately. + + Stopping the Server + During shutdown or maintenance of the Axon server, this class can be used to stop the FastAPI application gracefully, ensuring that all resources are properly released. 
+ + Example Usage:: + + self.app = FastAPI() + log_level = "trace" + self.fast_config = uvicorn.Config(self.app, host="0.0.0.0", port=self.config.axon.port, log_level=log_level) + self.fast_server = FastAPIThreadedServer(config=self.fast_config) + self.fast_server.start() + # do something + self.fast_server.stop() + + Args: + should_exit (bool): Flag to indicate whether the server should stop running. + is_running (bool): Flag to indicate whether the server is currently running. + + The server overrides the default signal handlers to prevent interference with the main application flow and provides methods to start and stop the server in a controlled manner. + """ + + should_exit: bool = False + is_running: bool = False + + def install_signal_handlers(self): + """ + Overrides the default signal handlers provided by ``uvicorn.Server``. This method is essential to ensure that the signal handling in the threaded server does not interfere with the main application's flow, especially in a complex asynchronous environment like the Axon server. + """ + + @contextlib.contextmanager + def run_in_thread(self): + """ + Manages the execution of the server in a separate thread, allowing the FastAPI application to run asynchronously without blocking the main thread of the Axon server. This method is a key component in enabling concurrent request handling in the Axon server. + + Yields: + None: This method yields control back to the caller while the server is running in the background thread. + """ + thread = threading.Thread(target=self.run, daemon=True) + thread.start() + try: + while not self.started: + time.sleep(1e-3) + yield + finally: + self.should_exit = True + thread.join() + + def _wrapper_run(self): + """ + A wrapper method for the :func:`run_in_thread` context manager. This method is used internally by the ``start`` method to initiate the server's execution in a separate thread. 
+ """ + with self.run_in_thread(): + while not self.should_exit: + time.sleep(1e-3) + + def start(self): + """ + Starts the FastAPI server in a separate thread if it is not already running. This method sets up the server to handle HTTP requests concurrently, enabling the Axon server to efficiently manage incoming network requests. + + The method ensures that the server starts running in a non-blocking manner, allowing the Axon server to continue its other operations seamlessly. + """ + if not self.is_running: + self.should_exit = False + thread = threading.Thread(target=self._wrapper_run, daemon=True) + thread.start() + self.is_running = True + + def stop(self): + """ + Signals the FastAPI server to stop running. This method sets the :func:`should_exit` flag to ``True``, indicating that the server should cease its operations and exit the running thread. + + Stopping the server is essential for controlled shutdowns and resource management in the Axon server, especially during maintenance or when redeploying with updated configurations. + """ + if self.is_running: + self.should_exit = True + + +class Axon: + """ + The ``Axon`` class in Bittensor is a fundamental component that serves as the server-side interface for a neuron within the Bittensor network. + + This class is responsible for managing + incoming requests from other neurons and implements various mechanisms to ensure efficient + and secure network interactions. + + An axon relies on a FastAPI router to create endpoints for different message types. These + endpoints are crucial for handling various request types that a neuron might receive. The + class is designed to be flexible and customizable, allowing users to specify custom rules + for forwarding, blacklisting, prioritizing, and verifying incoming requests. The class also + includes internal mechanisms to manage a thread pool, supporting concurrent handling of + requests with defined priority levels. 
+ + Methods in this class are equipped to deal with incoming requests from various scenarios in the + network and serve as the server face for a neuron. It accepts multiple arguments, like wallet, + configuration parameters, ip address, server binding port, external ip, external port and max + workers. Key methods involve managing and operating the FastAPI application router, including + the attachment and operation of endpoints. + + Key Features: + + - FastAPI router integration for endpoint creation and management. + - Customizable request handling including forwarding, blacklisting, and prioritization. + - Verification of incoming requests against custom-defined functions. + - Thread pool management for concurrent request handling. + - Command-line argument support for user-friendly program interaction. + + Example Usage:: + + import bittensor + # Define your custom synapse class + class MySynapse( bittensor.Synapse ): + input: int = 1 + output: int = None + + # Define a custom request forwarding function using your synapse class + def forward_my_synapse( synapse: MySynapse ) -> MySynapse: + # Apply custom logic to synapse and return it + synapse.output = 2 + return synapse + + # Define a custom request verification function + def verify_my_synapse( synapse: MySynapse ): + # Apply custom verification logic to synapse + # Optionally raise Exception + assert synapse.input == 1 + ... 
+ + # Define a custom request blacklist function + def blacklist_my_synapse( synapse: MySynapse ) -> bool: + # Apply custom blacklist + return False ( if non blacklisted ) or True ( if blacklisted ) + + # Define a custom request priority function + def prioritize_my_synapse( synapse: MySynapse ) -> float: + # Apply custom priority + return 1.0 + + # Initialize Axon object with a custom configuration + my_axon = bittensor.Axon( + config=my_config, + wallet=my_wallet, + port=9090, + ip="192.0.2.0", + external_ip="203.0.113.0", + external_port=7070 + ) + + # Attach the endpoint with the specified verification and forward functions. + my_axon.attach( + forward_fn = forward_my_synapse, + verify_fn = verify_my_synapse, + blacklist_fn = blacklist_my_synapse, + priority_fn = prioritize_my_synapse + ) + + # Serve and start your axon. + my_axon.serve( + netuid = ... + subtensor = ... + ).start() + + # If you have multiple forwarding functions, you can chain attach them. + my_axon.attach( + forward_fn = forward_my_synapse, + verify_fn = verify_my_synapse, + blacklist_fn = blacklist_my_synapse, + priority_fn = prioritize_my_synapse + ).attach( + forward_fn = forward_my_synapse_2, + verify_fn = verify_my_synapse_2, + blacklist_fn = blacklist_my_synapse_2, + priority_fn = prioritize_my_synapse_2 + ).serve( + netuid = ... + subtensor = ... + ).start() + + Args: + wallet (Optional[bittensor_wallet.Wallet]): Wallet with hotkey and coldkeypub. + config (Optional[bittensor.core.config.Config]): Configuration parameters for the axon. + port (Optional[int]): Port for server binding. + ip (Optional[str]): Binding IP address. + external_ip (Optional[str]): External IP address to broadcast. + external_port (Optional[int]): External port to broadcast. + max_workers (Optional[int]): Number of active threads for request handling. + + Returns: + bittensor.core.axon.Axon: An instance of the axon class configured as per the provided arguments. 
+ + Note: + This class is a core part of Bittensor's decentralized network for machine intelligence, + allowing neurons to communicate effectively and securely. + + Importance and Functionality + Endpoint Registration + This method dynamically registers API endpoints based on the Synapse used, allowing the Axon to respond to specific types of requests and synapses. + + Customization of Request Handling + By attaching different functions, the Axon can customize how it + handles, verifies, prioritizes, and potentially blocks incoming requests, making it adaptable to various network scenarios. + + Security and Efficiency + The method contributes to both the security (via verification and blacklisting) and efficiency (via prioritization) of request handling, which are crucial in a decentralized network environment. + + Flexibility + The ability to define custom functions for different aspects of request handling provides great flexibility, allowing the Axon to be tailored to specific needs and use cases within the Bittensor network. + + Error Handling and Validation + The method ensures that the attached functions meet the required + signatures, providing error handling to prevent runtime issues. + """ + + def __init__( + self, + wallet: Optional["Wallet"] = None, + config: Optional["Config"] = None, + port: Optional[int] = None, + ip: Optional[str] = None, + external_ip: Optional[str] = None, + external_port: Optional[int] = None, + max_workers: Optional[int] = None, + ): + """Creates a new bittensor.Axon object from passed arguments. + + Args: + config (:obj:`Optional[bittensor.core.config.Config]`): bittensor.Axon.config() + wallet (:obj:`Optional[bittensor_wallet.Wallet]`): bittensor wallet with hotkey and coldkeypub. + port (:type:`Optional[int]`): Binding port. + ip (:type:`Optional[str]`): Binding ip. + external_ip (:type:`Optional[str]`): The external ip of the server to broadcast to the network. 
+ external_port (:type:`Optional[int]`): The external port of the server to broadcast to the network. + max_workers (:type:`Optional[int]`): Used to create the threadpool if not passed, specifies the number of active threads servicing requests. + """ + # Build and check config. + if config is None: + config = Axon.config() + config = copy.deepcopy(config) + config.axon.ip = ip or DEFAULTS.axon.ip + config.axon.port = port or DEFAULTS.axon.port + config.axon.external_ip = external_ip or DEFAULTS.axon.external_ip + config.axon.external_port = external_port or DEFAULTS.axon.external_port + config.axon.max_workers = max_workers or DEFAULTS.axon.max_workers + Axon.check_config(config) + self.config = config # type: ignore + + # Get wallet or use default. + self.wallet = wallet or Wallet() + + # Build axon objects. + self.uuid = str(uuid.uuid1()) + self.ip = self.config.axon.ip # type: ignore + self.port = self.config.axon.port # type: ignore + self.external_ip = ( + self.config.axon.external_ip # type: ignore + if self.config.axon.external_ip is not None # type: ignore + else networking.get_external_ip() + ) + self.external_port = ( + self.config.axon.external_port # type: ignore + if self.config.axon.external_port is not None # type: ignore + else self.config.axon.port # type: ignore + ) + self.full_address = str(self.config.axon.ip) + ":" + str(self.config.axon.port) # type: ignore + self.started = False + + # Build middleware + self.thread_pool = PriorityThreadPoolExecutor( + max_workers=self.config.axon.max_workers # type: ignore + ) + self.nonces: dict[str, int] = {} + + # Request default functions. 
+ self.forward_class_types: dict[str, list[Signature]] = {} + self.blacklist_fns: dict[str, Optional[Callable]] = {} + self.priority_fns: dict[str, Optional[Callable]] = {} + self.forward_fns: dict[str, Optional[Callable]] = {} + self.verify_fns: dict[str, Optional[Callable]] = {} + + # Instantiate FastAPI + self.app = FastAPI() + log_level = "trace" if logging.__trace_on__ else "critical" + self.fast_config = uvicorn.Config( + self.app, host="0.0.0.0", port=self.config.axon.port, log_level=log_level + ) + self.fast_server = FastAPIThreadedServer(config=self.fast_config) + self.router = APIRouter() + self.app.include_router(self.router) + + # Build ourselves as the middleware. + self.middleware_cls = AxonMiddleware + self.app.add_middleware(self.middleware_cls, axon=self) + + # Attach default forward. + def ping(r: Synapse) -> Synapse: + return r + + self.attach( + forward_fn=ping, verify_fn=None, blacklist_fn=None, priority_fn=None + ) + + def info(self) -> "AxonInfo": + """Returns the axon info object associated with this axon.""" + return AxonInfo( + version=version_as_int, + ip=self.external_ip, + ip_type=networking.ip_version(self.external_ip), + port=self.external_port, + hotkey=self.wallet.hotkey.ss58_address, + coldkey=self.wallet.coldkeypub.ss58_address, + protocol=4, + placeholder1=0, + placeholder2=0, + ) + + def attach( + self, + forward_fn: Callable, + blacklist_fn: Optional[Callable] = None, + priority_fn: Optional[Callable] = None, + verify_fn: Optional[Callable] = None, + ) -> "Axon": + """ + + Attaches custom functions to the Axon server for handling incoming requests. This method enables + the ``Axon`` to define specific behaviors for request forwarding, verification, blacklisting, and + prioritization, thereby customizing its interaction within the Bittensor network. + + Registers an API endpoint to the FastAPI application router. + It uses the name of the first argument of the :func:`forward_fn` function as the endpoint name. 
+ + The :func:`attach` method in the Bittensor framework's axon class is a crucial function for registering + API endpoints to the Axon's FastAPI application router. This method allows the Axon server to + define how it handles incoming requests by attaching functions for forwarding, verifying, + blacklisting, and prioritizing requests. It's a key part of customizing the server's behavior + and ensuring efficient and secure handling of requests within the Bittensor network. + + Args: + forward_fn (Callable): Function to be called when the API endpoint is accessed. It should have at least one argument. + blacklist_fn (Optional[Callable]): Function to filter out undesired requests. It should take the same arguments as :func:`forward_fn` and return a tuple of a boolean and a string. Defaults to ``None``, meaning no blacklist filter will be used. + priority_fn (Optional[Callable]): Function to rank requests based on their priority. It should take the same arguments as :func:`forward_fn` and return a numerical value representing the request's priority. Defaults to ``None``, meaning no priority sorting will be applied. + verify_fn (Optional[Callable]): Function to verify requests. It should take the same arguments as :func:`forward_fn` and return ``None``. If ``None``, :func:`self.default_verify` function will be used. + + Note: + The methods :func:`forward_fn`, :func:`blacklist_fn`, :func:`priority_fn`, and :func:`verify_fn` should be designed to receive the same parameters. + + Raises: + AssertionError: If :func:`forward_fn` does not have the signature: ``forward( synapse: YourSynapse ) -> synapse``. + AssertionError: If :func:`blacklist_fn` does not have the signature: ``blacklist( synapse: YourSynapse ) -> tuple[bool, str]``. + AssertionError: If :func:`priority_fn` does not have the signature: ``priority( synapse: YourSynapse ) -> float``. + AssertionError: If :func:`verify_fn` does not have the signature: ``verify( synapse: YourSynapse ) -> None``. 
+ + Returns: + self: Returns the instance of the AxonServer class for potential method chaining. + + Example Usage:: + + def forward_custom(synapse: MyCustomSynapse) -> MyCustomSynapse: + # Custom logic for processing the request + return synapse + + def blacklist_custom(synapse: MyCustomSynapse) -> tuple[bool, str]: + return True, "Allowed!" + + def priority_custom(synapse: MyCustomSynapse) -> float: + return 1.0 + + def verify_custom(synapse: MyCustomSynapse): + # Custom logic for verifying the request + pass + + my_axon = bittensor.Axon(...) + my_axon.attach(forward_fn=forward_custom, verify_fn=verify_custom) + + Note: + The :func:`attach` method is fundamental in setting up the Axon server's request handling capabilities, + enabling it to participate effectively and securely in the Bittensor network. The flexibility + offered by this method allows developers to tailor the Axon's behavior to specific requirements and + use cases. + """ + forward_sig = signature(forward_fn) + try: + first_param = next(iter(forward_sig.parameters.values())) + except StopIteration: + raise ValueError( + "The forward_fn first argument must be a subclass of bittensor.Synapse, but it has no arguments" + ) + + param_class = first_param.annotation + assert issubclass( + param_class, Synapse + ), "The first argument of forward_fn must inherit from bittensor.Synapse" + request_name = param_class.__name__ + + async def endpoint(*args, **kwargs): + start_time = time.time() + response = forward_fn(*args, **kwargs) + if isinstance(response, Awaitable): + response = await response + if isinstance(response, Synapse): + return await self.middleware_cls.synapse_to_response( + synapse=response, start_time=start_time + ) + else: + response_synapse = getattr(response, "synapse", None) + if response_synapse is None: + warnings.warn( + "The response synapse is None. The input synapse will be used as the response synapse. 
" + "Reliance on forward_fn modifying input synapse as a side-effects is deprecated. " + "Explicitly set `synapse` on response object instead.", + DeprecationWarning, + ) + # Replace with `return response` in next major version + response_synapse = args[0] + + return await self.middleware_cls.synapse_to_response( + synapse=response_synapse, + start_time=start_time, + response_override=response, + ) + + return_annotation = forward_sig.return_annotation + + if isinstance(return_annotation, type) and issubclass( + return_annotation, Synapse + ): + if issubclass( + return_annotation, + StreamingSynapse, + ): + warnings.warn( + "The forward_fn return annotation is a subclass of bittensor.StreamingSynapse. " + "Most likely the correct return annotation would be BTStreamingResponse." + ) + else: + return_annotation = JSONResponse + + endpoint.__signature__ = Signature( # type: ignore + parameters=list(forward_sig.parameters.values()), + return_annotation=return_annotation, + ) + + # Add the endpoint to the router, making it available on both GET and POST methods + self.router.add_api_route( + path=f"/{request_name}", + endpoint=endpoint, + methods=["GET", "POST"], + dependencies=[Depends(self.verify_body_integrity)], + ) + self.app.include_router(self.router) + + # Check the signature of blacklist_fn, priority_fn and verify_fn if they are provided + expected_params = [ + Parameter( + name="synapse", + kind=Parameter.POSITIONAL_OR_KEYWORD, + annotation=forward_sig.parameters[ + list(forward_sig.parameters)[0] + ].annotation, + ) + ] + if blacklist_fn: + blacklist_sig = Signature( + expected_params, return_annotation=tuple[bool, str] + ) + assert ( + signature(blacklist_fn) == blacklist_sig + ), f"The blacklist_fn function must have the signature: blacklist( synapse: {request_name} ) -> tuple[bool, str]" + if priority_fn: + priority_sig = Signature(expected_params, return_annotation=float) + assert ( + signature(priority_fn) == priority_sig + ), f"The priority_fn function 
must have the signature: priority( synapse: {request_name} ) -> float" + if verify_fn: + verify_sig = Signature(expected_params, return_annotation=None) + assert ( + signature(verify_fn) == verify_sig + ), f"The verify_fn function must have the signature: verify( synapse: {request_name} ) -> None" + + # Store functions in appropriate attribute dictionaries + self.forward_class_types[request_name] = param_class + self.blacklist_fns[request_name] = blacklist_fn + self.priority_fns[request_name] = priority_fn + self.verify_fns[request_name] = ( + verify_fn or self.default_verify + ) # Use 'default_verify' if 'verify_fn' is None + self.forward_fns[request_name] = forward_fn + + return self + + @classmethod + def config(cls) -> "Config": + """ + Parses the command-line arguments to form a Bittensor configuration object. + + Returns: + bittensor.core.config.Config: Configuration object with settings from command-line arguments. + """ + parser = argparse.ArgumentParser() + Axon.add_args(parser) # Add specific axon-related arguments + return Config(parser, args=[]) + + @classmethod + def help(cls): + """Prints the help text (list of command-line arguments and their descriptions) to stdout.""" + parser = argparse.ArgumentParser() + Axon.add_args(parser) # Add specific axon-related arguments + print(cls.__new__.__doc__) # Print docstring of the class + parser.print_help() # Print parser's help text + + @classmethod + def add_args(cls, parser: argparse.ArgumentParser, prefix: Optional[str] = None): + """ + Adds AxonServer-specific command-line arguments to the argument parser. + + Args: + parser (argparse.ArgumentParser): Argument parser to which the arguments will be added. + prefix (Optional[str]): Prefix to add to the argument names. Defaults to None. + + Note: + Environment variables are used to define default values for the arguments. + """ + prefix_str = "" if prefix is None else prefix + "." 
+ try: + # Add command-line arguments to the parser + parser.add_argument( + "--" + prefix_str + "axon.port", + type=int, + help="The local port this axon endpoint is bound to. i.e. 8091", + default=DEFAULTS.axon.port, + ) + parser.add_argument( + "--" + prefix_str + "axon.ip", + type=str, + help="""The local ip this axon binds to. ie. [::]""", + default=DEFAULTS.axon.ip, + ) + parser.add_argument( + "--" + prefix_str + "axon.external_port", + type=int, + required=False, + help="""The public port this axon broadcasts to the network. i.e. 8091""", + default=DEFAULTS.axon.external_port, + ) + parser.add_argument( + "--" + prefix_str + "axon.external_ip", + type=str, + required=False, + help="""The external ip this axon broadcasts to the network to. ie. [::]""", + default=DEFAULTS.axon.external_ip, + ) + parser.add_argument( + "--" + prefix_str + "axon.max_workers", + type=int, + help="""The maximum number connection handler threads working simultaneously on this endpoint. + The grpc server distributes new worker threads to service requests up to this number.""", + default=DEFAULTS.axon.max_workers, + ) + + except argparse.ArgumentError: + # Exception handling for re-parsing arguments + pass + + async def verify_body_integrity(self, request: "Request"): + """ + The ``verify_body_integrity`` method in the Bittensor framework is a key security function within the + Axon server's middleware. It is responsible for ensuring the integrity of the body of incoming HTTP + requests. + + It asynchronously verifies the integrity of the body of a request by comparing the hash of required fields + with the corresponding hashes provided in the request headers. This method is critical for ensuring + that the incoming request payload has not been altered or tampered with during transmission, establishing + a level of trust and security between the sender and receiver in the network. 
+ + Args: + request (Request): The incoming FastAPI request object containing both headers and the request body. + + Returns: + dict: Returns the parsed body of the request as a dictionary if all the hash comparisons match, indicating that the body is intact and has not been tampered with. + + Raises: + JSONResponse: Raises a JSONResponse with a 400 status code if any of the hash comparisons fail, indicating a potential integrity issue with the incoming request payload. The response includes the detailed error message specifying which field has a hash mismatch. + + This method performs several key functions: + + 1. Decoding and loading the request body for inspection. + 2. Gathering required field names for hash comparison from the Axon configuration. + 3. Loading and parsing the request body into a dictionary. + 4. Reconstructing the Synapse object and recomputing the hash for verification and logging. + 5. Comparing the recomputed hash with the hash provided in the request headers for verification. + + Note: + The integrity verification is an essential step in ensuring the security of the data exchange within the Bittensor network. It helps prevent tampering and manipulation of data during transit, thereby maintaining the reliability and trust in the network communication. 
+ """ + # Await and load the request body, so we can inspect it + body = await request.body() + request_body = body.decode() if isinstance(body, bytes) else body + + request_name = request.url.path.split("/")[1] + + # Load the body dict and check if all required field hashes match + body_dict = json.loads(request_body) + + # Reconstruct the synapse object from the body dict and recompute the hash + syn = self.forward_class_types[request_name](**body_dict) # type: ignore + parsed_body_hash = syn.body_hash # Rehash the body from request + + body_hash = request.headers.get("computed_body_hash", "") + if parsed_body_hash != body_hash: + raise ValueError( + f"Hash mismatch between header body hash {body_hash} and parsed body hash {parsed_body_hash}" + ) + + # If body is good, return the parsed body so that it can be passed onto the route function + return body_dict + + @classmethod + def check_config(cls, config: "Config"): + """ + This method checks the configuration for the axon's port and wallet. + + Args: + config (bittensor.core.config.Config): The config object holding axon settings. + + Raises: + AssertionError: If the axon or external ports are not in range [1024, 65535] + """ + assert ( + 1024 < config.axon.port < 65535 + ), "Axon port must be in range [1024, 65535]" + + assert config.axon.external_port is None or ( + 1024 < config.axon.external_port < 65535 + ), "External port must be in range [1024, 65535]" + + def to_string(self): + """Provides a human-readable representation of the AxonInfo for this Axon.""" + return self.info().to_string() + + def __str__(self) -> str: + """Provides a human-readable representation of the Axon instance.""" + _started = "started" if self.started else "stopped" + _keys = list(self.forward_fns.keys()) + return f"Axon({self.ip}, {self.port}, {self.wallet.hotkey.ss58_address}, {_started}, {_keys})" + + def __repr__(self) -> str: + """ + Provides a machine-readable (unambiguous) representation of the Axon instance. 
+ It is made identical to __str__ in this case. + """ + return self.__str__() + + def __del__(self): + """ + This magic method is called when the Axon object is about to be destroyed. + It ensures that the Axon server shuts down properly. + """ + self.stop() + + def start(self) -> "Axon": + """ + Starts the Axon server and its underlying FastAPI server thread, transitioning the state of the + Axon instance to ``started``. This method initiates the server's ability to accept and process + incoming network requests, making it an active participant in the Bittensor network. + + The start method triggers the FastAPI server associated with the Axon to begin listening for + incoming requests. It is a crucial step in making the neuron represented by this Axon operational + within the Bittensor network. + + Returns: + bittensor.core.axon.Axon: The Axon instance in the 'started' state. + + Example:: + + my_axon = bittensor.Axon(...) + ... # setup axon, attach functions, etc. + my_axon.start() # Starts the axon server + + Note: + After invoking this method, the Axon is ready to handle requests as per its configured endpoints and custom logic. + """ + self.fast_server.start() + self.started = True + return self + + def stop(self) -> "Axon": + """ + Stops the Axon server and its underlying GRPC server thread, transitioning the state of the Axon + instance to ``stopped``. This method ceases the server's ability to accept new network requests, + effectively removing the neuron's server-side presence in the Bittensor network. + + By stopping the FastAPI server, the Axon ceases to listen for incoming requests, and any existing + connections are gracefully terminated. This function is typically used when the neuron is being + shut down or needs to temporarily go offline. + + Returns: + bittensor.core.axon.Axon: The Axon instance in the 'stopped' state. + + Example:: + + my_axon = bittensor.Axon(...) + my_axon.start() + ... 
+ my_axon.stop() # Stops the axon server + + + Note: + It is advisable to ensure that all ongoing processes or requests are completed or properly handled before invoking this method. + """ + self.fast_server.stop() + self.started = False + return self + + def serve(self, netuid: int, subtensor: Optional["Subtensor"] = None) -> "Axon": + """ + Serves the Axon on the specified subtensor connection using the configured wallet. This method + registers the Axon with a specific subnet within the Bittensor network, identified by the ``netuid``. + It links the Axon to the broader network, allowing it to participate in the decentralized exchange + of information. + + Args: + netuid (int): The unique identifier of the subnet to register on. This ID is essential for the Axon to correctly position itself within the Bittensor network topology. + subtensor (Optional[bittensor.core.subtensor.Subtensor]): The subtensor connection to use for serving. If not provided, a new connection is established based on default configurations. + + Returns: + bittensor.core.axon.Axon: The Axon instance that is now actively serving on the specified subtensor. + + Example:: + + my_axon = bittensor.Axon(...) + subtensor = bt.subtensor(network="local") # Local by default + my_axon.serve(netuid=1, subtensor=subtensor) # Serves the axon on subnet with netuid 1 + + Note: + The ``serve`` method is crucial for integrating the Axon into the Bittensor network, allowing it + to start receiving and processing requests from other neurons. + """ + if subtensor is not None and hasattr(subtensor, "serve_axon"): + subtensor.serve_axon(netuid=netuid, axon=self) + return self + + async def default_verify(self, synapse: "Synapse"): + """ + This method is used to verify the authenticity of a received message using a digital signature. + + It ensures that the message was not tampered with and was sent by the expected sender. 
+ + The :func:`default_verify` method in the Bittensor framework is a critical security function within the + Axon server. It is designed to authenticate incoming messages by verifying their digital + signatures. This verification ensures the integrity of the message and confirms that it was + indeed sent by the claimed sender. The method plays a pivotal role in maintaining the trustworthiness + and reliability of the communication within the Bittensor network. + + Key Features + Security Assurance + The default_verify method is crucial for ensuring the security of the Bittensor network. By verifying digital signatures, it guards against unauthorized access + and data manipulation. + + Preventing Replay Attacks + The method checks for increasing nonce values, which is a vital + step in preventing replay attacks. A replay attack involves an adversary reusing or + delaying the transmission of a valid data transmission to deceive the receiver. + The first time a nonce is seen, it is checked for freshness by ensuring it is + within an acceptable delta time range. + + Authenticity and Integrity Checks + By verifying that the message's digital signature matches + its content, the method ensures the message's authenticity (it comes from the claimed + sender) and integrity (it hasn't been altered during transmission). + + Trust in Communication + This method fosters trust in the network communication. Neurons + (nodes in the Bittensor network) can confidently interact, knowing that the messages they + receive are genuine and have not been tampered with. + + Cryptographic Techniques + The method's reliance on asymmetric encryption techniques is a + cornerstone of modern cryptographic security, ensuring that only entities with the correct + cryptographic keys can participate in secure communication. + + Args: + synapse(bittensor.core.synapse.Synapse): bittensor request synapse. + + Raises: + Exception: If the ``receiver_hotkey`` doesn't match with ``self.receiver_hotkey``. 
+ Exception: If the nonce is not larger than the previous nonce for the same endpoint key. + Exception: If the signature verification fails. + + After successful verification, the nonce for the given endpoint key is updated. + + Note: + The verification process assumes the use of an asymmetric encryption algorithm, + where the sender signs the message with their private key and the receiver verifies the + signature using the sender's public key. + """ + # Build the keypair from the dendrite_hotkey + if synapse.dendrite is not None: + keypair = Keypair(ss58_address=synapse.dendrite.hotkey) + + # Build the signature messages. + message = f"{synapse.dendrite.nonce}.{synapse.dendrite.hotkey}.{self.wallet.hotkey.ss58_address}.{synapse.dendrite.uuid}.{synapse.computed_body_hash}" + + # Build the unique endpoint key. + endpoint_key = f"{synapse.dendrite.hotkey}:{synapse.dendrite.uuid}" + + # Requests must have nonces to be safe from replays + if synapse.dendrite.nonce is None: + raise Exception("Missing Nonce") + + # Newer nonce structure post v7.2 + if ( + synapse.dendrite.version is not None + and synapse.dendrite.version >= V_7_2_0 + ): + # If we don't have a nonce stored, ensure that the nonce falls within + # a reasonable delta. 
+ current_time_ns = time.time_ns() + allowed_window_ns = allowed_nonce_window_ns( + current_time_ns, synapse.timeout + ) + + if ( + self.nonces.get(endpoint_key) is None + and synapse.dendrite.nonce <= allowed_window_ns + ): + diff_seconds, allowed_delta_seconds = calculate_diff_seconds( + current_time_ns, synapse.timeout, synapse.dendrite.nonce + ) + raise Exception( + f"Nonce is too old: acceptable delta is {allowed_delta_seconds:.2f} seconds but request was {diff_seconds:.2f} seconds old" + ) + + # If a nonce is stored, ensure the new nonce + # is greater than the previous nonce + if ( + self.nonces.get(endpoint_key) is not None + and synapse.dendrite.nonce <= self.nonces[endpoint_key] + ): + raise Exception("Nonce is too old, a newer one was last processed") + # Older nonce structure pre v7.2 + else: + if ( + self.nonces.get(endpoint_key) is not None + and synapse.dendrite.nonce <= self.nonces[endpoint_key] + ): + raise Exception("Nonce is too old, a newer one was last processed") + + if not keypair.verify(message, synapse.dendrite.signature): + raise Exception( + f"Signature mismatch with {message} and {synapse.dendrite.signature}" + ) + + # Success + self.nonces[endpoint_key] = synapse.dendrite.nonce # type: ignore + else: + raise SynapseDendriteNoneException(synapse=synapse) + + +def create_error_response(synapse: "Synapse") -> "JSONResponse": + """Creates an error response based on the provided synapse object. + + Args: + synapse (bittensor.core.synapse.Synapse): The synapse object containing details about the request and the associated axon. + + Returns: + JSONResponse: A JSON response with a status code and content indicating the error message. 
+ """ + if synapse.axon is None: + return JSONResponse( + status_code=400, + headers=synapse.to_headers(), + content={"message": "Invalid request name"}, + ) + else: + return JSONResponse( + status_code=synapse.axon.status_code or 400, + headers=synapse.to_headers(), + content={"message": synapse.axon.status_message}, + ) + + +def log_and_handle_error( + synapse: "Synapse", + exception: Exception, + status_code: Optional[int] = None, + start_time: Optional[float] = None, +) -> "Synapse": + """ + Logs the error and updates the synapse object with the appropriate error details. + + Args: + synapse (bittensor.core.synapse.Synapse): The synapse object to be updated with error information. + exception (Exception): The exception that was raised and needs to be logged and handled. + status_code (Optional[int]): The HTTP status code to be set on the synapse object. Defaults to None. + start_time (Optional[float]): The timestamp marking the start of the processing, used to calculate process time. Defaults to None. + + Returns: + Synapse: The updated synapse object with error details. + """ + if isinstance(exception, SynapseException): + synapse = exception.synapse or synapse + + logging.trace(f"Forward handled exception: {exception}") + else: + logging.trace(f"Forward exception: {traceback.format_exc()}") + + if synapse.axon is None: + synapse.axon = TerminalInfo() + + # Set the status code of the synapse to the given status code. 
+ error_id = str(uuid.uuid4()) + error_type = exception.__class__.__name__ + + # Log the detailed error message for internal use + logging.error(f"{error_type}#{error_id}: {exception}") + + if not status_code and synapse.axon.status_code != 100: + status_code = synapse.axon.status_code + status_message = synapse.axon.status_message + if isinstance(exception, SynapseException): + if not status_code: + if isinstance(exception, PriorityException): + status_code = 503 + elif isinstance(exception, UnknownSynapseError): + status_code = 404 + elif isinstance(exception, BlacklistedException): + status_code = 403 + elif isinstance(exception, NotVerifiedException): + status_code = 401 + elif isinstance(exception, (InvalidRequestNameError, SynapseParsingError)): + status_code = 400 + else: + status_code = 500 + status_message = status_message or str(exception) + else: + status_code = status_code or 500 + status_message = status_message or f"Internal Server Error #{error_id}" + + # Set a user-friendly error message + synapse.axon.status_code = status_code + synapse.axon.status_message = status_message + + if start_time: + # Calculate the processing time by subtracting the start time from the current time. + synapse.axon.process_time = str(time.time() - start_time) # type: ignore + + return synapse + + +class AxonMiddleware(BaseHTTPMiddleware): + """ + The `AxonMiddleware` class is a key component in the Axon server, responsible for processing all incoming requests. + + It handles the essential tasks of verifying requests, executing blacklist checks, + running priority functions, and managing the logging of messages and errors. Additionally, the class + is responsible for updating the headers of the response and executing the requested functions. + + This middleware acts as an intermediary layer in request handling, ensuring that each request is + processed according to the defined rules and protocols of the Bittensor network. 
It plays a pivotal + role in maintaining the integrity and security of the network communication. + + Args: + app (FastAPI): An instance of the FastAPI application to which this middleware is attached. + axon (bittensor.core.axon.Axon): The Axon instance that will process the requests. + + The middleware operates by intercepting incoming requests, performing necessary preprocessing + (like verification and priority assessment), executing the request through the Axon's endpoints, and + then handling any postprocessing steps such as response header updating and logging. + """ + + def __init__(self, app: "AxonMiddleware", axon: "Axon"): + """ + Initialize the AxonMiddleware class. + + Args: + app (bittensor.core.axon.AxonMiddleware): An instance of the application where the middleware processor is used. + axon (bittensor.core.axon.Axon): The axon instance used to process the requests. + """ + super().__init__(app) + self.axon = axon + + async def dispatch( + self, request: "Request", call_next: "RequestResponseEndpoint" + ) -> Response: + """ + Asynchronously processes incoming HTTP requests and returns the corresponding responses. This + method acts as the central processing unit of the AxonMiddleware, handling each step in the + request lifecycle. + + Args: + request (Request): The incoming HTTP request to be processed. + call_next (RequestResponseEndpoint): A callable that processes the request and returns a response. + + Returns: + Response: The HTTP response generated after processing the request. + + This method performs several key functions: + + 1. Request Preprocessing: Sets up Synapse object from request headers and fills necessary information. + 2. Logging: Logs the start of request processing. + 3. Blacklist Checking: Verifies if the request is blacklisted. + 4. Request Verification: Ensures the authenticity and integrity of the request. + 5. Priority Assessment: Evaluates and assigns priority to the request. + 6. 
Request Execution: Calls the next function in the middleware chain to process the request. + 7. Response Postprocessing: Updates response headers and logs the end of the request processing. + + The method also handles exceptions and errors that might occur during each stage, ensuring that + appropriate responses are returned to the client. + """ + # Records the start time of the request processing. + start_time = time.time() + + try: + # Set up the synapse from its headers. + try: + synapse: "Synapse" = await self.preprocess(request) + except Exception as exc: + if isinstance(exc, SynapseException) and exc.synapse is not None: + synapse = exc.synapse + else: + synapse = Synapse() + raise + + # Logs the start of the request processing + if synapse.dendrite is not None: + logging.trace( + f"axon | <-- | {request.headers.get('content-length', -1)} B | {synapse.name} | {synapse.dendrite.hotkey} | {synapse.dendrite.ip}:{synapse.dendrite.port} | 200 | Success " + ) + else: + logging.trace( + f"axon | <-- | {request.headers.get('content-length', -1)} B | {synapse.name} | None | None | 200 | Success " + ) + + # Call the blacklist function + await self.blacklist(synapse) + + # Call verify and return the verified request + await self.verify(synapse) + + # Call the priority function + await self.priority(synapse) + + # Call the run function + response = await self.run(synapse, call_next, request) + + # Handle errors related to preprocess. + except InvalidRequestNameError as e: + if synapse.axon is None: + synapse.axon = TerminalInfo() + synapse.axon.status_code = 400 + synapse.axon.status_message = str(e) + synapse = log_and_handle_error(synapse, e, start_time=start_time) + response = create_error_response(synapse) + + except SynapseException as e: + synapse = e.synapse or synapse + synapse = log_and_handle_error(synapse, e, start_time=start_time) + response = create_error_response(synapse) + + # Handle all other errors. 
+ except Exception as e: + synapse = log_and_handle_error(synapse, e, start_time=start_time) + response = create_error_response(synapse) + + # Logs the end of request processing and returns the response + finally: + # Log the details of the processed synapse, including total size, name, hotkey, IP, port, + # status code, and status message, using the debug level of the logger. + if synapse.dendrite is not None and synapse.axon is not None: + logging.trace( + f"axon | --> | {response.headers.get('content-length', -1)} B | {synapse.name} | {synapse.dendrite.hotkey} | {synapse.dendrite.ip}:{synapse.dendrite.port} | {synapse.axon.status_code} | {synapse.axon.status_message}" + ) + elif synapse.axon is not None: + logging.trace( + f"axon | --> | {response.headers.get('content-length', -1)} B | {synapse.name} | None | None | {synapse.axon.status_code} | {synapse.axon.status_message}" + ) + else: + logging.trace( + f"axon | --> | {response.headers.get('content-length', -1)} B | {synapse.name} | None | None | 200 | Success " + ) + + # Return the response to the requester. + return response + + async def preprocess(self, request: "Request") -> "Synapse": + """ + Performs the initial processing of the incoming request. This method is responsible for + extracting relevant information from the request and setting up the Synapse object, which + represents the state and context of the request within the Axon server. + + Args: + request (Request): The incoming request to be preprocessed. + + Returns: + bittensor.core.synapse.Synapse: The Synapse object representing the preprocessed state of the request. + + The preprocessing involves: + + 1. Extracting the request name from the URL path. + 2. Creating a Synapse instance from the request headers using the appropriate class type. + 3. Filling in the Axon and Dendrite information into the Synapse object. + 4. Signing the Synapse from the Axon side using the wallet hotkey. 
+ + This method sets the foundation for the subsequent steps in the request handling process, + ensuring that all necessary information is encapsulated within the Synapse object. + """ + # Extracts the request name from the URL path. + try: + request_name = request.url.path.split("/")[1] + except Exception: + raise InvalidRequestNameError( + f"Improperly formatted request. Could not parser request {request.url.path}." + ) + + # Creates a synapse instance from the headers using the appropriate forward class type + # based on the request name obtained from the URL path. + request_synapse = self.axon.forward_class_types.get(request_name) + if request_synapse is None: + raise UnknownSynapseError( + f"Synapse name '{request_name}' not found. Available synapses {list(self.axon.forward_class_types.keys())}" + ) + + try: + synapse = request_synapse.from_headers(request.headers) # type: ignore + except Exception: + raise SynapseParsingError( + f"Improperly formatted request. Could not parse headers {request.headers} into synapse of type {request_name}." + ) + synapse.name = request_name + + # Fills the local axon information into the synapse. + synapse.axon.__dict__.update( + { + "version": str(version_as_int), + "uuid": str(self.axon.uuid), + "nonce": time.time_ns(), + "status_code": 100, + } + ) + + # Fills the dendrite information into the synapse. + synapse.dendrite.__dict__.update( + {"port": str(request.client.port), "ip": str(request.client.host)} # type: ignore + ) + + # Signs the synapse from the axon side using the wallet hotkey. + message = f"{synapse.axon.nonce}.{synapse.dendrite.hotkey}.{synapse.axon.hotkey}.{synapse.axon.uuid}" + synapse.axon.signature = f"0x{self.axon.wallet.hotkey.sign(message).hex()}" + + # Return the setup synapse. + return synapse + + async def verify(self, synapse: "Synapse"): + """ + Verifies the authenticity and integrity of the request. 
This method ensures that the incoming + request meets the predefined security and validation criteria. + + Args: + synapse (bittensor.core.synapse.Synapse): The Synapse object representing the request. + + Raises: + Exception: If the verification process fails due to unmet criteria or security concerns. + + The verification process involves: + + 1. Retrieving the specific verification function for the request's Synapse type. + 2. Executing the verification function and handling any exceptions that arise. + + Successful verification allows the request to proceed further in the processing pipeline, while + failure results in an appropriate exception being raised. + """ + # Start of the verification process. Verification is the process where we ensure that + # the incoming request is from a trusted source or fulfills certain requirements. + # We get a specific verification function from 'verify_fns' dictionary that corresponds + # to our request's name. Each request name (synapse name) has its unique verification function. + verify_fn = ( + self.axon.verify_fns.get(synapse.name) if synapse.name is not None else None + ) + + # If a verification function exists for the request's name + if verify_fn: + try: + # We attempt to run the verification function using the synapse instance + # created from the request. If this function runs without throwing an exception, + # it means that the verification was successful. + ( + await verify_fn(synapse) + if inspect.iscoroutinefunction(verify_fn) + else verify_fn(synapse) + ) + except Exception as e: + # If there was an exception during the verification process, we log that + # there was a verification exception. + logging.trace(f"Verify exception {str(e)}") + + # Check if the synapse.axon object exists + if synapse.axon is not None: + # We set the status code of the synapse to "401" which denotes an unauthorized access. + synapse.axon.status_code = 401 + else: + # If the synapse.axon object doesn't exist, raise an exception. 
+ raise Exception("Synapse.axon object is None") + + # We raise an exception to stop the process and return the error to the requester. + # The error message includes the original exception message. + raise NotVerifiedException( + f"Not Verified with error: {str(e)}", synapse=synapse + ) + + async def blacklist(self, synapse: "Synapse"): + """ + Checks if the request should be blacklisted. This method ensures that requests from disallowed + sources or with malicious intent are blocked from processing. This can be extremely useful for + preventing spam or other forms of abuse. The blacklist is a list of keys or identifiers that + are prohibited from accessing certain resources. + + Args: + synapse (bittensor.core.synapse.Synapse): The Synapse object representing the request. + + Raises: + Exception: If the request is found in the blacklist. + + The blacklist check involves: + + 1. Retrieving the blacklist checking function for the request's Synapse type. + 2. Executing the check and handling the case where the request is blacklisted. + + If a request is blacklisted, it is blocked, and an exception is raised to halt further processing. + """ + # A blacklist is a list of keys or identifiers + # that are prohibited from accessing certain resources. + # We retrieve the blacklist checking function from the 'blacklist_fns' dictionary + # that corresponds to the request's name (synapse name). + blacklist_fn = ( + self.axon.blacklist_fns.get(synapse.name) + if synapse.name is not None + else None + ) + + # If a blacklist checking function exists for the request's name + if blacklist_fn: + # We execute the blacklist checking function using the synapse instance as input. + # If the function returns True, it means that the key or identifier is blacklisted. + blacklisted, reason = ( + await blacklist_fn(synapse) + if inspect.iscoroutinefunction(blacklist_fn) + else blacklist_fn(synapse) + ) + if blacklisted: + # We log that the key or identifier is blacklisted. 
+ logging.trace(f"Blacklisted: {blacklisted}, {reason}") + + # Check if the synapse.axon object exists + if synapse.axon is not None: + # We set the status code of the synapse to "403" which indicates a forbidden access. + synapse.axon.status_code = 403 + else: + # If the synapse.axon object doesn't exist, raise an exception. + raise Exception("Synapse.axon object is None") + + # We raise an exception to halt the process and return the error message to the requester. + raise BlacklistedException( + f"Forbidden. Key is blacklisted: {reason}.", synapse=synapse + ) + + async def priority(self, synapse: "Synapse"): + """ + Executes the priority function for the request. This method assesses and assigns a priority + level to the request, determining its urgency and importance in the processing queue. + + Args: + synapse (bittensor.core.synapse.Synapse): The Synapse object representing the request. + + Raises: + Exception: If the priority assessment process encounters issues, such as timeouts. + + The priority function plays a crucial role in managing the processing load and ensuring that + critical requests are handled promptly. + """ + # Retrieve the priority function from the 'priority_fns' dictionary that corresponds + # to the request's name (synapse name). + priority_fn = self.axon.priority_fns.get(str(synapse.name), None) + + async def submit_task( + executor: "PriorityThreadPoolExecutor", priority: float + ) -> tuple[float, Any]: + """ + Submits the given priority function to the specified executor for asynchronous execution. + The function will run in the provided executor and return the priority value along with the result. + + Args: + executor (bittensor.core.threadpool.PriorityThreadPoolExecutor): The executor in which the priority function will be run. + priority (float): The priority function to be executed. + + Returns: + tuple: A tuple containing the priority value and the result of the priority function execution. 
+ """ + loop = asyncio.get_event_loop() + future = loop.run_in_executor(executor, lambda: priority) + result = await future + return priority, result + + # If a priority function exists for the request's name + if priority_fn: + try: + # Execute the priority function and get the priority value. + priority = ( + await priority_fn(synapse) + if inspect.iscoroutinefunction(priority_fn) + else priority_fn(synapse) + ) + + # Submit the task to the thread pool for execution with the given priority. + # The submit_task function will handle the execution and return the result. + _, result = await submit_task(self.axon.thread_pool, priority) + + except TimeoutError as e: + # If the execution of the priority function exceeds the timeout, + # it raises an exception to handle the timeout error. + logging.trace(f"TimeoutError: {str(e)}") + + # Set the status code of the synapse to 408 which indicates a timeout error. + if synapse.axon is not None: + synapse.axon.status_code = 408 + + # Raise an exception to stop the process and return an appropriate error message to the requester. + raise PriorityException( + f"Response timeout after: {synapse.timeout}s", synapse=synapse + ) + + async def run( + self, + synapse: "Synapse", + call_next: "RequestResponseEndpoint", + request: "Request", + ) -> "Response": + """ + Executes the requested function as part of the request processing pipeline. This method calls + the next function in the middleware chain to process the request and generate a response. + + Args: + synapse (bittensor.core.synapse.Synapse): The Synapse object representing the request. + call_next (RequestResponseEndpoint): The next function in the middleware chain to process requests. + request (Request): The original HTTP request. + + Returns: + Response: The HTTP response generated by processing the request. + + This method is a critical part of the request lifecycle, where the actual processing of the + request takes place, leading to the generation of a response. 
+ """ + assert isinstance(synapse, Synapse) + + try: + # The requested function is executed by calling the 'call_next' function, + # passing the original request as an argument. This function processes the request + # and returns the response. + response = await call_next(request) + + except Exception as e: + # Log the exception for debugging purposes. + logging.trace(f"Run exception: {str(e)}") + raise + + # Return the starlet response + return response + + @classmethod + async def synapse_to_response( + cls, + synapse: "Synapse", + start_time: float, + *, + response_override: Optional["Response"] = None, + ) -> "Response": + """ + Converts the Synapse object into a JSON response with HTTP headers. + + Args: + synapse (bittensor.core.synapse.Synapse): The Synapse object representing the request. + start_time (float): The timestamp when the request processing started. + response_override: Instead of serializing the synapse, mutate the provided response object. This is only really useful for StreamingSynapse responses. + + Returns: + Response: The final HTTP response, with updated headers, ready to be sent back to the client. + + Postprocessing is the last step in the request handling process, ensuring that the response is + properly formatted and contains all necessary information. 
+ """ + if synapse.axon is None: + synapse.axon = TerminalInfo() + + if synapse.axon.status_code is None: + synapse.axon.status_code = 200 + + if synapse.axon.status_code == 200 and not synapse.axon.status_message: + synapse.axon.status_message = "Success" + + synapse.axon.process_time = time.time() - start_time + + if response_override: + response = response_override + else: + serialized_synapse = await serialize_response(response_content=synapse) + response = JSONResponse( + status_code=synapse.axon.status_code, + content=serialized_synapse, + ) + + try: + updated_headers = synapse.to_headers() + except Exception as e: + raise PostProcessException( + f"Error while parsing response headers. Postprocess exception: {str(e)}.", + synapse=synapse, + ) from e + + try: + response.headers.update(updated_headers) + except Exception as e: + raise PostProcessException( + f"Error while updating response headers. Postprocess exception: {str(e)}.", + synapse=synapse, + ) from e + + return response diff --git a/bittensor/core/chain_data/__init__.py b/bittensor/core/chain_data/__init__.py new file mode 100644 index 0000000000..9ad1e38881 --- /dev/null +++ b/bittensor/core/chain_data/__init__.py @@ -0,0 +1,22 @@ +""" +This module provides data structures and functions for working with the Bittensor network, including neuron and subnet +information, SCALE encoding/decoding, and custom RPC type registry. 
+""" + +from scalecodec.types import GenericCall + +from .axon_info import AxonInfo +from .delegate_info import DelegateInfo +from .delegate_info_lite import DelegateInfoLite +from .ip_info import IPInfo +from .neuron_info import NeuronInfo +from .neuron_info_lite import NeuronInfoLite +from .prometheus_info import PrometheusInfo +from .proposal_vote_data import ProposalVoteData +from .scheduled_coldkey_swap_info import ScheduledColdkeySwapInfo +from .stake_info import StakeInfo +from .subnet_hyperparameters import SubnetHyperparameters +from .subnet_info import SubnetInfo +from .utils import custom_rpc_type_registry + +ProposalCallData = GenericCall diff --git a/bittensor/core/chain_data/axon_info.py b/bittensor/core/chain_data/axon_info.py new file mode 100644 index 0000000000..eee9cb82a1 --- /dev/null +++ b/bittensor/core/chain_data/axon_info.py @@ -0,0 +1,163 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""
This module defines the `AxonInfo` class, a data structure used to represent information about an axon endpoint
in the bittensor network.
"""

import json
from dataclasses import asdict, dataclass
from typing import Any, Union


@dataclass
class AxonInfo:
    """
    The `AxonInfo` class represents information about an axon endpoint in the bittensor network. This includes
    properties such as IP address, ports, and relevant keys.

    Attributes:
        version (int): The version of the axon endpoint.
        ip (str): The IP address of the axon endpoint.
        port (int): The port number the axon endpoint uses.
        ip_type (int): The type of IP protocol (e.g., IPv4 or IPv6).
        hotkey (str): The hotkey associated with the axon endpoint.
        coldkey (str): The coldkey associated with the axon endpoint.
        protocol (int): The protocol version (default is 4).
        placeholder1 (int): Reserved field (default is 0).
        placeholder2 (int): Reserved field (default is 0).
    """

    version: int
    ip: str
    port: int
    ip_type: int
    hotkey: str
    coldkey: str
    protocol: int = 4
    placeholder1: int = 0
    placeholder2: int = 0

    @property
    def is_serving(self) -> bool:
        """True if the endpoint is serving (i.e. it has a non-zero IP set)."""
        return self.ip != "0.0.0.0"

    def ip_str(self) -> str:
        """Return the whole IP (type, address and port) as a single string."""
        # Imported lazily so the plain dataclass stays importable without
        # pulling in the networking helpers at module-import time.
        from bittensor.utils import networking

        return networking.ip__str__(self.ip_type, self.ip, self.port)

    def __eq__(self, other: object) -> bool:
        # Bug fix: the previous implementation unconditionally accessed
        # `other.version`, so comparing against any non-AxonInfo value other
        # than None raised AttributeError. Returning NotImplemented lets
        # Python fall back to the reflected comparison (ultimately False).
        if other is None:
            return False
        if not isinstance(other, AxonInfo):
            return NotImplemented
        # NOTE(review): protocol/placeholder1/placeholder2 are excluded from
        # equality, matching the original implementation — confirm intended.
        return (
            self.version,
            self.ip,
            self.port,
            self.ip_type,
            self.coldkey,
            self.hotkey,
        ) == (
            other.version,
            other.ip,
            other.port,
            other.ip_type,
            other.coldkey,
            other.hotkey,
        )

    def __str__(self):
        return f"AxonInfo( {self.ip_str()}, {self.hotkey}, {self.coldkey}, {self.version} )"

    def __repr__(self):
        return self.__str__()

    def to_string(self) -> str:
        """Converts the `AxonInfo` object to a string representation using JSON."""
        try:
            return json.dumps(asdict(self))
        except (TypeError, ValueError) as e:
            from bittensor.utils.btlogging import logging

            logging.error(f"Error converting AxonInfo to string: {e}")
            # Fall back to a serialized default instance.
            return AxonInfo(0, "", 0, 0, "", "").to_string()

    @classmethod
    def from_string(cls, json_string: str) -> "AxonInfo":
        """
        Creates an `AxonInfo` object from its string representation using JSON.

        Args:
            json_string (str): The JSON string representation of the AxonInfo object.

        Returns:
            AxonInfo: An instance of AxonInfo created from the JSON string. If decoding fails, returns a default
                `AxonInfo` object with zeroed fields.
        """
        try:
            data = json.loads(json_string)
            return cls(**data)
        except json.JSONDecodeError as e:
            error = f"Error decoding JSON: {e}"
        except TypeError as e:
            error = f"Type error: {e}"
        except ValueError as e:
            error = f"Value error: {e}"
        from bittensor.utils.btlogging import logging

        logging.error(error)
        return AxonInfo(0, "", 0, 0, "", "")

    @classmethod
    def from_neuron_info(cls, neuron_info: dict) -> "AxonInfo":
        """
        Converts a neuron-info dictionary (as returned by the chain) to an `AxonInfo` object.

        Args:
            neuron_info (dict): A dictionary containing "axon_info", "hotkey" and "coldkey" keys.

        Returns:
            instance (AxonInfo): An instance of AxonInfo created from the dictionary.
        """
        from bittensor.utils import networking

        return cls(
            version=neuron_info["axon_info"]["version"],
            # The chain stores the IP as an integer; convert to dotted/colon form.
            ip=networking.int_to_ip(int(neuron_info["axon_info"]["ip"])),
            port=neuron_info["axon_info"]["port"],
            ip_type=neuron_info["axon_info"]["ip_type"],
            hotkey=neuron_info["hotkey"],
            coldkey=neuron_info["coldkey"],
        )

    def to_parameter_dict(
        self,
    ) -> Union[dict[str, Union[int, str]], "torch.nn.ParameterDict"]:
        """Returns a torch ParameterDict or a plain dict of this axon info, depending on the USE_TORCH flag."""
        from bittensor.utils.registration import torch, use_torch

        if use_torch():
            return torch.nn.ParameterDict(self.__dict__)
        else:
            return self.__dict__

    @classmethod
    def from_parameter_dict(
        cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"]
    ) -> "AxonInfo":
        """Returns an axon_info object from a torch parameter_dict or a parameter dict."""
        from bittensor.utils.registration import use_torch

        if use_torch():
            return cls(**dict(parameter_dict))
        else:
            return cls(**parameter_dict)
# --- bittensor/core/chain_data/delegate_info.py ---
from dataclasses import dataclass
from typing import Optional, Any

from scalecodec.utils.ss58 import ss58_encode

from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType
from bittensor.core.settings import SS58_FORMAT
from bittensor.utils import u16_normalized_float
from bittensor.utils.balance import Balance


@dataclass
class DelegateInfo:
    """
    Dataclass for delegate information. For a lighter version of this class, see ``DelegateInfoLite``.

    Args:
        hotkey_ss58 (str): Hotkey of the delegate for which the information is being fetched.
        total_stake (Balance): Total stake of the delegate.
        nominators (list[tuple[str, Balance]]): List of nominators of the delegate and their stake.
        take (float): Take of the delegate as a percentage.
        owner_ss58 (str): Coldkey of the owner.
        registrations (tuple[int]): Subnets that the delegate is registered on.
        validator_permits (list[int]): Subnets that the delegate is allowed to validate on.
        return_per_1000 (Balance): Return per 1000 TAO, for the delegate over a day.
        total_daily_return (Balance): Total daily return of the delegate.
    """

    hotkey_ss58: str  # Hotkey of delegate
    total_stake: Balance  # Total stake of the delegate
    nominators: list[
        tuple[str, Balance]
    ]  # List of nominators of the delegate and their stake
    owner_ss58: str  # Coldkey of owner
    take: float  # Take of the delegate as a percentage
    validator_permits: list[
        int
    ]  # List of subnets that the delegate is allowed to validate on
    registrations: tuple[int]  # List of subnets that the delegate is registered on
    return_per_1000: Balance  # Return per 1000 tao of the delegate over a day
    total_daily_return: Balance  # Total daily return of the delegate

    @classmethod
    def fix_decoded_values(cls, decoded: Any) -> "DelegateInfo":
        """Normalizes a raw SCALE-decoded dict into a typed ``DelegateInfo``."""

        return cls(
            hotkey_ss58=ss58_encode(decoded["delegate_ss58"], SS58_FORMAT),
            owner_ss58=ss58_encode(decoded["owner_ss58"], SS58_FORMAT),
            # `take` is stored on-chain as a u16; normalize to a float.
            take=u16_normalized_float(decoded["take"]),
            nominators=[
                (
                    ss58_encode(nom[0], SS58_FORMAT),
                    Balance.from_rao(nom[1]),
                )
                for nom in decoded["nominators"]
            ],
            # Total stake is derived here as the sum of all nominator stakes.
            total_stake=Balance.from_rao(
                sum([nom[1] for nom in decoded["nominators"]])
            ),
            validator_permits=decoded["validator_permits"],
            registrations=decoded["registrations"],
            return_per_1000=Balance.from_rao(decoded["return_per_1000"]),
            total_daily_return=Balance.from_rao(decoded["total_daily_return"]),
        )

    @classmethod
    def from_vec_u8(cls, vec_u8: list[int]) -> Optional["DelegateInfo"]:
        """Returns a DelegateInfo object from a ``vec_u8``, or ``None`` for empty input / failed decode."""
        if len(vec_u8) == 0:
            return None

        decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo)
        if decoded is None:
            return None

        return DelegateInfo.fix_decoded_values(decoded)

    @classmethod
    def list_from_vec_u8(cls, vec_u8: list[int]) -> list["DelegateInfo"]:
        """Returns a list of DelegateInfo objects from a ``vec_u8`` (empty list on failed decode)."""
        decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo, is_vec=True)

        if decoded is None:
            return []

        return [DelegateInfo.fix_decoded_values(d) for d in decoded]

    @classmethod
    def delegated_list_from_vec_u8(
        cls, vec_u8: list[int]
    ) -> list[tuple["DelegateInfo", "Balance"]]:
        """Returns a list of Tuples of DelegateInfo objects, and Balance, from a ``vec_u8``.

        This is the list of delegates that the user has delegated to, and the amount of stake delegated.
        """
        decoded = from_scale_encoding(vec_u8, ChainDataType.DelegatedInfo, is_vec=True)
        if decoded is None:
            return []

        return [
            (DelegateInfo.fix_decoded_values(d), Balance.from_rao(s))
            for d, s in decoded
        ]


# --- bittensor/core/chain_data/delegate_info_lite.py ---
from dataclasses import dataclass


@dataclass
class DelegateInfoLite:
    """
    Dataclass for `DelegateInfoLite`. This is a lighter version of :class:`DelegateInfo`.

    Args:
        delegate_ss58 (str): Hotkey of the delegate for which the information is being fetched.
        take (float): Take of the delegate as a percentage.
        nominators (int): Count of the nominators of the delegate.
        owner_ss58 (str): Coldkey of the owner.
        registrations (list[int]): List of subnets that the delegate is registered on.
        validator_permits (list[int]): List of subnets that the delegate is allowed to validate on.
        return_per_1000 (int): Return per 1000 TAO, for the delegate over a day.
        total_daily_return (int): Total daily return of the delegate.
    """

    delegate_ss58: str  # Hotkey of delegate
    take: float  # Take of the delegate as a percentage
    nominators: int  # Count of the nominators of the delegate.
    owner_ss58: str  # Coldkey of owner
    registrations: list[int]  # List of subnets that the delegate is registered on
    validator_permits: list[
        int
    ]  # List of subnets that the delegate is allowed to validate on
    return_per_1000: int  # Return per 1000 tao for the delegate over a day
    total_daily_return: int  # Total daily return of the delegate


# --- bittensor/core/chain_data/ip_info.py ---
from dataclasses import dataclass
from typing import Optional, Any, Union

from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType
from bittensor.utils import networking as net
from bittensor.utils.registration import torch, use_torch


@dataclass
class IPInfo:
    """
    Dataclass representing IP information.

    Attributes:
        ip (str): The IP address as a string.
        ip_type (int): The type of the IP address (e.g., IPv4, IPv6).
        protocol (int): The protocol associated with the IP (e.g., TCP, UDP).
    """

    ip: str
    ip_type: int
    protocol: int

    def encode(self) -> dict[str, Any]:
        """Returns a dictionary of the IPInfo object that can be encoded."""
        return {
            "ip": net.ip_to_int(
                self.ip
            ),  # IP type and protocol are encoded together as a u8
            "ip_type_and_protocol": ((self.ip_type << 4) + self.protocol) & 0xFF,
        }

    @classmethod
    def from_vec_u8(cls, vec_u8: list[int]) -> Optional["IPInfo"]:
        """Returns an IPInfo object from a ``vec_u8``, or ``None`` for empty input / failed decode."""
        if len(vec_u8) == 0:
            return None

        decoded = from_scale_encoding(vec_u8, ChainDataType.IPInfo)
        if decoded is None:
            return None

        return IPInfo.fix_decoded_values(decoded)

    @classmethod
    def list_from_vec_u8(cls, vec_u8: list[int]) -> list["IPInfo"]:
        """Returns a list of IPInfo objects from a ``vec_u8`` (empty list on failed decode)."""
        decoded = from_scale_encoding(vec_u8, ChainDataType.IPInfo, is_vec=True)

        if decoded is None:
            return []

        return [IPInfo.fix_decoded_values(d) for d in decoded]

    @classmethod
    def fix_decoded_values(cls, decoded: dict) -> "IPInfo":
        """Returns an IPInfo object from a decoded IPInfo dictionary (splitting the packed type/protocol u8)."""
        return IPInfo(
            ip=net.int_to_ip(decoded["ip"]),
            ip_type=decoded["ip_type_and_protocol"] >> 4,  # high nibble
            protocol=decoded["ip_type_and_protocol"] & 0xF,  # low nibble
        )

    def to_parameter_dict(
        self,
    ) -> Union[dict[str, Union[str, int]], "torch.nn.ParameterDict"]:
        """Returns a torch ParameterDict or a plain dict of this IP info, depending on the USE_TORCH flag."""
        if use_torch():
            return torch.nn.ParameterDict(self.__dict__)
        else:
            return self.__dict__

    @classmethod
    def from_parameter_dict(
        cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"]
    ) -> "IPInfo":
        """Creates a IPInfo instance from a parameter dictionary."""
        if use_torch():
            return cls(**dict(parameter_dict))
        else:
            return cls(**parameter_dict)
# --- bittensor/core/chain_data/neuron_info.py ---
from dataclasses import dataclass
from typing import Optional, TYPE_CHECKING

import bt_decode
import netaddr

from bittensor.core.chain_data.axon_info import AxonInfo
from bittensor.core.chain_data.prometheus_info import PrometheusInfo
from bittensor.core.chain_data.utils import decode_account_id, process_stake_data
from bittensor.utils import u16_normalized_float
from bittensor.utils.balance import Balance

# for annotation purposes only; guarded to avoid a runtime import cycle
if TYPE_CHECKING:
    from bittensor.core.chain_data.neuron_info_lite import NeuronInfoLite


@dataclass
class NeuronInfo:
    """Represents the metadata of a neuron including keys, UID, stake, rankings, and other attributes.

    Attributes:
        hotkey (str): The hotkey associated with the neuron.
        coldkey (str): The coldkey associated with the neuron.
        uid (int): The unique identifier for the neuron.
        netuid (int): The network unique identifier for the neuron.
        active (int): The active status of the neuron.
        stake (Balance): The balance staked to this neuron.
        stake_dict (dict[str, Balance]): A dictionary mapping coldkey to the amount staked.
        total_stake (Balance): The total amount of stake.
        rank (float): The rank score of the neuron.
        emission (float): The emission rate.
        incentive (float): The incentive value.
        consensus (float): The consensus score.
        trust (float): The trust score.
        validator_trust (float): The validation trust score.
        dividends (float): The dividends value.
        last_update (int): The timestamp of the last update.
        validator_permit (bool): Validator permit status.
        weights (list[list[int]]): List of weights associated with the neuron.
        bonds (list[list[int]]): List of bonds associated with the neuron.
        pruning_score (int): The pruning score of the neuron.
        prometheus_info (Optional[PrometheusInfo]): Information related to Prometheus.
        axon_info (Optional[AxonInfo]): Information related to Axon.
        is_null (bool): Indicator if this is a null neuron.
    """

    hotkey: str
    coldkey: str
    uid: int
    netuid: int
    active: int
    stake: "Balance"
    # mapping of coldkey to amount staked to this Neuron
    stake_dict: dict[str, "Balance"]
    total_stake: "Balance"
    rank: float
    emission: float
    incentive: float
    consensus: float
    trust: float
    validator_trust: float
    dividends: float
    last_update: int
    validator_permit: bool
    weights: list[list[int]]
    bonds: list[list[int]]
    pruning_score: int
    prometheus_info: Optional["PrometheusInfo"] = None
    axon_info: Optional["AxonInfo"] = None
    is_null: bool = False

    @classmethod
    def from_weights_bonds_and_neuron_lite(
        cls,
        neuron_lite: "NeuronInfoLite",
        weights_as_dict: dict[int, list[tuple[int, int]]],
        bonds_as_dict: dict[int, list[tuple[int, int]]],
    ) -> "NeuronInfo":
        """
        Creates an instance of NeuronInfo from NeuronInfoLite and dictionaries of weights and bonds.

        Args:
            neuron_lite (NeuronInfoLite): A lite version of the neuron containing basic attributes.
            weights_as_dict (dict[int, list[tuple[int, int]]]): A dictionary where the key is the UID and the value is a list of weight tuples associated with the neuron.
            bonds_as_dict (dict[int, list[tuple[int, int]]]): A dictionary where the key is the UID and the value is a list of bond tuples associated with the neuron.

        Returns:
            NeuronInfo: An instance of NeuronInfo populated with the provided weights and bonds.
        """
        # NOTE(review): `neuron_lite.__dict__` is the live attribute dict, so the
        # two assignments below also mutate the passed-in `neuron_lite` object —
        # confirm callers do not reuse it afterwards.
        n_dict = neuron_lite.__dict__
        n_dict["weights"] = weights_as_dict.get(neuron_lite.uid, [])
        n_dict["bonds"] = bonds_as_dict.get(neuron_lite.uid, [])

        return cls(**n_dict)

    @staticmethod
    def get_null_neuron() -> "NeuronInfo":
        """Returns a placeholder NeuronInfo with zeroed fields and ``is_null=True``."""
        neuron = NeuronInfo(
            uid=0,
            netuid=0,
            active=0,
            stake=Balance.from_rao(0),
            stake_dict={},
            total_stake=Balance.from_rao(0),
            rank=0,
            emission=0,
            incentive=0,
            consensus=0,
            trust=0,
            validator_trust=0,
            dividends=0,
            last_update=0,
            validator_permit=False,
            weights=[],
            bonds=[],
            prometheus_info=None,
            axon_info=None,
            is_null=True,
            coldkey="000000000000000000000000000000000000000000000000",
            hotkey="000000000000000000000000000000000000000000000000",
            pruning_score=0,
        )
        return neuron

    @classmethod
    def from_vec_u8(cls, vec_u8: bytes) -> "NeuronInfo":
        """Instantiates NeuronInfo from a SCALE-encoded byte vector."""
        n = bt_decode.NeuronInfo.decode(bytes(vec_u8))
        # Stake is reported per coldkey; the neuron-level `stake` field is set
        # to the summed total (same value as `total_stake`).
        stake_dict = process_stake_data(n.stake)
        total_stake = sum(stake_dict.values()) if stake_dict else Balance(0)
        axon_info = n.axon_info
        coldkey = decode_account_id(n.coldkey)
        hotkey = decode_account_id(n.hotkey)
        return NeuronInfo(
            hotkey=hotkey,
            coldkey=coldkey,
            uid=n.uid,
            netuid=n.netuid,
            active=n.active,
            stake=total_stake,
            stake_dict=stake_dict,
            total_stake=total_stake,
            # u16 chain values are normalized to floats below.
            rank=u16_normalized_float(n.rank),
            emission=n.emission / 1e9,  # divide by 1e9 — presumably RAO -> TAO; confirm
            incentive=u16_normalized_float(n.incentive),
            consensus=u16_normalized_float(n.consensus),
            trust=u16_normalized_float(n.trust),
            validator_trust=u16_normalized_float(n.validator_trust),
            dividends=u16_normalized_float(n.dividends),
            last_update=n.last_update,
            validator_permit=n.validator_permit,
            weights=[[e[0], e[1]] for e in n.weights],
            bonds=[[e[0], e[1]] for e in n.bonds],
            pruning_score=n.pruning_score,
            prometheus_info=PrometheusInfo(
                block=n.prometheus_info.block,
                version=n.prometheus_info.version,
                # integer IP from the chain -> human-readable string
                ip=str(netaddr.IPAddress(n.prometheus_info.ip)),
                port=n.prometheus_info.port,
                ip_type=n.prometheus_info.ip_type,
            ),
            axon_info=AxonInfo(
                version=axon_info.version,
                ip=str(netaddr.IPAddress(axon_info.ip)),
                port=axon_info.port,
                ip_type=axon_info.ip_type,
                placeholder1=axon_info.placeholder1,
                placeholder2=axon_info.placeholder2,
                protocol=axon_info.protocol,
                hotkey=hotkey,
                coldkey=coldkey,
            ),
            is_null=False,
        )


# --- bittensor/core/chain_data/neuron_info_lite.py ---
from dataclasses import dataclass
from typing import Optional

import bt_decode
import netaddr

from bittensor.core.chain_data.axon_info import AxonInfo
from bittensor.core.chain_data.prometheus_info import PrometheusInfo
from bittensor.core.chain_data.utils import decode_account_id, process_stake_data
from bittensor.utils import u16_normalized_float
from bittensor.utils.balance import Balance
@dataclass
class NeuronInfoLite:
    """
    NeuronInfoLite is a dataclass representing neuron metadata without weights and bonds.

    Attributes:
        hotkey (str): The hotkey string for the neuron.
        coldkey (str): The coldkey string for the neuron.
        uid (int): A unique identifier for the neuron.
        netuid (int): Network unique identifier for the neuron.
        active (int): Indicates whether the neuron is active.
        stake (Balance): The stake amount associated with the neuron.
        stake_dict (dict): Mapping of coldkey to the amount staked to this Neuron.
        total_stake (Balance): Total amount of the stake.
        rank (float): The rank of the neuron.
        emission (float): The emission value of the neuron.
        incentive (float): The incentive value of the neuron.
        consensus (float): The consensus value of the neuron.
        trust (float): Trust value of the neuron.
        validator_trust (float): Validator trust value of the neuron.
        dividends (float): Dividends associated with the neuron.
        last_update (int): Timestamp of the last update.
        validator_permit (bool): Indicates if the neuron has a validator permit.
        prometheus_info (Optional[PrometheusInfo]): Prometheus information associated with the neuron.
        axon_info (Optional[AxonInfo]): Axon information associated with the neuron.
        pruning_score (int): The pruning score of the neuron.
        is_null (bool): Indicates whether the neuron is null.

    Methods:
        get_null_neuron: Returns a NeuronInfoLite object representing a null neuron.
        list_from_vec_u8: Decodes a bytes object into a list of NeuronInfoLite instances.
    """

    hotkey: str
    coldkey: str
    uid: int
    netuid: int
    active: int
    stake: "Balance"
    # mapping of coldkey to amount staked to this Neuron
    stake_dict: dict[str, "Balance"]
    total_stake: "Balance"
    rank: float
    emission: float
    incentive: float
    consensus: float
    trust: float
    validator_trust: float
    dividends: float
    last_update: int
    validator_permit: bool
    prometheus_info: Optional["PrometheusInfo"]
    axon_info: Optional["AxonInfo"]
    pruning_score: int
    is_null: bool = False

    @staticmethod
    def get_null_neuron() -> "NeuronInfoLite":
        """Returns a placeholder NeuronInfoLite with zeroed fields and ``is_null=True``."""
        neuron = NeuronInfoLite(
            uid=0,
            netuid=0,
            active=0,
            stake=Balance.from_rao(0),
            stake_dict={},
            total_stake=Balance.from_rao(0),
            rank=0,
            emission=0,
            incentive=0,
            consensus=0,
            trust=0,
            validator_trust=0,
            dividends=0,
            last_update=0,
            validator_permit=False,
            prometheus_info=None,
            axon_info=None,
            is_null=True,
            coldkey="000000000000000000000000000000000000000000000000",
            hotkey="000000000000000000000000000000000000000000000000",
            pruning_score=0,
        )
        return neuron

    @classmethod
    def list_from_vec_u8(cls, vec_u8: bytes) -> list["NeuronInfoLite"]:
        """
        Decodes a bytes object into a list of NeuronInfoLite instances.

        Args:
            vec_u8 (bytes): The bytes object to decode into NeuronInfoLite instances.

        Returns:
            list[NeuronInfoLite]: A list of NeuronInfoLite instances decoded from the provided bytes object.
        """
        decoded = bt_decode.NeuronInfoLite.decode_vec(vec_u8)
        results = []
        for item in decoded:
            # Pull raw fields off the decoded record first, then normalize below.
            active = item.active
            axon_info = item.axon_info
            coldkey = decode_account_id(item.coldkey)
            consensus = item.consensus
            dividends = item.dividends
            emission = item.emission
            hotkey = decode_account_id(item.hotkey)
            incentive = item.incentive
            last_update = item.last_update
            netuid = item.netuid
            prometheus_info = item.prometheus_info
            pruning_score = item.pruning_score
            rank = item.rank
            stake_dict = process_stake_data(item.stake)
            # Neuron-level `stake` is the summed total (same as `total_stake`).
            stake = sum(stake_dict.values()) if stake_dict else Balance(0)
            trust = item.trust
            uid = item.uid
            validator_permit = item.validator_permit
            validator_trust = item.validator_trust
            results.append(
                NeuronInfoLite(
                    active=active,
                    axon_info=AxonInfo(
                        version=axon_info.version,
                        # integer IP from the chain -> human-readable string
                        ip=str(netaddr.IPAddress(axon_info.ip)),
                        port=axon_info.port,
                        ip_type=axon_info.ip_type,
                        placeholder1=axon_info.placeholder1,
                        placeholder2=axon_info.placeholder2,
                        protocol=axon_info.protocol,
                        hotkey=hotkey,
                        coldkey=coldkey,
                    ),
                    coldkey=coldkey,
                    # u16 chain values are normalized to floats below.
                    consensus=u16_normalized_float(consensus),
                    dividends=u16_normalized_float(dividends),
                    emission=emission / 1e9,  # divide by 1e9 — presumably RAO -> TAO; confirm
                    hotkey=hotkey,
                    incentive=u16_normalized_float(incentive),
                    last_update=last_update,
                    netuid=netuid,
                    prometheus_info=PrometheusInfo(
                        version=prometheus_info.version,
                        ip=str(netaddr.IPAddress(prometheus_info.ip)),
                        port=prometheus_info.port,
                        ip_type=prometheus_info.ip_type,
                        block=prometheus_info.block,
                    ),
                    pruning_score=pruning_score,
                    rank=u16_normalized_float(rank),
                    stake_dict=stake_dict,
                    stake=stake,
                    total_stake=stake,
                    trust=u16_normalized_float(trust),
                    uid=uid,
                    validator_permit=validator_permit,
                    validator_trust=u16_normalized_float(validator_trust),
                )
            )
        return results


# --- bittensor/core/chain_data/prometheus_info.py ---
from dataclasses import dataclass

from bittensor.utils import networking


@dataclass
class PrometheusInfo:
    """
    Dataclass representing information related to Prometheus.

    Attributes:
        block (int): The block number associated with the Prometheus data.
        version (int): The version of the Prometheus data.
        ip (str): The IP address associated with Prometheus.
        port (int): The port number for Prometheus.
        ip_type (int): The type of IP address (e.g., IPv4, IPv6).
    """

    block: int
    version: int
    ip: str
    port: int
    ip_type: int

    @classmethod
    def fix_decoded_values(cls, prometheus_info_decoded: dict) -> "PrometheusInfo":
        """Returns a PrometheusInfo object from a prometheus_info_decoded dictionary."""
        # NOTE(review): this converts the integer IP in place, mutating the
        # caller's dict — confirm no caller relies on the original value.
        prometheus_info_decoded["ip"] = networking.int_to_ip(
            int(prometheus_info_decoded["ip"])
        )
        return cls(**prometheus_info_decoded)


# --- bittensor/core/chain_data/proposal_vote_data.py ---
from typing import TypedDict
+ """ + + index: int + threshold: int + ayes: list[str] + nays: list[str] + end: int diff --git a/bittensor/core/chain_data/scheduled_coldkey_swap_info.py b/bittensor/core/chain_data/scheduled_coldkey_swap_info.py new file mode 100644 index 0000000000..7c0f6e7f88 --- /dev/null +++ b/bittensor/core/chain_data/scheduled_coldkey_swap_info.py @@ -0,0 +1,65 @@ +from dataclasses import dataclass +from typing import Optional, Any + +from scalecodec.utils.ss58 import ss58_encode + +from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType +from bittensor.core.settings import SS58_FORMAT + + +@dataclass +class ScheduledColdkeySwapInfo: + """ + The `ScheduledColdkeySwapInfo` class is a dataclass representing information about scheduled cold key swaps. + + Attributes: + old_coldkey (str): The old cold key before the swap. + new_coldkey (str): The new cold key after the swap. + arbitration_block (int): The block number at which the arbitration of the swap will take place. + """ + + old_coldkey: str + new_coldkey: str + arbitration_block: int + + @classmethod + def fix_decoded_values(cls, decoded: Any) -> "ScheduledColdkeySwapInfo": + """Fixes the decoded values.""" + return cls( + old_coldkey=ss58_encode(decoded["old_coldkey"], SS58_FORMAT), + new_coldkey=ss58_encode(decoded["new_coldkey"], SS58_FORMAT), + arbitration_block=decoded["arbitration_block"], + ) + + @classmethod + def from_vec_u8(cls, vec_u8: list[int]) -> Optional["ScheduledColdkeySwapInfo"]: + """Returns a ScheduledColdkeySwapInfo object from a ``vec_u8``.""" + if len(vec_u8) == 0: + return None + + decoded = from_scale_encoding(vec_u8, ChainDataType.ScheduledColdkeySwapInfo) + if decoded is None: + return None + + return ScheduledColdkeySwapInfo.fix_decoded_values(decoded) + + @classmethod + def list_from_vec_u8(cls, vec_u8: list[int]) -> list["ScheduledColdkeySwapInfo"]: + """Returns a list of ScheduledColdkeySwapInfo objects from a ``vec_u8``.""" + decoded = from_scale_encoding( + vec_u8, 
ChainDataType.ScheduledColdkeySwapInfo, is_vec=True + ) + if decoded is None: + return [] + + return [ScheduledColdkeySwapInfo.fix_decoded_values(d) for d in decoded] + + @classmethod + def decode_account_id_list(cls, vec_u8: list[int]) -> Optional[list[str]]: + """Decodes a list of AccountIds from vec_u8.""" + decoded = from_scale_encoding( + vec_u8, ChainDataType.ScheduledColdkeySwapInfo.AccountId, is_vec=True + ) + if decoded is None: + return None + return [ss58_encode(account_id, SS58_FORMAT) for account_id in decoded] diff --git a/bittensor/core/chain_data/stake_info.py b/bittensor/core/chain_data/stake_info.py new file mode 100644 index 0000000000..8d3b5020fb --- /dev/null +++ b/bittensor/core/chain_data/stake_info.py @@ -0,0 +1,79 @@ +from dataclasses import dataclass +from typing import Optional, Any + +from scalecodec.utils.ss58 import ss58_encode + +from bittensor.core.chain_data.utils import ( + from_scale_encoding, + from_scale_encoding_using_type_string, + ChainDataType, +) +from bittensor.core.settings import SS58_FORMAT +from bittensor.utils.balance import Balance + + +@dataclass +class StakeInfo: + """ + Dataclass for representing stake information linked to hotkey and coldkey pairs. + + Attributes: + hotkey_ss58 (str): The SS58 encoded hotkey address. + coldkey_ss58 (str): The SS58 encoded coldkey address. + stake (Balance): The stake associated with the hotkey-coldkey pair, represented as a Balance object. 
+ """ + + hotkey_ss58: str # Hotkey address + coldkey_ss58: str # Coldkey address + stake: Balance # Stake for the hotkey-coldkey pair + + @classmethod + def fix_decoded_values(cls, decoded: Any) -> "StakeInfo": + """Fixes the decoded values.""" + return cls( + hotkey_ss58=ss58_encode(decoded["hotkey"], SS58_FORMAT), + coldkey_ss58=ss58_encode(decoded["coldkey"], SS58_FORMAT), + stake=Balance.from_rao(decoded["stake"]), + ) + + @classmethod + def from_vec_u8(cls, vec_u8: list[int]) -> Optional["StakeInfo"]: + """Returns a StakeInfo object from a ``vec_u8``.""" + if len(vec_u8) == 0: + return None + + decoded = from_scale_encoding(vec_u8, ChainDataType.StakeInfo) + if decoded is None: + return None + + return StakeInfo.fix_decoded_values(decoded) + + @classmethod + def list_of_tuple_from_vec_u8( + cls, vec_u8: list[int] + ) -> dict[str, list["StakeInfo"]]: + """Returns a list of StakeInfo objects from a ``vec_u8``.""" + decoded: Optional[list[tuple[str, list[object]]]] = ( + from_scale_encoding_using_type_string( + input_=vec_u8, type_string="Vec<(AccountId, Vec)>" + ) + ) + + if decoded is None: + return {} + + return { + ss58_encode(address=account_id, ss58_format=SS58_FORMAT): [ + StakeInfo.fix_decoded_values(d) for d in stake_info + ] + for account_id, stake_info in decoded + } + + @classmethod + def list_from_vec_u8(cls, vec_u8: list[int]) -> list["StakeInfo"]: + """Returns a list of StakeInfo objects from a ``vec_u8``.""" + decoded = from_scale_encoding(vec_u8, ChainDataType.StakeInfo, is_vec=True) + if decoded is None: + return [] + + return [StakeInfo.fix_decoded_values(d) for d in decoded] diff --git a/bittensor/core/chain_data/subnet_hyperparameters.py b/bittensor/core/chain_data/subnet_hyperparameters.py new file mode 100644 index 0000000000..c28f802cfc --- /dev/null +++ b/bittensor/core/chain_data/subnet_hyperparameters.py @@ -0,0 +1,112 @@ +from dataclasses import dataclass +from typing import Optional + +import bt_decode + + +@dataclass +class 
@dataclass
class SubnetHyperparameters:
    """
    This class represents the hyperparameters for a subnet.

    Attributes:
        rho (int): The rate of decay of some value.
        kappa (int): A constant multiplier used in calculations.
        immunity_period (int): The period during which immunity is active.
        min_allowed_weights (int): Minimum allowed weights.
        max_weight_limit (float): Maximum weight limit.
        tempo (int): The tempo or rate of operation.
        min_difficulty (int): Minimum difficulty for some operations.
        max_difficulty (int): Maximum difficulty for some operations.
        weights_version (int): The version number of the weights used.
        weights_rate_limit (int): Rate limit for processing weights.
        adjustment_interval (int): Interval at which adjustments are made.
        activity_cutoff (int): Activity cutoff threshold.
        registration_allowed (bool): Indicates if registration is allowed.
        target_regs_per_interval (int): Target number of registrations per interval.
        min_burn (int): Minimum burn value.
        max_burn (int): Maximum burn value.
        bonds_moving_avg (int): Moving average of bonds.
        max_regs_per_block (int): Maximum number of registrations per block.
        serving_rate_limit (int): Limit on the rate of service.
        max_validators (int): Maximum number of validators.
        adjustment_alpha (int): Alpha value for adjustments.
        difficulty (int): Difficulty level.
        commit_reveal_weights_interval (int): Interval for commit-reveal weights.
        commit_reveal_weights_enabled (bool): Flag indicating if commit-reveal weights are enabled.
        alpha_high (int): High value of alpha.
        alpha_low (int): Low value of alpha.
        liquid_alpha_enabled (bool): Flag indicating if liquid alpha is enabled.
    """

    rho: int
    kappa: int
    immunity_period: int
    min_allowed_weights: int
    max_weight_limit: float
    tempo: int
    min_difficulty: int
    max_difficulty: int
    weights_version: int
    weights_rate_limit: int
    adjustment_interval: int
    activity_cutoff: int
    registration_allowed: bool
    target_regs_per_interval: int
    min_burn: int
    max_burn: int
    bonds_moving_avg: int
    max_regs_per_block: int
    serving_rate_limit: int
    max_validators: int
    adjustment_alpha: int
    difficulty: int
    commit_reveal_weights_interval: int
    commit_reveal_weights_enabled: bool
    alpha_high: int
    alpha_low: int
    liquid_alpha_enabled: bool

    @classmethod
    def from_vec_u8(cls, vec_u8: bytes) -> Optional["SubnetHyperparameters"]:
        """
        Create a `SubnetHyperparameters` instance from a vector of bytes.

        This method decodes the given vector of bytes using the `bt_decode` module and creates a new instance of `SubnetHyperparameters` with the decoded values.

        Args:
            vec_u8 (bytes): A vector of bytes to decode into `SubnetHyperparameters`.

        Returns:
            Optional[SubnetHyperparameters]: An instance of `SubnetHyperparameters` if decoding is successful, None otherwise.
        """
        decoded = bt_decode.SubnetHyperparameters.decode(vec_u8)
        return SubnetHyperparameters(
            rho=decoded.rho,
            kappa=decoded.kappa,
            immunity_period=decoded.immunity_period,
            min_allowed_weights=decoded.min_allowed_weights,
            # Field name differs from the decoded record here: the chain-side
            # attribute is `max_weights_limit` (plural "weights").
            max_weight_limit=decoded.max_weights_limit,
            tempo=decoded.tempo,
            min_difficulty=decoded.min_difficulty,
            max_difficulty=decoded.max_difficulty,
            weights_version=decoded.weights_version,
            weights_rate_limit=decoded.weights_rate_limit,
            adjustment_interval=decoded.adjustment_interval,
            activity_cutoff=decoded.activity_cutoff,
            registration_allowed=decoded.registration_allowed,
            target_regs_per_interval=decoded.target_regs_per_interval,
            min_burn=decoded.min_burn,
            max_burn=decoded.max_burn,
            bonds_moving_avg=decoded.bonds_moving_avg,
            max_regs_per_block=decoded.max_regs_per_block,
            serving_rate_limit=decoded.serving_rate_limit,
            max_validators=decoded.max_validators,
            adjustment_alpha=decoded.adjustment_alpha,
            difficulty=decoded.difficulty,
            commit_reveal_weights_interval=decoded.commit_reveal_weights_interval,
            commit_reveal_weights_enabled=decoded.commit_reveal_weights_enabled,
            alpha_high=decoded.alpha_high,
            alpha_low=decoded.alpha_low,
            liquid_alpha_enabled=decoded.liquid_alpha_enabled,
        )


# --- bittensor/core/chain_data/subnet_info.py ---
from dataclasses import dataclass
from typing import Any, Optional, Union

from scalecodec.utils.ss58 import ss58_encode

from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType
from bittensor.core.settings import SS58_FORMAT
from bittensor.utils import u16_normalized_float
from bittensor.utils.balance import Balance
from bittensor.utils.registration import torch, use_torch


@dataclass
class SubnetInfo:
    """Dataclass for subnet info."""

    # NOTE: the remaining SubnetInfo fields continue past this chunk.
    netuid: int
int + kappa: int + difficulty: int + immunity_period: int + max_allowed_validators: int + min_allowed_weights: int + max_weight_limit: float + scaling_law_power: float + subnetwork_n: int + max_n: int + blocks_since_epoch: int + tempo: int + modality: int + # netuid -> topk percentile pruning score requirement (u16::MAX normalized) + connection_requirements: dict[str, float] + emission_value: float + burn: Balance + owner_ss58: str + + @classmethod + def from_vec_u8(cls, vec_u8: list[int]) -> Optional["SubnetInfo"]: + """Returns a SubnetInfo object from a ``vec_u8``.""" + if len(vec_u8) == 0: + return None + + decoded = from_scale_encoding(vec_u8, ChainDataType.SubnetInfo) + if decoded is None: + return None + + return SubnetInfo.fix_decoded_values(decoded) + + @classmethod + def list_from_vec_u8(cls, vec_u8: list[int]) -> list["SubnetInfo"]: + """Returns a list of SubnetInfo objects from a ``vec_u8``.""" + decoded = from_scale_encoding( + vec_u8, ChainDataType.SubnetInfo, is_vec=True, is_option=True + ) + + if decoded is None: + return [] + + return [SubnetInfo.fix_decoded_values(d) for d in decoded] + + @classmethod + def fix_decoded_values(cls, decoded: dict) -> "SubnetInfo": + """Returns a SubnetInfo object from a decoded SubnetInfo dictionary.""" + return SubnetInfo( + netuid=decoded["netuid"], + rho=decoded["rho"], + kappa=decoded["kappa"], + difficulty=decoded["difficulty"], + immunity_period=decoded["immunity_period"], + max_allowed_validators=decoded["max_allowed_validators"], + min_allowed_weights=decoded["min_allowed_weights"], + max_weight_limit=decoded["max_weights_limit"], + scaling_law_power=decoded["scaling_law_power"], + subnetwork_n=decoded["subnetwork_n"], + max_n=decoded["max_allowed_uids"], + blocks_since_epoch=decoded["blocks_since_last_step"], + tempo=decoded["tempo"], + modality=decoded["network_modality"], + connection_requirements={ + str(int(netuid)): u16_normalized_float(int(req)) + for netuid, req in decoded["network_connect"] + }, + 
emission_value=decoded["emission_values"], + burn=Balance.from_rao(decoded["burn"]), + owner_ss58=ss58_encode(decoded["owner"], SS58_FORMAT), + ) + + def to_parameter_dict(self) -> Union[dict[str, Any], "torch.nn.ParameterDict"]: + """Returns a torch tensor or dict of the subnet info.""" + if use_torch(): + return torch.nn.ParameterDict(self.__dict__) + else: + return self.__dict__ + + @classmethod + def from_parameter_dict( + cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"] + ) -> "SubnetInfo": + """Creates a SubnetInfo instance from a parameter dictionary.""" + if use_torch(): + return cls(**dict(parameter_dict)) + else: + return cls(**parameter_dict) diff --git a/bittensor/core/chain_data/utils.py b/bittensor/core/chain_data/utils.py new file mode 100644 index 0000000000..0544ca85a2 --- /dev/null +++ b/bittensor/core/chain_data/utils.py @@ -0,0 +1,291 @@ +"""Chain data helper functions and data.""" + +from enum import Enum +from typing import Optional, Union + +from scalecodec.base import RuntimeConfiguration, ScaleBytes +from scalecodec.type_registry import load_type_registry_preset +from scalecodec.utils.ss58 import ss58_encode + +from bittensor.core.settings import SS58_FORMAT +from bittensor.utils.balance import Balance + + +class ChainDataType(Enum): + NeuronInfo = 1 + SubnetInfo = 2 + DelegateInfo = 3 + NeuronInfoLite = 4 + DelegatedInfo = 5 + StakeInfo = 6 + IPInfo = 7 + SubnetHyperparameters = 8 + ScheduledColdkeySwapInfo = 9 + AccountId = 10 + + +def from_scale_encoding( + input_: Union[list[int], bytes, "ScaleBytes"], + type_name: "ChainDataType", + is_vec: bool = False, + is_option: bool = False, +) -> Optional[dict]: + """ + Decodes input_ data from SCALE encoding based on the specified type name and modifiers. + + Args: + input_ (Union[List[int], bytes, ScaleBytes]): The input_ data to decode. + type_name (ChainDataType): The type of data being decoded. + is_vec (bool): Whether the data is a vector of the specified type. 
Default is ``False``. + is_option (bool): Whether the data is an optional value of the specified type. Default is ``False``. + + Returns: + Optional[dict]: The decoded data as a dictionary, or ``None`` if the decoding fails. + """ + type_string = type_name.name + if type_name == ChainDataType.DelegatedInfo: + # DelegatedInfo is a tuple of (DelegateInfo, Compact) + type_string = f"({ChainDataType.DelegateInfo.name}, Compact)" + if is_option: + type_string = f"Option<{type_string}>" + if is_vec: + type_string = f"Vec<{type_string}>" + + return from_scale_encoding_using_type_string(input_, type_string) + + +def from_scale_encoding_using_type_string( + input_: Union[list[int], bytes, ScaleBytes], type_string: str +) -> Optional[dict]: + """ + Decodes SCALE encoded data to a dictionary based on the provided type string. + + Args: + input_ (Union[List[int], bytes, ScaleBytes]): The SCALE encoded input data. + type_string (str): The type string defining the structure of the data. + + Returns: + Optional[dict]: The decoded data as a dictionary, or ``None`` if the decoding fails. + + Raises: + TypeError: If the input_ is not a list[int], bytes, or ScaleBytes. 
+ """ + if isinstance(input_, ScaleBytes): + as_scale_bytes = input_ + else: + if isinstance(input_, list) and all([isinstance(i, int) for i in input_]): + vec_u8 = input_ + as_bytes = bytes(vec_u8) + elif isinstance(input_, bytes): + as_bytes = input_ + else: + raise TypeError("input_ must be a list[int], bytes, or ScaleBytes") + + as_scale_bytes = ScaleBytes(as_bytes) + + rpc_runtime_config = RuntimeConfiguration() + rpc_runtime_config.update_type_registry(load_type_registry_preset("legacy")) + rpc_runtime_config.update_type_registry(custom_rpc_type_registry) + + obj = rpc_runtime_config.create_scale_object(type_string, data=as_scale_bytes) + + return obj.decode() + + +custom_rpc_type_registry = { + "types": { + "SubnetInfo": { + "type": "struct", + "type_mapping": [ + ["netuid", "Compact"], + ["rho", "Compact"], + ["kappa", "Compact"], + ["difficulty", "Compact"], + ["immunity_period", "Compact"], + ["max_allowed_validators", "Compact"], + ["min_allowed_weights", "Compact"], + ["max_weights_limit", "Compact"], + ["scaling_law_power", "Compact"], + ["subnetwork_n", "Compact"], + ["max_allowed_uids", "Compact"], + ["blocks_since_last_step", "Compact"], + ["tempo", "Compact"], + ["network_modality", "Compact"], + ["network_connect", "Vec<[u16; 2]>"], + ["emission_values", "Compact"], + ["burn", "Compact"], + ["owner", "AccountId"], + ], + }, + "DelegateInfo": { + "type": "struct", + "type_mapping": [ + ["delegate_ss58", "AccountId"], + ["take", "Compact"], + ["nominators", "Vec<(AccountId, Compact)>"], + ["owner_ss58", "AccountId"], + ["registrations", "Vec>"], + ["validator_permits", "Vec>"], + ["return_per_1000", "Compact"], + ["total_daily_return", "Compact"], + ], + }, + "NeuronInfo": { + "type": "struct", + "type_mapping": [ + ["hotkey", "AccountId"], + ["coldkey", "AccountId"], + ["uid", "Compact"], + ["netuid", "Compact"], + ["active", "bool"], + ["axon_info", "axon_info"], + ["prometheus_info", "PrometheusInfo"], + ["stake", "Vec<(AccountId, Compact)>"], + 
["rank", "Compact"], + ["emission", "Compact"], + ["incentive", "Compact"], + ["consensus", "Compact"], + ["trust", "Compact"], + ["validator_trust", "Compact"], + ["dividends", "Compact"], + ["last_update", "Compact"], + ["validator_permit", "bool"], + ["weights", "Vec<(Compact, Compact)>"], + ["bonds", "Vec<(Compact, Compact)>"], + ["pruning_score", "Compact"], + ], + }, + "NeuronInfoLite": { + "type": "struct", + "type_mapping": [ + ["hotkey", "AccountId"], + ["coldkey", "AccountId"], + ["uid", "Compact"], + ["netuid", "Compact"], + ["active", "bool"], + ["axon_info", "axon_info"], + ["prometheus_info", "PrometheusInfo"], + ["stake", "Vec<(AccountId, Compact)>"], + ["rank", "Compact"], + ["emission", "Compact"], + ["incentive", "Compact"], + ["consensus", "Compact"], + ["trust", "Compact"], + ["validator_trust", "Compact"], + ["dividends", "Compact"], + ["last_update", "Compact"], + ["validator_permit", "bool"], + ["pruning_score", "Compact"], + ], + }, + "axon_info": { + "type": "struct", + "type_mapping": [ + ["block", "u64"], + ["version", "u32"], + ["ip", "u128"], + ["port", "u16"], + ["ip_type", "u8"], + ["protocol", "u8"], + ["placeholder1", "u8"], + ["placeholder2", "u8"], + ], + }, + "PrometheusInfo": { + "type": "struct", + "type_mapping": [ + ["block", "u64"], + ["version", "u32"], + ["ip", "u128"], + ["port", "u16"], + ["ip_type", "u8"], + ], + }, + "IPInfo": { + "type": "struct", + "type_mapping": [ + ["ip", "Compact"], + ["ip_type_and_protocol", "Compact"], + ], + }, + "StakeInfo": { + "type": "struct", + "type_mapping": [ + ["hotkey", "AccountId"], + ["coldkey", "AccountId"], + ["stake", "Compact"], + ], + }, + "SubnetHyperparameters": { + "type": "struct", + "type_mapping": [ + ["rho", "Compact"], + ["kappa", "Compact"], + ["immunity_period", "Compact"], + ["min_allowed_weights", "Compact"], + ["max_weights_limit", "Compact"], + ["tempo", "Compact"], + ["min_difficulty", "Compact"], + ["max_difficulty", "Compact"], + ["weights_version", 
"Compact"], + ["weights_rate_limit", "Compact"], + ["adjustment_interval", "Compact"], + ["activity_cutoff", "Compact"], + ["registration_allowed", "bool"], + ["target_regs_per_interval", "Compact"], + ["min_burn", "Compact"], + ["max_burn", "Compact"], + ["bonds_moving_avg", "Compact"], + ["max_regs_per_block", "Compact"], + ["serving_rate_limit", "Compact"], + ["max_validators", "Compact"], + ["adjustment_alpha", "Compact"], + ["difficulty", "Compact"], + ["commit_reveal_weights_interval", "Compact"], + ["commit_reveal_weights_enabled", "bool"], + ["alpha_high", "Compact"], + ["alpha_low", "Compact"], + ["liquid_alpha_enabled", "bool"], + ], + }, + "ScheduledColdkeySwapInfo": { + "type": "struct", + "type_mapping": [ + ["old_coldkey", "AccountId"], + ["new_coldkey", "AccountId"], + ["arbitration_block", "Compact"], + ], + }, + } +} + + +def decode_account_id(account_id_bytes: list) -> str: + """ + Decodes an AccountId from bytes to a Base64 string using SS58 encoding. + + Args: + account_id_bytes (bytes): The AccountId in bytes that needs to be decoded. + + Returns: + str: The decoded AccountId as a Base64 string. + """ + # Convert the AccountId bytes to a Base64 string + return ss58_encode(bytes(account_id_bytes).hex(), SS58_FORMAT) + + +def process_stake_data(stake_data: list) -> dict: + """ + Processes stake data to decode account IDs and convert stakes from rao to Balance objects. + + Args: + stake_data (list): A list of tuples where each tuple contains an account ID in bytes and a stake in rao. + + Returns: + dict: A dictionary with account IDs as keys and their corresponding Balance objects as values. 
+ """ + decoded_stake_data = {} + for account_id_bytes, stake_ in stake_data: + account_id = decode_account_id(account_id_bytes) + decoded_stake_data.update({account_id: Balance.from_rao(stake_)}) + return decoded_stake_data diff --git a/bittensor/core/config.py b/bittensor/core/config.py new file mode 100644 index 0000000000..5027bbecb5 --- /dev/null +++ b/bittensor/core/config.py @@ -0,0 +1,396 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +"""Implementation of the config class, which manages the configuration of different Bittensor modules.""" + +import argparse +import copy +import os +import sys +from copy import deepcopy +from typing import Any, TypeVar, Type, Optional + +import yaml +from munch import DefaultMunch + + +class InvalidConfigFile(Exception): + """In place of YAMLError""" + + +class Config(DefaultMunch): + """ + Implementation of the config class, which manages the configuration of different Bittensor modules. + + Translates the passed parser into a nested Bittensor config. + + Args: + parser (argparse.ArgumentParser): Command line parser object. + strict (bool): If ``true``, the command line arguments are strictly parsed. + args (list of str): Command line arguments. + default (Optional[Any]): Default value for the Config. Defaults to ``None``. This default will be returned for attributes that are undefined. + + Returns: + config (bittensor.core.config.Config): Nested config object created from parser arguments. + """ + + __is_set: dict[str, bool] + + def __init__( + self, + parser: argparse.ArgumentParser = None, + args: Optional[list[str]] = None, + strict: bool = False, + default: Optional[Any] = None, + ) -> None: + super().__init__(default) + + self["__is_set"] = {} + + if parser is None: + return + + # Optionally add config specific arguments + try: + parser.add_argument( + "--config", + type=str, + help="If set, defaults are overridden by passed file.", + ) + except Exception: + # this can fail if --config has already been added. + pass + + try: + parser.add_argument( + "--strict", + action="store_true", + help="""If flagged, config will check that only exact arguments have been set.""", + default=False, + ) + except Exception: + # this can fail if --strict has already been added. 
+ pass + + try: + parser.add_argument( + "--no_version_checking", + action="store_true", + help="Set ``true`` to stop cli version checking.", + default=False, + ) + except Exception: + # this can fail if --no_version_checking has already been added. + pass + + try: + parser.add_argument( + "--no_prompt", + dest="no_prompt", + action="store_true", + help="Set ``true`` to stop cli from prompting the user.", + default=False, + ) + except Exception: + # this can fail if --no_version_checking has already been added. + pass + + # Get args from argv if not passed in. + if args is None: + args = sys.argv[1:] + + # Check for missing required arguments before proceeding + missing_required_args = self.__check_for_missing_required_args(parser, args) + if missing_required_args: + # Handle missing required arguments gracefully + raise ValueError( + f"Missing required arguments: {', '.join(missing_required_args)}" + ) + + # 1.1 Optionally load defaults if the --config is set. + try: + config_file_path = ( + str(os.getcwd()) + + "/" + + vars(parser.parse_known_args(args)[0])["config"] + ) + except Exception as e: + config_file_path = None + + # Parse args not strict + config_params = Config.__parse_args__(args=args, parser=parser, strict=False) + + # 2. Optionally check for --strict + # strict=True when passed in OR when --strict is set + strict = config_params.strict or strict + + if config_file_path is not None: + config_file_path = os.path.expanduser(config_file_path) + try: + with open(config_file_path) as f: + params_config = yaml.safe_load(f) + print(f"Loading config defaults from: {config_file_path}") + parser.set_defaults(**params_config) + except Exception as e: + print(f"Error in loading: {e} using default parser settings") + + # 2. Continue with loading in params. 
+ params = Config.__parse_args__(args=args, parser=parser, strict=strict) + + _config = self + + # Splits params and add to config + Config.__split_params__(params=params, _config=_config) + + # Make the is_set map + _config["__is_set"] = {} + + # Reparse args using default of unset + parser_no_defaults = copy.deepcopy(parser) + + # Only command as the arg, else no args + default_param_args = ( + [_config.get("command")] + if _config.get("command") is not None and _config.get("subcommand") is None + else [] + ) + if _config.get("command") is not None and _config.get("subcommand") is not None: + default_param_args = [_config.get("command"), _config.get("subcommand")] + + # Get all args by name + default_params = parser.parse_args(args=default_param_args) + + all_default_args = default_params.__dict__.keys() | [] + # Make a dict with keys as args and values as argparse.SUPPRESS + defaults_as_suppress = {key: argparse.SUPPRESS for key in all_default_args} + # Set the defaults to argparse.SUPPRESS, should remove them from the namespace + parser_no_defaults.set_defaults(**defaults_as_suppress) + parser_no_defaults._defaults.clear() # Needed for quirk of argparse + + # Check for subparsers and do the same + if parser_no_defaults._subparsers is not None: + for action in parser_no_defaults._subparsers._actions: + # Should only be the "command" subparser action + if isinstance(action, argparse._SubParsersAction): + # Set the defaults to argparse.SUPPRESS, should remove them from the namespace + # Each choice is the keyword for a command, we need to set the defaults for each of these + # Note: we also need to clear the _defaults dict for each, this is a quirk of argparse + cmd_parser: argparse.ArgumentParser + for cmd_parser in action.choices.values(): + # If this choice is also a subparser, set defaults recursively + if cmd_parser._subparsers: + for action in cmd_parser._subparsers._actions: + # Should only be the "command" subparser action + if isinstance(action, 
argparse._SubParsersAction): + cmd_parser: argparse.ArgumentParser + for cmd_parser in action.choices.values(): + cmd_parser.set_defaults(**defaults_as_suppress) + cmd_parser._defaults.clear() # Needed for quirk of argparse + else: + cmd_parser.set_defaults(**defaults_as_suppress) + cmd_parser._defaults.clear() # Needed for quirk of argparse + + # Reparse the args, but this time with the defaults as argparse.SUPPRESS + params_no_defaults = Config.__parse_args__( + args=args, parser=parser_no_defaults, strict=strict + ) + + # Diff the params and params_no_defaults to get the is_set map + _config["__is_set"] = { + arg_key: True + for arg_key in [ + k + for k, _ in filter( + lambda kv: kv[1] != argparse.SUPPRESS, + params_no_defaults.__dict__.items(), + ) + ] + } + + @staticmethod + def __split_params__(params: argparse.Namespace, _config: "Config"): + # Splits params on dot syntax i.e. neuron.axon_port and adds to _config + for arg_key, arg_val in params.__dict__.items(): + split_keys = arg_key.split(".") + head = _config + keys = split_keys + while len(keys) > 1: + if ( + hasattr(head, keys[0]) and head[keys[0]] is not None + ): # Needs to be Config + head = getattr(head, keys[0]) + keys = keys[1:] + else: + head[keys[0]] = Config() + head = head[keys[0]] + keys = keys[1:] + if len(keys) == 1: + head[keys[0]] = arg_val + + @staticmethod + def __parse_args__( + args: list[str], parser: argparse.ArgumentParser = None, strict: bool = False + ) -> argparse.Namespace: + """Parses the passed args use the passed parser. + + Args: + args (list[str]): List of arguments to parse. + parser (argparse.ArgumentParser): Command line parser object. + strict (bool): If ``true``, the command line arguments are strictly parsed. + + Returns: + Namespace: Namespace object created from parser arguments. 
+ """ + if not strict: + params, unrecognized = parser.parse_known_args(args=args) + params_list = list(params.__dict__) + # bug within argparse itself, does not correctly set value for boolean flags + for unrec in unrecognized: + if unrec.startswith("--") and unrec[2:] in params_list: + # Set the missing boolean value to true + setattr(params, unrec[2:], True) + else: + params = parser.parse_args(args=args) + + return params + + def __deepcopy__(self, memo) -> "Config": + _default = self.__default__ + + config_state = self.__getstate__() + config_copy = Config() + memo[id(self)] = config_copy + + config_copy.__setstate__(config_state) + config_copy.__default__ = _default + + config_copy["__is_set"] = deepcopy(self["__is_set"], memo) + + return config_copy + + def __repr__(self) -> str: + return self.__str__() + + @staticmethod + def _remove_private_keys(d): + if "__parser" in d: + d.pop("__parser", None) + if "__is_set" in d: + d.pop("__is_set", None) + for k, v in list(d.items()): + if isinstance(v, dict): + Config._remove_private_keys(v) + return d + + def __str__(self) -> str: + # remove the parser and is_set map from the visible config + visible = copy.deepcopy(self.toDict()) + visible.pop("__parser", None) + visible.pop("__is_set", None) + cleaned = Config._remove_private_keys(visible) + return "\n" + yaml.dump(cleaned, sort_keys=False) + + def copy(self) -> "Config": + return copy.deepcopy(self) + + @staticmethod + def to_string(items) -> str: + """Get string from items.""" + return "\n" + yaml.dump(items.toDict()) + + def update_with_kwargs(self, kwargs): + """Add config to self""" + for key, val in kwargs.items(): + self[key] = val + + @classmethod + def _merge(cls, a, b): + """ + Merge two configurations recursively. + If there is a conflict, the value from the second configuration will take precedence. 
+ """ + for key in b: + if key in a: + if isinstance(a[key], dict) and isinstance(b[key], dict): + a[key] = cls._merge(a[key], b[key]) + else: + a[key] = b[key] + else: + a[key] = b[key] + return a + + def merge(self, b: "Config"): + """ + Merges the current config with another config. + + Args: + b (bittensor.core.config.Config): Another config to merge. + """ + self._merge(self, b) + + @classmethod + def merge_all(cls, configs: list["Config"]) -> "Config": + """ + Merge all configs in the list into one config. + If there is a conflict, the value from the last configuration in the list will take precedence. + + Args: + configs (list[bittensor.core.config.Config]): List of configs to be merged. + + Returns: + config (bittensor.core.config.Config): Merged config object. + """ + result = cls() + for cfg in configs: + result.merge(cfg) + return result + + def is_set(self, param_name: str) -> bool: + """Returns a boolean indicating whether the parameter has been set or is still the default.""" + if param_name not in self.get("__is_set"): + return False + else: + return self.get("__is_set")[param_name] + + def __check_for_missing_required_args( + self, parser: argparse.ArgumentParser, args: list[str] + ) -> list[str]: + required_args = self.__get_required_args_from_parser(parser) + missing_args = [arg for arg in required_args if not any(arg in s for s in args)] + return missing_args + + @staticmethod + def __get_required_args_from_parser(parser: argparse.ArgumentParser) -> list[str]: + required_args = [] + for action in parser._actions: + if action.required: + # Prefix the argument with '--' if it's a long argument, or '-' if it's short + prefix = "--" if len(action.dest) > 1 else "-" + required_args.append(prefix + action.dest) + return required_args + + +T = TypeVar("T", bound="DefaultConfig") + + +class DefaultConfig(Config): + """A Config with a set of default values.""" + + @classmethod + def default(cls: Type[T]) -> T: + """Get default config.""" + raise 
NotImplementedError("Function default is not implemented.") diff --git a/bittensor/core/dendrite.py b/bittensor/core/dendrite.py new file mode 100644 index 0000000000..0b9bc5381f --- /dev/null +++ b/bittensor/core/dendrite.py @@ -0,0 +1,832 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +from __future__ import annotations + +import asyncio +import time +import uuid +from typing import Any, AsyncGenerator, Optional, Union, Type + +import aiohttp +from bittensor_wallet import Wallet +from substrateinterface import Keypair + +from bittensor.core.axon import Axon +from bittensor.core.chain_data import AxonInfo +from bittensor.core.settings import version_as_int +from bittensor.core.stream import StreamingSynapse +from bittensor.core.synapse import Synapse, TerminalInfo +from bittensor.utils import networking +from bittensor.utils.btlogging import logging +from bittensor.utils.registration import torch, use_torch + +DENDRITE_ERROR_MAPPING: dict[Type[Exception], tuple] = { + aiohttp.ClientConnectorError: ("503", "Service unavailable"), + asyncio.TimeoutError: ("408", "Request timeout"), + aiohttp.ClientResponseError: (None, "Client response error"), + aiohttp.ClientPayloadError: ("400", "Payload error"), + aiohttp.ClientError: ("500", "Client error"), + aiohttp.ServerTimeoutError: ("504", "Server timeout error"), + aiohttp.ServerDisconnectedError: ("503", "Service disconnected"), + aiohttp.ServerConnectionError: ("503", "Service connection error"), +} +DENDRITE_DEFAULT_ERROR = ("422", "Failed to parse response") + + +class DendriteMixin: + """ + The Dendrite class represents the abstracted implementation of a network client module. + + In the brain analogy, dendrites receive signals + from other neurons (in this case, network servers or axons), and the Dendrite class here is designed + to send requests to those endpoint to receive inputs. + + This class includes a wallet or keypair used for signing messages, and methods for making + HTTP requests to the network servers. It also provides functionalities such as logging + network requests and processing server responses. + + Args: + keypair (Option[Union[bittensor_wallet.Wallet, substrateinterface.Keypair]]): The wallet or keypair used for signing messages. 
+ external_ip (str): The external IP address of the local system. + synapse_history (list): A list of Synapse objects representing the historical responses. + + Methods: + __str__(): Returns a string representation of the Dendrite object. + __repr__(): Returns a string representation of the Dendrite object, acting as a fallback for __str__(). + query(self, *args, **kwargs) -> Union[Synapse, list[Synapse]]: Makes synchronous requests to one or multiple target Axons and returns responses. + forward(self, axons, synapse=Synapse(), timeout=12, deserialize=True, run_async=True, streaming=False) -> Synapse: Asynchronously sends requests to one or multiple Axons and collates their responses. + call(self, target_axon, synapse=Synapse(), timeout=12.0, deserialize=True) -> Synapse: Asynchronously sends a request to a specified Axon and processes the response. + call_stream(self, target_axon, synapse=Synapse(), timeout=12.0, deserialize=True) -> AsyncGenerator[Synapse, None]: Sends a request to a specified Axon and yields an AsyncGenerator that contains streaming response chunks before finally yielding the filled Synapse as the final element. + preprocess_synapse_for_request(self, target_axon_info, synapse, timeout=12.0) -> Synapse: Preprocesses the synapse for making a request, including building headers and signing. + process_server_response(self, server_response, json_response, local_synapse): Processes the server response, updates the local synapse state, and merges headers. + close_session(self): Synchronously closes the internal aiohttp client session. + aclose_session(self): Asynchronously closes the internal aiohttp client session. + + NOTE: + When working with async `aiohttp `_ client sessions, it is recommended to use a context manager. 
+ + Example with a context manager:: + + async with dendrite(wallet = bittensor_wallet.Wallet()) as d: + print(d) + d( ) # ping axon + d( [] ) # ping multiple + d( Axon(), Synapse ) + + However, you are able to safely call :func:`dendrite.query()` without a context manager in a synchronous setting. + + Example without a context manager:: + + d = dendrite(wallet = bittensor_wallet.Wallet() ) + print(d) + d( ) # ping axon + d( [] ) # ping multiple + d( bittensor.core.axon.Axon, bittensor.core.synapse.Synapse ) + """ + + def __init__(self, wallet: Optional[Union["Wallet", "Keypair"]] = None): + """ + Initializes the Dendrite object, setting up essential properties. + + Args: + wallet (Optional[Union[bittensor_wallet.Wallet, substrateinterface.Keypair]]): The user's wallet or keypair used for signing messages. Defaults to ``None``, in which case a new :func:`bittensor_wallet.Wallet().hotkey` is generated and used. + """ + # Initialize the parent class + super(DendriteMixin, self).__init__() + + # Unique identifier for the instance + self.uuid = str(uuid.uuid1()) + + # Get the external IP + self.external_ip = networking.get_external_ip() + + # If a wallet or keypair is provided, use its hotkey. If not, generate a new one. + self.keypair = ( + wallet.hotkey if isinstance(wallet, Wallet) else wallet + ) or Wallet().hotkey + + self.synapse_history: list = [] + + self._session: Optional[aiohttp.ClientSession] = None + + @property + async def session(self) -> aiohttp.ClientSession: + """ + An asynchronous property that provides access to the internal `aiohttp `_ client session. + + This property ensures the management of HTTP connections in an efficient way. It lazily + initializes the `aiohttp.ClientSession `_ on its first use. The session is then reused for subsequent + HTTP requests, offering performance benefits by reusing underlying connections. 
+ + This is used internally by the dendrite when querying axons, and should not be used directly + unless absolutely necessary for your application. + + Returns: + aiohttp.ClientSession: The active `aiohttp `_ client session instance. If no session exists, a + new one is created and returned. This session is used for asynchronous HTTP requests within + the dendrite, adhering to the async nature of the network interactions in the Bittensor framework. + + Example usage:: + + import bittensor # Import bittensor + wallet = bittensor.Wallet( ... ) # Initialize a wallet + dendrite = bittensor.Dendrite(wallet=wallet) # Initialize a dendrite instance with the wallet + + async with (await dendrite.session).post( # Use the session to make an HTTP POST request + url, # URL to send the request to + headers={...}, # Headers dict to be sent with the request + json={...}, # JSON body data to be sent with the request + timeout=10, # Timeout duration in seconds + ) as response: + json_response = await response.json() # Extract the JSON response from the server + + """ + if self._session is None: + self._session = aiohttp.ClientSession() + return self._session + + def close_session(self): + """ + Closes the internal `aiohttp `_ client session synchronously. + + This method ensures the proper closure and cleanup of the aiohttp client session, releasing any + resources like open connections and internal buffers. It is crucial for preventing resource leakage + and should be called when the dendrite instance is no longer in use, especially in synchronous contexts. + + Note: + This method utilizes asyncio's event loop to close the session asynchronously from a synchronous context. It is advisable to use this method only when asynchronous context management is not feasible. + + Usage: + When finished with dendrite in a synchronous context + :func:`dendrite_instance.close_session()`. 
+ """ + if self._session: + loop = asyncio.get_event_loop() + loop.run_until_complete(self._session.close()) + self._session = None + + async def aclose_session(self): + """ + Asynchronously closes the internal `aiohttp `_ client session. + + This method is the asynchronous counterpart to the :func:`close_session` method. It should be used in + asynchronous contexts to ensure that the aiohttp client session is closed properly. The method + releases resources associated with the session, such as open connections and internal buffers, + which is essential for resource management in asynchronous applications. + + Example: + Usage:: + When finished with dendrite in an asynchronous context + await :func:`dendrite_instance.aclose_session()`. + + Example: + Usage:: + async with dendrite_instance: + # Operations using dendrite + pass + # The session will be closed automatically after the above block + """ + if self._session: + await self._session.close() + self._session = None + + def _get_endpoint_url(self, target_axon, request_name): + """ + Constructs the endpoint URL for a network request to a target axon. + + This internal method generates the full HTTP URL for sending a request to the specified axon. The + URL includes the IP address and port of the target axon, along with the specific request name. It + differentiates between requests to the local system (using '0.0.0.0') and external systems. + + Args: + target_axon: The target axon object containing IP and port information. + request_name: The specific name of the request being made. + + Returns: + str: A string representing the complete HTTP URL for the request. + """ + endpoint = ( + f"0.0.0.0:{str(target_axon.port)}" + if target_axon.ip == str(self.external_ip) + else f"{target_axon.ip}:{str(target_axon.port)}" + ) + return f"http://{endpoint}/{request_name}" + + def log_exception(self, exception: Exception): + """ + Logs an exception with a unique identifier. 
+ + This method generates a unique UUID for the error, extracts the error type, + and logs the error message using Bittensor's logging system. + + Args: + exception (Exception): The exception object to be logged. + + Returns: + None + """ + error_id = str(uuid.uuid4()) + error_type = exception.__class__.__name__ + logging.error(f"{error_type}#{error_id}: {exception}") + + def process_error_message( + self, + synapse: Union["Synapse", "StreamingSynapse"], + request_name: str, + exception: Exception, + ) -> Union["Synapse", "StreamingSynapse"]: + """ + Handles exceptions that occur during network requests, updating the synapse with appropriate status codes and messages. + + This method interprets different types of exceptions and sets the corresponding status code and + message in the synapse object. It covers common network errors such as connection issues and timeouts. + + Args: + synapse (bittensor.core.synapse.Synapse): The synapse object associated with the request. + request_name (str): The name of the request during which the exception occurred. + exception (Exception): The exception object caught during the request. + + Returns: + Synapse (bittensor.core.synapse.Synapse): The updated synapse object with the error status code and message. + + Note: + This method updates the synapse object in-place. 
+ """ + + self.log_exception(exception) + + error_info = DENDRITE_ERROR_MAPPING.get(type(exception), DENDRITE_DEFAULT_ERROR) + status_code, status_message = error_info + + if status_code: + synapse.dendrite.status_code = status_code # type: ignore + elif isinstance(exception, aiohttp.ClientResponseError): + synapse.dendrite.status_code = str(exception.code) # type: ignore + + message = f"{status_message}: {str(exception)}" + if isinstance(exception, aiohttp.ClientConnectorError): + message = f"{status_message} at {synapse.axon.ip}:{synapse.axon.port}/{request_name}" # type: ignore + elif isinstance(exception, asyncio.TimeoutError): + message = f"{status_message} after {synapse.timeout} seconds" + + synapse.dendrite.status_message = message # type: ignore + + return synapse + + def _log_outgoing_request(self, synapse: "Synapse"): + """ + Logs information about outgoing requests for debugging purposes. + + This internal method logs key details about each outgoing request, including the size of the + request, the name of the synapse, the axon's details, and a success indicator. This information + is crucial for monitoring and debugging network activity within the Bittensor network. + + To turn on debug messages, set the environment variable BITTENSOR_DEBUG to ``1``, or call the bittensor debug method like so:: + + Example:: + + import bittensor + bittensor.debug() + + Args: + synapse (bittensor.core.synapse.Synapse): The synapse object representing the request being sent. + """ + if synapse.axon is not None: + logging.trace( + f"dendrite | --> | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | 0 | Success" + ) + + def _log_incoming_response(self, synapse: "Synapse"): + """ + Logs information about incoming responses for debugging and monitoring. 
+ + Similar to :func:`_log_outgoing_request`, this method logs essential details of the incoming responses, + including the size of the response, synapse name, axon details, status code, and status message. + This logging is vital for troubleshooting and understanding the network interactions in Bittensor. + + Args: + synapse (bittensor.core.synapse.Synapse): The synapse object representing the received response. + """ + if synapse.axon is not None and synapse.dendrite is not None: + logging.trace( + f"dendrite | <-- | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | {synapse.dendrite.status_code} | {synapse.dendrite.status_message}" + ) + + def query( + self, *args, **kwargs + ) -> list[Union["AsyncGenerator[Any, Any]", "Synapse", "StreamingSynapse"]]: + """ + Makes a synchronous request to multiple target Axons and returns the server responses. + + Cleanup is automatically handled and sessions are closed upon completed requests. + + Args: + axons (Union[list[Union[bittensor.core.chain_data.axon_info.AxonInfo, 'bittensor.core.axon.Axon']], Union['bittensor.core.chain_data.axon_info.AxonInfo', 'bittensor.core.axon.Axon']]): The list of target Axon information. + synapse (Optional[bittensor.core.synapse.Synapse]): The Synapse object. Defaults to :func:`Synapse()`. + timeout (Optional[float]): The request timeout duration in seconds. Defaults to ``12.0`` seconds. + + Returns: + Union[bittensor.core.synapse.Synapse, list[bittensor.core.synapse.Synapse]]: If a single target axon is provided, returns the response from that axon. If multiple target axons are provided, returns a list of responses from all target axons. 
+ """ + result = None + try: + loop = asyncio.get_event_loop() + result = loop.run_until_complete(self.forward(*args, **kwargs)) + except Exception: + new_loop = asyncio.new_event_loop() + asyncio.set_event_loop(new_loop) + result = new_loop.run_until_complete(self.forward(*args, **kwargs)) + new_loop.close() + finally: + self.close_session() + return result # type: ignore + + async def forward( + self, + axons: Union[list[Union["AxonInfo", "Axon"]], Union["AxonInfo", "Axon"]], + synapse: "Synapse" = Synapse(), + timeout: float = 12, + deserialize: bool = True, + run_async: bool = True, + streaming: bool = False, + ) -> list[Union["AsyncGenerator[Any, Any]", "Synapse", "StreamingSynapse"]]: + """ + Asynchronously sends requests to one or multiple Axons and collates their responses. + + This function acts as a bridge for sending multiple requests concurrently or sequentially + based on the provided parameters. It checks the type of the target Axons, preprocesses + the requests, and then sends them off. After getting the responses, it processes and + collates them into a unified format. + + When querying an Axon that sends a single response, this function returns a Synapse object + containing the response data. If multiple Axons are queried, a list of Synapse objects is + returned, each containing the response from the corresponding Axon. + + For example:: + + ... + import bittensor + wallet = bittensor.Wallet() # Initialize a wallet + synapse = bittensor.Synapse(...) # Create a synapse object that contains query data + dendrite = bittensor.Dendrite(wallet = wallet) # Initialize a dendrite instance + netuid = ... 
# Provide subnet ID + metagraph = bittensor.Metagraph(netuid) # Initialize a metagraph instance + axons = metagraph.axons # Create a list of axons to query + responses = await dendrite(axons, synapse) # Send the query to all axons and await the responses + + When querying an Axon that sends back data in chunks using the Dendrite, this function + returns an AsyncGenerator that yields each chunk as it is received. The generator can be + iterated over to process each chunk individually. + + For example:: + + ... + dendrite = bittensor.Dendrite(wallet = wallet) + async for chunk in dendrite.forward(axons, synapse, timeout, deserialize, run_async, streaming): + # Process each chunk here + print(chunk) + + Args: + axons (Union[list[Union[bittensor.core.chain_data.axon_info.AxonInfo, bittensor.core.axon.Axon]], Union[bittensor.core.chain_data.axon_info.AxonInfo, bittensor.core.axon.Axon]]): The target Axons to send requests to. Can be a single Axon or a list of Axons. + synapse (bittensor.core.synapse.Synapse): The Synapse object encapsulating the data. Defaults to a new :func:`Synapse` instance. + timeout (float): Maximum duration to wait for a response from an Axon in seconds. Defaults to ``12.0``. + deserialize (bool): Determines if the received response should be deserialized. Defaults to ``True``. + run_async (bool): If ``True``, sends requests concurrently. Otherwise, sends requests sequentially. Defaults to ``True``. + streaming (bool): Indicates if the response is expected to be in streaming format. Defaults to ``False``. + + Returns: + Union[AsyncGenerator, bittensor.core.synapse.Synapse, list[bittensor.core.synapse.Synapse]]: If a single `Axon` is targeted, returns its response. + If multiple Axons are targeted, returns a list of their responses. 
+ """ + is_list = True + # If a single axon is provided, wrap it in a list for uniform processing + if not isinstance(axons, list): + is_list = False + axons = [axons] + + # Check if synapse is an instance of the StreamingSynapse class or if streaming flag is set. + is_streaming_subclass = issubclass(synapse.__class__, StreamingSynapse) + if streaming != is_streaming_subclass: + logging.warning( + f"Argument streaming is {streaming} while issubclass(synapse, StreamingSynapse) is {synapse.__class__.__name__}. This may cause unexpected behavior." + ) + streaming = is_streaming_subclass or streaming + + async def query_all_axons( + is_stream: bool, + ) -> Union["AsyncGenerator[Any, Any]", "Synapse", "StreamingSynapse"]: + """ + Handles the processing of requests to all targeted axons, accommodating both streaming and non-streaming responses. + + This function manages the concurrent or sequential dispatch of requests to a list of axons. + It utilizes the ``is_stream`` parameter to determine the mode of response handling (streaming + or non-streaming). For each axon, it calls ``single_axon_response`` and aggregates the responses. + + Args: + is_stream (bool): Flag indicating whether the axon responses are expected to be streamed. + If ``True``, responses are handled in streaming mode. + + Returns: + list[Union[AsyncGenerator, bittensor.core.synapse.Synapse, bittensor.core.stream.StreamingSynapse]]: A list containing the responses from each axon. The type of each response depends on the streaming mode and the type of synapse used. + """ + + async def single_axon_response( + target_axon: Union["AxonInfo", "Axon"], + ) -> Union["AsyncGenerator[Any, Any]", "Synapse", "StreamingSynapse"]: + """ + Manages the request and response process for a single axon, supporting both streaming and non-streaming modes. + + This function is responsible for initiating a request to a single axon. 
Depending on the ``is_stream`` flag, it either uses ``call_stream`` for streaming responses or ``call`` for standard responses. The function handles the response processing, catering to the specifics of streaming or non-streaming data. + + Args: + target_axon (Union[bittensor.core.chain_data.axon_info.AxonInfo, bittensor.core.axon.Axon): The target axon object to which the request is to be sent. This object contains the necessary information like IP address and port to formulate the request. + + Returns: + Union[AsyncGenerator, bittensor.core.synapse.Synapse, bittensor.core.stream.StreamingSynapse]: The response from the targeted axon. In streaming mode, an AsyncGenerator is returned, yielding data chunks. In non-streaming mode, a Synapse or StreamingSynapse object is returned containing the response. + """ + if is_stream: + # If in streaming mode, return the async_generator + return self.call_stream( + target_axon=target_axon, + synapse=synapse.model_copy(), # type: ignore + timeout=timeout, + deserialize=deserialize, + ) + else: + # If not in streaming mode, simply call the axon and get the response. + return await self.call( + target_axon=target_axon, + synapse=synapse.model_copy(), # type: ignore + timeout=timeout, + deserialize=deserialize, + ) + + # If run_async flag is False, get responses one by one. + if not run_async: + return [ + await single_axon_response(target_axon) for target_axon in axons + ] # type: ignore + # If run_async flag is True, get responses concurrently using asyncio.gather(). + return await asyncio.gather( + *(single_axon_response(target_axon) for target_axon in axons) + ) # type: ignore + + # Get responses for all axons. 
+ responses = await query_all_axons(streaming) + # Return the single response if only one axon was targeted, else return all responses + return responses[0] if len(responses) == 1 and not is_list else responses # type: ignore + + async def call( + self, + target_axon: Union["AxonInfo", "Axon"], + synapse: "Synapse" = Synapse(), + timeout: float = 12.0, + deserialize: bool = True, + ) -> "Synapse": + """ + Asynchronously sends a request to a specified Axon and processes the response. + + This function establishes a connection with a specified Axon, sends the encapsulated data through the Synapse object, waits for a response, processes it, and then returns the updated Synapse object. + + Args: + target_axon (Union[bittensor.core.chain_data.axon_info.AxonInfo, bittensor.core.axon.Axon]): The target Axon to send the request to. + synapse (bittensor.core.synapse.Synapse): The Synapse object encapsulating the data. Defaults to a new :func:`Synapse` instance. + timeout (float): Maximum duration to wait for a response from the Axon in seconds. Defaults to ``12.0``. + deserialize (bool): Determines if the received response should be deserialized. Defaults to ``True``. + + Returns: + bittensor.core.synapse.Synapse: The Synapse object, updated with the response data from the Axon. 
+ """ + + # Record start time + start_time = time.time() + target_axon = ( + target_axon.info() if isinstance(target_axon, Axon) else target_axon + ) + + # Build request endpoint from the synapse class + request_name = synapse.__class__.__name__ + url = self._get_endpoint_url(target_axon, request_name=request_name) + + # Preprocess synapse for making a request + synapse = self.preprocess_synapse_for_request(target_axon, synapse, timeout) + + try: + # Log outgoing request + self._log_outgoing_request(synapse) + + # Make the HTTP POST request + async with (await self.session).post( + url=url, + headers=synapse.to_headers(), + json=synapse.model_dump(), + timeout=aiohttp.ClientTimeout(total=timeout), + ) as response: + # Extract the JSON response from the server + json_response = await response.json() + # Process the server response and fill synapse + self.process_server_response(response, json_response, synapse) + + # Set process time and log the response + synapse.dendrite.process_time = str(time.time() - start_time) # type: ignore + + except Exception as e: + synapse = self.process_error_message(synapse, request_name, e) + + finally: + self._log_incoming_response(synapse) + + # Log synapse event history + self.synapse_history.append(Synapse.from_headers(synapse.to_headers())) + + # Return the updated synapse object after deserializing if requested + return synapse.deserialize() if deserialize else synapse + + async def call_stream( + self, + target_axon: Union["AxonInfo", "Axon"], + synapse: "StreamingSynapse" = Synapse(), # type: ignore + timeout: float = 12.0, + deserialize: bool = True, + ) -> "AsyncGenerator[Any, Any]": + """ + Sends a request to a specified Axon and yields streaming responses. + + Similar to ``call``, but designed for scenarios where the Axon sends back data in + multiple chunks or streams. The function yields each chunk as it is received. 
This is + useful for processing large responses piece by piece without waiting for the entire + data to be transmitted. + + Args: + target_axon (Union[bittensor.core.chain_data.axon_info.AxonInfo, bittensor.core.axon.Axon]): The target Axon to send the request to. + synapse (bittensor.core.synapse.Synapse): The Synapse object encapsulating the data. Defaults to a new :func:`Synapse` instance. + timeout (float): Maximum duration to wait for a response (or a chunk of the response) from the Axon in seconds. Defaults to ``12.0``. + deserialize (bool): Determines if each received chunk should be deserialized. Defaults to ``True``. + + Yields: + object: Each yielded object contains a chunk of the arbitrary response data from the Axon. + bittensor.core.synapse.Synapse: After the AsyncGenerator has been exhausted, yields the final filled Synapse. + """ + + # Record start time + start_time = time.time() + target_axon = ( + target_axon.info() if isinstance(target_axon, Axon) else target_axon + ) + + # Build request endpoint from the synapse class + request_name = synapse.__class__.__name__ + endpoint = ( + f"0.0.0.0:{str(target_axon.port)}" + if target_axon.ip == str(self.external_ip) + else f"{target_axon.ip}:{str(target_axon.port)}" + ) + url = f"http://{endpoint}/{request_name}" + + # Preprocess synapse for making a request + synapse = self.preprocess_synapse_for_request(target_axon, synapse, timeout) # type: ignore + + try: + # Log outgoing request + self._log_outgoing_request(synapse) + + # Make the HTTP POST request + async with (await self.session).post( + url, + headers=synapse.to_headers(), + json=synapse.model_dump(), + timeout=aiohttp.ClientTimeout(total=timeout), + ) as response: + # Use synapse subclass' process_streaming_response method to yield the response chunks + async for chunk in synapse.process_streaming_response(response): # type: ignore + yield chunk # Yield each chunk as it's processed + json_response = synapse.extract_response_json(response) + + # 
Process the server response + self.process_server_response(response, json_response, synapse) + + # Set process time and log the response + synapse.dendrite.process_time = str(time.time() - start_time) # type: ignore + + except Exception as e: + synapse = self.process_error_message(synapse, request_name, e) # type: ignore + + finally: + self._log_incoming_response(synapse) + + # Log synapse event history + self.synapse_history.append(Synapse.from_headers(synapse.to_headers())) + + # Return the updated synapse object after deserializing if requested + if deserialize: + yield synapse.deserialize() + else: + yield synapse + + def preprocess_synapse_for_request( + self, + target_axon_info: "AxonInfo", + synapse: "Synapse", + timeout: float = 12.0, + ) -> "Synapse": + """ + Preprocesses the synapse for making a request. This includes building headers for Dendrite and Axon and signing the request. + + Args: + target_axon_info (bittensor.core.chain_data.axon_info.AxonInfo): The target axon information. + synapse (bittensor.core.synapse.Synapse): The synapse object to be preprocessed. + timeout (float): The request timeout duration in seconds. Defaults to ``12.0`` seconds. + + Returns: + bittensor.core.synapse.Synapse: The preprocessed synapse. 
+ """ + # Set the timeout for the synapse + synapse.timeout = timeout + synapse.dendrite = TerminalInfo( + ip=self.external_ip, + version=version_as_int, + nonce=time.time_ns(), + uuid=self.uuid, + hotkey=self.keypair.ss58_address, + ) + + # Build the Axon headers using the target axon's details + synapse.axon = TerminalInfo( + ip=target_axon_info.ip, + port=target_axon_info.port, + hotkey=target_axon_info.hotkey, + ) + + # Sign the request using the dendrite, axon info, and the synapse body hash + message = f"{synapse.dendrite.nonce}.{synapse.dendrite.hotkey}.{synapse.axon.hotkey}.{synapse.dendrite.uuid}.{synapse.body_hash}" + synapse.dendrite.signature = f"0x{self.keypair.sign(message).hex()}" + + return synapse + + def process_server_response( + self, + server_response: "aiohttp.ClientResponse", + json_response: dict, + local_synapse: "Synapse", + ): + """ + Processes the server response, updates the local synapse state with the server's state and merges headers set by the server. + + Args: + server_response (object): The `aiohttp `_ response object from the server. + json_response (dict): The parsed JSON response from the server. + local_synapse (bittensor.core.synapse.Synapse): The local synapse object to be updated. + + Raises: + None: But errors in attribute setting are silently ignored. + """ + # Check if the server responded with a successful status code + if server_response.status == 200: + # If the response is successful, overwrite local synapse state with + # server's state only if the protocol allows mutation. 
To prevent overwrites, + # the protocol must set Frozen = True + server_synapse = local_synapse.__class__(**json_response) + for key in local_synapse.model_dump().keys(): + try: + # Set the attribute in the local synapse from the corresponding + # attribute in the server synapse + setattr(local_synapse, key, getattr(server_synapse, key)) + except Exception: + # Ignore errors during attribute setting + pass + else: + # If the server responded with an error, update the local synapse state + if local_synapse.axon is None: + local_synapse.axon = TerminalInfo() + local_synapse.axon.status_code = server_response.status + local_synapse.axon.status_message = json_response.get("message") + + # Extract server headers and overwrite None values in local synapse headers + server_headers = Synapse.from_headers(server_response.headers) # type: ignore + + # Merge dendrite headers + local_synapse.dendrite.__dict__.update( + { + **local_synapse.dendrite.model_dump(exclude_none=True), # type: ignore + **server_headers.dendrite.model_dump(exclude_none=True), # type: ignore + } + ) + + # Merge axon headers + local_synapse.axon.__dict__.update( + { + **local_synapse.axon.model_dump(exclude_none=True), # type: ignore + **server_headers.axon.model_dump(exclude_none=True), # type: ignore + } + ) + + # Update the status code and status message of the dendrite to match the axon + local_synapse.dendrite.status_code = local_synapse.axon.status_code # type: ignore + local_synapse.dendrite.status_message = local_synapse.axon.status_message # type: ignore + + def __str__(self) -> str: + """ + Returns a string representation of the Dendrite object. + + Returns: + str: The string representation of the Dendrite object in the format :func:`dendrite()`. + """ + return f"dendrite({self.keypair.ss58_address})" + + def __repr__(self) -> str: + """ + Returns a string representation of the Dendrite object, acting as a fallback for :func:`__str__()`. 
+ + Returns: + str: The string representation of the Dendrite object in the format :func:`dendrite()`. + """ + return self.__str__() + + async def __aenter__(self): + """ + Asynchronous context manager entry method. + + Enables the use of the ``async with`` statement with the Dendrite instance. When entering the context, the current instance of the class is returned, making it accessible within the asynchronous context. + + Returns: + Dendrite: The current instance of the Dendrite class. + + Usage:: + async with Dendrite() as dendrite: + await dendrite.some_async_method() + """ + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + """ + Asynchronous context manager exit method. + + Ensures proper cleanup when exiting the ``async with`` context. This method will close the `aiohttp `_ client session asynchronously, releasing any tied resources. + + Args: + exc_type (Type[BaseException]): The type of exception that was raised. + exc_value (BaseException): The instance of exception that was raised. + traceback (TracebackType): A traceback object encapsulating the call stack at the point where the exception was raised. + + Usage:: + import bittensor + + wallet = bittensor.Wallet() + async with bittensor.Dendrite(wallet=wallet) as dendrite: + await dendrite.some_async_method() + + Note: + This automatically closes the session by calling :func:`__aexit__` after the context closes. + """ + await self.aclose_session() + + def __del__(self): + """ + Dendrite destructor. + + This method is invoked when the Dendrite instance is about to be destroyed. The destructor ensures that the aiohttp client session is closed before the instance is fully destroyed, releasing any remaining resources. + + Note: + Relying on the destructor for cleanup can be unpredictable. It is recommended to explicitly close sessions using the provided methods or the ``async with`` context manager. + + Usage:: + + dendrite = Dendrite() + # ... some operations ... 
+ del dendrite # This will implicitly invoke the __del__ method and close the session. + """ + self.close_session() + + +# For back-compatibility with torch +BaseModel: Union["torch.nn.Module", object] = torch.nn.Module if use_torch() else object + + +class Dendrite(DendriteMixin, BaseModel): # type: ignore + def __init__(self, wallet: Optional[Union["Wallet", "Keypair"]] = None): + if use_torch(): + torch.nn.Module.__init__(self) + DendriteMixin.__init__(self, wallet) + + +if not use_torch(): + + async def call(self, *args, **kwargs): + return await self.forward(*args, **kwargs) + + Dendrite.__call__ = call diff --git a/bittensor/core/errors.py b/bittensor/core/errors.py new file mode 100644 index 0000000000..6fd9729e8b --- /dev/null +++ b/bittensor/core/errors.py @@ -0,0 +1,129 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
from __future__ import annotations

from typing import TYPE_CHECKING

# `Synapse` is needed only for type annotations, which are evaluated lazily
# under `from __future__ import annotations`. Guarding the import avoids a
# runtime dependency (and a potential circular import) on the synapse module.
# NOTE(review): `Synapse` is consequently no longer importable from this
# module at runtime — confirm no caller does `from ...errors import Synapse`.
if TYPE_CHECKING:
    from bittensor.core.synapse import Synapse


class ChainError(BaseException):
    """Base error for any chain related errors."""
    # NOTE(review): derives from BaseException, so `except Exception` will NOT
    # catch chain errors — presumably intentional; confirm before changing.


class ChainConnectionError(ChainError):
    """Error for any chain connection related errors."""


class ChainTransactionError(ChainError):
    """Error for any chain transaction related errors."""


class ChainQueryError(ChainError):
    """Error for any chain query related errors."""


class StakeError(ChainTransactionError):
    """Error raised when a stake transaction fails."""


class UnstakeError(ChainTransactionError):
    """Error raised when an unstake transaction fails."""


class IdentityError(ChainTransactionError):
    """Error raised when an identity transaction fails."""


class NominationError(ChainTransactionError):
    """Error raised when a nomination transaction fails."""


class TakeError(ChainTransactionError):
    """Error raised when an increase / decrease take transaction fails."""


class TransferError(ChainTransactionError):
    """Error raised when a transfer transaction fails."""


class RegistrationError(ChainTransactionError):
    """Error raised when a neuron registration transaction fails."""


class NotRegisteredError(ChainTransactionError):
    """Error raised when a neuron is not registered, and the transaction requires it to be."""


class NotDelegateError(StakeError):
    """Error raised when a hotkey you are trying to stake to is not a delegate."""


class MetadataError(ChainTransactionError):
    """Error raised when metadata commitment transaction fails."""


class InvalidRequestNameError(Exception):
    """This exception is raised when the request name is invalid. Usually indicates a broken URL."""


class SynapseException(Exception):
    """Base exception for synapse-related failures, carrying the offending synapse.

    Args:
        message (str): Human-readable error description.
        synapse (Optional[Synapse]): The synapse involved in the failure, if any.
    """

    def __init__(self, message="Synapse Exception", synapse: "Synapse" | None = None):
        self.message = message
        self.synapse = synapse
        super().__init__(self.message)


class UnknownSynapseError(SynapseException):
    """This exception is raised when the request name is not found in the Axon's forward_fns dictionary."""


class SynapseParsingError(Exception):
    """This exception is raised when the request headers are unable to be parsed into the synapse type."""


class NotVerifiedException(SynapseException):
    """This exception is raised when the request is not verified."""


class BlacklistedException(SynapseException):
    """This exception is raised when the request is blacklisted."""


class PriorityException(SynapseException):
    """This exception is raised when the request priority is not met."""


class PostProcessException(SynapseException):
    """This exception is raised when the response headers cannot be updated."""


class RunException(SynapseException):
    """This exception is raised when the requested function cannot be executed. Indicates a server error."""


class InternalServerError(SynapseException):
    """This exception is raised when the requested function fails on the server. Indicates a server error."""


class SynapseDendriteNoneException(SynapseException):
    """Raised when a synapse's dendrite is unexpectedly ``None``."""

    def __init__(
        self,
        message="Synapse Dendrite is None",
        synapse: "Synapse" | None = None,
    ):
        self.message = message
        # Parent constructor stores both `message` and `synapse` attributes.
        super().__init__(self.message, synapse)
diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py new file mode 100644 index 0000000000..5e9f2e9e19 --- /dev/null +++ b/bittensor/core/extrinsics/commit_weights.py @@ -0,0 +1,274 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
"""Module commit weights and reveal weights extrinsic."""

from typing import Optional, TYPE_CHECKING

from retry import retry
from rich.prompt import Confirm

from bittensor.core.extrinsics.utils import submit_extrinsic
from bittensor.utils import format_error_message
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected

# For annotation purposes
if TYPE_CHECKING:
    from bittensor_wallet import Wallet
    from bittensor.core.subtensor import Subtensor


# Chain call for `commit_weights_extrinsic`
@ensure_connected
def do_commit_weights(
    self: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    commit_hash: str,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = False,
) -> tuple[bool, Optional[dict]]:
    """
    Internal method to send a transaction to the Bittensor blockchain, committing the hash of a neuron's weights.
    This method constructs and submits the transaction, handling retries and blockchain communication.

    Args:
        self (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction.
        wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights.
        netuid (int): The unique identifier of the subnet.
        commit_hash (str): The hash of the neuron's weights to be committed.
        wait_for_inclusion (bool): Waits for the transaction to be included in a block.
        wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.

    Returns:
        tuple[bool, Optional[dict]]: A tuple containing a success flag and, on failure, the raw error response
            from the chain (``None`` on success or when not waiting for inclusion/finalization).

    This method ensures that the weight commitment is securely recorded on the Bittensor blockchain, providing a verifiable record of the neuron's weight distribution at a specific point in time.
    """

    @retry(delay=1, tries=3, backoff=2, max_delay=4)
    def make_substrate_call_with_retry():
        call = self.substrate.compose_call(
            call_module="SubtensorModule",
            call_function="commit_weights",
            call_params={
                "netuid": netuid,
                "commit_hash": commit_hash,
            },
        )
        extrinsic = self.substrate.create_signed_extrinsic(
            call=call,
            keypair=wallet.hotkey,
        )
        response = submit_extrinsic(
            substrate=self.substrate,
            extrinsic=extrinsic,
            wait_for_inclusion=wait_for_inclusion,
            wait_for_finalization=wait_for_finalization,
        )

        # Fire-and-forget: without waiting there is no receipt to inspect.
        if not wait_for_finalization and not wait_for_inclusion:
            return True, None

        response.process_events()
        if response.is_success:
            return True, None
        else:
            return False, response.error_message

    return make_substrate_call_with_retry()


def commit_weights_extrinsic(
    subtensor: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    commit_hash: str,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = False,
    prompt: bool = False,
) -> tuple[bool, str]:
    """
    Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet.
    This function is a wrapper around the `do_commit_weights` method, handling user prompts and error messages.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction.
        wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights.
        netuid (int): The unique identifier of the subnet.
        commit_hash (str): The hash of the neuron's weights to be committed.
        wait_for_inclusion (bool): Waits for the transaction to be included in a block.
        wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
        prompt (bool): If ``True``, prompts for user confirmation before proceeding.

    Returns:
        tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string
        value describing the success or potential error.

    This function provides a user-friendly interface for committing weights to the Bittensor blockchain, ensuring proper error handling and user interaction when required.
    """
    if prompt and not Confirm.ask("Would you like to commit weights?"):
        return False, "User cancelled the operation."

    success, error_message = do_commit_weights(
        self=subtensor,
        wallet=wallet,
        netuid=netuid,
        commit_hash=commit_hash,
        wait_for_inclusion=wait_for_inclusion,
        wait_for_finalization=wait_for_finalization,
    )

    if success:
        success_message = "Successfully committed weights."
        logging.info(success_message)
        return True, success_message
    else:
        # Convert the raw chain error response into a readable string.
        error_message = format_error_message(error_message)
        logging.error(f"Failed to commit weights: {error_message}")
        return False, error_message


# Chain call for `reveal_weights_extrinsic`
@ensure_connected
def do_reveal_weights(
    self: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    uids: list[int],
    values: list[int],
    salt: list[int],
    version_key: int,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = False,
) -> tuple[bool, Optional[dict]]:
    """
    Internal method to send a transaction to the Bittensor blockchain, revealing the weights for a specific subnet.
    This method constructs and submits the transaction, handling retries and blockchain communication.

    Args:
        self (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction.
        wallet (bittensor_wallet.Wallet): The wallet associated with the neuron revealing the weights.
        netuid (int): The unique identifier of the subnet.
        uids (list[int]): List of neuron UIDs for which weights are being revealed.
        values (list[int]): List of weight values corresponding to each UID.
        salt (list[int]): List of salt values corresponding to the hash function.
        version_key (int): Version key for compatibility with the network.
        wait_for_inclusion (bool): Waits for the transaction to be included in a block.
        wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.

    Returns:
        tuple[bool, Optional[dict]]: A tuple containing a success flag and, on failure, the raw error response
            from the chain (``None`` on success or when not waiting for inclusion/finalization).

    This method ensures that the weight revelation is securely recorded on the Bittensor blockchain, providing transparency and accountability for the neuron's weight distribution.
    """

    @retry(delay=1, tries=3, backoff=2, max_delay=4)
    def make_substrate_call_with_retry():
        call = self.substrate.compose_call(
            call_module="SubtensorModule",
            call_function="reveal_weights",
            call_params={
                "netuid": netuid,
                "uids": uids,
                "values": values,
                "salt": salt,
                "version_key": version_key,
            },
        )
        extrinsic = self.substrate.create_signed_extrinsic(
            call=call,
            keypair=wallet.hotkey,
        )
        response = submit_extrinsic(
            substrate=self.substrate,
            extrinsic=extrinsic,
            wait_for_inclusion=wait_for_inclusion,
            wait_for_finalization=wait_for_finalization,
        )

        # Fire-and-forget: without waiting there is no receipt to inspect.
        if not wait_for_finalization and not wait_for_inclusion:
            return True, None

        response.process_events()
        if response.is_success:
            return True, None
        else:
            return False, response.error_message

    return make_substrate_call_with_retry()


def reveal_weights_extrinsic(
    subtensor: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    uids: list[int],
    weights: list[int],
    salt: list[int],
    version_key: int,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = False,
    prompt: bool = False,
) -> tuple[bool, str]:
    """
    Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet.
    This function is a wrapper around the `do_reveal_weights` method, handling user prompts and error messages.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction.
        wallet (bittensor_wallet.Wallet): The wallet associated with the neuron revealing the weights.
        netuid (int): The unique identifier of the subnet.
        uids (list[int]): List of neuron UIDs for which weights are being revealed.
        weights (list[int]): List of weight values corresponding to each UID.
        salt (list[int]): List of salt values corresponding to the hash function.
        version_key (int): Version key for compatibility with the network.
        wait_for_inclusion (bool): Waits for the transaction to be included in a block.
        wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
        prompt (bool): If ``True``, prompts for user confirmation before proceeding.

    Returns:
        tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string
        value describing the success or potential error.

    This function provides a user-friendly interface for revealing weights on the Bittensor blockchain, ensuring proper error handling and user interaction when required.
    """

    if prompt and not Confirm.ask("Would you like to reveal weights?"):
        return False, "User cancelled the operation."

    success, error_message = do_reveal_weights(
        self=subtensor,
        wallet=wallet,
        netuid=netuid,
        uids=uids,
        values=weights,
        salt=salt,
        version_key=version_key,
        wait_for_inclusion=wait_for_inclusion,
        wait_for_finalization=wait_for_finalization,
    )

    if success:
        success_message = "Successfully revealed weights."
        logging.info(success_message)
        return True, success_message
    else:
        # Convert the raw chain error response into a readable string.
        error_message = format_error_message(error_message)
        logging.error(f"Failed to reveal weights: {error_message}")
        return False, error_message
import json
from typing import Optional, TYPE_CHECKING

from retry import retry

from bittensor.core.extrinsics.utils import submit_extrinsic
from bittensor.core.settings import version_as_int, bt_console
from bittensor.utils import networking as net, format_error_message
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected

# For annotation purposes
if TYPE_CHECKING:
    from bittensor_wallet import Wallet
    from bittensor.core.subtensor import Subtensor
    from bittensor.core.types import PrometheusServeCallParams


# Chain call for `prometheus_extrinsic`
@ensure_connected
def do_serve_prometheus(
    self: "Subtensor",
    wallet: "Wallet",
    call_params: "PrometheusServeCallParams",
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = True,
) -> tuple[bool, Optional[dict]]:
    """
    Sends a serve prometheus extrinsic to the chain.

    Args:
        self (bittensor.core.subtensor.Subtensor): Bittensor subtensor object
        wallet (bittensor_wallet.Wallet): Wallet object.
        call_params (bittensor.core.types.PrometheusServeCallParams): Prometheus serve call parameters.
        wait_for_inclusion (bool): If ``true``, waits for inclusion.
        wait_for_finalization (bool): If ``true``, waits for finalization.

    Returns:
        success (bool): ``True`` if serve prometheus was successful.
        error (Optional[dict]): Raw error response from the chain if serve prometheus failed, ``None`` otherwise.
    """

    @retry(delay=1, tries=3, backoff=2, max_delay=4)
    def make_substrate_call_with_retry():
        call = self.substrate.compose_call(
            call_module="SubtensorModule",
            call_function="serve_prometheus",
            call_params=call_params,
        )
        extrinsic = self.substrate.create_signed_extrinsic(
            call=call, keypair=wallet.hotkey
        )
        response = submit_extrinsic(
            substrate=self.substrate,
            extrinsic=extrinsic,
            wait_for_inclusion=wait_for_inclusion,
            wait_for_finalization=wait_for_finalization,
        )
        if wait_for_inclusion or wait_for_finalization:
            response.process_events()
            if response.is_success:
                return True, None
            else:
                return False, response.error_message
        else:
            # Fire-and-forget: without waiting there is no receipt to inspect.
            return True, None

    return make_substrate_call_with_retry()


def prometheus_extrinsic(
    subtensor: "Subtensor",
    wallet: "Wallet",
    port: int,
    netuid: int,
    ip: Optional[str] = None,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = True,
) -> bool:
    """Subscribes a Bittensor endpoint to the Subtensor chain.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): Bittensor subtensor object.
        wallet (bittensor_wallet.Wallet): Bittensor wallet object.
        port (int): Endpoint port number i.e., `9221`.
        netuid (int): Network `uid` to serve on.
        ip (Optional[str]): Endpoint host i.e., ``192.122.31.4``. If ``None``, the external IP is auto-detected.
        wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
        wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.

    Returns:
        success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.

    Raises:
        RuntimeError: If ``ip`` is ``None`` and the external IP cannot be determined.
    """

    # Get external ip
    if ip is None:
        try:
            external_ip = net.get_external_ip()
            bt_console.print(
                f":white_heavy_check_mark: [green]Found external ip: {external_ip}[/green]"
            )
            # Bug fix: was a plain string "{external_ip}" (missing f-prefix),
            # which logged the literal placeholder instead of the address.
            logging.success(prefix="External IP", suffix=f"{external_ip}")
        except Exception as e:
            raise RuntimeError(
                f"Unable to attain your external ip. Check your internet connection. error: {e}"
            ) from e
    else:
        external_ip = ip

    call_params: "PrometheusServeCallParams" = {
        "version": version_as_int,
        "ip": net.ip_to_int(external_ip),
        "port": port,
        "ip_type": net.ip_version(external_ip),
    }

    with bt_console.status(":satellite: Checking Prometheus..."):
        neuron = subtensor.get_neuron_for_pubkey_and_subnet(
            wallet.hotkey.ss58_address, netuid=netuid
        )
        # Skip the extrinsic entirely if the chain already holds these exact values.
        neuron_up_to_date = not neuron.is_null and call_params == {
            "version": neuron.prometheus_info.version,
            "ip": net.ip_to_int(neuron.prometheus_info.ip),
            "port": neuron.prometheus_info.port,
            "ip_type": neuron.prometheus_info.ip_type,
        }

    if neuron_up_to_date:
        bt_console.print(
            f":white_heavy_check_mark: [green]Prometheus already Served[/green]\n"
            f"[green not bold]- Status: [/green not bold] |"
            f"[green not bold] ip: [/green not bold][white not bold]{neuron.prometheus_info.ip}[/white not bold] |"
            f"[green not bold] ip_type: [/green not bold][white not bold]{neuron.prometheus_info.ip_type}[/white not bold] |"
            f"[green not bold] port: [/green not bold][white not bold]{neuron.prometheus_info.port}[/white not bold] | "
            f"[green not bold] version: [/green not bold][white not bold]{neuron.prometheus_info.version}[/white not bold] |"
        )

        bt_console.print(
            ":white_heavy_check_mark: [white]Prometheus already served.[/white]"
        )
        return True

    # Add netuid, not in prometheus_info
    call_params["netuid"] = netuid

    with bt_console.status(
        f":satellite: Serving prometheus on: [white]{subtensor.network}:{netuid}[/white] ..."
    ):
        success, error_message = do_serve_prometheus(
            self=subtensor,
            wallet=wallet,
            call_params=call_params,
            wait_for_finalization=wait_for_finalization,
            wait_for_inclusion=wait_for_inclusion,
        )

        if wait_for_inclusion or wait_for_finalization:
            if success is True:
                json_ = json.dumps(call_params, indent=4, sort_keys=True)
                bt_console.print(
                    f":white_heavy_check_mark: [green]Served prometheus[/green]\n  [bold white]{json_}[/bold white]"
                )
                return True
            else:
                bt_console.print(
                    f":cross_mark: [red]Failed[/red]: {format_error_message(error_message)}"
                )
                return False
        else:
            return True
import json
from typing import Optional, TYPE_CHECKING

from retry import retry
from rich.prompt import Confirm

from bittensor.core.errors import MetadataError
from bittensor.core.extrinsics.utils import submit_extrinsic
from bittensor.core.settings import version_as_int, bt_console
from bittensor.utils import format_error_message, networking as net
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected

# For annotation purposes
if TYPE_CHECKING:
    from bittensor.core.axon import Axon
    from bittensor.core.subtensor import Subtensor
    from bittensor.core.types import AxonServeCallParams
    from bittensor_wallet import Wallet


# Chain call for `serve_extrinsic` and `serve_axon_extrinsic`
@ensure_connected
def do_serve_axon(
    self: "Subtensor",
    wallet: "Wallet",
    call_params: "AxonServeCallParams",
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = True,
) -> tuple[bool, Optional[dict]]:
    """
    Internal method to submit a serve axon transaction to the Bittensor blockchain. This method creates and submits a transaction, enabling a neuron's ``Axon`` to serve requests on the network.

    Args:
        self (bittensor.core.subtensor.Subtensor): Subtensor instance object.
        wallet (bittensor_wallet.Wallet): The wallet associated with the neuron.
        call_params (bittensor.core.types.AxonServeCallParams): Parameters required for the serve axon call.
        wait_for_inclusion (bool): Waits for the transaction to be included in a block.
        wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.

    Returns:
        tuple[bool, Optional[dict]]: A tuple containing a success flag and, on failure, the raw error response
            from the chain (``None`` on success or when not waiting for inclusion/finalization).

    This function is crucial for initializing and announcing a neuron's ``Axon`` service on the network, enhancing the decentralized computation capabilities of Bittensor.
    """

    @retry(delay=1, tries=3, backoff=2, max_delay=4)
    def make_substrate_call_with_retry():
        call = self.substrate.compose_call(
            call_module="SubtensorModule",
            call_function="serve_axon",
            call_params=call_params,
        )
        extrinsic = self.substrate.create_signed_extrinsic(
            call=call, keypair=wallet.hotkey
        )
        response = submit_extrinsic(
            substrate=self.substrate,
            extrinsic=extrinsic,
            wait_for_inclusion=wait_for_inclusion,
            wait_for_finalization=wait_for_finalization,
        )
        if wait_for_inclusion or wait_for_finalization:
            response.process_events()
            if response.is_success:
                return True, None
            else:
                return False, response.error_message
        else:
            # Fire-and-forget: without waiting there is no receipt to inspect.
            return True, None

    return make_substrate_call_with_retry()


def serve_extrinsic(
    subtensor: "Subtensor",
    wallet: "Wallet",
    ip: str,
    port: int,
    protocol: int,
    netuid: int,
    placeholder1: int = 0,
    placeholder2: int = 0,
    wait_for_inclusion: bool = False,
    wait_for_finalization=True,
    prompt: bool = False,
) -> bool:
    """Subscribes a Bittensor endpoint to the subtensor chain.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance object.
        wallet (bittensor_wallet.Wallet): Bittensor wallet object.
        ip (str): Endpoint host port i.e., ``192.122.31.4``.
        port (int): Endpoint port number i.e., ``9221``.
        protocol (int): An ``int`` representation of the protocol.
        netuid (int): The network uid to serve on.
        placeholder1 (int): A placeholder for future use.
        placeholder2 (int): A placeholder for future use.
        wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
        wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
        prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding.

    Returns:
        success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
    """
    # Decrypt hotkey
    wallet.unlock_hotkey()
    params: "AxonServeCallParams" = {
        "version": version_as_int,
        "ip": net.ip_to_int(ip),
        "port": port,
        "ip_type": net.ip_version(ip),
        "netuid": netuid,
        "hotkey": wallet.hotkey.ss58_address,
        "coldkey": wallet.coldkeypub.ss58_address,
        "protocol": protocol,
        "placeholder1": placeholder1,
        "placeholder2": placeholder2,
    }
    logging.debug("Checking axon ...")
    neuron = subtensor.get_neuron_for_pubkey_and_subnet(
        wallet.hotkey.ss58_address, netuid=netuid
    )
    # Skip the extrinsic entirely if the chain already holds these exact values.
    neuron_up_to_date = not neuron.is_null and params == {
        "version": neuron.axon_info.version,
        "ip": net.ip_to_int(neuron.axon_info.ip),
        "port": neuron.axon_info.port,
        "ip_type": neuron.axon_info.ip_type,
        "netuid": neuron.netuid,
        "hotkey": neuron.hotkey,
        "coldkey": neuron.coldkey,
        "protocol": neuron.axon_info.protocol,
        "placeholder1": neuron.axon_info.placeholder1,
        "placeholder2": neuron.axon_info.placeholder2,
    }
    if neuron_up_to_date:
        logging.debug(
            f"Axon already served on: AxonInfo({wallet.hotkey.ss58_address},{ip}:{port}) "
        )
        return True

    if prompt:
        # Show the exact call params (with key addresses) before asking.
        output = params.copy()
        output["coldkey"] = wallet.coldkeypub.ss58_address
        output["hotkey"] = wallet.hotkey.ss58_address
        if not Confirm.ask(
            f"Do you want to serve axon:\n  [bold white]{json.dumps(output, indent=4, sort_keys=True)}[/bold white]"
        ):
            return False

    logging.debug(
        f"Serving axon with: AxonInfo({wallet.hotkey.ss58_address},{ip}:{port}) -> {subtensor.network}:{netuid}"
    )
    success, error_message = do_serve_axon(
        self=subtensor,
        wallet=wallet,
        call_params=params,
        wait_for_finalization=wait_for_finalization,
        wait_for_inclusion=wait_for_inclusion,
    )

    if wait_for_inclusion or wait_for_finalization:
        if success is True:
            logging.debug(
                f"Axon served with: AxonInfo({wallet.hotkey.ss58_address},{ip}:{port}) on {subtensor.network}:{netuid} "
            )
            return True
        else:
            logging.error(f"Failed: {format_error_message(error_message)}")
            return False
    else:
        return True


def serve_axon_extrinsic(
    subtensor: "Subtensor",
    netuid: int,
    axon: "Axon",
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = True,
) -> bool:
    """Serves the axon to the network.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance object.
        netuid (int): The ``netuid`` being served on.
        axon (bittensor.core.axon.Axon): Axon to serve.
        wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
        wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.

    Returns:
        success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.

    Raises:
        RuntimeError: If the axon has no external IP set and it cannot be auto-detected.
    """
    axon.wallet.unlock_hotkey()
    axon.wallet.unlock_coldkeypub()
    external_port = axon.external_port

    # ---- Get external ip ----
    if axon.external_ip is None:
        try:
            external_ip = net.get_external_ip()
            bt_console.print(
                f":white_heavy_check_mark: [green]Found external ip: {external_ip}[/green]"
            )
            logging.success(prefix="External IP", suffix=f"{external_ip}")
        except Exception as e:
            raise RuntimeError(
                f"Unable to attain your external ip. Check your internet connection. error: {e}"
            ) from e
    else:
        external_ip = axon.external_ip

    # ---- Subscribe to chain ----
    serve_success = serve_extrinsic(
        subtensor=subtensor,
        wallet=axon.wallet,
        ip=external_ip,
        port=external_port,
        netuid=netuid,
        protocol=4,
        wait_for_inclusion=wait_for_inclusion,
        wait_for_finalization=wait_for_finalization,
    )
    return serve_success


# Community uses this extrinsic directly and via `subtensor.commit`
@ensure_connected
def publish_metadata(
    self: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    data_type: str,
    data: bytes,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = True,
) -> bool:
    """
    Publishes metadata on the Bittensor network using the specified wallet and network identifier.

    Args:
        self (bittensor.core.subtensor.Subtensor): The subtensor instance representing the Bittensor blockchain connection.
        wallet (bittensor_wallet.Wallet): The wallet object used for authentication in the transaction.
        netuid (int): Network UID on which the metadata is to be published.
        data_type (str): The data type of the information being submitted. It should be one of the following: ``'Sha256'``, ``'Blake256'``, ``'Keccak256'``, or ``'Raw0-128'``. This specifies the format or hashing algorithm used for the data.
        data (str): The actual metadata content to be published. This should be formatted or hashed according to the ``type`` specified. (Note: max ``str`` length is 128 bytes)
        wait_for_inclusion (bool): If ``True``, the function will wait for the extrinsic to be included in a block before returning. Defaults to ``False``.
        wait_for_finalization (bool): If ``True``, the function will wait for the extrinsic to be finalized on the chain before returning. Defaults to ``True``.

    Returns:
        bool: ``True`` if the metadata was successfully published (and finalized if specified). ``False`` otherwise.

    Raises:
        MetadataError: If there is an error in submitting the extrinsic or if the response from the blockchain indicates failure.
    """

    wallet.unlock_hotkey()

    with self.substrate as substrate:
        call = substrate.compose_call(
            call_module="Commitments",
            call_function="set_commitment",
            call_params={
                "netuid": netuid,
                "info": {"fields": [[{f"{data_type}": data}]]},
            },
        )

        extrinsic = substrate.create_signed_extrinsic(call=call, keypair=wallet.hotkey)
        response = substrate.submit_extrinsic(
            extrinsic,
            wait_for_inclusion=wait_for_inclusion,
            wait_for_finalization=wait_for_finalization,
        )
        # We only wait here if we expect finalization.
        if not wait_for_finalization and not wait_for_inclusion:
            return True
        response.process_events()
        if response.is_success:
            return True
        else:
            raise MetadataError(format_error_message(response.error_message))


# Community uses this function directly
@ensure_connected
def get_metadata(self, netuid: int, hotkey: str, block: Optional[int] = None) -> str:
    """Fetches the commitment (metadata) stored on-chain for a hotkey on a subnet.

    Args:
        self (bittensor.core.subtensor.Subtensor): The subtensor instance used for the query.
        netuid (int): Network UID the commitment was published on.
        hotkey (str): SS58 address of the hotkey whose commitment is queried.
        block (Optional[int]): Block number to query at; latest block if ``None``.

    Returns:
        The ``.value`` of the ``Commitments.CommitmentOf`` storage entry.
        NOTE(review): annotated ``-> str`` but the storage value is the decoded
        query result, which may be a mapping — confirm against callers.
    """

    @retry(delay=2, tries=3, backoff=2, max_delay=4)
    def make_substrate_call_with_retry():
        with self.substrate as substrate:
            return substrate.query(
                module="Commitments",
                storage_function="CommitmentOf",
                params=[netuid, hotkey],
                block_hash=None if block is None else substrate.get_block_hash(block),
            )

    commit_data = make_substrate_call_with_retry()
    return commit_data.value
# Chain call for `do_set_weights`
@ensure_connected
def do_set_weights(
    self: "Subtensor",
    wallet: "Wallet",
    uids: list[int],
    vals: list[int],
    netuid: int,
    version_key: int = version_as_int,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = False,
) -> tuple[bool, Optional[dict]]:  # (success, error_message)
    """
    Internal method to send a transaction to the Bittensor blockchain, setting weights for specified neurons. This method constructs and submits the transaction, handling retries and blockchain communication.

    Args:
        self (bittensor.core.subtensor.Subtensor): Subtensor interface
        wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
        uids (list[int]): List of neuron UIDs for which weights are being set.
        vals (list[int]): List of weight values corresponding to each UID.
        netuid (int): Unique identifier for the network.
        version_key (int): Version key for compatibility with the network.
        wait_for_inclusion (bool): Waits for the transaction to be included in a block.
        wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.

    Returns:
        tuple[bool, Optional[str]]: A tuple containing a success flag and an optional response message.
        NOTE(review): the success paths return ``str`` messages while the failure path returns
        ``response.error_message`` — the ``Optional[dict]`` annotation and this docstring disagree;
        confirm the intended error type with callers.

    This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their trust in other neurons based on observed performance and contributions.
    """

    # Retry the whole compose/sign/submit sequence on transient failures.
    @retry(delay=1, tries=3, backoff=2, max_delay=4)
    def make_substrate_call_with_retry():
        call = self.substrate.compose_call(
            call_module="SubtensorModule",
            call_function="set_weights",
            call_params={
                "dests": uids,
                "weights": vals,
                "netuid": netuid,
                "version_key": version_key,
            },
        )
        # Period dictates how long the extrinsic will stay as part of waiting pool
        extrinsic = self.substrate.create_signed_extrinsic(
            call=call,
            keypair=wallet.hotkey,
            era={"period": 5},
        )
        response = submit_extrinsic(
            substrate=self.substrate,
            extrinsic=extrinsic,
            wait_for_inclusion=wait_for_inclusion,
            wait_for_finalization=wait_for_finalization,
        )
        # We only wait here if we expect finalization.
        if not wait_for_finalization and not wait_for_inclusion:
            return True, "Not waiting for finalization or inclusion."

        response.process_events()
        if response.is_success:
            return True, "Successfully set weights."
        else:
            return False, response.error_message

    return make_substrate_call_with_retry()


# Community uses this extrinsic directly and via `subtensor.set_weights`
def set_weights_extrinsic(
    subtensor: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    uids: Union[NDArray[np.int64], "torch.LongTensor", list],
    weights: Union[NDArray[np.float32], "torch.FloatTensor", list],
    version_key: int = 0,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = False,
    prompt: bool = False,
) -> tuple[bool, str]:
    """Sets the given weights and values on chain for wallet hotkey account.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): Subtensor endpoint to use.
        wallet (bittensor_wallet.Wallet): Bittensor wallet object.
        netuid (int): The ``netuid`` of the subnet to set weights for.
        uids (Union[NDArray[np.int64], torch.LongTensor, list]): The ``uint64`` uids of destination neurons.
        weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The weights to set. These must be ``float`` s and correspond to the passed ``uid`` s.
        version_key (int): The version key of the validator.
        wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
        wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
        prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding.

    Returns:
        tuple[bool, str]: A tuple containing a success flag and an optional response message.
    """
    # First convert types.
    if use_torch():
        if isinstance(uids, list):
            uids = torch.tensor(uids, dtype=torch.int64)
        if isinstance(weights, list):
            weights = torch.tensor(weights, dtype=torch.float32)
    else:
        if isinstance(uids, list):
            uids = np.array(uids, dtype=np.int64)
        if isinstance(weights, list):
            weights = np.array(weights, dtype=np.float32)

    # Reformat and normalize.
    weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit(
        uids, weights
    )

    # Ask before moving on. 65535 is the u16 max: weights are displayed
    # normalized back from the emit-converted u16 range.
    if prompt:
        if not Confirm.ask(
            f"Do you want to set weights:\n[bold white] weights: {[float(v / 65535) for v in weight_vals]}\n"
            f"uids: {weight_uids}[/bold white ]?"
        ):
            return False, "Prompt refused."

    with bt_console.status(
        f":satellite: Setting weights on [white]{subtensor.network}[/white] ..."
    ):
        try:
            success, error_message = do_set_weights(
                self=subtensor,
                wallet=wallet,
                netuid=netuid,
                uids=weight_uids,
                vals=weight_vals,
                version_key=version_key,
                wait_for_finalization=wait_for_finalization,
                wait_for_inclusion=wait_for_inclusion,
            )

            if not wait_for_finalization and not wait_for_inclusion:
                return True, "Not waiting for finalization or inclusion."

            if success is True:
                bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]")
                logging.success(
                    msg=str(success),
                    prefix="Set weights",
                    suffix="Finalized: ",
                )
                return True, "Successfully set weights and Finalized."
            else:
                error_message = format_error_message(error_message)
                logging.error(error_message)
                return False, error_message

        except Exception as e:
            bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}")
            logging.debug(str(e))
            return False, str(e)
# Chain call for `transfer_extrinsic`
@ensure_connected
def do_transfer(
    self: "Subtensor",
    wallet: "Wallet",
    dest: str,
    transfer_balance: "Balance",
    wait_for_inclusion: bool = True,
    wait_for_finalization: bool = False,
) -> tuple[bool, Optional[str], Optional[dict]]:
    """Sends a transfer extrinsic to the chain.

    Args:
        self (subtensor.core.subtensor.Subtensor): The Subtensor instance object.
        wallet (bittensor_wallet.Wallet): Wallet object.
        dest (str): Destination public key address.
        transfer_balance (bittensor.utils.balance.Balance): Amount to transfer.
        wait_for_inclusion (bool): If ``true``, waits for inclusion.
        wait_for_finalization (bool): If ``true``, waits for finalization.

    Returns:
        success (bool): ``True`` if transfer was successful.
        block_hash (str): Block hash of the transfer. On success and if wait_for_finalization/inclusion is ``True``.
        error (dict): Error message from subtensor if transfer failed.
    """

    # Retry the whole compose/sign/submit sequence on transient failures.
    @retry(delay=1, tries=3, backoff=2, max_delay=4)
    def make_substrate_call_with_retry():
        call = self.substrate.compose_call(
            call_module="Balances",
            call_function="transfer_allow_death",
            call_params={"dest": dest, "value": transfer_balance.rao},
        )
        extrinsic = self.substrate.create_signed_extrinsic(
            call=call, keypair=wallet.coldkey
        )
        response = submit_extrinsic(
            substrate=self.substrate,
            extrinsic=extrinsic,
            wait_for_inclusion=wait_for_inclusion,
            wait_for_finalization=wait_for_finalization,
        )
        # We only wait here if we expect finalization.
        if not wait_for_finalization and not wait_for_inclusion:
            return True, None, None

        # Otherwise continue with finalization.
        response.process_events()
        if response.is_success:
            block_hash = response.block_hash
            return True, block_hash, None
        else:
            return False, None, response.error_message

    return make_substrate_call_with_retry()


# Community uses this extrinsic directly and via `subtensor.transfer`
def transfer_extrinsic(
    subtensor: "Subtensor",
    wallet: "Wallet",
    dest: str,
    amount: Union["Balance", float],
    wait_for_inclusion: bool = True,
    wait_for_finalization: bool = False,
    keep_alive: bool = True,
    prompt: bool = False,
) -> bool:
    """Transfers funds from this wallet to the destination public key address.

    Args:
        subtensor (subtensor.core.subtensor.Subtensor): The Subtensor instance object.
        wallet (bittensor_wallet.Wallet): Bittensor wallet object to make transfer from.
        dest (str): Destination ``ss58_address`` or ed25519 public key of the receiver.
        amount (Union[Balance, float]): Amount to stake as Bittensor balance, or ``float`` interpreted as Tao.
        wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
        wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
        keep_alive (bool): If set, keeps the account alive by keeping the balance above the existential deposit.
        prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding.

    Returns:
        success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
    """
    # Validate destination address.
    if not is_valid_bittensor_address_or_public_key(dest):
        bt_console.print(
            f":cross_mark: [red]Invalid destination address[/red]:[bold white]\n {dest}[/bold white]"
        )
        return False

    if isinstance(dest, bytes):
        # Convert bytes to hex string.
        dest = "0x" + dest.hex()

    # Unlock wallet coldkey.
    wallet.unlock_coldkey()

    # Convert to bittensor.Balance
    if not isinstance(amount, Balance):
        transfer_balance = Balance.from_tao(amount)
    else:
        transfer_balance = amount

    # Check balance.
    with bt_console.status(":satellite: Checking Balance..."):
        account_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
        # check existential deposit.
        existential_deposit = subtensor.get_existential_deposit()

    with bt_console.status(":satellite: Transferring..."):
        fee = subtensor.get_transfer_fee(
            wallet=wallet, dest=dest, value=transfer_balance.rao
        )

    if not keep_alive:
        # Check if the transfer should keep_alive the account: when the caller
        # opts out, the existential deposit is excluded from the balance check.
        existential_deposit = Balance(0)

    # Check if we have enough balance.
    if account_balance < (transfer_balance + fee + existential_deposit):
        bt_console.print(
            ":cross_mark: [red]Not enough balance[/red]:[bold white]\n"
            f"  balance: {account_balance}\n"
            f"  amount: {transfer_balance}\n"
            f"  for fee: {fee}[/bold white]"
        )
        return False

    # Ask before moving on.
    if prompt:
        if not Confirm.ask(
            "Do you want to transfer:[bold white]\n"
            f"  amount: {transfer_balance}\n"
            f"  from: {wallet.name}:{wallet.coldkey.ss58_address}\n"
            f"  to: {dest}\n"
            f"  for fee: {fee}[/bold white]"
        ):
            return False

    with bt_console.status(":satellite: Transferring..."):
        success, block_hash, error_message = do_transfer(
            self=subtensor,
            wallet=wallet,
            dest=dest,
            transfer_balance=transfer_balance,
            wait_for_finalization=wait_for_finalization,
            wait_for_inclusion=wait_for_inclusion,
        )

        if success:
            bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]")
            bt_console.print(f"[green]Block Hash: {block_hash}[/green]")

            explorer_urls = get_explorer_url_for_network(
                subtensor.network, block_hash, NETWORK_EXPLORER_MAP
            )
            if explorer_urls != {} and explorer_urls:
                bt_console.print(
                    f"[green]Opentensor Explorer Link: {explorer_urls.get('opentensor')}[/green]"
                )
                bt_console.print(
                    f"[green]Taostats Explorer Link: {explorer_urls.get('taostats')}[/green]"
                )
        else:
            bt_console.print(
                f":cross_mark: [red]Failed[/red]: {format_error_message(error_message)}"
            )

    if success:
        with bt_console.status(":satellite: Checking Balance..."):
            new_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
            bt_console.print(
                f"Balance:\n  [blue]{account_balance}[/blue] :arrow_right: [green]{new_balance}[/green]"
            )
        return True

    return False
def submit_extrinsic(
    substrate: "SubstrateInterface",
    extrinsic: "GenericExtrinsic",
    wait_for_inclusion: bool,
    wait_for_finalization: bool,
):
    """Submit ``extrinsic`` to the chain, logging and re-raising request errors.

    Thin wrapper around ``substrate.submit_extrinsic`` that forwards the
    inclusion/finalization flags unchanged. On a ``SubstrateRequestException``
    the formatted chain error is logged before the exception propagates.

    Args:
        substrate (substrateinterface.SubstrateInterface): The substrate interface instance used to interact with the blockchain.
        extrinsic (scalecodec.types.GenericExtrinsic): The extrinsic to be submitted to the blockchain.
        wait_for_inclusion (bool): Whether to wait for the extrinsic to be included in a block.
        wait_for_finalization (bool): Whether to wait for the extrinsic to be finalized on the blockchain.

    Returns:
        response: The response from the substrate after submitting the extrinsic.

    Raises:
        SubstrateRequestException: If the submission of the extrinsic fails, the error is logged and re-raised.
    """
    try:
        return substrate.submit_extrinsic(
            extrinsic,
            wait_for_inclusion=wait_for_inclusion,
            wait_for_finalization=wait_for_finalization,
        )
    except SubstrateRequestException as err:
        logging.error(format_error_message(err.args[0], substrate=substrate))
        # Re-raise so the caller's retry logic can attempt the extrinsic again.
        # If the retry logic is ever removed, this raise should be revisited.
        raise
METAGRAPH_STATE_DICT_NDARRAY_KEYS = [
    "version",
    "n",
    "block",
    "stake",
    "total_stake",
    "ranks",
    "trust",
    "consensus",
    "validator_trust",
    "incentive",
    "emission",
    "dividends",
    "active",
    "last_update",
    "validator_permit",
    "uids",
]
"""Keys expected in the metagraph state dict when (de)serializing NumPy ndarrays.

Each entry names a per-node attribute or metric tracked by the metagraph —
stake/trust/consensus-style metrics alongside bookkeeping fields such as
``version``, ``n`` (node count), ``block``, and the per-node ``uids``.
"""


def get_save_dir(network: str, netuid: int) -> str:
    """Build the local cache directory path for a given ``network``/``netuid`` pair.

    Args:
        network (str): Network name.
        netuid (int): Network UID.

    Returns:
        str: User-expanded path under ``~/.bittensor/metagraphs``.
    """
    path_parts = (
        "~",
        ".bittensor",
        "metagraphs",
        f"network-{network}",
        f"netuid-{netuid}",
    )
    return os.path.expanduser(os.path.join(*path_parts))


def latest_block_path(dir_path: str) -> str:
    """Return the path of the highest-numbered block file inside ``dir_path``.

    File names are expected to look like ``block-<number>.<ext>``; anything
    that does not parse is silently skipped.

    Args:
        dir_path (str): Directory to scan.

    Returns:
        str: Full (user-expanded) path of the newest block file.

    Raises:
        ValueError: If the directory contains no parseable block file.
    """
    newest_block = -1
    newest_path = None
    for entry in listdir(dir_path):
        candidate = os.path.expanduser(join(dir_path, entry))
        try:
            block_number = int(entry.split("-")[1].split(".")[0])
        except Exception:
            # Not a block file — ignore it.
            continue
        if block_number > newest_block:
            newest_block = block_number
            newest_path = candidate
    if newest_path is None:
        raise ValueError(f"Metagraph not found at: {dir_path}")
    return newest_path
The metagraph tracks various attributes of these neurons, such as stake, trust, and consensus, which are crucial for the network's incentive mechanisms and the Yuma Consensus algorithm as outlined in the `NeurIPS paper `_. These attributes + govern how neurons interact, how they are incentivized, and their roles within the network's + decision-making processes. + + Args: + netuid (int): A unique identifier that distinguishes between different instances or versions of the Bittensor network. + network (str): The name of the network, signifying specific configurations or iterations within the Bittensor ecosystem. + version (NDArray): The version number of the network, integral for tracking network updates. + n (NDArray): The total number of neurons in the network, reflecting its size and complexity. + block (NDArray): The current block number in the blockchain, crucial for synchronizing with the network's latest state. + stake: Represents the cryptocurrency staked by neurons, impacting their influence and earnings within the network. + total_stake: The cumulative stake across all neurons. + ranks: Neuron rankings as per the Yuma Consensus algorithm, influencing their incentive distribution and network authority. + trust: Scores indicating the reliability of neurons, mainly miners, within the network's operational context. + consensus: Scores reflecting each neuron's alignment with the network's collective decisions. + validator_trust: Trust scores for validator neurons, crucial for network security and validation. + incentive: Rewards allocated to neurons, particularly miners, for their network contributions. + emission: The rate at which rewards are distributed to neurons. + dividends: Rewards received primarily by validators as part of the incentive mechanism. + active: Status indicating whether a neuron is actively participating in the network. + last_update: Timestamp of the latest update to a neuron's data. 
+ validator_permit: Indicates if a neuron is authorized to act as a validator. + weights: Inter-neuronal weights set by each neuron, influencing network dynamics. + bonds: Represents speculative investments by neurons in others, part of the reward mechanism. + uids: Unique identifiers for each neuron, essential for network operations. + axons (List): Details about each neuron's axon, critical for facilitating network communication. + + The metagraph plays a pivotal role in Bittensor's decentralized AI operations, influencing everything from data propagation to reward distribution. It embodies the principles of decentralized governance + and collaborative intelligence, ensuring that the network remains adaptive, secure, and efficient. + + Example: + Initializing the metagraph to represent the current state of the Bittensor network:: + + from bittensor.core.metagraph import Metagraph + metagraph = Metagraph(netuid=config.netuid, network=subtensor.network, sync=False) + + Synchronizing the metagraph with the network to reflect the latest state and neuron data:: + + metagraph.sync(subtensor=subtensor) + + Accessing metagraph properties to inform network interactions and decisions:: + + total_stake = metagraph.S + neuron_ranks = metagraph.R + neuron_incentives = metagraph.I + axons = metagraph.axons + neurons = metagraph.neurons + + Maintaining a local copy of hotkeys for querying and interacting with network entities:: + + hotkeys = deepcopy(metagraph.hotkeys) + """ + + netuid: int + network: str + version: Union["torch.nn.Parameter", tuple[NDArray]] + n: Union["torch.nn.Parameter", NDArray] + block: Union["torch.nn.Parameter", NDArray] + stake: Union["torch.nn.Parameter", NDArray] + total_stake: Union["torch.nn.Parameter", NDArray] + ranks: Union["torch.nn.Parameter", NDArray] + trust: Union["torch.nn.Parameter", NDArray] + consensus: Union["torch.nn.Parameter", NDArray] + validator_trust: Union["torch.nn.Parameter", NDArray] + incentive: Union["torch.nn.Parameter", 
NDArray] + emission: Union["torch.nn.Parameter", NDArray] + dividends: Union["torch.nn.Parameter", NDArray] + active: Union["torch.nn.Parameter", NDArray] + last_update: Union["torch.nn.Parameter", NDArray] + validator_permit: Union["torch.nn.Parameter", NDArray] + weights: Union["torch.nn.Parameter", NDArray] + bonds: Union["torch.nn.Parameter", NDArray] + uids: Union["torch.nn.Parameter", NDArray] + axons: list[AxonInfo] + + @property + def S(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Represents the stake of each neuron in the Bittensor network. Stake is an important concept in the + Bittensor ecosystem, signifying the amount of network weight (or “stake”) each neuron holds, + represented on a digital ledger. The stake influences a neuron's ability to contribute to and benefit + from the network, playing a crucial role in the distribution of incentives and decision-making processes. + + Returns: + NDArray: A tensor representing the stake of each neuron in the network. Higher values signify a greater stake held by the respective neuron. + """ + return self.total_stake + + @property + def R(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Contains the ranks of neurons in the Bittensor network. Ranks are determined by the network based + on each neuron's performance and contributions. Higher ranks typically indicate a greater level of + contribution or performance by a neuron. These ranks are crucial in determining the distribution of + incentives within the network, with higher-ranked neurons receiving more incentive. + + Returns: + NDArray: A tensor where each element represents the rank of a neuron. Higher values indicate higher ranks within the network. + """ + return self.ranks + + @property + def I(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Incentive values of neurons represent the rewards they receive for their contributions to the network. 
+ The Bittensor network employs an incentive mechanism that rewards neurons based on their + informational value, stake, and consensus with other peers. This ensures that the most valuable and + trusted contributions are incentivized. + + Returns: + NDArray: A tensor of incentive values, indicating the rewards or benefits accrued by each neuron based on their contributions and network consensus. + """ + return self.incentive + + @property + def E(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Denotes the emission values of neurons in the Bittensor network. Emissions refer to the distribution or + release of rewards (often in the form of cryptocurrency) to neurons, typically based on their stake and + performance. This mechanism is central to the network's incentive model, ensuring that active and + contributing neurons are appropriately rewarded. + + Returns: + NDArray: A tensor where each element represents the emission value for a neuron, indicating the amount of reward distributed to that neuron. + """ + return self.emission + + @property + def C(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Represents the consensus values of neurons in the Bittensor network. Consensus is a measure of how + much a neuron's contributions are trusted and agreed upon by the majority of the network. It is + calculated based on a staked weighted trust system, where the network leverages the collective + judgment of all participating peers. Higher consensus values indicate that a neuron's contributions + are more widely trusted and valued across the network. + + Returns: + NDArray: A tensor of consensus values, where each element reflects the level of trust and agreement a neuron has achieved within the network. + + """ + return self.consensus + + @property + def T(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Represents the trust values assigned to each neuron in the Bittensor network. 
Trust is a key metric that + reflects the reliability and reputation of a neuron based on its past behavior and contributions. It is + an essential aspect of the network's functioning, influencing decision-making processes and interactions + between neurons. + + The trust matrix is inferred from the network's inter-peer weights, indicating the level of trust each neuron + has in others. A higher value in the trust matrix suggests a stronger trust relationship between neurons. + + Returns: + NDArray: A tensor of trust values, where each element represents the trust level of a neuron. Higher values denote a higher level of trust within the network. + """ + return self.trust + + @property + def Tv(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Contains the validator trust values of neurons in the Bittensor network. Validator trust is specifically + associated with neurons that act as validators within the network. This specialized form of trust reflects + the validators' reliability and integrity in their role, which is crucial for maintaining the network's + stability and security. + + Validator trust values are particularly important for the network's consensus and validation processes, + determining the validators' influence and responsibilities in these critical functions. + + Returns: + NDArray: A tensor of validator trust values, specifically applicable to neurons serving as validators, where higher values denote greater trustworthiness in their validation roles. + """ + return self.validator_trust + + @property + def D(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Represents the dividends received by neurons in the Bittensor network. Dividends are a form of reward or + distribution, typically given to neurons based on their stake, performance, and contribution to the network. + They are an integral part of the network's incentive structure, encouraging active and beneficial participation. 
+ + Returns: + NDArray: A tensor of dividend values, where each element indicates the dividends received by a neuron, reflecting their share of network rewards. + """ + return self.dividends + + @property + def B(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Bonds in the Bittensor network represent a speculative reward mechanism where neurons can accumulate + bonds in other neurons. Bonds are akin to investments or stakes in other neurons, reflecting a belief in + their future value or performance. This mechanism encourages correct weighting and collaboration + among neurons while providing an additional layer of incentive. + + Returns: + NDArray: A tensor representing the bonds held by each neuron, where each value signifies the proportion of bonds owned by one neuron in another. + """ + return self.bonds + + @property + def W(self) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Represents the weights assigned to each neuron in the Bittensor network. In the context of Bittensor, + weights are crucial for determining the influence and interaction between neurons. Each neuron is responsible + for setting its weights, which are then recorded on a digital ledger. These weights are reflective of the + neuron's assessment or judgment of other neurons in the network. + + The weight matrix :math:`W = [w_{ij}]` is a key component of the network's architecture, where the :math:`i^{th}` row is set by + neuron :math:`i` and represents its weights towards other neurons. These weights influence the ranking and incentive + mechanisms within the network. Higher weights from a neuron towards another can imply greater trust or value + placed on that neuron's contributions. + + Returns: + NDArray: A tensor of inter-peer weights, where each element :math:`w_{ij}` represents the weight assigned by neuron :math:`i` to neuron :math:`j`. This matrix is fundamental to the network's functioning, influencing the distribution of incentives and the inter-neuronal dynamics. 
+ """ + return self.weights + + @property + def hotkeys(self) -> list[str]: + """ + Represents a list of ``hotkeys`` for each neuron in the Bittensor network. + + Hotkeys are unique identifiers used by neurons for active participation in the network, such as sending and receiving information or + transactions. They are akin to public keys in cryptographic systems and are essential for identifying and authenticating neurons within the network's operations. + + Returns: + List[str]: A list of hotkeys, with each string representing the hotkey of a corresponding neuron. + + These keys are crucial for the network's security and integrity, ensuring proper identification and authorization of network participants. + + Note: + While the `NeurIPS paper `_ may not explicitly detail the concept of hotkeys, they are a fundamental of decentralized networks for secure and authenticated interactions. + """ + return [axon.hotkey for axon in self.axons] + + @property + def coldkeys(self) -> list[str]: + """ + Contains a list of ``coldkeys`` for each neuron in the Bittensor network. + + Coldkeys are similar to hotkeys but are typically used for more secure, offline activities such as storing assets or offline signing of transactions. They are an important aspect of a neuron's security, providing an additional layer of protection for sensitive operations and assets. + + Returns: + List[str]: A list of coldkeys, each string representing the coldkey of a neuron. These keys play a vital role in the secure management of assets and sensitive operations within the network. + + Note: + The concept of coldkeys, while not explicitly covered in the NeurIPS paper, is a standard practice in + blockchain and decentralized networks for enhanced security and asset protection. + """ + return [axon.coldkey for axon in self.axons] + + @property + def addresses(self) -> list[str]: + """ + Provides a list of IP addresses for each neuron in the Bittensor network. 
These addresses are used for + network communication, allowing neurons to connect, interact, and exchange information with each other. + IP addresses are fundamental for the network's peer-to-peer communication infrastructure. + + Returns: + List[str]: A list of IP addresses, with each string representing the address of a neuron. These addresses enable the decentralized, distributed nature of the network, facilitating direct communication and data exchange among neurons. + + Note: + While IP addresses are a basic aspect of network communication, specific details about their use in + the Bittensor network may not be covered in the `NeurIPS paper `_. They are, however, integral to the + functioning of any distributed network. + """ + return [axon.ip_str() for axon in self.axons] + + @abstractmethod + def __init__( + self, netuid: int, network: str = "finney", lite: bool = True, sync: bool = True + ): + """ + Initializes a new instance of the metagraph object, setting up the basic structure and parameters based on the provided arguments. + This method is the entry point for creating a metagraph object, + which is a central component in representing the state of the Bittensor network. + + Args: + netuid (int): The unique identifier for the network, distinguishing this instance of the metagraph within potentially multiple network configurations. + network (str): The name of the network, which can indicate specific configurations or versions of the Bittensor network. + lite (bool): A flag indicating whether to use a lite version of the metagraph. The lite version may contain less detailed information but can be quicker to initialize and sync. + sync (bool): A flag indicating whether to synchronize the metagraph with the network upon initialization. Synchronization involves updating the metagraph's parameters to reflect the current state of the network. 
+ + Example: + Initializing a metagraph object for the Bittensor network with a specific network UID:: + + metagraph = metagraph(netuid=123, network="finney", lite=True, sync=True) + + """ + + def __str__(self) -> str: + """ + Provides a human-readable string representation of the metagraph object. This representation includes key identifiers and attributes of the metagraph, making it easier to quickly understand + the state and configuration of the metagraph in a simple format. + + Returns: + str: A string that succinctly represents the metagraph, including its network UID, the total number of neurons (n), the current block number, and the network's name. This format is particularly useful for logging, debugging, and displaying the metagraph in a concise manner. + + Example: + When printing the metagraph object or using it in a string context, this method is automatically invoked:: + + print(metagraph) # Output: "metagraph(netuid:1, n:100, block:500, network:finney)" + """ + return f"metagraph(netuid:{self.netuid}, n:{self.n.item()}, block:{self.block.item()}, network:{self.network})" + + def __repr__(self) -> str: + """ + Provides a detailed string representation of the metagraph object, intended for unambiguous understanding and debugging purposes. This method simply calls the :func:`__str__` method, ensuring + consistency between the informal and formal string representations of the metagraph. + + Returns: + str: The same string representation as provided by the :func:`__str__` method, detailing the metagraph's key attributes including network UID, number of neurons, block number, and network name. 
+ + Example: + The :func:`__repr__` output can be used in debugging to get a clear and concise description of the metagraph:: + + metagraph_repr = repr(metagraph) + print(metagraph_repr) # Output mirrors that of __str__ + """ + return self.__str__() + + def metadata(self) -> dict: + """ + Retrieves the metadata of the metagraph, providing key information about the current state of the + Bittensor network. This metadata includes details such as the network's unique identifier (``netuid``), + the total number of neurons (``n``), the current block number, the network's name, and the version of + the Bittensor network. + + Returns: + dict: A dictionary containing essential metadata about the metagraph, including: + + - ``netuid``: The unique identifier for the network. + - ``n``: The total number of neurons in the network. + - ``block``: The current block number in the network's blockchain. + - ``network``: The name of the Bittensor network. + - ``version``: The version number of the Bittensor software. + + Note: + This metadata is crucial for understanding the current state and configuration of the network, as well as for tracking its evolution over time. 
+ """ + return { + "netuid": self.netuid, + "n": self.n.item(), + "block": self.block.item(), + "network": self.network, + "version": settings.__version__, + } + + def state_dict(self): + return { + "netuid": self.netuid, + "network": self.network, + "version": self.version, + "n": self.n, + "block": self.block, + "stake": self.stake, + "total_stake": self.total_stake, + "ranks": self.ranks, + "trust": self.trust, + "consensus": self.consensus, + "validator_trust": self.validator_trust, + "incentive": self.incentive, + "emission": self.emission, + "dividends": self.dividends, + "active": self.active, + "last_update": self.last_update, + "validator_permit": self.validator_permit, + "weights": self.weights, + "bonds": self.bonds, + "uids": self.uids, + "axons": self.axons, + "neurons": self.neurons, + } + + def sync( + self, + block: Optional[int] = None, + lite: bool = True, + subtensor: Optional["Subtensor"] = None, + ): + """ + Synchronizes the metagraph with the Bittensor network's current state. It updates the metagraph's attributes to reflect the latest data from the network, ensuring the metagraph represents the most current state of the network. + + Args: + block (Optional[int]): A specific block number to synchronize with. If None, the metagraph syncs with the latest block. This allows for historical analysis or specific state examination of the network. + lite (bool): If True, a lite version of the metagraph is used for quicker synchronization. This is beneficial when full detail is not necessary, allowing for reduced computational and time overhead. + subtensor (Optional[bittensor.core.subtensor.Subtensor]): An instance of the subtensor class from Bittensor, providing an interface to the underlying blockchain data. If provided, this instance is used for data retrieval during synchronization. 
+ + Example: + Sync the metagraph with the latest block from the subtensor, using the lite version for efficiency:: + + from bittensor.core.subtensor import Subtensor + + subtensor = Subtensor() + metagraph.sync(subtensor=subtensor) + + Sync with a specific block number for detailed analysis:: + + from bittensor.core.subtensor import Subtensor + + subtensor = Subtensor() + metagraph.sync(block=12345, lite=False, subtensor=subtensor) + + NOTE: + If attempting to access data beyond the previous 300 blocks, you **must** use the ``archive`` network for subtensor. Light nodes are configured only to store the previous 300 blocks if connecting to finney or test networks. + + For example:: + + from bittensor.core.subtensor import Subtensor + + subtensor = Subtensor(network='archive') + current_block = subtensor.get_current_block() + history_block = current_block - 1200 + + metagraph.sync(block=history_block, lite=False, subtensor=subtensor) + """ + + # Initialize subtensor + subtensor = self._initialize_subtensor(subtensor) + + if ( + subtensor.chain_endpoint != settings.ARCHIVE_ENTRYPOINT + or subtensor.network != settings.NETWORKS[3] + ): + cur_block = subtensor.get_current_block() + if block and block < (cur_block - 300): + logging.warning( + "Attempting to sync longer than 300 blocks ago on a non-archive node. Please use the 'archive' " + "network for subtensor and retry." + ) + + # Assign neurons based on 'lite' flag + self._assign_neurons(block, lite, subtensor) + + # Set attributes for metagraph + self._set_metagraph_attributes(block, subtensor) + + # If not a 'lite' version, compute and set weights and bonds for each neuron + if not lite: + self._set_weights_and_bonds(subtensor=subtensor) + + def _initialize_subtensor(self, subtensor: "Subtensor"): + """ + Initializes the subtensor to be used for syncing the metagraph. + + This method ensures that a subtensor instance is available and properly set up for data retrieval during the synchronization process. 
+ + If no subtensor is provided, this method is responsible for creating a new instance of the subtensor, configured according to the current network settings. + + Args: + subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance provided for initialization. If ``None``, a new subtensor instance is created using the current network configuration. + + Returns: + subtensor (bittensor.core.subtensor.Subtensor): The initialized subtensor instance, ready to be used for syncing the metagraph. + + Internal Usage: + Used internally during the sync process to ensure a valid subtensor instance is available:: + + subtensor = self._initialize_subtensor(subtensor) + """ + if not subtensor: + # TODO: Check and test the initialization of the new subtensor + # Lazy import due to circular import (subtensor -> metagraph, metagraph -> subtensor) + from bittensor.core.subtensor import Subtensor + + subtensor = Subtensor(network=self.network) + return subtensor + + def _assign_neurons(self, block: int, lite: bool, subtensor: "Subtensor"): + """ + Assigns neurons to the metagraph based on the provided block number and the lite flag. + + This method is responsible for fetching and setting the neuron data in the metagraph, which includes neuron attributes like UID, stake, trust, and other relevant information. + + Args: + block (int): The block number for which the neuron data needs to be fetched. If ``None``, the latest block data is used. + lite (bool): A boolean flag indicating whether to use a lite version of the neuron data. The lite version typically includes essential information and is quicker to fetch and process. + subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for fetching neuron data from the network. 
+ + Internal Usage: + Used internally during the sync process to fetch and set neuron data:: + + from bittensor.core.subtensor import Subtensor + + block = 12345 + lite = False + subtensor = Subtensor() + self._assign_neurons(block, lite, subtensor) + """ + if lite: + self.neurons = subtensor.neurons_lite(block=block, netuid=self.netuid) + else: + self.neurons = subtensor.neurons(block=block, netuid=self.netuid) + self.lite = lite + + @staticmethod + def _create_tensor(data, dtype) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Creates a numpy array with the given data and data type. This method is a utility function used internally to encapsulate data into a np.array, making it compatible with the metagraph's numpy model structure. + + Args: + data: The data to be included in the tensor. This could be any numeric data, like stakes, ranks, etc. + dtype: The data type for the tensor, typically a numpy data type like ``np.float32`` or ``np.int64``. + + Returns: + A tensor parameter encapsulating the provided data. + + Internal Usage: + Used internally to create tensor parameters for various metagraph attributes:: + + self.stake = self._create_tensor(neuron_stakes, dtype=np.float32) + """ + # TODO: Check and test the creation of tensor + return ( + torch.nn.Parameter(torch.tensor(data, dtype=dtype), requires_grad=False) + if use_torch() + else np.array(data, dtype=dtype) + ) + + def _set_weights_and_bonds(self, subtensor: "Optional[Subtensor]" = None): + """ + Computes and sets the weights and bonds for each neuron in the metagraph. This method is responsible for processing the raw weight and bond data obtained from the network and converting it into a structured format suitable for the metagraph model. + + Args: + subtensor: The subtensor instance used for fetching weights and bonds data. If ``None``, the weights and bonds are not updated. 
+ + Internal Usage: + Used internally during the sync process to update the weights and bonds of the neurons:: + + self._set_weights_and_bonds(subtensor=subtensor) + """ + # TODO: Check and test the computation of weights and bonds + if self.netuid == 0: + self.weights = self._process_root_weights( + [neuron.weights for neuron in self.neurons], + "weights", + subtensor, + ) + else: + self.weights = self._process_weights_or_bonds( + [neuron.weights for neuron in self.neurons], "weights" + ) + self.bonds = self._process_weights_or_bonds( + [neuron.bonds for neuron in self.neurons], "bonds" + ) + + def _process_weights_or_bonds( + self, data, attribute: str + ) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Processes the raw weights or bonds data and converts it into a structured tensor format. This method handles the transformation of neuron connection data (``weights`` or ``bonds``) from a list or other unstructured format into a tensor that can be utilized within the metagraph model. + + Args: + data: The raw weights or bonds data to be processed. This data typically comes from the subtensor. + attribute: A string indicating whether the data is ``weights`` or ``bonds``, which determines the specific processing steps to be applied. + + Returns: + A tensor parameter encapsulating the processed weights or bonds data. 
+ + Internal Usage: + Used internally to process and set weights or bonds for the neurons:: + + self.weights = self._process_weights_or_bonds(raw_weights_data, "weights") + """ + data_array = [] + for item in data: + if len(item) == 0: + if use_torch(): + data_array.append(torch.zeros(len(self.neurons))) + else: + data_array.append(np.zeros(len(self.neurons), dtype=np.float32)) + else: + uids, values = zip(*item) + # TODO: Validate and test the conversion of uids and values to tensor + if attribute == "weights": + data_array.append( + convert_weight_uids_and_vals_to_tensor( + len(self.neurons), + list(uids), + list(values), + ) + ) + else: + data_array.append( + convert_bond_uids_and_vals_to_tensor( + len(self.neurons), list(uids), list(values) + ).astype(np.float32) + ) + tensor_param: Union["torch.nn.Parameter", NDArray] = ( + ( + torch.nn.Parameter(torch.stack(data_array), requires_grad=False) + if len(data_array) + else torch.nn.Parameter() + ) + if use_torch() + else ( + np.stack(data_array) + if len(data_array) + else np.array([], dtype=np.float32) + ) + ) + if len(data_array) == 0: + logging.warning( + f"Empty {attribute}_array on metagraph.sync(). The '{attribute}' tensor is empty." + ) + return tensor_param + + @abstractmethod + def _set_metagraph_attributes(self, block, subtensor): + pass + + def _process_root_weights( + self, data: list, attribute: str, subtensor: "Subtensor" + ) -> Union[NDArray, "torch.nn.Parameter"]: + """ + Specifically processes the root weights data for the metagraph. This method is similar to :func:`_process_weights_or_bonds` but is tailored for processing root weights, which have a different structure and significance in the network. + + Args: + data (list): The raw root weights data to be processed. + attribute (str): A string indicating the attribute type, here it's typically ``weights``. + subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for additional data and context needed in processing. 
+ + Returns: + A tensor parameter encapsulating the processed root weights data. + + Internal Usage: + Used internally to process and set root weights for the metagraph:: + + self.root_weights = self._process_root_weights(raw_root_weights_data, "weights", subtensor) + """ + data_array = [] + n_subnets = subtensor.get_total_subnets() or 0 + subnets = subtensor.get_subnets() + for item in data: + if len(item) == 0: + if use_torch(): + data_array.append(torch.zeros(n_subnets)) + else: + data_array.append(np.zeros(n_subnets, dtype=np.float32)) + else: + uids, values = zip(*item) + # TODO: Validate and test the conversion of uids and values to tensor + data_array.append( + convert_root_weight_uids_and_vals_to_tensor( + n_subnets, list(uids), list(values), subnets + ) + ) + + tensor_param: Union[NDArray, "torch.nn.Parameter"] = ( + ( + torch.nn.Parameter(torch.stack(data_array), requires_grad=False) + if len(data_array) + else torch.nn.Parameter() + ) + if use_torch() + else ( + np.stack(data_array) + if len(data_array) + else np.array([], dtype=np.float32) + ) + ) + if len(data_array) == 0: + logging.warning( + f"Empty {attribute}_array on metagraph.sync(). The '{attribute}' tensor is empty." + ) + return tensor_param + + def save(self) -> "Metagraph": + """ + Saves the current state of the metagraph to a file on disk. This function is crucial for persisting the current state of the network's metagraph, which can later be reloaded or analyzed. The save operation includes all neuron attributes and parameters, ensuring a complete snapshot of the metagraph's state. + + Returns: + metagraph (bittensor.core.metagraph.Metagraph): The metagraph instance after saving its state. + + Example: + Save the current state of the metagraph to the default directory:: + + metagraph.save() + + The saved state can later be loaded to restore or analyze the metagraph's state at this point. 
+ + If using the default save path:: + + metagraph.load() + + If using a custom save path:: + + metagraph.load_from_path(dir_path) + """ + save_directory = get_save_dir(self.network, self.netuid) + os.makedirs(save_directory, exist_ok=True) + if use_torch(): + graph_filename = f"{save_directory}/block-{self.block.item()}.pt" + state_dict = self.state_dict() + state_dict["axons"] = self.axons + state_dict["neurons"] = self.neurons + torch.save(state_dict, graph_filename) + torch.load(graph_filename) # verifies that the file can be loaded correctly + else: + graph_filename = f"{save_directory}/block-{self.block.item()}.pt" + state_dict = self.state_dict() + with open(graph_filename, "wb") as graph_file: + pickle.dump(state_dict, graph_file) + return self + + def load(self): + """ + Loads the state of the metagraph from the default save directory. This method is instrumental for restoring the metagraph to its last saved state. It automatically identifies the save directory based on the ``network`` and ``netuid`` properties of the metagraph, locates the latest block file in that directory, and loads all metagraph parameters from it. + + This functionality is particularly beneficial when continuity in the state of the metagraph is necessary + across different runtime sessions, or after a restart of the system. It ensures that the metagraph reflects + the exact state it was in at the last save point, maintaining consistency in the network's representation. + + The method delegates to ``load_from_path``, supplying it with the directory path constructed from the metagraph's current ``network`` and ``netuid`` properties. This abstraction simplifies the process of loading the metagraph's state for the user, requiring no direct path specifications. + + Returns: + metagraph (bittensor.core.metagraph.Metagraph): The metagraph instance after loading its state from the default directory. 
+ + Example: + Load the metagraph state from the last saved snapshot in the default directory:: + + metagraph.load() + + After this operation, the metagraph's parameters and neuron data are restored to their state at the time of the last save in the default directory. + + Note: + The default save directory is determined based on the metagraph's ``network`` and ``netuid`` attributes. It is important to ensure that these attributes are set correctly and that the default save directory contains the appropriate state files for the metagraph. + """ + self.load_from_path(get_save_dir(self.network, self.netuid)) + + @abstractmethod + def load_from_path(self, dir_path: str) -> "Metagraph": + """ + Loads the state of the metagraph from a specified directory path. This method is crucial for restoring the metagraph to a specific state based on saved data. It locates the latest block file in the given + directory and loads all metagraph parameters from it. This is particularly useful for analyses that require historical states of the network or for restoring previous states of the metagraph in different + execution environments. + + The method first identifies the latest block file in the specified directory, then loads the metagraph state including neuron attributes and parameters from this file. This ensures that the metagraph is accurately reconstituted to reflect the network state at the time of the saved block. + + Args: + dir_path (str): The directory path where the metagraph's state files are stored. This path should contain one or more saved state files, typically named in a format that includes the block number. + + Returns: + metagraph (bittensor.core.metagraph.Metagraph): The metagraph instance after loading its state from the specified directory path. 
+ + Example: + Load the metagraph state from a specific directory:: + + dir_path = "/path/to/saved/metagraph/states" + metagraph.load_from_path(dir_path) + + The metagraph is now restored to the state it was in at the time of the latest saved block in the specified directory. + + Note: + This method assumes that the state files in the specified directory are correctly formatted and + contain valid data for the metagraph. It is essential to ensure that the directory path and the + state files within it are accurate and consistent with the expected metagraph structure. + """ + + +BaseClass: Union["torch.nn.Module", object] = torch.nn.Module if use_torch() else object +""" +Base class that extends :class:`torch.nn.Module` if PyTorch is used; otherwise, it defaults to object. +""" + + +class TorchMetaGraph(MetagraphMixin, BaseClass): + def __init__( + self, netuid: int, network: str = "finney", lite: bool = True, sync: bool = True + ): + """ + Initializes a new instance of the metagraph object, setting up the basic structure and parameters based on the provided arguments. + This class requires Torch to be installed. + This method is the entry point for creating a metagraph object, which is a central component in representing the state of the Bittensor network. + + Args: + netuid (int): The unique identifier for the network, distinguishing this instance of the metagraph within potentially multiple network configurations. + network (str): The name of the network, which can indicate specific configurations or versions of the Bittensor network. + lite (bool): A flag indicating whether to use a lite version of the metagraph. The lite version may contain less detailed information but can be quicker to initialize and sync. + sync (bool): A flag indicating whether to synchronize the metagraph with the network upon initialization. Synchronization involves updating the metagraph's parameters to reflect the current state of the network. 
+ + Example: + Initializing a metagraph object for the Bittensor network with a specific network UID:: + + from bittensor.core.metagraph import Metagraph + + metagraph = Metagraph(netuid=123, network="finney", lite=True, sync=True) + """ + torch.nn.Module.__init__(self) + MetagraphMixin.__init__(self, netuid, network, lite, sync) + self.netuid = netuid + self.network = network + self.version = torch.nn.Parameter( + torch.tensor([settings.version_as_int], dtype=torch.int64), + requires_grad=False, + ) + self.n: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([0], dtype=torch.int64), requires_grad=False + ) + self.block: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([0], dtype=torch.int64), requires_grad=False + ) + self.stake = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.total_stake: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.ranks: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.trust: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.consensus: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.validator_trust: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.incentive: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.emission: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.dividends: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.active = torch.nn.Parameter( + torch.tensor([], dtype=torch.int64), requires_grad=False + ) + self.last_update = torch.nn.Parameter( + torch.tensor([], 
dtype=torch.int64), requires_grad=False + ) + self.validator_permit = torch.nn.Parameter( + torch.tensor([], dtype=torch.bool), requires_grad=False + ) + self.weights: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.float32), requires_grad=False + ) + self.bonds: torch.nn.Parameter = torch.nn.Parameter( + torch.tensor([], dtype=torch.int64), requires_grad=False + ) + self.uids = torch.nn.Parameter( + torch.tensor([], dtype=torch.int64), requires_grad=False + ) + self.axons: list[AxonInfo] = [] + if sync: + self.sync(block=None, lite=lite) + + def _set_metagraph_attributes(self, block: int, subtensor: "Subtensor"): + """ + Sets various attributes of the metagraph based on the latest network data fetched from the subtensor. + + This method updates parameters like the number of neurons, block number, stakes, trusts, ranks, and other neuron-specific information. + + Args: + block (int): The block number for which the metagraph attributes need to be set. If ``None``, the latest block data is used. + subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for fetching the latest network data. 
+ + Internal Usage: + Used internally during the sync process to update the metagraph's attributes:: + + from bittensor.core.subtensor import Subtensor + + subtensor = Subtensor() + block = subtensor.get_current_block() + + self._set_metagraph_attributes(block, subtensor) + """ + self.n = self._create_tensor(len(self.neurons), dtype=torch.int64) + self.version = self._create_tensor([settings.version_as_int], dtype=torch.int64) + self.block = self._create_tensor( + block if block else subtensor.block, dtype=torch.int64 + ) + self.uids = self._create_tensor( + [neuron.uid for neuron in self.neurons], dtype=torch.int64 + ) + self.trust = self._create_tensor( + [neuron.trust for neuron in self.neurons], dtype=torch.float32 + ) + self.consensus = self._create_tensor( + [neuron.consensus for neuron in self.neurons], dtype=torch.float32 + ) + self.incentive = self._create_tensor( + [neuron.incentive for neuron in self.neurons], dtype=torch.float32 + ) + self.dividends = self._create_tensor( + [neuron.dividends for neuron in self.neurons], dtype=torch.float32 + ) + self.ranks = self._create_tensor( + [neuron.rank for neuron in self.neurons], dtype=torch.float32 + ) + self.emission = self._create_tensor( + [neuron.emission for neuron in self.neurons], dtype=torch.float32 + ) + self.active = self._create_tensor( + [neuron.active for neuron in self.neurons], dtype=torch.int64 + ) + self.last_update = self._create_tensor( + [neuron.last_update for neuron in self.neurons], dtype=torch.int64 + ) + self.validator_permit = self._create_tensor( + [neuron.validator_permit for neuron in self.neurons], dtype=torch.bool + ) + self.validator_trust = self._create_tensor( + [neuron.validator_trust for neuron in self.neurons], dtype=torch.float32 + ) + self.total_stake = self._create_tensor( + [neuron.total_stake.tao for neuron in self.neurons], dtype=torch.float32 + ) + self.stake = self._create_tensor( + [neuron.stake for neuron in self.neurons], dtype=torch.float32 + ) + self.axons = 
[n.axon_info for n in self.neurons] + + def load_from_path(self, dir_path: str) -> "Metagraph": + """ + Loads the metagraph state from a specified directory path. + + Args: + dir_path (str): The directory path where the state file is located. + + Returns: + metagraph (bittensor.core.metagraph.Metagraph): The current metagraph instance with the loaded state. + + Example:: + + from bittensor.core.metagraph import Metagraph + + netuid = 1 + metagraph = Metagraph(netuid=netuid) + + metagraph.load_from_path("/path/to/dir") + + """ + + graph_file = latest_block_path(dir_path) + state_dict = torch.load(graph_file) + self.n = torch.nn.Parameter(state_dict["n"], requires_grad=False) + self.block = torch.nn.Parameter(state_dict["block"], requires_grad=False) + self.uids = torch.nn.Parameter(state_dict["uids"], requires_grad=False) + self.stake = torch.nn.Parameter(state_dict["stake"], requires_grad=False) + self.total_stake = torch.nn.Parameter( + state_dict["total_stake"], requires_grad=False + ) + self.ranks = torch.nn.Parameter(state_dict["ranks"], requires_grad=False) + self.trust = torch.nn.Parameter(state_dict["trust"], requires_grad=False) + self.consensus = torch.nn.Parameter( + state_dict["consensus"], requires_grad=False + ) + self.validator_trust = torch.nn.Parameter( + state_dict["validator_trust"], requires_grad=False + ) + self.incentive = torch.nn.Parameter( + state_dict["incentive"], requires_grad=False + ) + self.emission = torch.nn.Parameter(state_dict["emission"], requires_grad=False) + self.dividends = torch.nn.Parameter( + state_dict["dividends"], requires_grad=False + ) + self.active = torch.nn.Parameter(state_dict["active"], requires_grad=False) + self.last_update = torch.nn.Parameter( + state_dict["last_update"], requires_grad=False + ) + self.validator_permit = torch.nn.Parameter( + state_dict["validator_permit"], requires_grad=False + ) + self.uids = torch.nn.Parameter(state_dict["uids"], requires_grad=False) + self.axons = state_dict["axons"] + 
self.neurons = state_dict["neurons"] + if "weights" in state_dict: + self.weights = torch.nn.Parameter( + state_dict["weights"], requires_grad=False + ) + if "bonds" in state_dict: + self.bonds = torch.nn.Parameter(state_dict["bonds"], requires_grad=False) + return self + + +class NonTorchMetagraph(MetagraphMixin): + def __init__( + self, netuid: int, network: str = "finney", lite: bool = True, sync: bool = True + ): + """ + Initializes a new instance of the metagraph object, setting up the basic structure and parameters based on the provided arguments. + This class doesn't require installed Torch. + This method is the entry point for creating a metagraph object, which is a central component in representing the state of the Bittensor network. + + Args: + netuid (int): The unique identifier for the network, distinguishing this instance of the metagraph within potentially multiple network configurations. + network (str): The name of the network, which can indicate specific configurations or versions of the Bittensor network. + lite (bool): A flag indicating whether to use a lite version of the metagraph. The lite version may contain less detailed information but can be quicker to initialize and sync. + sync (bool): A flag indicating whether to synchronize the metagraph with the network upon initialization. Synchronization involves updating the metagraph's parameters to reflect the current state of the network. 
+ + Example: + Initializing a metagraph object for the Bittensor network with a specific network UID:: + + from bittensor.core.metagraph import Metagraph + + metagraph = Metagraph(netuid=123, network="finney", lite=True, sync=True) + """ + # super(metagraph, self).__init__() + MetagraphMixin.__init__(self, netuid, network, lite, sync) + + self.netuid = netuid + self.network = network + self.version = (np.array([settings.version_as_int], dtype=np.int64),) + self.n = np.array([0], dtype=np.int64) + self.block = np.array([0], dtype=np.int64) + self.stake = np.array([], dtype=np.float32) + self.total_stake = np.array([], dtype=np.float32) + self.ranks = np.array([], dtype=np.float32) + self.trust = np.array([], dtype=np.float32) + self.consensus = np.array([], dtype=np.float32) + self.validator_trust = np.array([], dtype=np.float32) + self.incentive = np.array([], dtype=np.float32) + self.emission = np.array([], dtype=np.float32) + self.dividends = np.array([], dtype=np.float32) + self.active = np.array([], dtype=np.int64) + self.last_update = np.array([], dtype=np.int64) + self.validator_permit = np.array([], dtype=bool) + self.weights = np.array([], dtype=np.float32) + self.bonds = np.array([], dtype=np.int64) + self.uids = np.array([], dtype=np.int64) + self.axons: list[AxonInfo] = [] + if sync: + self.sync(block=None, lite=lite) + + def _set_metagraph_attributes(self, block: int, subtensor: "Subtensor"): + """ + Sets various attributes of the metagraph based on the latest network data fetched from the subtensor. + + This method updates parameters like the number of neurons, block number, stakes, trusts, ranks, and other neuron-specific information. + + Args: + block (int): The block number for which the metagraph attributes need to be set. If ``None``, the latest block data is used. + subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for fetching the latest network data. 
+ + Internal Usage: + Used internally during the sync process to update the metagraph's attributes:: + + self._set_metagraph_attributes(block, subtensor) + """ + # TODO: Check and test the setting of each attribute + self.n = self._create_tensor(len(self.neurons), dtype=np.int64) + self.version = self._create_tensor([settings.version_as_int], dtype=np.int64) + self.block = self._create_tensor( + block if block else subtensor.block, dtype=np.int64 + ) + self.uids = self._create_tensor( + [neuron.uid for neuron in self.neurons], dtype=np.int64 + ) + self.trust = self._create_tensor( + [neuron.trust for neuron in self.neurons], dtype=np.float32 + ) + self.consensus = self._create_tensor( + [neuron.consensus for neuron in self.neurons], dtype=np.float32 + ) + self.incentive = self._create_tensor( + [neuron.incentive for neuron in self.neurons], dtype=np.float32 + ) + self.dividends = self._create_tensor( + [neuron.dividends for neuron in self.neurons], dtype=np.float32 + ) + self.ranks = self._create_tensor( + [neuron.rank for neuron in self.neurons], dtype=np.float32 + ) + self.emission = self._create_tensor( + [neuron.emission for neuron in self.neurons], dtype=np.float32 + ) + self.active = self._create_tensor( + [neuron.active for neuron in self.neurons], dtype=np.int64 + ) + self.last_update = self._create_tensor( + [neuron.last_update for neuron in self.neurons], dtype=np.int64 + ) + self.validator_permit = self._create_tensor( + [neuron.validator_permit for neuron in self.neurons], dtype=bool + ) + self.validator_trust = self._create_tensor( + [neuron.validator_trust for neuron in self.neurons], dtype=np.float32 + ) + self.total_stake = self._create_tensor( + [neuron.total_stake.tao for neuron in self.neurons], dtype=np.float32 + ) + self.stake = self._create_tensor( + [neuron.stake for neuron in self.neurons], dtype=np.float32 + ) + self.axons = [n.axon_info for n in self.neurons] + + def load_from_path(self, dir_path: str) -> "Metagraph": + """ + Loads the 
state of the Metagraph from a specified directory path. + + Args: + dir_path (str): The directory path where the metagraph's state file is located. + + Returns: + metagraph (:func:`bittensor.core.metagraph.Metagraph`): An instance of the Metagraph with the state loaded from the file. + + Raises: + pickle.UnpicklingError: If there is an error unpickling the state file. + RuntimeError: If there is an error loading the state file using PyTorch. + ImportError: If there is an error importing PyTorch. + """ + graph_filename = latest_block_path(dir_path) + try: + with open(graph_filename, "rb") as graph_file: + state_dict = pickle.load(graph_file) + except pickle.UnpicklingError: + settings.bt_console.print( + "Unable to load file. Attempting to restore metagraph using torch." + ) + settings.bt_console.print( + ":warning:[yellow]Warning:[/yellow] This functionality exists to load " + "metagraph state from legacy saves, but will not be supported in the future." + ) + try: + import torch as real_torch + + state_dict = real_torch.load(graph_filename) + for key in METAGRAPH_STATE_DICT_NDARRAY_KEYS: + state_dict[key] = state_dict[key].detach().numpy() + del real_torch + except (RuntimeError, ImportError): + settings.bt_console.print("Unable to load file. 
It may be corrupted.") + raise + + self.n = state_dict["n"] + self.block = state_dict["block"] + self.uids = state_dict["uids"] + self.stake = state_dict["stake"] + self.total_stake = state_dict["total_stake"] + self.ranks = state_dict["ranks"] + self.trust = state_dict["trust"] + self.consensus = state_dict["consensus"] + self.validator_trust = state_dict["validator_trust"] + self.incentive = state_dict["incentive"] + self.emission = state_dict["emission"] + self.dividends = state_dict["dividends"] + self.active = state_dict["active"] + self.last_update = state_dict["last_update"] + self.validator_permit = state_dict["validator_permit"] + self.axons = state_dict["axons"] + self.neurons = state_dict["neurons"] + if "weights" in state_dict: + self.weights = state_dict["weights"] + if "bonds" in state_dict: + self.bonds = state_dict["bonds"] + return self + + +Metagraph = TorchMetaGraph if use_torch() else NonTorchMetagraph +"""Metagraph class that uses :class:`TorchMetaGraph` if PyTorch is available; otherwise, it falls back to :class:`NonTorchMetagraph`. + +- **With PyTorch**: When `use_torch()` returns `True`, `Metagraph` is set to :class:`TorchMetaGraph`, which utilizes PyTorch functionalities. +- **Without PyTorch**: When `use_torch()` returns `False`, `Metagraph` is set to :class:`NonTorchMetagraph`, which does not rely on PyTorch. 
+""" diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py new file mode 100644 index 0000000000..cfccf362be --- /dev/null +++ b/bittensor/core/settings.py @@ -0,0 +1,241 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +__version__ = "8.0.0" + +import os +import re +import warnings +from pathlib import Path + +from munch import munchify +from rich.console import Console +from rich.traceback import install + +# Rich console. +__console__ = Console() +__use_console__ = True + +# Remove overdue locals in debug training. 
+install(show_locals=False) + + +def turn_console_off(): + global __use_console__ + global __console__ + from io import StringIO + + __use_console__ = False + __console__ = Console(file=StringIO(), stderr=False) + + +def turn_console_on(): + global __use_console__ + global __console__ + __use_console__ = True + __console__ = Console() + + +turn_console_off() + +bt_console = __console__ + + +HOME_DIR = Path.home() +USER_BITTENSOR_DIR = HOME_DIR / ".bittensor" +WALLETS_DIR = USER_BITTENSOR_DIR / "wallets" +MINERS_DIR = USER_BITTENSOR_DIR / "miners" + +# Bittensor networks name +NETWORKS = ["local", "finney", "test", "archive"] + +DEFAULT_ENDPOINT = "wss://entrypoint-finney.opentensor.ai:443" +DEFAULT_NETWORK = NETWORKS[1] + +# Create dirs if they don't exist +WALLETS_DIR.mkdir(parents=True, exist_ok=True) +MINERS_DIR.mkdir(parents=True, exist_ok=True) + + +# Bittensor endpoints (Needs to use wss://) +FINNEY_ENTRYPOINT = "wss://entrypoint-finney.opentensor.ai:443" +FINNEY_TEST_ENTRYPOINT = "wss://test.finney.opentensor.ai:443/" +ARCHIVE_ENTRYPOINT = "wss://archive.chain.opentensor.ai:443/" +LOCAL_ENTRYPOINT = os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") or "ws://127.0.0.1:9946" + +# Currency Symbols Bittensor +TAO_SYMBOL: str = chr(0x03C4) +RAO_SYMBOL: str = chr(0x03C1) + +# Pip address for versioning +PIPADDRESS = "https://pypi.org/pypi/bittensor/json" + +# Substrate chain block time (seconds). 
+BLOCKTIME = 12 + +# Substrate ss58_format +SS58_FORMAT = 42 + +# Wallet ss58 address length +SS58_ADDRESS_LENGTH = 48 + +# Raw GitHub url for delegates registry file +DELEGATES_DETAILS_URL = "https://raw.githubusercontent.com/opentensor/bittensor-delegates/main/public/delegates.json" + +# Block Explorers map network to explorer url +# Must all be polkadotjs explorer urls +NETWORK_EXPLORER_MAP = { + "opentensor": { + "local": "https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer", + "endpoint": "https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer", + "finney": "https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer", + }, + "taostats": { + "local": "https://x.taostats.io", + "endpoint": "https://x.taostats.io", + "finney": "https://x.taostats.io", + }, +} + +# --- Type Registry --- +TYPE_REGISTRY: dict = { + "types": { + "Balance": "u64", # Need to override default u128 + }, + "runtime_api": { + "NeuronInfoRuntimeApi": { + "methods": { + "get_neuron_lite": { + "params": [ + { + "name": "netuid", + "type": "u16", + }, + { + "name": "uid", + "type": "u16", + }, + ], + "type": "Vec", + }, + "get_neurons_lite": { + "params": [ + { + "name": "netuid", + "type": "u16", + }, + ], + "type": "Vec", + }, + } + }, + "SubnetInfoRuntimeApi": { + "methods": { + "get_subnet_hyperparams": { + "params": [ + { + "name": "netuid", + "type": "u16", + }, + ], + "type": "Vec", + } + } + }, + "SubnetRegistrationRuntimeApi": { + "methods": {"get_network_registration_cost": {"params": [], "type": "u64"}} + }, + }, +} + + +_BT_AXON_PORT = os.getenv("BT_AXON_PORT") +_BT_AXON_MAX_WORKERS = os.getenv("BT_AXON_MAX_WORKERS") +_BT_PRIORITY_MAX_WORKERS = os.getenv("BT_PRIORITY_MAX_WORKERS") +_BT_PRIORITY_MAXSIZE = os.getenv("BT_PRIORITY_MAXSIZE") + +DEFAULTS = munchify( + { + "axon": { + "port": int(_BT_AXON_PORT) if _BT_AXON_PORT else 8091, + "ip": os.getenv("BT_AXON_IP") or 
"[::]", + "external_port": os.getenv("BT_AXON_EXTERNAL_PORT") or None, + "external_ip": os.getenv("BT_AXON_EXTERNAL_IP") or None, + "max_workers": int(_BT_AXON_MAX_WORKERS) if _BT_AXON_MAX_WORKERS else 10, + }, + "logging": { + "debug": os.getenv("BT_LOGGING_DEBUG") or False, + "trace": os.getenv("BT_LOGGING_TRACE") or False, + "record_log": os.getenv("BT_LOGGING_RECORD_LOG") or False, + "logging_dir": os.getenv("BT_LOGGING_LOGGING_DIR") or str(MINERS_DIR), + }, + "priority": { + "max_workers": int(_BT_PRIORITY_MAX_WORKERS) + if _BT_PRIORITY_MAX_WORKERS + else 5, + "maxsize": int(_BT_PRIORITY_MAXSIZE) if _BT_PRIORITY_MAXSIZE else 10, + }, + "subtensor": { + "chain_endpoint": DEFAULT_ENDPOINT, + "network": DEFAULT_NETWORK, + "_mock": False, + }, + "wallet": { + "name": "default", + "hotkey": "default", + "path": str(WALLETS_DIR), + }, + } +) + + +# Parsing version without any literals. +__version__ = re.match(r"^\d+\.\d+\.\d+", __version__).group(0) + +version_split = __version__.split(".") +_version_info = tuple(int(part) for part in version_split) +_version_int_base = 1000 +assert max(_version_info) < _version_int_base + +version_as_int: int = sum( + e * (_version_int_base**i) for i, e in enumerate(reversed(_version_info)) +) +assert version_as_int < 2**31 # fits in int32 + + +def __apply_nest_asyncio(): + """ + Apply nest_asyncio if the environment variable NEST_ASYNCIO is set to "1" or not set. + If not set, warn the user that the default will change in the future. + """ + nest_asyncio_env = os.getenv("NEST_ASYNCIO") + if nest_asyncio_env == "1" or nest_asyncio_env is None: + if nest_asyncio_env is None: + warnings.warn( + """NEST_ASYNCIO implicitly set to '1'. In the future, the default value will be '0'. + If you use `nest_asyncio`, make sure to add it explicitly to your project dependencies, + as it will be removed from `bittensor` package dependencies in the future. + To silence this warning, explicitly set the environment variable, e.g. 
`export NEST_ASYNCIO=0`.""", + DeprecationWarning, + ) + # Install and apply nest asyncio to allow the async functions to run in a .ipynb + import nest_asyncio + + nest_asyncio.apply() + + +__apply_nest_asyncio() diff --git a/bittensor/core/stream.py b/bittensor/core/stream.py new file mode 100644 index 0000000000..9e880ffa87 --- /dev/null +++ b/bittensor/core/stream.py @@ -0,0 +1,158 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +from abc import ABC, abstractmethod +from typing import Callable, Awaitable, Optional + +from aiohttp import ClientResponse +from pydantic import ConfigDict, BaseModel +from starlette.responses import StreamingResponse as _StreamingResponse +from starlette.types import Send, Receive, Scope + +from .synapse import Synapse + + +class BTStreamingResponseModel(BaseModel): + """ + :class:`BTStreamingResponseModel` is a Pydantic model that encapsulates the token streamer callable for Pydantic validation. + It is used within the :class:`StreamingSynapse` class to create a :class:`BTStreamingResponse` object, which is responsible for handling + the streaming of tokens. + + The token streamer is a callable that takes a send function and returns an awaitable. It is responsible for generating + the content of the streaming response, typically by processing tokens and sending them to the client. + + This model ensures that the token streamer conforms to the expected signature and provides a clear interface for + passing the token streamer to the BTStreamingResponse class. + + Attributes: + token_streamer: Callable[[Send], Awaitable[None]] The token streamer callable, which takes a send function (provided by the ASGI server) and returns an awaitable. It is responsible for generating the content of the streaming response. + """ + + token_streamer: Callable[[Send], Awaitable[None]] + + +class StreamingSynapse(Synapse, ABC): + """ + The :class:`StreamingSynapse` class is designed to be subclassed for handling streaming responses in the Bittensor network. + It provides abstract methods that must be implemented by the subclass to deserialize, process streaming responses, + and extract JSON data. It also includes a method to create a streaming response object. 
+ """ + + model_config = ConfigDict(validate_assignment=True) + + class BTStreamingResponse(_StreamingResponse): + """ + :func:`BTStreamingResponse` is a specialized subclass of the Starlette StreamingResponse designed to handle the streaming + of tokens within the Bittensor network. It is used internally by the StreamingSynapse class to manage the response + streaming process, including sending headers and calling the token streamer provided by the subclass. + + This class is not intended to be directly instantiated or modified by developers subclassing StreamingSynapse. + Instead, it is used by the :func:`create_streaming_response` method to create a response object based on the token streamer + provided by the subclass. + """ + + def __init__( + self, + model: "BTStreamingResponseModel", + *, + synapse: "Optional[StreamingSynapse]" = None, + **kwargs, + ): + """ + Initializes the BTStreamingResponse with the given token streamer model. + + Args: + model (bittensor.core.stream.BTStreamingResponseModel): A BTStreamingResponseModel instance containing the token streamer callable, which is responsible for generating the content of the response. + synapse (bittensor.core.stream.StreamingSynapse): The response Synapse to be used to update the response headers etc. + **kwargs: Additional keyword arguments passed to the parent StreamingResponse class. + """ + super().__init__(content=iter(()), **kwargs) + self.token_streamer = model.token_streamer + self.synapse = synapse + + async def stream_response(self, send: "Send"): + """ + Asynchronously streams the response by sending headers and calling the token streamer. + + This method is responsible for initiating the response by sending the appropriate headers, including the content type for event-streaming. It then calls the token streamer to generate the content and sends the response body to the client. + + Args: + send (starlette.types.Send): A callable to send the response, provided by the ASGI server. 
+ """ + headers = [(b"content-type", b"text/event-stream")] + self.raw_headers + + await send( + {"type": "http.response.start", "status": 200, "headers": headers} + ) + + await self.token_streamer(send) + + await send({"type": "http.response.body", "body": b"", "more_body": False}) + + async def __call__(self, scope: "Scope", receive: "Receive", send: "Send"): + """ + Asynchronously calls the :func:`stream_response method`, allowing the :func:`BTStreamingResponse` object to be used as an ASGI application. + + This method is part of the ASGI interface and is called by the ASGI server to handle the request and send the response. It delegates to the :func:`stream_response` method to perform the actual streaming process. + + Args: + scope (starlette.types.Scope): The scope of the request, containing information about the client, server, and request itself. + receive (starlette.types.Receive): A callable to receive the request, provided by the ASGI server. + send (starlette.types.Send): A callable to send the response, provided by the ASGI server. + """ + await self.stream_response(send) + + @abstractmethod + async def process_streaming_response(self, response: "ClientResponse"): + """ + Abstract method that must be implemented by the subclass. + This method should provide logic to handle the streaming response, such as parsing and accumulating data. + It is called as the response is being streamed from the network, and should be implemented to handle the specific streaming data format and requirements of the subclass. + + Args: + response (aiohttp.ClientResponse): The response object to be processed, typically containing chunks of data. + """ + ... + + @abstractmethod + def extract_response_json(self, response: "ClientResponse") -> dict: + """ + Abstract method that must be implemented by the subclass. + This method should provide logic to extract JSON data from the response, including headers and content. 
+ It is called after the response has been processed and is responsible for retrieving structured data that can be used by the application. + + Args: + response (aiohttp.ClientResponse): The response object from which to extract JSON data. + + Returns: + dict: The JSON data extracted from the response, including headers and content. + """ + + def create_streaming_response( + self, token_streamer: Callable[[Send], Awaitable[None]] + ) -> "BTStreamingResponse": + """ + Creates a streaming response using the provided token streamer. + This method can be used by the subclass to create a response object that can be sent back to the client. + The token streamer should be implemented to generate the content of the response according to the specific requirements of the subclass. + + Args: + token_streamer (Callable[[starlette.types.Send], Awaitable[None]]): A callable that takes a send function and returns an awaitable. It's responsible for generating the content of the response. + + Returns: + BTStreamingResponse (bittensor.core.stream.StreamingSynapse.BTStreamingResponse): The streaming response object, ready to be sent to the client. 
+ """ + model_instance = BTStreamingResponseModel(token_streamer=token_streamer) + + return self.BTStreamingResponse(model_instance, synapse=self) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py new file mode 100644 index 0000000000..5339645952 --- /dev/null +++ b/bittensor/core/subtensor.py @@ -0,0 +1,1733 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +""" +The ``bittensor.core.subtensor`` module in Bittensor serves as a crucial interface for interacting with the Bittensor +blockchain, facilitating a range of operations essential for the decentralized machine learning network. 
+""" + +import argparse +import copy +import socket +from typing import Union, Optional, TypedDict, Any + +import numpy as np +import scalecodec +from bittensor_wallet import Wallet +from numpy.typing import NDArray +from retry import retry +from scalecodec.base import RuntimeConfiguration +from scalecodec.exceptions import RemainingScaleBytesNotEmptyException +from scalecodec.type_registry import load_type_registry_preset +from scalecodec.types import ScaleType +from substrateinterface.base import QueryMapResult, SubstrateInterface + +from bittensor.core import settings +from bittensor.core.axon import Axon +from bittensor.core.chain_data import ( + NeuronInfo, + PrometheusInfo, + SubnetHyperparameters, + NeuronInfoLite, + custom_rpc_type_registry, +) +from bittensor.core.config import Config +from bittensor.core.extrinsics.commit_weights import ( + commit_weights_extrinsic, + reveal_weights_extrinsic, +) +from bittensor.core.extrinsics.prometheus import ( + do_serve_prometheus, + prometheus_extrinsic, +) +from bittensor.core.extrinsics.serving import ( + do_serve_axon, + serve_axon_extrinsic, + publish_metadata, + get_metadata, +) +from bittensor.core.extrinsics.set_weights import set_weights_extrinsic +from bittensor.core.extrinsics.transfer import ( + transfer_extrinsic, +) +from bittensor.core.metagraph import Metagraph +from bittensor.utils import torch +from bittensor.utils import u16_normalized_float, networking +from bittensor.utils.balance import Balance +from bittensor.utils.btlogging import logging +from bittensor.utils.weight_utils import generate_weight_hash + +KEY_NONCE: dict[str, int] = {} + + +class ParamWithTypes(TypedDict): + name: str # Name of the parameter. + type: str # ScaleType string of the parameter. + + +class Subtensor: + """ + The Subtensor class in Bittensor serves as a crucial interface for interacting with the Bittensor blockchain, + facilitating a range of operations essential for the decentralized machine learning network. 
+ + This class enables neurons (network participants) to engage in activities such as registering on the network, + managing staked weights, setting inter-neuronal weights, and participating in consensus mechanisms. + + The Bittensor network operates on a digital ledger where each neuron holds stakes (S) and learns a set + of inter-peer weights (W). These weights, set by the neurons themselves, play a critical role in determining + the ranking and incentive mechanisms within the network. Higher-ranked neurons, as determined by their + contributions and trust within the network, receive more incentives. + + The Subtensor class connects to various Bittensor networks like the main ``finney`` network or local test + networks, providing a gateway to the blockchain layer of Bittensor. It leverages a staked weighted trust + system and consensus to ensure fair and distributed incentive mechanisms, where incentives (I) are + primarily allocated to neurons that are trusted by the majority of the network. + + Additionally, Bittensor introduces a speculation-based reward mechanism in the form of bonds (B), allowing + neurons to accumulate bonds in other neurons, speculating on their future value. This mechanism aligns + with market-based speculation, incentivizing neurons to make judicious decisions in their inter-neuronal + investments. + + Example Usage:: + + from bittensor.core.subtensor import Subtensor + + # Connect to the main Bittensor network (Finney). + finney_subtensor = Subtensor(network='finney') + + # Close websocket connection with the Bittensor network. + finney_subtensor.close() + + # Register a new neuron on the network. + wallet = bittensor_wallet.Wallet(...) # Assuming a wallet instance is created. + netuid = 1 + success = finney_subtensor.register(wallet=wallet, netuid=netuid) + + # Set inter-neuronal weights for collaborative learning. 
+ success = finney_subtensor.set_weights(wallet=wallet, netuid=netuid, uids=[...], weights=[...]) + + # Get the metagraph for a specific subnet using given subtensor connection + metagraph = finney_subtensor.metagraph(netuid=netuid) + + By facilitating these operations, the Subtensor class is instrumental in maintaining the decentralized + intelligence and dynamic learning environment of the Bittensor network, as envisioned in its foundational + principles and mechanisms described in the `NeurIPS paper + `_. + """ + + def __init__( + self, + network: Optional[str] = None, + config: Optional["Config"] = None, + _mock: bool = False, + log_verbose: bool = True, + connection_timeout: int = 600, + ) -> None: + """ + Initializes a Subtensor interface for interacting with the Bittensor blockchain. + + NOTE: + Currently subtensor defaults to the ``finney`` network. This will change in a future release. + + We strongly encourage users to run their own local subtensor node whenever possible. This increases decentralization and resilience of the network. In a future release, local subtensor will become the default and the fallback to ``finney`` removed. Please plan ahead for this change. We will provide detailed instructions on how to run a local subtensor node in the documentation in a subsequent release. + + Args: + network (Optional[str]): The network name to connect to (e.g., ``finney``, ``local``). This can also be the chain endpoint (e.g., ``wss://entrypoint-finney.opentensor.ai:443``) and will be correctly parsed into the network and chain endpoint. If not specified, defaults to the main Bittensor network. + config (Optional[bittensor.core.config.Config]): Configuration object for the subtensor. If not provided, a default configuration is used. + _mock (bool): If set to ``True``, uses a mocked connection for testing purposes. Default is ``False``. + log_verbose (bool): Whether to enable verbose logging. 
If set to ``True``, detailed log information about the connection and network operations will be provided. Default is ``True``. + connection_timeout (int): The maximum time in seconds to keep the connection alive. Default is ``600``. + + This initialization sets up the connection to the specified Bittensor network, allowing for various blockchain operations such as neuron registration, stake management, and setting weights. + """ + # Determine config.subtensor.chain_endpoint and config.subtensor.network config. + # If chain_endpoint is set, we override the network flag, otherwise, the chain_endpoint is assigned by the + # network. + # Argument importance: network > chain_endpoint > config.subtensor.chain_endpoint > config.subtensor.network + + if config is None: + config = Subtensor.config() + self._config = copy.deepcopy(config) + + # Setup config.subtensor.network and config.subtensor.chain_endpoint + self.chain_endpoint, self.network = Subtensor.setup_config( + network, self._config + ) + + if ( + self.network == "finney" + or self.chain_endpoint == settings.FINNEY_ENTRYPOINT + ) and log_verbose: + logging.info( + f"You are connecting to {self.network} network with endpoint {self.chain_endpoint}." + ) + logging.warning( + "We strongly encourage running a local subtensor node whenever possible. " + "This increases decentralization and resilience of the network." + ) + logging.warning( + "In a future release, local subtensor will become the default endpoint. " + "To get ahead of this change, please run a local subtensor node and point to it." + ) + + self.log_verbose = log_verbose + self._connection_timeout = connection_timeout + self._get_substrate() + + def __str__(self) -> str: + if self.network == self.chain_endpoint: + # Connecting to chain endpoint without network known. + return f"subtensor({self.chain_endpoint})" + else: + # Connecting to network with endpoint known. 
+ return f"subtensor({self.network}, {self.chain_endpoint})" + + def __repr__(self) -> str: + return self.__str__() + + def close(self): + """Cleans up resources for this subtensor instance like active websocket connection and active extensions.""" + self.substrate.close() + + def _get_substrate(self): + """Establishes a connection to the Substrate node using configured parameters.""" + try: + # Set up params. + self.substrate = SubstrateInterface( + ss58_format=settings.SS58_FORMAT, + use_remote_preset=True, + url=self.chain_endpoint, + type_registry=settings.TYPE_REGISTRY, + ) + if self.log_verbose: + logging.info( + f"Connected to {self.network} network and {self.chain_endpoint}." + ) + + try: + self.substrate.websocket.settimeout(self._connection_timeout) + except (AttributeError, TypeError, socket.error, OSError) as e: + logging.warning(f"Error setting timeout: {e}") + + except ConnectionRefusedError: + logging.error( + f"Could not connect to {self.network} network with {self.chain_endpoint} chain endpoint.", + ) + logging.info( + "You can check if you have connectivity by running this command: nc -vz localhost " + f"{self.chain_endpoint.split(':')[2]}" + ) + + @staticmethod + def config() -> "Config": + """ + Creates and returns a Bittensor configuration object. + + Returns: + config (bittensor.core.config.Config): A Bittensor configuration object configured with arguments added by the `subtensor.add_args` method. + """ + parser = argparse.ArgumentParser() + Subtensor.add_args(parser) + return Config(parser, args=[]) + + @staticmethod + def setup_config(network: Optional[str], config: "Config"): + """ + Sets up and returns the configuration for the Subtensor network and endpoint. + + This method determines the appropriate network and chain endpoint based on the provided network string or + configuration object. It evaluates the network and endpoint in the following order of precedence: + 1. Provided network string. + 2. 
Configured chain endpoint in the `config` object. + 3. Configured network in the `config` object. + 4. Default chain endpoint. + 5. Default network. + + Args: + network (Optional[str]): The name of the Subtensor network. If None, the network and endpoint will be determined from the `config` object. + config (bittensor.core.config.Config): The configuration object containing the network and chain endpoint settings. + + Returns: + tuple: A tuple containing the formatted WebSocket endpoint URL and the evaluated network name. + """ + if network is not None: + ( + evaluated_network, + evaluated_endpoint, + ) = Subtensor.determine_chain_endpoint_and_network(network) + else: + if config.is_set("subtensor.chain_endpoint"): + ( + evaluated_network, + evaluated_endpoint, + ) = Subtensor.determine_chain_endpoint_and_network( + config.subtensor.chain_endpoint + ) + + elif config.is_set("subtensor.network"): + ( + evaluated_network, + evaluated_endpoint, + ) = Subtensor.determine_chain_endpoint_and_network( + config.subtensor.network + ) + + elif config.subtensor.get("chain_endpoint"): + ( + evaluated_network, + evaluated_endpoint, + ) = Subtensor.determine_chain_endpoint_and_network( + config.subtensor.chain_endpoint + ) + + elif config.subtensor.get("network"): + ( + evaluated_network, + evaluated_endpoint, + ) = Subtensor.determine_chain_endpoint_and_network( + config.subtensor.network + ) + + else: + ( + evaluated_network, + evaluated_endpoint, + ) = Subtensor.determine_chain_endpoint_and_network( + settings.DEFAULTS.subtensor.network + ) + + return ( + networking.get_formatted_ws_endpoint_url(evaluated_endpoint), + evaluated_network, + ) + + @classmethod + def help(cls): + """Print help to stdout.""" + parser = argparse.ArgumentParser() + cls.add_args(parser) + print(cls.__new__.__doc__) + parser.print_help() + + @classmethod + def add_args(cls, parser: "argparse.ArgumentParser", prefix: Optional[str] = None): + """ + Adds command-line arguments to the provided 
ArgumentParser for configuring the Subtensor settings. + + Args: + parser (argparse.ArgumentParser): The ArgumentParser object to which the Subtensor arguments will be added. + prefix (Optional[str]): An optional prefix for the argument names. If provided, the prefix is prepended to each argument name. + + Arguments added: + --subtensor.network: The Subtensor network flag. Possible values are 'finney', 'test', 'archive', and 'local'. Overrides the chain endpoint if set. + --subtensor.chain_endpoint: The Subtensor chain endpoint flag. If set, it overrides the network flag. + --subtensor._mock: If true, uses a mocked connection to the chain. + + Example: + parser = argparse.ArgumentParser() + Subtensor.add_args(parser) + """ + prefix_str = "" if prefix is None else f"{prefix}." + try: + default_network = settings.DEFAULT_NETWORK + default_chain_endpoint = settings.FINNEY_ENTRYPOINT + + parser.add_argument( + f"--{prefix_str}subtensor.network", + default=default_network, + type=str, + help="""The subtensor network flag. The likely choices are: + -- finney (main network) + -- test (test network) + -- archive (archive network +300 blocks) + -- local (local running network) + If this option is set it overloads subtensor.chain_endpoint with + an entry point node from that network. + """, + ) + parser.add_argument( + f"--{prefix_str}subtensor.chain_endpoint", + default=default_chain_endpoint, + type=str, + help="""The subtensor endpoint flag. If set, overrides the --network flag.""", + ) + parser.add_argument( + f"--{prefix_str}subtensor._mock", + default=False, + type=bool, + help="""If true, uses a mocked connection to the chain.""", + ) + + except argparse.ArgumentError: + # re-parsing arguments. 
+ pass + + # Inner private functions + @networking.ensure_connected + def _encode_params( + self, + call_definition: list["ParamWithTypes"], + params: Union[list[Any], dict[str, Any]], + ) -> str: + """Returns a hex encoded string of the params using their types.""" + param_data = scalecodec.ScaleBytes(b"") + + for i, param in enumerate(call_definition["params"]): # type: ignore + scale_obj = self.substrate.create_scale_object(param["type"]) + if type(params) is list: + param_data += scale_obj.encode(params[i]) + else: + if param["name"] not in params: + raise ValueError(f"Missing param {param['name']} in params dict.") + + param_data += scale_obj.encode(params[param["name"]]) + + return param_data.to_hex() + + def _get_hyperparameter( + self, param_name: str, netuid: int, block: Optional[int] = None + ) -> Optional[Any]: + """ + Retrieves a specified hyperparameter for a specific subnet. + + Args: + param_name (str): The name of the hyperparameter to retrieve. + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number for the query. + + Returns: + Optional[Union[int, float]]: The value of the specified hyperparameter if the subnet exists, ``None`` otherwise. + """ + if not self.subnet_exists(netuid, block): + return None + + result = self.query_subtensor(param_name, block, [netuid]) + if result is None or not hasattr(result, "value"): + return None + + return result.value + + # Calls methods + @networking.ensure_connected + def query_subtensor( + self, name: str, block: Optional[int] = None, params: Optional[list] = None + ) -> "ScaleType": + """ + Queries named storage from the Subtensor module on the Bittensor blockchain. This function is used to retrieve specific data or parameters from the blockchain, such as stake, rank, or other neuron-specific attributes. + + Args: + name (str): The name of the storage function to query. + block (Optional[int]): The blockchain block number at which to perform the query. 
+ params (Optional[list[object]]): A list of parameters to pass to the query function. + + Returns: + query_response (scalecodec.ScaleType): An object containing the requested data. + + This query function is essential for accessing detailed information about the network and its neurons, providing valuable insights into the state and dynamics of the Bittensor ecosystem. + """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging) + def make_substrate_call_with_retry() -> "ScaleType": + return self.substrate.query( + module="SubtensorModule", + storage_function=name, + params=params, + block_hash=( + None if block is None else self.substrate.get_block_hash(block) + ), + ) + + return make_substrate_call_with_retry() + + @networking.ensure_connected + def query_map_subtensor( + self, name: str, block: Optional[int] = None, params: Optional[list] = None + ) -> "QueryMapResult": + """ + Queries map storage from the Subtensor module on the Bittensor blockchain. This function is designed to retrieve a map-like data structure, which can include various neuron-specific details or network-wide attributes. + + Args: + name (str): The name of the map storage function to query. + block (Optional[int]): The blockchain block number at which to perform the query. + params (Optional[list[object]]): A list of parameters to pass to the query function. + + Returns: + QueryMapResult (substrateinterface.base.QueryMapResult): An object containing the map-like data structure, or ``None`` if not found. + + This function is particularly useful for analyzing and understanding complex network structures and relationships within the Bittensor ecosystem, such as inter-neuronal connections and stake distributions. 
+ """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging) + def make_substrate_call_with_retry(): + return self.substrate.query_map( + module="SubtensorModule", + storage_function=name, + params=params, + block_hash=( + None if block is None else self.substrate.get_block_hash(block) + ), + ) + + return make_substrate_call_with_retry() + + def query_runtime_api( + self, + runtime_api: str, + method: str, + params: Optional[Union[list[int], dict[str, int]]], + block: Optional[int] = None, + ) -> Optional[str]: + """ + Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users who need to interact with specific runtime methods and decode complex data types. + + Args: + runtime_api (str): The name of the runtime API to query. + method (str): The specific method within the runtime API to call. + params (Optional[list[ParamWithTypes]]): The parameters to pass to the method call. + block (Optional[int]): The blockchain block number at which to perform the query. + + Returns: + Optional[str]: The Scale Bytes encoded result from the runtime API call, or ``None`` if the call fails. + + This function enables access to the deeper layers of the Bittensor blockchain, allowing for detailed and specific interactions with the network's runtime environment. 
+ """ + call_definition = settings.TYPE_REGISTRY["runtime_api"][runtime_api]["methods"][ + method + ] + + json_result = self.state_call( + method=f"{runtime_api}_{method}", + data=( + "0x" + if params is None + else self._encode_params(call_definition=call_definition, params=params) + ), + block=block, + ) + + if json_result is None: + return None + + return_type = call_definition["type"] + + as_scale_bytes = scalecodec.ScaleBytes(json_result["result"]) + + rpc_runtime_config = RuntimeConfiguration() + rpc_runtime_config.update_type_registry(load_type_registry_preset("legacy")) + rpc_runtime_config.update_type_registry(custom_rpc_type_registry) + + obj = rpc_runtime_config.create_scale_object(return_type, as_scale_bytes) + if obj.data.to_hex() == "0x0400": # RPC returned None result + return None + + return obj.decode() + + @networking.ensure_connected + def state_call( + self, method: str, data: str, block: Optional[int] = None + ) -> dict[Any, Any]: + """ + Makes a state call to the Bittensor blockchain, allowing for direct queries of the blockchain's state. This function is typically used for advanced queries that require specific method calls and data inputs. + + Args: + method (str): The method name for the state call. + data (str): The data to be passed to the method. + block (Optional[int]): The blockchain block number at which to perform the state call. + + Returns: + result (dict[Any, Any]): The result of the rpc call. + + The state call function provides a more direct and flexible way of querying blockchain data, useful for specific use cases where standard queries are insufficient. 
+ """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging) + def make_substrate_call_with_retry() -> dict[Any, Any]: + block_hash = None if block is None else self.substrate.get_block_hash(block) + return self.substrate.rpc_request( + method="state_call", + params=[method, data, block_hash] if block_hash else [method, data], + ) + + return make_substrate_call_with_retry() + + @networking.ensure_connected + def query_map( + self, + module: str, + name: str, + block: Optional[int] = None, + params: Optional[list] = None, + ) -> "QueryMapResult": + """ + Queries map storage from any module on the Bittensor blockchain. This function retrieves data structures that represent key-value mappings, essential for accessing complex and structured data within the blockchain modules. + + Args: + module (str): The name of the module from which to query the map storage. + name (str): The specific storage function within the module to query. + block (Optional[int]): The blockchain block number at which to perform the query. + params (Optional[list[object]]): Parameters to be passed to the query. + + Returns: + result (substrateinterface.base.QueryMapResult): A data structure representing the map storage if found, ``None`` otherwise. + + This function is particularly useful for retrieving detailed and structured data from various blockchain modules, offering insights into the network's state and the relationships between its different components. 
+ """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging) + def make_substrate_call_with_retry() -> "QueryMapResult": + return self.substrate.query_map( + module=module, + storage_function=name, + params=params, + block_hash=( + None if block is None else self.substrate.get_block_hash(block) + ), + ) + + return make_substrate_call_with_retry() + + @networking.ensure_connected + def query_constant( + self, module_name: str, constant_name: str, block: Optional[int] = None + ) -> Optional["ScaleType"]: + """ + Retrieves a constant from the specified module on the Bittensor blockchain. This function is used to access fixed parameters or values defined within the blockchain's modules, which are essential for understanding the network's configuration and rules. + + Args: + module_name (str): The name of the module containing the constant. + constant_name (str): The name of the constant to retrieve. + block (Optional[int]): The blockchain block number at which to query the constant. + + Returns: + Optional[scalecodec.ScaleType]: The value of the constant if found, ``None`` otherwise. + + Constants queried through this function can include critical network parameters such as inflation rates, consensus rules, or validation thresholds, providing a deeper understanding of the Bittensor network's operational parameters. + """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging) + def make_substrate_call_with_retry(): + return self.substrate.get_constant( + module_name=module_name, + constant_name=constant_name, + block_hash=( + None if block is None else self.substrate.get_block_hash(block) + ), + ) + + return make_substrate_call_with_retry() + + @networking.ensure_connected + def query_module( + self, + module: str, + name: str, + block: Optional[int] = None, + params: Optional[list] = None, + ) -> "ScaleType": + """ + Queries any module storage on the Bittensor blockchain with the specified parameters and block number. 
This function is a generic query interface that allows for flexible and diverse data retrieval from various blockchain modules. + + Args: + module (str): The name of the module from which to query data. + name (str): The name of the storage function within the module. + block (Optional[int]): The blockchain block number at which to perform the query. + params (Optional[list[object]]): A list of parameters to pass to the query function. + + Returns: + Optional[scalecodec.ScaleType]: An object containing the requested data if found, ``None`` otherwise. + + This versatile query function is key to accessing a wide range of data and insights from different parts of the Bittensor blockchain, enhancing the understanding and analysis of the network's state and dynamics. + """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging) + def make_substrate_call_with_retry() -> "ScaleType": + return self.substrate.query( + module=module, + storage_function=name, + params=params, + block_hash=( + None if block is None else self.substrate.get_block_hash(block) + ), + ) + + return make_substrate_call_with_retry() + + # Common subtensor methods + def metagraph( + self, netuid: int, lite: bool = True, block: Optional[int] = None + ) -> "Metagraph": # type: ignore + """ + Returns a synced metagraph for a specified subnet within the Bittensor network. The metagraph represents the network's structure, including neuron connections and interactions. + + Args: + netuid (int): The network UID of the subnet to query. + lite (bool): If true, returns a metagraph using a lightweight sync (no weights, no bonds). Default is ``True``. + block (Optional[int]): Block number for synchronization, or ``None`` for the latest block. + + Returns: + bittensor.core.metagraph.Metagraph: The metagraph representing the subnet's structure and neuron relationships. 
+
+        The metagraph is an essential tool for understanding the topology and dynamics of the Bittensor network's decentralized architecture, particularly in relation to neuron interconnectivity and consensus processes.
+        """
+        metagraph = Metagraph(
+            network=self.network, netuid=netuid, lite=lite, sync=False
+        )
+        metagraph.sync(block=block, lite=lite, subtensor=self)
+
+        return metagraph
+
+    @staticmethod
+    def determine_chain_endpoint_and_network(
+        network: str,
+    ) -> tuple[Optional[str], Optional[str]]:
+        """Determines the chain endpoint and network from the passed network or chain_endpoint.
+
+        Args:
+            network (str): The network flag. The choices are: ``finney`` (main network), ``archive`` (archive network +300 blocks), ``local`` (local running network), ``test`` (test network).
+
+        Returns:
+            tuple[Optional[str], Optional[str]]: The network and chain endpoint flag. If passed, overrides the ``network`` argument.
+        """
+
+        if network is None:
+            return None, None
+        if network in ["finney", "local", "test", "archive"]:
+            if network == "finney":
+                # Finney is the main Bittensor network.
+ return network, settings.FINNEY_ENTRYPOINT + elif network == "local": + return network, settings.LOCAL_ENTRYPOINT + elif network == "test": + return network, settings.FINNEY_TEST_ENTRYPOINT + elif network == "archive": + return network, settings.ARCHIVE_ENTRYPOINT + else: + if ( + network == settings.FINNEY_ENTRYPOINT + or "entrypoint-finney.opentensor.ai" in network + ): + return "finney", settings.FINNEY_ENTRYPOINT + elif ( + network == settings.FINNEY_TEST_ENTRYPOINT + or "test.finney.opentensor.ai" in network + ): + return "test", settings.FINNEY_TEST_ENTRYPOINT + elif ( + network == settings.ARCHIVE_ENTRYPOINT + or "archive.chain.opentensor.ai" in network + ): + return "archive", settings.ARCHIVE_ENTRYPOINT + elif "127.0.0.1" in network or "localhost" in network: + return "local", network + else: + return "unknown", network + return None, None + + def get_netuids_for_hotkey( + self, hotkey_ss58: str, block: Optional[int] = None + ) -> list[int]: + """ + Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. This function identifies the specific subnets within the Bittensor network where the neuron associated with the hotkey is active. + + Args: + hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. + block (Optional[int]): The blockchain block number at which to perform the query. + + Returns: + list[int]: A list of netuids where the neuron is a member. + """ + result = self.query_map_subtensor("IsNetworkMember", block, [hotkey_ss58]) + return ( + [record[0].value for record in result if record[1]] + if result and hasattr(result, "records") + else [] + ) + + @networking.ensure_connected + def get_current_block(self) -> int: + """ + Returns the current block number on the Bittensor blockchain. This function provides the latest block number, indicating the most recent state of the blockchain. + + Returns: + int: The current chain block number. 
+ + Knowing the current block number is essential for querying real-time data and performing time-sensitive operations on the blockchain. It serves as a reference point for network activities and data synchronization. + """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging) + def make_substrate_call_with_retry(): + return self.substrate.get_block_number(None) # type: ignore + + return make_substrate_call_with_retry() + + def is_hotkey_registered_any( + self, hotkey_ss58: str, block: Optional[int] = None + ) -> bool: + """ + Checks if a neuron's hotkey is registered on any subnet within the Bittensor network. + + Args: + hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. + block (Optional[int]): The blockchain block number at which to perform the check. + + Returns: + bool: ``True`` if the hotkey is registered on any subnet, False otherwise. + + This function is essential for determining the network-wide presence and participation of a neuron. + """ + return len(self.get_netuids_for_hotkey(hotkey_ss58, block)) > 0 + + def is_hotkey_registered_on_subnet( + self, hotkey_ss58: str, netuid: int, block: Optional[int] = None + ) -> bool: + """ + Checks if a neuron's hotkey is registered on a specific subnet within the Bittensor network. + + Args: + hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number at which to perform the check. + + Returns: + bool: ``True`` if the hotkey is registered on the specified subnet, False otherwise. + + This function helps in assessing the participation of a neuron in a particular subnet, indicating its specific area of operation or influence within the network. 
+ """ + return self.get_uid_for_hotkey_on_subnet(hotkey_ss58, netuid, block) is not None + + def is_hotkey_registered( + self, + hotkey_ss58: str, + netuid: Optional[int] = None, + block: Optional[int] = None, + ) -> bool: + """ + Determines whether a given hotkey (public key) is registered in the Bittensor network, either globally across any subnet or specifically on a specified subnet. This function checks the registration status of a neuron identified by its hotkey, which is crucial for validating its participation and activities within the network. + + Args: + hotkey_ss58 (str): The SS58 address of the neuron's hotkey. + netuid (Optional[int]): The unique identifier of the subnet to check the registration. If ``None``, the registration is checked across all subnets. + block (Optional[int]): The blockchain block number at which to perform the query. + + Returns: + bool: ``True`` if the hotkey is registered in the specified context (either any subnet or a specific subnet), ``False`` otherwise. + + This function is important for verifying the active status of neurons in the Bittensor network. It aids in understanding whether a neuron is eligible to participate in network processes such as consensus, validation, and incentive distribution based on its registration status. + """ + if netuid is None: + return self.is_hotkey_registered_any(hotkey_ss58, block) + else: + return self.is_hotkey_registered_on_subnet(hotkey_ss58, netuid, block) + + # Not used in Bittensor, but is actively used by the community in almost all subnets + def set_weights( + self, + wallet: "Wallet", + netuid: int, + uids: Union[NDArray[np.int64], "torch.LongTensor", list], + weights: Union[NDArray[np.float32], "torch.FloatTensor", list], + version_key: int = settings.version_as_int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + prompt: bool = False, + max_retries: int = 5, + ) -> tuple[bool, str]: + """ + Sets the inter-neuronal weights for the specified neuron. 
This process involves specifying the influence or trust a neuron places on other neurons in the network, which is a fundamental aspect of Bittensor's decentralized learning architecture.
+
+        Args:
+            wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
+            netuid (int): The unique identifier of the subnet.
+            uids (Union[NDArray[np.int64], torch.LongTensor, list]): The list of neuron UIDs that the weights are being set for.
+            weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The corresponding weights to be set for each UID.
+            version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``.
+            wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
+            wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
+            prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``.
+            max_retries (int): The number of maximum attempts to set weights. Default is ``5``.
+
+        Returns:
+            tuple[bool, str]: ``True`` if the setting of weights is successful, False otherwise. And `msg`, a string value describing the success or potential error.
+
+        This function is crucial in shaping the network's collective intelligence, where each neuron's learning and contribution are influenced by the weights it sets towards others.
+        """
+        uid = self.get_uid_for_hotkey_on_subnet(wallet.hotkey.ss58_address, netuid)
+        retries = 0
+        success = False
+        message = "No attempt made. Perhaps it is too soon to set weights!"
+        while (
+            self.blocks_since_last_update(netuid, uid) > self.weights_rate_limit(netuid)  # type: ignore
+            and retries < max_retries
+        ):
+            try:
+                logging.info(
+                    f"Setting weights for subnet #{netuid}. Attempt {retries + 1} of {max_retries}."
+ ) + success, message = set_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + uids=uids, + weights=weights, + version_key=version_key, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + except Exception as e: + logging.error(f"Error setting weights: {e}") + finally: + retries += 1 + + return success, message + + def serve_axon( + self, + netuid: int, + axon: "Axon", + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, + ) -> bool: + """ + Registers an ``Axon`` serving endpoint on the Bittensor network for a specific neuron. This function is used to set up the Axon, a key component of a neuron that handles incoming queries and data processing tasks. + + Args: + netuid (int): The unique identifier of the subnetwork. + axon (bittensor.core.axon.Axon): The Axon instance to be registered for serving. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``True``. + + Returns: + bool: ``True`` if the Axon serve registration is successful, False otherwise. + + By registering an Axon, the neuron becomes an active part of the network's distributed computing infrastructure, contributing to the collective intelligence of Bittensor. + """ + return serve_axon_extrinsic( + self, netuid, axon, wait_for_inclusion, wait_for_finalization + ) + + # metagraph + @property + def block(self) -> int: + """Returns current chain block. + + Returns: + block (int): Current chain block. + """ + return self.get_current_block() + + def blocks_since_last_update(self, netuid: int, uid: int) -> Optional[int]: + """ + Returns the number of blocks since the last update for a specific UID in the subnetwork. + + Args: + netuid (int): The unique identifier of the subnetwork. + uid (int): The unique identifier of the neuron. 
+ + Returns: + Optional[int]: The number of blocks since the last update, or ``None`` if the subnetwork or UID does not exist. + """ + call = self._get_hyperparameter(param_name="LastUpdate", netuid=netuid) + return None if call is None else self.get_current_block() - int(call[uid]) + + @networking.ensure_connected + def get_block_hash(self, block_id: int) -> str: + """ + Retrieves the hash of a specific block on the Bittensor blockchain. The block hash is a unique identifier representing the cryptographic hash of the block's content, ensuring its integrity and immutability. + + Args: + block_id (int): The block number for which the hash is to be retrieved. + + Returns: + str: The cryptographic hash of the specified block. + + The block hash is a fundamental aspect of blockchain technology, providing a secure reference to each block's data. It is crucial for verifying transactions, ensuring data consistency, and maintaining the trustworthiness of the blockchain. + """ + return self.substrate.get_block_hash(block_id=block_id) + + def weights_rate_limit(self, netuid: int) -> Optional[int]: + """ + Returns network WeightsSetRateLimit hyperparameter. + + Args: + netuid (int): The unique identifier of the subnetwork. + + Returns: + Optional[int]: The value of the WeightsSetRateLimit hyperparameter, or ``None`` if the subnetwork does not exist or the parameter is not found. + """ + call = self._get_hyperparameter(param_name="WeightsSetRateLimit", netuid=netuid) + return None if call is None else int(call) + + # Keep backwards compatibility for community usage. + # Make some commitment on-chain about arbitrary data. + def commit(self, wallet, netuid: int, data: str): + """ + Commits arbitrary data to the Bittensor network by publishing metadata. + + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the data. + netuid (int): The unique identifier of the subnetwork. + data (str): The data to be committed to the network. 
+ """ + publish_metadata(self, wallet, netuid, f"Raw{len(data)}", data.encode()) + + # Keep backwards compatibility for community usage. + def subnetwork_n(self, netuid: int, block: Optional[int] = None) -> Optional[int]: + """ + Returns network SubnetworkN hyperparameter. + + Args: + netuid (int): The unique identifier of the subnetwork. + block (Optional[int]): The block number to retrieve the parameter from. If ``None``, the latest block is used. Default is ``None``. + + Returns: + Optional[int]: The value of the SubnetworkN hyperparameter, or ``None`` if the subnetwork does not exist or the parameter is not found. + """ + call = self._get_hyperparameter( + param_name="SubnetworkN", netuid=netuid, block=block + ) + return None if call is None else int(call) + + # Community uses this method + def transfer( + self, + wallet: "Wallet", + dest: str, + amount: Union["Balance", float], + wait_for_inclusion: bool = True, + wait_for_finalization: bool = False, + prompt: bool = False, + ) -> bool: + """ + Executes a transfer of funds from the provided wallet to the specified destination address. This function is used to move TAO tokens within the Bittensor network, facilitating transactions between neurons. + + Args: + wallet (bittensor_wallet.Wallet): The wallet from which funds are being transferred. + dest (str): The destination public key address. + amount (Union[bittensor.utils.balance.Balance, float]): The amount of TAO to be transferred. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``True``. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. + prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. + + Returns: + transfer_extrinsic (bool): ``True`` if the transfer is successful, False otherwise. 
+ + This function is essential for the fluid movement of tokens in the network, supporting various economic activities such as staking, delegation, and reward distribution. + """ + return transfer_extrinsic( + subtensor=self, + wallet=wallet, + dest=dest, + amount=amount, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + + # Community uses this method via `bittensor.api.extrinsics.prometheus.prometheus_extrinsic` + def get_neuron_for_pubkey_and_subnet( + self, hotkey_ss58: str, netuid: int, block: Optional[int] = None + ) -> Optional["NeuronInfo"]: + """ + Retrieves information about a neuron based on its public key (hotkey SS58 address) and the specific subnet UID (netuid). This function provides detailed neuron information for a particular subnet within the Bittensor network. + + Args: + hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number at which to perform the query. + + Returns: + Optional[bittensor.core.chain_data.neuron_info.NeuronInfo]: Detailed information about the neuron if found, ``None`` otherwise. + + This function is crucial for accessing specific neuron data and understanding its status, stake, and other attributes within a particular subnet of the Bittensor ecosystem. + """ + return self.neuron_for_uid( + self.get_uid_for_hotkey_on_subnet(hotkey_ss58, netuid, block=block), + netuid, + block=block, + ) + + @networking.ensure_connected + def neuron_for_uid( + self, uid: Optional[int], netuid: int, block: Optional[int] = None + ) -> "NeuronInfo": + """ + Retrieves detailed information about a specific neuron identified by its unique identifier (UID) within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive view of a neuron's attributes, including its stake, rank, and operational status. 
+ + Args: + uid (Optional[int]): The unique identifier of the neuron. + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number for the query. + + Returns: + bittensor.core.chain_data.neuron_info.NeuronInfo: Detailed information about the neuron if found, ``None`` otherwise. + + This function is crucial for analyzing individual neurons' contributions and status within a specific subnet, offering insights into their roles in the network's consensus and validation mechanisms. + """ + if uid is None: + return NeuronInfo.get_null_neuron() + + @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging) + def make_substrate_call_with_retry(): + block_hash = None if block is None else self.substrate.get_block_hash(block) + params = [netuid, uid] + if block_hash: + params = params + [block_hash] + return self.substrate.rpc_request( + method="neuronInfo_getNeuron", + params=params, # custom rpc method + ) + + json_body = make_substrate_call_with_retry() + + if not (result := json_body.get("result", None)): + return NeuronInfo.get_null_neuron() + + return NeuronInfo.from_vec_u8(result) + + # Community uses this method + def serve_prometheus( + self, + wallet: "Wallet", + port: int, + netuid: int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, + ) -> bool: + """ + Serves Prometheus metrics by submitting an extrinsic to a blockchain network via the specified wallet. The function allows configuring whether to wait for the transaction's inclusion in a block and its finalization. + + Args: + wallet (bittensor_wallet.Wallet): Bittensor wallet instance used for submitting the extrinsic. + port (int): The port number on which Prometheus metrics are served. + netuid (int): The unique identifier of the subnetwork. + wait_for_inclusion (bool): If True, waits for the transaction to be included in a block. Defaults to ``False``. 
+ wait_for_finalization (bool): If True, waits for the transaction to be finalized. Defaults to ``True``. + + Returns: + bool: Returns True if the Prometheus extrinsic is successfully processed, otherwise False. + """ + return prometheus_extrinsic( + self, + wallet=wallet, + port=port, + netuid=netuid, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + # Community uses this method + def get_subnet_hyperparameters( + self, netuid: int, block: Optional[int] = None + ) -> Optional[Union[list, "SubnetHyperparameters"]]: + """ + Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters define the operational settings and rules governing the subnet's behavior. + + Args: + netuid (int): The network UID of the subnet to query. + block (Optional[int]): The blockchain block number for the query. + + Returns: + Optional[bittensor.core.chain_data.subnet_hyperparameters.SubnetHyperparameters]: The subnet's hyperparameters, or ``None`` if not available. + + Understanding the hyperparameters is crucial for comprehending how subnets are configured and managed, and how they interact with the network's consensus and incentive mechanisms. + """ + hex_bytes_result = self.query_runtime_api( + runtime_api="SubnetInfoRuntimeApi", + method="get_subnet_hyperparams", + params=[netuid], + block=block, + ) + + if hex_bytes_result is None: + return [] + + if hex_bytes_result.startswith("0x"): + bytes_result = bytes.fromhex(hex_bytes_result[2:]) + else: + bytes_result = bytes.fromhex(hex_bytes_result) + + return SubnetHyperparameters.from_vec_u8(bytes_result) # type: ignore + + # Community uses this method + # Returns network ImmunityPeriod hyper parameter. + def immunity_period( + self, netuid: int, block: Optional[int] = None + ) -> Optional[int]: + """ + Retrieves the 'ImmunityPeriod' hyperparameter for a specific subnet. 
This parameter defines the duration during which new neurons are protected from certain network penalties or restrictions. + + Args: + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number for the query. + + Returns: + Optional[int]: The value of the 'ImmunityPeriod' hyperparameter if the subnet exists, ``None`` otherwise. + + The 'ImmunityPeriod' is a critical aspect of the network's governance system, ensuring that new participants have a grace period to establish themselves and contribute to the network without facing immediate punitive actions. + """ + call = self._get_hyperparameter( + param_name="ImmunityPeriod", netuid=netuid, block=block + ) + return None if call is None else int(call) + + # Community uses this method + def get_uid_for_hotkey_on_subnet( + self, hotkey_ss58: str, netuid: int, block: Optional[int] = None + ) -> Optional[int]: + """ + Retrieves the unique identifier (UID) for a neuron's hotkey on a specific subnet. + + Args: + hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number for the query. + + Returns: + Optional[int]: The UID of the neuron if it is registered on the subnet, ``None`` otherwise. + + The UID is a critical identifier within the network, linking the neuron's hotkey to its operational and governance activities on a particular subnet. + """ + _result = self.query_subtensor("Uids", block, [netuid, hotkey_ss58]) + return getattr(_result, "value", None) + + # Community uses this method + def tempo(self, netuid: int, block: Optional[int] = None) -> Optional[int]: + """ + Returns network Tempo hyperparameter. + + Args: + netuid (int): The unique identifier of the subnetwork. + block (Optional[int]): The block number to retrieve the parameter from. If ``None``, the latest block is used. Default is ``None``. 
+ + Returns: + Optional[int]: The value of the Tempo hyperparameter, or ``None`` if the subnetwork does not exist or the parameter is not found. + """ + call = self._get_hyperparameter(param_name="Tempo", netuid=netuid, block=block) + return None if call is None else int(call) + + # Community uses this method + def get_commitment(self, netuid: int, uid: int, block: Optional[int] = None) -> str: + """ + Retrieves the on-chain commitment for a specific neuron in the Bittensor network. + + Args: + netuid (int): The unique identifier of the subnetwork. + uid (int): The unique identifier of the neuron. + block (Optional[int]): The block number to retrieve the commitment from. If None, the latest block is used. Default is ``None``. + + Returns: + str: The commitment data as a string. + """ + metagraph = self.metagraph(netuid) + hotkey = metagraph.hotkeys[uid] # type: ignore + + metadata = get_metadata(self, netuid, hotkey, block) + commitment = metadata["info"]["fields"][0] # type: ignore + hex_data = commitment[list(commitment.keys())[0]][2:] # type: ignore + + return bytes.fromhex(hex_data).decode() + + # Community uses this via `bittensor.utils.weight_utils.process_weights_for_netuid` function. + def min_allowed_weights( + self, netuid: int, block: Optional[int] = None + ) -> Optional[int]: + """ + Returns network MinAllowedWeights hyperparameter. + + Args: + netuid (int): The unique identifier of the subnetwork. + block (Optional[int]): The block number to retrieve the parameter from. If ``None``, the latest block is used. Default is ``None``. + + Returns: + Optional[int]: The value of the MinAllowedWeights hyperparameter, or ``None`` if the subnetwork does not exist or the parameter is not found. + """ + call = self._get_hyperparameter( + param_name="MinAllowedWeights", block=block, netuid=netuid + ) + return None if call is None else int(call) + + # Community uses this via `bittensor.utils.weight_utils.process_weights_for_netuid` function. 
+ def max_weight_limit( + self, netuid: int, block: Optional[int] = None + ) -> Optional[float]: + """ + Returns network MaxWeightsLimit hyperparameter. + + Args: + netuid (int): The unique identifier of the subnetwork. + block (Optional[int]): The block number to retrieve the parameter from. If ``None``, the latest block is used. Default is ``None``. + + Returns: + Optional[float]: The value of the MaxWeightsLimit hyperparameter, or ``None`` if the subnetwork does not exist or the parameter is not found. + """ + call = self._get_hyperparameter( + param_name="MaxWeightsLimit", block=block, netuid=netuid + ) + return None if call is None else u16_normalized_float(int(call)) + + # # Community uses this method. It is used in subtensor in neuron_info, and serving. + def get_prometheus_info( + self, netuid: int, hotkey_ss58: str, block: Optional[int] = None + ) -> Optional["PrometheusInfo"]: + """ + Returns the prometheus information for this hotkey account. + + Args: + netuid (int): The unique identifier of the subnetwork. + hotkey_ss58 (str): The SS58 address of the hotkey. + block (Optional[int]): The block number to retrieve the prometheus information from. If ``None``, the latest block is used. Default is ``None``. + + Returns: + Optional[bittensor.core.chain_data.prometheus_info.PrometheusInfo]: A PrometheusInfo object containing the prometheus information, or ``None`` if the prometheus information is not found. 
+ """ + result = self.query_subtensor("Prometheus", block, [netuid, hotkey_ss58]) + if result is not None and hasattr(result, "value"): + return PrometheusInfo( + ip=networking.int_to_ip(result.value["ip"]), + ip_type=result.value["ip_type"], + port=result.value["port"], + version=result.value["version"], + block=result.value["block"], + ) + return None + + # Community uses this method + def subnet_exists(self, netuid: int, block: Optional[int] = None) -> bool: + """ + Checks if a subnet with the specified unique identifier (netuid) exists within the Bittensor network. + + Args: + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number at which to check the subnet's existence. + + Returns: + bool: ``True`` if the subnet exists, False otherwise. + + This function is critical for verifying the presence of specific subnets in the network, enabling a deeper understanding of the network's structure and composition. + """ + _result = self.query_subtensor("NetworksAdded", block, [netuid]) + return getattr(_result, "value", False) + + # Metagraph uses this method + def bonds( + self, netuid: int, block: Optional[int] = None + ) -> list[tuple[int, list[tuple[int, int]]]]: + """ + Retrieves the bond distribution set by neurons within a specific subnet of the Bittensor network. Bonds represent the investments or commitments made by neurons in one another, indicating a level of trust and perceived value. This bonding mechanism is integral to the network's market-based approach to measuring and rewarding machine intelligence. + + Args: + netuid (int): The network UID of the subnet to query. + block (Optional[int]): The blockchain block number for the query. + + Returns: + list[tuple[int, list[tuple[int, int]]]]: A list of tuples mapping each neuron's UID to its bonds with other neurons. + + Understanding bond distributions is crucial for analyzing the trust dynamics and market behavior within the subnet. 
It reflects how neurons recognize and invest in each other's intelligence and contributions, supporting diverse and niche systems within the Bittensor ecosystem. + """ + b_map = [] + b_map_encoded = self.query_map_subtensor( + name="Bonds", block=block, params=[netuid] + ) + if b_map_encoded.records: + for uid, b in b_map_encoded: + b_map.append((uid.serialize(), b.serialize())) + + return b_map + + # Metagraph uses this method + def neurons(self, netuid: int, block: Optional[int] = None) -> list["NeuronInfo"]: + """ + Retrieves a list of all neurons within a specified subnet of the Bittensor network. This function provides a snapshot of the subnet's neuron population, including each neuron's attributes and network interactions. + + Args: + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number for the query. + + Returns: + list[bittensor.core.chain_data.neuron_info.NeuronInfo]: A list of NeuronInfo objects detailing each neuron's characteristics in the subnet. + + Understanding the distribution and status of neurons within a subnet is key to comprehending the network's decentralized structure and the dynamics of its consensus and governance processes. + """ + neurons_lite = self.neurons_lite(netuid=netuid, block=block) + weights = self.weights(block=block, netuid=netuid) + bonds = self.bonds(block=block, netuid=netuid) + + weights_as_dict = {uid: w for uid, w in weights} + bonds_as_dict = {uid: b for uid, b in bonds} + + neurons = [ + NeuronInfo.from_weights_bonds_and_neuron_lite( + neuron_lite, weights_as_dict, bonds_as_dict + ) + for neuron_lite in neurons_lite + ] + + return neurons + + # Metagraph uses this method + def get_total_subnets(self, block: Optional[int] = None) -> Optional[int]: + """ + Retrieves the total number of subnets within the Bittensor network as of a specific blockchain block. + + Args: + block (Optional[int]): The blockchain block number for the query. 
+ + Returns: + Optional[int]: The total number of subnets in the network. + + Understanding the total number of subnets is essential for assessing the network's growth and the extent of its decentralized infrastructure. + """ + _result = self.query_subtensor("TotalNetworks", block) + return getattr(_result, "value", None) + + # Metagraph uses this method + def get_subnets(self, block: Optional[int] = None) -> list[int]: + """ + Retrieves a list of all subnets currently active within the Bittensor network. This function provides an overview of the various subnets and their identifiers. + + Args: + block (Optional[int]): The blockchain block number for the query. + + Returns: + list[int]: A list of network UIDs representing each active subnet. + + This function is valuable for understanding the network's structure and the diversity of subnets available for neuron participation and collaboration. + """ + result = self.query_map_subtensor("NetworksAdded", block) + return ( + [network[0].value for network in result.records if network[1]] + if result and hasattr(result, "records") + else [] + ) + + # Metagraph uses this method + def neurons_lite( + self, netuid: int, block: Optional[int] = None + ) -> list["NeuronInfoLite"]: + """ + Retrieves a list of neurons in a 'lite' format from a specific subnet of the Bittensor network. This function provides a streamlined view of the neurons, focusing on key attributes such as stake and network participation. + + Args: + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number for the query. + + Returns: + list[bittensor.core.chain_data.neuron_info_lite.NeuronInfoLite]: A list of simplified neuron information for the subnet. + + This function offers a quick overview of the neuron population within a subnet, facilitating efficient analysis of the network's decentralized structure and neuron dynamics. 
+ """ + hex_bytes_result = self.query_runtime_api( + runtime_api="NeuronInfoRuntimeApi", + method="get_neurons_lite", + params=[netuid], + block=block, + ) + + if hex_bytes_result is None: + return [] + + if hex_bytes_result.startswith("0x"): + bytes_result = bytes.fromhex(hex_bytes_result[2:]) + else: + bytes_result = bytes.fromhex(hex_bytes_result) + + return NeuronInfoLite.list_from_vec_u8(bytes_result) # type: ignore + + # Used in the `neurons` method which is used in metagraph.py + def weights( + self, netuid: int, block: Optional[int] = None + ) -> list[tuple[int, list[tuple[int, int]]]]: + """ + Retrieves the weight distribution set by neurons within a specific subnet of the Bittensor network. This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the network's trust and value assignment mechanisms. + + Args: + netuid (int): The network UID of the subnet to query. + block (Optional[int]): The blockchain block number for the query. + + Returns: + list[tuple[int, list[tuple[int, int]]]]: A list of tuples mapping each neuron's UID to its assigned weights. + + The weight distribution is a key factor in the network's consensus algorithm and the ranking of neurons, influencing their influence and reward allocation within the subnet. + """ + w_map = [] + w_map_encoded = self.query_map_subtensor( + name="Weights", block=block, params=[netuid] + ) + if w_map_encoded.records: + for uid, w in w_map_encoded: + w_map.append((uid.serialize(), w.serialize())) + + return w_map + + # Used by community via `transfer_extrinsic` + @networking.ensure_connected + def get_balance(self, address: str, block: Optional[int] = None) -> "Balance": + """ + Retrieves the token balance of a specific address within the Bittensor network. This function queries the blockchain to determine the amount of Tao held by a given account. + + Args: + address (str): The Substrate address in ``ss58`` format. 
+ block (Optional[int]): The blockchain block number at which to perform the query. + + Returns: + bittensor.utils.balance.Balance: The account balance at the specified block, represented as a Balance object. + + This function is important for monitoring account holdings and managing financial transactions within the Bittensor ecosystem. It helps in assessing the economic status and capacity of network participants. + """ + try: + + @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging) + def make_substrate_call_with_retry(): + return self.substrate.query( + module="System", + storage_function="Account", + params=[address], + block_hash=( + None if block is None else self.substrate.get_block_hash(block) + ), + ) + + result = make_substrate_call_with_retry() + + except RemainingScaleBytesNotEmptyException: + logging.error( + "Received a corrupted message. This likely points to an error with the network or subnet." + ) + return Balance(1000) + return Balance(result.value["data"]["free"]) + + # Used in community via `bittensor.core.subtensor.Subtensor.transfer` + @networking.ensure_connected + def get_transfer_fee( + self, wallet: "Wallet", dest: str, value: Union["Balance", float, int] + ) -> "Balance": + """ + Calculates the transaction fee for transferring tokens from a wallet to a specified destination address. This function simulates the transfer to estimate the associated cost, taking into account the current network conditions and transaction complexity. + + Args: + wallet (bittensor_wallet.Wallet): The wallet from which the transfer is initiated. + dest (str): The ``SS58`` address of the destination account. + value (Union[bittensor.utils.balance.Balance, float, int]): The amount of tokens to be transferred, specified as a Balance object, or in Tao (float) or Rao (int) units. + + Returns: + bittensor.utils.balance.Balance: The estimated transaction fee for the transfer, represented as a Balance object. 
+ + Estimating the transfer fee is essential for planning and executing token transactions, ensuring that the wallet has sufficient funds to cover both the transfer amount and the associated costs. This function provides a crucial tool for managing financial operations within the Bittensor network. + """ + if isinstance(value, float): + value = Balance.from_tao(value) + elif isinstance(value, int): + value = Balance.from_rao(value) + + if isinstance(value, Balance): + call = self.substrate.compose_call( + call_module="Balances", + call_function="transfer_allow_death", + call_params={"dest": dest, "value": value.rao}, + ) + + try: + payment_info = self.substrate.get_payment_info( + call=call, keypair=wallet.coldkeypub + ) + except Exception as e: + settings.bt_console.print( + f":cross_mark: [red]Failed to get payment info[/red]:[bold white]\n {e}[/bold white]" + ) + payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao + + fee = Balance.from_rao(payment_info["partialFee"]) + return fee + else: + fee = Balance.from_rao(int(2e7)) + logging.error( + "To calculate the transaction fee, the value must be Balance, float, or int. Received type: %s. Fee " + "is %s", + type(value), + 2e7, + ) + return fee + + # Used in community via `bittensor.core.subtensor.Subtensor.transfer` + def get_existential_deposit( + self, block: Optional[int] = None + ) -> Optional["Balance"]: + """ + Retrieves the existential deposit amount for the Bittensor blockchain. The existential deposit is the minimum amount of TAO required for an account to exist on the blockchain. Accounts with balances below this threshold can be reaped to conserve network resources. + + Args: + block (Optional[int]): Block number at which to query the deposit amount. If ``None``, the current block is used. + + Returns: + Optional[bittensor.utils.balance.Balance]: The existential deposit amount, or ``None`` if the query fails. 
+ + The existential deposit is a fundamental economic parameter in the Bittensor network, ensuring efficient use of storage and preventing the proliferation of dust accounts. + """ + result = self.query_constant( + module_name="Balances", constant_name="ExistentialDeposit", block=block + ) + if result is None or not hasattr(result, "value"): + return None + return Balance.from_rao(result.value) + + # Community uses this method + def commit_weights( + self, + wallet: "Wallet", + netuid: int, + salt: list[int], + uids: Union[NDArray[np.int64], list], + weights: Union[NDArray[np.int64], list], + version_key: int = settings.version_as_int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + prompt: bool = False, + max_retries: int = 5, + ) -> tuple[bool, str]: + """ + Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. + This action serves as a commitment or snapshot of the neuron's current weight distribution. + + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights. + netuid (int): The unique identifier of the subnet. + salt (list[int]): list of randomly generated integers as salt to generated weighted hash. + uids (np.ndarray): NumPy array of neuron UIDs for which weights are being committed. + weights (np.ndarray): NumPy array of weight values corresponding to each UID. + version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. + prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. + max_retries (int): The number of maximum attempts to commit weights. Default is ``5``. 
+ + Returns: + tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string + value describing the success or potential error. + + This function allows neurons to create a tamper-proof record of their weight distribution at a specific point in time, + enhancing transparency and accountability within the Bittensor network. + """ + retries = 0 + success = False + message = "No attempt made. Perhaps it is too soon to commit weights!" + + logging.info( + f"Committing weights with params: netuid={netuid}, uids={uids}, weights={weights}, version_key={version_key}" + ) + + # Generate the hash of the weights + commit_hash = generate_weight_hash( + address=wallet.hotkey.ss58_address, + netuid=netuid, + uids=list(uids), + values=list(weights), + salt=salt, + version_key=version_key, + ) + + logging.info(f"Commit Hash: {commit_hash}") + + while retries < max_retries: + try: + success, message = commit_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + commit_hash=commit_hash, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + if success: + break + except Exception as e: + logging.error(f"Error committing weights: {e}") + finally: + retries += 1 + + return success, message + + # Community uses this method + def reveal_weights( + self, + wallet: "Wallet", + netuid: int, + uids: Union[NDArray[np.int64], list], + weights: Union[NDArray[np.int64], list], + salt: Union[NDArray[np.int64], list], + version_key: int = settings.version_as_int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + prompt: bool = False, + max_retries: int = 5, + ) -> tuple[bool, str]: + """ + Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. + This action serves as a revelation of the neuron's previously committed weight distribution. 
+ + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron revealing the weights. + netuid (int): The unique identifier of the subnet. + uids (np.ndarray): NumPy array of neuron UIDs for which weights are being revealed. + weights (np.ndarray): NumPy array of weight values corresponding to each UID. + salt (np.ndarray): NumPy array of salt values corresponding to the hash function. + version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version``. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. + prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. + max_retries (int): The number of maximum attempts to reveal weights. Default is ``5``. + + Returns: + tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string + value describing the success or potential error. + + This function allows neurons to reveal their previously committed weight distribution, ensuring transparency + and accountability within the Bittensor network. + """ + + retries = 0 + success = False + message = "No attempt made. Perhaps it is too soon to reveal weights!" 
+ + while retries < max_retries: + try: + success, message = reveal_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + uids=list(uids), + weights=list(weights), + salt=list(salt), + version_key=version_key, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + if success: + break + except Exception as e: + logging.error(f"Error revealing weights: {e}") + finally: + retries += 1 + + return success, message + + # Subnet 27 uses this method + _do_serve_prometheus = do_serve_prometheus + # Subnet 27 uses this method name + _do_serve_axon = do_serve_axon diff --git a/bittensor/core/synapse.py b/bittensor/core/synapse.py new file mode 100644 index 0000000000..a96a92e1a1 --- /dev/null +++ b/bittensor/core/synapse.py @@ -0,0 +1,852 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import base64 +import json +import sys +import warnings +from typing import cast, Any, ClassVar, Optional, Union + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + field_validator, + model_validator, +) + +from bittensor.utils import get_hash +from bittensor.utils.btlogging import logging + + +def get_size(obj: Any, seen: Optional[set] = None) -> int: + """ + Recursively finds size of objects. + + This function traverses every item of a given object and sums their sizes to compute the total size. + + Args: + obj (Any): The object to get the size of. + seen (Optional[set]): Set of object ids that have been calculated. + + Returns: + int: The total size of the object. + + """ + size = sys.getsizeof(obj) + if seen is None: + seen = set() + obj_id = id(obj) + if obj_id in seen: + return 0 + # Important mark as seen *before* entering recursion to gracefully handle + # self-referential objects + seen.add(obj_id) + if isinstance(obj, dict): + size += sum([get_size(v, seen) for v in obj.values()]) + size += sum([get_size(k, seen) for k in obj.keys()]) + elif hasattr(obj, "__dict__"): + size += get_size(obj.__dict__, seen) + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)): + size += sum([get_size(i, seen) for i in obj]) + return size + + +def cast_int(raw: str) -> int: + """ + Converts a string to an integer, if the string is not ``None``. + + This function attempts to convert a string to an integer. If the string is ``None``, it simply returns ``None``. + + Args: + raw (str): The string to convert. + + Returns: + int or None: The converted integer, or ``None`` if the input was ``None``. + + """ + return int(raw) if raw is not None else raw + + +def cast_float(raw: str) -> Optional[float]: + """ + Converts a string to a float, if the string is not ``None``. + + This function attempts to convert a string to a float. If the string is ``None``, it simply returns ``None``. + + Args: + raw (str): The string to convert. 
+ + Returns: + float or None: The converted float, or ``None`` if the input was ``None``. + + """ + return float(raw) if raw is not None else raw + + +class TerminalInfo(BaseModel): + """ + TerminalInfo encapsulates detailed information about a network synapse (node) involved in a communication process. + + This class serves as a metadata carrier, + providing essential details about the state and configuration of a terminal during network interactions. This is a crucial class in the Bittensor framework. + + The TerminalInfo class contains information such as HTTP status codes and messages, processing times, + IP addresses, ports, Bittensor version numbers, and unique identifiers. These details are vital for + maintaining network reliability, security, and efficient data flow within the Bittensor network. + + This class includes Pydantic validators and root validators to enforce data integrity and format. It is + designed to be used natively within Synapses, so that you will not need to call this directly, but rather + is used as a helper class for Synapses. + + Args: + status_code (int): HTTP status code indicating the result of a network request. Essential for identifying the outcome of network interactions. + status_message (str): Descriptive message associated with the status code, providing additional context about the request's result. + process_time (float): Time taken by the terminal to process the call, important for performance monitoring and optimization. + ip (str): IP address of the terminal, crucial for network routing and data transmission. + port (int): Network port used by the terminal, key for establishing network connections. + version (int): Bittensor version running on the terminal, ensuring compatibility between different nodes in the network. + nonce (int): Unique, monotonically increasing number for each terminal, aiding in identifying and ordering network interactions. 
+ uuid (str): Unique identifier for the terminal, fundamental for network security and identification. + hotkey (str): Encoded hotkey string of the terminal wallet, important for transaction and identity verification in the network. + signature (str): Digital signature verifying the tuple of nonce, axon_hotkey, dendrite_hotkey, and uuid, critical for ensuring data authenticity and security. + + Usage:: + + # Creating a TerminalInfo instance + from bittensor.core.synapse import TerminalInfo + + terminal_info = TerminalInfo( + status_code=200, + status_message="Success", + process_time=0.1, + ip="198.123.23.1", + port=9282, + version=111, + nonce=111111, + uuid="5ecbd69c-1cec-11ee-b0dc-e29ce36fec1a", + hotkey="5EnjDGNqqWnuL2HCAdxeEtN2oqtXZw6BMBe936Kfy2PFz1J1", + signature="0x0813029319030129u4120u10841824y0182u091u230912u" + ) + + # Accessing TerminalInfo attributes + ip_address = terminal_info.ip + processing_duration = terminal_info.process_time + + # TerminalInfo can be used to monitor and verify network interactions, ensuring proper communication and security within the Bittensor network. + + TerminalInfo plays a pivotal role in providing transparency and control over network operations, making it an indispensable tool for developers and users interacting with the Bittensor ecosystem. 
+ """ + + model_config = ConfigDict(validate_assignment=True) + + # The HTTP status code from: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status + status_code: Optional[int] = Field( + title="status_code", + description="The HTTP status code from: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status", + examples=[200], + default=None, + frozen=False, + ) + + # The HTTP status code from: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status + status_message: Optional[str] = Field( + title="status_message", + description="The status_message associated with the status_code", + examples=["Success"], + default=None, + frozen=False, + ) + + # Process time on this terminal side of call + process_time: Optional[float] = Field( + title="process_time", + description="Process time on this terminal side of call", + examples=[0.1], + default=None, + frozen=False, + ) + + # The terminal ip. + ip: Optional[str] = Field( + title="ip", + description="The ip of the axon receiving the request.", + examples=["198.123.23.1"], + default=None, + frozen=False, + ) + + # The host port of the terminal. + port: Optional[int] = Field( + title="port", + description="The port of the terminal.", + examples=["9282"], + default=None, + frozen=False, + ) + + # The bittensor version on the terminal as an int. + version: Optional[int] = Field( + title="version", + description="The bittensor version on the axon as str(int)", + examples=[111], + default=None, + frozen=False, + ) + + # A Unix timestamp to associate with the terminal + nonce: Optional[int] = Field( + title="nonce", + description="A Unix timestamp that prevents replay attacks", + examples=[111111], + default=None, + frozen=False, + ) + + # A unique identifier associated with the terminal, set on the axon side. 
+ uuid: Optional[str] = Field( + title="uuid", + description="A unique identifier associated with the terminal", + examples=["5ecbd69c-1cec-11ee-b0dc-e29ce36fec1a"], + default=None, + frozen=False, + ) + + # The bittensor version on the terminal as an int. + hotkey: Optional[str] = Field( + title="hotkey", + description="The ss58 encoded hotkey string of the terminal wallet.", + examples=["5EnjDGNqqWnuL2HCAdxeEtN2oqtXZw6BMBe936Kfy2PFz1J1"], + default=None, + frozen=False, + ) + + # A signature verifying the tuple (axon_nonce, axon_hotkey, dendrite_hotkey, axon_uuid) + signature: Optional[str] = Field( + title="signature", + description="A signature verifying the tuple (nonce, axon_hotkey, dendrite_hotkey, uuid)", + examples=["0x0813029319030129u4120u10841824y0182u091u230912u"], + default=None, + frozen=False, + ) + + # Extract the process time on this terminal side of call as a float + _extract_process_time = field_validator("process_time", mode="before")(cast_float) + + # Extract the host port of the terminal as an int + _extract_port = field_validator("port", mode="before")(cast_int) + + # Extract the bittensor version on the terminal as an int. + _extract_version = field_validator("version", mode="before")(cast_int) + + # Extract the Unix timestamp associated with the terminal as an int + _extract_nonce = field_validator("nonce", mode="before")(cast_int) + + # Extract the HTTP status code as an int + _extract_status_code = field_validator("status_code", mode="before")(cast_int) + + +class Synapse(BaseModel): + """ + Represents a Synapse in the Bittensor network, serving as a communication schema between neurons (nodes). + + Synapses ensure the format and correctness of transmission tensors according to the Bittensor protocol. + Each Synapse type is tailored for a specific machine learning (ML) task, following unique compression and + communication processes. This helps maintain sanitized, correct, and useful information flow across the network. 
+ + The Synapse class encompasses essential network properties such as HTTP route names, timeouts, request sizes, and + terminal information. It also includes methods for serialization, deserialization, attribute setting, and hash + computation, ensuring secure and efficient data exchange in the network. + + The class includes Pydantic validators and root validators to enforce data integrity and format. Additionally, + properties like ``is_success``, ``is_failure``, ``is_timeout``, etc., provide convenient status checks based on + dendrite responses. + + Think of Bittensor Synapses as glorified pydantic wrappers that have been designed to be used in a distributed + network. They provide a standardized way to communicate between neurons, and are the primary mechanism for + communication between neurons in Bittensor. + + Key Features: + + 1. HTTP Route Name (``name`` attribute): + Enables the identification and proper routing of requests within the network. Essential for users + defining custom routes for specific machine learning tasks. + + 2. Query Timeout (``timeout`` attribute): + Determines the maximum duration allowed for a query, ensuring timely responses and network + efficiency. Crucial for users to manage network latency and response times, particularly in + time-sensitive applications. + + 3. Request Sizes (``total_size``, ``header_size`` attributes): + Keeps track of the size of request bodies and headers, ensuring efficient data transmission without + overloading the network. Important for users to monitor and optimize the data payload, especially + in bandwidth-constrained environments. + + 4. Terminal Information (``dendrite``, ``axon`` attributes): + Stores information about the dendrite (receiving end) and axon (sending end), facilitating communication + between nodes. Users can access detailed information about the communication endpoints, aiding in + debugging and network analysis. + + 5. 
Body Hash Computation (``computed_body_hash``, ``required_hash_fields``): + Ensures data integrity and security by computing hashes of transmitted data. Provides users with a + mechanism to verify data integrity and detect any tampering during transmission. + It is recommended that names of fields in `required_hash_fields` are listed in the order they are + defined in the class. + + 6. Serialization and Deserialization Methods: + Facilitates the conversion of Synapse objects to and from a format suitable for network transmission. + Essential for users who need to customize data formats for specific machine learning models or tasks. + + 7. Status Check Properties (``is_success``, ``is_failure``, ``is_timeout``, etc.): + Provides quick and easy methods to check the status of a request, improving error handling and + response management. Users can efficiently handle different outcomes of network requests, enhancing + the robustness of their applications. + + Example usage:: + + # Creating a Synapse instance with default values + from bittensor.core.synapse import Synapse + + synapse = Synapse() + + # Setting properties and input + synapse.timeout = 15.0 + synapse.name = "MySynapse" + + # Not setting fields that are not defined in your synapse class will result in an error, e.g.: + synapse.dummy_input = 1 # This will raise an error because dummy_input is not defined in the Synapse class + + # Get a dictionary of headers and body from the synapse instance + synapse_dict = synapse.model_dump_json() + + # Get a dictionary of headers from the synapse instance + headers = synapse.to_headers() + + # Reconstruct the synapse from headers using the classmethod 'from_headers' + synapse = Synapse.from_headers(headers) + + # Deserialize synapse after receiving it over the network, controlled by `deserialize` method + deserialized_synapse = synapse.deserialize() + + # Checking the status of the request + if synapse.is_success: + print("Request succeeded") + + # Checking and setting 
the status of the request + print(synapse.axon.status_code) + synapse.axon.status_code = 408 # Timeout + + Args: + name (str): HTTP route name, set on :func:`axon.attach`. + timeout (float): Total query length, set by the dendrite terminal. + total_size (int): Total size of request body in bytes. + header_size (int): Size of request header in bytes. + dendrite (:func:`TerminalInfo`): Information about the dendrite terminal. + axon (:func:`TerminalInfo`): Information about the axon terminal. + computed_body_hash (str): Computed hash of the request body. + required_hash_fields (list[str]): Fields required to compute the body hash. + + Methods: + deserialize: Custom deserialization logic for subclasses. + __setattr__: Override method to make ``required_hash_fields`` read-only. + get_total_size: Calculates and returns the total size of the object. + to_headers: Constructs a dictionary of headers from instance properties. + body_hash: Computes a SHA3-256 hash of the serialized body. + parse_headers_to_inputs: Parses headers to construct an inputs dictionary. + from_headers: Creates an instance from a headers dictionary. + + This class is a cornerstone in the Bittensor framework, providing the necessary tools for secure, efficient, and + standardized communication in a decentralized environment. + """ + + model_config = ConfigDict(validate_assignment=True) + + def deserialize(self) -> "Synapse": + """ + Deserializes the Synapse object. + + This method is intended to be overridden by subclasses for custom deserialization logic. + In the context of the Synapse superclass, this method simply returns the instance itself. + When inheriting from this class, subclasses should provide their own implementation for + deserialization if specific deserialization behavior is desired. + + By default, if a subclass does not provide its own implementation of this method, the + Synapse's deserialize method will be used, returning the object instance as-is. 
+ + In its default form, this method simply returns the instance of the Synapse itself without any modifications. Subclasses of Synapse can override this method to add specific deserialization behaviors, such as converting serialized data back into complex object types or performing additional data integrity checks. + + Example:: + + class CustomSynapse(Synapse): + additional_data: str + + def deserialize(self) -> "CustomSynapse": + # Custom deserialization logic + # For example, decoding a base64 encoded string in 'additional_data' + if self.additional_data: + self.additional_data = base64.b64decode(self.additional_data).decode('utf-8') + return self + + serialized_data = '{"additional_data": "SGVsbG8gV29ybGQ="}' # Base64 for 'Hello World' + custom_synapse = CustomSynapse.model_validate_json(serialized_data) + deserialized_synapse = custom_synapse.deserialize() + + # deserialized_synapse.additional_data would now be 'Hello World' + + Returns: + Synapse: The deserialized Synapse object. In this default implementation, it returns the object itself. + """ + return self + + @model_validator(mode="before") + def set_name_type(cls, values: dict) -> dict: + values["name"] = cls.__name__ # type: ignore + return values + + # Defines the http route name which is set on axon.attach( callable( request: RequestName )) + name: Optional[str] = Field( + title="name", + description="Defines the http route name which is set on axon.attach( callable( request: RequestName ))", + examples=["Forward"], + frozen=False, + default=None, + repr=False, + ) + + # The call timeout, set by the dendrite terminal. + timeout: Optional[float] = Field( + title="timeout", + description="Defines the total query length.", + examples=[12.0], + default=12.0, + frozen=False, + repr=False, + ) + + # The call timeout, set by the dendrite terminal. 
+ total_size: Optional[int] = Field( + title="total_size", + description="Total size of request body in bytes.", + examples=[1000], + default=0, + frozen=False, + repr=False, + ) + + # The call timeout, set by the dendrite terminal. + header_size: Optional[int] = Field( + title="header_size", + description="Size of request header in bytes.", + examples=[1000], + default=0, + frozen=False, + repr=False, + ) + + # The dendrite Terminal Information. + dendrite: Optional[TerminalInfo] = Field( + title="dendrite", + description="Dendrite Terminal Information", + examples=["TerminalInfo"], + default=TerminalInfo(), + frozen=False, + repr=False, + ) + + # A axon terminal information + axon: Optional[TerminalInfo] = Field( + title="axon", + description="Axon Terminal Information", + examples=["TerminalInfo"], + default=TerminalInfo(), + frozen=False, + repr=False, + ) + + computed_body_hash: Optional[str] = Field( + title="computed_body_hash", + description="The computed body hash of the request.", + examples=["0x0813029319030129u4120u10841824y0182u091u230912u"], + default="", + frozen=True, + repr=False, + ) + + required_hash_fields: ClassVar[tuple[str, ...]] = () + + _extract_total_size = field_validator("total_size", mode="before")(cast_int) + + _extract_header_size = field_validator("header_size", mode="before")(cast_int) + + _extract_timeout = field_validator("timeout", mode="before")(cast_float) + + def __setattr__(self, name: str, value: Any): + """ + Override the :func:`__setattr__` method to make the ``required_hash_fields`` property read-only. + + This is a security mechanism such that the ``required_hash_fields`` property cannot be + overridden by the user or malicious code. + """ + if name == "body_hash": + raise AttributeError( + "body_hash property is read-only and cannot be overridden." + ) + super().__setattr__(name, value) + + def get_total_size(self) -> int: + """ + Get the total size of the current object. 
+ + This method first calculates the size of the current object, then assigns it + to the instance variable :func:`self.total_size` and finally returns this value. + + Returns: + int: The total size of the current object. + """ + self.total_size = get_size(self) + return self.total_size + + @property + def is_success(self) -> bool: + """ + Checks if the dendrite's status code indicates success. + + This method returns ``True`` if the status code of the dendrite is ``200``, + which typically represents a successful HTTP request. + + Returns: + bool: ``True`` if dendrite's status code is ``200``, ``False`` otherwise. + """ + return self.dendrite is not None and self.dendrite.status_code == 200 + + @property + def is_failure(self) -> bool: + """ + Checks if the dendrite's status code indicates failure. + + This method returns ``True`` if the status code of the dendrite is not ``200``, + which would mean the HTTP request was not successful. + + Returns: + bool: ``True`` if dendrite's status code is not ``200``, ``False`` otherwise. + """ + return self.dendrite is not None and self.dendrite.status_code != 200 + + @property + def is_timeout(self) -> bool: + """ + Checks if the dendrite's status code indicates a timeout. + + This method returns ``True`` if the status code of the dendrite is ``408``, + which is the HTTP status code for a request timeout. + + Returns: + bool: ``True`` if dendrite's status code is ``408``, ``False`` otherwise. + """ + return self.dendrite is not None and self.dendrite.status_code == 408 + + @property + def is_blacklist(self) -> bool: + """ + Checks if the dendrite's status code indicates a blacklisted request. + + This method returns ``True`` if the status code of the dendrite is ``403``, + which is the HTTP status code for a forbidden request. + + Returns: + bool: ``True`` if dendrite's status code is ``403``, ``False`` otherwise. 
+ """ + return self.dendrite is not None and self.dendrite.status_code == 403 + + @property + def failed_verification(self) -> bool: + """ + Checks if the dendrite's status code indicates failed verification. + + This method returns ``True`` if the status code of the dendrite is ``401``, + which is the HTTP status code for unauthorized access. + + Returns: + bool: ``True`` if dendrite's status code is ``401``, ``False`` otherwise. + """ + return self.dendrite is not None and self.dendrite.status_code == 401 + + def get_required_fields(self): + """ + Get the required fields from the model's JSON schema. + """ + schema = self.__class__.model_json_schema() + return schema.get("required", []) + + def to_headers(self) -> dict: + """ + Converts the state of a Synapse instance into a dictionary of HTTP headers. + + This method is essential for + packaging Synapse data for network transmission in the Bittensor framework, ensuring that each key aspect of + the Synapse is represented in a format suitable for HTTP communication. + + Process: + + 1. Basic Information: It starts by including the ``name`` and ``timeout`` of the Synapse, which are fundamental for identifying the query and managing its lifespan on the network. + 2. Complex Objects: The method serializes the ``axon`` and ``dendrite`` objects, if present, into strings. This serialization is crucial for preserving the state and structure of these objects over the network. + 3. Encoding: Non-optional complex objects are serialized and encoded in base64, making them safe for HTTP transport. + 4. Size Metrics: The method calculates and adds the size of headers and the total object size, providing valuable information for network bandwidth management. 
+ + Example Usage:: + + synapse = Synapse(name="ExampleSynapse", timeout=30) + headers = synapse.to_headers() + # headers now contains a dictionary representing the Synapse instance + + Returns: + dict: A dictionary containing key-value pairs representing the Synapse's properties, suitable for HTTP communication. + """ + # Initializing headers with 'name' and 'timeout' + headers = {"name": self.name, "timeout": str(self.timeout)} + + # Adding headers for 'axon' and 'dendrite' if they are not None + if self.axon: + headers.update( + { + f"bt_header_axon_{k}": str(v) + for k, v in self.axon.model_dump().items() + if v is not None + } + ) + if self.dendrite: + headers.update( + { + f"bt_header_dendrite_{k}": str(v) + for k, v in self.dendrite.model_dump().items() + if v is not None + } + ) + + # Getting the fields of the instance + instance_fields = self.model_dump() + + # Iterating over the fields of the instance + for field, value in instance_fields.items(): + # If the object is not optional, serializing it, encoding it, and adding it to the headers + required = self.get_required_fields() + + # Skipping the field if it's already in the headers or its value is None + if field in headers or value is None: + continue + + elif required and field in required: + try: + # create an empty (dummy) instance of type(value) to pass pydantic validation on the axon side + serialized_value = json.dumps(value.__class__.__call__()) + encoded_value = base64.b64encode(serialized_value.encode()).decode( + "utf-8" + ) + headers[f"bt_header_input_obj_{field}"] = encoded_value + except TypeError as e: + raise ValueError( + f"Error serializing {field} with value {value}. Objects must be json serializable." 
+ ) from e + + # Adding the size of the headers and the total size to the headers + headers["header_size"] = str(sys.getsizeof(headers)) + headers["total_size"] = str(self.get_total_size()) + headers["computed_body_hash"] = self.body_hash + + return headers + + @property + def body_hash(self) -> str: + """ + Computes a SHA3-256 hash of the serialized body of the Synapse instance. + + This hash is used to + ensure the data integrity and security of the Synapse instance when it's transmitted across the + network. It is a crucial feature for verifying that the data received is the same as the data sent. + + Process: + + 1. Iterates over each required field as specified in ``required_hash_fields``. + 2. Concatenates the string representation of these fields. + 3. Applies SHA3-256 hashing to the concatenated string to produce a unique fingerprint of the data. + + Example:: + + synapse = Synapse(name="ExampleRoute", timeout=10) + hash_value = synapse.body_hash + # hash_value is the SHA3-256 hash of the serialized body of the Synapse instance + + Returns: + str: The SHA3-256 hash as a hexadecimal string, providing a fingerprint of the Synapse instance's data for integrity checks. + """ + hashes = [] + + hash_fields_field = self.model_fields.get("required_hash_fields") + instance_fields = None + if hash_fields_field: + warnings.warn( + "The 'required_hash_fields' field handling deprecated and will be removed. 
" + "Please update Synapse class definition to use 'required_hash_fields' class variable instead.", + DeprecationWarning, + ) + required_hash_fields = hash_fields_field.default + + if required_hash_fields: + instance_fields = self.model_dump() + # Preserve backward compatibility in which fields will added in .model_dump() order + # instead of the order one from `self.required_hash_fields` + required_hash_fields = [ + field for field in instance_fields if field in required_hash_fields + ] + + # Hack to cache the required hash fields names + if len(required_hash_fields) == len(required_hash_fields): + self.__class__.required_hash_fields = tuple(required_hash_fields) + else: + required_hash_fields = self.__class__.required_hash_fields + + if required_hash_fields: + instance_fields = instance_fields or self.model_dump() + for field in required_hash_fields: + hashes.append(get_hash(str(instance_fields[field]))) + + return get_hash("".join(hashes)) + + @classmethod + def parse_headers_to_inputs(cls, headers: dict) -> dict: + """ + Interprets and transforms a given dictionary of headers into a structured dictionary, facilitating the reconstruction of Synapse objects. + + This method is essential for parsing network-transmitted + data back into a Synapse instance, ensuring data consistency and integrity. + + Process: + + 1. Separates headers into categories based on prefixes (``axon``, ``dendrite``, etc.). + 2. Decodes and deserializes ``input_obj`` headers into their original objects. + 3. Assigns simple fields directly from the headers to the input dictionary. + + Example:: + + received_headers = { + 'bt_header_axon_address': '127.0.0.1', + 'bt_header_dendrite_port': '8080', + # Other headers... 
+ } + inputs = Synapse.parse_headers_to_inputs(received_headers) + # inputs now contains a structured representation of Synapse properties based on the headers + + Note: + This is handled automatically when calling :func:`Synapse.from_headers(headers)` and does not need to be called directly. + + Args: + headers (dict): The headers dictionary to parse. + + Returns: + dict: A structured dictionary representing the inputs for constructing a Synapse instance. + """ + + # Initialize the input dictionary with empty sub-dictionaries for 'axon' and 'dendrite' + inputs_dict: dict[str, Union[dict, Optional[str]]] = { + "axon": {}, + "dendrite": {}, + } + + # Iterate over each item in the headers + for key, value in headers.items(): + # Handle 'axon' headers + if "bt_header_axon_" in key: + try: + new_key = key.split("bt_header_axon_")[1] + axon_dict = cast(dict, inputs_dict["axon"]) + axon_dict[new_key] = value + except Exception as e: + logging.error(f"Error while parsing 'axon' header {key}: {str(e)}") + continue + # Handle 'dendrite' headers + elif "bt_header_dendrite_" in key: + try: + new_key = key.split("bt_header_dendrite_")[1] + dendrite_dict = cast(dict, inputs_dict["dendrite"]) + dendrite_dict[new_key] = value + except Exception as e: + logging.error(f"Error while parsing 'dendrite' header {key}: {e}") + continue + # Handle 'input_obj' headers + elif "bt_header_input_obj" in key: + try: + new_key = key.split("bt_header_input_obj_")[1] + # Skip if the key already exists in the dictionary + if new_key in inputs_dict: + continue + # Decode and load the serialized object + inputs_dict[new_key] = json.loads( + base64.b64decode(value.encode()).decode("utf-8") + ) + except json.JSONDecodeError as e: + logging.error( + f"Error while json decoding 'input_obj' header {key}: {e}" + ) + continue + except Exception as e: + logging.error(f"Error while parsing 'input_obj' header {key}: {e}") + continue + else: + pass # TODO: log unexpected keys + + # Assign the remaining known 
headers directly + inputs_dict["timeout"] = headers.get("timeout", None) + inputs_dict["name"] = headers.get("name", None) + inputs_dict["header_size"] = headers.get("header_size", None) + inputs_dict["total_size"] = headers.get("total_size", None) + inputs_dict["computed_body_hash"] = headers.get("computed_body_hash", None) + + return inputs_dict + + @classmethod + def from_headers(cls, headers: dict) -> "Synapse": + """ + Constructs a new Synapse instance from a given headers dictionary, enabling the re-creation of the Synapse's state as it was prior to network transmission. + + This method is a key part of the + deserialization process in the Bittensor network, allowing nodes to accurately reconstruct Synapse + objects from received data. + + Example:: + + received_headers = { + 'bt_header_axon_address': '127.0.0.1', + 'bt_header_dendrite_port': '8080', + # Other headers... + } + synapse = Synapse.from_headers(received_headers) + # synapse is a new Synapse instance reconstructed from the received headers + + Args: + headers (dict): The dictionary of headers containing serialized Synapse information. + + Returns: + bittensor.core.synapse.Synapse: A new instance of Synapse, reconstructed from the parsed header information, replicating the original instance's state. 
+ """ + + # Get the inputs dictionary from the headers + input_dict = cls.parse_headers_to_inputs(headers) + + # Use the dictionary unpacking operator to pass the inputs to the class constructor + synapse = cls(**input_dict) + + return synapse diff --git a/bittensor/core/tensor.py b/bittensor/core/tensor.py new file mode 100644 index 0000000000..4ec71cc44f --- /dev/null +++ b/bittensor/core/tensor.py @@ -0,0 +1,249 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
import base64
from typing import Optional, Union

import msgpack
import msgpack_numpy
import numpy as np
from pydantic import ConfigDict, BaseModel, Field, field_validator

from bittensor.utils.registration import torch, use_torch


class DTypes(dict):
    """Mapping from dtype-name strings (``"float32"``, ``"torch.int64"``, ...) to the
    corresponding numpy/torch dtype objects.

    Torch entries are merged lazily on first lookup so this module can be imported
    without torch being importable when torch support is not in use.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.torch: bool = False  # True once the torch dtypes have been merged in.
        self.update(
            {
                "float16": np.float16,
                "float32": np.float32,
                "float64": np.float64,
                "uint8": np.uint8,
                "int16": np.int16,
                "int8": np.int8,
                "int32": np.int32,
                "int64": np.int64,
                "bool": bool,
            }
        )

    def __getitem__(self, key):
        self._add_torch()
        return super().__getitem__(key)

    def __contains__(self, key):
        self._add_torch()
        return super().__contains__(key)

    def _add_torch(self):
        """Merge the torch dtype entries exactly once (lazy initialization)."""
        if self.torch is False:
            torch_dtypes = {
                "torch.float16": torch.float16,
                "torch.float32": torch.float32,
                "torch.float64": torch.float64,
                "torch.uint8": torch.uint8,
                "torch.int16": torch.int16,
                "torch.int8": torch.int8,
                "torch.int32": torch.int32,
                "torch.int64": torch.int64,
                "torch.bool": torch.bool,
            }
            self.update(torch_dtypes)
            self.torch = True


dtypes = DTypes()


def cast_dtype(raw: Union[None, np.dtype, "torch.dtype", str]) -> Optional[str]:
    """
    Casts the raw value to a string representing the numpy or torch data type.

    Args:
        raw (Union[None, numpy.dtype, torch.dtype, str]): The raw value to cast.

    Returns:
        Optional[str]: The string representing the numpy/torch data type
            (e.g. ``"float32"`` or ``"torch.float32"``), or ``None`` if ``raw`` is falsy.

    Raises:
        Exception: If the raw value is of an invalid type.
    """
    if not raw:
        return None
    if use_torch() and isinstance(raw, torch.dtype):
        # BUGFIX: previously returned ``dtypes[raw]``, but ``dtypes`` is keyed by
        # strings, so indexing with a dtype object raised KeyError — and this function
        # is the pydantic validator for the (string) ``Tensor.dtype`` field, so it must
        # return a string. e.g. torch.float32 -> "torch.float32".
        return str(raw)
    elif isinstance(raw, np.dtype):
        # e.g. np.dtype("float32") -> "float32"
        return str(raw)
    elif isinstance(raw, str):
        if use_torch():
            assert raw in dtypes, f"{raw} not a valid torch type in dict {dtypes}"
            return raw
        else:
            assert raw in dtypes, f"{raw} not a valid numpy type in dict {dtypes}"
            return raw
    else:
        raise Exception(
            f"{raw} of type {type(raw)} does not have a valid type in Union[None, numpy.dtype, torch.dtype, str]"
        )


def cast_shape(raw: Union[None, list[int], str]) -> Optional[Union[str, list]]:
    """
    Casts the raw value to a list of ints representing the tensor shape.

    Args:
        raw (Union[None, list[int], str]): The raw value to cast. A string is expected
            to look like ``"[1, 2, 3]"``.

    Returns:
        Optional[list[int]]: The tensor shape, or ``None`` if ``raw`` is falsy.

    Raises:
        Exception: If the raw value is of an invalid type or if the list elements are not of type int.
    """
    if not raw:
        return None
    elif isinstance(raw, list):
        # An empty list is accepted as-is; otherwise elements must be ints.
        if len(raw) == 0 or isinstance(raw[0], int):
            return raw
        else:
            raise Exception(f"{raw} list elements are not of type int")
    elif isinstance(raw, str):
        # Parse "[a, b, c]" into [a, b, c].
        shape = list(map(int, raw.split("[")[1].split("]")[0].split(",")))
        return shape
    else:
        raise Exception(
            f"{raw} of type {type(raw)} does not have a valid type in Union[None, list[int], str]"
        )


class tensor:
    """Factory: ``tensor(x)`` serializes ``x`` (list / ndarray / torch.Tensor) into a :class:`Tensor`."""

    def __new__(cls, tensor: Union[list, "np.ndarray", "torch.Tensor"]):
        if isinstance(tensor, list) or isinstance(tensor, np.ndarray):
            tensor = torch.tensor(tensor) if use_torch() else np.array(tensor)
        return Tensor.serialize(tensor_=tensor)


class Tensor(BaseModel):
    """
    Represents a serialized tensor (msgpack-packed, base64-encoded buffer plus
    dtype and shape metadata).

    Args:
        buffer (Optional[str]): Tensor buffer data.
        dtype (str): Tensor data type.
        shape (list[int]): Tensor shape.
    """

    model_config = ConfigDict(validate_assignment=True)

    def tensor(self) -> Union[np.ndarray, "torch.Tensor"]:
        """Alias for :meth:`deserialize`."""
        return self.deserialize()

    def tolist(self) -> list[object]:
        """Deserialize and convert to a nested Python list."""
        return self.deserialize().tolist()

    def numpy(self) -> "np.ndarray":
        """Deserialize to a numpy array (detaching first when torch is in use)."""
        return (
            self.deserialize().detach().numpy() if use_torch() else self.deserialize()
        )

    def deserialize(self) -> Union["np.ndarray", "torch.Tensor"]:
        """
        Deserializes the Tensor object.

        Returns:
            np.array or torch.Tensor: The deserialized tensor object.

        Raises:
            Exception: If the deserialization process encounters an error.
        """
        shape = tuple(self.shape)
        buffer_bytes = base64.b64decode(self.buffer.encode("utf-8"))
        numpy_object = msgpack.unpackb(
            buffer_bytes, object_hook=msgpack_numpy.decode
        ).copy()
        if use_torch():
            torch_object = torch.as_tensor(numpy_object)
            # Reshape does not work for (0) or [0] — the sentinel shape used for
            # 0-d (scalar) tensors by serialize().
            if not (len(shape) == 1 and shape[0] == 0):
                torch_object = torch_object.reshape(shape)
            return torch_object.type(dtypes[self.dtype])
        else:
            # Reshape does not work for (0) or [0] — the scalar sentinel shape.
            if not (len(shape) == 1 and shape[0] == 0):
                numpy_object = numpy_object.reshape(shape)
            return numpy_object.astype(dtypes[self.dtype])

    @staticmethod
    def serialize(tensor_: Union["np.ndarray", "torch.Tensor"]) -> "Tensor":
        """
        Serializes the given tensor.

        Args:
            tensor_ (np.array or torch.Tensor): The tensor to serialize.

        Returns:
            :func:`Tensor`: The serialized tensor.

        Raises:
            Exception: If the serialization process encounters an error.
        """
        dtype = str(tensor_.dtype)
        shape = list(tensor_.shape)
        if len(shape) == 0:
            # A 0-d (scalar) tensor is encoded with the sentinel shape [0].
            shape = [0]
        tensor__ = tensor_.cpu().detach().numpy().copy() if use_torch() else tensor_
        data_buffer = base64.b64encode(
            msgpack.packb(tensor__, default=msgpack_numpy.encode)
        ).decode("utf-8")
        return Tensor(buffer=data_buffer, shape=shape, dtype=dtype)

    # Serialized (msgpack + base64) representation of the tensor data.
    buffer: Optional[str] = Field(
        default=None,
        title="buffer",
        description="Tensor buffer data. This field stores the serialized representation of the tensor data.",
        examples=["0x321e13edqwds231231231232131"],
        frozen=True,
        repr=False,
    )

    # Data type of the tensor, e.g. "float32" or "torch.int64".
    dtype: str = Field(
        title="dtype",
        description="Tensor data type. This field specifies the data type of the tensor, such as numpy.float32 or torch.int64.",
        examples=["np.float32"],
        frozen=True,
        repr=True,
    )

    # Dimensions of the tensor as a list of ints.
    shape: list[int] = Field(
        title="shape",
        description="Tensor shape. This field defines the dimensions of the tensor as a list of integers, such as [10, 10] for a 2D tensor with shape (10, 10).",
        examples=[10, 10],
        frozen=True,
        repr=True,
    )

    # Normalize the shape field before validation (accepts "[1, 2]" strings and lists).
    _extract_shape = field_validator("shape", mode="before")(cast_shape)

    # Normalize the dtype field before validation (accepts dtype objects and strings).
    _extract_dtype = field_validator("dtype", mode="before")(cast_dtype)
import argparse
import itertools
import logging
import os
import queue
import random
import sys
import threading
import time
import weakref
from concurrent.futures import _base
from typing import Callable

from bittensor.core.config import Config
from bittensor.core.settings import BLOCKTIME
from bittensor.utils.btlogging.defines import BITTENSOR_LOGGER_NAME

# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.

logger = logging.getLogger(BITTENSOR_LOGGER_NAME)

_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False


class _WorkItem(object):
    """A unit of work: a future plus the callable (and arguments) that resolves it."""

    def __init__(self, future, fn, start_time, args, kwargs):
        self.future = future
        self.fn = fn
        self.start_time = start_time  # Enqueue time; used to drop stale items.
        self.args = args
        self.kwargs = kwargs

    def run(self):
        """Run the given work item"""
        # Skip if the future was cancelled, or the item sat in the queue for
        # longer than one block time (stale work is dropped, not executed).
        if (not self.future.set_running_or_notify_cancel()) or (
            time.time() - self.start_time > BLOCKTIME
        ):
            return

        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException as exc:
            self.future.set_exception(exc)
            # Break a reference cycle with the exception 'exc'
            self = None
        else:
            self.future.set_result(result)


# Shutdown sentinel: sorts after every real entry (priorities are negative).
NULL_ENTRY = (sys.maxsize, _WorkItem(None, None, time.time(), (), {}))


def _worker(executor_reference, work_queue, initializer, initargs):
    """Thread main loop: pop (priority, item) entries and run them until shutdown."""
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            _base.LOGGER.critical("Exception in initializer:", exc_info=True)
            executor = executor_reference()
            if executor is not None:
                executor._initializer_failed()
            return
    try:
        while True:
            work_item = work_queue.get(block=True)
            priority = work_item[0]
            item = work_item[1]
            if priority == sys.maxsize:
                # NULL_ENTRY sentinel: fall through to the shutdown checks below.
                del item
            elif item is not None:
                item.run()
                # Delete references to object. See issue16284
                del item
                continue

            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Flag the executor as shutting down as early as possible if it
                # is not gc-ed yet.
                if executor is not None:
                    executor._shutdown = True
                # Notice other workers
                work_queue.put(NULL_ENTRY)
                return
            del executor
    except BaseException:
        # BUGFIX: Logger.error expects a %-style format string; the previous
        # call logger.error("work_item", work_item) triggered a formatting
        # error inside the logging machinery instead of logging the item.
        logger.error("work_item: %s", work_item)
        _base.LOGGER.critical("Exception in worker", exc_info=True)


class BrokenThreadPool(_base.BrokenExecutor):
    """
    Raised when a worker thread in a `ThreadPoolExecutor `_ failed initializing.
    """


class PriorityThreadPoolExecutor(_base.Executor):
    """Base threadpool executor with a priority queue."""

    # Used to assign unique thread names when thread_name_prefix is not supplied.
    _counter = itertools.count().__next__

    def __init__(
        self,
        maxsize=-1,
        max_workers=None,
        thread_name_prefix="",
        initializer=None,
        initargs=(),
    ):
        """Initializes a new `ThreadPoolExecutor `_ instance.

        Args:
            maxsize: Maximum number of queued work items (-1 for unbounded).
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
            thread_name_prefix: An optional name prefix to give our threads.
            initializer: A callable used to initialize worker threads.
            initargs: A tuple of arguments to pass to the initializer.
        """
        if max_workers is None:
            # Use this number because ThreadPoolExecutor is often
            # used to overlap I/O instead of CPU work.
            max_workers = (os.cpu_count() or 1) * 5
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")

        if initializer is not None and not callable(initializer):
            raise TypeError("initializer must be a callable")

        self._max_workers = max_workers
        self._work_queue = queue.PriorityQueue(maxsize=maxsize)
        self._idle_semaphore = threading.Semaphore(0)
        self._threads = set()
        self._broken = False
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._thread_name_prefix = thread_name_prefix or (
            "ThreadPoolExecutor-%d" % self._counter()
        )
        self._initializer = initializer
        self._initargs = initargs

    @classmethod
    def add_args(cls, parser: argparse.ArgumentParser, prefix: str = None):
        """Accept specific arguments from parser"""
        prefix_str = "" if prefix is None else prefix + "."
        try:
            # BUGFIX: os.getenv returns a string; argparse applies ``type=int``
            # only to values given on the command line, so env-supplied defaults
            # must be cast to int explicitly.
            default_max_workers = int(os.getenv("BT_PRIORITY_MAX_WORKERS", 5))
            default_maxsize = int(os.getenv("BT_PRIORITY_MAXSIZE", 10))
            parser.add_argument(
                "--" + prefix_str + "priority.max_workers",
                type=int,
                help="""maximum number of threads in thread pool""",
                default=default_max_workers,
            )
            parser.add_argument(
                "--" + prefix_str + "priority.maxsize",
                type=int,
                help="""maximum size of tasks in priority queue""",
                default=default_maxsize,
            )
        except argparse.ArgumentError:
            # re-parsing arguments.
            pass

    @classmethod
    def config(cls) -> "Config":
        """Get config from the argument parser.

        Return: :func:`bittensor.Config` object.
        """
        parser = argparse.ArgumentParser()
        PriorityThreadPoolExecutor.add_args(parser)
        return Config(parser, args=[])

    @property
    def is_empty(self):
        """True when no work items are queued."""
        return self._work_queue.empty()

    def submit(self, fn: Callable, *args, **kwargs) -> _base.Future:
        with self._shutdown_lock:
            if self._broken:
                raise BrokenThreadPool(self._broken)

            if self._shutdown:
                raise RuntimeError("cannot schedule new futures after shutdown")
            if _shutdown:
                raise RuntimeError(
                    "cannot schedule new futures after " "interpreter shutdown"
                )

            # ``priority`` is consumed here and not forwarded to ``fn``. Higher
            # priority runs earlier (the queue stores negated values). A random
            # epsilon jitter breaks ties between equal priorities.
            priority = kwargs.pop("priority", random.randint(0, 1000000))
            if priority == 0:
                priority = random.randint(1, 100)
            epsilon = random.uniform(0, 0.01) * priority
            start_time = time.time()

            f = _base.Future()
            w = _WorkItem(f, fn, start_time, args, kwargs)
            self._work_queue.put((-float(priority + epsilon), w), block=False)
            self._adjust_thread_count()
            return f

    submit.__doc__ = _base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        # if idle threads are available, don't spin new threads
        if self._idle_semaphore.acquire(timeout=0):
            return

        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(NULL_ENTRY)

        num_threads = len(self._threads)
        if num_threads < self._max_workers:
            thread_name = "%s_%d" % (self._thread_name_prefix or self, num_threads)
            t = threading.Thread(
                name=thread_name,
                target=_worker,
                args=(
                    weakref.ref(self, weakref_cb),
                    self._work_queue,
                    self._initializer,
                    self._initargs,
                ),
            )
            t.daemon = True
            t.start()
            self._threads.add(t)
            _threads_queues[t] = self._work_queue

    def _initializer_failed(self):
        with self._shutdown_lock:
            self._broken = (
                "A thread initializer failed, the thread pool " "is not usable anymore"
            )
            # Drain work queue and mark pending futures failed.
            # BUGFIX: the queue holds (priority, work_item) tuples, not bare
            # work items, and the NULL_ENTRY sentinel carries a work item whose
            # future is None — the previous code raised AttributeError here.
            while True:
                try:
                    _, work_item = self._work_queue.get_nowait()
                except queue.Empty:
                    break
                if work_item is not None and work_item.future is not None:
                    work_item.future.set_exception(BrokenThreadPool(self._broken))

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown = True
            self._work_queue.put(NULL_ENTRY)

        if wait:
            # Best-effort join: cap the wait at 2 seconds per thread so shutdown
            # cannot hang indefinitely on a stuck worker.
            for t in self._threads:
                try:
                    t.join(timeout=2)
                except Exception:
                    pass

    shutdown.__doc__ = _base.Executor.shutdown.__doc__
+# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +from typing import TypedDict + + +class AxonServeCallParams(TypedDict): + """Axon serve chain call parameters.""" + + version: int + ip: int + port: int + ip_type: int + netuid: int + + +class PrometheusServeCallParams(TypedDict): + """Prometheus serve chain call parameters.""" + + version: int + ip: int + port: int + ip_type: int + netuid: int diff --git a/bittensor/utils/__init__.py b/bittensor/utils/__init__.py new file mode 100644 index 0000000000..58a7e0a7c2 --- /dev/null +++ b/bittensor/utils/__init__.py @@ -0,0 +1,279 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
import hashlib
from typing import List, Dict, Literal, Union, Optional, TYPE_CHECKING

import scalecodec
from substrateinterface import Keypair
from substrateinterface.utils import ss58

from bittensor.core.settings import SS58_FORMAT
from bittensor.utils.btlogging import logging
from .registration import torch, use_torch
from .version import version_checking, check_version, VersionCheckError

if TYPE_CHECKING:
    from substrateinterface import SubstrateInterface

RAOPERTAO = 1e9  # rao per tao
U16_MAX = 65535
U64_MAX = 18446744073709551615


def ss58_to_vec_u8(ss58_address: str) -> list[int]:
    """Decode an ss58 address into a list of byte values (Vec<u8> for chain calls)."""
    ss58_bytes: bytes = ss58_address_to_bytes(ss58_address)
    encoded_address: list[int] = [int(byte) for byte in ss58_bytes]
    return encoded_address


def strtobool(val: str) -> Union[bool, Literal["==SUPRESS=="]]:
    """
    Converts a string to a boolean value.

    truth-y values are 'y', 'yes', 't', 'true', 'on', and '1';
    false-y values are 'n', 'no', 'f', 'false', 'off', and '0'.

    Raises ValueError if 'val' is anything else.
    """
    val = val.lower()
    if val in ("y", "yes", "t", "true", "on", "1"):
        return True
    elif val in ("n", "no", "f", "false", "off", "0"):
        return False
    else:
        raise ValueError("invalid truth value %r" % (val,))


def _get_explorer_root_url_by_network_from_map(
    network: str, network_map: dict[str, dict[str, str]]
) -> Optional[dict[str, str]]:
    """
    Returns the explorer root urls for the given network name from the given network map.

    Args:
        network(str): The network to get the explorer url for.
        network_map(dict[str, dict[str, str]]): The network map to get the explorer urls from.

    Returns:
        A dict mapping explorer name (e.g. "opentensor", "taostats") to its root url
        for the given network. Empty dict if the network is in no entry of the map.
    """
    explorer_urls: dict[str, str] = {}
    for entity_nm, entity_network_map in network_map.items():
        if network in entity_network_map:
            explorer_urls[entity_nm] = entity_network_map[network]

    return explorer_urls


def get_explorer_url_for_network(
    network: str, block_hash: str, network_map: dict[str, dict[str, str]]
) -> Optional[dict[str, str]]:
    """
    Returns the explorer urls for the given block hash and network.

    Args:
        network(str): The network to get the explorer url for.
        block_hash(str): The block hash to get the explorer url for.
        network_map(dict[str, dict[str, str]]): The network maps to get the explorer urls from.

    Returns:
        A dict of explorer urls for the given block hash and network.
        Empty dict if the network is not known.
    """

    explorer_urls: dict[str, str] = {}
    # Will be empty if the network is not known. i.e. not in network_map
    explorer_root_urls: dict[str, str] = _get_explorer_root_url_by_network_from_map(
        network, network_map
    )

    # BUGFIX: only build a url for explorers actually present in the map; the
    # previous code unconditionally used .get() on both keys and produced
    # "None/query/<hash>"-style urls when an explorer was missing.
    opentensor_root = explorer_root_urls.get("opentensor")
    if opentensor_root:
        explorer_urls["opentensor"] = f"{opentensor_root}/query/{block_hash}"

    taostats_root = explorer_root_urls.get("taostats")
    if taostats_root:
        explorer_urls["taostats"] = f"{taostats_root}/extrinsic/{block_hash}"

    return explorer_urls


def ss58_address_to_bytes(ss58_address: str) -> bytes:
    """Converts a ss58 address to a bytes object."""
    account_id_hex: str = scalecodec.ss58_decode(ss58_address, SS58_FORMAT)
    return bytes.fromhex(account_id_hex)


def u16_normalized_float(x: int) -> float:
    """Normalize a u16 integer into a float in [0, 1]."""
    return float(x) / float(U16_MAX)


def u64_normalized_float(x: int) -> float:
    """Normalize a u64 integer into a float in [0, 1]."""
    return float(x) / float(U64_MAX)


def get_hash(content, encoding="utf-8"):
    """Return the sha3-256 hex digest of ``content`` (encoded with ``encoding``)."""
    sha3 = hashlib.sha3_256()

    # Update the hash object with the encoded string
    sha3.update(content.encode(encoding))

    # Produce the hash
    return sha3.hexdigest()


def format_error_message(
    error_message: dict, substrate: "SubstrateInterface" = None
) -> str:
    """
    Formats an error message from the Subtensor error information for use in extrinsics.

    Args:
        error_message (dict): A dictionary containing the error information from Subtensor.
        substrate (SubstrateInterface, optional): The substrate interface, used to look up
            custom pallet error indices in the chain metadata.

    Returns:
        str: A formatted error message string.
    """
    err_name = "UnknownError"
    err_type = "UnknownType"
    err_description = "Unknown Description"

    if isinstance(error_message, dict):
        # subtensor error structure
        if (
            error_message.get("code")
            and error_message.get("message")
            and error_message.get("data")
        ):
            err_name = "SubstrateRequestException"
            err_type = error_message.get("message")
            err_data = error_message.get("data")

            # subtensor custom error marker
            if (
                isinstance(err_data, str)
                and err_data.startswith("Custom error:")
                and substrate
            ):
                if not substrate.metadata:
                    substrate.get_metadata()

                if substrate.metadata:
                    try:
                        pallet = substrate.metadata.get_metadata_pallet(
                            "SubtensorModule"
                        )
                        error_index = int(err_data.split("Custom error:")[-1])

                        error_dict = pallet.errors[error_index].value
                        err_type = error_dict.get("message", err_type)
                        err_docs = error_dict.get("docs", [])
                        err_description = err_docs[0] if err_docs else err_description
                    except Exception:
                        logging.error("Substrate pallets data unavailable.")
                else:
                    err_description = err_data

        elif (
            error_message.get("type")
            and error_message.get("name")
            and error_message.get("docs")
        ):
            err_type = error_message.get("type", err_type)
            err_name = error_message.get("name", err_name)
            err_docs = error_message.get("docs", [err_description])
            err_description = err_docs[0] if err_docs else err_description

    return f"Subtensor returned `{err_name}({err_type})` error. This means: `{err_description}`."


# Subnet 24 uses this function
def is_valid_ss58_address(address: str) -> bool:
    """
    Checks if the given address is a valid ss58 address.

    Args:
        address(str): The address to check.

    Returns:
        True if the address is a valid ss58 address for Bittensor, False otherwise.
    """
    try:
        return ss58.is_valid_ss58_address(
            address, valid_ss58_format=SS58_FORMAT
        ) or ss58.is_valid_ss58_address(
            address, valid_ss58_format=42
        )  # Default substrate ss58 format (legacy)
    except IndexError:
        return False


def _is_valid_ed25519_pubkey(public_key: Union[str, bytes]) -> bool:
    """
    Checks if the given public_key is a valid ed25519 key.

    Args:
        public_key(Union[str, bytes]): The public_key to check.

    Returns:
        True if the public_key is a valid ed25519 key, False otherwise.

    """
    try:
        if isinstance(public_key, str):
            # 64 hex chars, or 66 with a "0x" prefix.
            if len(public_key) != 64 and len(public_key) != 66:
                raise ValueError("a public_key should be 64 or 66 characters")
        elif isinstance(public_key, bytes):
            if len(public_key) != 32:
                raise ValueError("a public_key should be 32 bytes")
        else:
            raise ValueError("public_key must be a string or bytes")

        keypair = Keypair(public_key=public_key, ss58_format=SS58_FORMAT)

        ss58_addr = keypair.ss58_address
        return ss58_addr is not None

    except (ValueError, IndexError):
        return False


def is_valid_bittensor_address_or_public_key(address: Union[str, bytes]) -> bool:
    """
    Checks if the given address is a valid destination address.

    Args:
        address(Union[str, bytes]): The address to check.

    Returns:
        True if the address is a valid destination address, False otherwise.
    """
    if isinstance(address, str):
        # Check if ed25519
        if address.startswith("0x"):
            return _is_valid_ed25519_pubkey(address)
        else:
            # Assume ss58 address
            return is_valid_ss58_address(address)
    elif isinstance(address, bytes):
        # Check if ed25519
        return _is_valid_ed25519_pubkey(address)
    else:
        # Invalid address type
        return False
from typing import Optional

# Nonce validation window: four seconds, expressed in nanoseconds.
ALLOWED_DELTA = 4_000_000_000
NANOSECONDS_IN_SECOND = 1_000_000_000


def allowed_nonce_window_ns(current_time_ns: int, synapse_timeout: Optional[float]):
    """Return the oldest acceptable nonce timestamp, in nanoseconds.

    A nonce older than ``current_time_ns - ALLOWED_DELTA - timeout`` is rejected.

    Args:
        current_time_ns (int): The current time in nanoseconds.
        synapse_timeout (Optional[float]): Optional synapse timeout in seconds;
            treated as 0 when ``None``.

    Returns:
        The lower bound of the allowed nonce window, in nanoseconds.
    """
    timeout_ns = (synapse_timeout or 0) * NANOSECONDS_IN_SECOND
    return current_time_ns - ALLOWED_DELTA - timeout_ns


def calculate_diff_seconds(
    current_time: int, synapse_timeout: Optional[float], synapse_nonce: int
):
    """Return how old a nonce is, and the maximum age it may have, both in seconds.

    Args:
        current_time (int): The current time in nanoseconds.
        synapse_timeout (Optional[float]): Optional synapse timeout in seconds;
            treated as 0 when ``None``.
        synapse_nonce (int): The nonce value for the synapse in nanoseconds.

    Returns:
        tuple: ``(age_seconds, allowed_delta_seconds)``.
    """
    timeout_ns = (synapse_timeout or 0) * NANOSECONDS_IN_SECOND
    age_seconds = (current_time - synapse_nonce) / NANOSECONDS_IN_SECOND
    allowed_seconds = (ALLOWED_DELTA + timeout_ns) / NANOSECONDS_IN_SECOND
    return age_seconds, allowed_seconds
class Balance:
    """
    Represents the bittensor balance of the wallet, stored as rao (int).
    This class provides a way to interact with balances in two different units: rao and tao.
    It provides methods to convert between these units, as well as to perform arithmetic and comparison operations.

    Attributes:
        unit (str): A string representing the symbol for the tao unit.
        rao_unit (str): A string representing the symbol for the rao unit.
        rao (int): An integer that stores the balance in rao units.
        tao (float): A float property that gives the balance in tao units.
    """

    unit: str = settings.TAO_SYMBOL  # This is the tao unit
    rao_unit: str = settings.RAO_SYMBOL  # This is the rao unit
    rao: int
    tao: float

    def __init__(self, balance: Union[int, float]):
        """
        Initialize a Balance object. If balance is an int, it's assumed to be in rao.
        If balance is a float, it's assumed to be in tao.

        Args:
            balance: The initial balance, in either rao (if an int) or tao (if a float).
        """
        if isinstance(balance, int):
            self.rao = balance
        elif isinstance(balance, float):
            # Assume tao value for the float
            self.rao = int(balance * pow(10, 9))
        else:
            raise TypeError("balance must be an int (rao) or a float (tao)")

    @property
    def tao(self):
        """Balance expressed in tao (1 tao = 10^9 rao)."""
        return self.rao / pow(10, 9)

    def __int__(self):
        """Convert the Balance object to an int. The resulting value is in rao."""
        return self.rao

    def __float__(self):
        """Convert the Balance object to a float. The resulting value is in tao."""
        return self.tao

    def __str__(self):
        """Returns the Balance object as a string in the format "symbolvalue", where the value is in tao."""
        return f"{self.unit}{float(self.tao):,.9f}"

    def __rich__(self):
        # Integer and fractional tao parts are styled separately for readability.
        int_tao, fract_tao = format(float(self.tao), "f").split(".")
        return f"[green]{self.unit}[/green][green]{int_tao}[/green][green].[/green][dim green]{fract_tao}[/dim green]"

    def __str_rao__(self):
        return f"{self.rao_unit}{int(self.rao)}"

    def __rich_rao__(self):
        return f"[green]{self.rao_unit}{int(self.rao)}[/green]"

    def __repr__(self):
        return self.__str__()

    # NOTE(review): comparison/arithmetic operators raise NotImplementedError for
    # unsupported operand types; returning NotImplemented would be more idiomatic,
    # but the existing behavior is kept for backward compatibility with callers
    # that catch it.
    def __eq__(self, other: Union[int, float, "Balance"]):
        if other is None:
            return False

        if hasattr(other, "rao"):
            return self.rao == other.rao
        else:
            try:
                # Attempt to cast to int from rao
                other_rao = int(other)
                return self.rao == other_rao
            except (TypeError, ValueError):
                raise NotImplementedError("Unsupported type")

    def __ne__(self, other: Union[int, float, "Balance"]):
        return not self == other

    def __gt__(self, other: Union[int, float, "Balance"]):
        if hasattr(other, "rao"):
            return self.rao > other.rao
        else:
            try:
                # Attempt to cast to int from rao
                other_rao = int(other)
                return self.rao > other_rao
            except ValueError:
                raise NotImplementedError("Unsupported type")

    def __lt__(self, other: Union[int, float, "Balance"]):
        if hasattr(other, "rao"):
            return self.rao < other.rao
        else:
            try:
                # Attempt to cast to int from rao
                other_rao = int(other)
                return self.rao < other_rao
            except ValueError:
                raise NotImplementedError("Unsupported type")

    def __le__(self, other: Union[int, float, "Balance"]):
        try:
            return self < other or self == other
        except TypeError:
            raise NotImplementedError("Unsupported type")

    def __ge__(self, other: Union[int, float, "Balance"]):
        try:
            return self > other or self == other
        except TypeError:
            raise NotImplementedError("Unsupported type")

    def __add__(self, other: Union[int, float, "Balance"]):
        if hasattr(other, "rao"):
            return Balance.from_rao(int(self.rao + other.rao))
        else:
            try:
                # Attempt to cast to int from rao
                return Balance.from_rao(int(self.rao + other))
            except (ValueError, TypeError):
                raise NotImplementedError("Unsupported type")

    def __radd__(self, other: Union[int, float, "Balance"]):
        try:
            return self + other
        except TypeError:
            raise NotImplementedError("Unsupported type")

    def __sub__(self, other: Union[int, float, "Balance"]):
        try:
            return self + -other
        except TypeError:
            raise NotImplementedError("Unsupported type")

    def __rsub__(self, other: Union[int, float, "Balance"]):
        try:
            return -self + other
        except TypeError:
            raise NotImplementedError("Unsupported type")

    def __mul__(self, other: Union[int, float, "Balance"]):
        if hasattr(other, "rao"):
            return Balance.from_rao(int(self.rao * other.rao))
        else:
            try:
                # Attempt to cast to int from rao
                return Balance.from_rao(int(self.rao * other))
            except (ValueError, TypeError):
                raise NotImplementedError("Unsupported type")

    def __rmul__(self, other: Union[int, float, "Balance"]):
        return self * other

    def __truediv__(self, other: Union[int, float, "Balance"]):
        if hasattr(other, "rao"):
            return Balance.from_rao(int(self.rao / other.rao))
        else:
            try:
                # Attempt to cast to int from rao
                return Balance.from_rao(int(self.rao / other))
            except (ValueError, TypeError):
                raise NotImplementedError("Unsupported type")

    def __rtruediv__(self, other: Union[int, float, "Balance"]):
        if hasattr(other, "rao"):
            return Balance.from_rao(int(other.rao / self.rao))
        else:
            try:
                # Attempt to cast to int from rao
                return Balance.from_rao(int(other / self.rao))
            except (ValueError, TypeError):
                raise NotImplementedError("Unsupported type")

    def __floordiv__(self, other: Union[int, float, "Balance"]):
        if hasattr(other, "rao"):
            # BUGFIX: compute the quotient in exact integer rao, consistent with
            # __rfloordiv__ and __truediv__; the previous tao-based float
            # division (self.tao // other.tao) could lose precision for large
            # balances while yielding the same quotient otherwise.
            return Balance.from_rao(int(self.rao // other.rao))
        else:
            try:
                # Attempt to cast to int from rao
                return Balance.from_rao(int(self.rao // other))
            except (ValueError, TypeError):
                raise NotImplementedError("Unsupported type")

    def __rfloordiv__(self, other: Union[int, float, "Balance"]):
        if hasattr(other, "rao"):
            return Balance.from_rao(int(other.rao // self.rao))
        else:
            try:
                # Attempt to cast to int from rao
                return Balance.from_rao(int(other // self.rao))
            except (ValueError, TypeError):
                raise NotImplementedError("Unsupported type")

    def __bool__(self) -> bool:
        """A balance is truthy iff it holds a non-zero amount of rao."""
        return bool(self.rao)

    # BUGFIX/compat: ``__nonzero__`` is the Python 2 truthiness hook and is
    # ignored by Python 3, so ``bool(Balance(0))`` was wrongly True; ``__bool__``
    # above restores the intended behavior. The old name is kept as an alias for
    # any direct callers.
    __nonzero__ = __bool__

    def __neg__(self):
        return Balance.from_rao(-self.rao)

    def __pos__(self):
        return Balance.from_rao(self.rao)

    def __abs__(self):
        return Balance.from_rao(abs(self.rao))

    @staticmethod
    def from_float(amount: float):
        """
        Given tao, return :func:`Balance` object with rao(``int``) and tao(``float``), where rao = int(tao*pow(10,9))

        Args:
            amount (float): The amount in tao.

        Returns:
            A Balance object representing the given amount.
        """
        rao = int(amount * pow(10, 9))
        return Balance(rao)

    @staticmethod
    def from_tao(amount: float):
        """
        Given tao, return Balance object with rao(``int``) and tao(``float``), where rao = int(tao*pow(10,9))

        Args:
            amount (float): The amount in tao.

        Returns:
            A Balance object representing the given amount.
        """
        rao = int(amount * pow(10, 9))
        return Balance(rao)

    @staticmethod
    def from_rao(amount: int):
        """
        Given rao, return Balance object with rao(``int``) and tao(``float``), where rao = int(tao*pow(10,9))

        Args:
            amount (int): The amount in rao.

        Returns:
            A Balance object representing the given amount.
        """
        return Balance(amount)
+""" + +from .loggingmachine import LoggingMachine + + +logging = LoggingMachine(LoggingMachine.config()) diff --git a/bittensor/utils/btlogging/defines.py b/bittensor/utils/btlogging/defines.py new file mode 100644 index 0000000000..9e1dada25b --- /dev/null +++ b/bittensor/utils/btlogging/defines.py @@ -0,0 +1,28 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +"""Btlogging constant definition module.""" + +BASE_LOG_FORMAT = "%(asctime)s | %(levelname)s | %(message)s" +TRACE_LOG_FORMAT = ( + f"%(asctime)s | %(levelname)s | %(name)s:%(filename)s:%(lineno)s | %(message)s" +) +DATE_FORMAT = "%Y-%m-%d %H:%M:%S" +BITTENSOR_LOGGER_NAME = "bittensor" +DEFAULT_LOG_FILE_NAME = "bittensor.log" +DEFAULT_MAX_ROTATING_LOG_FILE_SIZE = 25 * 1024 * 1024 +DEFAULT_LOG_BACKUP_COUNT = 10 diff --git a/bittensor/utils/btlogging/format.py b/bittensor/utils/btlogging/format.py new file mode 100644 index 0000000000..1aa505c82c --- /dev/null +++ b/bittensor/utils/btlogging/format.py @@ -0,0 +1,222 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +""" +btlogging.format module + +This module defines custom logging formatters for the Bittensor project. 
"""
btlogging.format module

This module defines custom logging formatters for the Bittensor project.
"""

import logging
import time
from typing import Optional

from colorama import init, Fore, Back, Style

init(autoreset=True)

# Custom level numbers: TRACE sits below DEBUG, SUCCESS just above INFO.
TRACE_LEVEL_NUM: int = 5
SUCCESS_LEVEL_NUM: int = 21


def _trace(self, message: str, *args, **kws):
    """``Logger.trace`` implementation installed on the Logger class below."""
    if self.isEnabledFor(TRACE_LEVEL_NUM):
        self._log(TRACE_LEVEL_NUM, message, args, **kws)


def _success(self, message: str, *args, **kws):
    """``Logger.success`` implementation installed on the Logger class below."""
    if self.isEnabledFor(SUCCESS_LEVEL_NUM):
        self._log(SUCCESS_LEVEL_NUM, message, args, **kws)


# Register the custom levels and expose them as logging.SUCCESS / logging.TRACE
# plus Logger.success / Logger.trace convenience methods (module import has
# this deliberate side effect — other btlogging modules rely on it).
logging.SUCCESS = SUCCESS_LEVEL_NUM
logging.addLevelName(SUCCESS_LEVEL_NUM, "SUCCESS")
logging.Logger.success = _success

logging.TRACE = TRACE_LEVEL_NUM
logging.addLevelName(TRACE_LEVEL_NUM, "TRACE")
logging.Logger.trace = _trace

# Textual emoji shortcodes replaced with the real glyphs in log messages.
emoji_map: dict[str, str] = {
    ":white_heavy_check_mark:": "✅",
    ":cross_mark:": "❌",
    ":satellite:": "đŸ›°ïž",
}


# Inline color markup tokens replaced with ANSI color codes in log messages.
# NOTE(review): these keys arrived garbled as empty strings (replacing "" would
# insert color codes between every character). Restored to the upstream-style
# "<red>"/"</red>" tags — confirm against the canonical source.
color_map: dict[str, str] = {
    "<red>": Fore.RED,
    "</red>": Style.RESET_ALL,
    "<blue>": Fore.BLUE,
    "</blue>": Style.RESET_ALL,
    "<green>": Fore.GREEN,
    "</green>": Style.RESET_ALL,
}


# Color/style escape used for the level-name field of each log level.
log_level_color_prefix: dict[int, str] = {
    logging.NOTSET: Fore.RESET,
    logging.TRACE: Fore.MAGENTA,
    logging.DEBUG: Fore.BLUE,
    logging.INFO: Fore.WHITE,
    logging.SUCCESS: Fore.GREEN,
    logging.WARNING: Fore.YELLOW,
    logging.ERROR: Fore.RED,
    logging.CRITICAL: Back.RED,
}


# Per-level format strings for normal (non-trace) console output.
LOG_FORMATS: dict[int, str] = {
    level: f"{Fore.BLUE}%(asctime)s{Fore.RESET} | {Style.BRIGHT}{color}%(levelname)s\033[0m | %(message)s"
    for level, color in log_level_color_prefix.items()
}

# Per-level format strings for trace output (adds logger name and call site).
LOG_TRACE_FORMATS: dict[int, str] = {
    level: f"{Fore.BLUE}%(asctime)s{Fore.RESET}"
    f" | {Style.BRIGHT}{color}%(levelname)s{Fore.RESET}{Back.RESET}{Style.RESET_ALL}"
    f" | %(name)s:%(filename)s:%(lineno)s"
    f" | %(message)s"
    for level, color in log_level_color_prefix.items()
}

# Fallbacks used when a record's level number has no entry in the maps above.
DEFAULT_LOG_FORMAT: str = (
    f"{Fore.BLUE}%(asctime)s{Fore.RESET} | "
    f"{Style.BRIGHT}{Fore.WHITE}%(levelname)s{Style.RESET_ALL} | "
    f"%(name)s:%(filename)s:%(lineno)s | %(message)s"
)

DEFAULT_TRACE_FORMAT: str = (
    f"{Fore.BLUE}%(asctime)s{Fore.RESET} | "
    f"{Style.BRIGHT}{Fore.WHITE}%(levelname)s{Style.RESET_ALL} | "
    f"%(name)s:%(filename)s:%(lineno)s | %(message)s"
)


def _format_time_with_ms(converter, record, datefmt: Optional[str] = None) -> str:
    """Shared helper: format a record's creation time with a .mmm suffix."""
    created = converter(record.created)
    if datefmt:
        s = time.strftime(datefmt, created)
    else:
        s = time.strftime("%Y-%m-%d %H:%M:%S", created)
    s += f".{int(record.msecs):03d}"
    return s


class BtStreamFormatter(logging.Formatter):
    """
    A custom logging formatter for the Bittensor project that overrides the time formatting to include milliseconds,
    centers the level name, and applies custom log formats, emojis, and colors.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # When True, use the verbose trace formats (set via set_trace()).
        self.trace = False

    def formatTime(self, record, datefmt: Optional[str] = None) -> str:
        """
        Override formatTime to add milliseconds.

        Args:
            record (logging.LogRecord): The log record.
            datefmt (Optional[str]): The date format string.

        Returns:
            s (str): The formatted time string with milliseconds.
        """
        return _format_time_with_ms(self.converter, record, datefmt)

    def format(self, record: "logging.LogRecord") -> str:
        """
        Override format to apply custom formatting including emojis and colors.

        Saves the original format, applies the per-level (and trace-aware)
        format, substitutes emoji shortcodes and color markup in the message,
        then restores the original format.

        Args:
            record (logging.LogRecord): The log record.

        Returns:
            result (str): The formatted log record.
        """
        format_orig = self._style._fmt
        record.levelname = f"{record.levelname:^16}"

        if record.levelno not in LOG_FORMATS:
            self._style._fmt = (
                DEFAULT_TRACE_FORMAT if self.trace else DEFAULT_LOG_FORMAT
            )
        else:
            if self.trace is True:
                self._style._fmt = LOG_TRACE_FORMATS[record.levelno]
            else:
                self._style._fmt = LOG_FORMATS[record.levelno]

        # FIX: logging allows arbitrary objects as the message; calling
        # str.replace on a non-str would raise, so only substitute when the
        # message actually is a string.
        if isinstance(record.msg, str):
            for text, emoji in emoji_map.items():
                record.msg = record.msg.replace(text, emoji)
            # Apply color specifiers
            for text, color in color_map.items():
                record.msg = record.msg.replace(text, color)

        result = super().format(record)
        # Restore the formatter's original format string (the instance is
        # shared across records).
        self._style._fmt = format_orig

        return result

    def set_trace(self, state: bool = True):
        """Change formatter state."""
        self.trace = state


class BtFileFormatter(logging.Formatter):
    """
    BtFileFormatter

    A custom logging formatter for the Bittensor project that overrides the time formatting to include milliseconds and
    centers the level name.
    """

    def formatTime(
        self, record: "logging.LogRecord", datefmt: Optional[str] = None
    ) -> str:
        """
        Override formatTime to add milliseconds.

        Args:
            record (logging.LogRecord): The log record.
            datefmt (Optional[str]): The date format string.

        Returns:
            s (str): The formatted time string with milliseconds.
        """
        return _format_time_with_ms(self.converter, record, datefmt)

    def format(self, record: "logging.LogRecord") -> str:
        """
        Override format to center the level name.

        Args:
            record (logging.LogRecord): The log record.

        Returns:
            formated record (str): The formatted log record.
        """
        record.levelname = f"{record.levelname:^16}"
        return super().format(record)
def all_loggers() -> Generator["logging.Logger", None, None]:
    """Generator that yields all logger instances in the application.

    Iterates through the logging root manager's logger dictionary and yields all active `Logger` instances. It skips
    placeholders (and anything else that is not a `Logger`, e.g. adapters).

    Yields:
        logger (logging.Logger): An active logger instance.
    """
    for logger in logging.root.manager.loggerDict.values():
        # loggerDict may also contain PlaceHolder entries (and, in some
        # versions, adapter-like objects); only real Logger instances are
        # useful to callers, so everything else is filtered out here.
        if isinstance(logger, logging.Logger):
            yield logger


def all_logger_names() -> Generator[str, None, None]:
    """
    Generate the names of all active loggers.

    This function iterates through the logging root manager's logger dictionary and yields the names of all active
    `Logger` instances. It skips placeholders and other types that are not instances of `Logger`.

    Yields:
        name (str): The name of an active logger.
    """
    for name, logger in logging.root.manager.loggerDict.items():
        # Same filtering rationale as all_loggers(): only true Logger
        # instances count.
        if isinstance(logger, logging.Logger):
            yield name


def get_max_logger_name_length() -> int:
    """
    Calculate and return the length of the longest logger name.

    Returns:
        max_length (int): The length of the longest logger name (0 if there
        are no active loggers).
    """
    # max() with default=0 replaces the original hand-rolled loop; behavior
    # is identical, including the empty case.
    return max((len(name) for name in all_logger_names()), default=0)
"""
Module provides a logging framework for Bittensor, managing both Bittensor-specific and third-party logging states.
It leverages the StateMachine from the statemachine package to transition between different logging states such as
Default, Debug, Trace, and Disabled.
"""

import argparse
import atexit
import copy
import logging as stdlogging
import multiprocessing as mp
import os
import sys
from logging import Logger
from logging.handlers import QueueHandler, QueueListener, RotatingFileHandler
from typing import NamedTuple

from statemachine import State, StateMachine

from bittensor.core.config import Config
from .defines import (
    BITTENSOR_LOGGER_NAME,
    DATE_FORMAT,
    DEFAULT_LOG_BACKUP_COUNT,
    DEFAULT_LOG_FILE_NAME,
    DEFAULT_MAX_ROTATING_LOG_FILE_SIZE,
    TRACE_LOG_FORMAT,
)
from .format import BtFileFormatter, BtStreamFormatter
from .helpers import all_loggers


def _concat_message(msg="", prefix="", suffix=""):
    """Concatenates a message with optional prefix and suffix.

    Separators (" - ") are only inserted next to a non-empty prefix/suffix,
    so a bare message comes through unchanged.
    """
    msg = f"{f'{prefix} - ' if prefix else ''}{msg}{f' - {suffix}' if suffix else ''}"
    return msg


class LoggingConfig(NamedTuple):
    """Named tuple to hold the logging configuration."""

    debug: bool
    trace: bool
    record_log: bool
    logging_dir: str


class LoggingMachine(StateMachine, Logger):
    """Handles logger states for bittensor and 3rd party libraries."""

    # States: Default (INFO), Debug, Trace, and Disabled (CRITICAL-only).
    Default = State(initial=True)
    Debug = State()
    Trace = State()
    Disabled = State()

    # Transitions: each "enable_*" is reachable from every state (including
    # itself, so re-enabling is a no-op transition rather than an error).
    enable_default = (
        Debug.to(Default)
        | Trace.to(Default)
        | Disabled.to(Default)
        | Default.to(Default)
    )

    enable_trace = (
        Default.to(Trace) | Debug.to(Trace) | Disabled.to(Trace) | Trace.to(Trace)
    )

    enable_debug = (
        Default.to(Debug) | Trace.to(Debug) | Disabled.to(Debug) | Debug.to(Debug)
    )

    disable_trace = Trace.to(Default)

    disable_debug = Debug.to(Default)

    disable_logging = (
        Trace.to(Disabled)
        | Debug.to(Disabled)
        | Default.to(Disabled)
        | Disabled.to(Disabled)
    )

    def __init__(self, config: "Config", name: str = BITTENSOR_LOGGER_NAME):
        """Build the logging machine: queue, formatters, handlers, listener,
        the primary bittensor logger, and the initial state from ``config``."""
        super(LoggingMachine, self).__init__()
        # All log records flow through this queue to a single QueueListener,
        # which keeps handler I/O off the emitting threads/processes.
        self._queue = mp.Queue(-1)
        self._primary_loggers = {name}
        self._config = self._extract_logging_config(config)

        # Formatters
        #
        # In the future, this may be expanded to a dictionary mapping handler
        # types to their respective formatters.
        self._stream_formatter = BtStreamFormatter()
        self._file_formatter = BtFileFormatter(TRACE_LOG_FORMAT, DATE_FORMAT)

        # start with handlers for the QueueListener.
        #
        # In the future, we may want to add options to introduce other handlers
        # for things like log aggregation by external services.
        self._handlers = self._configure_handlers(self._config)

        # configure and start the queue listener
        self._listener = self._create_and_start_listener(self._handlers)

        # set up all the loggers
        self._logger = self._initialize_bt_logger(name)
        self.disable_third_party_loggers()
        self._enable_initial_state(self._config)

    def _enable_initial_state(self, config):
        """Set correct state action on initializing"""
        if config.trace:
            self.enable_trace()
        elif config.debug:
            self.enable_debug()
        else:
            self.enable_default()

    def _extract_logging_config(self, config: "Config") -> dict:
        """Extract btlogging's config from bittensor config

        Args:
            config (bittensor.core.config.Config): Bittensor config instance.

        Returns:
            (dict): btlogging's config from Bittensor config or Bittensor config.
        """
        if hasattr(config, "logging"):
            return config.logging
        else:
            return config

    def _configure_handlers(self, config) -> list[stdlogging.Handler]:
        """Build the handler list: always a stdout stream handler, plus a
        rotating file handler when file logging is configured."""
        handlers = []

        # stream handler, a given
        stream_handler = stdlogging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(self._stream_formatter)
        handlers.append(stream_handler)

        # file handler, maybe
        if config.record_log and config.logging_dir:
            logfile = os.path.abspath(
                os.path.join(config.logging_dir, DEFAULT_LOG_FILE_NAME)
            )
            file_handler = self._create_file_handler(logfile)
            handlers.append(file_handler)
        return handlers

    def get_config(self):
        """Return the current logging configuration object."""
        return self._config

    def set_config(self, config: "Config"):
        """Set config after initialization, if desired.

        Args:
            config (bittensor.core.config.Config): Bittensor config instance.
        """
        self._config = config
        if config.logging_dir and config.record_log:
            expanded_dir = os.path.expanduser(config.logging_dir)
            logfile = os.path.abspath(os.path.join(expanded_dir, DEFAULT_LOG_FILE_NAME))
            self._enable_file_logging(logfile)
        if config.trace:
            self.enable_trace()
        elif config.debug:
            self.enable_debug()

    def _create_and_start_listener(self, handlers):
        """
        A listener to receive and publish log records.

        This listener receives records from a queue populated by the main bittensor logger, as well as 3rd party loggers.
        It is stopped automatically at interpreter exit via atexit.
        """
        listener = QueueListener(self._queue, *handlers, respect_handler_level=True)
        listener.start()
        atexit.register(listener.stop)
        return listener

    def get_queue(self):
        """
        Get the queue the QueueListener is publishing from.

        To set up logging in a separate process, a QueueHandler must be added to all the desired loggers.
        """
        return self._queue

    def _initialize_bt_logger(self, name: str):
        """
        Initialize logging for bittensor.

        Since the initial state is Default, logging level for the module logger is INFO, and all third-party loggers are
        silenced. Subsequent state transitions will handle all logger outputs.
        """
        logger = stdlogging.getLogger(name)
        queue_handler = QueueHandler(self._queue)
        logger.addHandler(queue_handler)
        return logger

    def _deinitialize_bt_logger(self, name: str):
        """Find the logger by name and remove the queue handler associated with it."""
        logger = stdlogging.getLogger(name)
        for handler in list(logger.handlers):
            if isinstance(handler, QueueHandler):
                logger.removeHandler(handler)
        return logger

    def _create_file_handler(self, logfile: str):
        """Create a size-rotating file handler for ``logfile``.

        The handler's level is TRACE (the custom level registered by
        .format on import) so the file always captures everything.
        """
        file_handler = RotatingFileHandler(
            logfile,
            maxBytes=DEFAULT_MAX_ROTATING_LOG_FILE_SIZE,
            backupCount=DEFAULT_LOG_BACKUP_COUNT,
        )
        file_handler.setFormatter(self._file_formatter)
        file_handler.setLevel(stdlogging.TRACE)
        return file_handler

    def register_primary_logger(self, name: str):
        """
        Register a logger as primary logger

        This adds a logger to the _primary_loggers set to ensure
        it doesn't get disabled when disabling third-party loggers.
        A queue handler is also associated with it.

        Args:
            name (str): the name for primary logger.
        """
        self._primary_loggers.add(name)
        self._initialize_bt_logger(name)

    def deregister_primary_logger(self, name: str):
        """
        De-registers a primary logger

        This function removes the logger from the _primary_loggers
        set and deinitializes its queue handler

        Args:
            name (str): the name of primary logger.
        """
        self._primary_loggers.remove(name)
        self._deinitialize_bt_logger(name)

    def enable_third_party_loggers(self):
        """Enables logging for third-party loggers by adding a queue handler to each."""
        for logger in all_loggers():
            if logger.name in self._primary_loggers:
                continue
            queue_handler = QueueHandler(self._queue)
            logger.addHandler(queue_handler)
            logger.setLevel(self._logger.level)

    def disable_third_party_loggers(self):
        """Disables logging for third-party loggers by removing all their handlers."""
        # remove all handlers
        for logger in all_loggers():
            if logger.name in self._primary_loggers:
                continue
            for handler in logger.handlers:
                logger.removeHandler(handler)

    def _enable_file_logging(self, logfile: str):
        """Attach a rotating file handler to the listener (idempotent)."""
        # preserve idempotency; do not create extra filehandlers
        # if one already exists
        if any(
            [isinstance(handler, RotatingFileHandler) for handler in self._handlers]
        ):
            return
        file_handler = self._create_file_handler(logfile)
        self._handlers.append(file_handler)
        self._listener.handlers = tuple(self._handlers)

    # state transitions
    def before_transition(self, event, state):
        """Stops listener before transition."""
        self._listener.stop()

    def after_transition(self, event, state):
        """Starts listener after transition."""
        self._listener.start()

    # Default Logging
    def before_enable_default(self):
        """Logs status before enable Default."""
        # FIX: removed spurious f-prefix on a constant string.
        self._logger.info("Enabling default logging.")
        self._logger.setLevel(stdlogging.INFO)
        for logger in all_loggers():
            if logger.name in self._primary_loggers:
                continue
            logger.setLevel(stdlogging.CRITICAL)

    def after_enable_default(self):
        pass

    # Trace
    def before_enable_trace(self):
        """Logs status before enable Trace."""
        self._logger.info("Enabling trace.")
        self._stream_formatter.set_trace(True)
        for logger in all_loggers():
            logger.setLevel(stdlogging.TRACE)

    def after_enable_trace(self):
        """Logs status after enable Trace."""
        self._logger.info("Trace enabled.")

    def before_disable_trace(self):
        """Logs status before disable Trace."""
        # FIX: removed spurious f-prefix on a constant string.
        self._logger.info("Disabling trace.")
        self._stream_formatter.set_trace(False)
        self.enable_default()

    def after_disable_trace(self):
        """Logs status after disable Trace."""
        self._logger.info("Trace disabled.")

    # Debug
    def before_enable_debug(self):
        """Logs status before enable Debug."""
        self._logger.info("Enabling debug.")
        self._stream_formatter.set_trace(True)
        for logger in all_loggers():
            logger.setLevel(stdlogging.DEBUG)

    def after_enable_debug(self):
        """Logs status after enable Debug."""
        self._logger.info("Debug enabled.")

    def before_disable_debug(self):
        """Logs status before disable Debug."""
        self._logger.info("Disabling debug.")
        self._stream_formatter.set_trace(False)
        self.enable_default()

    def after_disable_debug(self):
        """Logs status after disable Debug."""
        self._logger.info("Debug disabled.")

    # Disable Logging
    def before_disable_logging(self):
        """
        Prepares the logging system for disabling.

        This method performs the following actions:
        1. Logs an informational message indicating that logging is being disabled.
        2. Disables trace mode in the stream formatter.
        3. Sets the logging level to CRITICAL for all loggers.

        This ensures that only critical messages will be logged after this method is called.
        """
        self._logger.info("Disabling logging.")
        self._stream_formatter.set_trace(False)

        for logger in all_loggers():
            logger.setLevel(stdlogging.CRITICAL)

    # Required API support log commands for API backwards compatibility.
    @property
    def __trace_on__(self) -> bool:
        """
        Checks if the current state is in "Trace" mode.

        Returns:
            bool: True if the current state is "Trace", otherwise False.
        """
        return self.current_state_value == "Trace"

    def trace(self, msg="", prefix="", suffix="", *args, **kwargs):
        """Wraps trace message with prefix and suffix."""
        msg = _concat_message(msg, prefix, suffix)
        self._logger.trace(msg, *args, **kwargs)

    def debug(self, msg="", prefix="", suffix="", *args, **kwargs):
        """Wraps debug message with prefix and suffix."""
        msg = _concat_message(msg, prefix, suffix)
        self._logger.debug(msg, *args, **kwargs)

    def info(self, msg="", prefix="", suffix="", *args, **kwargs):
        """Wraps info message with prefix and suffix."""
        msg = _concat_message(msg, prefix, suffix)
        self._logger.info(msg, *args, **kwargs)

    # FIX (consistency): the wrappers below previously built
    # f"{prefix} - {msg} - {suffix}" unconditionally, emitting stray " - "
    # separators when prefix/suffix were empty — unlike trace/debug/info.
    # All wrappers now share _concat_message.
    def success(self, msg="", prefix="", suffix="", *args, **kwargs):
        """Wraps success message with prefix and suffix."""
        msg = _concat_message(msg, prefix, suffix)
        self._logger.success(msg, *args, **kwargs)

    def warning(self, msg="", prefix="", suffix="", *args, **kwargs):
        """Wraps warning message with prefix and suffix."""
        msg = _concat_message(msg, prefix, suffix)
        self._logger.warning(msg, *args, **kwargs)

    def error(self, msg="", prefix="", suffix="", *args, **kwargs):
        """Wraps error message with prefix and suffix."""
        msg = _concat_message(msg, prefix, suffix)
        self._logger.error(msg, *args, **kwargs)

    def critical(self, msg="", prefix="", suffix="", *args, **kwargs):
        """Wraps critical message with prefix and suffix."""
        msg = _concat_message(msg, prefix, suffix)
        self._logger.critical(msg, *args, **kwargs)

    def exception(self, msg="", prefix="", suffix="", *args, **kwargs):
        """Wraps exception message with prefix and suffix."""
        msg = _concat_message(msg, prefix, suffix)
        self._logger.exception(msg, *args, **kwargs)

    def on(self):
        """Enable default state."""
        self._logger.info("Logging enabled.")
        self.enable_default()

    def off(self):
        """Disables all states."""
        self.disable_logging()

    def set_debug(self, on: bool = True):
        """Sets Debug state."""
        if on and not self.current_state_value == "Debug":
            self.enable_debug()
        elif not on:
            if self.current_state_value == "Debug":
                self.disable_debug()

    def set_trace(self, on: bool = True):
        """Sets Trace state."""
        if on and not self.current_state_value == "Trace":
            self.enable_trace()
        elif not on:
            if self.current_state_value == "Trace":
                self.disable_trace()

    def get_level(self) -> int:
        """Returns Logging level."""
        return self._logger.level

    def check_config(self, config: "Config"):
        """Validate that the config carries a logging section.

        NOTE(review): uses ``assert``, which is stripped under ``python -O``;
        kept because callers may rely on AssertionError.
        """
        assert config.logging

    def help(self):
        pass

    @classmethod
    def add_args(cls, parser: argparse.ArgumentParser, prefix: str = None):
        """Accept specific arguments for parser"""
        prefix_str = "" if prefix is None else prefix + "."
        try:
            # Environment variables provide the defaults; CLI flags override.
            default_logging_debug = os.getenv("BT_LOGGING_DEBUG") or False
            default_logging_trace = os.getenv("BT_LOGGING_TRACE") or False
            default_logging_record_log = os.getenv("BT_LOGGING_RECORD_LOG") or False
            default_logging_logging_dir = os.getenv(
                "BT_LOGGING_LOGGING_DIR"
            ) or os.path.join("~", ".bittensor", "miners")
            parser.add_argument(
                "--" + prefix_str + "logging.debug",
                action="store_true",
                help="""Turn on bittensor debugging information""",
                default=default_logging_debug,
            )
            parser.add_argument(
                "--" + prefix_str + "logging.trace",
                action="store_true",
                help="""Turn on bittensor trace level information""",
                default=default_logging_trace,
            )
            parser.add_argument(
                "--" + prefix_str + "logging.record_log",
                action="store_true",
                help="""Turns on logging to file.""",
                default=default_logging_record_log,
            )
            parser.add_argument(
                "--" + prefix_str + "logging.logging_dir",
                type=str,
                help="Logging default root directory.",
                default=default_logging_logging_dir,
            )
        except argparse.ArgumentError:
            # re-parsing arguments.
            pass

    @classmethod
    def config(cls) -> "Config":
        """Get config from the argument parser.

        Return:
            config (bittensor.core.config.Config): config object
        """
        parser = argparse.ArgumentParser()
        cls.add_args(parser)
        return Config(parser, args=[])

    def __call__(
        self,
        config: "Config" = None,
        debug: bool = None,
        trace: bool = None,
        record_log: bool = None,
        logging_dir: str = None,
    ):
        """Reconfigure logging from a Config or from individual flags.

        When ``config`` is given it is deep-copied and the explicit flags are
        overlaid (note: ``debug`` takes precedence over ``trace`` — only one
        of the two overlays is applied). Otherwise a LoggingConfig is built
        from the flags alone.
        """
        if config is not None:
            cfg = copy.deepcopy(config)
            if debug is not None:
                cfg.debug = debug
            elif trace is not None:
                cfg.trace = trace
            if record_log is not None:
                cfg.record_log = record_log
            if logging_dir is not None:
                cfg.logging_dir = logging_dir
        else:
            cfg = LoggingConfig(
                debug=debug, trace=trace, record_log=record_log, logging_dir=logging_dir
            )
        self.set_config(cfg)
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +""" +The Bittensor Compatibility Module is designed to ensure seamless integration and functionality with legacy versions of +the Bittensor framework, specifically up to and including version 7.3.0. This module addresses changes and deprecated +features in recent versions, allowing users to maintain compatibility with older systems and projects. +""" + +import importlib +import sys + +from bittensor_wallet.errors import KeyFileError # noqa: F401 +from bittensor_wallet.keyfile import ( # noqa: F401 + serialized_keypair_to_keyfile_data, + deserialize_keypair_from_keyfile_data, + validate_password, + ask_password_to_encrypt, + keyfile_data_is_encrypted_nacl, + keyfile_data_is_encrypted_ansible, + keyfile_data_is_encrypted_legacy, + keyfile_data_is_encrypted, + keyfile_data_encryption_method, + legacy_encrypt_keyfile_data, + encrypt_keyfile_data, + get_coldkey_password_from_environment, + decrypt_keyfile_data, + Keyfile, +) +from bittensor_wallet.wallet import display_mnemonic_msg, Wallet # noqa: F401 +from substrateinterface import Keypair # noqa: F401 + +from bittensor.core import settings +from bittensor.core.axon import Axon +from bittensor.core.chain_data import ( # noqa: F401 + AxonInfo, + NeuronInfo, + NeuronInfoLite, + PrometheusInfo, + DelegateInfo, + StakeInfo, + SubnetInfo, + SubnetHyperparameters, + IPInfo, + ProposalCallData, + ProposalVoteData, +) +from bittensor.core.config import ( # noqa: F401 + InvalidConfigFile, + DefaultConfig, + Config, + T, +) +from bittensor.core.dendrite import Dendrite # noqa: F401 +from bittensor.core.errors import ( # noqa: F401 + BlacklistedException, + ChainConnectionError, + ChainError, + ChainQueryError, + ChainTransactionError, + IdentityError, + 
InternalServerError, + InvalidRequestNameError, + MetadataError, + NominationError, + NotDelegateError, + NotRegisteredError, + NotVerifiedException, + PostProcessException, + PriorityException, + RegistrationError, + RunException, + StakeError, + SynapseDendriteNoneException, + SynapseParsingError, + TransferError, + UnknownSynapseError, + UnstakeError, +) +from bittensor.core.metagraph import Metagraph +from bittensor.core.settings import BLOCKTIME +from bittensor.core.stream import StreamingSynapse # noqa: F401 +from bittensor.core.subtensor import Subtensor +from bittensor.core.synapse import TerminalInfo, Synapse # noqa: F401 +from bittensor.core.tensor import Tensor # noqa: F401 +from bittensor.core.threadpool import ( # noqa: F401 + PriorityThreadPoolExecutor as PriorityThreadPoolExecutor, +) +from bittensor.utils import ( # noqa: F401 + ss58_to_vec_u8, + version_checking, + strtobool, + get_explorer_url_for_network, + ss58_address_to_bytes, + u16_normalized_float, + u64_normalized_float, + get_hash, +) +from bittensor.utils.balance import Balance as Balance # noqa: F401 +from bittensor.utils.mock.subtensor_mock import MockSubtensor as MockSubtensor # noqa: F401 +from bittensor.utils.subnets import SubnetsAPI # noqa: F401 + +# Backwards compatibility with previous bittensor versions. 
# Backwards compatibility with previous bittensor versions.
# Lower-case aliases mirror the legacy module-level names (e.g. `bittensor.axon`).
axon = Axon
config = Config
dendrite = Dendrite
keyfile = Keyfile
metagraph = Metagraph
wallet = Wallet
subtensor = Subtensor
synapse = Synapse

# Legacy dunder constants, re-exported from `bittensor.core.settings`.
__blocktime__ = BLOCKTIME
__network_explorer_map__ = settings.NETWORK_EXPLORER_MAP
__pipaddress__ = settings.PIPADDRESS
__ss58_format__ = settings.SS58_FORMAT
__type_registry__ = settings.TYPE_REGISTRY
__ss58_address_length__ = settings.SS58_ADDRESS_LENGTH

__networks__ = settings.NETWORKS

# Chain entrypoint URLs for the known networks.
__finney_entrypoint__ = settings.FINNEY_ENTRYPOINT
__finney_test_entrypoint__ = settings.FINNEY_TEST_ENTRYPOINT
__archive_entrypoint__ = settings.ARCHIVE_ENTRYPOINT
__local_entrypoint__ = settings.LOCAL_ENTRYPOINT

__tao_symbol__ = settings.TAO_SYMBOL
__rao_symbol__ = settings.RAO_SYMBOL

# Makes the `bittensor.utils.mock` subpackage available as `bittensor.mock` for backwards compatibility.
mock_subpackage = importlib.import_module("bittensor.utils.mock")
sys.modules["bittensor.mock"] = mock_subpackage

# Makes the `bittensor.core.extrinsics` subpackage available as `bittensor.extrinsics` for backwards compatibility.
+extrinsics_subpackage = importlib.import_module("bittensor.core.extrinsics") +sys.modules["bittensor.extrinsics"] = extrinsics_subpackage diff --git a/bittensor/utils/mock/__init__.py b/bittensor/utils/mock/__init__.py new file mode 100644 index 0000000000..218579a153 --- /dev/null +++ b/bittensor/utils/mock/__init__.py @@ -0,0 +1,18 @@ +# The MIT License (MIT) +# Copyright © 2023 Opentensor Technologies Inc + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +from .subtensor_mock import MockSubtensor diff --git a/bittensor/utils/mock/subtensor_mock.py b/bittensor/utils/mock/subtensor_mock.py new file mode 100644 index 0000000000..817be08434 --- /dev/null +++ b/bittensor/utils/mock/subtensor_mock.py @@ -0,0 +1,908 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
# Mock Testing Constant — one shared dict backs every MockSubtensor instance.
__GLOBAL_MOCK_STATE__ = {}


class AxonServeCallParams(TypedDict):
    """Axon serve chain call parameters."""

    version: int
    ip: int
    port: int
    ip_type: int
    netuid: int


class PrometheusServeCallParams(TypedDict):
    """Prometheus serve chain call parameters."""

    version: int
    ip: int
    port: int
    ip_type: int
    netuid: int


BlockNumber = int


class InfoDict(Mapping):
    """Mixin giving dataclass subclasses dict-style access to their fields."""

    @classmethod
    def default(cls):
        raise NotImplementedError

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)


@dataclass
class AxonInfoDict(InfoDict):
    block: int
    version: int
    ip: int  # integer representation of ip address
    port: int
    ip_type: int
    protocol: int
    placeholder1: int  # placeholder for future use
    placeholder2: int

    @classmethod
    def default(cls):
        """All-zero axon record used when no axon has been served."""
        return cls(
            block=0,
            version=0,
            ip=0,
            port=0,
            ip_type=0,
            protocol=0,
            placeholder1=0,
            placeholder2=0,
        )


@dataclass
class PrometheusInfoDict(InfoDict):
    block: int
    version: int
    ip: int  # integer representation of ip address
    port: int
    ip_type: int

    @classmethod
    def default(cls):
        """All-zero prometheus record used when nothing has been served."""
        return cls(block=0, version=0, ip=0, port=0, ip_type=0)


@dataclass
class MockSubtensorValue:
    value: Optional[Any]


class MockMapResult:
    """Iterable of (key, value) records, each element wrapped as a
    ``MockSubtensorValue``-like object exposing ``.value``."""

    records: Optional[list[tuple[MockSubtensorValue, MockSubtensorValue]]]

    def __init__(
        self,
        records: Optional[
            list[tuple[Union[Any, MockSubtensorValue], Union[Any, MockSubtensorValue]]]
        ] = None,
    ):
        # BUG FIX: the previous "already wrapped" check was
        # `isinstance(item, dict) and hasattr(item, "value")`, which is always
        # False (plain dicts have no `value` attribute), so pre-wrapped records
        # were wrapped a second time. Additionally the declared default
        # `records=None` crashed the comprehension with a TypeError.
        def _wrap(item):
            return item if hasattr(item, "value") else MockSubtensorValue(value=item)

        self.records = [
            (_wrap(first), _wrap(second)) for first, second in (records or [])
        ]

    def __iter__(self):
        return iter(self.records)


class MockSystemState(TypedDict):
    Account: dict[str, dict[int, int]]  # address -> block -> balance


class MockSubtensorState(TypedDict):
    Rho: dict[int, dict[BlockNumber, int]]  # netuid -> block -> rho
    Kappa: dict[int, dict[BlockNumber, int]]  # netuid -> block -> kappa
    Difficulty: dict[int, dict[BlockNumber, int]]  # netuid -> block -> difficulty
    ImmunityPeriod: dict[
        int, dict[BlockNumber, int]
    ]  # netuid -> block -> immunity_period
    ValidatorBatchSize: dict[
        int, dict[BlockNumber, int]
    ]  # netuid -> block -> validator_batch_size
    Active: dict[int, dict[BlockNumber, bool]]  # (netuid, uid), block -> active
    Stake: dict[str, dict[str, dict[int, int]]]  # (hotkey, coldkey) -> block -> stake

    Delegates: dict[str, dict[int, float]]  # address -> block -> delegate_take

    NetworksAdded: dict[int, dict[BlockNumber, bool]]  # netuid -> block -> added


class MockChainState(TypedDict):
    System: MockSystemState
    SubtensorModule: MockSubtensorState
+ """ + + chain_state: MockChainState + block_number: int + + @classmethod + def reset(cls) -> None: + __GLOBAL_MOCK_STATE__.clear() + + _ = cls() + + def setup(self) -> None: + if not hasattr(self, "chain_state") or getattr(self, "chain_state") is None: + self.chain_state = { + "System": {"Account": {}}, + "Balances": {"ExistentialDeposit": {0: 500}}, + "SubtensorModule": { + "NetworksAdded": {}, + "Rho": {}, + "Kappa": {}, + "Difficulty": {}, + "ImmunityPeriod": {}, + "ValidatorBatchSize": {}, + "ValidatorSequenceLength": {}, + "ValidatorEpochsPerReset": {}, + "ValidatorEpochLength": {}, + "MaxAllowedValidators": {}, + "MinAllowedWeights": {}, + "MaxWeightLimit": {}, + "SynergyScalingLawPower": {}, + "ScalingLawPower": {}, + "SubnetworkN": {}, + "MaxAllowedUids": {}, + "NetworkModality": {}, + "BlocksSinceLastStep": {}, + "Tempo": {}, + "NetworkConnect": {}, + "EmissionValues": {}, + "Burn": {}, + "Active": {}, + "Uids": {}, + "Keys": {}, + "Owner": {}, + "IsNetworkMember": {}, + "LastUpdate": {}, + "Rank": {}, + "Emission": {}, + "Incentive": {}, + "Consensus": {}, + "Trust": {}, + "ValidatorTrust": {}, + "Dividends": {}, + "PruningScores": {}, + "ValidatorPermit": {}, + "Weights": {}, + "Bonds": {}, + "Stake": {}, + "TotalStake": {0: 0}, + "TotalIssuance": {0: 0}, + "TotalHotkeyStake": {}, + "TotalColdkeyStake": {}, + "TxRateLimit": {0: 0}, # No limit + "Delegates": {}, + "Axons": {}, + "Prometheus": {}, + "SubnetOwner": {}, + "Commits": {}, + "AdjustmentAlpha": {}, + "BondsMovingAverage": {}, + }, + } + + self.block_number = 0 + + self.network = "mock" + self.chain_endpoint = "ws://mock_endpoint.bt" + self.substrate = MagicMock() + + def __init__(self, *args, **kwargs) -> None: + super().__init__() + self.__dict__ = __GLOBAL_MOCK_STATE__ + + if not hasattr(self, "chain_state") or getattr(self, "chain_state") is None: + self.setup() + + def get_block_hash(self, block_id: int) -> str: + return "0x" + sha256(str(block_id).encode()).hexdigest()[:64] + + def 
create_subnet(self, netuid: int) -> None: + subtensor_state = self.chain_state["SubtensorModule"] + if netuid not in subtensor_state["NetworksAdded"]: + # Per Subnet + subtensor_state["Rho"][netuid] = {} + subtensor_state["Rho"][netuid][0] = 10 + subtensor_state["Kappa"][netuid] = {} + subtensor_state["Kappa"][netuid][0] = 32_767 + subtensor_state["Difficulty"][netuid] = {} + subtensor_state["Difficulty"][netuid][0] = 10_000_000 + subtensor_state["ImmunityPeriod"][netuid] = {} + subtensor_state["ImmunityPeriod"][netuid][0] = 4096 + subtensor_state["ValidatorBatchSize"][netuid] = {} + subtensor_state["ValidatorBatchSize"][netuid][0] = 32 + subtensor_state["ValidatorSequenceLength"][netuid] = {} + subtensor_state["ValidatorSequenceLength"][netuid][0] = 256 + subtensor_state["ValidatorEpochsPerReset"][netuid] = {} + subtensor_state["ValidatorEpochsPerReset"][netuid][0] = 60 + subtensor_state["ValidatorEpochLength"][netuid] = {} + subtensor_state["ValidatorEpochLength"][netuid][0] = 100 + subtensor_state["MaxAllowedValidators"][netuid] = {} + subtensor_state["MaxAllowedValidators"][netuid][0] = 128 + subtensor_state["MinAllowedWeights"][netuid] = {} + subtensor_state["MinAllowedWeights"][netuid][0] = 1024 + subtensor_state["MaxWeightLimit"][netuid] = {} + subtensor_state["MaxWeightLimit"][netuid][0] = 1_000 + subtensor_state["SynergyScalingLawPower"][netuid] = {} + subtensor_state["SynergyScalingLawPower"][netuid][0] = 50 + subtensor_state["ScalingLawPower"][netuid] = {} + subtensor_state["ScalingLawPower"][netuid][0] = 50 + subtensor_state["SubnetworkN"][netuid] = {} + subtensor_state["SubnetworkN"][netuid][0] = 0 + subtensor_state["MaxAllowedUids"][netuid] = {} + subtensor_state["MaxAllowedUids"][netuid][0] = 4096 + subtensor_state["NetworkModality"][netuid] = {} + subtensor_state["NetworkModality"][netuid][0] = 0 + subtensor_state["BlocksSinceLastStep"][netuid] = {} + subtensor_state["BlocksSinceLastStep"][netuid][0] = 0 + subtensor_state["Tempo"][netuid] = {} + 
subtensor_state["Tempo"][netuid][0] = 99 + + # subtensor_state['NetworkConnect'][netuid] = {} + # subtensor_state['NetworkConnect'][netuid][0] = {} + subtensor_state["EmissionValues"][netuid] = {} + subtensor_state["EmissionValues"][netuid][0] = 0 + subtensor_state["Burn"][netuid] = {} + subtensor_state["Burn"][netuid][0] = 0 + subtensor_state["Commits"][netuid] = {} + + # Per-UID/Hotkey + + subtensor_state["Uids"][netuid] = {} + subtensor_state["Keys"][netuid] = {} + subtensor_state["Owner"][netuid] = {} + + subtensor_state["LastUpdate"][netuid] = {} + subtensor_state["Active"][netuid] = {} + subtensor_state["Rank"][netuid] = {} + subtensor_state["Emission"][netuid] = {} + subtensor_state["Incentive"][netuid] = {} + subtensor_state["Consensus"][netuid] = {} + subtensor_state["Trust"][netuid] = {} + subtensor_state["ValidatorTrust"][netuid] = {} + subtensor_state["Dividends"][netuid] = {} + subtensor_state["PruningScores"][netuid] = {} + subtensor_state["PruningScores"][netuid][0] = {} + subtensor_state["ValidatorPermit"][netuid] = {} + + subtensor_state["Weights"][netuid] = {} + subtensor_state["Bonds"][netuid] = {} + + subtensor_state["Axons"][netuid] = {} + subtensor_state["Prometheus"][netuid] = {} + + subtensor_state["NetworksAdded"][netuid] = {} + subtensor_state["NetworksAdded"][netuid][0] = True + + subtensor_state["AdjustmentAlpha"][netuid] = {} + subtensor_state["AdjustmentAlpha"][netuid][0] = 1000 + + subtensor_state["BondsMovingAverage"][netuid] = {} + subtensor_state["BondsMovingAverage"][netuid][0] = 1000 + else: + raise Exception("Subnet already exists") + + def set_difficulty(self, netuid: int, difficulty: int) -> None: + subtensor_state = self.chain_state["SubtensorModule"] + if netuid not in subtensor_state["NetworksAdded"]: + raise Exception("Subnet does not exist") + + subtensor_state["Difficulty"][netuid][self.block_number] = difficulty + + @staticmethod + def _convert_to_balance(balance: Union["Balance", float, int]) -> "Balance": + if 
isinstance(balance, float): + balance = Balance.from_tao(balance) + + if isinstance(balance, int): + balance = Balance.from_rao(balance) + + return balance + + def force_set_balance( + self, ss58_address: str, balance: Union["Balance", float, int] = Balance(0) + ) -> tuple[bool, Optional[str]]: + """ + Returns: + tuple[bool, Optional[str]]: (success, err_msg) + """ + balance = self._convert_to_balance(balance) + + if ss58_address not in self.chain_state["System"]["Account"]: + self.chain_state["System"]["Account"][ss58_address] = { + "data": {"free": {0: 0}} + } + + old_balance = self.get_balance(ss58_address, self.block_number) + diff = balance.rao - old_balance.rao + + # Update total issuance + self.chain_state["SubtensorModule"]["TotalIssuance"][self.block_number] = ( + self._get_most_recent_storage( + self.chain_state["SubtensorModule"]["TotalIssuance"] + ) + + diff + ) + + self.chain_state["System"]["Account"][ss58_address] = { + "data": {"free": {self.block_number: balance.rao}} + } + + return True, None + + # Alias for force_set_balance + sudo_force_set_balance = force_set_balance + + def do_block_step(self) -> None: + self.block_number += 1 + + # Doesn't do epoch + subtensor_state = self.chain_state["SubtensorModule"] + for subnet in subtensor_state["NetworksAdded"]: + subtensor_state["BlocksSinceLastStep"][subnet][self.block_number] = ( + self._get_most_recent_storage( + subtensor_state["BlocksSinceLastStep"][subnet] + ) + + 1 + ) + + def _handle_type_default(self, name: str, params: list[object]) -> object: + defaults_mapping = { + "TotalStake": 0, + "TotalHotkeyStake": 0, + "TotalColdkeyStake": 0, + "Stake": 0, + } + + return defaults_mapping.get(name, None) + + def commit(self, wallet: "Wallet", netuid: int, data: str) -> None: + uid = self.get_uid_for_hotkey_on_subnet( + hotkey_ss58=wallet.hotkey.ss58_address, + netuid=netuid, + ) + if uid is None: + raise Exception("Neuron not found") + subtensor_state = self.chain_state["SubtensorModule"] + 
subtensor_state["Commits"][netuid].setdefault(self.block_number, {})[uid] = data + + def get_commitment(self, netuid: int, uid: int, block: Optional[int] = None) -> str: + if block and self.block_number < block: + raise Exception("Cannot query block in the future") + block = block or self.block_number + + subtensor_state = self.chain_state["SubtensorModule"] + return subtensor_state["Commits"][netuid][block][uid] + + def query_subtensor( + self, + name: str, + block: Optional[int] = None, + params: Optional[list[object]] = [], + ) -> MockSubtensorValue: + if block: + if self.block_number < block: + raise Exception("Cannot query block in the future") + + else: + block = self.block_number + + state = self.chain_state["SubtensorModule"][name] + if state is not None: + # Use prefix + if len(params) > 0: + while state is not None and len(params) > 0: + state = state.get(params.pop(0), None) + if state is None: + return SimpleNamespace( + value=self._handle_type_default(name, params) + ) + + # Use block + state_at_block = state.get(block, None) + while state_at_block is None and block > 0: + block -= 1 + state_at_block = state.get(block, None) + if state_at_block is not None: + return SimpleNamespace(value=state_at_block) + + return SimpleNamespace(value=self._handle_type_default(name, params)) + else: + return SimpleNamespace(value=self._handle_type_default(name, params)) + + def query_map_subtensor( + self, + name: str, + block: Optional[int] = None, + params: Optional[list[object]] = [], + ) -> Optional[MockMapResult]: + """ + Note: Double map requires one param + """ + if block: + if self.block_number < block: + raise Exception("Cannot query block in the future") + + else: + block = self.block_number + + state = self.chain_state["SubtensorModule"][name] + if state is not None: + # Use prefix + if len(params) > 0: + while state is not None and len(params) > 0: + state = state.get(params.pop(0), None) + if state is None: + return MockMapResult([]) + + # Check if single 
map or double map + if len(state.keys()) == 0: + return MockMapResult([]) + + inner = list(state.values())[0] + # Should have at least one key + if len(inner.keys()) == 0: + raise Exception("Invalid state") + + # Check if double map + if isinstance(list(inner.values())[0], dict): + # is double map + raise ChainQueryError("Double map requires one param") + + # Iterate over each key and add value to list, max at block + records = [] + for key in state: + result = self._get_most_recent_storage(state[key], block) + if result is None: + continue # Skip if no result for this key at `block` or earlier + + records.append((key, result)) + + return MockMapResult(records) + else: + return MockMapResult([]) + + def query_constant( + self, module_name: str, constant_name: str, block: Optional[int] = None + ) -> Optional[object]: + if block: + if self.block_number < block: + raise Exception("Cannot query block in the future") + + else: + block = self.block_number + + state = self.chain_state.get(module_name, None) + if state is not None: + if constant_name in state: + state = state[constant_name] + else: + return None + + # Use block + state_at_block = self._get_most_recent_storage(state, block) + if state_at_block is not None: + return SimpleNamespace(value=state_at_block) + + return state_at_block["data"]["free"] # Can be None + else: + return None + + def get_current_block(self) -> int: + return self.block_number + + # ==== Balance RPC methods ==== + + def get_balance(self, address: str, block: int = None) -> "Balance": + if block: + if self.block_number < block: + raise Exception("Cannot query block in the future") + + else: + block = self.block_number + + state = self.chain_state["System"]["Account"] + if state is not None: + if address in state: + state = state[address] + else: + return Balance(0) + + # Use block + balance_state = state["data"]["free"] + state_at_block = self._get_most_recent_storage( + balance_state, block + ) # Can be None + if state_at_block is not 
None: + bal_as_int = state_at_block + return Balance.from_rao(bal_as_int) + else: + return Balance(0) + else: + return Balance(0) + + # ==== Neuron RPC methods ==== + + def neuron_for_uid( + self, uid: int, netuid: int, block: Optional[int] = None + ) -> Optional[NeuronInfo]: + if uid is None: + return NeuronInfo.get_null_neuron() + + if block: + if self.block_number < block: + raise Exception("Cannot query block in the future") + + else: + block = self.block_number + + if netuid not in self.chain_state["SubtensorModule"]["NetworksAdded"]: + return None + + neuron_info = self._neuron_subnet_exists(uid, netuid, block) + if neuron_info is None: + return None + + else: + return neuron_info + + def neurons(self, netuid: int, block: Optional[int] = None) -> list[NeuronInfo]: + if netuid not in self.chain_state["SubtensorModule"]["NetworksAdded"]: + raise Exception("Subnet does not exist") + + neurons = [] + subnet_n = self._get_most_recent_storage( + self.chain_state["SubtensorModule"]["SubnetworkN"][netuid], block + ) + for uid in range(subnet_n): + neuron_info = self.neuron_for_uid(uid, netuid, block) + if neuron_info is not None: + neurons.append(neuron_info) + + return neurons + + @staticmethod + def _get_most_recent_storage( + storage: dict[BlockNumber, Any], block_number: Optional[int] = None + ) -> Any: + if block_number is None: + items = list(storage.items()) + items.sort(key=lambda x: x[0], reverse=True) + if len(items) == 0: + return None + + return items[0][1] + + else: + while block_number >= 0: + if block_number in storage: + return storage[block_number] + + block_number -= 1 + + return None + + def _get_axon_info( + self, netuid: int, hotkey: str, block: Optional[int] = None + ) -> AxonInfoDict: + # Axons [netuid][hotkey][block_number] + subtensor_state = self.chain_state["SubtensorModule"] + if netuid not in subtensor_state["Axons"]: + return AxonInfoDict.default() + + if hotkey not in subtensor_state["Axons"][netuid]: + return AxonInfoDict.default() + + 
    def _get_prometheus_info(
        self, netuid: int, hotkey: str, block: Optional[int] = None
    ) -> PrometheusInfoDict:
        """Served-prometheus record for (netuid, hotkey), or the zeroed default.

        Storage layout: Prometheus[netuid][hotkey][block_number].
        """
        subtensor_state = self.chain_state["SubtensorModule"]
        if netuid not in subtensor_state["Prometheus"]:
            return PrometheusInfoDict.default()

        if hotkey not in subtensor_state["Prometheus"][netuid]:
            return PrometheusInfoDict.default()

        result = self._get_most_recent_storage(
            subtensor_state["Prometheus"][netuid][hotkey], block
        )
        if not result:
            return PrometheusInfoDict.default()

        return result

    def _neuron_subnet_exists(
        self, uid: int, netuid: int, block: Optional[int] = None
    ) -> Optional[NeuronInfo]:
        """Assemble a full ``NeuronInfo`` for (uid, netuid) from the raw mock
        storages, or return None if the subnet/uid/hotkey is unknown.

        Raw u16 storages are normalised to floats; emission is converted from
        RAO to TAO.
        """
        subtensor_state = self.chain_state["SubtensorModule"]
        if netuid not in subtensor_state["NetworksAdded"]:
            return None

        # uid must be below the subnet's current neuron count.
        if self._get_most_recent_storage(subtensor_state["SubnetworkN"][netuid]) <= uid:
            return None

        hotkey = self._get_most_recent_storage(subtensor_state["Keys"][netuid][uid])
        if hotkey is None:
            return None

        axon_info_ = self._get_axon_info(netuid, hotkey, block)

        prometheus_info = self._get_prometheus_info(netuid, hotkey, block)

        # NOTE(review): Owner is keyed by hotkey only here (no netuid level),
        # unlike the per-subnet storages below — confirm against setup().
        coldkey = self._get_most_recent_storage(subtensor_state["Owner"][hotkey], block)
        active = self._get_most_recent_storage(
            subtensor_state["Active"][netuid][uid], block
        )
        rank = self._get_most_recent_storage(
            subtensor_state["Rank"][netuid][uid], block
        )
        emission = self._get_most_recent_storage(
            subtensor_state["Emission"][netuid][uid], block
        )
        incentive = self._get_most_recent_storage(
            subtensor_state["Incentive"][netuid][uid], block
        )
        consensus = self._get_most_recent_storage(
            subtensor_state["Consensus"][netuid][uid], block
        )
        trust = self._get_most_recent_storage(
            subtensor_state["Trust"][netuid][uid], block
        )
        validator_trust = self._get_most_recent_storage(
            subtensor_state["ValidatorTrust"][netuid][uid], block
        )
        dividends = self._get_most_recent_storage(
            subtensor_state["Dividends"][netuid][uid], block
        )
        pruning_score = self._get_most_recent_storage(
            subtensor_state["PruningScores"][netuid][uid], block
        )
        last_update = self._get_most_recent_storage(
            subtensor_state["LastUpdate"][netuid][uid], block
        )
        validator_permit = self._get_most_recent_storage(
            subtensor_state["ValidatorPermit"][netuid][uid], block
        )

        weights = self._get_most_recent_storage(
            subtensor_state["Weights"][netuid][uid], block
        )
        bonds = self._get_most_recent_storage(
            subtensor_state["Bonds"][netuid][uid], block
        )

        # Stake per delegating coldkey, in RAO -> Balance.
        stake_dict = {
            coldkey: Balance.from_rao(
                self._get_most_recent_storage(
                    subtensor_state["Stake"][hotkey][coldkey], block
                )
            )
            for coldkey in subtensor_state["Stake"][hotkey]
        }

        stake = sum(stake_dict.values())

        # Decode raw storage values into the NeuronInfo field formats.
        weights = [[int(weight[0]), int(weight[1])] for weight in weights]
        bonds = [[int(bond[0]), int(bond[1])] for bond in bonds]
        rank = u16_normalized_float(rank)
        emission = emission / RAOPERTAO
        incentive = u16_normalized_float(incentive)
        consensus = u16_normalized_float(consensus)
        trust = u16_normalized_float(trust)
        validator_trust = u16_normalized_float(validator_trust)
        dividends = u16_normalized_float(dividends)
        prometheus_info = PrometheusInfo.fix_decoded_values(prometheus_info)
        axon_info_ = AxonInfo.from_neuron_info(
            {"hotkey": hotkey, "coldkey": coldkey, "axon_info": axon_info_}
        )

        neuron_info = NeuronInfo(
            hotkey=hotkey,
            coldkey=coldkey,
            uid=uid,
            netuid=netuid,
            active=active,
            rank=rank,
            emission=emission,
            incentive=incentive,
            consensus=consensus,
            trust=trust,
            validator_trust=validator_trust,
            dividends=dividends,
            pruning_score=pruning_score,
            last_update=last_update,
            validator_permit=validator_permit,
            stake=stake,
            stake_dict=stake_dict,
            total_stake=stake,
            prometheus_info=prometheus_info,
            axon_info=axon_info_,
            weights=weights,
            bonds=bonds,
            is_null=False,
        )

        return neuron_info

    def neurons_lite(
        self, netuid: int, block: Optional[int] = None
    ) -> list[NeuronInfoLite]:
        """All neurons on `netuid` in lite form (delegates to
        ``neuron_for_uid_lite`` per uid)."""
        if netuid not in self.chain_state["SubtensorModule"]["NetworksAdded"]:
            raise Exception("Subnet does not exist")

        neurons = []
        subnet_n = self._get_most_recent_storage(
            self.chain_state["SubtensorModule"]["SubnetworkN"][netuid]
        )
        for uid in range(subnet_n):
            neuron_info = self.neuron_for_uid_lite(uid, netuid, block)
            if neuron_info is not None:
                neurons.append(neuron_info)

        return neurons

    def get_transfer_fee(
        self, wallet: "Wallet", dest: str, value: Union["Balance", float, int]
    ) -> "Balance":
        """Fixed mock transfer fee of 700 RAO."""
        return Balance(700)

    def do_transfer(
        self,
        wallet: "Wallet",
        dest: str,
        transfer_balance: "Balance",
        wait_for_inclusion: bool = True,
        wait_for_finalization: bool = False,
    ) -> tuple[bool, Optional[str], Optional[str]]:
        """Move `transfer_balance` from the wallet's coldkey to `dest` at the
        current block, charging the mock fee and enforcing the existential
        deposit.

        Raises:
            Exception: if the source balance cannot cover
                amount + fee + existential deposit.
        """
        bal = self.get_balance(wallet.coldkeypub.ss58_address)
        dest_bal = self.get_balance(dest)
        transfer_fee = self.get_transfer_fee(wallet, dest, transfer_balance)

        existential_deposit = self.get_existential_deposit()

        if bal < transfer_balance + existential_deposit + transfer_fee:
            raise Exception("Insufficient balance")

        # Remove from the free balance
        self.chain_state["System"]["Account"][wallet.coldkeypub.ss58_address]["data"][
            "free"
        ][self.block_number] = (bal - transfer_balance - transfer_fee).rao

        # Add to the free balance
        if dest not in self.chain_state["System"]["Account"]:
            self.chain_state["System"]["Account"][dest] = {"data": {"free": {}}}

        self.chain_state["System"]["Account"][dest]["data"]["free"][
            self.block_number
        ] = (dest_bal + transfer_balance).rao

        return True, None, None

    @staticmethod
    def min_required_stake():
        """
        As the minimum required stake may change, this method allows us to dynamically
        update the amount in the mock without updating the tests
        """
        # valid minimum threshold as of 2024/05/01
        return 100_000_000  # RAO

    def do_serve_prometheus(
        self,
        wallet: "Wallet",
        call_params: "PrometheusServeCallParams",
        wait_for_inclusion: bool = False,
        wait_for_finalization: bool = True,
    ) -> tuple[bool, Optional[str]]:
        """Mock prometheus-serve extrinsic: always succeeds, no state change."""
        return True, None

    def do_set_weights(
        self,
        wallet: "Wallet",
        netuid: int,
        uids: list[int],  # NOTE(review): was annotated `int`; vals is a list, so a list of uids looks intended — confirm
        vals: list[int],
        version_key: int,
        wait_for_inclusion: bool = False,
        wait_for_finalization: bool = True,
    ) -> tuple[bool, Optional[str]]:
        """Mock set-weights extrinsic: always succeeds, no state change."""
        return True, None

    def do_serve_axon(
        self,
        wallet: "Wallet",
        call_params: "AxonServeCallParams",
        wait_for_inclusion: bool = False,
        wait_for_finalization: bool = True,
    ) -> tuple[bool, Optional[str]]:
        """Mock axon-serve extrinsic: always succeeds, no state change."""
        return True, None
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +"""Utils for handling local network with ip and ports.""" + +import json +import os +import socket +import urllib +from functools import wraps +from typing import Optional + +import netaddr +import requests + +from bittensor.utils.btlogging import logging + + +def int_to_ip(int_val: int) -> str: + """Maps an integer to a unique ip-string + Args: + int_val (int): + The integer representation of an ip. Must be in the range (0, 3.4028237e+38). + + Returns: + str_val (str): + The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 + + Raises: + netaddr.core.AddrFormatError (Exception): Raised when the passed int_vals is not a valid ip int value. + """ + return str(netaddr.IPAddress(int_val)) + + +def ip_to_int(str_val: str) -> int: + """Maps an ip-string to a unique integer. + arg: + str_val (:tyep:`str`, `required): + The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 + + Returns: + int_val (:type:`int128`, `required`): + The integer representation of an ip. Must be in the range (0, 3.4028237e+38). + + Raises: + netaddr.core.AddrFormatError (Exception): + Raised when the passed str_val is not a valid ip string value. + """ + return int(netaddr.IPAddress(str_val)) + + +def ip_version(str_val: str) -> int: + """Returns the ip version (IPV4 or IPV6). + arg: + str_val (:tyep:`str`, `required): + The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 + + Returns: + int_val (:type:`int128`, `required`): + The ip version (Either 4 or 6 for IPv4/IPv6) + + Raises: + netaddr.core.AddrFormatError (Exception): + Raised when the passed str_val is not a valid ip string value. 
+ """ + return int(netaddr.IPAddress(str_val).version) + + +def ip__str__(ip_type: int, ip_str: str, port: int): + """Return a formatted ip string""" + return "/ipv%i/%s:%i" % (ip_type, ip_str, port) + + +class ExternalIPNotFound(Exception): + """Raised if we cannot attain your external ip from CURL/URLLIB/IPIFY/AWS""" + + +def get_external_ip() -> str: + """Checks CURL/URLLIB/IPIFY/AWS for your external ip. + Returns: + external_ip (:obj:`str` `required`): + Your routers external facing ip as a string. + + Raises: + ExternalIPNotFound (Exception): + Raised if all external ip attempts fail. + """ + # --- Try AWS + try: + external_ip = requests.get("https://checkip.amazonaws.com").text.strip() + assert isinstance(ip_to_int(external_ip), int) + return str(external_ip) + except Exception: + pass + + # --- Try ipconfig. + try: + process = os.popen("curl -s ifconfig.me") + external_ip = process.readline() + process.close() + assert isinstance(ip_to_int(external_ip), int) + return str(external_ip) + except Exception: + pass + + # --- Try ipinfo. 
+ try: + process = os.popen("curl -s https://ipinfo.io") + external_ip = json.loads(process.read())["ip"] + process.close() + assert isinstance(ip_to_int(external_ip), int) + return str(external_ip) + except Exception: + pass + + # --- Try myip.dnsomatic + try: + process = os.popen("curl -s myip.dnsomatic.com") + external_ip = process.readline() + process.close() + assert isinstance(ip_to_int(external_ip), int) + return str(external_ip) + except Exception: + pass + + # --- Try urllib ipv6 + try: + external_ip = urllib.request.urlopen("https://ident.me").read().decode("utf8") + assert isinstance(ip_to_int(external_ip), int) + return str(external_ip) + except Exception: + pass + + # --- Try Wikipedia + try: + external_ip = requests.get("https://www.wikipedia.org").headers["X-Client-IP"] + assert isinstance(ip_to_int(external_ip), int) + return str(external_ip) + except Exception: + pass + + raise ExternalIPNotFound + + +def get_formatted_ws_endpoint_url(endpoint_url: Optional[str]) -> Optional[str]: + """ + Returns a formatted websocket endpoint url. + Note: The port (or lack thereof) is left unchanged + Args: + endpoint_url (Optional[str]): + The endpoint url to format. + Returns: + formatted_endpoint_url (Optional[str]): The formatted endpoint url. 
In the form of ws:// or wss:// + """ + if endpoint_url is None: + return None + + if endpoint_url[0:6] != "wss://" and endpoint_url[0:5] != "ws://": + endpoint_url = f"ws://{endpoint_url}" + + return endpoint_url + + +def ensure_connected(func): + """Decorator ensuring the function executes with an active substrate connection.""" + + @wraps(func) + def wrapper(self, *args, **kwargs): + """Wrapper function where `self` argument is Subtensor instance with the substrate connection.""" + # Check the socket state before method execution + if ( + # connection was closed correctly + self.substrate.websocket.sock is None + # connection has a broken pipe + or self.substrate.websocket.sock.getsockopt( + socket.SOL_SOCKET, socket.SO_ERROR + ) + != 0 + ): + logging.info("Reconnection substrate...") + self._get_substrate() + # Execute the method if the connection is active or after reconnecting + return func(self, *args, **kwargs) + + return wrapper diff --git a/bittensor/utils/registration.py b/bittensor/utils/registration.py new file mode 100644 index 0000000000..4d0cdb93d6 --- /dev/null +++ b/bittensor/utils/registration.py @@ -0,0 +1,99 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. 
+# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import functools +import os +from typing import TYPE_CHECKING + +import numpy + +from bittensor.utils.btlogging import logging + + +def use_torch() -> bool: + """Force the use of torch over numpy for certain operations.""" + return True if os.getenv("USE_TORCH") == "1" else False + + +def legacy_torch_api_compat(func): + """ + Convert function operating on numpy Input&Output to legacy torch Input&Output API if `use_torch()` is True. + + Args: + func (function): Function with numpy Input/Output to be decorated. + + Returns: + decorated (function): Decorated function. + """ + + @functools.wraps(func) + def decorated(*args, **kwargs): + if use_torch(): + # if argument is a Torch tensor, convert it to numpy + args = [ + arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg + for arg in args + ] + kwargs = { + key: value.cpu().numpy() if isinstance(value, torch.Tensor) else value + for key, value in kwargs.items() + } + ret = func(*args, **kwargs) + if use_torch(): + # if return value is a numpy array, convert it to Torch tensor + if isinstance(ret, numpy.ndarray): + ret = torch.from_numpy(ret) + return ret + + return decorated + + +@functools.cache +def _get_real_torch(): + try: + import torch as _real_torch + except ImportError: + _real_torch = None + return _real_torch + + +def log_no_torch_error(): + logging.error( + "This command requires torch. 
You can install torch for bittensor" + ' with `pip install bittensor[torch]` or `pip install ".[torch]"`' + " if installing from source, and then run the command with USE_TORCH=1 {command}" + ) + + +class LazyLoadedTorch: + """A lazy-loading proxy for the torch module.""" + + def __bool__(self): + return bool(_get_real_torch()) + + def __getattr__(self, name): + if real_torch := _get_real_torch(): + return getattr(real_torch, name) + else: + log_no_torch_error() + raise ImportError("torch not installed") + + +if TYPE_CHECKING: + import torch +else: + torch = LazyLoadedTorch() diff --git a/bittensor/utils/subnets.py b/bittensor/utils/subnets.py new file mode 100644 index 0000000000..2b42bead98 --- /dev/null +++ b/bittensor/utils/subnets.py @@ -0,0 +1,77 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +from abc import ABC, abstractmethod +from typing import Any, Union, Optional, TYPE_CHECKING + +from bittensor.core.axon import Axon +from bittensor.core.dendrite import Dendrite +from bittensor.utils.btlogging import logging + +# For annotation purposes +if TYPE_CHECKING: + from bittensor_wallet import Wallet + from bittensor.core.synapse import Synapse + + +# Community uses this class +class SubnetsAPI(ABC): + """This class is not used within the bittensor package, but is actively used by the community.""" + + def __init__(self, wallet: "Wallet"): + self.wallet = wallet + self.dendrite = Dendrite(wallet=wallet) + + async def __call__(self, *args, **kwargs): + return await self.query_api(*args, **kwargs) + + @abstractmethod + def prepare_synapse(self, *args, **kwargs) -> Any: + """Prepare the synapse-specific payload.""" + + @abstractmethod + def process_responses(self, responses: list[Union["Synapse", Any]]) -> Any: + """Process the responses from the network.""" + + async def query_api( + self, + axons: Union["Axon", list["Axon"]], + deserialize: Optional[bool] = False, + timeout: Optional[int] = 12, + **kwargs, + ) -> Any: + """ + Queries the API nodes of a subnet using the given synapse and bespoke query function. + + Args: + axons (Union[bt.axon, list[bt.axon]]): The list of axon(s) to query. + deserialize (Optional[bool]): Whether to deserialize the responses. Defaults to False. + timeout (Optional[int]): The timeout in seconds for the query. Defaults to 12. + **kwargs: Keyword arguments for the prepare_synapse_fn. + + Returns: + Any: The result of the process_responses_fn. 
+ """ + synapse = self.prepare_synapse(**kwargs) + logging.debug(f"Querying validator axons with synapse {synapse.name}...") + responses = await self.dendrite( + axons=axons, + synapse=synapse, + deserialize=deserialize, + timeout=timeout, + ) + return self.process_responses(responses) diff --git a/bittensor/utils/version.py b/bittensor/utils/version.py new file mode 100644 index 0000000000..1134361ade --- /dev/null +++ b/bittensor/utils/version.py @@ -0,0 +1,134 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import time +from pathlib import Path +from typing import Optional + +import requests +from packaging.version import Version + +from bittensor.core.settings import __version__, PIPADDRESS +from bittensor.utils.btlogging import logging + +VERSION_CHECK_THRESHOLD = 86400 + + +class VersionCheckError(Exception): + """Exception raised for errors in the version check process.""" + + +def _get_version_file_path() -> Path: + return Path.home() / ".bittensor" / ".last_known_version" + + +def _get_version_from_file(version_file: Path) -> Optional[str]: + try: + mtime = version_file.stat().st_mtime + logging.debug(f"Found version file, last modified: {mtime}") + diff = time.time() - mtime + + if diff >= VERSION_CHECK_THRESHOLD: + logging.debug("Version file expired") + return None + + return version_file.read_text() + except FileNotFoundError: + logging.debug("No bittensor version file found") + return None + except OSError: + logging.exception("Failed to read version file") + return None + + +def _get_version_from_pypi(timeout: int = 15) -> str: + logging.debug(f"Checking latest Bittensor version at: {PIPADDRESS}") + try: + response = requests.get(PIPADDRESS, timeout=timeout) + latest_version = response.json()["info"]["version"] + return latest_version + except requests.exceptions.RequestException: + logging.exception("Failed to get latest version from pypi") + raise + + +def get_and_save_latest_version(timeout: int = 15) -> str: + """ + Retrieves and saves the latest version of Bittensor. + + Args: + timeout (int): The timeout for the request to PyPI in seconds. Default is ``15``. + + Returns: + str: The latest version of Bittensor. 
+ """ + version_file = _get_version_file_path() + + if last_known_version := _get_version_from_file(version_file): + return last_known_version + + latest_version = _get_version_from_pypi(timeout) + + try: + version_file.write_text(latest_version) + except OSError: + logging.exception("Failed to save latest version to file") + + return latest_version + + +def check_version(timeout: int = 15): + """ + Check if the current version of Bittensor is up-to-date with the latest version on PyPi. + Raises a VersionCheckError if the version check fails. + + Args: + timeout (int): The timeout for the request to PyPI in seconds. Default is ``15``. + """ + + try: + latest_version = get_and_save_latest_version(timeout) + + if Version(latest_version) > Version(__version__): + print( + f"\u001b[33mBittensor Version: Current {__version__}/Latest {latest_version}\n" + f"Please update to the latest version at your earliest convenience. " + "Run the following command to upgrade:\n\n\u001b[0mpython -m pip install --upgrade bittensor" + ) + pass + except Exception as e: + raise VersionCheckError("Version check failed") from e + + +def version_checking(timeout: int = 15): + """Deprecated, kept for backwards compatibility. Use check_version() instead. + + Args: + timeout (int): The timeout for calling :func:``check_version`` function. Default is ``15``. 
+ """ + + from warnings import warn + + warn( + "version_checking() is deprecated, please use check_version() instead", + DeprecationWarning, + ) + + try: + check_version(timeout) + except VersionCheckError: + logging.exception("Version check failed") diff --git a/bittensor/utils/weight_utils.py b/bittensor/utils/weight_utils.py new file mode 100644 index 0000000000..d7c86bcdca --- /dev/null +++ b/bittensor/utils/weight_utils.py @@ -0,0 +1,414 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +"""Conversion for weight between chain representation and np.array or torch.Tensor""" + +import hashlib +import logging +import typing +from typing import Union, Optional + +import numpy as np + +from numpy.typing import NDArray +from scalecodec import U16, ScaleBytes, Vec +from substrateinterface import Keypair + +from bittensor.utils.btlogging import logging +from bittensor.utils.registration import legacy_torch_api_compat, torch, use_torch + +if typing.TYPE_CHECKING: + from bittensor.core.metagraph import Metagraph + from bittensor.core.subtensor import Subtensor + + +U32_MAX = 4294967295 +U16_MAX = 65535 + + +# Uses in `bittensor.utils.weight_utils.process_weights_for_netuid` +@legacy_torch_api_compat +def normalize_max_weight( + x: Union[NDArray[np.float32], "torch.FloatTensor"], limit: float = 0.1 +) -> Union[NDArray[np.float32], "torch.FloatTensor"]: + """Normalizes the tensor x so that sum(x) = 1 and the max value is not greater than the limit. + Args: + x (:obj:`np.float32`): Tensor to be max_value normalized. + limit: float: Max value after normalization. + + Returns: + y (:obj:`np.float32`): Normalized x tensor. 
+ """ + epsilon = 1e-7 # For numerical stability after normalization + + weights = x.copy() + values = np.sort(weights) + + if x.sum() == 0 or x.shape[0] * limit <= 1: + return np.ones_like(x) / x.shape[0] + else: + estimation = values / values.sum() + + if estimation.max() <= limit: + return weights / weights.sum() + + # Find the cumulative sum and sorted tensor + cumsum = np.cumsum(estimation, 0) + + # Determine the index of cutoff + estimation_sum = np.array( + [(len(values) - i - 1) * estimation[i] for i in range(len(values))] + ) + n_values = (estimation / (estimation_sum + cumsum + epsilon) < limit).sum() + + # Determine the cutoff based on the index + cutoff_scale = (limit * cumsum[n_values - 1] - epsilon) / ( + 1 - (limit * (len(estimation) - n_values)) + ) + cutoff = cutoff_scale * values.sum() + + # Applying the cutoff + weights[weights > cutoff] = cutoff + + y = weights / weights.sum() + + return y + + +# Metagraph uses this function. +def convert_weight_uids_and_vals_to_tensor( + n: int, uids: list[int], weights: list[int] +) -> Union[NDArray[np.float32], "torch.FloatTensor"]: + """ + Converts weights and uids from chain representation into a np.array (inverse operation from convert_weights_and_uids_for_emit). + + Args: + n (int): number of neurons on network. + uids (list[int]): Tensor of uids as destinations for passed weights. + weights (list[int]): Tensor of weights. + + Returns: + row_weights (np.float32 or torch.FloatTensor): Converted row weights. + """ + row_weights = ( + torch.zeros([n], dtype=torch.float32) + if use_torch() + else np.zeros([n], dtype=np.float32) + ) + for uid_j, wij in list(zip(uids, weights)): + row_weights[uid_j] = float( + wij + ) # assumes max-upscaled values (w_max = U16_MAX). + row_sum = row_weights.sum() + if row_sum > 0: + row_weights /= row_sum # normalize + return row_weights + + +# Metagraph uses this function. 
+def convert_root_weight_uids_and_vals_to_tensor( + n: int, uids: list[int], weights: list[int], subnets: list[int] +) -> Union[NDArray[np.float32], "torch.FloatTensor"]: + """Converts root weights and uids from chain representation into a np.array or torch FloatTensor (inverse operation from convert_weights_and_uids_for_emit) + Args: + n (int): number of neurons on network. + uids (list[int]): Tensor of uids as destinations for passed weights. + weights (list[int]): Tensor of weights. + subnets (list[int]): list of subnets on the network. + + Returns: + row_weights (np.float32): Converted row weights. + """ + row_weights = ( + torch.zeros([n], dtype=torch.float32) + if use_torch() + else np.zeros([n], dtype=np.float32) + ) + for uid_j, wij in list(zip(uids, weights)): + if uid_j in subnets: + index_s = subnets.index(uid_j) + row_weights[index_s] = float( + wij + ) # assumes max-upscaled values (w_max = U16_MAX). + else: + logging.warning( + f"Incorrect Subnet uid {uid_j} in Subnets {subnets}. The subnet is unavailable at the moment." + ) + continue + row_sum = row_weights.sum() + if row_sum > 0: + row_weights /= row_sum # normalize + return row_weights + + +# Metagraph uses this function. +def convert_bond_uids_and_vals_to_tensor( + n: int, uids: list[int], bonds: list[int] +) -> Union[NDArray[np.int64], "torch.LongTensor"]: + """Converts bond and uids from chain representation into a np.array. + + Args: + n (int): number of neurons on network. + uids (list[int]): Tensor of uids as destinations for passed bonds. + bonds (list[int]): Tensor of bonds. + + Returns: + row_bonds (np.float32): Converted row bonds. 
+ """ + row_bonds = ( + torch.zeros([n], dtype=torch.int64) + if use_torch() + else np.zeros([n], dtype=np.int64) + ) + for uid_j, bij in list(zip(uids, bonds)): + row_bonds[uid_j] = int(bij) + return row_bonds + + +# This is used by the community via `bittensor.api.extrinsics.set_weights.set_weights_extrinsic` +def convert_weights_and_uids_for_emit( + uids: Union[NDArray[np.int64], "torch.LongTensor"], + weights: Union[NDArray[np.float32], "torch.FloatTensor"], +) -> tuple[list[int], list[int]]: + """Converts weights into integer u32 representation that sum to MAX_INT_WEIGHT. + + Args: + uids (np.int64):Tensor of uids as destinations for passed weights. + weights (np.float32):Tensor of weights. + + Returns: + weight_uids (list[int]): Uids as a list. + weight_vals (list[int]): Weights as a list. + """ + # Checks. + weights = weights.tolist() + uids = uids.tolist() + if min(weights) < 0: + raise ValueError(f"Passed weight is negative cannot exist on chain {weights}") + if min(uids) < 0: + raise ValueError(f"Passed uid is negative cannot exist on chain {uids}") + if len(uids) != len(weights): + raise ValueError( + f"Passed weights and uids must have the same length, got {len(uids)} and {len(weights)}" + ) + if sum(weights) == 0: + return [], [] # Nothing to set on chain. + else: + max_weight = float(max(weights)) + weights = [ + float(value) / max_weight for value in weights + ] # max-upscale values (max_weight = 1). + + weight_vals = [] + weight_uids = [] + for i, (weight_i, uid_i) in enumerate(list(zip(weights, uids))): + uint16_val = round( + float(weight_i) * int(U16_MAX) + ) # convert to int representation. 
+ + # Filter zeros + if uint16_val != 0: # Filter zeros + weight_vals.append(uint16_val) + weight_uids.append(uid_i) + + return weight_uids, weight_vals + + +# The community uses / bittensor does not +def process_weights_for_netuid( + uids: Union[NDArray[np.int64], "torch.Tensor"], + weights: Union[NDArray[np.float32], "torch.Tensor"], + netuid: int, + subtensor: "Subtensor", + metagraph: Optional["Metagraph"] = None, + exclude_quantile: int = 0, +) -> Union[ + tuple["torch.Tensor", "torch.FloatTensor"], + tuple[NDArray[np.int64], NDArray[np.float32]], +]: + """ + Processes weight tensors for a given subnet id using the provided weight and UID arrays, applying constraints and normalization based on the subtensor and metagraph data. This function can handle both NumPy arrays and PyTorch tensors. + + Args: + uids (Union[NDArray[np.int64], "torch.Tensor"]): Array of unique identifiers of the neurons. + weights (Union[NDArray[np.float32], "torch.Tensor"]): Array of weights associated with the user IDs. + netuid (int): The network uid to process weights for. + subtensor (Subtensor): Subtensor instance to access blockchain data. + metagraph (Optional[Metagraph]): Metagraph instance for additional network data. If None, it is fetched from the subtensor using the netuid. + exclude_quantile (int): Quantile threshold for excluding lower weights. Defaults to ``0``. + + Returns: + Union[tuple["torch.Tensor", "torch.FloatTensor"], tuple[NDArray[np.int64], NDArray[np.float32]]]: tuple containing the array of user IDs and the corresponding normalized weights. The data type of the return matches the type of the input weights (NumPy or PyTorch). + """ + + logging.debug("process_weights_for_netuid()") + logging.debug("weights", *weights) + logging.debug("netuid", netuid) + logging.debug("subtensor", subtensor) + logging.debug("metagraph", metagraph) + + # Get latest metagraph from chain if metagraph is None. 
+ if metagraph is None: + metagraph = subtensor.metagraph(netuid) + + # Cast weights to floats. + if use_torch(): + if not isinstance(weights, torch.FloatTensor): + weights = weights.type(torch.float32) + else: + if not isinstance(weights, np.float32): + weights = weights.astype(np.float32) + + # Network configuration parameters from an subtensor. + # These parameters determine the range of acceptable weights for each neuron. + quantile = exclude_quantile / U16_MAX + min_allowed_weights = subtensor.min_allowed_weights(netuid=netuid) + max_weight_limit = subtensor.max_weight_limit(netuid=netuid) + logging.debug("quantile", quantile) + logging.debug("min_allowed_weights", min_allowed_weights) + logging.debug("max_weight_limit", max_weight_limit) + + # Find all non zero weights. + non_zero_weight_idx = ( + torch.argwhere(weights > 0).squeeze(dim=1) + if use_torch() + else np.argwhere(weights > 0).squeeze(axis=1) + ) + non_zero_weight_uids = uids[non_zero_weight_idx] + non_zero_weights = weights[non_zero_weight_idx] + nzw_size = non_zero_weights.numel() if use_torch() else non_zero_weights.size + if nzw_size == 0 or metagraph.n < min_allowed_weights: + logging.warning("No non-zero weights returning all ones.") + final_weights = ( + torch.ones((metagraph.n)).to(metagraph.n) / metagraph.n + if use_torch() + else np.ones((metagraph.n), dtype=np.int64) / metagraph.n + ) + logging.debug("final_weights", final_weights) + final_weights_count = ( + torch.tensor(list(range(len(final_weights)))) + if use_torch() + else np.arange(len(final_weights)) + ) + return ( + (final_weights_count, final_weights) + if use_torch() + else (final_weights_count, final_weights) + ) + + elif nzw_size < min_allowed_weights: + logging.warning( + "No non-zero weights less then min allowed weight, returning all ones." + ) + # ( const ): Should this be np.zeros( ( metagraph.n ) ) to reset everyone to build up weight? 
+ weights = ( + torch.ones((metagraph.n)).to(metagraph.n) * 1e-5 + if use_torch() + else np.ones((metagraph.n), dtype=np.int64) * 1e-5 + ) # creating minimum even non-zero weights + weights[non_zero_weight_idx] += non_zero_weights + logging.debug("final_weights", *weights) + normalized_weights = normalize_max_weight(x=weights, limit=max_weight_limit) + nw_arange = ( + torch.tensor(list(range(len(normalized_weights)))) + if use_torch() + else np.arange(len(normalized_weights)) + ) + return nw_arange, normalized_weights + + logging.debug("non_zero_weights", *non_zero_weights) + + # Compute the exclude quantile and find the weights in the lowest quantile + max_exclude = max(0, len(non_zero_weights) - min_allowed_weights) / len( + non_zero_weights + ) + exclude_quantile = min([quantile, max_exclude]) + lowest_quantile = ( + non_zero_weights.quantile(exclude_quantile) + if use_torch() + else np.quantile(non_zero_weights, exclude_quantile) + ) + logging.debug("max_exclude", max_exclude) + logging.debug("exclude_quantile", exclude_quantile) + logging.debug("lowest_quantile", lowest_quantile) + + # Exclude all weights below the allowed quantile. + non_zero_weight_uids = non_zero_weight_uids[lowest_quantile <= non_zero_weights] + non_zero_weights = non_zero_weights[lowest_quantile <= non_zero_weights] + logging.debug("non_zero_weight_uids", *non_zero_weight_uids) + logging.debug("non_zero_weights", *non_zero_weights) + + # Normalize weights and return. + normalized_weights = normalize_max_weight( + x=non_zero_weights, limit=max_weight_limit + ) + logging.debug("final_weights", normalized_weights) + + return non_zero_weight_uids, normalized_weights + + +def generate_weight_hash( + address: str, + netuid: int, + uids: list[int], + values: list[int], + version_key: int, + salt: list[int], +) -> str: + """ + Generate a valid commit hash from the provided weights. + + Args: + address (str): The account identifier. Wallet ss58_address. 
+ netuid (int): The network unique identifier. + uids (list[int]): The list of UIDs. + salt (list[int]): The salt to add to hash. + values (list[int]): The list of weight values. + version_key (int): The version key. + + Returns: + str: The generated commit hash. + """ + # Encode data using SCALE codec + wallet_address = ScaleBytes(Keypair(ss58_address=address).public_key) + netuid = ScaleBytes(netuid.to_bytes(2, "little")) + + vec_uids = Vec(data=None, sub_type="U16") + vec_uids.value = [U16(ScaleBytes(uid.to_bytes(2, "little"))) for uid in uids] + uids = ScaleBytes(vec_uids.encode().data) + + vec_values = Vec(data=None, sub_type="U16") + vec_values.value = [ + U16(ScaleBytes(value.to_bytes(2, "little"))) for value in values + ] + values = ScaleBytes(vec_values.encode().data) + + version_key = ScaleBytes(version_key.to_bytes(8, "little")) + + vec_salt = Vec(data=None, sub_type="U16") + vec_salt.value = [U16(ScaleBytes(salts.to_bytes(2, "little"))) for salts in salt] + salt = ScaleBytes(vec_salt.encode().data) + + data = wallet_address + netuid + uids + values + salt + version_key + + # Generate Blake2b hash of the data tuple + blake2b_hash = hashlib.blake2b(data.data, digest_size=32) + + # Convert the hash to hex string and add "0x" prefix + commit_hash = "0x" + blake2b_hash.hexdigest() + + return commit_hash diff --git a/contrib/CODE_REVIEW_DOCS.md b/contrib/CODE_REVIEW_DOCS.md new file mode 100644 index 0000000000..9909606a89 --- /dev/null +++ b/contrib/CODE_REVIEW_DOCS.md @@ -0,0 +1,72 @@ +# Code Review +### Conceptual Review + +A review can be a conceptual review, where the reviewer leaves a comment + * `Concept (N)ACK`, meaning "I do (not) agree with the general goal of this pull + request", + * `Approach (N)ACK`, meaning `Concept ACK`, but "I do (not) agree with the + approach of this change". + +A `NACK` needs to include a rationale why the change is not worthwhile. +NACKs without accompanying reasoning may be disregarded. 
+After conceptual agreement on the change, code review can be provided. A review +begins with `ACK BRANCH_COMMIT`, where `BRANCH_COMMIT` is the top of the PR +branch, followed by a description of how the reviewer did the review. The +following language is used within pull request comments: + + - "I have tested the code", involving change-specific manual testing in + addition to running the unit, functional, or fuzz tests, and in case it is + not obvious how the manual testing was done, it should be described; + - "I have not tested the code, but I have reviewed it and it looks + OK, I agree it can be merged"; + - A "nit" refers to a trivial, often non-blocking issue. + +### Code Review +Project maintainers reserve the right to weigh the opinions of peer reviewers +using common sense judgement and may also weigh based on merit. Reviewers that +have demonstrated a deeper commitment and understanding of the project over time +or who have clear domain expertise may naturally have more weight, as one would +expect in all walks of life. + +Where a patch set affects consensus-critical code, the bar will be much +higher in terms of discussion and peer review requirements, keeping in mind that +mistakes could be very costly to the wider community. This includes refactoring +of consensus-critical code. + +Where a patch set proposes to change the Bittensor consensus, it must have been +discussed extensively on the discord server and other channels, be accompanied by a widely +discussed BIP and have a generally widely perceived technical consensus of being +a worthwhile change based on the judgement of the maintainers. + +### Finding Reviewers + +As most reviewers are themselves developers with their own projects, the review +process can be quite lengthy, and some amount of patience is required. 
If you find +that you've been waiting for a pull request to be given attention for several +months, there may be a number of reasons for this, some of which you can do something +about: + + - It may be because of a feature freeze due to an upcoming release. During this time, + only bug fixes are taken into consideration. If your pull request is a new feature, + it will not be prioritized until after the release. Wait for the release. + - It may be because the changes you are suggesting do not appeal to people. Rather than + nits and critique, which require effort and means they care enough to spend time on your + contribution, thundering silence is a good sign of widespread (mild) dislike of a given change + (because people don't assume *others* won't actually like the proposal). Don't take + that personally, though! Instead, take another critical look at what you are suggesting + and see if it: changes too much, is too broad, doesn't adhere to the + [developer notes](DEVELOPMENT_WORKFLOW.md), is dangerous or insecure, is messily written, etc. + Identify and address any of the issues you find. Then ask e.g. on IRC if someone could give + their opinion on the concept itself. + - It may be because your code is too complex for all but a few people, and those people + may not have realized your pull request even exists. A great way to find people who + are qualified and care about the code you are touching is the + [Git Blame feature](https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/tracking-changes-in-a-file). Simply + look up who last modified the code you are changing and see if you can find + them and give them a nudge. Don't be incessant about the nudging, though. + - Finally, if all else fails, ask on IRC or elsewhere for someone to give your pull request + a look. 
If you think you've been waiting for an unreasonably long time (say, + more than a month) for no particular reason (a few lines changed, etc.), + this is totally fine. Try to return the favor when someone else is asking + for feedback on their code, and the universe balances out. + - Remember that the best thing you can do while waiting is give review to others! \ No newline at end of file diff --git a/contrib/CONTRIBUTING.md b/contrib/CONTRIBUTING.md new file mode 100644 index 0000000000..f9f4ed5f34 --- /dev/null +++ b/contrib/CONTRIBUTING.md @@ -0,0 +1,299 @@ +# Contributing to Bittensor + +The following is a set of guidelines for contributing to Bittensor, which are hosted in the [Opentensor Organization](https://github.com/opentensor) on GitHub. These are mostly guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request. + +## Table Of Contents +1. [I don't want to read this whole thing, I just have a question!!!](#i-dont-want-to-read-this-whole-thing-i-just-have-a-question) +1. [What should I know before I get started?](#what-should-i-know-before-i-get-started) +1. [Getting Started](#getting-started) + 1. [Good First Issue Label](#good-first-issue-label) + 1. [Beginner and Help-wanted Issues Label](#beginner-and-help-wanted-issues-label) +1. [How Can I Contribute?](#how-can-i-contribute) + 1. [Code Contribution General Guideline](#code-contribution-general-guidelines) + 1. [Pull Request Philosophy](#pull-request-philosophy) + 1. [Pull Request Process](#pull-request-process) + 1. [Testing](#testing) + 1. [Addressing Feedback](#addressing-feedback) + 1. [Squashing Commits](#squashing-commits) + 1. [Refactoring](#refactoring) + 1. [Peer Review](#peer-review) + 1. [Reporting Bugs](#reporting-bugs) + 1. [Suggesting Features](#suggesting-enhancements) + + +## I don't want to read this whole thing I just have a question! + +> **Note:** Please don't file an issue to ask a question. 
You'll get faster results by using the resources below. + +We have an official Discord server where the community chimes in with helpful advice if you have questions. +This is the fastest way to get an answer and the core development team is active on Discord. + +* [Official Bittensor Discord](https://discord.gg/7wvFuPJZgq) + +## What should I know before I get started? +Bittensor is still in the Alpha stages, and as such you will likely run into some problems in deploying your model or installing Bittensor itself. If you run into an issue or end up resolving an issue yourself, feel free to create a pull request with a fix or with a fix to the documentation. The documentation repository can be found [here](https://github.com/opentensor/docs). + +Additionally, note that the core implementation of Bittensor consists of two separate repositories: [The core Bittensor code](https://github.com/opentensor/bittensor) and the Bittensor Blockchain [subtensor](https://github.com/opentensor/subtensor). + +Supplemental repository for the Bittensor subnet template can be found [here](https://github.com/opentensor/bittensor-subnet-template). This is a great first place to look for getting your hands dirty and started learning and building on Bittensor. See the subnet links [page](https://github.com/opentensor/bittensor-subnet-template/blob/main/subnet_links.json) for a list of all the repositories for the active registered subnets. + +## Getting Started +New contributors are very welcome and needed. +Reviewing and testing is highly valued and the most effective way you can contribute as a new contributor. It also will teach you much more about the code and process than opening pull requests. + +Before you start contributing, familiarize yourself with the Bittensor Core build system and tests. Refer to the documentation in the repository on how to build Bittensor core and how to run the unit tests, functional tests. 
+ +There are many open issues of varying difficulty waiting to be fixed. If you're looking for somewhere to start contributing, check out the [good first issue](https://github.com/opentensor/bittensor/labels/good%20first%20issue) list or changes that are up for grabs. Some of them might no longer be applicable. So if you are interested, but unsure, you might want to leave a comment on the issue first. Also peruse the [issues](https://github.com/opentensor/bittensor/issues) tab for all open issues. + +### Good First Issue Label +The purpose of the good first issue label is to highlight which issues are suitable for a new contributor without a deep understanding of the codebase. + +However, good first issues can be solved by anyone. If they remain unsolved for a longer time, a frequent contributor might address them. + +You do not need to request permission to start working on an issue. However, you are encouraged to leave a comment if you are planning to work on it. This will help other contributors monitor which issues are actively being addressed and is also an effective way to request assistance if and when you need it. + +### Beginner and Help-wanted Issues Label +You can start by looking through these `beginner` and `help-wanted` issues: + +* [Beginner issues](https://github.com/opentensor/bittensor/labels/beginner) - issues which should only require a few lines of code, and a test or two. +* [Help wanted issues](https://github.com/opentensor/bittensor/labels/help%20wanted) - issues which should be a bit more involved than `beginner` issues. + +## Communication Channels +Most communication about Bittensor development happens on Discord channel. +Here's the link of Discord community. +[Bittensor Discord](https://discord.com/channels/799672011265015819/799672011814862902) + +And also here. +[Bittensor Community Discord](https://discord.com/channels/1120750674595024897/1120799375703162950) + +## How Can I Contribute? 
+ +You can contribute to Bittensor in one of two main ways (as well as many others): +1. [Bug](#reporting-bugs) reporting and fixes +2. New features and Bittensor [enhancements](#suggesting-enhancements) + +> Please follow the Bittensor [style guide](./STYLE.md) regardless of your contribution type. + +Here is a high-level summary: +- Code consistency is crucial; adhere to established programming language conventions. +- Use `ruff format .` to format your Python code; it ensures readability and consistency. +- Write concise Git commit messages; summarize changes in ~50 characters. +- Follow these six commit rules: + - Atomic Commits: Focus on one task or fix per commit. + - Subject and Body Separation: Use a blank line to separate the subject from the body. + - Subject Line Length: Keep it under 50 characters for readability. + - Imperative Mood: Write subject line as if giving a command or instruction. + - Body Text Width: Wrap text manually at 72 characters. + - Body Content: Explain what changed and why, not how. +- Make use of your commit messages to simplify project understanding and maintenance. + +> For clear examples of each of the commit rules, see the style guide's [rules](./STYLE.md#the-six-rules-of-a-great-commit) section. + +### Code Contribution General Guidelines + +> Review the Bittensor [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before contributing. + +If you're looking to contribute to Bittensor but unsure where to start, please join our community [discord](https://discord.gg/bittensor), a developer-friendly Bittensor town square. Start with [#development](https://discord.com/channels/799672011265015819/799678806159392768) and [#bounties](https://discord.com/channels/799672011265015819/1095684873810890883) to see what issues are currently posted. For a greater understanding of Bittensor's usage and development, check the [Bittensor Documentation](https://bittensor.com/docs). 
+ +#### Pull Request Philosophy + +Patchsets and enhancements should always be focused. A pull request could add a feature, fix a bug, or refactor code, but it should not contain a mixture of these. Please also avoid 'super' pull requests which attempt to do too much, are overly large, or overly complex as this makes review difficult. + +Specifically, pull requests must adhere to the following criteria: +- **Must** branch off from `staging`. Make sure that all your PRs are using `staging` branch as a base or will be closed. +- Contain fewer than 50 files. PRs with more than 50 files will be closed. +- Use the specific [template](./.github/pull_request_template.md) appropriate to your contribution. +- If a PR introduces a new feature, it *must* include corresponding tests. +- Other PRs (bug fixes, refactoring, etc.) should ideally also have tests, as they provide proof of concept and prevent regression. +- Categorize your PR properly by using GitHub labels. This aids in the review process by informing reviewers about the type of change at a glance. +- Make sure your code includes adequate comments. These should explain why certain decisions were made and how your changes work. +- If your changes are extensive, consider breaking your PR into smaller, related PRs. This makes your contributions easier to understand and review. +- Be active in the discussion about your PR. Respond promptly to comments and questions to help reviewers understand your changes and speed up the acceptance process. + +Generally, all pull requests must: + + - Have a clear use case, fix a demonstrable bug or serve the greater good of the project (e.g. refactoring for modularisation). + - Be well peer-reviewed. + - Follow code style guidelines. + - Not break the existing test suite. + - Where bugs are fixed, where possible, there should be unit tests demonstrating the bug and also proving the fix. + - Change relevant comments and documentation when behaviour of code changes. 
+ +#### Pull Request Process + +Please follow these steps to have your contribution considered by the maintainers: + +*Before* creating the PR: +1. Read the [development workflow](./DEVELOPMENT_WORKFLOW.md) defined for this repository to understand our workflow. +2. Ensure your PR meets the criteria stated in the 'Pull Request Philosophy' section. +3. Include relevant tests for any fixed bugs or new features as stated in the [testing guide](./TESTING.md). +4. Follow all instructions in [the template](https://github.com/opentensor/bittensor/blob/master/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md) to create the PR. +5. Ensure your commit messages are clear and concise. Include the issue number if applicable. +6. If you have multiple commits, rebase them into a single commit using `git rebase -i`. +7. Explain what your changes do and why you think they should be merged in the PR description consistent with the [style guide](./STYLE.md). + +*After* creating the PR: +1. Verify that all [status checks](https://help.github.com/articles/about-status-checks/) are passing after you submit your pull request. +2. Label your PR using GitHub's labeling feature. The labels help categorize the PR and streamline the review process. +3. Document your code with comments that provide a clear understanding of your changes. Explain any non-obvious parts of your code or design decisions you've made. +4. If your PR has extensive changes, consider splitting it into smaller, related PRs. This reduces the cognitive load on the reviewers and speeds up the review process. + +Please be responsive and participate in the discussion on your PR! This aids in clarifying any confusion or concerns and leads to quicker resolution and merging of your PR. + +> Note: If your changes are not ready for merge but you want feedback, create a draft pull request. + +Following these criteria will aid in quicker review and potential merging of your PR. 
+While the prerequisites above must be satisfied prior to having your pull request reviewed, the reviewer(s) may ask you to complete additional design work, tests, or other changes before your pull request can be ultimately accepted. + +When you are ready to submit your changes, create a pull request: + +> **Always** follow the [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before submitting pull requests. + +After you submit a pull request, it will be reviewed by the maintainers. They may ask you to make changes. Please respond to any comments and push your changes as a new commit. + +> Note: Be sure to merge the latest from "upstream" before making a pull request: + +```bash +git remote add upstream https://github.com/opentensor/bittensor.git +git fetch upstream +git merge upstream/ +git push origin +``` + +#### Testing +Before making a PR for any code changes, please write adequate testing with unittest and/or pytest if it is warranted. This is **mandatory** for new features and enhancements. See the [testing guide](./TESTING.md) for more complete information. + +You may also like to view the [/tests](https://github.com/opentensor/bittensor/tree/master/tests) for starter examples. + +Here is a quick summary: +- **Running Tests**: Use `pytest` from the root directory of the Bittensor repository to run all tests. To run a specific test file or a specific test within a file, specify it directly (e.g., `pytest tests/test_wallet.py::test_create_new_coldkey`). +- **Writing Tests**: When writing tests, cover both the "happy path" and any potential error conditions. Use the `assert` statement to verify the expected behavior of a function. +- **Mocking**: Use the `unittest.mock` library to mock certain functions or objects when you need to isolate the functionality you're testing. This allows you to control the behavior of these functions or objects during testing. 
+- **Test Coverage**: Use the `pytest-cov` plugin to measure your test coverage. Aim for high coverage but also ensure your tests are meaningful and accurately represent the conditions under which your code will run. +- **Continuous Integration**: Bittensor uses GitHub Actions for continuous integration. Tests are automatically run every time you push changes to the repository. Check the "Actions" tab of the Bittensor GitHub repository to view the results. + +Remember, testing is crucial for maintaining code health, catching issues early, and facilitating the addition of new features or refactoring of existing code. + +#### Addressing Feedback + +After submitting your pull request, expect comments and reviews from other contributors. You can add more commits to your pull request by committing them locally and pushing to your fork. + +You are expected to reply to any review comments before your pull request is merged. You may update the code or reject the feedback if you do not agree with it, but you should express so in a reply. If there is outstanding feedback and you are not actively working on it, your pull request may be closed. + +#### Squashing Commits + +If your pull request contains fixup commits (commits that change the same line of code repeatedly) or too fine-grained commits, you may be asked to [squash](https://git-scm.com/docs/git-rebase#_interactive_mode) your commits before it will be reviewed. The basic squashing workflow is shown below. + + git checkout your_branch_name + git rebase -i HEAD~n + # n is normally the number of commits in the pull request. + # Set commits (except the one in the first line) from 'pick' to 'squash', save and quit. + # On the next screen, edit/refine commit messages. + # Save and quit. + git push -f # (force push to GitHub) + +Please update the resulting commit message, if needed. It should read as a coherent message. In most cases, this means not just listing the interim commits. 
+ +If your change contains a merge commit, the above workflow may not work and you will need to remove the merge commit first. See the next section for details on how to rebase. + +Please refrain from creating several pull requests for the same change. Use the pull request that is already open (or was created earlier) to amend changes. This preserves the discussion and review that happened earlier for the respective change set. + +The length of time required for peer review is unpredictable and will vary from pull request to pull request. + +#### Refactoring + +Refactoring is a necessary part of any software project's evolution. The following guidelines cover refactoring pull requests for the Bittensor project. + +There are three categories of refactoring: code-only moves, code style fixes, and code refactoring. In general, refactoring pull requests should not mix these three kinds of activities in order to make refactoring pull requests easy to review and uncontroversial. In all cases, refactoring PRs must not change the behaviour of code within the pull request (bugs must be preserved as is). + +Project maintainers aim for a quick turnaround on refactoring pull requests, so where possible keep them short, uncomplex and easy to verify. + +Pull requests that refactor the code should not be made by new contributors. It requires a certain level of experience to know where the code belongs to and to understand the full ramification (including rebase effort of open pull requests). Trivial pull requests or pull requests that refactor the code with no clear benefits may be immediately closed by the maintainers to reduce unnecessary workload on reviewing. + +#### Peer Review + +Anyone may participate in peer review which is expressed by comments in the pull request. Typically reviewers will review the code for obvious errors, as well as test out the patch set and opine on the technical merits of the patch. 
Project maintainers take into account the peer review when determining if there is consensus to merge a pull request (remember that discussions may have taken place elsewhere, not just on GitHub). The following language is used within pull-request comments: + +- ACK means "I have tested the code and I agree it should be merged"; +- NACK means "I disagree this should be merged", and must be accompanied by sound technical justification. NACKs without accompanying reasoning may be disregarded; +- utACK means "I have not tested the code, but I have reviewed it and it looks OK, I agree it can be merged"; +- Concept ACK means "I agree in the general principle of this pull request"; +- Nit refers to trivial, often non-blocking issues. + +Reviewers should include the commit(s) they have reviewed in their comments. This can be done by copying the commit SHA1 hash. + +A pull request that changes consensus-critical code is considerably more involved than a pull request that adds a feature to the wallet, for example. Such patches must be reviewed and thoroughly tested by several reviewers who are knowledgeable about the changed subsystems. Where new features are proposed, it is helpful for reviewers to try out the patch set on a test network and indicate that they have done so in their review. Project maintainers will take this into consideration when merging changes. + +For a more detailed description of the review process, see the [Code Review Guidelines](CODE_REVIEW_DOCS.md). + +### Reporting Bugs + +This section guides you through submitting a bug report for Bittensor. Following these guidelines helps maintainers and the community understand your report :pencil:, reproduce the behavior :computer: :computer:, and find related reports :mag_right:. + +When you are creating a bug report, please [include as many details as possible](#how-do-i-submit-a-good-bug-report). 
+ +> **Note:** If you find a **Closed** issue that seems like it is the same thing that you're experiencing, open a new issue and include a link to the original issue in the body of your new one. + +#### Before Submitting A Bug Report + +* **Check the [debugging guide](./DEBUGGING.md).** You might be able to find the cause of the problem and fix things yourself. Most importantly, check if you can reproduce the problem in the latest version of Bittensor by updating to the latest Master branch changes. +* **Check the [Discord Server](https://discord.gg/7wvFuPJZgq)** and ask in [#finney-issues](https://discord.com/channels/799672011265015819/1064247007688007800) or [#subnet-1-issues](https://discord.com/channels/799672011265015819/1096187495667998790). +* **Determine which repository the problem should be reported in**: if it has to do with your ML model, then it's likely [Bittensor](https://github.com/opentensor/bittensor). If you are having problems with your emissions or Blockchain, then it is in [subtensor](https://github.com/opentensor/subtensor) + +#### How Do I Submit A (Good) Bug Report? + +Bugs are tracked as [GitHub issues](https://guides.github.com/features/issues/). You can find Bittensor's issues [here](https://github.com/opentensor/bittensor/issues). After you've determined which repository ([Bittensor](https://github.com/opentensor/bittensor) or [subtensor](https://github.com/opentensor/subtensor)) your bug is related to, create an issue on that repository. + +Explain the problem and include additional details to help maintainers reproduce the problem: + +* **Use a clear and descriptive title** for the issue to identify the problem. +* **Describe the exact steps which reproduce the problem** in as many details as possible. For example, start by explaining how you started Bittensor, e.g. which command exactly you used in the terminal, or how you started Bittensor otherwise. When listing steps, **don't just say what you did, but explain how you did it**. 
For example, if you ran Bittensor with a set of custom configs, explain if you used a config file or command line arguments. +* **Provide specific examples to demonstrate the steps**. Include links to files or GitHub projects, or copy/pasteable snippets, which you use in those examples. If you're providing snippets in the issue, use [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines). +* **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior. +* **Explain which behavior you expected to see instead and why.** +* **Include screenshots and animated GIFs** which show you following the described steps and clearly demonstrate the problem. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +* **If you're reporting that Bittensor crashed**, include a crash report with a stack trace from the operating system. On macOS, the crash report will be available in `Console.app` under "Diagnostic and usage information" > "User diagnostic reports". Include the crash report in the issue in a [code block](https://help.github.com/articles/markdown-basics/#multiple-lines), a [file attachment](https://help.github.com/articles/file-attachments-on-issues-and-pull-requests/), or put it in a [gist](https://gist.github.com/) and provide link to that gist. +* **If the problem is related to performance or memory**, include a CPU profile capture with your report, if you're using a GPU then include a GPU profile capture as well. Look into the [PyTorch Profiler](https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html) to look at memory usage of your model. 
+* **If the problem wasn't triggered by a specific action**, describe what you were doing before the problem happened and share more information using the guidelines below.
+
+Provide more context by answering these questions:
+
+* **Did the problem start happening recently** (e.g. after updating to a new version of Bittensor) or was this always a problem?
+* If the problem started happening recently, **can you reproduce the problem in an older version of Bittensor?**
+* **Can you reliably reproduce the issue?** If not, provide details about how often the problem happens and under which conditions it normally happens.
+
+Include details about your configuration and environment:
+
+* **Which version of Bittensor are you using?** You can get the version by checking for `__version__` in [`bittensor/__init__.py`](https://github.com/opentensor/bittensor/blob/master/bittensor/__init__.py#L30). This is not sufficient. Also add the commit hash of the branch you are on.
+* **What commit hash are you on?** You can get the exact commit hash by checking `git log` and pasting the full commit hash.
+* **What's the name and version of the OS you're using?**
+* **Are you running Bittensor in a virtual machine?** If so, which VM software are you using and which operating systems and versions are used for the host and the guest?
+* **Are you running Bittensor in a dockerized container?** If so, have you made sure that your docker container contains your latest changes and is up to date with Master branch?
+* **Are you using [local configuration files](https://opentensor.github.io/getting-started/configuration.html)** `config.yaml` to customize your Bittensor experiment? If so, provide the contents of that config file, preferably in a [code block](https://help.github.com/articles/markdown-basics/#multiple-lines) or with a link to a [gist](https://gist.github.com/).
+
+### Suggesting Enhancements and Features
+
+This section guides you through submitting an enhancement suggestion for Bittensor, including completely new features and minor improvements to existing functionality. Following these guidelines helps maintainers and the community understand your suggestion :pencil: and find related suggestions :mag_right:.
+
+When you are creating an enhancement suggestion, please [include as many details as possible](#how-do-i-submit-a-good-enhancement-suggestion).
+
+#### Before Submitting An Enhancement Suggestion
+
+* **Check the [debugging guide](./DEBUGGING.md)** for tips — you might discover that the enhancement is already available. Most importantly, check if you're using the latest version of Bittensor by pulling the latest changes from the Master branch and if you can get the desired behavior by changing [Bittensor's config settings](https://opentensor.github.io/getting-started/configuration.html).
+* **Determine which repository the problem should be reported in**: if it has to do with your ML model, then it's likely [Bittensor](https://github.com/opentensor/bittensor). If you are having problems with your emissions or Blockchain, then it is in [subtensor](https://github.com/opentensor/subtensor).
+
+#### How Do I Submit A (Good) Enhancement Suggestion?
+
+Enhancement suggestions are tracked as [GitHub issues](https://guides.github.com/features/issues/). After you've determined which repository ([Bittensor](https://github.com/opentensor/bittensor) or [subtensor](https://github.com/opentensor/subtensor)) your enhancement suggestion is related to, create an issue on that repository and provide the following information:
+
+* **Use a clear and descriptive title** for the issue to identify the problem.
+* **Provide a step-by-step description of the suggested enhancement** in as many details as possible.
+* **Provide specific examples to demonstrate the steps**.
Include copy/pasteable snippets which you use in those examples, as [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines). +* **Describe the current behavior** and **explain which behavior you expected to see instead** and why. +* **Include screenshots and animated GIFs** which help you demonstrate the steps or point out the part of Bittensor which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +* **Explain why this enhancement would be useful** to most Bittensor users. +* **List some other text editors or applications where this enhancement exists.** +* **Specify which version of Bittensor are you using?** You can get the exact version by checking for `__version__` in [`bittensor/bittensor/__init.py`](https://github.com/opentensor/bittensor/blob/master/bittensor/__init__.py#L30). +* **Specify the name and version of the OS you're using.** + +Thank you for considering contributing to Bittensor! Any help is greatly appreciated along this journey to incentivize open and permissionless intelligence. diff --git a/contrib/DEBUGGING.md b/contrib/DEBUGGING.md new file mode 100644 index 0000000000..093e3432bf --- /dev/null +++ b/contrib/DEBUGGING.md @@ -0,0 +1,161 @@ +## Installation + +First, make sure you have Bittensor installed correctly. There are three ways to install Bittensor: + +1. Through the installer: + +```bash +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/opentensor/bittensor/master/scripts/install.sh)" +``` + +2. With pip: + +```bash +pip install bittensor +``` + +3. 
From source: + +```bash +git clone https://github.com/opentensor/bittensor.git +python3 -m pip install -e bittensor/ +``` + +You can test your installation by running: + +```bash +python3 -c "import bittensor; print(bittensor.__version__)" +``` +## Logging +Make good use of the `bittensor.logging` module. It can be your friend and will help you find things that are otherwise difficult to get visibility on. + +You can enable debug or trace modes by running: +``` +import bittensor +bittensor.trace() # lowest level of granularity, best for figuring out what went wrong. +bittensor.debug() # for most everything else that you don't want to see normally at runtime +``` +at the top of your script or source file to enable more verbose output logs. + +You can also write your own in the code simply: +```python +# Bittensor's wallet maintenance class. +wallet = bittensor.wallet() + +bittensor.logging.debug( f"wallet keypair: {wallet.hotkey}" ) + +... + +# Bittensor's chain state object. +metagraph = bittensor.metagraph(netuid=1) + +bittensor.logging.trace( f"metagraph created! netuid {metagraph.netuid}" ) +``` + + +## Querying the Network + +Ensure you can query the Bittensor network using the Python API. If something is broken with your installation or the chain, this won't work out of the box. Here's an example of how to do this: + +```python +import bittensor +bittensor.trace() + +# Attempt to query through the foundation endpoint. 
+print(bittensor.prompt("Heraclitus was a ")) +``` + +## Debugging Miners + + +First, try registering and running on a testnet: +```bash +btcli register --netuid --subtensor.chain_endpoint wss://test.finney.opentensor.ai:443 +``` + +If that works, then try to register a miner on mainnet: + +```bash +btcli register --netuid +``` + +See if you can observe your slot specified by UID: + +```bash +btcli overview --netuid +``` + +Here's an example of how to run a pre-configured miner: + +```bash +python3 bittensor/neurons/text_prompting/miners/GPT4ALL/neuron.py --netuid +``` + +## Debugging with the Bittensor Package + +The Bittensor package contains data structures for interacting with the Bittensor ecosystem, writing miners, validators, and querying the network. + +Try to use the Bittensor package to create a wallet, connect to the axon running on slot 10, and send a prompt to this endpoint and see where things are breaking along this typical codepath: + +```python +import bittensor + +# Bittensor's wallet maintenance class. +wallet = bittensor.wallet() + +# Bittensor's chain interface. +subtensor = bittensor.subtensor() + +# Bittensor's chain state object. +metagraph = bittensor.metagraph(netuid=1) + +# Instantiate a Bittensor endpoint. +axon = bittensor.axon(wallet=wallet, metagraph=metagraph) + +# Start servicing messages on the wire. +axon.start() + +# Register this axon on a subnetwork +subtensor.serve_axon(netuid=1, axon=axon) + +# Connect to the axon running on slot 10, use the wallet to sign messages. +dendrite = bittensor.text_prompting(keypair=wallet.hotkey, axon=metagraph.axons[10]) + +# Send a prompt to this endpoint +dendrite.forward(roles=['user'], messages=['Who is Rick James?']) +``` + +> NOTE: It may be helpful to throw in breakpoints such as with `pdb`. +```python +# some code ... +import pdb; pdb.set_trace() # breakpoint! +# more code ... + +``` +This will stop execution at the breakpoint you set and can operate on the stack directly in the terminal. 
+ +## Searching for strings +Use `ag`. It's fast, convenient, and widely available on unix systems. Ag will highlight all occurnaces of a given pattern. + +```bash +apt-get install silversearcher-ag +``` + +Usage: +```bash +$ ag "query_subtensor" + +>>> bittensor/_subtensor/subtensor_mock.py +>>> 165: e.g. We mock `Subtensor.query_subtensor` instead of all query methods. +>>> 536: def query_subtensor( +>>> 1149: curr_total_hotkey_stake = self.query_subtensor( +>>> 1154: curr_total_coldkey_stake = self.query_subtensor( +>>> 1345: return self.query_subtensor(name=name, block=block, params=[netuid]).value +>>> +>>> bittensor/_subtensor/subtensor_impl.py +>>> 902: def query_subtensor( +>>> 1017: return self.query_subtensor("Rho", block, [netuid]).value +... +``` + +Remember, debugging involves a lot of trial and error. Don't be discouraged if things don't work right away. Keep trying different things, and don't hesitate to ask for help if you need it. diff --git a/contrib/DEVELOPMENT_WORKFLOW.md b/contrib/DEVELOPMENT_WORKFLOW.md new file mode 100644 index 0000000000..91e781ffcc --- /dev/null +++ b/contrib/DEVELOPMENT_WORKFLOW.md @@ -0,0 +1,159 @@ +# Bittensor Development Workflow + +## Table of contents + +- [Bittensor Development Workflow](#bittensor-development-workflow) + - [Main Branches](#main-branches) + - [Development Model](#development-model) + - [Feature Branches](#feature-branches) + - [Release Branches](#release-branches) + - [Hotfix Branches](#hotfix-branches) + - [Git Operations](#git-operations) + - [Creating a Feature Branch](#creating-a-feature-branch) + - [Merging Feature Branch into Staging](#merging-feature-branch-into-staging) + - [Creating a Release Branch](#creating-a-release-branch) + - [Finishing a Release Branch](#finishing-a-release-branch) + - [Creating a Hotfix Branch](#creating-a-hotfix-branch) + - [Finishing a Hotfix Branch](#finishing-a-hotfix-branch) + - [Continuous Integration (CI) and Continuous Deployment 
(CD)](#continuous-integration-ci-and-continuous-deployment-cd) + - [Versioning and Release Notes](#versioning-and-release-notes) + - [Pending Tasks](#pending-tasks) + +## Main Branches + +Bittensor's codebase consists of two main branches: **master** and **staging**. + +**master** +- This is Bittensor's live production branch, which should only be updated by the core development team. This branch is protected, so refrain from pushing or merging into it unless authorized. + +**staging** +- This branch is continuously updated and is where you propose and merge changes. It's essentially Bittensor's active development branch. + +## Development Model + +### Feature Branches + +- Branch off from: `staging` +- Merge back into: `staging` +- Naming convention: `feature//` + +Feature branches are used to develop new features for upcoming or future releases. They exist as long as the feature is in development, but will eventually be merged into `staging` or discarded. Always delete your feature branch after merging to avoid unnecessary clutter. + +### Release Branches + +- Branch off from: `staging` +- Merge back into: `staging` and then `master` +- Naming convention: `release///` + +Release branches support the preparation of a new production release, allowing for minor bug fixes and preparation of metadata (version number, configuration, etc). All new features should be merged into `staging` and wait for the next big release. + +### Hotfix Branches + +General workflow: + +- Branch off from: `master` or `staging` +- Merge back into: `staging` then `master` +- Naming convention: `hotfix///` + +Hotfix branches are meant for quick fixes in the production environment. When a critical bug in a production version must be resolved immediately, a hotfix branch is created. + +## Git Operations + +#### Create a feature branch + +1. Branch from the **staging** branch. + 1. 
Command: `git checkout -b feature/my-feature staging` + +> Rebase frequently with the updated staging branch so you do not face big conflicts before submitting your pull request. Remember, syncing your changes with other developers could also help you avoid big conflicts. + +#### Merge feature branch into staging + +In other words, integrate your changes into a branch that will be tested and prepared for release. + +1. Switch branch to staging: `git checkout staging` +2. Merging feature branch into staging: `git merge --no-ff feature/my-feature` +3. Pushing changes to staging: `git push origin staging` +4. Delete feature branch: `git branch -d feature/my-feature` (alternatively, this can be navigated on the GitHub web UI) + +This operation is done by Github when merging a PR. + +So, what you have to keep in mind is: +- Open the PR against the `staging` branch. +- After merging a PR you should delete your feature branch. This will be strictly enforced. + +#### Creating a release branch + +1. Create branch from staging: `git checkout -b release/3.4.0/descriptive-message/creator's_name staging` +2. Updating version with major or minor: `./scripts/update_version.sh major|minor` +3. Commit file changes with new version: `git commit -a -m "Updated version to 3.4.0"` + + +#### Finishing a Release Branch + +This involves releasing stable code and generating a new version for bittensor. + +1. Switch branch to master: `git checkout master` +2. Merge release branch into master: `git merge --no-ff release/3.4.0/optional-descriptive-message` +3. Tag changeset: `git tag -a v3.4.0 -m "Releasing v3.4.0: some comment about it"` +4. Push changes to master: `git push origin master` +5. Push tags to origin: `git push origin --tags` + +To keep the changes made in the __release__ branch, we need to merge those back into `staging`: + +- Switch branch to staging: `git checkout staging`. 
+- Merging release branch into staging: `git merge --no-ff release/3.4.0/optional-descriptive-message` + +This step may well lead to a merge conflict (probably even, since we have changed the version number). If so, fix it and commit. + + +#### Creating a hotfix branch +1. Create branch from master: `git checkout -b hotfix/3.3.4/descriptive-message/creator's-name master` +2. Update patch version: `./scripts/update_version.sh patch` +3. Commit file changes with new version: `git commit -a -m "Updated version to 3.3.4"` +4. Fix the bug and commit the fix: `git commit -m "Fixed critical production issue X"` + +#### Finishing a Hotfix Branch + +Finishing a hotfix branch involves merging the bugfix into both `master` and `staging`. + +1. Switch branch to master: `git checkout master` +2. Merge hotfix into master: `git merge --no-ff hotfix/3.3.4/optional-descriptive-message` +3. Tag new version: `git tag -a v3.3.4 -m "Releasing v3.3.4: descriptive comment about the hotfix"` +4. Push changes to master: `git push origin master` +5. Push tags to origin: `git push origin --tags` +6. Switch branch to staging: `git checkout staging` +7. Merge hotfix into staging: `git merge --no-ff hotfix/3.3.4/descriptive-message/creator's-name` +8. Push changes to origin/staging: `git push origin staging` +9. Delete hotfix branch: `git branch -d hotfix/3.3.4/optional-descriptive-message` + +The one exception to the rule here is that, **when a release branch currently exists, the hotfix changes need to be merged into that release branch, instead of** `staging`. Back-merging the bugfix into the __release__ branch will eventually result in the bugfix being merged into `develop` too, when the release branch is finished. (If work in develop immediately requires this bugfix and cannot wait for the release branch to be finished, you may safely merge the bugfix into develop now already as well.) 
+ +Finally, we remove the temporary branch: + +- `git branch -d hotfix/3.3.4/optional-descriptive-message` +## Continuous Integration (CI) and Continuous Deployment (CD) + +Continuous Integration (CI) is a software development practice where members of a team integrate their work frequently. Each integration is verified by an automated build and test process to detect integration errors as quickly as possible. + +Continuous Deployment (CD) is a software engineering approach in which software functionalities are delivered frequently through automated deployments. + +- **CircleCI job**: Create jobs in CircleCI to automate the merging of staging into master and release version (needed to release code) and building and testing Bittensor (needed to merge PRs). + +## Versioning and Release Notes + +Semantic versioning helps keep track of the different versions of the software. When code is merged into master, generate a new version. + +Release notes provide documentation for each version released to the users, highlighting the new features, improvements, and bug fixes. When merged into master, generate GitHub release and release notes. + +## Pending Tasks + +- Determine if master and staging are different +- Determine what is in staging that is not merged yet + - Document not released developments + - When merged into staging, generate information about what's merged into staging but not released. + - When merged into master, generate GitHub release and release notes. +- CircleCI jobs + - Merge staging into master and release version (needed to release code) + - Build and Test Bittensor (needed to merge PRs) + +This document can be improved as the Bittensor project continues to develop and change. 
diff --git a/contrib/RELEASE_GUIDELINES.md b/contrib/RELEASE_GUIDELINES.md new file mode 100644 index 0000000000..d6bda7c860 --- /dev/null +++ b/contrib/RELEASE_GUIDELINES.md @@ -0,0 +1,87 @@ +# Release Guidelines + +The release manager in charge can release a Bittensor version using two scripts: + - [../scripts/release/versioning.sh](../scripts/release/versioning.sh) + - [../scripts/release/release.sh](../scripts/release/release.sh) + +The release manager will need the right permissions for: + - github.com + - pypi.org + - hub.docker.com + +If you are new in this role, ask for the proper setup you need to run this process manually. + +## Process of release + +1. Create a branch called `release/VERSION`, having VERSION with the version to release. +1. Make sure twine is installed: `pip install twine` +1. Within the release branch: + 1. Update the version executing:`./scripts/release/versioning.sh --update UPDATE_TYPE` + 1. **UPDATE_TYPE** could be *major*, *minor* or *patch*. + 1. Add release notes to CHANGELOG executing: `./scripts/release/add_notes_changelog.sh -A -V NEW_VERSION -P PREVIOUS_TAG -T GH_ACCESS_TOKEN` + 1. **NEW_VERSION**: e.g.: 3.6.4 + 1. **PREVIOUS_TAG**: e.g.: v3.6.3 + 1. **GH_ACCESS_TOKEN**: A github [personal access token](https://docs.github.com/en/enterprise-server@3.4/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) you need. + +1. Test the release branch and verify that it meets the requirements. +1. After merging the release branch; Run the release script + +## Versioning script usage + +Options: + - -U, --update: type of update. It could be major, minor, patch or rc (release candidate). + - -A, --apply: This specify to apply the release. Without this the versioning will just show a dry run with no changes. + +## Release script usage + +Options: + - -A, --apply: This specify to apply the release. Without this the release will just show a dry run with no changes. 
+ - -T,--github-token: A github personal access token to interact with the Github API. + +### Github token + +Since you need to use a secret when releasing bittensor (github personal access token), I encourage you to use [pass](https://www.passwordstore.org/) or a similar tool that allows you to store the secret safely and not expose it in the history of the machine you use. + +So you can have: +``` +GITHUB_ACCESS_TOKEN=$(pass github/your_personal_token_with_permisions) +``` + +or +``` +GITHUB_ACCESS_TOKEN=$(whatever you need to get the token safely) +``` + +### Executions + +So, executing the script to release a minor version will be: + +``` +# For a dry run +./scripts/release/release.sh +``` + +``` +# Applying changes +./scripts/release/release.sh --apply --github-token $GITHUB_ACCESS_TOKEN` +``` + +## Checking release + +After the execution of the release script we would have generated: + - A new git tag in [github.com](https://github.com/opentensor/bittensor/tags) + - A new github release in [github.com](https://github.com/opentensor/bittensor/releases) + - A new pip package in [pypi.org](https://pypi.org/project/bittensor/#history) + - A new docker image in [hub.docker.com](https://hub.docker.com/r/opentensorfdn/bittensor/tags) + +## After release + +After a Bittensor release we have to +- Update [cubit](https://github.com/opentensor/cubit). + +### Updating cubit + +1. Updating the [Dockerfile](https://github.com/opentensor/cubit/blob/master/docker/Dockerfile) +1. Building its docker image (follow its README instructions) +1. Push it to hub.docker.com + 1. 
The generated image name will be the same but with `-cubit` appended to it
[Pull Request and Squashing Commits Caveats](#pull-request-and-squashing-commits-caveats) + + +### Code style + +#### General Style +Python's official style guide is PEP 8, which provides conventions for writing code for the main Python distribution. Here are some key points: + +- `Indentation:` Use 4 spaces per indentation level. + +- `Line Length:` Limit all lines to a maximum of 79 characters. + +- `Blank Lines:` Surround top-level function and class definitions with two blank lines. Method definitions inside a class are surrounded by a single blank line. + +- `Imports:` Imports should usually be on separate lines and should be grouped in the following order: + + - Standard library imports. + - Related third party imports. + - Local application/library specific imports. +- `Whitespace:` Avoid extraneous whitespace in the following situations: + + - Immediately inside parentheses, brackets or braces. + - Immediately before a comma, semicolon, or colon. + - Immediately before the open parenthesis that starts the argument list of a function call. +- `Comments:` Comments should be complete sentences and should be used to clarify code and are not a substitute for poorly written code. + +#### For Python + +- `List Comprehensions:` Use list comprehensions for concise and readable creation of lists. + +- `Generators:` Use generators when dealing with large amounts of data to save memory. + +- `Context Managers:` Use context managers (with statement) for resource management. + +- `String Formatting:` Use f-strings for formatting strings in Python 3.6 and above. + +- `Error Handling:` Use exceptions for error handling whenever possible. + +#### More details + +Use [`ruff` to format](https://docs.astral.sh/ruff/formatter/#the-ruff-formatter) your python code before commiting for consistency across such a large pool of contributors. 
+Black code [style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#code-style) ensures consistent and opinionated code formatting. +Ruff automatically formats your Python code according to the Black style guide, enhancing code readability and maintainability. + +Key Features of ruff & Black code style: + + Consistency: ruff enforces a single, consistent coding style across your project, eliminating style debates and allowing developers to focus on code logic. + + Readability: By applying a standard formatting style, Black improves code readability, making it easier to understand and collaborate on projects. + + Automation: ruff automates the code formatting process, saving time and effort. It eliminates the need for manual formatting and reduces the likelihood of inconsistencies. + +### Naming Conventions + +- `Classes:` Class names should normally use the CapWords Convention. +- `Functions and Variables:` Function names should be lowercase, with words separated by underscores as necessary to improve readability. Variable names follow the same convention as function names. + +- `Constants:` Constants are usually defined on a module level and written in all capital letters with underscores separating words. + +- `Non-public Methods and Instance Variables:` Use a single leading underscore (_). This is a weak "internal use" indicator. + +- `Strongly "private" methods and variables:` Use a double leading underscore (__). This triggers name mangling in Python. + + +### Git commit style + +Here’s a model Git commit message when contributing: +``` +Summarize changes in around 50 characters or less + +More detailed explanatory text, if necessary. Wrap it to about 72 +characters or so. In some contexts, the first line is treated as the +subject of the commit and the rest of the text as the body. 
The +blank line separating the summary from the body is critical (unless +you omit the body entirely); various tools like `log`, `shortlog` +and `rebase` can get confused if you run the two together. + +Explain the problem that this commit is solving. Focus on why you +are making this change as opposed to how (the code explains that). +Are there side effects or other unintuitive consequences of this +change? Here's the place to explain them. + +Further paragraphs come after blank lines. + + - Bullet points are okay, too + + - Typically a hyphen or asterisk is used for the bullet, preceded + by a single space, with blank lines in between, but conventions + vary here + +If you use an issue tracker, put references to them at the bottom, +like this: + +Resolves: #123 +See also: #456, #789 +``` + + +## The six rules of a great commit. + +#### 1. Atomic Commits +An “atomic” change revolves around one task or one fix. + +Atomic Approach + - Commit each fix or task as a separate change + - Only commit when a block of work is complete + - Commit each layout change separately + - Joint commit for layout file, code behind file, and additional resources + +Benefits + +- Easy to roll back without affecting other changes +- Easy to make other changes on the fly +- Easy to merge features to other branches + +#### Avoid trivial commit messages + +Commit messages like "fix", "fix2", or "fix3" don't provide any context or clear understanding of what changes the commit introduces. Here are some examples of good vs. bad commit messages: + +**Bad Commit Message:** + + $ git commit -m "fix" + +**Good Commit Message:** + + $ git commit -m "Fix typo in README file" + +> **Caveat**: When working with new features, an atomic commit will often consist of multiple files, since a layout file, code behind file, and additional resources may have been added/modified. 
You don’t want to commit all of these separately, because if you had to roll back the application to a state before the feature was added, it would involve multiple commit entries, and that can get confusing + +#### 2. Separate subject from body with a blank line + +Not every commit requires both a subject and a body. Sometimes a single line is fine, especially when the change is so simple that no further context is necessary. + +For example: + + Fix typo in introduction to user guide + +Nothing more need be said; if the reader wonders what the typo was, she can simply take a look at the change itself, i.e. use git show or git diff or git log -p. + +If you’re committing something like this at the command line, it’s easy to use the -m option to git commit: + + $ git commit -m"Fix typo in introduction to user guide" + +However, when a commit merits a bit of explanation and context, you need to write a body. For example: + + Derezz the master control program + + MCP turned out to be evil and had become intent on world domination. + This commit throws Tron's disc into MCP (causing its deresolution) + and turns it back into a chess game. + +Commit messages with bodies are not so easy to write with the -m option. You’re better off writing the message in a proper text editor. [See Pro Git](https://git-scm.com/book/en/v2/Customizing-Git-Git-Configuration). + +In any case, the separation of subject from body pays off when browsing the log. Here’s the full log entry: + + $ git log + commit 42e769bdf4894310333942ffc5a15151222a87be + Author: Kevin Flynn + Date: Fri Jan 01 00:00:00 1982 -0200 + + Derezz the master control program + + MCP turned out to be evil and had become intent on world domination. + This commit throws Tron's disc into MCP (causing its deresolution) + and turns it back into a chess game. + + +#### 3. Limit the subject line to 50 characters +50 characters is not a hard limit, just a rule of thumb. 
Keeping subject lines at this length ensures that they are readable, and forces the author to think for a moment about the most concise way to explain what’s going on. + +GitHub’s UI is fully aware of these conventions. It will warn you if you go past the 50 character limit. Git will truncate any subject line longer than 72 characters with an ellipsis, thus keeping it to 50 is best practice. + +#### 4. Use the imperative mood in the subject line +Imperative mood just means “spoken or written as if giving a command or instruction”. A few examples: + + Clean your room + Close the door + Take out the trash + +Each of the seven rules you’re reading about right now are written in the imperative (“Wrap the body at 72 characters”, etc.). + +The imperative can sound a little rude; that’s why we don’t often use it. But it’s perfect for Git commit subject lines. One reason for this is that Git itself uses the imperative whenever it creates a commit on your behalf. + +For example, the default message created when using git merge reads: + + Merge branch 'myfeature' + +And when using git revert: + + Revert "Add the thing with the stuff" + + This reverts commit cc87791524aedd593cff5a74532befe7ab69ce9d. + +Or when clicking the “Merge” button on a GitHub pull request: + + Merge pull request #123 from someuser/somebranch + +So when you write your commit messages in the imperative, you’re following Git’s own built-in conventions. For example: + + Refactor subsystem X for readability + Update getting started documentation + Remove deprecated methods + Release version 1.0.0 + +Writing this way can be a little awkward at first. We’re more used to speaking in the indicative mood, which is all about reporting facts. 
That’s why commit messages often end up reading like this: + + Fixed bug with Y + Changing behavior of X + +And sometimes commit messages get written as a description of their contents: + + More fixes for broken stuff + Sweet new API methods + +To remove any confusion, here’s a simple rule to get it right every time. + +**A properly formed Git commit subject line should always be able to complete the following sentence:** + + If applied, this commit will + +For example: + + If applied, this commit will refactor subsystem X for readability + If applied, this commit will update getting started documentation + If applied, this commit will remove deprecated methods + If applied, this commit will release version 1.0.0 + If applied, this commit will merge pull request #123 from user/branch + +#### 5. Wrap the body at 72 characters +Git never wraps text automatically. When you write the body of a commit message, you must mind its right margin, and wrap text manually. + +The recommendation is to do this at 72 characters, so that Git has plenty of room to indent text while still keeping everything under 80 characters overall. + +A good text editor can help here. It’s easy to configure Vim, for example, to wrap text at 72 characters when you’re writing a Git commit. + +#### 6. Use the body to explain what and why vs. how +This [commit](https://github.com/bitcoin/bitcoin/commit/eb0b56b19017ab5c16c745e6da39c53126924ed6) from Bitcoin Core is a great example of explaining what changed and why: + +``` +commit eb0b56b19017ab5c16c745e6da39c53126924ed6 +Author: Pieter Wuille +Date: Fri Aug 1 22:57:55 2014 +0200 + + Simplify serialize.h's exception handling + + Remove the 'state' and 'exceptmask' from serialize.h's stream + implementations, as well as related methods. + + As exceptmask always included 'failbit', and setstate was always + called with bits = failbit, all it did was immediately raise an + exception. 
Get rid of those variables, and replace the setstate + with direct exception throwing (which also removes some dead + code). + + As a result, good() is never reached after a failure (there are + only 2 calls, one of which is in tests), and can just be replaced + by !eof(). + + fail(), clear(n) and exceptions() are just never called. Delete + them. +``` + +Take a look at the [full diff](https://github.com/bitcoin/bitcoin/commit/eb0b56b19017ab5c16c745e6da39c53126924ed6) and just think how much time the author is saving fellow and future committers by taking the time to provide this context here and now. If he didn’t, it would probably be lost forever. + +In most cases, you can leave out details about how a change has been made. Code is generally self-explanatory in this regard (and if the code is so complex that it needs to be explained in prose, that’s what source comments are for). Just focus on making clear the reasons why you made the change in the first place—the way things worked before the change (and what was wrong with that), the way they work now, and why you decided to solve it the way you did. + +The future maintainer that thanks you may be yourself! + + + +#### Tools worth mentioning + +##### Using `--fixup` + +If you've made a commit and then realize you've missed something or made a minor mistake, you can use the `--fixup` option. + +For example, suppose you've made a commit with a hash `9fceb02`. Later, you realize you've left a debug statement in your code. Instead of making a new commit titled "remove debug statement" or "fix", you can do the following: + + $ git commit --fixup 9fceb02 + +This will create a new commit to fix the issue, with a message like "fixup! The original commit message". + +##### Interactive Rebase + +Interactive rebase, or `rebase -i`, can be used to squash these fixup commits into the original commits they're fixing, which cleans up your commit history. 
You can use the `autosquash` option to automatically squash any commits marked as "fixup" into their target commits. + +For example: + + $ git rebase -i --autosquash HEAD~5 + +This command starts an interactive rebase for the last 5 commits (`HEAD~5`). Any commits marked as "fixup" will be automatically moved to squash with their target commits. + +The benefit of using `--fixup` and interactive rebase is that it keeps your commit history clean and readable. It groups fixes with the commits they are related to, rather than having a separate "fix" commit that might not make sense to other developers (or even to you) in the future. + + +--- + +#### Pull Request and Squashing Commits Caveats + +While atomic commits are great for development and for understanding the changes within the branch, the commit history can get messy when merging to the main branch. To keep a cleaner and more understandable commit history in our main branch, we encourage squashing all the commits of a PR into one when merging. + +This single commit should provide an overview of the changes that the PR introduced. It should follow the guidelines for atomic commits (an atomic commit is complete, self-contained, and understandable) but on the scale of the entire feature, task, or fix that the PR addresses. This approach combines the benefits of atomic commits during development with a clean commit history in our main branch. + +Here is how you can squash commits: + +```bash +git rebase -i HEAD~n +``` + +where `n` is the number of commits to squash. After running the command, replace `pick` with `squash` for the commits you want to squash into the previous commit. This will combine the commits and allow you to write a new commit message. + +In this context, an atomic commit message could look like: + +``` +Add feature X + +This commit introduces feature X which does A, B, and C. It adds +new files for layout, updates the code behind the file, and introduces +new resources. 
This change is important because it allows users to +perform task Y more efficiently. + +It includes: +- Creation of new layout file +- Updates in the code-behind file +- Addition of new resources + +Resolves: #123 +``` + +In your PRs, remember to detail what the PR is introducing or fixing. This will be helpful for reviewers to understand the context and the reason behind the changes. diff --git a/contrib/TESTING.md b/contrib/TESTING.md new file mode 100644 index 0000000000..59dc1d81a3 --- /dev/null +++ b/contrib/TESTING.md @@ -0,0 +1,94 @@ +# Testing Guide for Bittensor + +Testing is an essential part of software development that ensures the correctness and performance of your code. Bittensor uses a combination of unit tests and integration tests to verify the functionality of its components. This guide will walk you through how to run and write tests for Bittensor. + +## Running Tests + +Bittensor uses `pytest` for running its tests. To run all tests, navigate to the root directory of the Bittensor repository and run: + +```bash +pytest +``` + +This will automatically discover all test files (those that start with `test_`) and run them. + +If you want to run a specific test file, you can specify it directly. For example, to run the tests in `test_wallet.py`, you would use: + +```bash +pytest tests/test_wallet.py +``` + +Similarly, you can run a specific test within a file by appending `::` and the test name. For example: + +```bash +pytest tests/test_wallet.py::test_create_new_coldkey +``` + +## Writing Tests + +When writing tests for Bittensor, you should aim to cover both the "happy path" (where everything works as expected) and any potential error conditions. Here's a basic structure for a test file: + +```python +import pytest +import bittensor + +def test_some_functionality(): + # Setup any necessary objects or state. + wallet = bittensor.wallet() + + # Call the function you're testing. 
+ result = wallet.create_new_coldkey() + + # Assert that the function behaved as expected. + assert result is not None +``` + +In this example, we're testing the `create_new_coldkey` function of the `wallet` object. We assert that the result is not `None`, which is the expected behavior. + +## Mocking + +In some cases, you may need to mock certain functions or objects to isolate the functionality you're testing. Bittensor uses the `unittest.mock` library for this. Here's a simple example from the axon unittest: + +```python +def test_axon_start(self): + mock_wallet = MagicMock( + spec=bittensor.Wallet, + coldkey=MagicMock(), + coldkeypub=MagicMock( + # mock ss58 address + ss58_address="5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" + ), + hotkey=MagicMock( + ss58_address="5CtstubuSoVLJGCXkiWRNKrrGg2DVBZ9qMs2qYTLsZR4q1Wg" + ), + ) + axon = bittensor.axon(wallet=mock_wallet, metagraph=None) + axon.start() + assert axon.server._state.stage == grpc._server._ServerStage.STARTED +``` + +In this example, we're mocking the `coldkey`, `coldkeypub` and `hotkey` for a wallet. This allows us to test how the axon code behaves when `bittensor.Wallet()` would normally be called, without actually calling the constructor. +## Test Coverage + +It's important to ensure that your tests cover as much of your code as possible. You can use the `pytest-cov` plugin to measure your test coverage. To use it, first install it with pip: + +```bash +pip install pytest-cov +``` + +Then, you can run your tests with coverage like this: + +```bash +pytest --cov=bittensor +``` + +This will output a coverage report showing the percentage of your code that's covered by tests. + +Remember, while high test coverage is a good goal, it's also important to write meaningful tests. A test isn't very useful if it doesn't accurately represent the conditions under which your code will run. + +## Continuous Integration + +Bittensor uses CircleCI for continuous integration. 
This means that every time you push changes to the repository, all tests are automatically run. If any tests fail, you'll be notified so you can fix the issue before merging your changes. + + +Remember, tests are an important part of maintaining the health of a codebase. They help catch issues early and make it easier to add new features or refactor existing code. Happy testing! \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000000..7e6933ed25 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,10 @@ +version: "3.2" + +services: + dev: + container_name: node-bittensor + image: "bittensor/bittensor:latest" + ports: + - "8091:8091" + volumes: + - ~/.bittensor:/root/.bittensor \ No newline at end of file diff --git a/example.env b/example.env new file mode 100644 index 0000000000..35d405fb58 --- /dev/null +++ b/example.env @@ -0,0 +1,5 @@ +# To use legacy Torch-based of bittensor, you must set USE_TORCH=1 +USE_TORCH=0 +# If set to 0 (or anything else than 1), it will use current, numpy-based, bittensor interface. +# This is generally what you want unless you want legacy interoperability. +# Please note that the legacy interface is deprecated, and is not tested nearly as much. 
diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000000..d38bdc7172 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,18 @@ +[mypy] +ignore_missing_imports = True +ignore_errors = True + +[mypy-*.axon.*] +ignore_errors = False + +[mypy-*.dendrite.*] +ignore_errors = False + +[mypy-bittensor.metagraph.*] +ignore_errors = False + +[mypy-*.subtensor.*] +ignore_errors = False + +[mypy-*.synapse.*] +ignore_errors = False diff --git a/requirements/btcli.txt b/requirements/btcli.txt new file mode 100644 index 0000000000..429461facf --- /dev/null +++ b/requirements/btcli.txt @@ -0,0 +1 @@ +git+https://github.com/opentensor/btcli.git@main#egg=bittensor-cli \ No newline at end of file diff --git a/requirements/cubit.txt b/requirements/cubit.txt new file mode 100644 index 0000000000..5af1316836 --- /dev/null +++ b/requirements/cubit.txt @@ -0,0 +1,3 @@ +torch>=1.13.1 +cubit>=1.1.0 +cubit @ git+https://github.com/opentensor/cubit.git diff --git a/requirements/dev.txt b/requirements/dev.txt new file mode 100644 index 0000000000..14d616b48b --- /dev/null +++ b/requirements/dev.txt @@ -0,0 +1,19 @@ +pytest==7.2.0 +pytest-asyncio==0.23.7 +pytest-mock==3.12.0 +pytest-split==0.8.0 +pytest-xdist==3.0.2 +pytest-rerunfailures==10.2 +coveralls==3.3.1 +pytest-cov==4.0.0 +ddt==1.6.0 +hypothesis==6.81.1 +flake8==7.0.0 +mypy==1.8.0 +types-retry==0.9.9.4 +freezegun==1.5.0 +torch>=1.13.1 +httpx==0.27.0 +ruff==0.4.7 +aioresponses==0.7.6 +factory-boy==3.3.0 diff --git a/requirements/prod.txt b/requirements/prod.txt new file mode 100644 index 0000000000..fab144bf76 --- /dev/null +++ b/requirements/prod.txt @@ -0,0 +1,23 @@ +wheel +setuptools~=70.0.0 +aiohttp~=3.9 +bt-decode +colorama~=0.4.6 +fastapi~=0.110.1 +munch~=2.5.0 +numpy~=2.0.1 +msgpack-numpy-opentensor~=0.5.0 +nest_asyncio +netaddr +packaging +python-statemachine~=2.1 +pyyaml +retry +requests +rich +pydantic>=2.3, <3 +python-Levenshtein +scalecodec==1.2.11 +substrate-interface~=1.7.9 +uvicorn 
+git+https://github.com/opentensor/btwallet.git#egg=bittensor-wallet \ No newline at end of file diff --git a/requirements/torch.txt b/requirements/torch.txt new file mode 100644 index 0000000000..028dec0810 --- /dev/null +++ b/requirements/torch.txt @@ -0,0 +1 @@ +torch>=1.13.1 diff --git a/scripts/check_compatibility.sh b/scripts/check_compatibility.sh new file mode 100755 index 0000000000..b9c89c24dd --- /dev/null +++ b/scripts/check_compatibility.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +if [ -z "$1" ]; then + echo "Please provide a Python version as an argument." + exit 1 +fi + +python_version="$1" +all_passed=true + +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +RED='\033[0;31m' +NC='\033[0m' # No Color + +check_compatibility() { + all_supported=0 + + while read -r requirement; do + # Skip lines starting with git+ + if [[ "$requirement" == git+* ]]; then + continue + fi + + package_name=$(echo "$requirement" | awk -F'[!=<>~]' '{print $1}' | awk -F'[' '{print $1}') # Strip off brackets + echo -n "Checking $package_name... " + + url="https://pypi.org/pypi/$package_name/json" + response=$(curl -s $url) + status_code=$(curl -s -o /dev/null -w "%{http_code}" $url) + + if [ "$status_code" != "200" ]; then + echo -e "${RED}Information not available for $package_name. 
Failure.${NC}" + all_supported=1 + continue + fi + + classifiers=$(echo "$response" | jq -r '.info.classifiers[]') + requires_python=$(echo "$response" | jq -r '.info.requires_python') + + base_version="Programming Language :: Python :: ${python_version%%.*}" + specific_version="Programming Language :: Python :: $python_version" + + if echo "$classifiers" | grep -q "$specific_version" || echo "$classifiers" | grep -q "$base_version"; then + echo -e "${GREEN}Supported${NC}" + elif [ "$requires_python" != "null" ]; then + if echo "$requires_python" | grep -Eq "==$python_version|>=$python_version|<=$python_version"; then + echo -e "${GREEN}Supported${NC}" + else + echo -e "${RED}Not compatible with Python $python_version due to constraint $requires_python.${NC}" + all_supported=1 + fi + else + echo -e "${YELLOW}Warning: Specific version not listed, assuming compatibility${NC}" + fi + done < requirements/prod.txt + + return $all_supported +} + +echo "Checking compatibility for Python $python_version..." +check_compatibility +if [ $? -eq 0 ]; then + echo -e "${GREEN}All requirements are compatible with Python $python_version.${NC}" +else + echo -e "${RED}All requirements are NOT compatible with Python $python_version.${NC}" + all_passed=false +fi + +echo "" +if $all_passed; then + echo -e "${GREEN}All tests passed.${NC}" +else + echo -e "${RED}All tests did not pass.${NC}" + exit 1 +fi diff --git a/scripts/check_pre_submit.sh b/scripts/check_pre_submit.sh new file mode 100755 index 0000000000..4dbe7747f6 --- /dev/null +++ b/scripts/check_pre_submit.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# ruff checks formating +echo ">>> Run the pre-submit format check with \`ruff format .\`." +ruff format . + +echo ">>> Run the pre-submit format check with \`mypy\`." + +# mypy checks python versions compatibility +versions=("3.9" "3.10" "3.11") +for version in "${versions[@]}"; do + echo "Running mypy for Python $version..." 
+ mypy --ignore-missing-imports bittensor/ --python-version="$version" +done + +# flake8 checks errors count in bittensor folder +error_count=$(flake8 bittensor/ --count) +echo ">>> Flake8 found ${error_count} errors." diff --git a/scripts/check_requirements_changes.sh b/scripts/check_requirements_changes.sh new file mode 100755 index 0000000000..5fcd27ea3f --- /dev/null +++ b/scripts/check_requirements_changes.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Check if requirements files have changed in the last commit +if git diff --name-only HEAD~1 | grep -E 'requirements/prod.txt|requirements/dev.txt'; then + echo "Requirements files have changed. Running compatibility checks..." + echo 'export REQUIREMENTS_CHANGED="true"' >> $BASH_ENV +else + echo "Requirements files have not changed. Skipping compatibility checks..." + echo 'export REQUIREMENTS_CHANGED="false"' >> $BASH_ENV +fi diff --git a/scripts/create_wallet.sh b/scripts/create_wallet.sh new file mode 100755 index 0000000000..d0ee08b69f --- /dev/null +++ b/scripts/create_wallet.sh @@ -0,0 +1,13 @@ +mkdir -p ~/.bittensor/wallets/default/hotkeys +rm ~/.bittensor/wallets/default/coldkeypub.txt +rm ~/.bittensor/wallets/default/hotkeys/default +touch ~/.bittensor/wallets/default/coldkeypub.txt +touch ~/.bittensor/wallets/default/hotkeys/default +echo "0x74acaa8d7829336dfff7569f19225818cc593335b9aafcde3f69db23c3538561" >> ~/.bittensor/wallets/default/coldkeypub.txt +echo '{"accountId": "0x9cf7085aa3304c21dc0f571c0134abb12f2e8e1bc9dbfc82440b8d6ba7908655", "publicKey": "0x9cf7085aa3304c21dc0f571c0134abb12f2e8e1bc9dbfc82440b8d6ba7908655", "secretPhrase": "document usage siren cross across crater shrug jump marine distance absurd caught", "secretSeed": "0x2465ae0757117bea271ad622e1cd0c4b319c96896a3c7d9469a68e63cf7f9646", "ss58Address": "5FcWiCiFoSspGGocSxzatNL5kT6cjxjXQ9LuAuYbvFNUqcfX"}' >> ~/.bittensor/wallets/default/hotkeys/default +chmod 0600 ~/.bittensor/wallets/default/coldkeypub.txt +chmod 0600 
~/.bittensor/wallets/default/hotkeys/default +echo "~/.bittensor/wallets/default/coldkeypub.txt" +cat ~/.bittensor/wallets/default/coldkeypub.txt +echo "~/.bittensor/wallets/default/hotkeys/default" +cat ~/.bittensor/wallets/default/hotkeys/default \ No newline at end of file diff --git a/scripts/environments/README.md b/scripts/environments/README.md new file mode 100644 index 0000000000..0caa0d2ae4 --- /dev/null +++ b/scripts/environments/README.md @@ -0,0 +1,21 @@ +## 04 Installation on Apple M chip +There are quite a few Python libraries that are not yet compatible with Apple M chipset architecture. The best way to use Bittensor on this hardware is through Conda and Miniforge. The Opentensor team has created a Conda environment that makes installing Bittensor on these systems very easy. + +> NOTE: This tutorial assumes you have installed conda on mac, if you have not done so already you can install it from [here](https://conda.io/projects/conda/en/latest/user-guide/install/macos.html). + +1. Create the conda environment from the `apple_m1_environment.yml` file here: + ```bash + conda env create -f apple_m1_environment.yml + ``` + +2. Activate the new environment: `conda activate bittensor`. +3. Verify that the new environment was installed correctly: + ```bash + conda env list + ``` + +4. 
Install bittensor (without dependencies): + ```bash + conda activate bittensor # activate the env + pip install --no-deps bittensor # install bittensor + ``` diff --git a/scripts/environments/apple_m1_environment.yml b/scripts/environments/apple_m1_environment.yml new file mode 100644 index 0000000000..25824aa64e --- /dev/null +++ b/scripts/environments/apple_m1_environment.yml @@ -0,0 +1,272 @@ +name: bittensor +channels: + - conda-forge +dependencies: + - anyio=3.6.2=pyhd8ed1ab_0 + - appnope=0.1.3=pyhd8ed1ab_0 + - argon2-cffi=21.3.0=pyhd8ed1ab_0 + - argon2-cffi-bindings=21.2.0=py310h8e9501a_3 + - asttokens=2.2.1=pyhd8ed1ab_0 + - async-lru=2.0.2=pyhd8ed1ab_0 + - attrs=23.1.0=pyh71513ae_1 + - babel=2.12.1=pyhd8ed1ab_1 + - backcall=0.2.0=pyh9f0ad1d_0 + - backports=1.0=pyhd8ed1ab_3 + - backports.functools_lru_cache=1.6.4=pyhd8ed1ab_0 + - beautifulsoup4=4.12.2=pyha770c72_0 + - bleach=6.0.0=pyhd8ed1ab_0 + - brotli=1.0.9=h1a8c8d9_8 + - brotli-bin=1.0.9=h1a8c8d9_8 + - bzip2=1.0.8=h3422bc3_4 + - c-ares=1.18.1=h3422bc3_0 + - ca-certificates=2023.5.7=hf0a4a13_0 + - cffi=1.15.1=py310h2399d43_3 + - charset-normalizer=3.1.0=pyhd8ed1ab_0 + - comm=0.1.3=pyhd8ed1ab_0 + - debugpy=1.6.7=py310h0f1eb42_0 + - decorator=5.1.1=pyhd8ed1ab_0 + - defusedxml=0.7.1=pyhd8ed1ab_0 + - entrypoints=0.4=pyhd8ed1ab_0 + - executing=1.2.0=pyhd8ed1ab_0 + - flit-core=3.9.0=pyhd8ed1ab_0 + - gmp=6.2.1=h9f76cd9_0 + - grpcio=1.42.0=py310h00ca444_0 + - importlib-metadata=6.6.0=pyha770c72_0 + - importlib_metadata=6.6.0=hd8ed1ab_0 + - importlib_resources=5.12.0=pyhd8ed1ab_0 + - ipython=8.13.2=pyhd1c38e8_0 + - jedi=0.18.2=pyhd8ed1ab_0 + - json5=0.9.5=pyh9f0ad1d_0 + - jupyter-lsp=2.1.0=pyhd8ed1ab_0 + - jupyter_client=8.2.0=pyhd8ed1ab_0 + - jupyter_core=5.3.0=py310hbe9552e_0 + - jupyter_events=0.6.3=pyhd8ed1ab_0 + - jupyter_server=2.5.0=pyhd8ed1ab_0 + - jupyter_server_terminals=0.4.4=pyhd8ed1ab_1 + - jupyterlab=4.0.0=pyhd8ed1ab_1 + - jupyterlab_pygments=0.2.2=pyhd8ed1ab_0 + - 
jupyterlab_server=2.22.1=pyhd8ed1ab_0 + - libabseil=20230125.0=cxx17_hb7217d7_1 + - libbrotlicommon=1.0.9=h1a8c8d9_8 + - libbrotlidec=1.0.9=h1a8c8d9_8 + - libbrotlienc=1.0.9=h1a8c8d9_8 + - libcxx=16.0.3=h4653b0c_0 + - libffi=3.4.2=h3422bc3_5 + - libgrpc=1.54.1=h9dbdbd0_0 + - libsodium=1.0.18=h27ca646_1 + - libsqlite=3.41.2=hb31c410_1 + - libzlib=1.2.13=h03a7124_4 + - matplotlib-inline=0.1.6=pyhd8ed1ab_0 + - mistune=2.0.5=pyhd8ed1ab_0 + - nb_conda_kernels=2.3.1=py310hbe9552e_2 + - nbconvert-core=7.4.0=pyhd8ed1ab_0 + - nbformat=5.8.0=pyhd8ed1ab_0 + - ncurses=6.3=h07bb92c_1 + - nest-asyncio=1.5.6=pyhd8ed1ab_0 + - notebook-shim=0.2.3=pyhd8ed1ab_0 + - openssl=3.1.0=h53f4e23_3 + - packaging=23.1=pyhd8ed1ab_0 + - pandocfilters=1.5.0=pyhd8ed1ab_0 + - parso=0.8.3=pyhd8ed1ab_0 + - pexpect=4.8.0=pyh1a96a4e_2 + - pickleshare=0.7.5=py_1003 + - pip=23.1.2=pyhd8ed1ab_0 + - pkgutil-resolve-name=1.3.10=pyhd8ed1ab_0 + - prompt-toolkit=3.0.38=pyha770c72_0 + - prompt_toolkit=3.0.38=hd8ed1ab_0 + - ptyprocess=0.7.0=pyhd3deb0d_0 + - pure_eval=0.2.2=pyhd8ed1ab_0 + - pycparser=2.21=pyhd8ed1ab_0 + - pycryptodome=3.19.0=py310hd71b1c6_1 + - pygments=2.15.1=pyhd8ed1ab_0 + - python-levenshtein=0.12.2=py310he2143c4_1 + - pyobjc-core=9.1.1=py310h44ed3dd_0 + - pyobjc-framework-cocoa=9.1.1=py310h44ed3dd_0 + - pyrsistent=0.19.3=py310h8e9501a_0 + - pysocks=1.7.1=pyha2e5f31_6 + - pytest-asyncio=0.21.0=pyhd8ed1ab_0 + - python=3.10.10=h3ba56d0_0_cpython + - python-dateutil=2.8.2=pyhd8ed1ab_0 + - python-json-logger=2.0.7=pyhd8ed1ab_0 + - python_abi=3.10=3_cp310 + - pytz=2023.3=pyhd8ed1ab_0 + - pyzmq=25.0.2=py310hc407298_0 + - re2=2023.02.02=hb7217d7_0 + - readline=8.2=h92ec313_1 + - rfc3339-validator=0.1.4=pyhd8ed1ab_0 + - rfc3986-validator=0.1.1=pyh9f0ad1d_0 + - send2trash=1.8.2=pyhd1c38e8_0 + - setuptools=67.7.2=pyhd8ed1ab_0 + - six=1.16.0=pyh6c4a22f_0 + - sniffio=1.3.0=pyhd8ed1ab_0 + - stack_data=0.6.2=pyhd8ed1ab_0 + - terminado=0.17.1=pyhd1c38e8_0 + - tinycss2=1.2.1=pyhd8ed1ab_0 + - 
tk=8.6.12=he1e0b03_0 + - tomli=2.0.1=pyhd8ed1ab_0 + - traitlets=5.9.0=pyhd8ed1ab_0 + - typing_extensions=4.6.1=pyha770c72_0 + - tzdata=2023c=h71feb2d_0 + - uvicorn=0.22.0=py310hbe9552e_0 + - wcwidth=0.2.6=pyhd8ed1ab_0 + - xz=5.2.6=h57fd34a_0 + - yaml=0.2.5=h3422bc3_2 + - zeromq=4.3.4=hbdafb3b_1 + - zipp=3.15.0=pyhd8ed1ab_0 + - zlib=1.2.13=h03a7124_4 + - pip: + - addict==2.4.0 + - aiohttp==3.9.0 + - aiosignal==1.3.1 + - altair==4.2.2 + - ansible==6.7.0 + - ansible-core==2.13.7 + - ansible-vault==2.1.0 + - appdirs==1.4.4 + - argparse==1.4.0 + - arrow==1.2.3 + - async-timeout==4.0.2 + - backoff==2.1.0 + - blinker==1.6.2 + - cachetools==4.2.4 + - certifi==2024.2.2 + - cfgv==3.4.0 + - chardet==3.0.4 + - click==8.1.3 + - colorama==0.4.6 + - commonmark==0.9.1 + - cryptography==42.0.5 + - cytoolz==0.12.2 + - dataclasses-json==0.5.13 + - ddt==1.6.0 + - dill==0.3.6 + - distlib==0.3.7 + - docker-pycreds==0.4.0 + - ecdsa==0.18.0 + - eth-hash==0.5.2 + - eth-keys==0.4.0 + - eth-typing==3.4.0 + - eth-utils==2.2.0 + - exceptiongroup==1.1.2 + - fastapi==0.110.1 + - filelock==3.12.2 + - fqdn==1.5.1 + - frozenlist==1.4.0 + - fsspec==2023.6.0 + - fuzzywuzzy==0.18.0 + - gitdb==4.0.10 + - gitpython==3.1.32 + - google-api-core==1.34.0 + - google-api-python-client==2.7.0 + - google-auth==1.35.0 + - google-auth-httplib2==0.1.0 + - googleapis-common-protos==1.59.0 + - grpcio-tools==1.42.0 + - httplib2==0.22.0 + - huggingface-hub==0.16.4 + - hypothesis==6.47.4 + - identify==2.5.26 + - ipykernel==6.26.0 + - ipython-genutils==0.2.0 + - ipywidgets==8.0.6 + - isoduration==20.11.0 + - jinja2==3.1.2 + - joblib==1.2.0 + - jsonpointer==2.3 + - jupyter==1.0.0 + - jupyter-console==6.6.3 + - jupyterlab-widgets==3.0.7 + - markupsafe==2.0.1 + - marshmallow==3.19.0 + - marshmallow-enum==1.5.1 + - more-itertools==10.0.0 + - msgpack-numpy-opentensor==0.5.0 + - multidict==6.0.4 + - multiprocess==0.70.14 + - munch==2.5.0 + - mypy-extensions==1.0.0 + - nbclassic==1.0.0 + - nbclient==0.7.4 + - netaddr==0.8.0 + 
- networkx==3.1 + - nltk==3.8.1 + - nodeenv==1.8.0 + - notebook==6.5.4 + - numexpr==2.8.4 + - openapi-schema-pydantic==1.2.4 + - password-strength==0.0.3.post2 + - pathtools==0.1.2 + - pillow==10.1.0 + - platformdirs==3.10.0 + - plotly==5.14.1 + - pre-commit==3.3.2 + - prometheus-client==0.14.1 + - promise==2.3 + - py==1.11.0 + - py-bip39-bindings==0.1.11 + - py-ed25519-bindings==1.0.2 + - py-ed25519-zebra-bindings==1.0.1 + - py-sr25519-bindings==0.2.0 + - pyarrow==12.0.1 + - pyasn1==0.5.0 + - pyasn1-modules==0.3.0 + - pydantic==2.7.1 + - pydeck==0.8.1b0 + - pyinstrument==4.4.0 + - pympler==1.0.1 + - pynacl==1.5.0 + - pyparsing==3.1.1 + - python-statemachine==2.1.2 + - pytest==7.4.0 + - qqdm==0.0.7 + - qtconsole==5.4.3 + - qtpy==2.3.1 + - regex==2023.6.3 + - requests==2.31.0 + - resolvelib==0.8.1 + - responses==0.18.0 + - retry==0.9.2 + - rich==12.5.1 + - rsa==4.9 + - scalecodec==1.2.11 + - scikit-learn==1.2.2 + - scipy==1.10.1 + - sentencepiece==0.1.99 + - sentry-sdk==1.28.1 + - setproctitle==1.3.2 + - shortuuid==1.0.11 + - shtab==1.6.5 + - smmap==5.0.0 + - sortedcontainers==2.4.0 + - soupsieve==2.4.1 + - sqlalchemy==2.0.19 + - starlette==0.37.2 + - streamlit==1.22.0 + - substrate-interface==1.7.9 + - tenacity==8.2.2 + - termcolor==2.1.1 + - threadpoolctl==3.1.0 + - tokenizers==0.13.3 + - toml==0.10.2 + - toolz==0.12.0 + - torch==2.0.1 + - torchvision==0.15.2 + - tornado==6.3.3 + - tqdm==4.64.1 + - typing-extensions==4.8.0 + - typing-inspect==0.8.0 + - tzlocal==5.0.1 + - uri-template==1.2.0 + - uritemplate==3.0.1 + - urllib3==1.26.15 + - validators==0.20.0 + - virtualenv==20.24.3 + - wandb==0.15.10 + - webcolors==1.13 + - webencodings==0.5.1 + - websocket-client==1.6.1 + - wheel==0.37.1 + - widgetsnbextension==4.0.7 + - xxhash==3.2.0 + - yarl==1.9.2 +prefix: /opt/homebrew/Caskroom/miniforge/base/envs/bittensor diff --git a/scripts/install.sh b/scripts/install.sh new file mode 100755 index 0000000000..5111d75afb --- /dev/null +++ b/scripts/install.sh @@ -0,0 +1,298 
@@ + +#!/bin/bash +set -u + +# enable command completion +set -o history -o histexpand + +python="python3" + +abort() { + printf "%s\n" "$1" + exit 1 +} + +getc() { + local save_state + save_state=$(/bin/stty -g) + /bin/stty raw -echo + IFS= read -r -n 1 -d '' "$@" + /bin/stty "$save_state" +} + +exit_on_error() { + exit_code=$1 + last_command=${@:2} + if [ $exit_code -ne 0 ]; then + >&2 echo "\"${last_command}\" command failed with exit code ${exit_code}." + exit $exit_code + fi +} + +wait_for_user() { + local c + echo + echo "Press RETURN to continue or any other key to abort" + getc c + # we test for \r and \n because some stuff does \r instead + if ! [[ "$c" == $'\r' || "$c" == $'\n' ]]; then + exit 1 + fi +} + +shell_join() { + local arg + printf "%s" "$1" + shift + for arg in "$@"; do + printf " " + printf "%s" "${arg// /\ }" + done +} + +# string formatters +if [[ -t 1 ]]; then + tty_escape() { printf "\033[%sm" "$1"; } +else + tty_escape() { :; } +fi +tty_mkbold() { tty_escape "1;$1"; } +tty_underline="$(tty_escape "4;39")" +tty_blue="$(tty_mkbold 34)" +tty_red="$(tty_mkbold 31)" +tty_bold="$(tty_mkbold 39)" +tty_reset="$(tty_escape 0)" + +ohai() { + printf "${tty_blue}==>${tty_bold} %s${tty_reset}\n" "$(shell_join "$@")" +} + +# Things can fail later if `pwd` doesn't exist. +# Also sudo prints a warning message for no good reason +cd "/usr" || exit 1 + +linux_install_pre() { + sudo apt-get update + sudo apt-get install --no-install-recommends --no-install-suggests -y apt-utils curl git cmake build-essential + exit_on_error $? +} + +linux_install_python() { + which $python + if [[ $? != 0 ]] ; then + ohai "Installing python" + sudo apt-get install --no-install-recommends --no-install-suggests -y $python + else + ohai "Updating python" + sudo apt-get install --only-upgrade $python + fi + exit_on_error $? + ohai "Installing python tools" + sudo apt-get install --no-install-recommends --no-install-suggests -y $python-pip $python-dev + exit_on_error $? 
+} + +linux_update_pip() { + PYTHONPATH=$(which $python) + ohai "You are using python@ $PYTHONPATH$" + ohai "Installing python tools" + $python -m pip install --upgrade pip +} + +linux_install_bittensor() { + ohai "Cloning bittensor@master into ~/.bittensor/bittensor" + mkdir -p ~/.bittensor/bittensor + git clone https://github.com/opentensor/bittensor.git ~/.bittensor/bittensor/ 2> /dev/null || (cd ~/.bittensor/bittensor/ ; git fetch origin master ; git checkout master ; git pull --ff-only ; git reset --hard ; git clean -xdf) + ohai "Installing bittensor" + $python -m pip install -e ~/.bittensor/bittensor/ + exit_on_error $? +} + +linux_increase_ulimit(){ + ohai "Increasing ulimit to 1,000,000" + prlimit --pid=$PPID --nofile=1000000 +} + + +mac_install_xcode() { + which -s xcode-select + if [[ $? != 0 ]] ; then + ohai "Installing xcode:" + xcode-select --install + exit_on_error $? + fi +} + +mac_install_brew() { + which -s brew + if [[ $? != 0 ]] ; then + ohai "Installing brew:" + ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" + else + ohai "Updating brew:" + brew update --verbose + fi + exit_on_error $? +} + +mac_install_cmake() { + which -s cmake + if [[ $? != 0 ]] ; then + ohai "Installing cmake:" + brew install cmake + else + ohai "Updating cmake:" + brew upgrade cmake + fi +} + +mac_install_python() { + which -s python3 + ohai "Installing python3" + brew list python@3 &>/dev/null || brew install python@3; + ohai "Updating python3" + brew upgrade python@3 + exit_on_error $? 
+} + +mac_update_pip() { + PYTHONPATH=$(which $python) + ohai "You are using python@ $PYTHONPATH$" + ohai "Installing python tools" + $python -m pip install --upgrade pip +} + +mac_install_bittensor() { + ohai "Cloning bittensor@text_prompting into ~/.bittensor/bittensor" + git clone https://github.com/opentensor/bittensor.git ~/.bittensor/bittensor/ 2> /dev/null || (cd ~/.bittensor/bittensor/ ; git fetch origin master ; git checkout master ; git pull --ff-only ; git reset --hard; git clean -xdf) + ohai "Installing bittensor" + $python -m pip install -e ~/.bittensor/bittensor/ + exit_on_error $? + deactivate +} + +# Do install. +OS="$(uname)" +if [[ "$OS" == "Linux" ]]; then + + which -s apt + if [[ $? == 0 ]] ; then + abort "This linux based install requires apt. To run with other distros (centos, arch, etc), you will need to manually install the requirements" + fi + echo """ + +██████╗░██╗████████╗████████╗███████╗███╗░░██╗░██████╗░█████╗░██████╗░ +██╔══██╗██║╚══██╔══╝╚══██╔══╝██╔════╝████╗░██║██╔════╝██╔══██╗██╔══██╗ +██████╩╝██║░░░██║░░░░░░██║░░░█████╗░░██╔██╗██║╚█████╗░██║░░██║██████╔╝ +██╔══██╗██║░░░██║░░░░░░██║░░░██╔══╝░░██║╚████║░╚═══██╗██║░░██║██╔══██╗ +██████╩╝██║░░░██║░░░░░░██║░░░███████╗██║░╚███║██████╔╝╚█████╔╝██║░░██║ +╚═════╝░╚═╝░░░╚═╝░░░░░░╚═╝░░░╚══════╝╚═╝░░╚══╝╚═════╝░░╚════╝░╚═╝░░╚═╝ + + - Mining a new element. + """ + ohai "This script will install:" + echo "git" + echo "curl" + echo "cmake" + echo "build-essential" + echo "python3" + echo "python3-pip" + echo "bittensor" + + wait_for_user + linux_install_pre + linux_install_python + linux_update_pip + linux_install_bittensor + + ohai "Would you like to increase the ulimit? 
This will allow your miner to run for a longer time" + wait_for_user + linux_increase_ulimit + echo "" + echo "" + echo "######################################################################" + echo "## ##" + echo "## BITTENSOR SETUP ##" + echo "## ##" + echo "######################################################################" + echo "" + echo "" + +elif [[ "$OS" == "Darwin" ]]; then + echo """ + +██████╗░██╗████████╗████████╗███████╗███╗░░██╗░██████╗░█████╗░██████╗░ +██╔══██╗██║╚══██╔══╝╚══██╔══╝██╔════╝████╗░██║██╔════╝██╔══██╗██╔══██╗ +██████╩╝██║░░░██║░░░░░░██║░░░█████╗░░██╔██╗██║╚█████╗░██║░░██║██████╔╝ +██╔══██╗██║░░░██║░░░░░░██║░░░██╔══╝░░██║╚████║░╚═══██╗██║░░██║██╔══██╗ +██████╩╝██║░░░██║░░░░░░██║░░░███████╗██║░╚███║██████╔╝╚█████╔╝██║░░██║ +╚═════╝░╚═╝░░░╚═╝░░░░░░╚═╝░░░╚══════╝╚═╝░░╚══╝╚═════╝░░╚════╝░╚═╝░░╚═╝ + + - Mining a new element. + """ + ohai "This script will install:" + echo "xcode" + echo "homebrew" + echo "git" + echo "cmake" + echo "python3" + echo "python3-pip" + echo "bittensor" + + wait_for_user + mac_install_brew + mac_install_cmake + mac_install_python + mac_update_pip + mac_install_bittensor + echo "" + echo "" + echo "######################################################################" + echo "## ##" + echo "## BITTENSOR SETUP ##" + echo "## ##" + echo "######################################################################" +else + abort "Bittensor is only supported on macOS and Linux" +fi + +# Use the shell's audible bell. +if [[ -t 1 ]]; then +printf "\a" +fi + +echo "" +echo "" +ohai "Welcome. Installation successful" +echo "" +echo "- 1. Create a wallet " +echo " $ btcli new_coldkey (for holding funds)" +echo " $ btcli new_hotkey (for running miners)" +echo "" +echo "- 2. Run a miner on the prompting network. 
" +echo " $ python3 ~/.bittensor/bittensor/neurons/text/prompting/miners/gpt4all/neuron.py" +echo "" +ohai "Extras:" +echo "" +echo "- Check your tao balance: " +echo " $ btcli wallet overview" +echo "" +echo "- Stake to your miners:" +echo " $ btcli stake add" +echo " $ btcli stake remove" +echo "" +echo "- Create/list/register wallets" +echo " $ btcli w new_coldkey" +echo " $ btcli w new_hotkey" +echo " $ btcli w list" +echo " $ btcli s register" +echo "" +echo "- Use the Python API" +echo " $ python3"echo " >> import bittensor" +echo "" +echo "- Join the discussion: " +echo " ${tty_underline}https://discord.gg/3rUr6EcvbB${tty_reset}" +echo "" + + + diff --git a/scripts/post_install_cli.py b/scripts/post_install_cli.py new file mode 100644 index 0000000000..bfaca34c37 --- /dev/null +++ b/scripts/post_install_cli.py @@ -0,0 +1,29 @@ +import os +import subprocess +import sys + + +def post_install(): + # Determine the shell type (bash, zsh, etc.) + shell = os.environ.get("SHELL") + if "bash" in shell: + shell_config = "~/.bashrc" + elif "zsh" in shell: + shell_config = "~/.zshrc" + else: + print("Unsupported shell for autocompletion.") + return + + # Generate the completion script + completion_script = subprocess.check_output( + [sys.executable, "-m", "bittensor.cli", "--print-completion", shell] + ).decode() + + # Append the completion script to the shell configuration file + with open(os.path.expanduser(shell_config), "a") as file: + file.write("\n# Bittensor CLI Autocompletion\n") + file.write(completion_script) + + +if __name__ == "__main__": + post_install() diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000..bb0db98080 --- /dev/null +++ b/setup.py @@ -0,0 +1,99 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including 
without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import codecs +import os +import pathlib +import re +from io import open +from os import path + +from setuptools import setup, find_packages + + +def read_requirements(path): + requirements = [] + + with pathlib.Path(path).open() as requirements_txt: + for line in requirements_txt: + if line.startswith("git+"): + pkg_name = re.search(r"egg=([a-zA-Z0-9_-]+)", line.strip()).group(1) + requirements.append(pkg_name + " @ " + line.strip()) + else: + requirements.append(line.strip()) + + return requirements + + +requirements = read_requirements("requirements/prod.txt") +extra_requirements_btcli = read_requirements("requirements/btcli.txt") +extra_requirements_dev = read_requirements("requirements/dev.txt") +extra_requirements_cubit = read_requirements("requirements/cubit.txt") +extra_requirements_torch = read_requirements("requirements/torch.txt") + +here = path.abspath(path.dirname(__file__)) + +with open(path.join(here, "README.md"), encoding="utf-8") as f: + long_description = f.read() + + +# loading version from setup.py +with codecs.open( + os.path.join(here, "bittensor/core/settings.py"), encoding="utf-8" +) as init_file: + 
version_match = re.search( + r"^__version__ = ['\"]([^'\"]*)['\"]", init_file.read(), re.M + ) + version_string = version_match.group(1) + +setup( + name="bittensor", + version=version_string, + description="bittensor", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/opentensor/bittensor", + author="bittensor.com", + packages=find_packages(exclude=["tests", "tests.*"]), + include_package_data=True, + author_email="", + license="MIT", + python_requires=">=3.9", + install_requires=requirements, + extras_require={ + "btcli": extra_requirements_btcli, + "cubit": extra_requirements_cubit, + "dev": extra_requirements_dev, + "torch": extra_requirements_torch, + }, + classifiers=[ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Topic :: Software Development :: Build Tools", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + ], +) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000000..1c7bc4757e --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,18 @@ +# The MIT License (MIT) +# Copyright © 2022 Yuma Rao +# Copyright © 2022-2023 Opentensor Foundation +# Copyright © 2023 Opentensor Technologies Inc + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. diff --git a/tests/e2e_tests/__init__.py b/tests/e2e_tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py new file mode 100644 index 0000000000..9fc9faec68 --- /dev/null +++ b/tests/e2e_tests/conftest.py @@ -0,0 +1,84 @@ +import os +import re +import shlex +import signal +import subprocess +import time + +import pytest +from substrateinterface import SubstrateInterface + +from bittensor import logging +from tests.e2e_tests.utils.test_utils import ( + clone_or_update_templates, + install_templates, + template_path, + uninstall_templates, +) + + +# Fixture for setting up and tearing down a localnet.sh chain between tests +@pytest.fixture(scope="function") +def local_chain(request): + param = request.param if hasattr(request, "param") else None + # Get the environment variable for the script path + script_path = os.getenv("LOCALNET_SH_PATH") + + if not script_path: + # Skip the test if the localhost.sh path is not set + logging.warning("LOCALNET_SH_PATH env variable is not set, e2e test skipped.") + pytest.skip("LOCALNET_SH_PATH environment variable is not set.") + + # Check if param is None, and handle it accordingly + 
args = "" if param is None else f"{param}" + + # Compile commands to send to process + cmds = shlex.split(f"{script_path} {args}") + + # Start new node process + process = subprocess.Popen( + cmds, stdout=subprocess.PIPE, text=True, preexec_fn=os.setsid + ) + + # Pattern match indicates node is compiled and ready + pattern = re.compile(r"Imported #1") + + # install neuron templates + logging.info("downloading and installing neuron templates from github") + templates_dir = clone_or_update_templates() + install_templates(templates_dir) + + timestamp = int(time.time()) + + def wait_for_node_start(process, pattern): + for line in process.stdout: + print(line.strip()) + # 10 min as timeout + if int(time.time()) - timestamp > 10 * 60: + print("Subtensor not started in time") + break + if pattern.search(line): + print("Node started!") + break + + wait_for_node_start(process, pattern) + + # Run the test, passing in substrate interface + yield SubstrateInterface(url="ws://127.0.0.1:9945") + + # Terminate the process group (includes all child processes) + os.killpg(os.getpgid(process.pid), signal.SIGTERM) + + # Give some time for the process to terminate + time.sleep(1) + + # If the process is not terminated, send SIGKILL + if process.poll() is None: + os.killpg(os.getpgid(process.pid), signal.SIGKILL) + + # Ensure the process has terminated + process.wait() + + # uninstall templates + logging.info("uninstalling neuron templates") + uninstall_templates(template_path) diff --git a/tests/e2e_tests/test_axon.py b/tests/e2e_tests/test_axon.py new file mode 100644 index 0000000000..bcf8650fd1 --- /dev/null +++ b/tests/e2e_tests/test_axon.py @@ -0,0 +1,128 @@ +import asyncio +import sys + +import pytest + +import bittensor +from bittensor import logging +from bittensor.utils import networking +from tests.e2e_tests.utils.chain_interactions import register_neuron, register_subnet +from tests.e2e_tests.utils.test_utils import ( + setup_wallet, + template_path, + templates_repo, +) + 
+ +@pytest.mark.asyncio +async def test_axon(local_chain): + """ + Test the Axon mechanism and successful registration on the network. + + Steps: + 1. Register a subnet and register Alice + 2. Check if metagraph.axon is updated and check axon attributes + 3. Run Alice as a miner on the subnet + 4. Check the metagraph again after running the miner and verify all attributes + Raises: + AssertionError: If any of the checks or verifications fail + """ + + logging.info("Testing test_axon") + + netuid = 1 + # Register root as Alice - the subnet owner + alice_keypair, wallet = setup_wallet("//Alice") + + # Register a subnet, netuid 1 + assert register_subnet(local_chain, wallet), "Subnet wasn't created" + + # Verify subnet created successfully + assert local_chain.query( + "SubtensorModule", "NetworksAdded", [netuid] + ).serialize(), "Subnet wasn't created successfully" + + # Register Alice to the network + assert register_neuron( + local_chain, wallet, netuid + ), f"Neuron wasn't registered to subnet {netuid}" + + metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945") + + # Validate current metagraph stats + old_axon = metagraph.axons[0] + assert len(metagraph.axons) == 1, f"Expected 1 axon, but got {len(metagraph.axons)}" + assert old_axon.hotkey == alice_keypair.ss58_address, "Hotkey mismatch for the axon" + assert ( + old_axon.coldkey == alice_keypair.ss58_address + ), "Coldkey mismatch for the axon" + assert old_axon.ip == "0.0.0.0", f"Expected IP 0.0.0.0, but got {old_axon.ip}" + assert old_axon.port == 0, f"Expected port 0, but got {old_axon.port}" + assert old_axon.ip_type == 0, f"Expected IP type 0, but got {old_axon.ip_type}" + + # Prepare to run the miner + cmd = " ".join( + [ + f"{sys.executable}", + f'"{template_path}{templates_repo}/neurons/miner.py"', + "--no_prompt", + "--netuid", + str(netuid), + "--subtensor.network", + "local", + "--subtensor.chain_endpoint", + "ws://localhost:9945", + "--wallet.path", + wallet.path, + 
"--wallet.name", + wallet.name, + "--wallet.hotkey", + "default", + ] + ) + + # Run the miner in the background + await asyncio.create_subprocess_shell( + cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + + logging.info("Neuron Alice is now mining") + + # Waiting for 5 seconds for metagraph to be updated + await asyncio.sleep(5) + + # Refresh the metagraph + metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945") + updated_axon = metagraph.axons[0] + external_ip = networking.get_external_ip() + + # Assert updated attributes + assert ( + len(metagraph.axons) == 1 + ), f"Expected 1 axon, but got {len(metagraph.axons)} after mining" + + assert ( + len(metagraph.neurons) == 1 + ), f"Expected 1 neuron, but got {len(metagraph.neurons)}" + + assert ( + updated_axon.ip == external_ip + ), f"Expected IP {external_ip}, but got {updated_axon.ip}" + + assert ( + updated_axon.ip_type == networking.ip_version(external_ip) + ), f"Expected IP type {networking.ip_version(external_ip)}, but got {updated_axon.ip_type}" + + assert updated_axon.port == 8091, f"Expected port 8091, but got {updated_axon.port}" + + assert ( + updated_axon.hotkey == alice_keypair.ss58_address + ), "Hotkey mismatch after mining" + + assert ( + updated_axon.coldkey == alice_keypair.ss58_address + ), "Coldkey mismatch after mining" + + logging.info("✅ Passed test_axon") diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py new file mode 100644 index 0000000000..1974854b9b --- /dev/null +++ b/tests/e2e_tests/test_commit_weights.py @@ -0,0 +1,165 @@ +import time + +import numpy as np +import pytest + +import bittensor +from bittensor import logging +from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit +from tests.e2e_tests.utils.chain_interactions import ( + add_stake, + register_neuron, + register_subnet, + sudo_set_hyperparameter_bool, + sudo_set_hyperparameter_values, + wait_interval, +) +from 
tests.e2e_tests.utils.test_utils import setup_wallet + + +@pytest.mark.asyncio +async def test_commit_and_reveal_weights(local_chain): + """ + Tests the commit/reveal weights mechanism + + Steps: + 1. Register a subnet through Alice + 2. Register Alice's neuron and add stake + 3. Enable commit-reveal mechanism on the subnet + 4. Lower the commit_reveal interval and rate limit + 5. Commit weights and verify + 6. Wait interval & reveal weights and verify + Raises: + AssertionError: If any of the checks or verifications fail + """ + netuid = 1 + logging.info("Testing test_commit_and_reveal_weights") + # Register root as Alice + keypair, alice_wallet = setup_wallet("//Alice") + assert register_subnet(local_chain, alice_wallet), "Unable to register the subnet" + + # Verify subnet 1 created successfully + assert local_chain.query( + "SubtensorModule", "NetworksAdded", [1] + ).serialize(), "Subnet wasn't created successfully" + + assert register_neuron( + local_chain, alice_wallet, netuid + ), "Unable to register Alice as a neuron" + + # Stake to become to top neuron after the first epoch + add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000)) + + # Enable commit_reveal on the subnet + assert sudo_set_hyperparameter_bool( + local_chain, + alice_wallet, + "sudo_set_commit_reveal_weights_enabled", + True, + netuid, + ), "Unable to enable commit reveal on the subnet" + + subtensor = bittensor.Subtensor(network="ws://localhost:9945") + assert subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" + + # Lower the commit_reveal interval + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_commit_reveal_weights_interval", + call_params={"netuid": netuid, "interval": "370"}, + return_error_message=True, + ) + + subtensor = bittensor.Subtensor(network="ws://localhost:9945") + assert ( + subtensor.get_subnet_hyperparameters( + netuid=netuid + 
).commit_reveal_weights_interval + == 370 + ), "Failed to set commit/reveal interval" + + assert ( + subtensor.weights_rate_limit(netuid=netuid) > 0 + ), "Weights rate limit is below 0" + # Lower the rate limit + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_weights_set_rate_limit", + call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, + return_error_message=True, + ) + subtensor = bittensor.Subtensor(network="ws://localhost:9945") + assert ( + subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + ), "Failed to set weights_rate_limit" + assert subtensor.weights_rate_limit(netuid=netuid) == 0 + + # Commit-reveal values + uids = np.array([0], dtype=np.int64) + weights = np.array([0.1], dtype=np.float32) + salt = [18, 179, 107, 0, 165, 211, 141, 197] + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + + # Commit weights + success, message = subtensor.commit_weights( + alice_wallet, + netuid, + salt=salt, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + + weight_commits = subtensor.query_module( + module="SubtensorModule", + name="WeightCommits", + params=[netuid, alice_wallet.hotkey.ss58_address], + ) + # Assert that the committed weights are set correctly + assert weight_commits.value is not None, "Weight commit not found in storage" + commit_hash, commit_block = weight_commits.value + assert commit_block > 0, f"Invalid block number: {commit_block}" + + # Query the WeightCommitRevealInterval storage map + weight_commit_reveal_interval = subtensor.query_module( + module="SubtensorModule", name="WeightCommitRevealInterval", params=[netuid] + ) + interval = weight_commit_reveal_interval.value + assert interval > 0, "Invalid WeightCommitRevealInterval" + + # Wait until the reveal block range + await wait_interval(interval, subtensor) + + # Reveal weights + success, message = 
subtensor.reveal_weights( + alice_wallet, + netuid, + uids=weight_uids, + weights=weight_vals, + salt=salt, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + time.sleep(10) + + # Query the Weights storage map + revealed_weights = subtensor.query_module( + module="SubtensorModule", + name="Weights", + params=[netuid, 0], # netuid and uid + ) + + # Assert that the revealed weights are set correctly + assert revealed_weights.value is not None, "Weight reveal not found in storage" + + assert ( + weight_vals[0] == revealed_weights.value[0][1] + ), f"Incorrect revealed weights. Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" + logging.info("✅ Passed test_commit_and_reveal_weights") diff --git a/tests/e2e_tests/test_dendrite.py b/tests/e2e_tests/test_dendrite.py new file mode 100644 index 0000000000..3f02d021c0 --- /dev/null +++ b/tests/e2e_tests/test_dendrite.py @@ -0,0 +1,136 @@ +import asyncio +import sys + +import pytest + +import bittensor +from bittensor import logging, Subtensor + +from tests.e2e_tests.utils.test_utils import ( + setup_wallet, + template_path, + templates_repo, +) +from tests.e2e_tests.utils.chain_interactions import ( + register_neuron, + register_subnet, + add_stake, + wait_epoch, +) + + +@pytest.mark.asyncio +async def test_dendrite(local_chain): + """ + Test the Dendrite mechanism + + Steps: + 1. Register a subnet through Alice + 2. Register Bob as a validator + 3. Add stake to Bob and ensure neuron is not a validator yet + 4. Run Bob as a validator and wait epoch + 5. 
Ensure Bob's neuron has all correct attributes of a validator + Raises: + AssertionError: If any of the checks or verifications fail + """ + + logging.info("Testing test_dendrite") + netuid = 1 + + # Register root as Alice - the subnet owner + alice_keypair, alice_wallet = setup_wallet("//Alice") + + # Register a subnet, netuid 1 + assert register_subnet(local_chain, alice_wallet), "Subnet wasn't created" + + # Verify subnet created successfully + assert local_chain.query( + "SubtensorModule", "NetworksAdded", [netuid] + ).serialize(), "Subnet wasn't created successfully" + + # Register Bob + bob_keypair, bob_wallet = setup_wallet("//Bob") + + # Register Bob to the network + assert register_neuron( + local_chain, bob_wallet, netuid + ), f"Neuron wasn't registered to subnet {netuid}" + + metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945") + subtensor = Subtensor(network="ws://localhost:9945") + + # Assert one neuron is Bob + assert len(subtensor.neurons(netuid=netuid)) == 1 + neuron = metagraph.neurons[0] + assert neuron.hotkey == bob_keypair.ss58_address + assert neuron.coldkey == bob_keypair.ss58_address + + # Assert stake is 0 + assert neuron.stake.tao == 0 + + # Stake to become to top neuron after the first epoch + assert add_stake(local_chain, bob_wallet, bittensor.Balance.from_tao(10_000)) + + # Refresh metagraph + metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945") + old_neuron = metagraph.neurons[0] + + # Assert stake is 10000 + assert ( + old_neuron.stake.tao == 10_000.0 + ), f"Expected 10_000.0 staked TAO, but got {neuron.stake.tao}" + + # Assert neuron is not a validator yet + assert old_neuron.active is True + assert old_neuron.validator_permit is False + assert old_neuron.validator_trust == 0.0 + assert old_neuron.pruning_score == 0 + + # Prepare to run the validator + cmd = " ".join( + [ + f"{sys.executable}", + f'"{template_path}{templates_repo}/neurons/validator.py"', + "--no_prompt", + "--netuid", 
+ str(netuid), + "--subtensor.network", + "local", + "--subtensor.chain_endpoint", + "ws://localhost:9945", + "--wallet.path", + bob_wallet.path, + "--wallet.name", + bob_wallet.name, + "--wallet.hotkey", + "default", + ] + ) + + # Run the validator in the background + await asyncio.create_subprocess_shell( + cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + logging.info("Neuron Alice is now validating") + await asyncio.sleep( + 5 + ) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data + + await wait_epoch(subtensor, netuid=netuid) + + # Refresh metagraph + metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945") + + # Refresh validator neuron + updated_neuron = metagraph.neurons[0] + + assert len(metagraph.neurons) == 1 + assert updated_neuron.active is True + assert updated_neuron.validator_permit is True + assert updated_neuron.hotkey == bob_keypair.ss58_address + assert updated_neuron.coldkey == bob_keypair.ss58_address + assert updated_neuron.pruning_score != 0 + + logging.info("✅ Passed test_dendrite") diff --git a/tests/e2e_tests/test_incentive.py b/tests/e2e_tests/test_incentive.py new file mode 100644 index 0000000000..355bf44077 --- /dev/null +++ b/tests/e2e_tests/test_incentive.py @@ -0,0 +1,184 @@ +import asyncio +import sys + +import pytest + +from bittensor import Subtensor, logging +from tests.e2e_tests.utils.chain_interactions import ( + add_stake, + register_neuron, + register_subnet, + wait_epoch, +) +from tests.e2e_tests.utils.test_utils import ( + setup_wallet, + template_path, + templates_repo, +) +from bittensor.utils.balance import Balance +from bittensor.core.extrinsics.set_weights import do_set_weights +from bittensor.core.metagraph import Metagraph + + +@pytest.mark.asyncio +async def test_incentive(local_chain): + """ + Test the incentive mechanism and interaction of miners/validators + + Steps: + 1. Register a subnet and register Alice & Bob + 2. 
Add Stake by Alice + 3. Run Alice as validator & Bob as miner. Wait Epoch + 4. Verify miner has correct: trust, rank, consensus, incentive + 5. Verify validator has correct: validator_permit, validator_trust, dividends, stake + Raises: + AssertionError: If any of the checks or verifications fail + """ + + logging.info("Testing test_incentive") + netuid = 1 + + # Register root as Alice - the subnet owner and validator + alice_keypair, alice_wallet = setup_wallet("//Alice") + register_subnet(local_chain, alice_wallet) + + # Verify subnet created successfully + assert local_chain.query( + "SubtensorModule", "NetworksAdded", [netuid] + ).serialize(), "Subnet wasn't created successfully" + + # Register Bob as miner + bob_keypair, bob_wallet = setup_wallet("//Bob") + + # Register Alice as a neuron on the subnet + register_neuron(local_chain, alice_wallet, netuid) + + # Register Bob as a neuron on the subnet + register_neuron(local_chain, bob_wallet, netuid) + + subtensor = Subtensor(network="ws://localhost:9945") + # Assert two neurons are in network + assert ( + len(subtensor.neurons(netuid=netuid)) == 2 + ), "Alice & Bob not registered in the subnet" + + # Alice to stake to become to top neuron after the first epoch + add_stake(local_chain, alice_wallet, Balance.from_tao(10_000)) + + # Prepare to run Bob as miner + cmd = " ".join( + [ + f"{sys.executable}", + f'"{template_path}{templates_repo}/neurons/miner.py"', + "--no_prompt", + "--netuid", + str(netuid), + "--subtensor.network", + "local", + "--subtensor.chain_endpoint", + "ws://localhost:9945", + "--wallet.path", + bob_wallet.path, + "--wallet.name", + bob_wallet.name, + "--wallet.hotkey", + "default", + "--logging.trace", + ] + ) + + # Run Bob as miner in the background + await asyncio.create_subprocess_shell( + cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + logging.info("Neuron Bob is now mining") + await asyncio.sleep( + 5 + ) # wait for 5 seconds for the metagraph to refresh 
with latest data + + # Prepare to run Alice as validator + cmd = " ".join( + [ + f"{sys.executable}", + f'"{template_path}{templates_repo}/neurons/validator.py"', + "--no_prompt", + "--netuid", + str(netuid), + "--subtensor.network", + "local", + "--subtensor.chain_endpoint", + "ws://localhost:9945", + "--wallet.path", + alice_wallet.path, + "--wallet.name", + alice_wallet.name, + "--wallet.hotkey", + "default", + "--logging.trace", + ] + ) + + # Run Alice as validator in the background + await asyncio.create_subprocess_shell( + cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + logging.info("Neuron Alice is now validating") + await asyncio.sleep( + 5 + ) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data + + # Get latest metagraph + metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945") + + # Get current miner/validator stats + bob_neuron = metagraph.neurons[1] + assert bob_neuron.incentive == 0 + assert bob_neuron.consensus == 0 + assert bob_neuron.rank == 0 + assert bob_neuron.trust == 0 + + alice_neuron = metagraph.neurons[0] + assert alice_neuron.validator_permit is False + assert alice_neuron.dividends == 0 + assert alice_neuron.stake.tao == 10_000.0 + assert alice_neuron.validator_trust == 0 + + # Wait until next epoch + await wait_epoch(subtensor) + + # Set weights by Alice on the subnet + do_set_weights( + self=subtensor, + wallet=alice_wallet, + uids=[1], + vals=[65535], + netuid=netuid, + version_key=0, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + logging.info("Alice neuron set weights successfully") + + await wait_epoch(subtensor) + + # Refresh metagraph + metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945") + + # Get current emissions and validate that Alice has gotten tao + bob_neuron = metagraph.neurons[1] + assert bob_neuron.incentive == 1 + assert bob_neuron.consensus == 1 + assert bob_neuron.rank == 1 + assert bob_neuron.trust == 1 + + 
alice_neuron = metagraph.neurons[0] + assert alice_neuron.validator_permit is True + assert alice_neuron.dividends == 1 + assert alice_neuron.stake.tao == 10_000.0 + assert alice_neuron.validator_trust == 1 + + logging.info("✅ Passed test_incentive") diff --git a/tests/e2e_tests/test_liquid_alpha.py b/tests/e2e_tests/test_liquid_alpha.py new file mode 100644 index 0000000000..21492fba8d --- /dev/null +++ b/tests/e2e_tests/test_liquid_alpha.py @@ -0,0 +1,186 @@ +import bittensor +from bittensor import logging +from tests.e2e_tests.utils.chain_interactions import ( + add_stake, + register_neuron, + register_subnet, + sudo_set_hyperparameter_bool, + sudo_set_hyperparameter_values, +) +from tests.e2e_tests.utils.test_utils import setup_wallet + + +def liquid_alpha_call_params(netuid: int, alpha_values: str): + alpha_low, alpha_high = [v.strip() for v in alpha_values.split(",")] + return { + "netuid": netuid, + "alpha_low": alpha_low, + "alpha_high": alpha_high, + } + + +def test_liquid_alpha(local_chain): + """ + Test the liquid alpha mechanism + + Steps: + 1. Register a subnet through Alice + 2. Register Alice's neuron and add stake + 3. Verify we can't set alpha values without enabling liquid_alpha + 4. Test setting alpha values after enabling liquid_alpha + 5. 
Verify failures when setting incorrect values (upper and lower bounds) + Raises: + AssertionError: If any of the checks or verifications fail + """ + u16_max = 65535 + netuid = 1 + logging.info("Testing test_liquid_alpha_enabled") + + # Register root as Alice + keypair, alice_wallet = setup_wallet("//Alice") + register_subnet(local_chain, alice_wallet), "Unable to register the subnet" + + # Verify subnet 1 created successfully + assert local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize() + + # Register a neuron to the subnet + assert register_neuron( + local_chain, alice_wallet, netuid + ), "Unable to register Alice as a neuron" + + # Stake to become to top neuron after the first epoch + add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000)) + + # Assert liquid alpha is disabled + subtensor = bittensor.Subtensor(network="ws://localhost:9945") + assert ( + subtensor.get_subnet_hyperparameters(netuid=netuid).liquid_alpha_enabled + is False + ), "Liquid alpha is enabled by default" + + # Attempt to set alpha high/low while disabled (should fail) + alpha_values = "6553, 53083" + call_params = liquid_alpha_call_params(netuid, alpha_values) + result, error_message = sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_alpha_values", + call_params=call_params, + return_error_message=True, + ) + assert result is False, "Alpha values set while being disabled" + assert error_message["name"] == "LiquidAlphaDisabled" + + # Enabled liquid alpha on the subnet + assert sudo_set_hyperparameter_bool( + local_chain, alice_wallet, "sudo_set_liquid_alpha_enabled", True, netuid + ), "Unable to enable liquid alpha" + + assert subtensor.get_subnet_hyperparameters( + netuid=1 + ).liquid_alpha_enabled, "Failed to enable liquid alpha" + + # Attempt to set alpha high & low after enabling the hyperparameter + alpha_values = "87, 54099" + call_params = liquid_alpha_call_params(netuid, alpha_values) + assert 
sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_alpha_values", + call_params=call_params, + ), "Unable to set alpha_values" + assert ( + subtensor.get_subnet_hyperparameters(netuid=1).alpha_high == 54099 + ), "Failed to set alpha high" + assert ( + subtensor.get_subnet_hyperparameters(netuid=1).alpha_low == 87 + ), "Failed to set alpha low" + + # Testing alpha high upper and lower bounds + + # 1. Test setting Alpha_high too low + alpha_high_too_low = ( + u16_max * 4 // 5 + ) - 1 # One less than the minimum acceptable value + call_params = liquid_alpha_call_params(netuid, f"6553, {alpha_high_too_low}") + result, error_message = sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_alpha_values", + call_params=call_params, + return_error_message=True, + ) + + assert result is False, "Able to set incorrect alpha_high value" + assert error_message["name"] == "AlphaHighTooLow" + + # 2. Test setting Alpha_high too high + alpha_high_too_high = u16_max + 1 # One more than the max acceptable value + call_params = liquid_alpha_call_params(netuid, f"6553, {alpha_high_too_high}") + try: + result, error_message = sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_alpha_values", + call_params=call_params, + return_error_message=True, + ) + except Exception as e: + assert str(e) == "65536 out of range for u16", f"Unexpected error: {e}" + + # Testing alpha low upper and lower bounds + + # 1. Test setting Alpha_low too low + alpha_low_too_low = 0 + call_params = liquid_alpha_call_params(netuid, f"{alpha_low_too_low}, 53083") + result, error_message = sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_alpha_values", + call_params=call_params, + return_error_message=True, + ) + assert result is False, "Able to set incorrect alpha_low value" + assert error_message["name"] == "AlphaLowOutOfRange" + + # 2. 
Test setting Alpha_low too high + alpha_low_too_high = ( + u16_max * 4 // 5 + ) + 1 # One more than the maximum acceptable value + call_params = liquid_alpha_call_params(netuid, f"{alpha_low_too_high}, 53083") + result, error_message = sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_alpha_values", + call_params=call_params, + return_error_message=True, + ) + assert result is False, "Able to set incorrect alpha_low value" + assert error_message["name"] == "AlphaLowOutOfRange" + + # Setting normal alpha values + alpha_values = "6553, 53083" + call_params = liquid_alpha_call_params(netuid, alpha_values) + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_alpha_values", + call_params=call_params, + ), "Unable to set liquid alpha values" + + assert ( + subtensor.get_subnet_hyperparameters(netuid=1).alpha_high == 53083 + ), "Failed to set alpha high" + assert ( + subtensor.get_subnet_hyperparameters(netuid=1).alpha_low == 6553 + ), "Failed to set alpha low" + + # Disable Liquid Alpha + assert sudo_set_hyperparameter_bool( + local_chain, alice_wallet, "sudo_set_liquid_alpha_enabled", False, netuid + ), "Unable to disable liquid alpha" + + assert ( + subtensor.get_subnet_hyperparameters(netuid=1).liquid_alpha_enabled is False + ), "Failed to disable liquid alpha" + logging.info("✅ Passed test_liquid_alpha") diff --git a/tests/e2e_tests/test_metagraph.py b/tests/e2e_tests/test_metagraph.py new file mode 100644 index 0000000000..60dc2826a3 --- /dev/null +++ b/tests/e2e_tests/test_metagraph.py @@ -0,0 +1,177 @@ +import time + +import bittensor +from bittensor import logging +from tests.e2e_tests.utils.chain_interactions import ( + add_stake, + register_neuron, + register_subnet, +) +from tests.e2e_tests.utils.test_utils import ( + setup_wallet, +) + + +def neuron_to_dict(neuron): + """ + Convert a neuron object to a dictionary, excluding private attributes, methods, and specific fields. 
+ Returns: + dict: A dictionary of the neuron's public attributes. + + Note: + Excludes 'weights' and 'bonds' fields. These are present in subtensor + but not in metagraph + """ + excluded_fields = {"weights", "bonds"} + return { + attr: getattr(neuron, attr) + for attr in dir(neuron) + if not attr.startswith("_") + and not callable(getattr(neuron, attr)) + and attr not in excluded_fields + } + + +def test_metagraph(local_chain): + """ + Tests the metagraph + + Steps: + 1. Register a subnet through Alice + 2. Assert metagraph's initial state + 3. Register Bob and validate info in metagraph + 4. Fetch neuron info of Bob through subtensor & metagraph and verify + 5. Register Dave and validate info in metagraph + 6. Verify low balance stake fails & add stake thru Bob and verify + 7. Load pre_dave metagraph from latest save and verify both instances + Raises: + AssertionError: If any of the checks or verifications fail + """ + logging.info("Testing test_metagraph_command") + netuid = 1 + + # Register Alice, Bob, and Dave + alice_keypair, alice_wallet = setup_wallet("//Alice") + bob_keypair, bob_wallet = setup_wallet("//Bob") + dave_keypair, dave_wallet = setup_wallet("//Dave") + + # Register the subnet through Alice + register_subnet(local_chain, alice_wallet), "Unable to register the subnet" + + # Verify subnet was created successfully + assert local_chain.query( + "SubtensorModule", "NetworksAdded", [1] + ).serialize(), "Subnet wasn't created successfully" + + # Initialize metagraph + subtensor = bittensor.Subtensor(network="ws://localhost:9945") + metagraph = subtensor.metagraph(netuid=1) + + # Assert metagraph is empty + assert len(metagraph.uids) == 0, "Metagraph is not empty" + + # Register Bob to the subnet + assert register_neuron( + local_chain, bob_wallet, netuid + ), "Unable to register Bob as a neuron" + + # Refresh the metagraph + metagraph.sync(subtensor=subtensor) + + # Assert metagraph has Bob neuron + assert len(metagraph.uids) == 1, "Metagraph doesn't 
have exactly 1 neuron" + assert ( + metagraph.hotkeys[0] == bob_keypair.ss58_address + ), "Bob's hotkey doesn't match in metagraph" + assert len(metagraph.coldkeys) == 1, "Metagraph doesn't have exactly 1 coldkey" + assert metagraph.n.max() == 1, "Metagraph's max n is not 1" + assert metagraph.n.min() == 1, "Metagraph's min n is not 1" + assert len(metagraph.addresses) == 1, "Metagraph doesn't have exactly 1 address" + + # Fetch UID of Bob + uid = subtensor.get_uid_for_hotkey_on_subnet( + bob_keypair.ss58_address, netuid=netuid + ) + + # Fetch neuron info of Bob through subtensor and metagraph + neuron_info_bob = subtensor.neuron_for_uid(uid, netuid=netuid) + metagraph_dict = neuron_to_dict(metagraph.neurons[uid]) + subtensor_dict = neuron_to_dict(neuron_info_bob) + + # Verify neuron info is the same in both objects + assert ( + metagraph_dict == subtensor_dict + ), "Neuron info of Bob doesn't match b/w metagraph & subtensor" + + # Create pre_dave metagraph for future verifications + metagraph_pre_dave = subtensor.metagraph(netuid=1) + + # Register Dave as a neuron + assert register_neuron( + local_chain, dave_wallet, netuid + ), "Unable to register Dave as a neuron" + + metagraph.sync(subtensor=subtensor) + + # Assert metagraph now includes Dave's neuron + assert ( + len(metagraph.uids) == 2 + ), "Metagraph doesn't have exactly 2 neurons post Dave" + assert ( + metagraph.hotkeys[1] == dave_keypair.ss58_address + ), "Neuron's hotkey in metagraph doesn't match" + assert ( + len(metagraph.coldkeys) == 2 + ), "Metagraph doesn't have exactly 2 coldkeys post Dave" + assert metagraph.n.max() == 2, "Metagraph's max n is not 2 post Dave" + assert metagraph.n.min() == 2, "Metagraph's min n is not 2 post Dave" + assert len(metagraph.addresses) == 2, "Metagraph doesn't have 2 addresses post Dave" + + # Test staking with low balance + assert not add_stake( + local_chain, dave_wallet, bittensor.Balance.from_tao(10_000) + ), "Low balance stake should fail" + + # Add stake by Bob 
import asyncio
import sys

import pytest

import bittensor
from bittensor import logging
from tests.e2e_tests.utils.chain_interactions import (
    register_neuron,
    register_subnet,
)
from tests.e2e_tests.utils.test_utils import (
    setup_wallet,
    template_path,
    templates_repo,
)


@pytest.mark.asyncio
async def test_subtensor_extrinsics(local_chain):
    """
    Tests subtensor extrinsics

    Steps:
        1. Validate subnets in the chain before/after registering netuid = 1
        2. Register Alice's neuron
        3. Verify Alice and Bob's participation in subnets (individually and global)
        4. Verify uids of Alice and Bob gets populated correctly
        5. Start Alice as a validator and verify neuroninfo before/after is different
    Raises:
        AssertionError: If any of the checks or verifications fail
    """
    netuid = 1
    subtensor = bittensor.Subtensor(network="ws://localhost:9945")

    # Subnets 0 and 3 are bootstrapped from the start
    assert subtensor.get_subnets() == [0, 3]
    assert subtensor.get_total_subnets() == 2

    # Add wallets for Alice and Bob
    alice_keypair, alice_wallet = setup_wallet("//Alice")
    bob_keypair, bob_wallet = setup_wallet("//Bob")

    # Register subnet
    # FIX: the original wrote `register_subnet(...), "..."` — a no-op tuple
    # expression, so a failed registration was never detected. Assert it.
    assert register_subnet(local_chain, alice_wallet), "Unable to register the subnet"

    # Subnet 1 is added after registration
    assert subtensor.get_subnets() == [0, 1, 3]
    assert subtensor.get_total_subnets() == 3

    # Verify subnet 1 created successfully
    assert local_chain.query("SubtensorModule", "NetworksAdded", [1]).serialize()
    assert subtensor.subnet_exists(netuid)

    # Register Alice to the subnet
    assert register_neuron(
        local_chain, alice_wallet, netuid
    ), "Unable to register Alice as a neuron"

    # Verify Alice is registered to netuid 1 and Bob isn't registered to any
    assert subtensor.get_netuids_for_hotkey(hotkey_ss58=alice_keypair.ss58_address) == [
        1
    ], "Alice is not registered to netuid 1 as expected"
    assert (
        subtensor.get_netuids_for_hotkey(hotkey_ss58=bob_keypair.ss58_address) == []
    ), "Bob is unexpectedly registered to some netuid"

    # Verify Alice's hotkey is registered to any subnet (currently netuid = 1)
    assert subtensor.is_hotkey_registered_any(
        hotkey_ss58=alice_keypair.ss58_address
    ), "Alice's hotkey is not registered to any subnet"
    assert not subtensor.is_hotkey_registered_any(
        hotkey_ss58=bob_keypair.ss58_address
    ), "Bob's hotkey is unexpectedly registered to a subnet"

    # Verify netuid = 1 only has Alice registered and not Bob
    assert subtensor.is_hotkey_registered_on_subnet(
        netuid=netuid, hotkey_ss58=alice_keypair.ss58_address
    ), "Alice's hotkey is not registered on netuid 1"
    assert not subtensor.is_hotkey_registered_on_subnet(
        netuid=netuid, hotkey_ss58=bob_keypair.ss58_address
    ), "Bob's hotkey is unexpectedly registered on netuid 1"

    # Verify Alice's UID on netuid 1 is 0
    assert (
        subtensor.get_uid_for_hotkey_on_subnet(
            hotkey_ss58=alice_keypair.ss58_address, netuid=netuid
        )
        == 0
    ), "UID for Alice's hotkey on netuid 1 is not 0 as expected"

    # Register Bob to the subnet
    # FIX: failure message previously said "Alice" (copy-paste error).
    assert register_neuron(
        local_chain, bob_wallet, netuid
    ), "Unable to register Bob as a neuron"

    # Verify Bob's UID on netuid 1 is 1
    assert (
        subtensor.get_uid_for_hotkey_on_subnet(
            hotkey_ss58=bob_keypair.ss58_address, netuid=netuid
        )
        == 1
    ), "UID for Bob's hotkey on netuid 1 is not 1 as expected"

    # Snapshot Alice's neuron info before she starts validating
    neuron_info_old = subtensor.get_neuron_for_pubkey_and_subnet(
        alice_keypair.ss58_address, netuid=netuid
    )

    # Prepare to run Alice as validator
    cmd = " ".join(
        [
            f"{sys.executable}",
            f'"{template_path}{templates_repo}/neurons/validator.py"',
            "--no_prompt",
            "--netuid",
            str(netuid),
            "--subtensor.network",
            "local",
            "--subtensor.chain_endpoint",
            "ws://localhost:9945",
            "--wallet.path",
            alice_wallet.path,
            "--wallet.name",
            alice_wallet.name,
            "--wallet.hotkey",
            "default",
            "--logging.trace",
        ]
    )

    # Run Alice as validator in the background
    await asyncio.create_subprocess_shell(
        cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    logging.info("Neuron Alice is now validating")

    await asyncio.sleep(
        5
    )  # wait for 5 seconds for the metagraph and subtensor to refresh with latest data
    subtensor = bittensor.Subtensor(network="ws://localhost:9945")

    # Verify neuron info is updated after running as a validator
    neuron_info = subtensor.get_neuron_for_pubkey_and_subnet(
        alice_keypair.ss58_address, netuid=netuid
    )
    assert (
        neuron_info_old.axon_info != neuron_info.axon_info
    ), "Neuron info not updated after running validator"

    logging.info("✅ Passed test_subtensor_extrinsics")


# --- tests/e2e_tests/test_transfer.py ---
from bittensor import Subtensor, logging
from bittensor.core.subtensor import transfer_extrinsic
from tests.e2e_tests.utils.test_utils import setup_wallet


def test_transfer(local_chain):
    """
    Test the transfer mechanism on the chain

    Steps:
        1. Create a wallet for Alice
        2. Calculate existing balance and transfer 2 Tao
        3. Calculate balance after extrinsic call and verify calculations
    Raises:
        AssertionError: If any of the checks or verifications fail
    """

    logging.info("Testing test_transfer")

    # Set up Alice wallet
    keypair, wallet = setup_wallet("//Alice")

    # Account details before transfer
    acc_before = local_chain.query("System", "Account", [keypair.ss58_address])

    # Transfer Tao using extrinsic
    subtensor = Subtensor(network="ws://localhost:9945")
    transfer_extrinsic(
        subtensor=subtensor,
        wallet=wallet,
        dest="5GpzQgpiAKHMWNSH3RN4GLf96GVTDct9QxYEFAY7LWcVzTbx",
        amount=2,
        wait_for_finalization=True,
        wait_for_inclusion=True,
        prompt=False,
    )

    # Account details after transfer
    acc_after = local_chain.query("System", "Account", [keypair.ss58_address])

    # Transfer calculation assertions: 2 TAO = 2_000_000_000 rao, plus a
    # small allowance for the transaction fee.
    expected_transfer = 2_000_000_000
    tolerance = 200_000  # Tx fee tolerance

    actual_difference = (
        acc_before.value["data"]["free"] - acc_after.value["data"]["free"]
    )
    assert (
        expected_transfer <= actual_difference <= expected_transfer + tolerance
    ), f"Expected transfer with tolerance: {expected_transfer} <= {actual_difference} <= {expected_transfer + tolerance}"

    logging.info("✅ Passed test_transfer")
"""
This module provides functions interacting with the chain for end-to-end testing;
these are not present in btsdk but are required for e2e tests
"""

import asyncio
from typing import Union, Optional, TYPE_CHECKING

from bittensor import logging

# for typing purposes
if TYPE_CHECKING:
    from bittensor import Wallet
    from bittensor.core.subtensor import Subtensor
    from bittensor.utils.balance import Balance
    from substrateinterface import SubstrateInterface


def _sign_and_submit(substrate: "SubstrateInterface", wallet: "Wallet", call):
    """
    Sign `call` with the wallet's coldkey, submit it, wait for inclusion and
    finalization, process events, and return the receipt.

    Factored out because every extrinsic helper below previously duplicated
    this exact sequence.
    """
    extrinsic = substrate.create_signed_extrinsic(call=call, keypair=wallet.coldkey)
    response = substrate.submit_extrinsic(
        extrinsic,
        wait_for_inclusion=True,
        wait_for_finalization=True,
    )
    response.process_events()
    return response


def sudo_set_hyperparameter_bool(
    substrate: "SubstrateInterface",
    wallet: "Wallet",
    call_function: str,
    value: bool,
    netuid: int,
) -> bool:
    """
    Sets boolean hyperparameter value through AdminUtils. Mimics setting hyperparams

    Returns:
        True if the extrinsic succeeded.
    """
    call = substrate.compose_call(
        call_module="AdminUtils",
        call_function=call_function,
        call_params={"netuid": netuid, "enabled": value},
    )
    return _sign_and_submit(substrate, wallet, call).is_success


def sudo_set_hyperparameter_values(
    substrate: "SubstrateInterface",
    wallet: "Wallet",
    call_function: str,
    call_params: dict,
    return_error_message: bool = False,
) -> Union[bool, tuple[bool, Optional[str]]]:
    """
    Sets liquid alpha values using AdminUtils. Mimics setting hyperparams

    Returns:
        is_success, or (is_success, error_message) when `return_error_message`
        is True.
    """
    call = substrate.compose_call(
        call_module="AdminUtils",
        call_function=call_function,
        call_params=call_params,
    )
    response = _sign_and_submit(substrate, wallet, call)

    if return_error_message:
        return response.is_success, response.error_message

    return response.is_success


def add_stake(
    substrate: "SubstrateInterface", wallet: "Wallet", amount: "Balance"
) -> bool:
    """
    Adds stake to a hotkey using SubtensorModule. Mimics command of adding stake

    Returns:
        True if the extrinsic succeeded.
    """
    stake_call = substrate.compose_call(
        call_module="SubtensorModule",
        call_function="add_stake",
        call_params={"hotkey": wallet.hotkey.ss58_address, "amount_staked": amount.rao},
    )
    return _sign_and_submit(substrate, wallet, stake_call).is_success


def register_subnet(substrate: "SubstrateInterface", wallet: "Wallet") -> bool:
    """
    Registers a subnet on the chain using wallet. Mimics register subnet command.

    Returns:
        True if the extrinsic succeeded.
    """
    register_call = substrate.compose_call(
        call_module="SubtensorModule",
        call_function="register_network",
        call_params={"immunity_period": 0, "reg_allowed": True},
    )
    return _sign_and_submit(substrate, wallet, register_call).is_success


def register_neuron(
    substrate: "SubstrateInterface", wallet: "Wallet", netuid: int
) -> bool:
    """
    Registers a neuron on a subnet. Mimics subnet register command.

    Returns:
        True if the extrinsic succeeded.
    """
    neuron_register_call = substrate.compose_call(
        call_module="SubtensorModule",
        call_function="burned_register",
        call_params={
            "netuid": netuid,
            "hotkey": wallet.hotkey.ss58_address,
        },
    )
    return _sign_and_submit(substrate, wallet, neuron_register_call).is_success


async def wait_epoch(subtensor: "Subtensor", netuid: int = 1):
    """
    Waits for the next epoch to start on a specific subnet.

    Queries the tempo value from the Subtensor module and calculates the
    interval based on the tempo. Then waits for the next epoch to start
    by monitoring the current block number.

    Raises:
        Exception: If the tempo cannot be determined from the chain.
    """
    q_tempo = [
        v.value
        for [k, v] in subtensor.query_map_subtensor("Tempo")
        if k.value == netuid
    ]
    if len(q_tempo) == 0:
        raise Exception("could not determine tempo")
    tempo = q_tempo[0]
    logging.info(f"tempo = {tempo}")
    await wait_interval(tempo, subtensor, netuid)


async def wait_interval(tempo: int, subtensor: "Subtensor", netuid: int = 1):
    """
    Waits until the next tempo interval starts for a specific subnet.

    Calculates the next tempo block start based on the current block number
    and the provided tempo, then enters a loop where it periodically checks
    the current block number until the next tempo interval starts.
    """
    interval = tempo + 1
    current_block = subtensor.get_current_block()
    # Epoch boundary formula mirrors the chain's tempo schedule (netuid offset).
    last_epoch = current_block - 1 - (current_block + netuid + 1) % interval
    next_tempo_block_start = last_epoch + interval
    last_reported = None

    while current_block < next_tempo_block_start:
        await asyncio.sleep(
            1
        )  # Wait for 1 second before checking the block number again
        current_block = subtensor.get_current_block()
        # Throttle progress output to at most once per 10 blocks.
        if last_reported is None or current_block - last_reported >= 10:
            last_reported = current_block
            print(
                f"Current Block: {current_block}  Next tempo for netuid {netuid} at: {next_tempo_block_start}"
            )
            logging.info(
                f"Current Block: {current_block}  Next tempo for netuid {netuid} at: {next_tempo_block_start}"
            )
+ """ + keypair = Keypair.create_from_uri(uri) + wallet_path = f"/tmp/btcli-e2e-wallet-{uri.strip('/')}" + wallet = bittensor.Wallet(path=wallet_path) + wallet.set_coldkey(keypair=keypair, encrypt=False, overwrite=True) + wallet.set_coldkeypub(keypair=keypair, encrypt=False, overwrite=True) + wallet.set_hotkey(keypair=keypair, encrypt=False, overwrite=True) + return keypair, wallet + + +def clone_or_update_templates(specific_commit=None): + """ + Clones or updates the Bittensor subnet template repository. + + This function clones the Bittensor subnet template repository if it does not + already exist in the specified installation directory. If the repository already + exists, it updates it by pulling the latest changes. Optionally, it can check out + a specific commit if the `specific_commit` variable is set. + """ + install_dir = template_path + repo_mapping = { + templates_repo: "https://github.com/opentensor/bittensor-subnet-template.git", + } + + os.makedirs(install_dir, exist_ok=True) + os.chdir(install_dir) + + for repo, git_link in repo_mapping.items(): + if not os.path.exists(repo): + print(f"\033[94mCloning {repo}...\033[0m") + subprocess.run(["git", "clone", git_link, repo], check=True) + else: + print(f"\033[94mUpdating {repo}...\033[0m") + os.chdir(repo) + subprocess.run(["git", "pull"], check=True) + os.chdir("..") + + # For pulling specific commit versions of repo + if specific_commit: + os.chdir(templates_repo) + print( + f"\033[94mChecking out commit {specific_commit} in {templates_repo}...\033[0m" + ) + subprocess.run(["git", "checkout", specific_commit], check=True) + os.chdir("..") + + return install_dir + templates_repo + "/" + + +def install_templates(install_dir): + subprocess.check_call([sys.executable, "-m", "pip", "install", install_dir]) + + +def uninstall_templates(install_dir): + subprocess.check_call( + [sys.executable, "-m", "pip", "uninstall", "bittensor_subnet_template", "-y"] + ) + # Delete everything in directory + 
shutil.rmtree(install_dir) diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py new file mode 100644 index 0000000000..f876d249bd --- /dev/null +++ b/tests/helpers/__init__.py @@ -0,0 +1,34 @@ +# The MIT License (MIT) +# Copyright © 2023 Opentensor Technologies Inc + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
import os

from .helpers import (  # noqa: F401
    CLOSE_IN_VALUE,
    MockConsole,
    __mock_wallet_factory__,
)
from bittensor_wallet.mock.wallet_mock import (  # noqa: F401
    get_mock_coldkey,
    get_mock_hotkey,
    get_mock_keypair,
    get_mock_wallet,
)


def is_running_in_circleci():
    """Return True when the tests run inside the app.circleci.com environment."""
    # CircleCI sets CIRCLECI="true" in its job containers.
    return os.getenv("CIRCLECI") == "true"
# The MIT License (MIT)
# Copyright © 2024 Opentensor Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

from typing import Union

from bittensor_wallet.mock.wallet_mock import MockWallet as _MockWallet
from bittensor_wallet.mock.wallet_mock import get_mock_coldkey
from bittensor_wallet.mock.wallet_mock import get_mock_hotkey
from bittensor_wallet.mock.wallet_mock import get_mock_wallet

from rich.console import Console
from rich.text import Text

from bittensor.utils.balance import Balance
from bittensor.core.chain_data import AxonInfo, NeuronInfo, PrometheusInfo


def __mock_wallet_factory__(*args, **kwargs) -> _MockWallet:
    """Returns a mock wallet object."""
    return get_mock_wallet()


class CLOSE_IN_VALUE:
    """Equality helper: compares equal to any number within `tolerance` of `value`."""

    value: Union[float, int, Balance]
    tolerance: Union[float, int, Balance]

    def __init__(
        self,
        value: Union[float, int, Balance],
        tolerance: Union[float, int, Balance] = 0.0,
    ) -> None:
        self.value = value
        self.tolerance = tolerance

    def __eq__(self, __o: Union[float, int, Balance]) -> bool:
        # Equal when __o falls inside [value - tolerance, value + tolerance],
        # or symmetrically when value falls inside the band around __o.
        lo, hi = self.value - self.tolerance, self.value + self.tolerance
        within_ours = lo <= __o <= hi
        within_theirs = (__o - self.tolerance) <= self.value <= (__o + self.tolerance)
        return within_ours or within_theirs


def get_mock_neuron(**kwargs) -> NeuronInfo:
    """
    Returns a mock neuron with the given kwargs overriding the default values.
    """
    defaults = {
        "netuid": -1,  # mock netuid
        "axon_info": AxonInfo(
            block=0,
            version=1,
            ip=0,
            port=0,
            ip_type=0,
            protocol=0,
            placeholder1=0,
            placeholder2=0,
        ),
        "prometheus_info": PrometheusInfo(
            block=0, version=1, ip=0, port=0, ip_type=0
        ),
        "validator_permit": True,
        "uid": 1,
        "hotkey": "some_hotkey",
        "coldkey": "some_coldkey",
        "active": 0,
        "last_update": 0,
        "stake": {"some_coldkey": 1e12},
        "total_stake": 1e12,
        "rank": 0.0,
        "trust": 0.0,
        "consensus": 0.0,
        "validator_trust": 0.0,
        "incentive": 0.0,
        "dividends": 0.0,
        "emission": 0.0,
        "bonds": [],
        "weights": [],
        "stake_dict": {},
        "pruning_score": 0.0,
        "is_null": False,
    }

    defaults.update(kwargs)  # caller overrides win

    # If the caller supplied a coldkey but no stake, attribute the default
    # stake to that coldkey; keep total_stake consistent with the stake dict.
    if kwargs.get("stake") is None and kwargs.get("coldkey") is not None:
        defaults["stake"] = {kwargs.get("coldkey"): 1e12}

    if kwargs.get("total_stake") is None:
        defaults["total_stake"] = sum(defaults["stake"].values())

    return NeuronInfo._neuron_dict_to_namespace(defaults)


def get_mock_neuron_by_uid(uid: int, **kwargs) -> NeuronInfo:
    """Mock neuron whose hotkey/coldkey are derived deterministically from `uid`."""
    return get_mock_neuron(
        uid=uid, hotkey=get_mock_hotkey(uid), coldkey=get_mock_coldkey(uid), **kwargs
    )


class MockStatus:
    """No-op stand-in for a rich status spinner; `update` routes to MockConsole."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def start(self):
        pass

    def stop(self):
        pass

    def update(self, *args, **kwargs):
        MockConsole().print(*args, **kwargs)


class MockConsole:
    """
    Mocks the console object for status and print.
    Captures the last print output as a string.
    """

    captured_print = None

    def status(self, *args, **kwargs):
        return MockStatus()

    def print(self, *args, **kwargs):
        # Width 1000 avoids truncation; capture rendered output as plain text.
        console = Console(width=1000, no_color=True, markup=False)
        console.begin_capture()
        console.print(*args, **kwargs)
        self.captured_print = console.end_capture()

    def clear(self, *args, **kwargs):
        pass

    @staticmethod
    def remove_rich_syntax(text: str) -> str:
        """
        Removes rich syntax from the given text.
        Removes markup and ansi syntax.
        """
        return Text.from_ansi(Text.from_markup(text).plain).plain
diff --git a/tests/integration_tests/test_metagraph_integration.py b/tests/integration_tests/test_metagraph_integration.py new file mode 100644 index 0000000000..34bf4f590e --- /dev/null +++ b/tests/integration_tests/test_metagraph_integration.py @@ -0,0 +1,110 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
import os

import bittensor
import torch

from bittensor.utils.mock import MockSubtensor
from bittensor.core.metagraph import METAGRAPH_STATE_DICT_NDARRAY_KEYS, get_save_dir

# Shared mock chain used by the module-level setup hook.
_subtensor_mock: MockSubtensor = MockSubtensor()


def setUpModule():
    """Reset the mock chain and bootstrap subnet 3 with zero difficulty."""
    _subtensor_mock.reset()
    _subtensor_mock.create_subnet(netuid=3)
    _subtensor_mock.set_difficulty(netuid=3, difficulty=0)  # Set diff 0


class TestMetagraph:
    """Integration tests for Metagraph sync/save/load against MockSubtensor."""

    def setup_method(self):
        self.sub = MockSubtensor()
        self.metagraph = bittensor.Metagraph(netuid=3, network="mock", sync=False)

    def test_print_empty(self):
        # Smoke test: repr of an unsynced metagraph must not raise.
        print(self.metagraph)

    def test_lite_sync(self):
        self.metagraph.sync(lite=True, subtensor=self.sub)

    def test_full_sync(self):
        self.metagraph.sync(lite=False, subtensor=self.sub)

    def test_sync_block_0(self):
        self.metagraph.sync(lite=True, block=0, subtensor=self.sub)

    def test_load_sync_save(self):
        # Round-trip: sync -> save -> load -> save must be stable.
        self.metagraph.sync(lite=True, subtensor=self.sub)
        self.metagraph.save()
        self.metagraph.load()
        self.metagraph.save()

    def test_load_sync_save_from_torch(self):
        # Ensure current load() can read the legacy torch .pt save format.
        self.metagraph.sync(lite=True, subtensor=self.sub)

        def deprecated_save_torch(metagraph):
            # Reproduces the old torch-based save path (ndarrays wrapped as Parameters).
            save_directory = get_save_dir(metagraph.network, metagraph.netuid)
            os.makedirs(save_directory, exist_ok=True)
            graph_filename = save_directory + f"/block-{metagraph.block.item()}.pt"
            state_dict = metagraph.state_dict()
            for key in METAGRAPH_STATE_DICT_NDARRAY_KEYS:
                state_dict[key] = torch.nn.Parameter(
                    torch.tensor(state_dict[key]), requires_grad=False
                )
            torch.save(state_dict, graph_filename)

        deprecated_save_torch(self.metagraph)
        self.metagraph.load()

    def test_state_dict(self):
        # Every expected field must be present in the serialized state.
        self.metagraph.load()
        state = self.metagraph.state_dict()
        expected_keys = (
            "version",
            "n",
            "block",
            "stake",
            "total_stake",
            "ranks",
            "trust",
            "consensus",
            "validator_trust",
            "incentive",
            "emission",
            "dividends",
            "active",
            "last_update",
            "validator_permit",
            "weights",
            "bonds",
            "uids",
        )
        for key in expected_keys:
            assert key in state

    def test_properties(self):
        # Smoke test: all property accessors must resolve without raising.
        metagraph = self.metagraph
        metagraph.hotkeys
        metagraph.coldkeys
        metagraph.addresses
        metagraph.validator_trust
        metagraph.S
        metagraph.R
        metagraph.I
        metagraph.E
        metagraph.C
        metagraph.T
        metagraph.Tv
        metagraph.D
        metagraph.B
        metagraph.W
# The MIT License (MIT)
# Copyright © 2024 Opentensor Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import unittest
from unittest.mock import MagicMock, patch

from substrateinterface import Keypair

import bittensor
from bittensor.core import settings
from bittensor.utils.balance import Balance
from bittensor.utils.mock import MockSubtensor
from tests.helpers import (
    get_mock_coldkey,
    MockConsole,
    get_mock_keypair,
    get_mock_wallet,
)
from bittensor.core.extrinsics import transfer


class TestSubtensor(unittest.TestCase):
    """Integration tests for Subtensor network resolution, transfer and set_weights
    against a MockSubtensor chain (no real network access)."""

    _mock_console_patcher = None
    _mock_subtensor: MockSubtensor
    subtensor: MockSubtensor

    def setUp(self):
        # Fresh wallet/balance/neuron fixtures per test; keys are derived from
        # the test id so each test gets distinct deterministic keys.
        self.wallet = get_mock_wallet(
            hotkey=get_mock_keypair(0, self.id()),
            coldkey=get_mock_keypair(1, self.id()),
        )
        self.balance = Balance.from_tao(1000)
        self.mock_neuron = MagicMock()  # NOTE: this might need more sophistication
        self.subtensor = MockSubtensor()  # own instance per test

    @classmethod
    def setUpClass(cls) -> None:
        # mock rich console status
        mock_console = MockConsole()
        cls._mock_console_patcher = patch(
            "bittensor.core.settings.bt_console", mock_console
        )
        cls._mock_console_patcher.start()
        # Keeps the same mock network for all tests. This stops the network from being re-setup for each test.
        cls._mock_subtensor = MockSubtensor()
        cls._do_setup_subnet()

    @classmethod
    def _do_setup_subnet(cls):
        # reset the mock subtensor
        cls._mock_subtensor.reset()
        # Setup the mock subnet 3
        cls._mock_subtensor.create_subnet(netuid=3)

    @classmethod
    def tearDownClass(cls) -> None:
        # Stop the class-level console patch started in setUpClass.
        cls._mock_console_patcher.stop()

    def test_network_overrides(self):
        """Tests that the network overrides the chain_endpoint."""
        # Argument importance: chain_endpoint (arg) > network (arg) > config.subtensor.chain_endpoint > config.subtensor.network
        config0 = bittensor.Subtensor.config()
        config0.subtensor.network = "finney"
        config0.subtensor.chain_endpoint = "wss://finney.subtensor.io"  # Should not match bittensor.core.settings.FINNEY_ENTRYPOINT
        assert config0.subtensor.chain_endpoint != settings.FINNEY_ENTRYPOINT

        config1 = bittensor.Subtensor.config()
        config1.subtensor.network = "local"
        config1.subtensor.chain_endpoint = None

        # Mock network calls so constructing Subtensor never touches a real node.
        with patch("substrateinterface.SubstrateInterface.connect_websocket"):
            with patch("substrateinterface.SubstrateInterface.reload_type_registry"):
                print(bittensor.Subtensor, type(bittensor.Subtensor))
                # Choose network arg over config
                sub1 = bittensor.Subtensor(config=config1, network="local")
                self.assertEqual(
                    sub1.chain_endpoint,
                    settings.LOCAL_ENTRYPOINT,
                    msg="Explicit network arg should override config.network",
                )

                # Choose network config over chain_endpoint config
                sub2 = bittensor.Subtensor(config=config0)
                self.assertNotEqual(
                    sub2.chain_endpoint,
                    settings.FINNEY_ENTRYPOINT,  # Here we expect the endpoint corresponding to the network "finney"
                    msg="config.network should override config.chain_endpoint",
                )

                sub3 = bittensor.Subtensor(config=config1)
                # Should pick local instead of finney (default)
                assert sub3.network == "local"
                assert sub3.chain_endpoint == settings.LOCAL_ENTRYPOINT

    def test_get_current_block(self):
        block = self.subtensor.get_current_block()
        assert type(block) is int

    def test_do_block_step(self):
        # Advancing the mock chain by one block keeps get_current_block an int.
        self.subtensor.do_block_step()
        block = self.subtensor.get_current_block()
        assert type(block) is int

    def test_do_block_step_query_previous_block(self):
        # Querying state at an earlier block must not raise after a block step.
        self.subtensor.do_block_step()
        block = self.subtensor.get_current_block()
        self.subtensor.query_subtensor("NetworksAdded", block)

    def test_transfer(self):
        fake_coldkey = get_mock_coldkey(1)

        # NOTE(review): this patches the module attribute directly (not via
        # unittest.mock.patch), so it is not undone after the test.
        transfer.do_transfer = MagicMock(return_value=(True, "0x", None))
        self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
            return_value=self.mock_neuron
        )
        self.subtensor.get_balance = MagicMock(return_value=self.balance)
        success = self.subtensor.transfer(
            self.wallet,
            fake_coldkey,
            amount=200,
        )
        self.assertTrue(success, msg="Transfer should succeed")

    def test_transfer_inclusion(self):
        fake_coldkey = get_mock_coldkey(1)
        transfer.do_transfer = MagicMock(return_value=(True, "0x", None))
        self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
            return_value=self.mock_neuron
        )
        self.subtensor.get_balance = MagicMock(return_value=self.balance)

        success = self.subtensor.transfer(
            self.wallet, fake_coldkey, amount=200, wait_for_inclusion=True
        )
        self.assertTrue(success, msg="Transfer should succeed")

    def test_transfer_failed(self):
        fake_coldkey = get_mock_coldkey(1)
        # Simulate the extrinsic failing on-chain.
        transfer.do_transfer = MagicMock(
            return_value=(False, None, "Mock failure message")
        )

        fail = self.subtensor.transfer(
            self.wallet, fake_coldkey, amount=200, wait_for_inclusion=True
        )
        self.assertFalse(fail, msg="Transfer should fail")

    def test_transfer_invalid_dest(self):
        fake_coldkey = get_mock_coldkey(1)

        fail = self.subtensor.transfer(
            self.wallet,
            fake_coldkey[:-1],  # invalid dest
            amount=200,
            wait_for_inclusion=True,
        )
        self.assertFalse(fail, msg="Transfer should fail because of invalid dest")

    def test_transfer_dest_as_bytes(self):
        fake_coldkey = get_mock_coldkey(1)
        with patch(
            "bittensor.core.extrinsics.transfer.do_transfer",
            return_value=(True, "0x", None),
        ):
            self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
                return_value=self.mock_neuron
            )
            self.subtensor.get_balance = MagicMock(return_value=self.balance)

            dest_as_bytes: bytes = Keypair(fake_coldkey).public_key
            success = self.subtensor.transfer(
                self.wallet,
                dest_as_bytes,  # dest given as raw public-key bytes — a valid destination form
                amount=200,
                wait_for_inclusion=True,
            )
            self.assertTrue(success, msg="Transfer should succeed")

    def test_set_weights(self):
        chain_weights = [0]

        # NOTE(review): set_weights itself is replaced by a MagicMock here, so
        # this assertion exercises the mock rather than subtensor's logic —
        # consider asserting on do_set_weights instead.
        self.subtensor.set_weights = MagicMock(return_value=True)
        self.subtensor.do_set_weights = MagicMock(return_value=(True, None))

        success = self.subtensor.set_weights(
            wallet=self.wallet,
            netuid=3,
            uids=[1],
            weights=chain_weights,
        )
        assert success is True

    def test_set_weights_inclusion(self):
        chain_weights = [0]
        self.subtensor.do_set_weights = MagicMock(return_value=(True, None))
        # NOTE(review): same caveat as test_set_weights — the method under test is mocked.
        self.subtensor.set_weights = MagicMock(return_value=True)

        success = self.subtensor.set_weights(
            wallet=self.wallet,
            netuid=1,
            uids=[1],
            weights=chain_weights,
            wait_for_inclusion=True,
        )
        assert success is True

    def test_set_weights_failed(self):
        chain_weights = [0]
        self.subtensor.do_set_weights = MagicMock(
            return_value=(False, "Mock failure message")
        )
        # NOTE(review): same caveat as test_set_weights — the method under test is mocked.
        self.subtensor.set_weights = MagicMock(return_value=False)

        fail = self.subtensor.set_weights(
            wallet=self.wallet,
            netuid=3,
            uids=[1],
            weights=chain_weights,
            wait_for_inclusion=True,
        )
        assert fail is False

    def test_get_balance(self):
        fake_coldkey = get_mock_coldkey(0)
        balance = self.subtensor.get_balance(address=fake_coldkey)
        assert type(balance) is bittensor.utils.balance.Balance

    def test_defaults_to_finney(self):
        # With no config/args, Subtensor should resolve to the finney network.
        sub = bittensor.Subtensor()
        assert sub.network == "finney"
        assert sub.chain_endpoint == settings.FINNEY_ENTRYPOINT


if __name__ == "__main__":
    unittest.main()
a/tests/pytest.ini b/tests/pytest.ini
new file mode 100644
index 0000000000..17ba4b865d
--- /dev/null
+++ b/tests/pytest.ini
@@ -0,0 +1,3 @@
[pytest]
filterwarnings =
    ignore::DeprecationWarning:pkg_resources.*:
\ No newline at end of file
diff --git a/tests/unit_tests/__init__.py b/tests/unit_tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit_tests/conftest.py b/tests/unit_tests/conftest.py
new file mode 100644
index 0000000000..a5503f8961
--- /dev/null
+++ b/tests/unit_tests/conftest.py
@@ -0,0 +1,13 @@
import pytest
from aioresponses import aioresponses


@pytest.fixture
def force_legacy_torch_compatible_api(monkeypatch):
    # Opt into the torch-compatible legacy API via the USE_TORCH env var.
    monkeypatch.setenv("USE_TORCH", "1")


@pytest.fixture
def mock_aio_response():
    # Intercept aiohttp requests for the duration of a test.
    with aioresponses() as m:
        yield m
diff --git a/tests/unit_tests/extrinsics/test_commit_weights.py b/tests/unit_tests/extrinsics/test_commit_weights.py
new file mode 100644
index 0000000000..35a1d4d426
--- /dev/null
+++ b/tests/unit_tests/extrinsics/test_commit_weights.py
@@ -0,0 +1,133 @@
import pytest

from bittensor.core import subtensor as subtensor_module
from bittensor.core.settings import version_as_int
from bittensor.core.subtensor import Subtensor
from bittensor.core.extrinsics.commit_weights import (
    do_commit_weights,
    do_reveal_weights,
)


@pytest.fixture
def subtensor(mocker):
    """Subtensor whose substrate connection is fully mocked out."""
    fake_substrate = mocker.MagicMock()
    # Report a healthy socket so any connection/keepalive check passes.
    fake_substrate.websocket.sock.getsockopt.return_value = 0
    mocker.patch.object(
        subtensor_module, "SubstrateInterface", return_value=fake_substrate
    )
    return Subtensor()


def test_do_commit_weights(subtensor, mocker):
    """Successful _do_commit_weights call."""
    # Preps
    fake_wallet = mocker.MagicMock()
    netuid = 1
    commit_hash = "fake_commit_hash"
    wait_for_inclusion = True
    wait_for_finalization = True

    # is_success = None is treated as a failed submission, so the call is
    # expected to return (False, <substrate error message>) below.
    subtensor.substrate.submit_extrinsic.return_value.is_success = None

    mocked_format_error_message = mocker.MagicMock()
    subtensor_module.format_error_message = mocked_format_error_message

    # Call
    result = do_commit_weights(
        self=subtensor,
        wallet=fake_wallet,
        netuid=netuid,
        commit_hash=commit_hash,
        wait_for_inclusion=wait_for_inclusion,
        wait_for_finalization=wait_for_finalization,
    )

    # Assertions
    subtensor.substrate.compose_call.assert_called_once_with(
        call_module="SubtensorModule",
        call_function="commit_weights",
        call_params={
            "netuid": netuid,
            "commit_hash": commit_hash,
        },
    )

    # Commit extrinsics must be signed with the hotkey, not the coldkey.
    subtensor.substrate.create_signed_extrinsic.assert_called_once_with(
        call=subtensor.substrate.compose_call.return_value, keypair=fake_wallet.hotkey
    )

    subtensor.substrate.submit_extrinsic.assert_called_once_with(
        subtensor.substrate.create_signed_extrinsic.return_value,
        wait_for_inclusion=wait_for_inclusion,
        wait_for_finalization=wait_for_finalization,
    )

    subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once()

    assert result == (
        False,
        subtensor.substrate.submit_extrinsic.return_value.error_message,
    )


def test_do_reveal_weights(subtensor, mocker):
    """Verifies that the `_do_reveal_weights` method interacts with the right substrate methods."""
    # Preps
    fake_wallet = mocker.MagicMock()
    fake_wallet.hotkey = "hotkey"

    netuid = 1
    uids = [1, 2, 3, 4]
    values = [1, 2, 3, 4]
    salt = [4, 2, 2, 1]
    wait_for_inclusion = True
    wait_for_finalization = True

    # As above: is_success = None means the extrinsic is reported as failed.
    subtensor.substrate.submit_extrinsic.return_value.is_success = None

    mocked_format_error_message = mocker.MagicMock()
    subtensor_module.format_error_message = mocked_format_error_message

    # Call
    result = do_reveal_weights(
        self=subtensor,
        wallet=fake_wallet,
        netuid=netuid,
        uids=uids,
        values=values,
        salt=salt,
        version_key=version_as_int,
        wait_for_inclusion=wait_for_inclusion,
        wait_for_finalization=wait_for_finalization,
    )

    # Asserts
    subtensor.substrate.compose_call.assert_called_once_with(
        call_module="SubtensorModule",
        call_function="reveal_weights",
        call_params={
            "netuid": netuid,
            "uids": uids,
            "values": values,
            "salt": salt,
            "version_key": version_as_int,
        },
    )

    subtensor.substrate.create_signed_extrinsic.assert_called_once_with(
        call=subtensor.substrate.compose_call.return_value, keypair=fake_wallet.hotkey
    )

    subtensor.substrate.submit_extrinsic.assert_called_once_with(
        subtensor.substrate.create_signed_extrinsic.return_value,
        wait_for_inclusion=wait_for_inclusion,
        wait_for_finalization=wait_for_finalization,
    )

    subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once()

    assert result == (
        False,
        subtensor.substrate.submit_extrinsic.return_value.error_message,
    )
diff --git a/tests/unit_tests/extrinsics/test_init.py b/tests/unit_tests/extrinsics/test_init.py
new file mode 100644
index 0000000000..8a2480a9b9
--- /dev/null
+++ b/tests/unit_tests/extrinsics/test_init.py
@@ -0,0 +1,114 @@
"""Tests for bittensor/extrinsics/__init__ module."""

from bittensor.utils import format_error_message


def test_format_error_message_with_right_error_message():
    """Verify that error message from extrinsic response parses correctly."""
    # Prep
    fake_error_message = {
        "type": "SomeType",
        "name": "SomeErrorName",
        "docs": ["Some error description."],
    }

    # Call
    result = format_error_message(fake_error_message)

    # Assertions

    assert "SomeType" in result
    assert "SomeErrorName" in result
    assert "Some error description." in result


def test_format_error_message_with_empty_error_message():
    """Verify that empty error message from extrinsic response parses correctly."""
    # Prep
    fake_error_message = {}

    # Call
    result = format_error_message(fake_error_message)

    # Assertions

    assert "UnknownType" in result
    assert "UnknownError" in result
    assert "Unknown Description" in result


def test_format_error_message_with_wrong_type_error_message():
    """Verify that error message from extrinsic response with wrong type parses correctly."""
    # Prep
    fake_error_message = None

    # Call
    result = format_error_message(fake_error_message)

    # Assertions

    assert "UnknownType" in result
    assert "UnknownError" in result
    assert "Unknown Description" in result


def test_format_error_message_with_custom_error_message_with_index(mocker):
    """Tests error formatter if subtensor error is custom error with index."""
    # Preps
    fake_custom_error = {
        "code": 1010,
        "message": "SomeErrorName",
        "data": "Custom error: 1",
    }
    fake_subtensor_error = {
        "docs": ["Some description"],
        "fields": [],
        "index": 1,
        "name": "SomeErrorName",
    }

    # First .get() call returns the error name, second returns the docs —
    # mirrors the two metadata lookups the formatter performs.
    fake_substrate = mocker.MagicMock()
    fake_substrate.metadata.get_metadata_pallet().errors.__getitem__().value.get = (
        mocker.Mock(
            side_effect=[fake_custom_error["message"], fake_subtensor_error["docs"]]
        )
    )

    mocker.patch(
        "substrateinterface.base.SubstrateInterface", return_value=fake_substrate
    )

    # Call
    result = format_error_message(fake_custom_error, fake_substrate)

    # Assertions
    assert (
        result
        == f"Subtensor returned `SubstrateRequestException({fake_subtensor_error['name']})` error. This means: `Some description`."
+ ) + + +def test_format_error_message_with_custom_error_message_without_index(mocker): + """Tests error formatter if subtensor error is custom error without index.""" + # Preps + fake_custom_error = { + "code": 1010, + "message": "SomeErrorType", + "data": "Custom error description", + } + fake_substrate = mocker.MagicMock() + fake_substrate.metadata.get_metadata_pallet().errors.__getitem__().value.get.return_value = fake_custom_error[ + "message" + ] + mocker.patch( + "substrateinterface.base.SubstrateInterface", return_value=fake_substrate + ) + + # Call + result = format_error_message(fake_custom_error, fake_substrate) + + # Assertions + assert ( + result + == f"Subtensor returned `SubstrateRequestException({fake_custom_error['message']})` error. This means: `{fake_custom_error['data']}`." + ) diff --git a/tests/unit_tests/extrinsics/test_prometheus.py b/tests/unit_tests/extrinsics/test_prometheus.py new file mode 100644 index 0000000000..dbcfed1e47 --- /dev/null +++ b/tests/unit_tests/extrinsics/test_prometheus.py @@ -0,0 +1,167 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +from unittest.mock import MagicMock, patch + +import pytest +from bittensor_wallet import Wallet + +from bittensor.core.extrinsics.prometheus import ( + prometheus_extrinsic, +) +from bittensor.core.subtensor import Subtensor +from bittensor.core.settings import version_as_int + + +# Mocking the bittensor and networking modules +@pytest.fixture +def mock_bittensor(): + with patch("bittensor.core.subtensor.Subtensor") as mock: + yield mock + + +@pytest.fixture +def mock_wallet(): + with patch("bittensor_wallet.Wallet") as mock: + yield mock + + +@pytest.fixture +def mock_net(): + with patch("bittensor.utils.networking") as mock: + yield mock + + +@pytest.mark.parametrize( + "ip, port, netuid, wait_for_inclusion, wait_for_finalization, expected_result, test_id", + [ + (None, 9221, 0, False, True, True, "happy-path-default-ip"), + ("192.168.0.1", 9221, 0, False, True, True, "happy-path-custom-ip"), + (None, 9221, 0, True, False, True, "happy-path-wait-for-inclusion"), + (None, 9221, 0, False, False, True, "happy-path-no-waiting"), + ], +) +def test_prometheus_extrinsic_happy_path( + mock_bittensor, + mock_wallet, + mock_net, + ip, + port, + netuid, + wait_for_inclusion, + wait_for_finalization, + expected_result, + test_id, +): + # Arrange + subtensor = MagicMock(spec=Subtensor) + subtensor.network = "test_network" + subtensor.substrate = MagicMock() + wallet = MagicMock(spec=Wallet) + mock_net.get_external_ip.return_value = "192.168.0.1" + mock_net.ip_to_int.return_value = 3232235521 # IP in integer form + mock_net.ip_version.return_value = 4 + neuron = MagicMock() + neuron.is_null = False + neuron.prometheus_info.version = version_as_int + neuron.prometheus_info.ip = 3232235521 + 
neuron.prometheus_info.port = port + neuron.prometheus_info.ip_type = 4 + subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron + subtensor._do_serve_prometheus.return_value = (True, None) + + # Act + result = prometheus_extrinsic( + subtensor=subtensor, + wallet=wallet, + ip=ip, + port=port, + netuid=netuid, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + # Assert + assert result == expected_result, f"Test ID: {test_id}" + + +# Edge cases +@pytest.mark.parametrize( + "ip, port, netuid, test_id", + [ + ("0.0.0.0", 0, 0, "edge-case-min-values"), + ("255.255.255.255", 65535, 2147483647, "edge-case-max-values"), + ], +) +def test_prometheus_extrinsic_edge_cases( + mock_bittensor, mock_wallet, mock_net, ip, port, netuid, test_id +): + # Arrange + subtensor = MagicMock(spec=Subtensor) + subtensor.network = "test_network" + subtensor.substrate = MagicMock() + wallet = MagicMock(spec=Wallet) + mock_net.get_external_ip.return_value = ip + mock_net.ip_to_int.return_value = 3232235521 # IP in integer form + mock_net.ip_version.return_value = 4 + neuron = MagicMock() + neuron.is_null = True + subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron + subtensor._do_serve_prometheus.return_value = (True, None) + + # Act + result = prometheus_extrinsic( + subtensor=subtensor, + wallet=wallet, + ip=ip, + port=port, + netuid=netuid, + wait_for_inclusion=False, + wait_for_finalization=True, + ) + + # Assert + assert result is True, f"Test ID: {test_id}" + + +# Error cases +def test_prometheus_extrinsic_error_cases(mock_bittensor, mock_wallet, mocker): + # Arrange + subtensor = MagicMock(spec=Subtensor) + subtensor.network = "test_network" + subtensor.substrate = MagicMock() + subtensor.substrate.websocket.sock.getsockopt.return_value = 0 + wallet = MagicMock(spec=Wallet) + neuron = MagicMock() + neuron.is_null = True + subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron + 
subtensor._do_serve_prometheus.return_value = (True,) + + with mocker.patch( + "bittensor.utils.networking.get_external_ip", side_effect=RuntimeError + ): + # Act & Assert + with pytest.raises(RuntimeError): + prometheus_extrinsic( + subtensor=subtensor, + wallet=wallet, + ip=None, + port=9221, + netuid=1, + wait_for_inclusion=False, + wait_for_finalization=True, + ) diff --git a/tests/unit_tests/extrinsics/test_serving.py b/tests/unit_tests/extrinsics/test_serving.py new file mode 100644 index 0000000000..a57e32d01c --- /dev/null +++ b/tests/unit_tests/extrinsics/test_serving.py @@ -0,0 +1,401 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +from unittest.mock import MagicMock, patch + +import pytest +from bittensor_wallet import Wallet + +from bittensor.core.axon import Axon +from bittensor.core.subtensor import Subtensor +from bittensor.core.extrinsics import serving + + +@pytest.fixture +def mock_subtensor(mocker): + mock_subtensor = mocker.MagicMock(spec=Subtensor) + mock_subtensor.network = "test_network" + mock_subtensor.substrate = mocker.MagicMock() + return mock_subtensor + + +@pytest.fixture +def mock_wallet(mocker): + wallet = mocker.MagicMock(spec=Wallet) + wallet.hotkey.ss58_address = "hotkey_address" + wallet.coldkeypub.ss58_address = "coldkey_address" + return wallet + + +@pytest.fixture +def mock_axon(mock_wallet, mocker): + axon = mocker.MagicMock(spec=Axon) + axon.wallet = mock_wallet() + axon.external_port = 9221 + return axon + + +@pytest.mark.parametrize( + "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,", + [ + ( + "192.168.1.1", + 9221, + 1, + 0, + 0, + 0, + False, + True, + False, + True, + "happy-path-no-wait", + ), + ( + "192.168.1.2", + 9222, + 2, + 1, + 1, + 1, + True, + False, + False, + True, + "happy-path-wait-for-inclusion", + ), + ( + "192.168.1.3", + 9223, + 3, + 2, + 2, + 2, + False, + True, + True, + True, + "happy-path-wait-for-finalization-and-prompt", + ), + ], + ids=[ + "happy-path-no-wait", + "happy-path-wait-for-inclusion", + "happy-path-wait-for-finalization-and-prompt", + ], +) +def test_serve_extrinsic_happy_path( + mock_subtensor, + mock_wallet, + ip, + port, + protocol, + netuid, + placeholder1, + placeholder2, + wait_for_inclusion, + wait_for_finalization, + prompt, + expected, + test_id, + mocker, +): + # Arrange + serving.do_serve_axon = mocker.MagicMock(return_value=(True, "")) + with patch( + "bittensor.core.extrinsics.serving.Confirm.ask", + return_value=True, + ): + # Act + result = serving.serve_extrinsic( + mock_subtensor, + mock_wallet, + ip, + port, + protocol, + netuid, + 
placeholder1, + placeholder2, + wait_for_inclusion, + wait_for_finalization, + prompt, + ) + + # Assert + assert result == expected, f"Test ID: {test_id}" + + +# Various edge cases +@pytest.mark.parametrize( + "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,", + [ + ( + "192.168.1.4", + 9224, + 4, + 3, + 3, + 3, + True, + True, + False, + True, + "edge_case_max_values", + ), + ], + ids=["edge-case-max-values"], +) +def test_serve_extrinsic_edge_cases( + mock_subtensor, + mock_wallet, + ip, + port, + protocol, + netuid, + placeholder1, + placeholder2, + wait_for_inclusion, + wait_for_finalization, + prompt, + expected, + test_id, + mocker, +): + # Arrange + serving.do_serve_axon = mocker.MagicMock(return_value=(True, "")) + with patch( + "bittensor.core.extrinsics.serving.Confirm.ask", + return_value=True, + ): + # Act + result = serving.serve_extrinsic( + mock_subtensor, + mock_wallet, + ip, + port, + protocol, + netuid, + placeholder1, + placeholder2, + wait_for_inclusion, + wait_for_finalization, + prompt, + ) + + # Assert + assert result == expected, f"Test ID: {test_id}" + + +# Various error cases +@pytest.mark.parametrize( + "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected_error_message,test_id,", + [ + ( + "192.168.1.5", + 9225, + 5, + 4, + 4, + 4, + True, + True, + False, + False, + "error-case-failed-serve", + ), + ], + ids=["error-case-failed-serve"], +) +def test_serve_extrinsic_error_cases( + mock_subtensor, + mock_wallet, + ip, + port, + protocol, + netuid, + placeholder1, + placeholder2, + wait_for_inclusion, + wait_for_finalization, + prompt, + expected_error_message, + test_id, + mocker, +): + # Arrange + serving.do_serve_axon = mocker.MagicMock(return_value=(False, "Error serving axon")) + with patch( + "bittensor.core.extrinsics.serving.Confirm.ask", + return_value=True, + ): + # Act + result = serving.serve_extrinsic( + 
mock_subtensor, + mock_wallet, + ip, + port, + protocol, + netuid, + placeholder1, + placeholder2, + wait_for_inclusion, + wait_for_finalization, + prompt, + ) + + # Assert + assert result == expected_error_message, f"Test ID: {test_id}" + + +@pytest.mark.parametrize( + "netuid, wait_for_inclusion, wait_for_finalization, prompt, external_ip, external_ip_success, serve_success, expected_result, test_id", + [ + # Happy path test + (1, False, True, False, "192.168.1.1", True, True, True, "happy-ext-ip"), + (1, False, True, True, None, True, True, True, "happy-net-external-ip"), + # Edge cases + (1, True, True, False, "192.168.1.1", True, True, True, "edge-case-wait"), + # Error cases + (1, False, True, False, None, False, True, False, "error-fetching-external-ip"), + ( + 1, + False, + True, + False, + "192.168.1.1", + True, + False, + False, + "error-serving-axon", + ), + ], + ids=[ + "happy-axon-external-ip", + "happy-net-external-ip", + "edge-case-wait", + "error-fetching-external-ip", + "error-serving-axon", + ], +) +def test_serve_axon_extrinsic( + mock_subtensor, + mock_axon, + netuid, + wait_for_inclusion, + wait_for_finalization, + prompt, + external_ip, + external_ip_success, + serve_success, + expected_result, + test_id, + mocker, +): + mock_axon.external_ip = external_ip + # Arrange + with patch( + "bittensor.utils.networking.get_external_ip", + side_effect=Exception("Failed to fetch IP") + if not external_ip_success + else MagicMock(return_value="192.168.1.1"), + ): + serving.do_serve_axon = mocker.MagicMock(return_value=(serve_success, "")) + # Act + if not external_ip_success: + with pytest.raises(RuntimeError): + serving.serve_axon_extrinsic( + mock_subtensor, + netuid, + mock_axon, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + else: + result = serving.serve_axon_extrinsic( + mock_subtensor, + netuid, + mock_axon, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + 
+ # Assert + assert result == expected_result, f"Test ID: {test_id}" + + +@pytest.mark.parametrize( + "wait_for_inclusion, wait_for_finalization, net_uid, type_u, data, response_success, expected_result, test_id", + [ + ( + True, + True, + 1, + "Sha256", + b"mock_bytes_data", + True, + True, + "happy-path-wait", + ), + ( + False, + False, + 1, + "Sha256", + b"mock_bytes_data", + True, + True, + "happy-path-no-wait", + ), + ], + ids=["happy-path-wait", "happy-path-no-wait"], +) +def test_publish_metadata( + mock_subtensor, + mock_wallet, + wait_for_inclusion, + wait_for_finalization, + net_uid, + type_u, + data, + response_success, + expected_result, + test_id, +): + # Arrange + with patch.object(mock_subtensor.substrate, "compose_call"), patch.object( + mock_subtensor.substrate, "create_signed_extrinsic" + ), patch.object( + mock_subtensor.substrate, + "submit_extrinsic", + return_value=MagicMock( + is_success=response_success, + process_events=MagicMock(), + error_message="error", + ), + ): + # Act + result = serving.publish_metadata( + self=mock_subtensor, + wallet=mock_wallet, + netuid=net_uid, + data_type=type_u, + data=data, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + # Assert + assert result == expected_result, f"Test ID: {test_id}" diff --git a/tests/unit_tests/extrinsics/test_set_weights.py b/tests/unit_tests/extrinsics/test_set_weights.py new file mode 100644 index 0000000000..9c32fc9bdf --- /dev/null +++ b/tests/unit_tests/extrinsics/test_set_weights.py @@ -0,0 +1,278 @@ +from unittest.mock import MagicMock, patch + +import pytest +import torch +from bittensor_wallet import Wallet + +from bittensor.core import subtensor as subtensor_module +from bittensor.core.extrinsics.set_weights import ( + do_set_weights, + set_weights_extrinsic, +) +from bittensor.core.settings import version_as_int +from bittensor.core.subtensor import Subtensor + + +@pytest.fixture +def mock_subtensor(): + mock = 
MagicMock(spec=Subtensor) + mock.network = "mock_network" + mock.substrate = MagicMock() + return mock + + +@pytest.fixture +def mock_wallet(): + mock = MagicMock(spec=Wallet) + return mock + + +@pytest.mark.parametrize( + "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, prompt, user_accepts, expected_success, expected_message", + [ + ( + [1, 2], + [0.5, 0.5], + 0, + True, + False, + True, + True, + True, + "Successfully set weights and Finalized.", + ), + ( + [1, 2], + [0.5, 0.4], + 0, + False, + False, + False, + True, + True, + "Not waiting for finalization or inclusion.", + ), + ( + [1, 2], + [0.5, 0.5], + 0, + True, + False, + True, + True, + False, + "Subtensor returned `UnknownError(UnknownType)` error. This means: `Unknown Description`.", + ), + ([1, 2], [0.5, 0.5], 0, True, True, True, False, False, "Prompt refused."), + ], + ids=[ + "happy-flow", + "not-waiting-finalization-inclusion", + "error-flow", + "prompt-refused", + ], +) +def test_set_weights_extrinsic( + mock_subtensor, + mock_wallet, + uids, + weights, + version_key, + wait_for_inclusion, + wait_for_finalization, + prompt, + user_accepts, + expected_success, + expected_message, +): + uids_tensor = torch.tensor(uids, dtype=torch.int64) + weights_tensor = torch.tensor(weights, dtype=torch.float32) + with patch( + "bittensor.utils.weight_utils.convert_weights_and_uids_for_emit", + return_value=(uids_tensor, weights_tensor), + ), patch("rich.prompt.Confirm.ask", return_value=user_accepts), patch( + "bittensor.core.extrinsics.set_weights.do_set_weights", + return_value=(expected_success, "Mock error message"), + ) as mock_do_set_weights: + result, message = set_weights_extrinsic( + subtensor=mock_subtensor, + wallet=mock_wallet, + netuid=123, + uids=uids, + weights=weights, + version_key=version_key, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + + assert result == expected_success, f"Test {expected_message} 
failed." + assert message == expected_message, f"Test {expected_message} failed." + if user_accepts is not False: + mock_do_set_weights.assert_called_once_with( + self=mock_subtensor, + wallet=mock_wallet, + netuid=123, + uids=uids_tensor, + vals=weights_tensor, + version_key=version_key, + wait_for_finalization=wait_for_finalization, + wait_for_inclusion=wait_for_inclusion, + ) + + +def test_do_set_weights_is_success(mock_subtensor, mocker): + """Successful _do_set_weights call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_uids = [1, 2, 3] + fake_vals = [4, 5, 6] + fake_netuid = 1 + fake_wait_for_inclusion = True + fake_wait_for_finalization = True + + mock_subtensor.substrate.submit_extrinsic.return_value.is_success = True + + # Call + result = do_set_weights( + self=mock_subtensor, + wallet=fake_wallet, + uids=fake_uids, + vals=fake_vals, + netuid=fake_netuid, + version_key=version_as_int, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + # Asserts + mock_subtensor.substrate.compose_call.assert_called_once_with( + call_module="SubtensorModule", + call_function="set_weights", + call_params={ + "dests": fake_uids, + "weights": fake_vals, + "netuid": fake_netuid, + "version_key": version_as_int, + }, + ) + + mock_subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=mock_subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.hotkey, + era={"period": 5}, + ) + + mock_subtensor.substrate.submit_extrinsic.assert_called_once_with( + mock_subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + mock_subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() + assert result == (True, "Successfully set weights.") + + +def test_do_set_weights_is_not_success(mock_subtensor, mocker): + """Unsuccessful _do_set_weights call.""" + # Prep + 
fake_wallet = mocker.MagicMock() + fake_uids = [1, 2, 3] + fake_vals = [4, 5, 6] + fake_netuid = 1 + fake_wait_for_inclusion = True + fake_wait_for_finalization = True + + mock_subtensor.substrate.submit_extrinsic.return_value.is_success = False + mocked_format_error_message = mocker.MagicMock() + subtensor_module.format_error_message = mocked_format_error_message + + # Call + result = do_set_weights( + self=mock_subtensor, + wallet=fake_wallet, + uids=fake_uids, + vals=fake_vals, + netuid=fake_netuid, + version_key=version_as_int, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + # Asserts + mock_subtensor.substrate.compose_call.assert_called_once_with( + call_module="SubtensorModule", + call_function="set_weights", + call_params={ + "dests": fake_uids, + "weights": fake_vals, + "netuid": fake_netuid, + "version_key": version_as_int, + }, + ) + + mock_subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=mock_subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.hotkey, + era={"period": 5}, + ) + + mock_subtensor.substrate.submit_extrinsic.assert_called_once_with( + mock_subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + mock_subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() + assert result == ( + False, + mock_subtensor.substrate.submit_extrinsic.return_value.error_message, + ) + + +def test_do_set_weights_no_waits(mock_subtensor, mocker): + """Successful _do_set_weights call without wait flags for fake_wait_for_inclusion and fake_wait_for_finalization.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_uids = [1, 2, 3] + fake_vals = [4, 5, 6] + fake_netuid = 1 + fake_wait_for_inclusion = False + fake_wait_for_finalization = False + + # Call + result = do_set_weights( + self=mock_subtensor, + wallet=fake_wallet, + 
uids=fake_uids, + vals=fake_vals, + netuid=fake_netuid, + version_key=version_as_int, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + # Asserts + mock_subtensor.substrate.compose_call.assert_called_once_with( + call_module="SubtensorModule", + call_function="set_weights", + call_params={ + "dests": fake_uids, + "weights": fake_vals, + "netuid": fake_netuid, + "version_key": version_as_int, + }, + ) + + mock_subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=mock_subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.hotkey, + era={"period": 5}, + ) + + mock_subtensor.substrate.submit_extrinsic.assert_called_once_with( + mock_subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + assert result == (True, "Not waiting for finalization or inclusion.") diff --git a/tests/unit_tests/extrinsics/test_transfer.py b/tests/unit_tests/extrinsics/test_transfer.py new file mode 100644 index 0000000000..af59d5769b --- /dev/null +++ b/tests/unit_tests/extrinsics/test_transfer.py @@ -0,0 +1,142 @@ +import pytest + +from bittensor.core import subtensor as subtensor_module +from bittensor.core.extrinsics.transfer import do_transfer +from bittensor.core.subtensor import Subtensor +from bittensor.utils.balance import Balance + + +@pytest.fixture +def subtensor(mocker): + fake_substrate = mocker.MagicMock() + fake_substrate.websocket.sock.getsockopt.return_value = 0 + mocker.patch.object( + subtensor_module, "SubstrateInterface", return_value=fake_substrate + ) + return Subtensor() + + +def test_do_transfer_is_success_true(subtensor, mocker): + """Successful do_transfer call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_dest = "SS58PUBLICKEY" + fake_transfer_balance = Balance(1) + fake_wait_for_inclusion = True + fake_wait_for_finalization = True + + 
subtensor.substrate.submit_extrinsic.return_value.is_success = True + + # Call + result = do_transfer( + subtensor, + fake_wallet, + fake_dest, + fake_transfer_balance, + fake_wait_for_inclusion, + fake_wait_for_finalization, + ) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="Balances", + call_function="transfer_allow_death", + call_params={"dest": fake_dest, "value": fake_transfer_balance.rao}, + ) + subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, keypair=fake_wallet.coldkey + ) + subtensor.substrate.submit_extrinsic.assert_called_once_with( + subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() + assert result == ( + True, + subtensor.substrate.submit_extrinsic.return_value.block_hash, + None, + ) + + +def test_do_transfer_is_success_false(subtensor, mocker): + """Successful do_transfer call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_dest = "SS58PUBLICKEY" + fake_transfer_balance = Balance(1) + fake_wait_for_inclusion = True + fake_wait_for_finalization = True + + subtensor.substrate.submit_extrinsic.return_value.is_success = False + + mocked_format_error_message = mocker.MagicMock() + subtensor_module.format_error_message = mocked_format_error_message + + # Call + result = do_transfer( + subtensor, + fake_wallet, + fake_dest, + fake_transfer_balance, + fake_wait_for_inclusion, + fake_wait_for_finalization, + ) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="Balances", + call_function="transfer_allow_death", + call_params={"dest": fake_dest, "value": fake_transfer_balance.rao}, + ) + subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, 
keypair=fake_wallet.coldkey + ) + subtensor.substrate.submit_extrinsic.assert_called_once_with( + subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() + + assert result == ( + False, + None, + subtensor.substrate.submit_extrinsic.return_value.error_message, + ) + + +def test_do_transfer_no_waits(subtensor, mocker): + """Successful do_transfer call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_dest = "SS58PUBLICKEY" + fake_transfer_balance = Balance(1) + fake_wait_for_inclusion = False + fake_wait_for_finalization = False + + # Call + result = do_transfer( + subtensor, + fake_wallet, + fake_dest, + fake_transfer_balance, + fake_wait_for_inclusion, + fake_wait_for_finalization, + ) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="Balances", + call_function="transfer_allow_death", + call_params={"dest": fake_dest, "value": fake_transfer_balance.rao}, + ) + subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, keypair=fake_wallet.coldkey + ) + subtensor.substrate.submit_extrinsic.assert_called_once_with( + subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + assert result == (True, None, None) diff --git a/tests/unit_tests/factories/__init__.py b/tests/unit_tests/factories/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit_tests/factories/neuron_factory.py b/tests/unit_tests/factories/neuron_factory.py new file mode 100644 index 0000000000..f99a084acd --- /dev/null +++ b/tests/unit_tests/factories/neuron_factory.py @@ -0,0 +1,63 @@ +import factory + +from bittensor.core.chain_data import AxonInfo, NeuronInfoLite, 
PrometheusInfo +from bittensor.utils.balance import Balance + + +class BalanceFactory(factory.Factory): + class Meta: + model = Balance + + balance = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) + + +class PrometheusInfoFactory(factory.Factory): + class Meta: + model = PrometheusInfo + + block = factory.Faker("random_int", min=0, max=100) + version = factory.Faker("random_int", min=0, max=100) + ip = factory.Faker("ipv4") + port = factory.Faker("random_int", min=0, max=100) + ip_type = factory.Faker("random_int", min=0, max=100) + + +class AxonInfoFactory(factory.Factory): + class Meta: + model = AxonInfo + + version = factory.Faker("random_int", min=0, max=100) + ip = factory.Faker("ipv4") + port = factory.Faker("random_int", min=0, max=100) + ip_type = factory.Faker("random_int", min=0, max=100) + hotkey = factory.Faker("uuid4") + coldkey = factory.Faker("uuid4") + + +class NeuronInfoLiteFactory(factory.Factory): + class Meta: + model = NeuronInfoLite + + hotkey = factory.Faker("uuid4") + coldkey = factory.Faker("uuid4") + uid = factory.Sequence(lambda n: n) + netuid = factory.Sequence(lambda n: n) + active = factory.Faker("random_int", min=0, max=1) + stake = factory.SubFactory(BalanceFactory) + stake_dict = factory.Dict({"balance": 10}) + total_stake = factory.SubFactory(BalanceFactory) + rank = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) + emission = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) + incentive = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) + consensus = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) + trust = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) + validator_trust = factory.Faker( + "pyfloat", left_digits=3, right_digits=6, positive=True + ) + dividends = factory.Faker("pyfloat", left_digits=3, right_digits=6, positive=True) + last_update = factory.Faker("unix_time") + validator_permit = 
factory.Faker("boolean") + prometheus_info = factory.SubFactory(PrometheusInfoFactory) + axon_info = factory.SubFactory(AxonInfoFactory) + pruning_score = factory.Faker("random_int", min=0, max=100) + is_null = factory.Faker("boolean") diff --git a/tests/unit_tests/test_axon.py b/tests/unit_tests/test_axon.py new file mode 100644 index 0000000000..5df465e371 --- /dev/null +++ b/tests/unit_tests/test_axon.py @@ -0,0 +1,781 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ + +import re +import time +from dataclasses import dataclass +from typing import Any, Optional +from unittest import IsolatedAsyncioTestCase +from unittest.mock import AsyncMock, MagicMock, patch + +import fastapi +import netaddr +import pydantic +import pytest +from fastapi.testclient import TestClient +from starlette.requests import Request + +from bittensor.core.axon import AxonMiddleware, Axon +from bittensor.core.errors import RunException +from bittensor.core.settings import version_as_int +from bittensor.core.stream import StreamingSynapse +from bittensor.core.synapse import Synapse +from bittensor.core.threadpool import PriorityThreadPoolExecutor +from bittensor.utils.axon_utils import ( + allowed_nonce_window_ns, + calculate_diff_seconds, + ALLOWED_DELTA, + NANOSECONDS_IN_SECOND, +) + + +def test_attach_initial(): + # Create a mock AxonServer instance + server = Axon() + + # Define the Synapse type + class TestSynapse(Synapse): + pass + + # Define the functions with the correct signatures + def forward_fn(synapse: TestSynapse) -> Any: + pass + + def blacklist_fn(synapse: TestSynapse) -> tuple[bool, str]: + return True, "" + + def priority_fn(synapse: TestSynapse) -> float: + return 1.0 + + def verify_fn(synapse: TestSynapse) -> None: + pass + + # Test attaching with correct signatures + server.attach(forward_fn, blacklist_fn, priority_fn, verify_fn) + + # Define functions with incorrect signatures + def wrong_blacklist_fn(synapse: TestSynapse) -> int: + return 1 + + def wrong_priority_fn(synapse: TestSynapse) -> int: + return 1 + + def wrong_verify_fn(synapse: TestSynapse) -> bool: + return True + + # Test attaching with incorrect signatures + with pytest.raises(AssertionError): + server.attach(forward_fn, wrong_blacklist_fn, priority_fn, verify_fn) + + with pytest.raises(AssertionError): + server.attach(forward_fn, blacklist_fn, wrong_priority_fn, verify_fn) + + with pytest.raises(AssertionError): + server.attach(forward_fn, blacklist_fn, priority_fn, 
wrong_verify_fn) + + +def test_attach(): + # Create a mock AxonServer instance + server = Axon() + + # Define the Synapse type + class FakeSynapse: + pass + + # Define a class that inherits from Synapse + class InheritedSynapse(Synapse): + pass + + # Define a function with the correct signature + def forward_fn(synapse: InheritedSynapse) -> Any: + pass + + # Test attaching with correct signature and inherited class + server.attach(forward_fn) + + # Define a class that does not inherit from Synapse + class NonInheritedSynapse: + pass + + # Define a function with an argument of a class not inheriting from Synapse + def wrong_forward_fn(synapse: NonInheritedSynapse) -> Any: + pass + + # Test attaching with incorrect class inheritance + with pytest.raises(AssertionError): + server.attach(wrong_forward_fn) + + +def test_log_and_handle_error(): + from bittensor.core.axon import log_and_handle_error + + synapse = SynapseMock() + + synapse = log_and_handle_error(synapse, Exception("Error"), 500, 100) + assert synapse.axon.status_code == 500 + assert re.match(r"Internal Server Error #[\da-f\-]+", synapse.axon.status_message) + assert synapse.axon.process_time is not None + + +def test_create_error_response(): + from bittensor.core.axon import create_error_response + + synapse = SynapseMock() + synapse.axon.status_code = 500 + synapse.axon.status_message = "Error" + + response = create_error_response(synapse) + assert response.status_code == 500 + assert response.body == b'{"message":"Error"}' + + +# Fixtures +@pytest.fixture +def middleware(): + # Mock AxonMiddleware instance with empty axon object + axon = AxonMock() + return AxonMiddleware(None, axon) + + +@pytest.fixture +def mock_request(): + request = AsyncMock(spec=Request) + request.body = AsyncMock(return_value=b'{"field1": "value1", "field2": "value2"}') + request.url.path = "/test_endpoint" + request.headers = {"computed_body_hash": "correct_hash"} + return request + + +@pytest.fixture +def axon_instance(): + axon 
= Axon() + axon.required_hash_fields = {"test_endpoint": ["field1", "field2"]} + axon.forward_class_types = { + "test_endpoint": MagicMock(return_value=MagicMock(body_hash="correct_hash")) + } + return axon + + +# Mocks +@dataclass +class MockWallet: + hotkey: Any + coldkey: Any = None + coldkeypub: Any = None + + +class MockHotkey: + def __init__(self, ss58_address): + self.ss58_address = ss58_address + + def sign(self, *args, **kwargs): + return f"Signed: {args!r} {kwargs!r}".encode() + + +class MockInfo: + def to_string(self): + return "MockInfoString" + + +class AxonMock: + def __init__(self): + self.status_code = None + self.forward_class_types = {} + self.blacklist_fns = {} + self.priority_fns = {} + self.forward_fns = {} + self.verify_fns = {} + self.thread_pool = PriorityThreadPoolExecutor(max_workers=1) + + +class SynapseMock(Synapse): + pass + + +def verify_fn_pass(synapse): + pass + + +def verify_fn_fail(synapse): + raise Exception("Verification failed") + + +def blacklist_fn_pass(synapse): + return False, "" + + +def blacklist_fn_fail(synapse): + return True, "" + + +def priority_fn_pass(synapse) -> float: + return 0.0 + + +def priority_fn_timeout(synapse) -> float: + return 2.0 + + +@pytest.mark.asyncio +async def test_verify_pass(middleware): + synapse = SynapseMock() + middleware.axon.verify_fns = {"SynapseMock": verify_fn_pass} + await middleware.verify(synapse) + assert synapse.axon.status_code != 401 + + +@pytest.mark.asyncio +async def test_verify_fail(middleware): + synapse = SynapseMock() + middleware.axon.verify_fns = {"SynapseMock": verify_fn_fail} + with pytest.raises(Exception): + await middleware.verify(synapse) + assert synapse.axon.status_code == 401 + + +@pytest.mark.asyncio +async def test_blacklist_pass(middleware): + synapse = SynapseMock() + middleware.axon.blacklist_fns = {"SynapseMock": blacklist_fn_pass} + await middleware.blacklist(synapse) + assert synapse.axon.status_code != 403 + + +@pytest.mark.asyncio +async def 
test_blacklist_fail(middleware): + synapse = SynapseMock() + middleware.axon.blacklist_fns = {"SynapseMock": blacklist_fn_fail} + with pytest.raises(Exception): + await middleware.blacklist(synapse) + assert synapse.axon.status_code == 403 + + +@pytest.mark.asyncio +async def test_priority_pass(middleware): + synapse = SynapseMock() + middleware.axon.priority_fns = {"SynapseMock": priority_fn_pass} + await middleware.priority(synapse) + assert synapse.axon.status_code != 408 + + +@pytest.mark.parametrize( + "body, expected", + [ + ( + b'{"field1": "value1", "field2": "value2"}', + {"field1": "value1", "field2": "value2"}, + ), + ( + b'{"field1": "different_value", "field2": "another_value"}', + {"field1": "different_value", "field2": "another_value"}, + ), + ], +) +@pytest.mark.asyncio +async def test_verify_body_integrity_happy_path( + mock_request, axon_instance, body, expected +): + # Arrange + mock_request.body.return_value = body + + # Act + result = await axon_instance.verify_body_integrity(mock_request) + + # Assert + assert result == expected, "The parsed body should match the expected dictionary." + + +@pytest.mark.parametrize( + "body, expected_exception_message", + [ + (b"", "Expecting value: line 1 column 1 (char 0)"), # Empty body + (b"not_json", "Expecting value: line 1 column 1 (char 0)"), # Non-JSON body + ], + ids=["empty_body", "non_json_body"], +) +@pytest.mark.asyncio +async def test_verify_body_integrity_edge_cases( + mock_request, axon_instance, body, expected_exception_message +): + # Arrange + mock_request.body.return_value = body + + # Act & Assert + with pytest.raises(Exception) as exc_info: + await axon_instance.verify_body_integrity(mock_request) + assert expected_exception_message in str( + exc_info.value + ), "Expected specific exception message." 
+ + +@pytest.mark.parametrize( + "computed_hash, expected_error", + [ + ("incorrect_hash", ValueError), + ], +) +@pytest.mark.asyncio +async def test_verify_body_integrity_error_cases( + mock_request, axon_instance, computed_hash, expected_error +): + # Arrange + mock_request.headers["computed_body_hash"] = computed_hash + + # Act & Assert + with pytest.raises(expected_error) as exc_info: + await axon_instance.verify_body_integrity(mock_request) + assert "Hash mismatch" in str(exc_info.value), "Expected a hash mismatch error." + + +@pytest.mark.parametrize( + "info_return, expected_output, test_id", + [ + (MockInfo(), "MockInfoString", "happy_path_basic"), + (MockInfo(), "MockInfoString", "edge_case_empty_string"), + ], +) +def test_to_string(info_return, expected_output, test_id): + # Arrange + axon = Axon() + with patch.object(axon, "info", return_value=info_return): + # Act + output = axon.to_string() + + # Assert + assert output == expected_output, f"Test ID: {test_id}" + + +@pytest.mark.parametrize( + "ip, port, expected_ip_type, test_id", + [ + # Happy path + ( + "127.0.0.1", + 8080, + 4, + "valid_ipv4", + ), + ( + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + 3030, + 6, + "valid_ipv6", + ), + ], +) +def test_valid_ipv4_and_ipv6_address(ip, port, expected_ip_type, test_id): + # Arrange + axon = Axon() + axon.ip = ip + axon.external_ip = ip + axon.port = port + + # Act + ip_type = axon.info().ip_type + + # Assert + assert ip_type == expected_ip_type, f"Test ID: {test_id}" + + +@pytest.mark.parametrize( + "ip, port, expected_exception", + [ + ( + "This Is not a valid address", + 65534, + netaddr.core.AddrFormatError, + ), + ], + ids=["failed to detect a valid IP " "address from %r"], +) +def test_invalid_ip_address(ip, port, expected_exception): + # Assert + with pytest.raises(expected_exception): + Axon(ip=ip, external_ip=ip, port=port).info() + + +@pytest.mark.parametrize( + "ip, port, ss58_address, started, forward_fns, expected_str, test_id", + [ + # Happy 
path + ( + "127.0.0.1", + 8080, + "5G9RtsTbiYJYQYJzUfTCs...", + True, + {"fn1": None}, + "Axon(127.0.0.1, 8080, 5G9RtsTbiYJYQYJzUfTCs..., started, ['fn1'])", + "happy_path_started_with_forward_fn", + ), + ( + "192.168.1.1", + 3030, + "5HqUkGuo62b5...", + False, + {}, + "Axon(192.168.1.1, 3030, 5HqUkGuo62b5..., stopped, [])", + "happy_path_stopped_no_forward_fn", + ), + # Edge cases + ("", 0, "", False, {}, "Axon(, 0, , stopped, [])", "edge_empty_values"), + ( + "255.255.255.255", + 65535, + "5G9RtsTbiYJYQYJzUfTCs...", + True, + {"fn1": None, "fn2": None}, + "Axon(255.255.255.255, 65535, 5G9RtsTbiYJYQYJzUfTCs..., started, ['fn1', 'fn2'])", + "edge_max_values", + ), + ], +) +def test_axon_str_representation( + ip, port, ss58_address, started, forward_fns, expected_str, test_id +): + # Arrange + hotkey = MockHotkey(ss58_address) + wallet = MockWallet(hotkey) + axon = Axon() + axon.ip = ip + axon.port = port + axon.wallet = wallet + axon.started = started + axon.forward_fns = forward_fns + + # Act + result_dunder_str = axon.__str__() + result_dunder_repr = axon.__repr__() + + # Assert + assert result_dunder_str == expected_str, f"Test ID: {test_id}" + assert result_dunder_repr == expected_str, f"Test ID: {test_id}" + + +class TestAxonMiddleware(IsolatedAsyncioTestCase): + def setUp(self): + # Create a mock app + self.mock_app = MagicMock() + # Create a mock axon + self.mock_axon = MagicMock() + self.mock_axon.uuid = "1234" + self.mock_axon.forward_class_types = { + "request_name": Synapse, + } + self.mock_axon.wallet.hotkey.sign.return_value = bytes.fromhex("aabbccdd") + # Create an instance of AxonMiddleware + self.axon_middleware = AxonMiddleware(self.mock_app, self.mock_axon) + return self.axon_middleware + + @pytest.mark.asyncio + async def test_preprocess(self): + # Mock the request + request = MagicMock(spec=Request) + request.url.path = "/request_name" + request.client.port = "5000" + request.client.host = "192.168.0.1" + request.headers = {} + + synapse = await 
self.axon_middleware.preprocess(request) + + # Check if the preprocess function fills the axon information into the synapse + assert synapse.axon.version == str(version_as_int) + assert synapse.axon.uuid == "1234" + assert synapse.axon.nonce is not None + assert synapse.axon.status_message is None + assert synapse.axon.status_code == 100 + assert synapse.axon.signature == "0xaabbccdd" + + # Check if the preprocess function fills the dendrite information into the synapse + assert synapse.dendrite.port == "5000" + assert synapse.dendrite.ip == "192.168.0.1" + + # Check if the preprocess function sets the request name correctly + assert synapse.name == "request_name" + + +class SynapseHTTPClient(TestClient): + def post_synapse(self, synapse: Synapse): + return self.post( + f"/{synapse.__class__.__name__}", + json=synapse.model_dump(), + headers={"computed_body_hash": synapse.body_hash}, + ) + + +@pytest.mark.asyncio +class TestAxonHTTPAPIResponses: + @pytest.fixture + def axon(self): + return Axon( + ip="192.0.2.1", + external_ip="192.0.2.1", + wallet=MockWallet(MockHotkey("A"), MockHotkey("B"), MockHotkey("PUB")), + ) + + @pytest.fixture + def no_verify_axon(self, axon): + axon.default_verify = self.no_verify_fn + return axon + + @pytest.fixture + def http_client(self, axon): + return SynapseHTTPClient(axon.app) + + async def no_verify_fn(self, synapse): + return + + class NonDeterministicHeaders(pydantic.BaseModel): + """ + Helper class to verify headers. + + Size headers are non-determistic as for example, header_size depends on non-deterministic + processing-time value. 
+ """ + + bt_header_axon_process_time: float = pydantic.Field(gt=0, lt=30) + timeout: float = pydantic.Field(gt=0, lt=30) + header_size: int = pydantic.Field(None, gt=10, lt=400) + total_size: int = pydantic.Field(gt=100, lt=10000) + content_length: Optional[int] = pydantic.Field( + None, alias="content-length", gt=100, lt=10000 + ) + + def assert_headers(self, response, expected_headers): + expected_headers = { + "bt_header_axon_status_code": "200", + "bt_header_axon_status_message": "Success", + **expected_headers, + } + headers = dict(response.headers) + non_deterministic_headers_names = { + field.alias or field_name + for field_name, field in self.NonDeterministicHeaders.model_fields.items() + } + non_deterministic_headers = { + field: headers.pop(field, None) for field in non_deterministic_headers_names + } + assert headers == expected_headers + self.NonDeterministicHeaders.model_validate(non_deterministic_headers) + + async def test_unknown_path(self, http_client): + response = http_client.get("/no_such_path") + assert (response.status_code, response.json()) == ( + 404, + { + "message": "Synapse name 'no_such_path' not found. 
Available synapses ['Synapse']" + }, + ) + + async def test_ping__no_dendrite(self, http_client): + response = http_client.post_synapse(Synapse()) + assert (response.status_code, response.json()) == ( + 401, + { + "message": "Not Verified with error: No SS58 formatted address or public key provided" + }, + ) + + async def test_ping__without_verification(self, http_client, axon): + axon.verify_fns["Synapse"] = self.no_verify_fn + request_synapse = Synapse() + response = http_client.post_synapse(request_synapse) + assert response.status_code == 200 + response_synapse = Synapse(**response.json()) + assert response_synapse.axon.status_code == 200 + self.assert_headers( + response, + { + "computed_body_hash": "a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a", + "content-type": "application/json", + "name": "Synapse", + }, + ) + + @pytest.fixture + def custom_synapse_cls(self): + class CustomSynapse(Synapse): + pass + + return CustomSynapse + + @pytest.fixture + def streaming_synapse_cls(self): + class CustomStreamingSynapse(StreamingSynapse): + async def process_streaming_response(self, response): + pass + + def extract_response_json(self, response) -> dict: + return {} + + return CustomStreamingSynapse + + async def test_synapse__explicitly_set_status_code( + self, http_client, axon, custom_synapse_cls, no_verify_axon + ): + error_message = "Essential resource for CustomSynapse not found" + + async def forward_fn(synapse: custom_synapse_cls): + synapse.axon.status_code = 404 + synapse.axon.status_message = error_message + return synapse + + axon.attach(forward_fn) + + response = http_client.post_synapse(custom_synapse_cls()) + assert response.status_code == 404 + response_synapse = custom_synapse_cls(**response.json()) + assert ( + response_synapse.axon.status_code, + response_synapse.axon.status_message, + ) == (404, error_message) + + async def test_synapse__exception_with_set_status_code( + self, http_client, axon, custom_synapse_cls, no_verify_axon 
+ ): + error_message = "Conflicting request" + + async def forward_fn(synapse: custom_synapse_cls): + synapse.axon.status_code = 409 + raise RunException(message=error_message, synapse=synapse) + + axon.attach(forward_fn) + + response = http_client.post_synapse(custom_synapse_cls()) + assert response.status_code == 409 + assert response.json() == {"message": error_message} + + async def test_synapse__internal_error( + self, http_client, axon, custom_synapse_cls, no_verify_axon + ): + async def forward_fn(synapse: custom_synapse_cls): + raise ValueError("error with potentially sensitive information") + + axon.attach(forward_fn) + + response = http_client.post_synapse(custom_synapse_cls()) + assert response.status_code == 500 + response_data = response.json() + assert sorted(response_data.keys()) == ["message"] + assert re.match(r"Internal Server Error #[\da-f\-]+", response_data["message"]) + + +def test_allowed_nonce_window_ns(): + mock_synapse = SynapseMock() + current_time = time.time_ns() + allowed_window_ns = allowed_nonce_window_ns(current_time, mock_synapse.timeout) + expected_window_ns = ( + current_time - ALLOWED_DELTA - (mock_synapse.timeout * NANOSECONDS_IN_SECOND) + ) + assert ( + allowed_window_ns < current_time + ), "Allowed window should be less than the current time" + assert ( + allowed_window_ns == expected_window_ns + ), f"Expected {expected_window_ns} but got {allowed_window_ns}" + + +@pytest.mark.parametrize("nonce_offset_seconds", [1, 3, 5, 10]) +def test_nonce_diff_seconds(nonce_offset_seconds): + mock_synapse = SynapseMock() + current_time_ns = time.time_ns() + synapse_nonce = current_time_ns - (nonce_offset_seconds * NANOSECONDS_IN_SECOND) + diff_seconds, allowed_delta_seconds = calculate_diff_seconds( + current_time_ns, mock_synapse.timeout, synapse_nonce + ) + + expected_diff_seconds = nonce_offset_seconds # Because we subtracted nonce_offset_seconds from current_time_ns + expected_allowed_delta_seconds = ( + ALLOWED_DELTA + 
(mock_synapse.timeout * NANOSECONDS_IN_SECOND) + ) / NANOSECONDS_IN_SECOND + + assert ( + diff_seconds == expected_diff_seconds + ), f"Expected {expected_diff_seconds} but got {diff_seconds}" + assert ( + allowed_delta_seconds == expected_allowed_delta_seconds + ), f"Expected {expected_allowed_delta_seconds} but got {allowed_delta_seconds}" + + +# Mimicking axon default_verify nonce verification +# True: Nonce is fresh, False: Nonce is old +def is_nonce_within_allowed_window(synapse_nonce, allowed_window_ns): + return not (synapse_nonce <= allowed_window_ns) + + +# Test assuming synapse timeout is the default 12 seconds +@pytest.mark.parametrize( + "nonce_offset_seconds, expected_result", + [(1, True), (3, True), (5, True), (15, True), (18, False), (19, False)], +) +def test_nonce_within_allowed_window(nonce_offset_seconds, expected_result): + mock_synapse = SynapseMock() + current_time_ns = time.time_ns() + synapse_nonce = current_time_ns - (nonce_offset_seconds * NANOSECONDS_IN_SECOND) + allowed_window_ns = allowed_nonce_window_ns(current_time_ns, mock_synapse.timeout) + + result = is_nonce_within_allowed_window(synapse_nonce, allowed_window_ns) + + assert result == expected_result, f"Expected {expected_result} but got {result}" + + @pytest.mark.parametrize( + "forward_fn_return_annotation", + [ + None, + fastapi.Response, + StreamingSynapse, + ], + ) + async def test_streaming_synapse( + self, + http_client, + axon, + streaming_synapse_cls, + no_verify_axon, + forward_fn_return_annotation, + ): + tokens = [f"data{i}\n" for i in range(10)] + + async def streamer(send): + for token in tokens: + await send( + { + "type": "http.response.body", + "body": token.encode(), + "more_body": True, + } + ) + await send({"type": "http.response.body", "body": b"", "more_body": False}) + + async def forward_fn(synapse: streaming_synapse_cls): + return synapse.create_streaming_response(token_streamer=streamer) + + if forward_fn_return_annotation is not None: + 
forward_fn.__annotations__["return"] = forward_fn_return_annotation + + axon.attach(forward_fn) + + response = http_client.post_synapse(streaming_synapse_cls()) + assert (response.status_code, response.text) == (200, "".join(tokens)) + self.assert_headers( + response, + { + "content-type": "text/event-stream", + "name": "CustomStreamingSynapse", + "computed_body_hash": "a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a", + }, + ) diff --git a/tests/unit_tests/test_chain_data.py b/tests/unit_tests/test_chain_data.py new file mode 100644 index 0000000000..353f697d46 --- /dev/null +++ b/tests/unit_tests/test_chain_data.py @@ -0,0 +1,479 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import pytest +import torch + +from bittensor.core.chain_data import AxonInfo, DelegateInfo +from bittensor.core.chain_data.utils import ChainDataType + +RAOPERTAO = 10**18 + + +@pytest.mark.parametrize( + "ip, expected, test_case", + [ + ("0.0.0.0", False, "ID_is_serving_false"), + ("127.0.0.1", True, "ID_is_serving_true"), + ], +) +def test_is_serving(ip, expected, test_case): + # Arrange + axon_info = AxonInfo( + version=1, ip=ip, port=8080, ip_type=4, hotkey="", coldkey="cold" + ) + + # Act + result = axon_info.is_serving + + # Assert + assert result == expected, f"Test case: {test_case}" + + +@pytest.mark.parametrize( + "ip_type, ip, port, expected, test_case", + [ + (4, "127.0.0.1", 8080, "/ipv4/127.0.0.1:8080", "ID_ip_str_ipv4"), + (6, "::1", 8080, "/ipv6/::1:8080", "ID_ip_str_ipv6"), + ], +) +def test_ip_str(ip_type, ip, port, expected, test_case): + # Arrange + axon_info = AxonInfo( + version=1, ip=ip, port=port, ip_type=ip_type, hotkey="hot", coldkey="cold" + ) + + # Act + result = axon_info.ip_str() + + # Assert + assert result == expected, f"Test case: {test_case}" + + +@pytest.mark.parametrize( + "other, expected, test_case", + [ + (None, False, "ID_eq_none"), + ( + AxonInfo( + version=1, + ip="127.0.0.1", + port=8080, + ip_type=4, + hotkey="hot", + coldkey="cold", + ), + True, + "ID_eq_equal", + ), + ( + AxonInfo( + version=2, + ip="127.0.0.1", + port=8080, + ip_type=4, + hotkey="hot", + coldkey="cold", + ), + False, + "ID_eq_diff_version", + ), + ], +) +def test_eq(other, expected, test_case): + # Arrange + axon_info = AxonInfo( + version=1, ip="127.0.0.1", port=8080, ip_type=4, hotkey="hot", coldkey="cold" + ) + + # Act + result = axon_info == other + + # Assert + assert result == expected, f"Test case: {test_case}" + + +@pytest.mark.parametrize( + "axon_info, expected, test_case", + [ + ( + AxonInfo( + version=1, + ip="127.0.0.1", + port=8080, + ip_type=4, + hotkey="hot", + coldkey="cold", + ), + '{"version": 1, "ip": "127.0.0.1", "port": 8080, 
"ip_type": 4, "hotkey": "hot", "coldkey": "cold", "protocol": 4, "placeholder1": 0, "placeholder2": 0}', + "ID_to_string", + ), + ], +) +def test_to_string(axon_info, expected, test_case): + # Act + result = axon_info.to_string() + + # Assert + assert result == expected, f"Test case: {test_case}" + + +# Test AxonInfo.from_string method +@pytest.mark.parametrize( + "string, expected, test_case", + [ + ( + '{"version": 1, "ip": "127.0.0.1", "port": 8080, "ip_type": 4, "hotkey": "hot", "coldkey": "cold"}', + AxonInfo( + version=1, + ip="127.0.0.1", + port=8080, + ip_type=4, + hotkey="hot", + coldkey="cold", + ), + "ID_from_string_valid", + ), + ("invalid_json", AxonInfo(0, "", 0, 0, "", ""), "ID_from_string_invalid_json"), + ], +) +def test_from_string(string, expected, test_case): + # Act + result = AxonInfo.from_string(string) + + # Assert + assert result == expected, f"Test case: {test_case}" + + +# Test AxonInfo.from_neuron_info method +@pytest.mark.parametrize( + "neuron_info, expected, test_case", + [ + ( + { + "axon_info": { + "version": 1, + "ip": 2130706433, + "port": 8080, + "ip_type": 4, + }, + "hotkey": "hot", + "coldkey": "cold", + }, + AxonInfo( + version=1, + ip="127.0.0.1", + port=8080, + ip_type=4, + hotkey="hot", + coldkey="cold", + ), + "ID_from_neuron_info", + ), + ], +) +def test_from_neuron_info(neuron_info, expected, test_case): + # Act + result = AxonInfo.from_neuron_info(neuron_info) + + # Assert + assert result == expected, f"Test case: {test_case}" + + +# Test AxonInfo.to_parameter_dict method +@pytest.mark.parametrize( + "axon_info, test_case", + [ + ( + AxonInfo( + version=1, + ip="127.0.0.1", + port=8080, + ip_type=4, + hotkey="hot", + coldkey="cold", + ), + "ID_to_parameter_dict", + ), + ], +) +def test_to_parameter_dict(axon_info, test_case): + # Act + result = axon_info.to_parameter_dict() + + # Assert + assert isinstance(result, dict) + for key, value in axon_info.__dict__.items(): + assert key in result + assert result[key] == value, 
f"Test case: {test_case}" + + +@pytest.mark.parametrize( + "axon_info, test_case", + [ + ( + AxonInfo( + version=1, + ip="127.0.0.1", + port=8080, + ip_type=4, + hotkey="hot", + coldkey="cold", + ), + "ID_to_parameter_dict", + ), + ], +) +def test_to_parameter_dict_torch( + axon_info, + test_case, + force_legacy_torch_compatible_api, +): + result = axon_info.to_parameter_dict() + + # Assert + assert isinstance(result, torch.nn.ParameterDict) + for key, value in axon_info.__dict__.items(): + assert key in result + assert result[key] == value, f"Test case: {test_case}" + + +@pytest.mark.parametrize( + "parameter_dict, expected, test_case", + [ + ( + { + "version": 1, + "ip": "127.0.0.1", + "port": 8080, + "ip_type": 4, + "hotkey": "hot", + "coldkey": "cold", + }, + AxonInfo( + version=1, + ip="127.0.0.1", + port=8080, + ip_type=4, + hotkey="hot", + coldkey="cold", + ), + "ID_from_parameter_dict", + ), + ], +) +def test_from_parameter_dict(parameter_dict, expected, test_case): + # Act + result = AxonInfo.from_parameter_dict(parameter_dict) + + # Assert + assert result == expected, f"Test case: {test_case}" + + +@pytest.mark.parametrize( + "parameter_dict, expected, test_case", + [ + ( + torch.nn.ParameterDict( + { + "version": 1, + "ip": "127.0.0.1", + "port": 8080, + "ip_type": 4, + "hotkey": "hot", + "coldkey": "cold", + } + ), + AxonInfo( + version=1, + ip="127.0.0.1", + port=8080, + ip_type=4, + hotkey="hot", + coldkey="cold", + ), + "ID_from_parameter_dict", + ), + ], +) +def test_from_parameter_dict_torch( + parameter_dict, expected, test_case, force_legacy_torch_compatible_api +): + # Act + result = AxonInfo.from_parameter_dict(parameter_dict) + + # Assert + assert result == expected, f"Test case: {test_case}" + + +def create_neuron_info_decoded( + hotkey, + coldkey, + stake, + weights, + bonds, + rank, + emission, + incentive, + consensus, + trust, + validator_trust, + dividends, + uid, + netuid, + active, + last_update, + validator_permit, + pruning_score, + 
prometheus_info, + axon_info, +): + return { + "hotkey": hotkey, + "coldkey": coldkey, + "stake": stake, + "weights": weights, + "bonds": bonds, + "rank": rank, + "emission": emission, + "incentive": incentive, + "consensus": consensus, + "trust": trust, + "validator_trust": validator_trust, + "dividends": dividends, + "uid": uid, + "netuid": netuid, + "active": active, + "last_update": last_update, + "validator_permit": validator_permit, + "pruning_score": pruning_score, + "prometheus_info": prometheus_info, + "axon_info": axon_info, + } + + +@pytest.fixture +def mock_from_scale_encoding(mocker): + return mocker.patch("bittensor.core.chain_data.delegate_info.from_scale_encoding") + + +@pytest.fixture +def mock_fix_decoded_values(mocker): + return mocker.patch( + "bittensor.core.chain_data.DelegateInfo.fix_decoded_values", + side_effect=lambda x: x, + ) + + +@pytest.mark.parametrize( + "test_id, vec_u8, expected", + [ + ( + "happy-path-1", + [1, 2, 3], + [ + DelegateInfo( + hotkey_ss58="hotkey", + total_stake=1000, + nominators=[ + "nominator1", + "nominator2", + ], + owner_ss58="owner", + take=10.1, + validator_permits=[1, 2, 3], + registrations=[4, 5, 6], + return_per_1000=100, + total_daily_return=1000, + ) + ], + ), + ( + "happy-path-2", + [4, 5, 6], + [ + DelegateInfo( + hotkey_ss58="hotkey", + total_stake=1000, + nominators=[ + "nominator1", + "nominator2", + ], + owner_ss58="owner", + take=2.1, + validator_permits=[1, 2, 3], + registrations=[4, 5, 6], + return_per_1000=100, + total_daily_return=1000, + ) + ], + ), + ], +) +def test_list_from_vec_u8_happy_path( + mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected +): + # Arrange + mock_from_scale_encoding.return_value = expected + + # Act + result = DelegateInfo.list_from_vec_u8(vec_u8) + + # Assert + mock_from_scale_encoding.assert_called_once_with( + vec_u8, ChainDataType.DelegateInfo, is_vec=True + ) + assert result == expected, f"Failed {test_id}" + + +@pytest.mark.parametrize( + 
"test_id, vec_u8, expected", + [ + ("edge_empty_list", [], []), + ], +) +def test_list_from_vec_u8_edge_cases( + mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected +): + # Arrange + mock_from_scale_encoding.return_value = None + + # Act + result = DelegateInfo.list_from_vec_u8(vec_u8) + + # Assert + mock_from_scale_encoding.assert_called_once_with( + vec_u8, ChainDataType.DelegateInfo, is_vec=True + ) + assert result == expected, f"Failed {test_id}" + + +@pytest.mark.parametrize( + "vec_u8, expected_exception", + [ + ("not_a_list", TypeError), + ], +) +def test_list_from_vec_u8_error_cases( + vec_u8, + expected_exception, +): + # No Arrange section needed as input values are provided via test parameters + + # Act & Assert + with pytest.raises(expected_exception): + _ = DelegateInfo.list_from_vec_u8(vec_u8) diff --git a/tests/unit_tests/test_dendrite.py b/tests/unit_tests/test_dendrite.py new file mode 100644 index 0000000000..3150aaf648 --- /dev/null +++ b/tests/unit_tests/test_dendrite.py @@ -0,0 +1,416 @@ +# The MIT License (MIT) +# Copyright © 2022 Yuma Rao +# Copyright © 2022-2023 Opentensor Foundation +# Copyright © 2023 Opentensor Technologies Inc + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import asyncio +import typing +from unittest.mock import MagicMock, Mock + +import aiohttp +import pytest + +from bittensor.core.axon import Axon +from bittensor.core.dendrite import ( + DENDRITE_ERROR_MAPPING, + DENDRITE_DEFAULT_ERROR, + Dendrite, +) +from bittensor.core.synapse import TerminalInfo +from tests.helpers import get_mock_wallet +from bittensor.core.synapse import Synapse +from bittensor.core.chain_data import AxonInfo + + +class SynapseDummy(Synapse): + input: int + output: typing.Optional[int] = None + + +def dummy(synapse: SynapseDummy) -> SynapseDummy: + synapse.output = synapse.input + 1 + return synapse + + +@pytest.fixture +def setup_dendrite(): + # Assuming bittensor.Wallet() returns a wallet object + user_wallet = get_mock_wallet() + dendrite_obj = Dendrite(user_wallet) + return dendrite_obj + + +@pytest.fixture +def dendrite_obj(setup_dendrite): + return setup_dendrite + + +@pytest.fixture +def axon_info(): + return AxonInfo( + version=1, + ip="127.0.0.1", + port=666, + ip_type=4, + hotkey="hot", + coldkey="cold", + ) + + +@pytest.fixture(scope="session") +def setup_axon(): + axon = Axon() + axon.attach(forward_fn=dummy) + axon.start() + yield axon + del axon + + +def test_init(setup_dendrite): + dendrite_obj = setup_dendrite + assert isinstance(dendrite_obj, Dendrite) + assert dendrite_obj.keypair == setup_dendrite.keypair + + +def test_str(dendrite_obj): + expected_string = f"dendrite({dendrite_obj.keypair.ss58_address})" + assert str(dendrite_obj) == expected_string + + +def test_repr(dendrite_obj): + expected_string = f"dendrite({dendrite_obj.keypair.ss58_address})" + assert repr(dendrite_obj) == expected_string + + +def test_close(dendrite_obj, setup_axon): + axon = 
setup_axon + # Query the axon to open a session + dendrite_obj.query(axon, SynapseDummy(input=1)) + # Session should be automatically closed after query + assert dendrite_obj._session is None + + +@pytest.mark.asyncio +async def test_aclose(dendrite_obj, setup_axon): + axon = setup_axon + # Use context manager to open an async session + async with dendrite_obj: + await dendrite_obj([axon], SynapseDummy(input=1), deserialize=False) + # Close should automatically be called on the session after context manager scope + assert dendrite_obj._session is None + + +class AsyncMock(Mock): + def __call__(self, *args, **kwargs): + sup = super(AsyncMock, self) + + async def coro(): + return sup.__call__(*args, **kwargs) + + return coro() + + def __await__(self): + return self().__await__() + + +def test_dendrite_create_wallet(): + d = Dendrite(get_mock_wallet()) + d = Dendrite(get_mock_wallet().hotkey) + d = Dendrite(get_mock_wallet().coldkeypub) + assert d.__str__() == d.__repr__() + + +@pytest.mark.asyncio +async def test_forward_many(): + n = 10 + d = Dendrite(wallet=get_mock_wallet()) + d.call = AsyncMock() + axons = [MagicMock() for _ in range(n)] + + resps = await d(axons) + assert len(resps) == n + resp = await d(axons[0]) + assert len([resp]) == 1 + + resps = await d.forward(axons) + assert len(resps) == n + resp = await d.forward(axons[0]) + assert len([resp]) == 1 + + +def test_pre_process_synapse(): + d = Dendrite(wallet=get_mock_wallet()) + s = Synapse() + synapse = d.preprocess_synapse_for_request( + target_axon_info=Axon(wallet=get_mock_wallet()).info(), + synapse=s, + timeout=12, + ) + assert synapse.timeout == 12 + assert synapse.dendrite + assert synapse.axon + assert synapse.dendrite.ip + assert synapse.dendrite.version + assert synapse.dendrite.nonce + assert synapse.dendrite.uuid + assert synapse.dendrite.hotkey + assert synapse.axon.ip + assert synapse.axon.port + assert synapse.axon.hotkey + assert synapse.dendrite.signature + + +# Helper functions for 
casting, assuming they exist and work correctly. +def cast_int(value: typing.Any) -> int: + return int(value) + + +def cast_float(value: typing.Any) -> float: + return float(value) + + +# Happy path tests +@pytest.mark.parametrize( + "status_code, status_message, process_time, ip, port, version, nonce, uuid, hotkey, signature, expected", + [ + ( + 200, + "Success", + 0.1, + "198.123.23.1", + 9282, + 111, + 111111, + "5ecbd69c-1cec-11ee-b0dc-e29ce36fec1a", + "5EnjDGNqqWnuL2HCAdxeEtN2oqtXZw6BMBe936Kfy2PFz1J1", + "0x0813029319030129u4120u10841824y0182u091u230912u", + True, + ), + # Add more test cases with different combinations of realistic values + ], + ids=["basic-success"], +) +def test_terminal_info_happy_path( + status_code, + status_message, + process_time, + ip, + port, + version, + nonce, + uuid, + hotkey, + signature, + expected, +): + # Act + terminal_info = TerminalInfo( + status_code=status_code, + status_message=status_message, + process_time=process_time, + ip=ip, + port=port, + version=version, + nonce=nonce, + uuid=uuid, + hotkey=hotkey, + signature=signature, + ) + + # Assert + assert isinstance(terminal_info, TerminalInfo) == expected + assert terminal_info.status_code == status_code + assert terminal_info.status_message == status_message + assert terminal_info.process_time == process_time + assert terminal_info.ip == ip + assert terminal_info.port == port + assert terminal_info.version == version + assert terminal_info.nonce == nonce + assert terminal_info.uuid == uuid + assert terminal_info.hotkey == hotkey + assert terminal_info.signature == signature + + +# Edge cases +@pytest.mark.parametrize( + "status_code, process_time, port, version, nonce, expected_exception", + [ + ("not-an-int", 0.1, 9282, 111, 111111, ValueError), # status_code not an int + (200, "not-a-float", 9282, 111, 111111, ValueError), # process_time not a float + (200, 0.1, "not-an-int", 111, 111111, ValueError), # port not an int + # Add more edge cases as needed + ], + 
ids=["status_code-not-int", "process_time-not-float", "port-not-int"], +) +def test_terminal_info_edge_cases( + status_code, process_time, port, version, nonce, expected_exception +): + # Act & Assert + with pytest.raises(expected_exception): + TerminalInfo( + status_code=status_code, + process_time=process_time, + port=port, + version=version, + nonce=nonce, + ) + + +# Error case +@pytest.mark.parametrize( + "status_code, process_time, port, ip, version, nonce, expected_exception", + [ + (None, 0.1, 9282, 111, TerminalInfo(), 111111, TypeError), + ], + ids=[ + "int() argument must be a string, a bytes-like object or a real number, not 'TerminalInfo'" + ], +) +def test_terminal_info_error_cases( + status_code, process_time, port, ip, version, nonce, expected_exception +): + # Act & Assert + with pytest.raises(expected_exception): + TerminalInfo( + status_code=status_code, + process_time=process_time, + port=port, + ip=ip, + version=version, + nonce=nonce, + ) + + +@pytest.mark.asyncio +async def test_dendrite__call__success_response( + axon_info, dendrite_obj, mock_aio_response +): + input_synapse = SynapseDummy(input=1) + expected_synapse = SynapseDummy( + **( + input_synapse.model_dump() + | dict( + output=2, + axon=TerminalInfo( + status_code=200, + status_message="Success", + process_time=0.1, + ), + ) + ) + ) + mock_aio_response.post( + f"http://127.0.0.1:666/SynapseDummy", + body=expected_synapse.json(), + ) + synapse = await dendrite_obj.call(axon_info, synapse=input_synapse) + + assert synapse.input == 1 + assert synapse.output == 2 + assert synapse.dendrite.status_code == 200 + assert synapse.dendrite.status_message == "Success" + assert synapse.dendrite.process_time >= 0 + + +@pytest.mark.asyncio +async def test_dendrite__call__handles_http_error_response( + axon_info, dendrite_obj, mock_aio_response +): + status_code = 414 + message = "Custom Error" + + mock_aio_response.post( + "http://127.0.0.1:666/SynapseDummy", + status=status_code, + 
payload={"message": message}, + ) + synapse = await dendrite_obj.call(axon_info, synapse=SynapseDummy(input=1)) + + assert synapse.axon.status_code == synapse.dendrite.status_code == status_code + assert synapse.axon.status_message == synapse.dendrite.status_message == message + + +@pytest.mark.parametrize( + "exception, expected_status_code, expected_message, synapse_timeout, synapse_ip, synapse_port, request_name", + [ + ( + aiohttp.ClientConnectorError(Mock(), Mock()), + DENDRITE_ERROR_MAPPING[aiohttp.ClientConnectorError][0], + f"{DENDRITE_ERROR_MAPPING[aiohttp.ClientConnectorError][1]} at 127.0.0.1:8080/test_request", + None, + "127.0.0.1", + "8080", + "test_request_client_connector_error", + ), + ( + asyncio.TimeoutError(), + DENDRITE_ERROR_MAPPING[asyncio.TimeoutError][0], + f"{DENDRITE_ERROR_MAPPING[asyncio.TimeoutError][1]} after 5 seconds", + 5, + None, + None, + "test_request_timeout", + ), + ( + aiohttp.ClientResponseError(Mock(), Mock(), status=404), + "404", + f"{DENDRITE_ERROR_MAPPING[aiohttp.ClientResponseError][1]}: 404, message=''", + None, + None, + None, + "test_request_client_response_error", + ), + ( + Exception("Unknown error"), + DENDRITE_DEFAULT_ERROR[0], + f"{DENDRITE_DEFAULT_ERROR[1]}: Unknown error", + None, + None, + None, + "test_request_unknown_error", + ), + ], + ids=[ + "ClientConnectorError", + "TimeoutError", + "ClientResponseError", + "GenericException", + ], +) +def test_process_error_message( + exception, + expected_status_code, + expected_message, + synapse_timeout, + synapse_ip, + synapse_port, + request_name, +): + # Arrange + dendrite = Dendrite() + synapse = Mock() + + synapse.timeout = synapse_timeout + synapse.axon.ip = synapse_ip + synapse.axon.port = synapse_port + + # Act + result = dendrite.process_error_message(synapse, request_name, exception) + + # Assert + assert result.dendrite.status_code == expected_status_code + assert expected_message in result.dendrite.status_message diff --git 
a/tests/unit_tests/test_deprecated.py b/tests/unit_tests/test_deprecated.py new file mode 100644 index 0000000000..c4b906a0ca --- /dev/null +++ b/tests/unit_tests/test_deprecated.py @@ -0,0 +1,51 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import sys + + +def test_mock_import(): + """ + Tests that `bittensor.mock` can be imported and is the same as `bittensor.utils.mock`. 
+ """ + import bittensor.mock as redirected_mock + import bittensor.utils.mock as real_mock + + assert "bittensor.mock" in sys.modules + assert redirected_mock is real_mock + + +def test_extrinsics_import(): + """Tests that `bittensor.extrinsics` can be imported and is the same as `bittensor.utils.deprecated.extrinsics`.""" + import bittensor.extrinsics as redirected_extrinsics + import bittensor.core.extrinsics as real_extrinsics + + assert "bittensor.extrinsics" in sys.modules + assert redirected_extrinsics is real_extrinsics + + +def test_object_aliases_are_correctly_mapped(): + """Ensures all object aliases correctly map to their respective classes in Bittensor package.""" + import bittensor + + assert issubclass(bittensor.axon, bittensor.Axon) + assert issubclass(bittensor.config, bittensor.Config) + assert issubclass(bittensor.dendrite, bittensor.Dendrite) + assert issubclass(bittensor.keyfile, bittensor.Keyfile) + assert issubclass(bittensor.metagraph, bittensor.Metagraph) + assert issubclass(bittensor.wallet, bittensor.Wallet) + assert issubclass(bittensor.synapse, bittensor.Synapse) diff --git a/tests/unit_tests/test_logging.py b/tests/unit_tests/test_logging.py new file mode 100644 index 0000000000..2c5e593f0e --- /dev/null +++ b/tests/unit_tests/test_logging.py @@ -0,0 +1,199 @@ +import logging as stdlogging +import multiprocessing +from unittest.mock import MagicMock, patch + +import pytest + +from bittensor.utils.btlogging import LoggingMachine +from bittensor.utils.btlogging.defines import ( + DEFAULT_LOG_FILE_NAME, + BITTENSOR_LOGGER_NAME, +) +from bittensor.utils.btlogging.loggingmachine import LoggingConfig, _concat_message + + +@pytest.fixture(autouse=True, scope="session") +def disable_stdout_streaming(): + # Backup original handlers + original_handlers = stdlogging.root.handlers[:] + + # Remove all handlers that stream to stdout + stdlogging.root.handlers = [ + h + for h in stdlogging.root.handlers + if not isinstance(h, 
stdlogging.StreamHandler) + ] + + yield # Yield control to the test or fixture setup + + # Restore original handlers after the test + stdlogging.root.handlers = original_handlers + + +@pytest.fixture +def mock_config(tmp_path): + # Using pytest's tmp_path fixture to generate a temporary directory + log_dir = tmp_path / "logs" + log_dir.mkdir() # Create the temporary directory + log_file_path = log_dir / DEFAULT_LOG_FILE_NAME + + mock_config = LoggingConfig( + debug=False, trace=False, record_log=True, logging_dir=str(log_dir) + ) + + yield mock_config, log_file_path + # Cleanup: No need to explicitly delete the log file or directory, tmp_path does it automatically + + +@pytest.fixture +def logging_machine(mock_config): + config, _ = mock_config + logging_machine = LoggingMachine(config=config) + return logging_machine + + +def test_initialization(logging_machine, mock_config): + """ + Test initialization of LoggingMachine. + """ + config, log_file_path = mock_config # Unpack to get the log_file_path + + assert logging_machine.get_queue() is not None + assert isinstance(logging_machine.get_queue(), multiprocessing.queues.Queue) + assert logging_machine.get_config() == config + + # Ensure that handlers are set up correctly + assert any( + isinstance(handler, stdlogging.StreamHandler) + for handler in logging_machine._handlers + ) + if config.record_log and config.logging_dir: + assert any( + isinstance(handler, stdlogging.FileHandler) + for handler in logging_machine._handlers + ) + assert log_file_path.exists() # Check if log file is created + + +def test_state_transitions(logging_machine, mock_config): + """ + Test state transitions and the associated logging level changes. 
+ """ + config, log_file_path = mock_config + with patch( + "bittensor.utils.btlogging.loggingmachine.all_loggers" + ) as mocked_all_loggers: + # mock the main bittensor logger, identified by its `name` field + mocked_bt_logger = MagicMock() + mocked_bt_logger.name = BITTENSOR_LOGGER_NAME + # third party loggers are treated differently and silenced under default + # logging settings + mocked_third_party_logger = MagicMock() + logging_machine._logger = mocked_bt_logger + mocked_all_loggers.return_value = [mocked_third_party_logger, mocked_bt_logger] + + # Enable/Disable Debug + # from default + assert logging_machine.current_state_value == "Default" + logging_machine.enable_debug() + assert logging_machine.current_state_value == "Debug" + # check log levels + mocked_bt_logger.setLevel.assert_called_with(stdlogging.DEBUG) + mocked_third_party_logger.setLevel.assert_called_with(stdlogging.DEBUG) + + logging_machine.disable_debug() + + # Enable/Disable Trace + assert logging_machine.current_state_value == "Default" + logging_machine.enable_trace() + assert logging_machine.current_state_value == "Trace" + # check log levels + mocked_bt_logger.setLevel.assert_called_with(stdlogging.TRACE) + mocked_third_party_logger.setLevel.assert_called_with(stdlogging.TRACE) + logging_machine.disable_trace() + assert logging_machine.current_state_value == "Default" + + # Enable Default + logging_machine.enable_debug() + assert logging_machine.current_state_value == "Debug" + logging_machine.enable_default() + assert logging_machine.current_state_value == "Default" + # main logger set to INFO + mocked_bt_logger.setLevel.assert_called_with(stdlogging.INFO) + # 3rd party loggers should be disabled by setting to CRITICAL + mocked_third_party_logger.setLevel.assert_called_with(stdlogging.CRITICAL) + + # Disable Logging + # from default + logging_machine.disable_logging() + assert logging_machine.current_state_value == "Disabled" + 
mocked_bt_logger.setLevel.assert_called_with(stdlogging.CRITICAL) + mocked_third_party_logger.setLevel.assert_called_with(stdlogging.CRITICAL) + + +def test_enable_file_logging_with_new_config(tmp_path): + """ + Test enabling file logging by setting a new config. + """ + log_dir = tmp_path / "logs" + log_dir.mkdir() # Create the temporary directory + log_file_path = log_dir / DEFAULT_LOG_FILE_NAME + + # check no file handler is created + config = LoggingConfig(debug=False, trace=False, record_log=True, logging_dir=None) + lm = LoggingMachine(config) + assert not any( + isinstance(handler, stdlogging.FileHandler) for handler in lm._handlers + ) + + # check file handler now exists + new_config = LoggingConfig( + debug=False, trace=False, record_log=True, logging_dir=str(log_dir) + ) + lm.set_config(new_config) + assert any(isinstance(handler, stdlogging.FileHandler) for handler in lm._handlers) + + +def test_all_log_levels_output(logging_machine, caplog): + """ + Test that all log levels are captured. 
+ """ + logging_machine.set_trace() + + logging_machine.trace("Test trace") + logging_machine.debug("Test debug") + logging_machine.info("Test info") + logging_machine.success("Test success") + logging_machine.warning("Test warning") + logging_machine.error("Test error") + logging_machine.critical("Test critical") + + assert "Test trace" in caplog.text + assert "Test debug" in caplog.text + assert "Test info" in caplog.text + assert "Test success" in caplog.text + assert "Test warning" in caplog.text + assert "Test error" in caplog.text + assert "Test critical" in caplog.text + + +@pytest.mark.parametrize( + "msg, prefix, suffix, expected_result", + [ + ("msg", "", "", "msg"), + ("msg", None, None, "msg"), + ("msg", "prefix", None, "prefix - msg"), + ("msg", None, "suffix", "msg - suffix"), + ("msg", "prefix", "suffix", "prefix - msg - suffix"), + ], + ids=[ + "message, no prefix (str), no suffix (str)", + "message, no prefix (None), no suffix (None)", + "message and prefix only", + "message and suffix only", + "message, prefix, and suffix", + ], +) +def test_concat(msg, prefix, suffix, expected_result): + """Test different options of message concatenation with prefix and suffix.""" + assert _concat_message(msg, prefix, suffix) == expected_result diff --git a/tests/unit_tests/test_metagraph.py b/tests/unit_tests/test_metagraph.py new file mode 100644 index 0000000000..e4dca70a1d --- /dev/null +++ b/tests/unit_tests/test_metagraph.py @@ -0,0 +1,176 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The 
above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +from unittest.mock import MagicMock +from unittest.mock import Mock + +import numpy as np +import pytest + +from bittensor.core import settings +from bittensor.core.metagraph import Metagraph + + +@pytest.fixture +def mock_environment(): + # Create a Mock for subtensor + subtensor = Mock() + + # Create a list of Mock Neurons + neurons = [ + Mock( + uid=i, + trust=i + 0.5, + consensus=i + 0.1, + incentive=i + 0.2, + dividends=i + 0.3, + rank=i + 0.4, + emission=i + 0.5, + active=i, + last_update=i, + validator_permit=i % 2 == 0, + validator_trust=i + 0.6, + total_stake=Mock(tao=i + 0.7), + stake=i + 0.8, + axon_info=f"axon_info_{i}", + weights=[(j, j + 0.1) for j in range(5)], + bonds=[(j, j + 0.2) for j in range(5)], + ) + for i in range(10) + ] + + return subtensor, neurons + + +def test_set_metagraph_attributes(mock_environment): + subtensor, neurons = mock_environment + metagraph = Metagraph(1, sync=False) + metagraph.neurons = neurons + metagraph._set_metagraph_attributes(block=5, subtensor=subtensor) + + # Check the attributes are set as expected + assert metagraph.n.item() == len(neurons) + assert metagraph.block.item() == 5 + assert ( + np.array_equal( + metagraph.uids, + np.array([neuron.uid for neuron in neurons], dtype=np.int64), + ) + is True + ) + + assert ( + np.array_equal( + metagraph.trust, + np.array([neuron.trust for neuron in neurons], 
dtype=np.float32), + ) + is True + ) + + assert ( + np.array_equal( + metagraph.consensus, + np.array([neuron.consensus for neuron in neurons], dtype=np.float32), + ) + is True + ) + # Similarly for other attributes... + + # Test the axons + assert metagraph.axons == [n.axon_info for n in neurons] + + +def test_process_weights_or_bonds(mock_environment): + _, neurons = mock_environment + metagraph = Metagraph(1, sync=False) + metagraph.neurons = neurons + + # Test weights processing + weights = metagraph._process_weights_or_bonds( + data=[neuron.weights for neuron in neurons], attribute="weights" + ) + assert weights.shape[0] == len( + neurons + ) # Number of rows should be equal to number of neurons + assert weights.shape[1] == len( + neurons + ) # Number of columns should be equal to number of neurons + # TODO: Add more checks to ensure the weights have been processed correctly + + # Test bonds processing + bonds = metagraph._process_weights_or_bonds( + data=[neuron.bonds for neuron in neurons], attribute="bonds" + ) + assert bonds.shape[0] == len( + neurons + ) # Number of rows should be equal to number of neurons + assert bonds.shape[1] == len( + neurons + ) # Number of columns should be equal to number of neurons + + # TODO: Add more checks to ensure the bonds have been processed correctly + + +# Mocking the bittensor.Subtensor class for testing purposes +@pytest.fixture +def mock_subtensor(): + subtensor = MagicMock() + subtensor.chain_endpoint = settings.FINNEY_ENTRYPOINT + subtensor.network = "finney" + subtensor.get_current_block.return_value = 601 + return subtensor + + +# Mocking the metagraph instance for testing purposes +@pytest.fixture +def metagraph_instance(): + metagraph = Metagraph(netuid=1337, sync=False) + metagraph._assign_neurons = MagicMock() + metagraph._set_metagraph_attributes = MagicMock() + metagraph._set_weights_and_bonds = MagicMock() + return metagraph + + +@pytest.fixture +def loguru_sink(): + class LogSink: + def __init__(self): + 
self.messages = [] + + def write(self, message): + # Assuming `message` is an object, you might need to adjust how you extract the text + self.messages.append(str(message)) + + def __contains__(self, item): + return any(item in message for message in self.messages) + + return LogSink() + + +@pytest.mark.parametrize( + "block, test_id", + [ + (300, "warning_case_block_greater_than_300"), + ], +) +def test_sync_warning_cases(block, test_id, metagraph_instance, mock_subtensor, caplog): + metagraph_instance.sync(block=block, lite=True, subtensor=mock_subtensor) + + expected_message = "Attempting to sync longer than 300 blocks ago on a non-archive node. Please use the 'archive' network for subtensor and retry." + assert ( + expected_message in caplog.text + ), f"Test ID: {test_id} - Expected warning message not found in Loguru sink." diff --git a/tests/unit_tests/test_subnets.py b/tests/unit_tests/test_subnets.py new file mode 100644 index 0000000000..9cec02e935 --- /dev/null +++ b/tests/unit_tests/test_subnets.py @@ -0,0 +1,82 @@ +import pytest +from mpmath.ctx_mp_python import return_mpc + +from bittensor.utils import subnets + + +class MySubnetsAPI(subnets.SubnetsAPI): + """Example of user class inherited from SubnetsAPI.""" + + def prepare_synapse(self, *args, **kwargs): + """Prepare the synapse-specific payload.""" + + def process_responses(self, responses): + """Process the responses from the network.""" + return responses + + +def test_instance_creation(mocker): + """Test the creation of a MySubnetsAPI instance.""" + # Prep + mocked_dendrite = mocker.patch.object(subnets, "Dendrite") + fake_wallet = mocker.MagicMock() + + # Call + instance = MySubnetsAPI(fake_wallet) + + # Asserts + assert isinstance(instance, subnets.SubnetsAPI) + mocked_dendrite.assert_called_once_with(wallet=fake_wallet) + assert instance.dendrite == mocked_dendrite.return_value + assert instance.wallet == fake_wallet + + +@pytest.mark.asyncio +async def test_query_api(mocker): + """Test 
querying the MySubnetsAPI instance asynchronously.""" + # Prep + mocked_async_dendrite = mocker.AsyncMock() + mocked_dendrite = mocker.patch.object( + subnets, "Dendrite", return_value=mocked_async_dendrite + ) + + fake_wallet = mocker.MagicMock() + fake_axon = mocker.MagicMock() + + mocked_synapse = mocker.MagicMock() + mocked_synapse.return_value.name = "test synapse" + mocked_prepare_synapse = mocker.patch.object( + MySubnetsAPI, "prepare_synapse", return_value=mocked_synapse + ) + + # Call + instance = MySubnetsAPI(fake_wallet) + result = await instance.query_api(fake_axon, **{"key": "val"}) + + # Asserts + mocked_prepare_synapse.assert_called_once_with(key="val") + mocked_dendrite.assert_called_once_with(wallet=fake_wallet) + assert result == mocked_async_dendrite.return_value + + +@pytest.mark.asyncio +async def test_test_instance_call(mocker): + """Test the MySubnetsAPI instance call with asynchronous handling.""" + # Prep + mocked_async_dendrite = mocker.AsyncMock() + mocked_dendrite = mocker.patch.object( + subnets, "Dendrite", return_value=mocked_async_dendrite + ) + mocked_query_api = mocker.patch.object( + MySubnetsAPI, "query_api", new=mocker.AsyncMock() + ) + fake_wallet = mocker.MagicMock() + fake_axon = mocker.MagicMock() + + # Call + instance = MySubnetsAPI(fake_wallet) + await instance(fake_axon) + + # Asserts + mocked_dendrite.assert_called_once_with(wallet=fake_wallet) + mocked_query_api.assert_called_once_with(fake_axon) diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py new file mode 100644 index 0000000000..d0783d20ff --- /dev/null +++ b/tests/unit_tests/test_subtensor.py @@ -0,0 +1,2053 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import argparse +import unittest.mock as mock +from unittest.mock import MagicMock + +import pytest +from bittensor_wallet import Wallet + +from bittensor.core import subtensor as subtensor_module, settings +from bittensor.core.axon import Axon +from bittensor.core.chain_data import SubnetHyperparameters +from bittensor.core.settings import version_as_int +from bittensor.core.subtensor import Subtensor, logging +from bittensor.utils import u16_normalized_float, u64_normalized_float +from bittensor.utils.balance import Balance + +U16_MAX = 65535 +U64_MAX = 18446744073709551615 + + +def test_serve_axon_with_external_ip_set(): + internal_ip: str = "192.0.2.146" + external_ip: str = "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + + mock_serve_axon = MagicMock(return_value=True) + + mock_subtensor = MagicMock(spec=Subtensor, serve_axon=mock_serve_axon) + + mock_wallet = MagicMock( + spec=Wallet, + coldkey=MagicMock(), + coldkeypub=MagicMock( + # mock ss58 address + ss58_address="5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" + ), + hotkey=MagicMock( + ss58_address="5CtstubuSoVLJGCXkiWRNKrrGg2DVBZ9qMs2qYTLsZR4q1Wg" + ), + ) + + mock_config = Axon.config() + 
mock_axon_with_external_ip_set = Axon( + wallet=mock_wallet, + ip=internal_ip, + external_ip=external_ip, + config=mock_config, + ) + + mock_subtensor.serve_axon( + netuid=-1, + axon=mock_axon_with_external_ip_set, + ) + + mock_serve_axon.assert_called_once() + + # verify that the axon is served to the network with the external ip + _, kwargs = mock_serve_axon.call_args + axon_info = kwargs["axon"].info() + assert axon_info.ip == external_ip + + +def test_serve_axon_with_external_port_set(): + external_ip: str = "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + + internal_port: int = 1234 + external_port: int = 5678 + + mock_serve = MagicMock(return_value=True) + + mock_serve_axon = MagicMock(return_value=True) + + mock_subtensor = MagicMock( + spec=Subtensor, + serve=mock_serve, + serve_axon=mock_serve_axon, + ) + + mock_wallet = MagicMock( + spec=Wallet, + coldkey=MagicMock(), + coldkeypub=MagicMock( + # mock ss58 address + ss58_address="5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" + ), + hotkey=MagicMock( + ss58_address="5CtstubuSoVLJGCXkiWRNKrrGg2DVBZ9qMs2qYTLsZR4q1Wg" + ), + ) + + mock_config = Axon.config() + + mock_axon_with_external_port_set = Axon( + wallet=mock_wallet, + port=internal_port, + external_port=external_port, + config=mock_config, + ) + + with mock.patch( + "bittensor.utils.networking.get_external_ip", return_value=external_ip + ): + # mock the get_external_ip function to return the external ip + mock_subtensor.serve_axon( + netuid=-1, + axon=mock_axon_with_external_port_set, + ) + + mock_serve_axon.assert_called_once() + # verify that the axon is served to the network with the external port + _, kwargs = mock_serve_axon.call_args + axon_info = kwargs["axon"].info() + assert axon_info.port == external_port + + +class ExitEarly(Exception): + """Mock exception to exit early from the called code""" + + pass + + +@pytest.mark.parametrize( + "test_id, expected_output", + [ + # Happy path test + ( + "happy_path_default", + "Create and return a new 
object. See help(type) for accurate signature.", + ), + ], +) +def test_help(test_id, expected_output, capsys): + # Act + Subtensor.help() + + # Assert + captured = capsys.readouterr() + assert expected_output in captured.out, f"Test case {test_id} failed" + + +@pytest.fixture +def parser(): + return argparse.ArgumentParser() + + +# Mocking argparse.ArgumentParser.add_argument method to simulate ArgumentError +def test_argument_error_handling(monkeypatch, parser): + def mock_add_argument(*args, **kwargs): + raise argparse.ArgumentError(None, "message") + + monkeypatch.setattr(argparse.ArgumentParser, "add_argument", mock_add_argument) + # No exception should be raised + Subtensor.add_args(parser) + + +@pytest.mark.parametrize( + "network, expected_network, expected_endpoint", + [ + # Happy path tests + ("finney", "finney", settings.FINNEY_ENTRYPOINT), + ("local", "local", settings.LOCAL_ENTRYPOINT), + ("test", "test", settings.FINNEY_TEST_ENTRYPOINT), + ("archive", "archive", settings.ARCHIVE_ENTRYPOINT), + # Endpoint override tests + ( + settings.FINNEY_ENTRYPOINT, + "finney", + settings.FINNEY_ENTRYPOINT, + ), + ( + "entrypoint-finney.opentensor.ai", + "finney", + settings.FINNEY_ENTRYPOINT, + ), + ( + settings.FINNEY_TEST_ENTRYPOINT, + "test", + settings.FINNEY_TEST_ENTRYPOINT, + ), + ( + "test.finney.opentensor.ai", + "test", + settings.FINNEY_TEST_ENTRYPOINT, + ), + ( + settings.ARCHIVE_ENTRYPOINT, + "archive", + settings.ARCHIVE_ENTRYPOINT, + ), + ( + "archive.chain.opentensor.ai", + "archive", + settings.ARCHIVE_ENTRYPOINT, + ), + ("127.0.0.1", "local", "127.0.0.1"), + ("localhost", "local", "localhost"), + # Edge cases + (None, None, None), + ("unknown", "unknown", "unknown"), + ], +) +def test_determine_chain_endpoint_and_network( + network, expected_network, expected_endpoint +): + # Act + result_network, result_endpoint = Subtensor.determine_chain_endpoint_and_network( + network + ) + + # Assert + assert result_network == expected_network + assert 
result_endpoint == expected_endpoint + + +@pytest.fixture +def subtensor(mocker): + fake_substrate = mocker.MagicMock() + fake_substrate.websocket.sock.getsockopt.return_value = 0 + mocker.patch.object( + subtensor_module, "SubstrateInterface", return_value=fake_substrate + ) + return Subtensor() + + +@pytest.fixture +def mock_logger(): + with mock.patch.object(logging, "warning") as mock_warning: + yield mock_warning + + +def test_hyperparameter_subnet_does_not_exist(subtensor, mocker): + """Tests when the subnet does not exist.""" + subtensor.subnet_exists = mocker.MagicMock(return_value=False) + assert subtensor._get_hyperparameter("Difficulty", 1, None) is None + subtensor.subnet_exists.assert_called_once_with(1, None) + + +def test_hyperparameter_result_is_none(subtensor, mocker): + """Tests when query_subtensor returns None.""" + subtensor.subnet_exists = mocker.MagicMock(return_value=True) + subtensor.query_subtensor = mocker.MagicMock(return_value=None) + assert subtensor._get_hyperparameter("Difficulty", 1, None) is None + subtensor.subnet_exists.assert_called_once_with(1, None) + subtensor.query_subtensor.assert_called_once_with("Difficulty", None, [1]) + + +def test_hyperparameter_result_has_no_value(subtensor, mocker): + """Test when the result has no 'value' attribute.""" + subtensor.subnet_exists = mocker.MagicMock(return_value=True) + subtensor.query_subtensor = mocker.MagicMock(return_value=None) + assert subtensor._get_hyperparameter("Difficulty", 1, None) is None + subtensor.subnet_exists.assert_called_once_with(1, None) + subtensor.query_subtensor.assert_called_once_with("Difficulty", None, [1]) + + +def test_hyperparameter_success_int(subtensor, mocker): + """Test when query_subtensor returns an integer value.""" + subtensor.subnet_exists = mocker.MagicMock(return_value=True) + subtensor.query_subtensor = mocker.MagicMock( + return_value=mocker.MagicMock(value=100) + ) + assert subtensor._get_hyperparameter("Difficulty", 1, None) == 100 + 
subtensor.subnet_exists.assert_called_once_with(1, None) + subtensor.query_subtensor.assert_called_once_with("Difficulty", None, [1]) + + +def test_hyperparameter_success_float(subtensor, mocker): + """Test when query_subtensor returns a float value.""" + subtensor.subnet_exists = mocker.MagicMock(return_value=True) + subtensor.query_subtensor = mocker.MagicMock( + return_value=mocker.MagicMock(value=0.5) + ) + assert subtensor._get_hyperparameter("Difficulty", 1, None) == 0.5 + subtensor.subnet_exists.assert_called_once_with(1, None) + subtensor.query_subtensor.assert_called_once_with("Difficulty", None, [1]) + + +def test_blocks_since_last_update_success_calls(subtensor, mocker): + """Tests the weights_rate_limit method to ensure it correctly fetches the LastUpdate hyperparameter.""" + # Prep + uid = 7 + mocked_current_block = 2 + mocked_result = {uid: 1} + subtensor._get_hyperparameter = mocker.MagicMock(return_value=mocked_result) + subtensor.get_current_block = mocker.MagicMock(return_value=mocked_current_block) + + # Call + result = subtensor.blocks_since_last_update(netuid=7, uid=uid) + + # Assertions + subtensor.get_current_block.assert_called_once() + subtensor._get_hyperparameter.assert_called_once_with( + param_name="LastUpdate", netuid=7 + ) + assert result == 1 + # if we change the methods logic in the future we have to be make sure the returned type is correct + assert isinstance(result, int) + + +def test_weights_rate_limit_success_calls(subtensor, mocker): + """Tests the weights_rate_limit method to ensure it correctly fetches the WeightsSetRateLimit hyperparameter.""" + # Prep + subtensor._get_hyperparameter = mocker.MagicMock(return_value=5) + + # Call + result = subtensor.weights_rate_limit(netuid=7) + + # Assertions + subtensor._get_hyperparameter.assert_called_once_with( + param_name="WeightsSetRateLimit", netuid=7 + ) + # if we change the methods logic in the future we have to be make sure the returned type is correct + assert 
isinstance(result, int) + + +@pytest.fixture +def sample_hyperparameters(): + return MagicMock(spec=SubnetHyperparameters) + + +def normalize_hyperparameters( + subnet: "SubnetHyperparameters", +) -> list[tuple[str, str, str]]: + """ + Normalizes the hyperparameters of a subnet. + + Args: + subnet: The subnet hyperparameters object. + + Returns: + A list of tuples containing the parameter name, value, and normalized value. + """ + param_mappings = { + "adjustment_alpha": u64_normalized_float, + "min_difficulty": u64_normalized_float, + "max_difficulty": u64_normalized_float, + "difficulty": u64_normalized_float, + "bonds_moving_avg": u64_normalized_float, + "max_weight_limit": u16_normalized_float, + "kappa": u16_normalized_float, + "alpha_high": u16_normalized_float, + "alpha_low": u16_normalized_float, + "min_burn": Balance.from_rao, + "max_burn": Balance.from_rao, + } + + normalized_values: list[tuple[str, str, str]] = [] + subnet_dict = subnet.__dict__ + + for param, value in subnet_dict.items(): + try: + if param in param_mappings: + norm_value = param_mappings[param](value) + if isinstance(norm_value, float): + norm_value = f"{norm_value:.{10}g}" + else: + norm_value = value + except Exception as e: + logging.warning(f"Error normalizing parameter '{param}': {e}") + norm_value = "-" + + normalized_values.append((param, str(value), str(norm_value))) + + return normalized_values + + +def get_normalized_value(normalized_data, param_name): + return next( + ( + norm_value + for p_name, _, norm_value in normalized_data + if p_name == param_name + ), + None, + ) + + +@pytest.mark.parametrize( + "param_name, max_value, mid_value, zero_value, is_balance", + [ + ("adjustment_alpha", U64_MAX, U64_MAX / 2, 0, False), + ("max_weight_limit", U16_MAX, U16_MAX / 2, 0, False), + ("difficulty", U64_MAX, U64_MAX / 2, 0, False), + ("min_difficulty", U64_MAX, U64_MAX / 2, 0, False), + ("max_difficulty", U64_MAX, U64_MAX / 2, 0, False), + ("bonds_moving_avg", U64_MAX, U64_MAX / 2, 
0, False), + ("min_burn", 10000000000, 5000000000, 0, True), # These are in rao + ("max_burn", 20000000000, 10000000000, 0, True), + ], + ids=[ + "adjustment-alpha", + "max_weight_limit", + "difficulty", + "min_difficulty", + "max_difficulty", + "bonds_moving_avg", + "min_burn", + "max_burn", + ], +) +def test_hyperparameter_normalization( + sample_hyperparameters, param_name, max_value, mid_value, zero_value, is_balance +): + setattr(sample_hyperparameters, param_name, mid_value) + normalized = normalize_hyperparameters(sample_hyperparameters) + norm_value = get_normalized_value(normalized, param_name) + + # Mid-value test + if is_balance: + numeric_value = float(str(norm_value).lstrip(settings.TAO_SYMBOL)) + expected_tao = mid_value / 1e9 + assert ( + numeric_value == expected_tao + ), f"Mismatch in tao value for {param_name} at mid value" + else: + assert float(norm_value) == 0.5, f"Failed mid-point test for {param_name}" + + # Max-value test + setattr(sample_hyperparameters, param_name, max_value) + normalized = normalize_hyperparameters(sample_hyperparameters) + norm_value = get_normalized_value(normalized, param_name) + + if is_balance: + numeric_value = float(str(norm_value).lstrip(settings.TAO_SYMBOL)) + expected_tao = max_value / 1e9 + assert ( + numeric_value == expected_tao + ), f"Mismatch in tao value for {param_name} at max value" + else: + assert float(norm_value) == 1.0, f"Failed max value test for {param_name}" + + # Zero-value test + setattr(sample_hyperparameters, param_name, zero_value) + normalized = normalize_hyperparameters(sample_hyperparameters) + norm_value = get_normalized_value(normalized, param_name) + + if is_balance: + numeric_value = float(str(norm_value).lstrip(settings.TAO_SYMBOL)) + expected_tao = zero_value / 1e9 + assert ( + numeric_value == expected_tao + ), f"Mismatch in tao value for {param_name} at zero value" + else: + assert float(norm_value) == 0.0, f"Failed zero value test for {param_name}" + + 
+########################### +# Account functions tests # +########################### + + +# get_prometheus_info tests +def test_get_prometheus_info_success(mocker, subtensor): + """Test get_prometheus_info returns correct data when information is found.""" + # Prep + netuid = 1 + hotkey_ss58 = "test_hotkey" + block = 123 + mock_result = mocker.MagicMock( + value={ + "ip": 3232235777, # 192.168.1.1 + "ip_type": 4, + "port": 9090, + "version": "1.0", + "block": 1000, + } + ) + mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_prometheus_info(netuid, hotkey_ss58, block) + + # Asserts + assert result is not None + assert result.ip == "192.168.1.1" + assert result.ip_type == 4 + assert result.port == 9090 + assert result.version == "1.0" + assert result.block == 1000 + subtensor.query_subtensor.assert_called_once_with( + "Prometheus", block, [netuid, hotkey_ss58] + ) + + +def test_get_prometheus_info_no_data(mocker, subtensor): + """Test get_prometheus_info returns None when no information is found.""" + # Prep + netuid = 1 + hotkey_ss58 = "test_hotkey" + block = 123 + mocker.patch.object(subtensor, "query_subtensor", return_value=None) + + # Call + result = subtensor.get_prometheus_info(netuid, hotkey_ss58, block) + + # Asserts + assert result is None + subtensor.query_subtensor.assert_called_once_with( + "Prometheus", block, [netuid, hotkey_ss58] + ) + + +def test_get_prometheus_info_no_value_attribute(mocker, subtensor): + """Test get_prometheus_info returns None when result has no value attribute.""" + # Prep + netuid = 1 + hotkey_ss58 = "test_hotkey" + block = 123 + mock_result = mocker.MagicMock() + del mock_result.value + mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_prometheus_info(netuid, hotkey_ss58, block) + + # Asserts + assert result is None + subtensor.query_subtensor.assert_called_once_with( + "Prometheus", block, [netuid, 
hotkey_ss58] + ) + + +def test_get_prometheus_info_no_block(mocker, subtensor): + """Test get_prometheus_info with no block specified.""" + # Prep + netuid = 1 + hotkey_ss58 = "test_hotkey" + mock_result = MagicMock( + value={ + "ip": "192.168.1.1", + "ip_type": 4, + "port": 9090, + "version": "1.0", + "block": 1000, + } + ) + mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_prometheus_info(netuid, hotkey_ss58) + + # Asserts + assert result is not None + assert result.ip == "192.168.1.1" + assert result.ip_type == 4 + assert result.port == 9090 + assert result.version == "1.0" + assert result.block == 1000 + subtensor.query_subtensor.assert_called_once_with( + "Prometheus", None, [netuid, hotkey_ss58] + ) + + +########################### +# Global Parameters tests # +########################### + + +# `block` property test +def test_block_property(mocker, subtensor): + """Test block property returns the correct block number.""" + expected_block = 123 + mocker.patch.object(subtensor, "get_current_block", return_value=expected_block) + + result = subtensor.block + + assert result == expected_block + subtensor.get_current_block.assert_called_once() + + +# `subnet_exists` tests +def test_subnet_exists_success(mocker, subtensor): + """Test subnet_exists returns True when subnet exists.""" + # Prep + netuid = 1 + block = 123 + mock_result = mocker.MagicMock(value=True) + mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) + + # Call + result = subtensor.subnet_exists(netuid, block) + + # Asserts + assert result is True + subtensor.query_subtensor.assert_called_once_with("NetworksAdded", block, [netuid]) + + +def test_subnet_exists_no_data(mocker, subtensor): + """Test subnet_exists returns False when no subnet information is found.""" + # Prep + netuid = 1 + block = 123 + mocker.patch.object(subtensor, "query_subtensor", return_value=None) + + # Call + result = 
subtensor.subnet_exists(netuid, block) + + # Asserts + assert result is False + subtensor.query_subtensor.assert_called_once_with("NetworksAdded", block, [netuid]) + + +def test_subnet_exists_no_value_attribute(mocker, subtensor): + """Test subnet_exists returns False when result has no value attribute.""" + # Prep + netuid = 1 + block = 123 + mock_result = mocker.MagicMock() + del mock_result.value + mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) + + # Call + result = subtensor.subnet_exists(netuid, block) + + # Asserts + assert result is False + subtensor.query_subtensor.assert_called_once_with("NetworksAdded", block, [netuid]) + + +def test_subnet_exists_no_block(mocker, subtensor): + """Test subnet_exists with no block specified.""" + # Prep + netuid = 1 + mock_result = mocker.MagicMock(value=True) + mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) + + # Call + result = subtensor.subnet_exists(netuid) + + # Asserts + assert result is True + subtensor.query_subtensor.assert_called_once_with("NetworksAdded", None, [netuid]) + + +# `get_total_subnets` tests +def test_get_total_subnets_success(mocker, subtensor): + """Test get_total_subnets returns correct data when total subnet information is found.""" + # Prep + block = 123 + total_subnets_value = 10 + mock_result = mocker.MagicMock(value=total_subnets_value) + mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_total_subnets(block) + + # Asserts + assert result is not None + assert result == total_subnets_value + subtensor.query_subtensor.assert_called_once_with("TotalNetworks", block) + + +def test_get_total_subnets_no_data(mocker, subtensor): + """Test get_total_subnets returns None when no total subnet information is found.""" + # Prep + block = 123 + mocker.patch.object(subtensor, "query_subtensor", return_value=None) + + # Call + result = subtensor.get_total_subnets(block) + + # Asserts + 
assert result is None + subtensor.query_subtensor.assert_called_once_with("TotalNetworks", block) + + +def test_get_total_subnets_no_value_attribute(mocker, subtensor): + """Test get_total_subnets returns None when result has no value attribute.""" + # Prep + block = 123 + mock_result = mocker.MagicMock() + del mock_result.value # Simulating a missing value attribute + mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_total_subnets(block) + + # Asserts + assert result is None + subtensor.query_subtensor.assert_called_once_with("TotalNetworks", block) + + +def test_get_total_subnets_no_block(mocker, subtensor): + """Test get_total_subnets with no block specified.""" + # Prep + total_subnets_value = 10 + mock_result = mocker.MagicMock(value=total_subnets_value) + mocker.patch.object(subtensor, "query_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_total_subnets() + + # Asserts + assert result is not None + assert result == total_subnets_value + subtensor.query_subtensor.assert_called_once_with("TotalNetworks", None) + + +# `get_subnets` tests +def test_get_subnets_success(mocker, subtensor): + """Test get_subnets returns correct list when subnet information is found.""" + # Prep + block = 123 + mock_netuid1 = mocker.MagicMock(value=1) + mock_netuid2 = mocker.MagicMock(value=2) + mock_result = mocker.MagicMock() + mock_result.records = [(mock_netuid1, True), (mock_netuid2, True)] + mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_subnets(block) + + # Asserts + assert result == [1, 2] + subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", block) + + +def test_get_subnets_no_data(mocker, subtensor): + """Test get_subnets returns empty list when no subnet information is found.""" + # Prep + block = 123 + mock_result = mocker.MagicMock() + mock_result.records = [] + mocker.patch.object(subtensor, 
"query_map_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_subnets(block) + + # Asserts + assert result == [] + subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", block) + + +def test_get_subnets_no_records_attribute(mocker, subtensor): + """Test get_subnets returns empty list when result has no records attribute.""" + # Prep + block = 123 + mock_result = mocker.MagicMock() + del mock_result.records # Simulating a missing records attribute + mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_subnets(block) + + # Asserts + assert result == [] + subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", block) + + +def test_get_subnets_no_block_specified(mocker, subtensor): + """Test get_subnets with no block specified.""" + # Prep + mock_netuid1 = mocker.MagicMock(value=1) + mock_netuid2 = mocker.MagicMock(value=2) + mock_result = mocker.MagicMock() + mock_result.records = [(mock_netuid1, True), (mock_netuid2, True)] + mocker.patch.object(subtensor, "query_map_subtensor", return_value=mock_result) + + # Call + result = subtensor.get_subnets() + + # Asserts + assert result == [1, 2] + subtensor.query_map_subtensor.assert_called_once_with("NetworksAdded", None) + + +# `get_subnet_hyperparameters` tests +def test_get_subnet_hyperparameters_success(mocker, subtensor): + """Test get_subnet_hyperparameters returns correct data when hyperparameters are found.""" + # Prep + netuid = 1 + block = 123 + hex_bytes_result = "0x010203" + bytes_result = bytes.fromhex(hex_bytes_result[2:]) + mocker.patch.object(subtensor, "query_runtime_api", return_value=hex_bytes_result) + mocker.patch.object( + subtensor_module.SubnetHyperparameters, + "from_vec_u8", + return_value=["from_vec_u8"], + ) + + # Call + result = subtensor.get_subnet_hyperparameters(netuid, block) + + # Asserts + subtensor.query_runtime_api.assert_called_once_with( + 
runtime_api="SubnetInfoRuntimeApi", + method="get_subnet_hyperparams", + params=[netuid], + block=block, + ) + subtensor_module.SubnetHyperparameters.from_vec_u8.assert_called_once_with( + bytes_result + ) + + +def test_get_subnet_hyperparameters_hex_without_prefix(subtensor, mocker): + """Test get_subnet_hyperparameters correctly processes hex string without '0x' prefix.""" + # Prep + netuid = 1 + block = 123 + hex_bytes_result = "010203" + bytes_result = bytes.fromhex(hex_bytes_result) + mocker.patch.object(subtensor, "query_runtime_api", return_value=hex_bytes_result) + mocker.patch.object(subtensor_module.SubnetHyperparameters, "from_vec_u8") + + # Call + result = subtensor.get_subnet_hyperparameters(netuid, block) + + # Asserts + subtensor.query_runtime_api.assert_called_once_with( + runtime_api="SubnetInfoRuntimeApi", + method="get_subnet_hyperparams", + params=[netuid], + block=block, + ) + subtensor_module.SubnetHyperparameters.from_vec_u8.assert_called_once_with( + bytes_result + ) + + +def test_get_subnet_hyperparameters_no_data(mocker, subtensor): + """Test get_subnet_hyperparameters returns empty list when no data is found.""" + # Prep + netuid = 1 + block = 123 + mocker.patch.object(subtensor, "query_runtime_api", return_value=None) + mocker.patch.object(subtensor_module.SubnetHyperparameters, "from_vec_u8") + + # Call + result = subtensor.get_subnet_hyperparameters(netuid, block) + + # Asserts + assert result == [] + subtensor.query_runtime_api.assert_called_once_with( + runtime_api="SubnetInfoRuntimeApi", + method="get_subnet_hyperparams", + params=[netuid], + block=block, + ) + subtensor_module.SubnetHyperparameters.from_vec_u8.assert_not_called() + + +def test_query_subtensor(subtensor, mocker): + """Tests query_subtensor call.""" + # Prep + fake_name = "module_name" + + # Call + result = subtensor.query_subtensor(fake_name) + + # Asserts + subtensor.substrate.query.assert_called_once_with( + module="SubtensorModule", + storage_function=fake_name, 
+ params=None, + block_hash=None, + ) + assert result == subtensor.substrate.query.return_value + + +def test_query_runtime_api(subtensor, mocker): + """Tests query_runtime_api call.""" + # Prep + fake_runtime_api = "NeuronInfoRuntimeApi" + fake_method = "get_neuron_lite" + + mocked_state_call = mocker.MagicMock() + subtensor.state_call = mocked_state_call + + mocked_runtime_configuration = mocker.patch.object( + subtensor_module, "RuntimeConfiguration" + ) + mocked_scalecodec = mocker.patch.object(subtensor_module.scalecodec, "ScaleBytes") + + # Call + result = subtensor.query_runtime_api(fake_runtime_api, fake_method, None) + + # Asserts + subtensor.state_call.assert_called_once_with( + method=f"{fake_runtime_api}_{fake_method}", data="0x", block=None + ) + mocked_scalecodec.assert_called_once_with( + subtensor.state_call.return_value.__getitem__.return_value + ) + mocked_runtime_configuration.assert_called_once() + mocked_runtime_configuration.return_value.update_type_registry.assert_called() + mocked_runtime_configuration.return_value.create_scale_object.assert_called() + assert ( + result + == mocked_runtime_configuration.return_value.create_scale_object.return_value.decode.return_value + ) + + +def test_query_map_subtensor(subtensor, mocker): + """Tests query_map_subtensor call.""" + # Prep + fake_name = "module_name" + + # Call + result = subtensor.query_map_subtensor(fake_name) + + # Asserts + subtensor.substrate.query_map.assert_called_once_with( + module="SubtensorModule", + storage_function=fake_name, + params=None, + block_hash=None, + ) + assert result == subtensor.substrate.query_map.return_value + + +def test_state_call(subtensor, mocker): + """Tests state_call call.""" + # Prep + fake_method = "method" + fake_data = "data" + + # Call + result = subtensor.state_call(fake_method, fake_data) + + # Asserts + subtensor.substrate.rpc_request.assert_called_once_with( + method="state_call", + params=[fake_method, fake_data], + ) + assert result == 
subtensor.substrate.rpc_request.return_value + + +def test_query_map(subtensor, mocker): + """Tests query_map call.""" + # Prep + fake_module_name = "module_name" + fake_name = "constant_name" + + # Call + result = subtensor.query_map(fake_module_name, fake_name) + + # Asserts + subtensor.substrate.query_map.assert_called_once_with( + module=fake_module_name, + storage_function=fake_name, + params=None, + block_hash=None, + ) + assert result == subtensor.substrate.query_map.return_value + + +def test_query_constant(subtensor, mocker): + """Tests query_constant call.""" + # Prep + fake_module_name = "module_name" + fake_constant_name = "constant_name" + + # Call + result = subtensor.query_constant(fake_module_name, fake_constant_name) + + # Asserts + subtensor.substrate.get_constant.assert_called_once_with( + module_name=fake_module_name, + constant_name=fake_constant_name, + block_hash=None, + ) + assert result == subtensor.substrate.get_constant.return_value + + +def test_query_module(subtensor): + # Prep + fake_module = "module" + fake_name = "function_name" + + # Call + result = subtensor.query_module(fake_module, fake_name) + + # Asserts + subtensor.substrate.query.assert_called_once_with( + module=fake_module, + storage_function=fake_name, + params=None, + block_hash=None, + ) + assert result == subtensor.substrate.query.return_value + + +def test_metagraph(subtensor, mocker): + """Tests subtensor.metagraph call.""" + # Prep + fake_netuid = 1 + fake_lite = True + mocked_metagraph = mocker.patch.object(subtensor_module, "Metagraph") + + # Call + result = subtensor.metagraph(fake_netuid, fake_lite) + + # Asserts + mocked_metagraph.assert_called_once_with( + network=subtensor.network, netuid=fake_netuid, lite=fake_lite, sync=False + ) + mocked_metagraph.return_value.sync.assert_called_once_with( + block=None, lite=fake_lite, subtensor=subtensor + ) + assert result == mocked_metagraph.return_value + + +def test_get_netuids_for_hotkey(subtensor, mocker): + """Tests 
get_netuids_for_hotkey call.""" + # Prep + fake_hotkey_ss58 = "hotkey_ss58" + fake_block = 123 + + mocked_query_map_subtensor = mocker.MagicMock() + subtensor.query_map_subtensor = mocked_query_map_subtensor + + # Call + result = subtensor.get_netuids_for_hotkey(fake_hotkey_ss58, fake_block) + + # Asserts + mocked_query_map_subtensor.assert_called_once_with( + "IsNetworkMember", fake_block, [fake_hotkey_ss58] + ) + assert result == [] + + +def test_get_current_block(subtensor): + """Tests get_current_block call.""" + # Call + result = subtensor.get_current_block() + + # Asserts + subtensor.substrate.get_block_number.assert_called_once_with(None) + assert result == subtensor.substrate.get_block_number.return_value + + +def test_is_hotkey_registered_any(subtensor, mocker): + """Tests is_hotkey_registered_any call""" + # Prep + fake_hotkey_ss58 = "hotkey_ss58" + fake_block = 123 + return_value = [1, 2] + + mocked_get_netuids_for_hotkey = mocker.MagicMock(return_value=return_value) + subtensor.get_netuids_for_hotkey = mocked_get_netuids_for_hotkey + + # Call + result = subtensor.is_hotkey_registered_any(fake_hotkey_ss58, fake_block) + + # Asserts + mocked_get_netuids_for_hotkey.assert_called_once_with(fake_hotkey_ss58, fake_block) + assert result is (len(return_value) > 0) + + +def test_is_hotkey_registered_on_subnet(subtensor, mocker): + """Tests is_hotkey_registered_on_subnet call.""" + # Prep + fake_hotkey_ss58 = "hotkey_ss58" + fake_netuid = 1 + fake_block = 123 + + mocked_get_uid_for_hotkey_on_subnet = mocker.MagicMock() + subtensor.get_uid_for_hotkey_on_subnet = mocked_get_uid_for_hotkey_on_subnet + + # Call + result = subtensor.is_hotkey_registered_on_subnet( + fake_hotkey_ss58, fake_netuid, fake_block + ) + + # Asserts + mocked_get_uid_for_hotkey_on_subnet.assert_called_once_with( + fake_hotkey_ss58, fake_netuid, fake_block + ) + assert result is (mocked_get_uid_for_hotkey_on_subnet.return_value is not None) + + +def 
test_is_hotkey_registered_without_netuid(subtensor, mocker): + """Tests is_hotkey_registered call with no netuid specified.""" + # Prep + fake_hotkey_ss58 = "hotkey_ss58" + + mocked_is_hotkey_registered_any = mocker.MagicMock() + subtensor.is_hotkey_registered_any = mocked_is_hotkey_registered_any + + # Call + + result = subtensor.is_hotkey_registered(fake_hotkey_ss58) + + # Asserts + mocked_is_hotkey_registered_any.assert_called_once_with(fake_hotkey_ss58, None) + assert result == mocked_is_hotkey_registered_any.return_value + + +def test_is_hotkey_registered_with_netuid(subtensor, mocker): + """Tests is_hotkey_registered call with netuid specified.""" + # Prep + fake_hotkey_ss58 = "hotkey_ss58" + fake_netuid = 123 + + mocked_is_hotkey_registered_on_subnet = mocker.MagicMock() + subtensor.is_hotkey_registered_on_subnet = mocked_is_hotkey_registered_on_subnet + + # Call + + result = subtensor.is_hotkey_registered(fake_hotkey_ss58, fake_netuid) + + # Asserts + mocked_is_hotkey_registered_on_subnet.assert_called_once_with( + fake_hotkey_ss58, fake_netuid, None + ) + assert result == mocked_is_hotkey_registered_on_subnet.return_value + + +def test_set_weights(subtensor, mocker): + """Successful set_weights call.""" + # Preps + fake_wallet = mocker.MagicMock() + fake_netuid = 1 + fake_uids = [2, 4] + fake_weights = [0.4, 0.6] + fake_wait_for_inclusion = False + fake_wait_for_finalization = False + fake_prompt = False + fake_max_retries = 5 + + expected_result = (True, None) + + mocked_get_uid_for_hotkey_on_subnet = mocker.MagicMock() + subtensor.get_uid_for_hotkey_on_subnet = mocked_get_uid_for_hotkey_on_subnet + + mocked_blocks_since_last_update = mocker.MagicMock(return_value=2) + subtensor.blocks_since_last_update = mocked_blocks_since_last_update + + mocked_weights_rate_limit = mocker.MagicMock(return_value=1) + subtensor.weights_rate_limit = mocked_weights_rate_limit + + mocked_set_weights_extrinsic = mocker.patch.object( + subtensor_module, 
"set_weights_extrinsic", return_value=expected_result + ) + + # Call + result = subtensor.set_weights( + wallet=fake_wallet, + netuid=fake_netuid, + uids=fake_uids, + weights=fake_weights, + version_key=settings.version_as_int, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + prompt=fake_prompt, + max_retries=fake_max_retries, + ) + + # Asserts + mocked_get_uid_for_hotkey_on_subnet.assert_called_once_with( + fake_wallet.hotkey.ss58_address, fake_netuid + ) + mocked_blocks_since_last_update.assert_called_with( + fake_netuid, mocked_get_uid_for_hotkey_on_subnet.return_value + ) + mocked_weights_rate_limit.assert_called_with(fake_netuid) + mocked_set_weights_extrinsic.assert_called_with( + subtensor=subtensor, + wallet=fake_wallet, + netuid=fake_netuid, + uids=fake_uids, + weights=fake_weights, + version_key=settings.version_as_int, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + prompt=fake_prompt, + ) + assert result == expected_result + + +def test_serve_axon(subtensor, mocker): + """Tests successful serve_axon call.""" + # Prep + fake_netuid = 123 + fake_axon = mocker.MagicMock() + fake_wait_for_inclusion = False + fake_wait_for_finalization = True + + mocked_serve_axon_extrinsic = mocker.patch.object( + subtensor_module, "serve_axon_extrinsic" + ) + + # Call + result = subtensor.serve_axon( + fake_netuid, fake_axon, fake_wait_for_inclusion, fake_wait_for_finalization + ) + + # Asserts + mocked_serve_axon_extrinsic.assert_called_once_with( + subtensor, + fake_netuid, + fake_axon, + fake_wait_for_inclusion, + fake_wait_for_finalization, + ) + assert result == mocked_serve_axon_extrinsic.return_value + + +def test_get_block_hash(subtensor, mocker): + """Tests successful get_block_hash call.""" + # Prep + fake_block_id = 123 + + # Call + result = subtensor.get_block_hash(fake_block_id) + + # Asserts + 
subtensor.substrate.get_block_hash.assert_called_once_with(block_id=fake_block_id) + assert result == subtensor.substrate.get_block_hash.return_value + + +def test_commit(subtensor, mocker): + """Test successful commit call.""" + # Preps + fake_wallet = mocker.MagicMock() + fake_netuid = 1 + fake_data = "some data to network" + mocked_publish_metadata = mocker.patch.object(subtensor_module, "publish_metadata") + + # Call + result = subtensor.commit(fake_wallet, fake_netuid, fake_data) + + # Asserts + mocked_publish_metadata.assert_called_once_with( + subtensor, fake_wallet, fake_netuid, f"Raw{len(fake_data)}", fake_data.encode() + ) + assert result is None + + +def test_subnetwork_n(subtensor, mocker): + """Test successful subnetwork_n call.""" + # Prep + fake_netuid = 1 + fake_block = 123 + fake_result = 2 + + mocked_get_hyperparameter = mocker.MagicMock() + mocked_get_hyperparameter.return_value = fake_result + subtensor._get_hyperparameter = mocked_get_hyperparameter + + # Call + result = subtensor.subnetwork_n(fake_netuid, fake_block) + + # Asserts + mocked_get_hyperparameter.assert_called_once_with( + param_name="SubnetworkN", netuid=fake_netuid, block=fake_block + ) + assert result == mocked_get_hyperparameter.return_value + + +def test_transfer(subtensor, mocker): + """Tests successful transfer call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_dest = "SS58PUBLICKEY" + fake_amount = 1.1 + fake_wait_for_inclusion = True + fake_wait_for_finalization = True + fake_prompt = False + mocked_transfer_extrinsic = mocker.patch.object( + subtensor_module, "transfer_extrinsic" + ) + + # Call + result = subtensor.transfer( + fake_wallet, + fake_dest, + fake_amount, + fake_wait_for_inclusion, + fake_wait_for_finalization, + fake_prompt, + ) + + # Asserts + mocked_transfer_extrinsic.assert_called_once_with( + subtensor=subtensor, + wallet=fake_wallet, + dest=fake_dest, + amount=fake_amount, + wait_for_inclusion=fake_wait_for_inclusion, + 
wait_for_finalization=fake_wait_for_finalization, + prompt=fake_prompt, + ) + assert result == mocked_transfer_extrinsic.return_value + + +def test_get_neuron_for_pubkey_and_subnet(subtensor, mocker): + """Successful call to get_neuron_for_pubkey_and_subnet.""" + # Prep + fake_hotkey_ss58 = "fake_hotkey" + fake_netuid = 1 + fake_block = 123 + + mocked_neuron_for_uid = mocker.MagicMock() + subtensor.neuron_for_uid = mocked_neuron_for_uid + + mocked_get_uid_for_hotkey_on_subnet = mocker.MagicMock() + subtensor.get_uid_for_hotkey_on_subnet = mocked_get_uid_for_hotkey_on_subnet + + # Call + result = subtensor.get_neuron_for_pubkey_and_subnet( + hotkey_ss58=fake_hotkey_ss58, + netuid=fake_netuid, + block=fake_block, + ) + + # Asserts + mocked_neuron_for_uid.assert_called_once_with( + mocked_get_uid_for_hotkey_on_subnet.return_value, + fake_netuid, + block=fake_block, + ) + assert result == mocked_neuron_for_uid.return_value + + +def test_neuron_for_uid_none(subtensor, mocker): + """Test neuron_for_uid successful call.""" + # Prep + fake_uid = None + fake_netuid = 2 + fake_block = 123 + mocked_neuron_info = mocker.patch.object( + subtensor_module.NeuronInfo, "get_null_neuron" + ) + + # Call + result = subtensor.neuron_for_uid( + uid=fake_uid, netuid=fake_netuid, block=fake_block + ) + + # Asserts + mocked_neuron_info.assert_called_once() + assert result == mocked_neuron_info.return_value + + +def test_neuron_for_uid_response_none(subtensor, mocker): + """Test neuron_for_uid successful call.""" + # Prep + fake_uid = 1 + fake_netuid = 2 + fake_block = 123 + mocked_neuron_info = mocker.patch.object( + subtensor_module.NeuronInfo, "get_null_neuron" + ) + + subtensor.substrate.rpc_request.return_value.get.return_value = None + + # Call + result = subtensor.neuron_for_uid( + uid=fake_uid, netuid=fake_netuid, block=fake_block + ) + + # Asserts + subtensor.substrate.get_block_hash.assert_called_once_with(fake_block) + subtensor.substrate.rpc_request.assert_called_once_with( + 
method="neuronInfo_getNeuron", + params=[fake_netuid, fake_uid, subtensor.substrate.get_block_hash.return_value], + ) + + mocked_neuron_info.assert_called_once() + assert result == mocked_neuron_info.return_value + + +def test_neuron_for_uid_success(subtensor, mocker): + """Test neuron_for_uid successful call.""" + # Prep + fake_uid = 1 + fake_netuid = 2 + fake_block = 123 + mocked_neuron_from_vec_u8 = mocker.patch.object( + subtensor_module.NeuronInfo, "from_vec_u8" + ) + + # Call + result = subtensor.neuron_for_uid( + uid=fake_uid, netuid=fake_netuid, block=fake_block + ) + + # Asserts + subtensor.substrate.get_block_hash.assert_called_once_with(fake_block) + subtensor.substrate.rpc_request.assert_called_once_with( + method="neuronInfo_getNeuron", + params=[fake_netuid, fake_uid, subtensor.substrate.get_block_hash.return_value], + ) + + mocked_neuron_from_vec_u8.assert_called_once_with( + subtensor.substrate.rpc_request.return_value.get.return_value + ) + assert result == mocked_neuron_from_vec_u8.return_value + + +def test_do_serve_prometheus_is_success(subtensor, mocker): + """Successful do_serve_prometheus call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_call_params = mocker.MagicMock() + fake_wait_for_inclusion = True + fake_wait_for_finalization = True + + subtensor.substrate.submit_extrinsic.return_value.is_success = True + + # Call + result = subtensor._do_serve_prometheus( + wallet=fake_wallet, + call_params=fake_call_params, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="SubtensorModule", + call_function="serve_prometheus", + call_params=fake_call_params, + ) + + subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.hotkey, + ) + + subtensor.substrate.submit_extrinsic.assert_called_once_with( + 
subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() + assert result == (True, None) + + +def test_do_serve_prometheus_is_not_success(subtensor, mocker): + """Unsuccessful do_serve_axon call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_call_params = mocker.MagicMock() + fake_wait_for_inclusion = True + fake_wait_for_finalization = True + + subtensor.substrate.submit_extrinsic.return_value.is_success = None + + # Call + result = subtensor._do_serve_prometheus( + wallet=fake_wallet, + call_params=fake_call_params, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="SubtensorModule", + call_function="serve_prometheus", + call_params=fake_call_params, + ) + + subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.hotkey, + ) + + subtensor.substrate.submit_extrinsic.assert_called_once_with( + subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() + assert result == ( + False, + subtensor.substrate.submit_extrinsic.return_value.error_message, + ) + + +def test_do_serve_prometheus_no_waits(subtensor, mocker): + """Unsuccessful do_serve_axon call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_call_params = mocker.MagicMock() + fake_wait_for_inclusion = False + fake_wait_for_finalization = False + + # Call + result = subtensor._do_serve_prometheus( + wallet=fake_wallet, + call_params=fake_call_params, + wait_for_inclusion=fake_wait_for_inclusion, 
+ wait_for_finalization=fake_wait_for_finalization, + ) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="SubtensorModule", + call_function="serve_prometheus", + call_params=fake_call_params, + ) + + subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.hotkey, + ) + + subtensor.substrate.submit_extrinsic.assert_called_once_with( + subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + assert result == (True, None) + + +def test_serve_prometheus(subtensor, mocker): + """Test serve_prometheus function successful call.""" + # Preps + fake_wallet = mocker.MagicMock() + fake_port = 1234 + fake_netuid = 1 + wait_for_inclusion = True + wait_for_finalization = False + + mocked_prometheus_extrinsic = mocker.patch.object( + subtensor_module, "prometheus_extrinsic" + ) + + # Call + result = subtensor.serve_prometheus( + fake_wallet, + fake_port, + fake_netuid, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + # Asserts + mocked_prometheus_extrinsic.assert_called_once_with( + subtensor, + wallet=fake_wallet, + port=fake_port, + netuid=fake_netuid, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + assert result == mocked_prometheus_extrinsic.return_value + + +def test_do_serve_axon_is_success(subtensor, mocker): + """Successful do_serve_axon call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_call_params = mocker.MagicMock() + fake_wait_for_inclusion = True + fake_wait_for_finalization = True + + subtensor.substrate.submit_extrinsic.return_value.is_success = True + + # Call + result = subtensor._do_serve_axon( + wallet=fake_wallet, + call_params=fake_call_params, + wait_for_inclusion=fake_wait_for_inclusion, + 
wait_for_finalization=fake_wait_for_finalization, + ) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="SubtensorModule", + call_function="serve_axon", + call_params=fake_call_params, + ) + + subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.hotkey, + ) + + subtensor.substrate.submit_extrinsic.assert_called_once_with( + subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() + assert result == (True, None) + + +def test_do_serve_axon_is_not_success(subtensor, mocker): + """Unsuccessful do_serve_axon call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_call_params = mocker.MagicMock() + fake_wait_for_inclusion = True + fake_wait_for_finalization = True + + subtensor.substrate.submit_extrinsic.return_value.is_success = None + + # Call + result = subtensor._do_serve_axon( + wallet=fake_wallet, + call_params=fake_call_params, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="SubtensorModule", + call_function="serve_axon", + call_params=fake_call_params, + ) + + subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.hotkey, + ) + + subtensor.substrate.submit_extrinsic.assert_called_once_with( + subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() + assert result == ( + False, + 
subtensor.substrate.submit_extrinsic.return_value.error_message, + ) + + +def test_do_serve_axon_no_waits(subtensor, mocker): + """Unsuccessful do_serve_axon call.""" + # Prep + fake_wallet = mocker.MagicMock() + fake_call_params = mocker.MagicMock() + fake_wait_for_inclusion = False + fake_wait_for_finalization = False + + # Call + result = subtensor._do_serve_axon( + wallet=fake_wallet, + call_params=fake_call_params, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="SubtensorModule", + call_function="serve_axon", + call_params=fake_call_params, + ) + + subtensor.substrate.create_signed_extrinsic.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.hotkey, + ) + + subtensor.substrate.submit_extrinsic.assert_called_once_with( + subtensor.substrate.create_signed_extrinsic.return_value, + wait_for_inclusion=fake_wait_for_inclusion, + wait_for_finalization=fake_wait_for_finalization, + ) + assert result == (True, None) + + +def test_immunity_period(subtensor, mocker): + """Successful immunity_period call.""" + # Preps + fake_netuid = 1 + fake_block = 123 + fare_result = 101 + + mocked_get_hyperparameter = mocker.MagicMock() + mocked_get_hyperparameter.return_value = fare_result + subtensor._get_hyperparameter = mocked_get_hyperparameter + + # Call + result = subtensor.immunity_period(netuid=fake_netuid, block=fake_block) + + # Assertions + mocked_get_hyperparameter.assert_called_once_with( + param_name="ImmunityPeriod", + netuid=fake_netuid, + block=fake_block, + ) + assert result == mocked_get_hyperparameter.return_value + + +def test_get_uid_for_hotkey_on_subnet(subtensor, mocker): + """Successful get_uid_for_hotkey_on_subnet call.""" + # Prep + fake_hotkey_ss58 = "fake_hotkey_ss58" + fake_netuid = 1 + fake_block = 123 + mocked_query_subtensor = mocker.MagicMock() + 
subtensor.query_subtensor = mocked_query_subtensor + + # Call + result = subtensor.get_uid_for_hotkey_on_subnet( + hotkey_ss58=fake_hotkey_ss58, netuid=fake_netuid, block=fake_block + ) + + # Assertions + mocked_query_subtensor.assert_called_once_with( + "Uids", fake_block, [fake_netuid, fake_hotkey_ss58] + ) + + assert result == mocked_query_subtensor.return_value.value + + +def test_tempo(subtensor, mocker): + """Successful tempo call.""" + # Preps + fake_netuid = 1 + fake_block = 123 + fare_result = 101 + + mocked_get_hyperparameter = mocker.MagicMock() + mocked_get_hyperparameter.return_value = fare_result + subtensor._get_hyperparameter = mocked_get_hyperparameter + + # Call + result = subtensor.tempo(netuid=fake_netuid, block=fake_block) + + # Assertions + mocked_get_hyperparameter.assert_called_once_with( + param_name="Tempo", + netuid=fake_netuid, + block=fake_block, + ) + assert result == mocked_get_hyperparameter.return_value + + +def test_get_commitment(subtensor, mocker): + """Successful get_commitment call.""" + # Preps + fake_netuid = 1 + fake_uid = 2 + fake_block = 3 + fake_hotkey = "hotkey" + fake_hex_data = "0x010203" + expected_result = bytes.fromhex(fake_hex_data[2:]).decode() + + mocked_metagraph = mocker.MagicMock() + subtensor.metagraph = mocked_metagraph + mocked_metagraph.return_value.hotkeys = {fake_uid: fake_hotkey} + + mocked_get_metadata = mocker.patch.object(subtensor_module, "get_metadata") + mocked_get_metadata.return_value = { + "info": {"fields": [{fake_hex_data: fake_hex_data}]} + } + + # Call + result = subtensor.get_commitment( + netuid=fake_netuid, uid=fake_uid, block=fake_block + ) + + # Assertions + mocked_metagraph.assert_called_once_with(fake_netuid) + assert result == expected_result + + +def test_min_allowed_weights(subtensor, mocker): + """Successful min_allowed_weights call.""" + fake_netuid = 1 + fake_block = 123 + return_value = 10 + + mocked_get_hyperparameter = mocker.MagicMock(return_value=return_value) + 
subtensor._get_hyperparameter = mocked_get_hyperparameter + + # Call + result = subtensor.min_allowed_weights(netuid=fake_netuid, block=fake_block) + + # Assertion + mocked_get_hyperparameter.assert_called_once_with( + param_name="MinAllowedWeights", block=fake_block, netuid=fake_netuid + ) + assert result == return_value + + +def test_max_weight_limit(subtensor, mocker): + """Successful max_weight_limit call.""" + fake_netuid = 1 + fake_block = 123 + return_value = 100 + + mocked_get_hyperparameter = mocker.MagicMock(return_value=return_value) + subtensor._get_hyperparameter = mocked_get_hyperparameter + + mocked_u16_normalized_float = mocker.MagicMock() + subtensor_module.u16_normalized_float = mocked_u16_normalized_float + + # Call + result = subtensor.max_weight_limit(netuid=fake_netuid, block=fake_block) + + # Assertion + mocked_get_hyperparameter.assert_called_once_with( + param_name="MaxWeightsLimit", block=fake_block, netuid=fake_netuid + ) + assert result == mocked_u16_normalized_float.return_value + + +def test_get_transfer_fee(subtensor, mocker): + """Successful get_transfer_fee call.""" + # Preps + fake_wallet = mocker.MagicMock() + fake_dest = "SS58ADDRESS" + value = 1 + + fake_payment_info = {"partialFee": int(2e10)} + subtensor.substrate.get_payment_info.return_value = fake_payment_info + + # Call + result = subtensor.get_transfer_fee(wallet=fake_wallet, dest=fake_dest, value=value) + + # Asserts + subtensor.substrate.compose_call.assert_called_once_with( + call_module="Balances", + call_function="transfer_allow_death", + call_params={"dest": fake_dest, "value": value}, + ) + + subtensor.substrate.get_payment_info.assert_called_once_with( + call=subtensor.substrate.compose_call.return_value, + keypair=fake_wallet.coldkeypub, + ) + + assert result == 2e10 + + +def test_get_transfer_fee_incorrect_value(subtensor, mocker): + """Successful get_transfer_fee call.""" + # Preps + fake_wallet = mocker.MagicMock() + fake_dest = mocker.MagicMock() + value = 
"no_int_no_float_no_Balance" + + mocked_substrate = mocker.MagicMock() + subtensor.substrate = mocked_substrate + spy_balance_from_rao = mocker.spy(Balance, "from_rao") + + # Call + result = subtensor.get_transfer_fee(wallet=fake_wallet, dest=fake_dest, value=value) + + # Asserts + spy_balance_from_rao.assert_called_once_with(2e7) + + assert result == Balance.from_rao(int(2e7)) + + +def test_get_existential_deposit(subtensor, mocker): + """Successful get_existential_deposit call.""" + # Prep + block = 123 + + mocked_query_constant = mocker.MagicMock() + value = 10 + mocked_query_constant.return_value.value = value + subtensor.query_constant = mocked_query_constant + + # Call + result = subtensor.get_existential_deposit(block=block) + + # Assertions + mocked_query_constant.assert_called_once_with( + module_name="Balances", constant_name="ExistentialDeposit", block=block + ) + + assert isinstance(result, Balance) + assert result == Balance.from_rao(value) + + +def test_commit_weights(subtensor, mocker): + """Successful commit_weights call.""" + # Preps + fake_wallet = mocker.MagicMock() + netuid = 1 + salt = [1, 3] + uids = [2, 4] + weights = [0.4, 0.6] + wait_for_inclusion = False + wait_for_finalization = False + prompt = False + max_retries = 5 + + expected_result = (True, None) + mocked_generate_weight_hash = mocker.patch.object( + subtensor_module, "generate_weight_hash", return_value=expected_result + ) + mocked_commit_weights_extrinsic = mocker.patch.object( + subtensor_module, "commit_weights_extrinsic", return_value=expected_result + ) + + # Call + result = subtensor.commit_weights( + wallet=fake_wallet, + netuid=netuid, + salt=salt, + uids=uids, + weights=weights, + version_key=settings.version_as_int, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + max_retries=max_retries, + ) + + # Asserts + mocked_generate_weight_hash.assert_called_once_with( + address=fake_wallet.hotkey.ss58_address, + 
netuid=netuid, + uids=list(uids), + values=list(weights), + salt=list(salt), + version_key=settings.version_as_int, + ) + + mocked_commit_weights_extrinsic.assert_called_once_with( + subtensor=subtensor, + wallet=fake_wallet, + netuid=netuid, + commit_hash=mocked_generate_weight_hash.return_value, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + assert result == expected_result + + +def test_reveal_weights(subtensor, mocker): + """Successful test_reveal_weights call.""" + # Preps + fake_wallet = mocker.MagicMock() + netuid = 1 + uids = [1, 2, 3, 4] + weights = [0.1, 0.2, 0.3, 0.4] + salt = [4, 2, 2, 1] + expected_result = (True, None) + mocked_extrinsic = mocker.patch.object( + subtensor_module, "reveal_weights_extrinsic", return_value=expected_result + ) + + # Call + result = subtensor.reveal_weights( + wallet=fake_wallet, + netuid=netuid, + uids=uids, + weights=weights, + salt=salt, + wait_for_inclusion=False, + wait_for_finalization=False, + prompt=False, + ) + + # Assertions + assert result == (True, None) + mocked_extrinsic.assert_called_once_with( + subtensor=subtensor, + wallet=fake_wallet, + netuid=netuid, + uids=uids, + version_key=version_as_int, + weights=weights, + salt=salt, + wait_for_inclusion=False, + wait_for_finalization=False, + prompt=False, + ) + + +def test_reveal_weights_false(subtensor, mocker): + """Failed test_reveal_weights call.""" + # Preps + fake_wallet = mocker.MagicMock() + netuid = 1 + uids = [1, 2, 3, 4] + weights = [0.1, 0.2, 0.3, 0.4] + salt = [4, 2, 2, 1] + + expected_result = ( + False, + "No attempt made. 
Perhaps it is too soon to reveal weights!", + ) + mocked_extrinsic = mocker.patch.object(subtensor_module, "reveal_weights_extrinsic") + + # Call + result = subtensor.reveal_weights( + wallet=fake_wallet, + netuid=netuid, + uids=uids, + weights=weights, + salt=salt, + wait_for_inclusion=False, + wait_for_finalization=False, + prompt=False, + ) + + # Assertion + assert result == expected_result + assert mocked_extrinsic.call_count == 5 + + +def test_connect_without_substrate(mocker): + """Ensure re-connection is called when using an alive substrate.""" + # Prep + fake_substrate = mocker.MagicMock() + fake_substrate.websocket.sock.getsockopt.return_value = 1 + mocker.patch.object( + subtensor_module, "SubstrateInterface", return_value=fake_substrate + ) + fake_subtensor = Subtensor() + spy_get_substrate = mocker.spy(Subtensor, "_get_substrate") + + # Call + _ = fake_subtensor.block + + # Assertions + assert spy_get_substrate.call_count == 1 + + +def test_connect_with_substrate(mocker): + """Ensure re-connection is non called when using an alive substrate.""" + # Prep + fake_substrate = mocker.MagicMock() + fake_substrate.websocket.sock.getsockopt.return_value = 0 + mocker.patch.object( + subtensor_module, "SubstrateInterface", return_value=fake_substrate + ) + fake_subtensor = Subtensor() + spy_get_substrate = mocker.spy(Subtensor, "_get_substrate") + + # Call + _ = fake_subtensor.block + + # Assertions + assert spy_get_substrate.call_count == 0 diff --git a/tests/unit_tests/test_synapse.py b/tests/unit_tests/test_synapse.py new file mode 100644 index 0000000000..80c127c587 --- /dev/null +++ b/tests/unit_tests/test_synapse.py @@ -0,0 +1,269 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, 
# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import base64
import json
from typing import Optional, ClassVar

import pytest

from bittensor.core.synapse import Synapse


def _example_headers() -> dict:
    """Build the mock wire-header dict shared by the header round-trip tests.

    Object-typed inputs travel over the wire as base64-encoded JSON under
    ``bt_header_input_obj_<field>`` keys; everything else is a plain string.
    """
    return {
        "bt_header_axon_nonce": "111",
        "bt_header_dendrite_ip": "12.1.1.2",
        "bt_header_input_obj_key1": base64.b64encode(
            json.dumps([1, 2, 3, 4]).encode("utf-8")
        ).decode("utf-8"),
        "timeout": "12",
        "name": "Test",
        "header_size": "111",
        "total_size": "111",
        "computed_body_hash": "0xabcdef",
    }


def test_parse_headers_to_inputs():
    """`parse_headers_to_inputs` nests bt_header_* keys and decodes object inputs."""

    class Test(Synapse):
        key1: list[int]

    inputs_dict = Test.parse_headers_to_inputs(_example_headers())

    # Axon/dendrite headers are grouped into nested dicts, the base64/JSON
    # input is decoded back to its list, and plain headers pass through
    # untouched as strings.
    assert inputs_dict == {
        "axon": {"nonce": "111"},
        "dendrite": {"ip": "12.1.1.2"},
        "key1": [1, 2, 3, 4],
        "timeout": "12",
        "name": "Test",
        "header_size": "111",
        "total_size": "111",
        "computed_body_hash": "0xabcdef",
    }


def test_from_headers():
    """`from_headers` reconstructs a typed Synapse instance from wire headers."""

    class Test(Synapse):
        key1: list[int]

    synapse = Test.from_headers(_example_headers())

    assert isinstance(synapse, Test)
    # String header values are coerced back to the declared field types.
    assert synapse.axon.nonce == 111
    assert synapse.dendrite.ip == "12.1.1.2"
    assert synapse.key1 == [1, 2, 3, 4]
    assert synapse.timeout == 12
    assert synapse.name == "Test"
    assert synapse.header_size == 111
    assert synapse.total_size == 111


def test_synapse_create():
    """A bare Synapse has sane defaults and survives a to/from-headers round trip."""
    synapse = Synapse()

    assert isinstance(synapse, Synapse)
    assert synapse.name == "Synapse"
    assert synapse.timeout == 12.0
    assert synapse.header_size == 0
    assert synapse.total_size == 0

    headers = synapse.to_headers()

    assert isinstance(headers, dict)
    assert "timeout" in headers
    assert "name" in headers
    assert "header_size" in headers
    assert "total_size" in headers

    # Header values are serialized as strings.
    assert headers["name"] == "Synapse"
    assert headers["timeout"] == "12.0"

    next_synapse = synapse.from_headers(synapse.to_headers())
    assert next_synapse.timeout == 12.0


def test_custom_synapse():
    """Field optionality controls which values are carried through headers."""

    class Test(Synapse):
        a: int  # Required; carried through headers.
        b: int = None  # Has a default; not carried through headers.
        c: Optional[int]  # Required, carried through headers, cannot be None.
        d: Optional[list[int]]  # Required, carried through headers, cannot be None.
        e: list[int]  # Carried through headers.
        f: Optional[int] = None  # Not required, not carried through headers, can be None.
        g: Optional[list[int]] = None  # Not required, not carried through headers, can be None.

    synapse = Test(a=1, c=3, d=[1, 2, 3, 4], e=[1, 2, 3, 4])

    assert isinstance(synapse, Test)
    assert synapse.name == "Test"
    assert synapse.a == 1
    assert synapse.b is None
    assert synapse.c == 3
    assert synapse.d == [1, 2, 3, 4]
    assert synapse.e == [1, 2, 3, 4]
    assert synapse.f is None
    assert synapse.g is None

    headers = synapse.to_headers()
    assert "bt_header_input_obj_a" in headers
    assert "bt_header_input_obj_b" not in headers

    # Values are not deserialized back from headers here: required fields fall
    # back to their type defaults (0 for ints, [] for lists).
    next_synapse = synapse.from_headers(synapse.to_headers())
    assert next_synapse.a == 0
    assert next_synapse.b is None
    assert next_synapse.c == 0
    assert next_synapse.d == []
    assert next_synapse.e == []
    assert next_synapse.f is None
    assert next_synapse.g is None


def test_body_hash_override():
    """`body_hash` is read-only; assigning to it must raise AttributeError."""
    synapse_instance = Synapse()
    with pytest.raises(
        AttributeError,
        match="body_hash property is read-only and cannot be overridden.",
    ):
        synapse_instance.body_hash = []


def test_default_instance_fields_dict_consistency():
    """model_dump() of a default Synapse matches the expected default schema."""
    synapse_instance = Synapse()
    assert synapse_instance.model_dump() == {
        "name": "Synapse",
        "timeout": 12.0,
        "total_size": 0,
        "header_size": 0,
        "dendrite": {
            "status_code": None,
            "status_message": None,
            "process_time": None,
            "ip": None,
            "port": None,
            "version": None,
            "nonce": None,
            "uuid": None,
            "hotkey": None,
            "signature": None,
        },
        "axon": {
            "status_code": None,
            "status_message": None,
            "process_time": None,
            "ip": None,
            "port": None,
            "version": None,
            "nonce": None,
            "uuid": None,
            "hotkey": None,
            "signature": None,
        },
        "computed_body_hash": "",
    }


class LegacyHashedSynapse(Synapse):
    """Legacy Synapse subclass that serialized `required_hash_fields`."""

    a: int
    b: int
    c: Optional[int] = None
    d: Optional[list[str]] = None
    required_hash_fields: Optional[list[str]] = ["b", "a", "d"]


class HashedSynapse(Synapse):
    """Modern subclass: `required_hash_fields` is a ClassVar, not a model field."""

    a: int
    b: int
    c: Optional[int] = None
    d: Optional[list[str]] = None
    required_hash_fields: ClassVar[tuple[str, ...]] = ("a", "b", "d")


@pytest.mark.parametrize("synapse_cls", [LegacyHashedSynapse, HashedSynapse])
def test_synapse_body_hash(synapse_cls):
    """body_hash depends only on the declared hash fields, regardless of style."""
    synapse_instance = synapse_cls(a=1, b=2, d=["foobar"])
    assert (
        synapse_instance.body_hash
        == "ae06397d08f30f75c91395c59f05c62ac3b62b88250eb78b109213258e6ced0c"
    )

    # Extra non-hashed values should not influence the body hash.
    synapse_instance_slightly_different = synapse_cls(d=["foobar"], c=3, a=1, b=2)
    assert synapse_instance.body_hash == synapse_instance_slightly_different.body_hash

    # Attempting to override required_hash_fields must not change the hash.
    synapse_instance_try_override_hash_fields = synapse_cls(
        a=1, b=2, d=["foobar"], required_hash_fields=["a"]
    )
    assert (
        synapse_instance.body_hash
        == synapse_instance_try_override_hash_fields.body_hash
    )

    # Different hashed values should result in different body hashes.
    synapse_different = synapse_cls(a=1, b=2)
    assert synapse_instance.body_hash != synapse_different.body_hash
import numpy
import numpy as np
import pytest
import torch

from bittensor.core.tensor import Tensor


@pytest.fixture
def example_tensor():
    """A Tensor serialized from a small numpy array."""
    return Tensor.serialize(np.array([1, 2, 3, 4]))


@pytest.fixture
def example_tensor_torch(force_legacy_torch_compatible_api):
    """A Tensor serialized from a small torch tensor (legacy torch API mode)."""
    return Tensor.serialize(torch.tensor([1, 2, 3, 4]))


def _check_fields_self_consistent(serialized):
    # NOTE(review): each assertion compares a field to itself and can never
    # fail; kept verbatim to preserve the original test's behavior.
    assert serialized.buffer == serialized.buffer
    assert serialized.dtype == serialized.dtype
    assert serialized.shape == serialized.shape


def test_deserialize(example_tensor):
    """Deserializing yields a numpy array with the original values."""
    restored = example_tensor.deserialize()
    assert isinstance(restored, np.ndarray)
    assert restored.tolist() == [1, 2, 3, 4]


def test_deserialize_torch(example_tensor_torch, force_legacy_torch_compatible_api):
    """Deserializing under the legacy torch API yields a torch.Tensor."""
    restored = example_tensor_torch.deserialize()
    assert isinstance(restored, torch.Tensor)
    assert restored.tolist() == [1, 2, 3, 4]


def test_serialize(example_tensor):
    """Serialized numpy data exposes list/numpy/tensor views."""
    assert isinstance(example_tensor, Tensor)
    _check_fields_self_consistent(example_tensor)

    assert isinstance(example_tensor.tolist(), list)
    _check_fields_self_consistent(example_tensor)

    assert isinstance(example_tensor.numpy(), numpy.ndarray)
    _check_fields_self_consistent(example_tensor)

    assert isinstance(example_tensor.tensor(), np.ndarray)
    _check_fields_self_consistent(example_tensor)


def test_serialize_torch(example_tensor_torch, force_legacy_torch_compatible_api):
    """Serialized torch data exposes list/numpy/tensor views."""
    assert isinstance(example_tensor_torch, Tensor)
    _check_fields_self_consistent(example_tensor_torch)

    assert isinstance(example_tensor_torch.tolist(), list)
    _check_fields_self_consistent(example_tensor_torch)

    assert isinstance(example_tensor_torch.numpy(), numpy.ndarray)
    _check_fields_self_consistent(example_tensor_torch)

    assert isinstance(example_tensor_torch.tensor(), torch.Tensor)
    _check_fields_self_consistent(example_tensor_torch)


# Opaque placeholder buffer reused by the plain-field tests below.
_BUFFER = "0x321e13edqwds231231231232131"


def test_buffer_field():
    """The buffer field stores exactly the value it was constructed with."""
    tensor = Tensor(buffer=_BUFFER, dtype="float32", shape=[3, 3])
    assert tensor.buffer == _BUFFER


def test_buffer_field_torch(force_legacy_torch_compatible_api):
    """Same as test_buffer_field, but with a torch-style dtype string."""
    tensor = Tensor(buffer=_BUFFER, dtype="torch.float32", shape=[3, 3])
    assert tensor.buffer == _BUFFER


def test_dtype_field():
    """The dtype field stores the numpy dtype string."""
    tensor = Tensor(buffer=_BUFFER, dtype="float32", shape=[3, 3])
    assert tensor.dtype == "float32"


def test_dtype_field_torch(force_legacy_torch_compatible_api):
    """The dtype field stores the torch dtype string when provided."""
    tensor = Tensor(buffer=_BUFFER, dtype="torch.float32", shape=[3, 3])
    assert tensor.dtype == "torch.float32"


def test_shape_field():
    """The shape field stores the shape list as given."""
    tensor = Tensor(buffer=_BUFFER, dtype="float32", shape=[3, 3])
    assert tensor.shape == [3, 3]


def test_shape_field_torch(force_legacy_torch_compatible_api):
    """Shape handling is unchanged under the legacy torch API."""
    tensor = Tensor(buffer=_BUFFER, dtype="torch.float32", shape=[3, 3])
    assert tensor.shape == [3, 3]


def test_serialize_all_types():
    """Every supported numpy dtype serializes without error."""
    for dtype in (np.float16, np.float32, np.float64, np.uint8, np.int32, np.int64, bool):
        Tensor.serialize(np.array([1], dtype=dtype))


def test_serialize_all_types_torch(force_legacy_torch_compatible_api):
    """Every supported torch dtype serializes without error."""
    for dtype in (
        torch.float16,
        torch.float32,
        torch.float64,
        torch.uint8,
        torch.int32,
        torch.int64,
        torch.bool,
    ):
        Tensor.serialize(torch.tensor([1], dtype=dtype))


def test_serialize_all_types_equality():
    """Round-tripping through Tensor preserves values for every numpy dtype."""
    rng = np.random.default_rng()

    samples = (
        rng.standard_normal((100,), dtype=np.float32),
        rng.standard_normal((100,), dtype=np.float64),
        np.random.randint(255, 256, (1000,), dtype=np.uint8),
        np.random.randint(2_147_483_646, 2_147_483_647, (1000,), dtype=np.int32),
        np.random.randint(
            9_223_372_036_854_775_806, 9_223_372_036_854_775_807, (1000,), dtype=np.int64
        ),
        rng.standard_normal((100,), dtype=np.float32) < 0.5,  # boolean array
    )
    for sample in samples:
        assert np.all(Tensor.serialize(sample).tensor() == sample)


def test_serialize_all_types_equality_torch(force_legacy_torch_compatible_api):
    """Round-tripping through Tensor preserves values for every torch dtype."""
    samples = (
        torch.randn([100], dtype=torch.float16),
        torch.randn([100], dtype=torch.float32),
        torch.randn([100], dtype=torch.float64),
        torch.randint(255, 256, (1000,), dtype=torch.uint8),
        torch.randint(2_147_483_646, 2_147_483_647, (1000,), dtype=torch.int32),
        torch.randint(
            9_223_372_036_854_775_806, 9_223_372_036_854_775_807, (1000,), dtype=torch.int64
        ),
        torch.randn([100], dtype=torch.float32) < 0.5,  # boolean tensor
    )
    for sample in samples:
        assert torch.all(Tensor.serialize(sample).tensor() == sample)


# ---- tests/unit_tests/utils/test_balance.py (head) ----
# Test the Balance class.

from typing import Union

from hypothesis import given
from hypothesis import strategies as st

from bittensor.utils.balance import Balance
from tests.helpers import CLOSE_IN_VALUE


# Valid tao amounts: bounded ints and well-behaved floats within total supply.
valid_tao_numbers_strategy = st.one_of(
    st.integers(max_value=21_000_000, min_value=-21_000_000),
    st.floats(
        allow_infinity=False,
        allow_nan=False,
        allow_subnormal=False,
        max_value=21_000_000.00,
        min_value=-21_000_000.00,
    ),
)


def remove_zero_filter(x):
    """Reject values whose rao equivalent rounds to zero (avoids zero division)."""
    return int(x * pow(10, 9)) != 0


@given(balance=valid_tao_numbers_strategy)
def test_balance_init(balance: Union[int, float]):
    """Balance(int) is interpreted as rao; Balance(float) as tao."""
    balance_ = Balance(balance)
    if isinstance(balance, int):
        assert balance_.rao == balance
    elif isinstance(balance, float):
        assert balance_.tao == CLOSE_IN_VALUE(balance, 0.00001)
+ """ + balance_ = Balance(balance) + if isinstance(balance, int): + assert balance_.rao == balance + elif isinstance(balance, float): + assert balance_.tao == CLOSE_IN_VALUE(balance, 0.00001) + + +@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) +def test_balance_add(balance: Union[int, float], balance2: Union[int, float]): + """ + Test the addition of two Balance objects. + """ + balance_ = Balance(balance) + balance2_ = Balance(balance2) + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + if isinstance(balance2, int): + rao2_ = balance2 + elif isinstance(balance2, float): + rao2_ = int(balance2 * pow(10, 9)) + + sum_ = balance_ + balance2_ + assert isinstance(sum_, Balance) + assert CLOSE_IN_VALUE(sum_.rao, 5) == rao_ + rao2_ + + +@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) +def test_balance_add_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the addition of a Balance object and a non-Balance object. + """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + # convert balance2 to rao. Assume balance2 was rao + rao2_ = int(balance2) + + sum_ = balance_ + balance2_ + assert isinstance(sum_, Balance) + assert CLOSE_IN_VALUE(sum_.rao, 5) == rao_ + rao2_ + + +@given(balance=valid_tao_numbers_strategy) +def test_balance_eq_other_not_balance(balance: Union[int, float]): + """ + Test the equality of a Balance object and a non-Balance object. + """ + balance_ = Balance(balance) + rao2_: int + # convert balance2 to rao. 
This assumes balance2 is a rao value + rao2_ = int(balance_.rao) + + assert CLOSE_IN_VALUE(rao2_, 5) == balance_ + + +@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) +def test_balance_radd_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the right addition (radd) of a Balance object and a non-Balance object. + """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + # assume balance2 is a rao value + rao2_ = int(balance2) + + sum_ = balance2_ + balance_ # This is an radd + assert isinstance(sum_, Balance) + assert CLOSE_IN_VALUE(sum_.rao, 5) == rao2_ + rao_ + + +@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) +def test_balance_sub(balance: Union[int, float], balance2: Union[int, float]): + """ + Test the subtraction of two Balance objects. + """ + balance_ = Balance(balance) + balance2_ = Balance(balance2) + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + if isinstance(balance2, int): + rao2_ = balance2 + elif isinstance(balance2, float): + rao2_ = int(balance2 * pow(10, 9)) + + diff_ = balance_ - balance2_ + assert isinstance(diff_, Balance) + assert CLOSE_IN_VALUE(diff_.rao, 5) == rao_ - rao2_ + + +@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) +def test_balance_sub_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the subtraction of a Balance object and a non-Balance object. 
+ """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + # assume balance2 is a rao value + rao2_ = int(balance2) + + diff_ = balance_ - balance2_ + assert isinstance(diff_, Balance) + assert CLOSE_IN_VALUE(diff_.rao, 5) == rao_ - rao2_ + + +@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) +def test_balance_rsub_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the right subtraction (rsub) of a Balance object and a non-Balance object. + """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + # assume balance2 is a rao value + rao2_ = int(balance2) + + diff_ = balance2_ - balance_ # This is an rsub + assert isinstance(diff_, Balance) + assert CLOSE_IN_VALUE(diff_.rao, 5) == rao2_ - rao_ + + +@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) +def test_balance_mul(balance: Union[int, float], balance2: Union[int, float]): + """ + Test the multiplication of two Balance objects. 
+ """ + balance_ = Balance(balance) + balance2_ = Balance(balance2) + rao_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + if isinstance(balance2, int): + rao2_ = balance2 + elif isinstance(balance2, float): + rao2_ = int(balance2 * pow(10, 9)) + + prod_ = balance_ * balance2_ + assert isinstance(prod_, Balance) + + assert ( + prod_.rao == pytest.approx(rao_ * rao2_, 9) + ), f"{balance_} * {balance2_} == {prod_.rao} != {rao_} * {balance2} == {rao_ * balance2}" + + +@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) +def test_balance_mul_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the multiplication of a Balance object and a non-Balance object. + """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + + prod_ = balance_ * balance2_ + assert isinstance(prod_, Balance) + + assert ( + abs(prod_.rao - int(rao_ * balance2)) <= 20 + ), f"{prod_.rao} != {int(rao_ * balance2)}" + assert prod_.rao == pytest.approx(int(rao_ * balance2)) + + +@given(balance=valid_tao_numbers_strategy, balance2=valid_tao_numbers_strategy) +def test_balance_rmul_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the right multiplication (rmul) of a Balance object and a non-Balance object. 
+ """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + + prod_ = balance2_ * balance_ # This is an rmul + assert isinstance(prod_, Balance) + + assert ( + abs(prod_.rao - int(balance2 * rao_)) <= 20 + ), f"{prod_.rao} != {int(balance2 * rao_)}" + assert prod_.rao == pytest.approx(int(balance2 * rao_)) + + +@given( + balance=valid_tao_numbers_strategy, + balance2=valid_tao_numbers_strategy.filter(remove_zero_filter), +) # Avoid zero division +def test_balance_truediv(balance: Union[int, float], balance2: Union[int, float]): + """ + Test the true division (/) of two Balance objects. + """ + balance_ = Balance(balance) + balance2_ = Balance(balance2) + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + if isinstance(balance2, int): + rao2_ = balance2 + elif isinstance(balance2, float): + rao2_ = int(balance2 * pow(10, 9)) + + quot_ = balance_ / balance2_ + assert isinstance(quot_, Balance) + assert ( + abs(quot_.rao - int(rao_ / rao2_)) <= 2 + ), f"{quot_.rao} != {int(rao_ / rao2_)}" + assert quot_.rao == pytest.approx(int(rao_ / rao2_)) + + +@given( + balance=valid_tao_numbers_strategy, + balance2=valid_tao_numbers_strategy.filter(remove_zero_filter), +) +def test_balance_truediv_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the true division (/) of a Balance object and a non-Balance object. 
+ """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + # assume balance2 is a rao value + rao2_ = balance2 + + quot_ = balance_ / balance2_ + assert quot_.rao == pytest.approx(int(rao_ / rao2_)) + assert ( + abs(quot_.rao - int(rao_ / rao2_)) <= 10 + ), f"{quot_.rao} != {int(rao_ / rao2_)}" + + +@given( + balance=valid_tao_numbers_strategy.filter(remove_zero_filter), + balance2=valid_tao_numbers_strategy, +) # This is a filter to avoid division by zero +def test_balance_rtruediv_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the right true division (rtruediv) of a Balance object and a non-Balance object. + """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + # assume balance2 is a rao value + rao2_ = balance2 + + quot_ = balance2_ / balance_ # This is an rtruediv + assert isinstance(quot_, Balance) + expected_value = int(rao2_ / rao_) + assert ( + abs(quot_.rao - expected_value) <= 5 + ), f"{balance2_} / {balance_} = {quot_.rao} != {expected_value}" + assert quot_.rao == pytest.approx(expected_value) + + +@given( + balance=valid_tao_numbers_strategy, + balance2=valid_tao_numbers_strategy.filter(remove_zero_filter), +) # Avoid zero division +def test_balance_floordiv(balance: Union[int, float], balance2: Union[int, float]): + """ + Test the floor division (//) of two Balance objects. 
+ """ + balance_ = Balance(balance) + balance2_ = Balance(balance2) + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + if isinstance(balance2, int): + rao2_ = balance2 + elif isinstance(balance2, float): + rao2_ = int(balance2 * pow(10, 9)) + + quot_ = balance_ // balance2_ + assert isinstance(quot_, Balance) + assert CLOSE_IN_VALUE(quot_.rao, 5) == rao_ // rao2_ + + +@given( + balance=valid_tao_numbers_strategy, + balance2=valid_tao_numbers_strategy.filter(remove_zero_filter), +) +def test_balance_floordiv_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the floor division (//) of a Balance object and a non-Balance object. + """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + # assume balance2 is a rao value + rao2_ = balance2 + + quot_ = balance_ // balance2_ + assert isinstance(quot_, Balance) + expected_value = rao_ // rao2_ + assert ( + abs(quot_.rao - expected_value) <= 5 + ), f"{balance_} // {balance2_} = {quot_.rao} != {expected_value}" + assert quot_.rao == pytest.approx(rao_ // rao2_) + + +@given( + balance=valid_tao_numbers_strategy.filter(remove_zero_filter), + balance2=valid_tao_numbers_strategy, +) # This is a filter to avoid division by zero +def test_balance_rfloordiv_other_not_balance( + balance: Union[int, float], balance2: Union[int, float] +): + """ + Test the right floor division (rfloordiv) of a Balance object and a non-Balance object. 
+ """ + balance_ = Balance(balance) + balance2_ = balance2 + rao_: int + rao2_: int + if isinstance(balance, int): + rao_ = balance + elif isinstance(balance, float): + rao_ = int(balance * pow(10, 9)) + # assume balance2 is a rao value + rao2_ = balance2 + + quot_ = balance2_ // balance_ # This is an rfloordiv + assert isinstance(quot_, Balance) + expected_value = rao2_ // rao_ + assert quot_.rao == pytest.approx(rao2_ // rao_) + assert abs(quot_.rao - expected_value) <= 5 + + +@given(balance=valid_tao_numbers_strategy) +def test_balance_not_eq_none(balance: Union[int, float]): + """ + Test the inequality (!=) of a Balance object and None. + """ + balance_ = Balance(balance) + assert balance_ is not None + + +@given(balance=valid_tao_numbers_strategy) +def test_balance_neq_none(balance: Union[int, float]): + """ + Test the inequality (!=) of a Balance object and None. + """ + balance_ = Balance(balance) + assert balance_ is not None + + +def test_balance_init_from_invalid_value(): + """ + Test the initialization of a Balance object with an invalid value. + """ + with pytest.raises(TypeError): + Balance("invalid not a number") + + +@given(balance=valid_tao_numbers_strategy) +def test_balance_add_invalid_type(balance: Union[int, float]): + """ + Test the addition of a Balance object with an invalid type. + """ + balance_ = Balance(balance) + with pytest.raises(NotImplementedError): + _ = balance_ + "" + + +@given(balance=valid_tao_numbers_strategy) +def test_balance_sub_invalid_type(balance: Union[int, float]): + """ + Test the subtraction of a Balance object with an invalid type. + """ + balance_ = Balance(balance) + with pytest.raises(NotImplementedError): + _ = balance_ - "" + + +@given(balance=valid_tao_numbers_strategy) +def test_balance_div_invalid_type(balance: Union[int, float]): + """ + Test the division of a Balance object with an invalid type. 
+ """ + balance_ = Balance(balance) + with pytest.raises(NotImplementedError): + _ = balance_ / "" + + +@given(balance=valid_tao_numbers_strategy) +def test_balance_mul_invalid_type(balance: Union[int, float]): + """ + Test the multiplication of a Balance object with an invalid type. + """ + balance_ = Balance(balance) + with pytest.raises(NotImplementedError): + _ = balance_ * "" + + +@given(balance=valid_tao_numbers_strategy) +def test_balance_eq_invalid_type(balance: Union[int, float]): + """ + Test the equality of a Balance object with an invalid type. + """ + balance_ = Balance(balance) + with pytest.raises(NotImplementedError): + balance_ == "" + + +def test_from_float(): + """Tests from_float method call.""" + assert Balance.from_tao(1.0) == Balance(1000000000) + + +def test_from_rao(): + """Tests from_rao method call.""" + assert Balance.from_tao(1) == Balance(1000000000) diff --git a/tests/unit_tests/utils/test_init.py b/tests/unit_tests/utils/test_init.py new file mode 100644 index 0000000000..fbbc8d5bc9 --- /dev/null +++ b/tests/unit_tests/utils/test_init.py @@ -0,0 +1,27 @@ +import pytest + +from bittensor import warnings, __getattr__, version_split, logging, trace, debug + + +def test_getattr_version_split(): + """Test that __getattr__ for 'version_split' issues a deprecation warning and returns the correct value.""" + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + assert __getattr__("version_split") == version_split + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert "version_split is deprecated" in str(w[-1].message) + + +@pytest.mark.parametrize("test_input, expected", [(True, "Trace"), (False, "Default")]) +def test_trace(test_input, expected): + """Test the trace function turns tracing on|off.""" + trace(test_input) + assert logging.current_state_value == expected + + +@pytest.mark.parametrize("test_input, expected", [(True, "Debug"), (False, "Default")]) +def 
test_debug(test_input, expected): + """Test the debug function turns tracing on|off.""" + debug(test_input) + assert logging.current_state_value == expected diff --git a/tests/unit_tests/utils/test_networking.py b/tests/unit_tests/utils/test_networking.py new file mode 100644 index 0000000000..2037718578 --- /dev/null +++ b/tests/unit_tests/utils/test_networking.py @@ -0,0 +1,167 @@ +import os +import urllib +import pytest +import requests +import unittest.mock as mock +from bittensor import utils +from unittest.mock import MagicMock + + +# Test conversion functions for IPv4 +def test_int_to_ip_zero(): + """Test converting integer to IPv4 address for 0.""" + assert utils.networking.int_to_ip(0) == "0.0.0.0" + assert utils.networking.ip_to_int("0.0.0.0") == 0 + assert utils.networking.ip__str__(4, "0.0.0.0", 8888) == "/ipv4/0.0.0.0:8888" + + +def test_int_to_ip_range(): + """Test converting integer to IPv4 addresses in a range.""" + for i in range(10): + assert utils.networking.int_to_ip(i) == f"0.0.0.{i}" + assert utils.networking.ip_to_int(f"0.0.0.{i}") == i + assert ( + utils.networking.ip__str__(4, f"0.0.0.{i}", 8888) == f"/ipv4/0.0.0.{i}:8888" + ) + + +def test_int_to_ip4_max(): + """Test converting integer to maximum IPv4 address.""" + assert utils.networking.int_to_ip(4294967295) == "255.255.255.255" + assert utils.networking.ip_to_int("255.255.255.255") == 4294967295 + assert ( + utils.networking.ip__str__(4, "255.255.255.255", 8888) + == "/ipv4/255.255.255.255:8888" + ) + + +# Test conversion functions for IPv6 +def test_int_to_ip6_zero(): + """Test converting integer to IPv6 address for 0.""" + assert utils.networking.int_to_ip(4294967296) == "::1:0:0" + assert utils.networking.ip_to_int("::1:0:0") == 4294967296 + assert utils.networking.ip__str__(6, "::1:0:0", 8888) == "/ipv6/::1:0:0:8888" + + +def test_int_to_ip6_range(): + """Test converting integer to IPv6 addresses in a range.""" + for i in range(10): + assert utils.networking.int_to_ip(4294967296 + 
i) == f"::1:0:{i}" + assert utils.networking.ip_to_int(f"::1:0:{i}") == 4294967296 + i + assert ( + utils.networking.ip__str__(6, f"::1:0:{i}", 8888) == f"/ipv6/::1:0:{i}:8888" + ) + + +def test_int_to_ip6_max(): + """Test converting integer to maximum IPv6 address.""" + max_val = 340282366920938463463374607431768211455 + assert ( + utils.networking.int_to_ip(max_val) == "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" + ) + assert ( + utils.networking.ip_to_int("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") == max_val + ) + assert ( + utils.networking.ip__str__(6, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", 8888) + == "/ipv6/ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff:8888" + ) + + +def test_int_to_ip6_overflow(): + """Test handling overflow when converting integer to IPv6 address.""" + overflow = 340282366920938463463374607431768211455 + 1 + with pytest.raises(Exception): + utils.networking.int_to_ip(overflow) + + +def test_int_to_ip6_underflow(): + """Test handling underflow when converting integer to IPv6 address.""" + underflow = -1 + with pytest.raises(Exception): + utils.networking.int_to_ip(underflow) + + +# Test getting external IP address +def test_get_external_ip(): + """Test getting the external IP address.""" + assert utils.networking.get_external_ip() + + +def test_get_external_ip_os_broken(): + """Test getting the external IP address when os.popen is broken.""" + + class FakeReadline: + def readline(self): + return 1 + + def mock_call(): + return FakeReadline() + + with mock.patch.object(os, "popen", new=mock_call): + assert utils.networking.get_external_ip() + + +def test_get_external_ip_os_request_urllib_broken(): + """Test getting the external IP address when os.popen and requests.get/urllib.request are broken.""" + + class FakeReadline: + def readline(self): + return 1 + + def mock_call(): + return FakeReadline() + + class FakeResponse: + def text(self): + return 1 + + def mock_call_two(): + return FakeResponse() + + class FakeRequest: + def urlopen(self): + 
return 1 + + with mock.patch.object(os, "popen", new=mock_call): + with mock.patch.object(requests, "get", new=mock_call_two): + urllib.request = MagicMock(return_value=FakeRequest()) + with pytest.raises(Exception): + assert utils.networking.get_external_ip() + + +# Test formatting WebSocket endpoint URL +@pytest.mark.parametrize( + "url, expected", + [ + ("wss://exampleendpoint:9944", "wss://exampleendpoint:9944"), + ("ws://exampleendpoint:9944", "ws://exampleendpoint:9944"), + ( + "exampleendpoint:9944", + "ws://exampleendpoint:9944", + ), # should add ws:// not wss:// + ( + "ws://exampleendpoint", + "ws://exampleendpoint", + ), # should not add port if not specified + ( + "wss://exampleendpoint", + "wss://exampleendpoint", + ), # should not add port if not specified + ( + "exampleendpoint", + "ws://exampleendpoint", + ), # should not add port if not specified + ( + "exampleendpointwithws://:9944", + "ws://exampleendpointwithws://:9944", + ), # should only care about the front + ( + "exampleendpointwithwss://:9944", + "ws://exampleendpointwithwss://:9944", + ), # should only care about the front + ], +) +def test_format(url: str, expected: str): + """Test formatting WebSocket endpoint URL.""" + assert utils.networking.get_formatted_ws_endpoint_url(url) == expected diff --git a/tests/unit_tests/utils/test_registration.py b/tests/unit_tests/utils/test_registration.py new file mode 100644 index 0000000000..c85608b5f3 --- /dev/null +++ b/tests/unit_tests/utils/test_registration.py @@ -0,0 +1,62 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, 
subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import pytest + +from bittensor.utils.registration import LazyLoadedTorch + + +class MockBittensorLogging: + def __init__(self): + self.messages = [] + + def error(self, message): + self.messages.append(message) + + +@pytest.fixture +def mock_bittensor_logging(monkeypatch): + mock_logger = MockBittensorLogging() + monkeypatch.setattr("bittensor.utils.registration.logging", mock_logger) + return mock_logger + + +def test_lazy_loaded_torch__torch_installed(monkeypatch, mock_bittensor_logging): + import torch + + lazy_torch = LazyLoadedTorch() + + assert bool(torch) is True + + assert lazy_torch.nn is torch.nn + with pytest.raises(AttributeError): + lazy_torch.no_such_thing + + +def test_lazy_loaded_torch__no_torch(monkeypatch, mock_bittensor_logging): + monkeypatch.setattr("bittensor.utils.registration._get_real_torch", lambda: None) + + torch = LazyLoadedTorch() + + assert not torch + + with pytest.raises(ImportError): + torch.some_attribute + + # Check if the error message is logged correctly + assert len(mock_bittensor_logging.messages) == 1 + assert "This command requires torch." 
in mock_bittensor_logging.messages[0] diff --git a/tests/unit_tests/utils/test_utils.py b/tests/unit_tests/utils/test_utils.py new file mode 100644 index 0000000000..8ed28c0670 --- /dev/null +++ b/tests/unit_tests/utils/test_utils.py @@ -0,0 +1,169 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +from bittensor import utils +from bittensor.core.settings import SS58_FORMAT +import pytest + + +def test_ss58_to_vec_u8(mocker): + """Tests `utils.ss58_to_vec_u8` function.""" + # Prep + test_ss58_address = "5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" + fake_return = b"2\xa6?" 
+ mocked_ss58_address_to_bytes = mocker.patch.object( + utils, "ss58_address_to_bytes", return_value=fake_return + ) + + # Call + result = utils.ss58_to_vec_u8(test_ss58_address) + + # Asserts + mocked_ss58_address_to_bytes.assert_called_once_with(test_ss58_address) + assert result == [int(byte) for byte in fake_return] + + +@pytest.mark.parametrize( + "test_input,expected", + [ + ("y", True), + ("yes", True), + ("t", True), + ("true", True), + ("on", True), + ("1", True), + ("n", False), + ("no", False), + ("f", False), + ("false", False), + ("off", False), + ("0", False), + ], +) +def test_strtobool(test_input, expected): + """Test truthy values.""" + assert utils.strtobool(test_input) is expected + + +@pytest.mark.parametrize( + "test_input", + [ + "maybe", + "2", + "onoff", + ], +) +def test_strtobool_raise_error(test_input): + """Tests invalid values.""" + with pytest.raises(ValueError): + utils.strtobool(test_input) + + +def test_get_explorer_root_url_by_network_from_map(): + """Tests private utils._get_explorer_root_url_by_network_from_map function.""" + # Prep + # Test with a known network + network_map = { + "entity1": {"network1": "url1", "network2": "url2"}, + "entity2": {"network1": "url3", "network3": "url4"}, + } + # Test with no matching network in the map + network_map_empty = { + "entity1": {}, + "entity2": {}, + } + + # Assertions + assert utils._get_explorer_root_url_by_network_from_map( + "network1", network_map + ) == { + "entity1": "url1", + "entity2": "url3", + } + # Test with an unknown network + assert ( + utils._get_explorer_root_url_by_network_from_map("unknown_network", network_map) + == {} + ) + assert ( + utils._get_explorer_root_url_by_network_from_map("network1", network_map_empty) + == {} + ) + + +def test_get_explorer_url_for_network(): + """Tests `utils.get_explorer_url_for_network` function.""" + # Prep + fake_block_hash = "0x1234567890abcdef" + fake_map = {"opentensor": {"network": "url"}, "taostats": {"network": "url2"}} + + # 
Call + result = utils.get_explorer_url_for_network("network", fake_block_hash, fake_map) + + # Assert + assert result == { + "opentensor": f"url/query/{fake_block_hash}", + "taostats": f"url2/extrinsic/{fake_block_hash}", + } + + +def test_ss58_address_to_bytes(mocker): + """Tests utils.ss58_address_to_bytes function.""" + # Prep + fake_ss58_address = "ss58_address" + mocked_scalecodec_ss58_decode = mocker.patch.object( + utils.scalecodec, "ss58_decode", return_value="" + ) + + # Call + result = utils.ss58_address_to_bytes(fake_ss58_address) + + # Asserts + mocked_scalecodec_ss58_decode.assert_called_once_with( + fake_ss58_address, SS58_FORMAT + ) + assert result == bytes.fromhex(mocked_scalecodec_ss58_decode.return_value) + + +@pytest.mark.parametrize( + "test_input, expected_result", + [ + (123, False), + ("0x234SD", True), + ("5D34SD", True), + (b"0x234SD", True), + ], +) +def test_is_valid_bittensor_address_or_public_key(mocker, test_input, expected_result): + """Tests utils.is_valid_bittensor_address_or_public_key function.""" + # Prep + mocked_is_valid_ed25519_pubkey = mocker.patch.object( + utils, "_is_valid_ed25519_pubkey", return_value=True + ) + mocked_ss58_is_valid_ss58_address = mocker.patch.object( + utils.ss58, "is_valid_ss58_address", side_effect=[False, True] + ) + + # Call + result = utils.is_valid_bittensor_address_or_public_key(test_input) + + # Asserts + if not isinstance(test_input, int) and isinstance(test_input, bytes): + mocked_is_valid_ed25519_pubkey.assert_called_with(test_input) + if isinstance(test_input, str) and not test_input.startswith("0x"): + assert mocked_ss58_is_valid_ss58_address.call_count == 2 + assert result == expected_result diff --git a/tests/unit_tests/utils/test_version.py b/tests/unit_tests/utils/test_version.py new file mode 100644 index 0000000000..fa96bddad3 --- /dev/null +++ b/tests/unit_tests/utils/test_version.py @@ -0,0 +1,171 @@ +# The MIT License (MIT) +# Copyright © 2021 Yuma Rao +# Copyright © 2022 Opentensor 
Foundation +# Copyright © 2023 Opentensor Technologies Inc + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +from pathlib import Path +import pytest +from freezegun import freeze_time +from datetime import datetime, timedelta, timezone + +# from bittensor.utils.version import ( +# VERSION_CHECK_THRESHOLD, +# VersionCheckError, +# get_and_save_latest_version, +# check_version, +# version_checking, +# __version__ +# ) +from bittensor.utils import version + +from unittest.mock import MagicMock +from pytest_mock import MockerFixture + + +@pytest.fixture +def pypi_version(): + return "6.9.3" + + +@pytest.fixture +def mock_get_version_from_pypi(mocker: MockerFixture, pypi_version: str): + return mocker.patch( + "bittensor.utils.version._get_version_from_pypi", + return_value=pypi_version, + autospec=True, + ) + + +@pytest.fixture +def version_file_path(mocker: MockerFixture, tmp_path: Path): + file_path = tmp_path / ".version" + + mocker.patch( + "bittensor.utils.version._get_version_file_path", return_value=file_path + ) + return file_path + + +def test_get_and_save_latest_version_no_file( + mock_get_version_from_pypi: MagicMock, version_file_path: Path, pypi_version: str +): + assert not version_file_path.exists() + + assert version.get_and_save_latest_version() == pypi_version + + mock_get_version_from_pypi.assert_called_once() + assert version_file_path.exists() + assert version_file_path.read_text() == pypi_version + + +@pytest.mark.parametrize("elapsed", [0, version.VERSION_CHECK_THRESHOLD - 5]) +def test_get_and_save_latest_version_file_fresh_check( + mock_get_version_from_pypi: MagicMock, version_file_path: Path, elapsed: int +): + now = datetime.now(timezone.utc) + + version_file_path.write_text("6.9.5") + + with freeze_time(now + timedelta(seconds=elapsed)): + assert version.get_and_save_latest_version() == "6.9.5" + + mock_get_version_from_pypi.assert_not_called() + + +def test_get_and_save_latest_version_file_expired_check( + mock_get_version_from_pypi: MagicMock, version_file_path: Path, pypi_version: str +): + now = datetime.now(timezone.utc) + + 
version_file_path.write_text("6.9.5") + + with freeze_time(now + timedelta(seconds=version.VERSION_CHECK_THRESHOLD + 1)): + assert version.get_and_save_latest_version() == pypi_version + + mock_get_version_from_pypi.assert_called_once() + assert version_file_path.read_text() == pypi_version + + +@pytest.mark.parametrize( + ("current_version", "latest_version"), + [ + ("6.9.3", "6.9.4"), + ("6.9.3a1", "6.9.3a2"), + ("6.9.3a1", "6.9.3b1"), + ("6.9.3", "6.10"), + ("6.9.3", "7.0"), + ("6.0.15", "6.1.0"), + ], +) +def test_check_version_newer_available( + mocker: MockerFixture, current_version: str, latest_version: str, capsys +): + version.__version__ = current_version + mocker.patch( + "bittensor.utils.version.get_and_save_latest_version", + return_value=latest_version, + ) + + version.check_version() + + captured = capsys.readouterr() + + assert "update" in captured.out + assert current_version in captured.out + assert latest_version in captured.out + + +@pytest.mark.parametrize( + ("current_version", "latest_version"), + [ + ("6.9.3", "6.9.3"), + ("6.9.3", "6.9.2"), + ("6.9.3b", "6.9.3a"), + ], +) +def test_check_version_up_to_date( + mocker: MockerFixture, current_version: str, latest_version: str, capsys +): + version.__version__ = current_version + mocker.patch( + "bittensor.utils.version.get_and_save_latest_version", + return_value=latest_version, + ) + + version.check_version() + + captured = capsys.readouterr() + + assert captured.out == "" + + +def test_version_checking(mocker: MockerFixture): + mock = mocker.patch("bittensor.utils.version.check_version") + + version.version_checking() + + mock.assert_called_once() + + +def test_version_checking_exception(mocker: MockerFixture): + mock = mocker.patch( + "bittensor.utils.version.check_version", side_effect=version.VersionCheckError + ) + + version.version_checking() + + mock.assert_called_once() diff --git a/tests/unit_tests/utils/test_weight_utils.py b/tests/unit_tests/utils/test_weight_utils.py new file mode 
100644 index 0000000000..74009434b9 --- /dev/null +++ b/tests/unit_tests/utils/test_weight_utils.py @@ -0,0 +1,681 @@ +# The MIT License (MIT) +# Copyright © 2021 Yuma Rao +# Copyright © 2022 Opentensor Foundation +# Copyright © 2023 Opentensor Technologies Inc + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import logging +import numpy as np +from hypothesis import settings + +import bittensor.utils.weight_utils as weight_utils +import pytest + +from bittensor.utils import torch +from bittensor.core.settings import version_as_int + + +def test_convert_weight_and_uids(): + uids = np.arange(10) + weights = np.random.rand(10) + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + + # min weight < 0 + weights[5] = -1 + with pytest.raises(ValueError) as pytest_wrapped_e: + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + + # min uid < 0 + weights[5] = 0 + uids[3] = -1 + with pytest.raises(ValueError) as pytest_wrapped_e: + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + + # len(uids) != len(weights) + uids[3] = 3 + with pytest.raises(ValueError) as pytest_wrapped_e: + weight_utils.convert_weights_and_uids_for_emit(uids, weights[1:]) + + # sum(weights) == 0 + weights = np.zeros(10) + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + + # test for overflow and underflow + for _ in range(5): + uids = np.arange(10) + weights = np.random.rand(10) + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + + +def test_convert_weight_and_uids_torch(force_legacy_torch_compatible_api): + uids = torch.tensor(list(range(10))) + weights = torch.rand(10) + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + + # min weight < 0 + weights[5] = -1 + with pytest.raises(ValueError) as pytest_wrapped_e: + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + # min uid < 0 + weights[5] = 0 + uids[3] = -1 + with pytest.raises(ValueError) as pytest_wrapped_e: + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + # len(uids) != len(weights) + uids[3] = 3 + with pytest.raises(ValueError) as pytest_wrapped_e: + weight_utils.convert_weights_and_uids_for_emit(uids, weights[1:]) + + # sum(weights) == 0 + weights = torch.zeros(10) + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + + # test 
for overflow and underflow + for _ in range(5): + uids = torch.tensor(list(range(10))) + weights = torch.rand(10) + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + + +def test_normalize_with_max_weight(): + weights = np.random.rand(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.01) + assert wn.max() <= 0.01 + + weights = np.zeros(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.01) + assert wn.max() <= 0.01 + + weights = np.random.rand(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.02) + assert wn.max() <= 0.02 + + weights = np.zeros(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.02) + assert wn.max() <= 0.02 + + weights = np.random.rand(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.03) + assert wn.max() <= 0.03 + + weights = np.zeros(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.03) + assert wn.max() <= 0.03 + + # Check for Limit + limit = 0.001 + weights = np.random.rand(2000) + w = weights / weights.sum() + wn = weight_utils.normalize_max_weight(weights, limit=limit) + assert abs((w.max() >= limit and (limit - wn.max())) < 0.001) or ( + w.max() < limit and wn.max() < limit + ) + + # Check for Zeros + limit = 0.01 + weights = np.zeros(2000) + wn = weight_utils.normalize_max_weight(weights, limit=limit) + assert wn.max() == 1 / 2000 + + # Check for Ordering after normalization + weights = np.random.rand(100) + wn = weight_utils.normalize_max_weight(weights, limit=1) + assert np.array_equal(wn, weights / weights.sum()) + + # Check for epsilon changes + epsilon = 0.01 + weights = np.sort(np.random.rand(100)) + x = weights / weights.sum() + limit = x[-10] + change = epsilon * limit + y = weight_utils.normalize_max_weight(x, limit=limit - change) + z = weight_utils.normalize_max_weight(x, limit=limit + change) + assert np.abs(y - z).sum() < epsilon + + +def test_normalize_with_max_weight__legacy_torch_api_compat( + force_legacy_torch_compatible_api, 
+): + weights = torch.rand(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.01) + assert wn.max() <= 0.01 + + weights = torch.zeros(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.01) + assert wn.max() <= 0.01 + + weights = torch.rand(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.02) + assert wn.max() <= 0.02 + + weights = torch.zeros(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.02) + assert wn.max() <= 0.02 + + weights = torch.rand(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.03) + assert wn.max() <= 0.03 + + weights = torch.zeros(1000) + wn = weight_utils.normalize_max_weight(weights, limit=0.03) + assert wn.max() <= 0.03 + + # Check for Limit + limit = 0.001 + weights = torch.rand(2000) + w = weights / weights.sum() + wn = weight_utils.normalize_max_weight(weights, limit=limit) + assert (w.max() >= limit and (limit - wn.max()).abs() < 0.001) or ( + w.max() < limit and wn.max() < limit + ) + + # Check for Zeros + limit = 0.01 + weights = torch.zeros(2000) + wn = weight_utils.normalize_max_weight(weights, limit=limit) + assert wn.max() == 1 / 2000 + + # Check for Ordering after normalization + weights = torch.rand(100) + wn = weight_utils.normalize_max_weight(weights, limit=1) + assert torch.isclose(wn, weights / weights.sum(), atol=1e-08, rtol=0).all() + + # Check for epsilon changes + epsilon = 0.01 + weights, _ = torch.sort(torch.rand(100)) + x = weights / weights.sum() + limit = x[-10] + change = epsilon * limit + y = weight_utils.normalize_max_weight(x, limit=limit - change) + z = weight_utils.normalize_max_weight(x, limit=limit + change) + assert (y - z).abs().sum() < epsilon + + +@pytest.mark.parametrize( + "test_id, n, uids, weights, expected", + [ + ("happy-path-1", 3, [0, 1, 2], [15, 5, 80], np.array([0.15, 0.05, 0.8])), + ("happy-path-2", 4, [1, 3], [50, 50], np.array([0.0, 0.5, 0.0, 0.5])), + ], +) +def test_convert_weight_uids_and_vals_to_tensor_happy_path( + 
test_id, n, uids, weights, expected +): + # Act + result = weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) + + # Assert + assert np.allclose(result, expected), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, weights, subnets, expected", + [ + ( + "happy-path-1", + 3, + [0, 1, 2], + [15, 5, 80], + [0, 1, 2], + torch.tensor([0.15, 0.05, 0.8]), + ), + ( + "happy-path-2", + 3, + [0, 2], + [300, 300], + [0, 1, 2], + torch.tensor([0.5, 0.0, 0.5]), + ), + ], +) +def test_convert_weight_uids_and_vals_to_tensor_happy_path_torch( + test_id, n, uids, weights, subnets, expected, force_legacy_torch_compatible_api +): + # Act + result = weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) + + # Assert + assert torch.allclose(result, expected), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, weights, expected", + [ + ("edge_case_empty", 5, [], [], np.zeros(5)), + ("edge_case_single", 1, [0], [100], np.array([1.0])), + ("edge_case_all_zeros", 4, [0, 1, 2, 3], [0, 0, 0, 0], np.zeros(4)), + ], +) +def test_convert_weight_uids_and_vals_to_tensor_edge_cases( + test_id, n, uids, weights, expected +): + # Act + result = weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) + + # Assert + assert np.allclose(result, expected), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, weights, exception", + [ + ("error-case-mismatched-lengths", 3, [0, 1, 3, 4, 5], [10, 20, 30], IndexError), + ("error-case-negative-n", -1, [0, 1], [10, 20], ValueError), + ("error-case-invalid-uids", 3, [0, 3], [10, 20], IndexError), + ], +) +def test_convert_weight_uids_and_vals_to_tensor_error_cases( + test_id, n, uids, weights, exception +): + # Act / Assert + with pytest.raises(exception): + weight_utils.convert_weight_uids_and_vals_to_tensor(n, uids, weights) + + +@pytest.mark.parametrize( + "test_id, n, uids, weights, subnets, expected", + [ + ( + "happy-path-1", + 3, + [0, 1, 2], + [15, 
5, 80], + [0, 1, 2], + np.array([0.15, 0.05, 0.8]), + ), + ( + "happy-path-2", + 3, + [0, 2], + [300, 300], + [0, 1, 2], + np.array([0.5, 0.0, 0.5]), + ), + ], +) +def test_convert_root_weight_uids_and_vals_to_tensor_happy_paths( + test_id, n, uids, weights, subnets, expected +): + # Act + result = weight_utils.convert_root_weight_uids_and_vals_to_tensor( + n, uids, weights, subnets + ) + + # Assert + assert np.allclose(result, expected, atol=1e-4), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, weights, subnets, expected", + [ + ( + "edge-1", + 1, + [0], + [0], + [0], + torch.tensor([0.0]), + ), # Single neuron with zero weight + ( + "edge-2", + 2, + [0, 1], + [0, 0], + [0, 1], + torch.tensor([0.0, 0.0]), + ), # All zero weights + ], +) +def test_convert_root_weight_uids_and_vals_to_tensor_edge_cases( + test_id, n, uids, weights, subnets, expected, force_legacy_torch_compatible_api +): + # Act + result = weight_utils.convert_root_weight_uids_and_vals_to_tensor( + n, uids, weights, subnets + ) + + # Assert + assert torch.allclose(result, expected, atol=1e-4), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, weights, subnets, expected", + [ + ( + "edge-1", + 1, + [0], + [0], + [0], + np.array([0.0]), + ), # Single neuron with zero weight + ( + "edge-2", + 2, + [0, 1], + [0, 0], + [0, 1], + np.array([0.0, 0.0]), + ), # All zero weights + ], +) +def test_convert_root_weight_uids_and_vals_to_tensor_edge_cases( + test_id, n, uids, weights, subnets, expected +): + # Act + result = weight_utils.convert_root_weight_uids_and_vals_to_tensor( + n, uids, weights, subnets + ) + + # Assert + assert np.allclose(result, expected, atol=1e-4), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, weights, subnets, exception", + [ + # uid not in subnets + ( + "error-1", + 3, + [1, 3], + [100, 200], + [1, 2], + "The subnet is unavailable at the moment.", + ), + # More uids than subnets + ( + "error-2", + 3, + [1, 2, 3], 
+ [100, 200], + [1], + "The subnet is unavailable at the moment.", + ), + ], +) +def test_convert_root_weight_uids_and_vals_to_tensor_error_cases( + test_id, n, uids, weights, subnets, exception, caplog +): + with caplog.at_level(logging.WARNING): + weight_utils.convert_root_weight_uids_and_vals_to_tensor( + n, uids, weights, subnets + ) + + assert any( + exception in record.message and record.levelname == "WARNING" + for record in caplog.records + ) + + +@pytest.mark.parametrize( + "test_id, n, uids, bonds, expected_output", + [ + ( + "happy-path-1", + 5, + [1, 3, 4], + [10, 20, 30], + np.array([0, 10, 0, 20, 30], dtype=np.int64), + ), + ( + "happy-path-2", + 3, + [0, 1, 2], + [7, 8, 9], + np.array([7, 8, 9], dtype=np.int64), + ), + ("happy-path-3", 4, [2], [15], np.array([0, 0, 15, 0], dtype=np.int64)), + ], +) +def test_happy_path(test_id, n, uids, bonds, expected_output): + # Act + result = weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) + + # Assert + assert np.array_equal(result, expected_output), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, bonds, expected_output", + [ + ( + "happy-path-1", + 5, + [1, 3, 4], + [10, 20, 30], + torch.tensor([0, 10, 0, 20, 30], dtype=torch.int64), + ), + ( + "happy-path-2", + 3, + [0, 1, 2], + [7, 8, 9], + torch.tensor([7, 8, 9], dtype=torch.int64), + ), + ("happy-path-3", 4, [2], [15], torch.tensor([0, 0, 15, 0], dtype=torch.int64)), + ], +) +def test_happy_path_torch( + test_id, n, uids, bonds, expected_output, force_legacy_torch_compatible_api +): + # Act + result = weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) + + # Assert + assert torch.equal(result, expected_output), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, bonds, expected_output", + [ + ("edge-1", 1, [0], [0], np.array([0], dtype=np.int64)), # Single element + ( + "edge-2", + 10, + [], + [], + np.zeros(10, dtype=np.int64), + ), # Empty uids and bonds + ], +) +def 
test_edge_cases(test_id, n, uids, bonds, expected_output): + # Act + result = weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) + + # Assert + assert np.array_equal(result, expected_output), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, bonds, expected_output", + [ + ("edge-1", 1, [0], [0], torch.tensor([0], dtype=torch.int64)), # Single element + ( + "edge-2", + 10, + [], + [], + torch.zeros(10, dtype=torch.int64), + ), # Empty uids and bonds + ], +) +def test_edge_cases_torch( + test_id, n, uids, bonds, expected_output, force_legacy_torch_compatible_api +): + # Act + result = weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) + + # Assert + assert torch.equal(result, expected_output), f"Failed {test_id}" + + +@pytest.mark.parametrize( + "test_id, n, uids, bonds, exception", + [ + ("error-1", 5, [1, 3, 6], [10, 20, 30], IndexError), # uid out of bounds + ("error-2", -1, [0], [10], ValueError), # Negative number of neurons + ], +) +def test_error_cases(test_id, n, uids, bonds, exception): + # Act / Assert + with pytest.raises(exception): + weight_utils.convert_bond_uids_and_vals_to_tensor(n, uids, bonds) + + +def test_process_weights_for_netuid(mocker): + """Test the process_weights_for_netuid function.""" + # Prep + fake_uids = np.array([1, 2, 3, 4, 5], dtype=np.int64) + fake_weights = np.array([1.0, 2.5, 3.3, 4.7, 5.9], dtype=np.float32) + fake_netuid = 1 + fake_subtensor = mocker.MagicMock() + fake_metagraph = mocker.MagicMock() + fake_exclude_quantile = 0 + + fake_subtensor.min_allowed_weights.return_value = 0.1 + fake_subtensor.max_weight_limit.return_value = 1.0 + fake_metagraph.n = 1 + mocked_normalize_max_weight = mocker.patch.object( + weight_utils, "normalize_max_weight" + ) + + # Call + result = weight_utils.process_weights_for_netuid( + uids=fake_uids, + weights=fake_weights, + netuid=fake_netuid, + subtensor=fake_subtensor, + metagraph=fake_metagraph, + exclude_quantile=fake_exclude_quantile, + ) 
+ + # Asserts + fake_subtensor.min_allowed_weights.assert_called_once_with(netuid=fake_netuid) + fake_subtensor.max_weight_limit.assert_called_once_with(netuid=fake_netuid) + + res1, res2 = result + assert np.array_equal(res1, fake_uids) + assert res2 == mocked_normalize_max_weight.return_value + + +def test_process_weights_with_all_zero_weights(mocker): + """Test the process_weights_for_netuid function with all zero weights.""" + # Prep + fake_uids = np.array([1, 2, 3, 4, 5], dtype=np.int64) + fake_weights = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) + fake_netuid = 1 + fake_subtensor = mocker.MagicMock() + fake_metagraph = mocker.MagicMock() + fake_exclude_quantile = 0 + + fake_subtensor.min_allowed_weights.return_value = 0.1 + fake_subtensor.max_weight_limit.return_value = 1.0 + fake_metagraph.n = 1 + + # Call + result = weight_utils.process_weights_for_netuid( + uids=fake_uids, + weights=fake_weights, + netuid=fake_netuid, + subtensor=fake_subtensor, + metagraph=fake_metagraph, + exclude_quantile=fake_exclude_quantile, + ) + + # Asserts + fake_subtensor.min_allowed_weights.assert_called_once_with(netuid=fake_netuid) + fake_subtensor.max_weight_limit.assert_called_once_with(netuid=fake_netuid) + + res1, res2 = result + assert np.array_equal(res1, np.array([0])) + assert np.array_equal(res2, np.array([1.0])) + + +def test_process_weights_for_netuid_with_nzs_less_min_allowed_weights(mocker): + """Tests process_weights_for_netuid method when non-zero weights are less than the min allowed weights.""" + # Prep + fake_uids = np.array([1, 2, 3, 4, 5], dtype=np.int64) + fake_weights = np.array([0.1, 0.2, 0.3, 0.0, 0.0], dtype=np.float32) + fake_netuid = 1 + fake_subtensor = mocker.MagicMock() + fake_metagraph = None + fake_exclude_quantile = 0 + + fake_subtensor.min_allowed_weights.return_value = 4 + fake_subtensor.max_weight_limit.return_value = 1.0 + fake_subtensor.metagraph.return_value.n = 5 + mocked_np_arange = mocker.patch.object(np, "arange") + 
mocked_normalize_max_weight = mocker.patch.object( + weight_utils, "normalize_max_weight" + ) + + # Call + result = weight_utils.process_weights_for_netuid( + uids=fake_uids, + weights=fake_weights, + netuid=fake_netuid, + subtensor=fake_subtensor, + metagraph=fake_metagraph, + exclude_quantile=fake_exclude_quantile, + ) + + # Asserts + fake_subtensor.metagraph.assert_called_once_with(fake_netuid) + fake_subtensor.min_allowed_weights.assert_called_once_with(netuid=fake_netuid) + fake_subtensor.max_weight_limit.assert_called_once_with(netuid=fake_netuid) + assert result == ( + mocked_np_arange.return_value, + mocked_normalize_max_weight.return_value, + ) + + +def test_generate_weight_hash(mocker): + """Tests weight_utils.generate_weight_hash function.""" + # Prep + fake_address = "5D1ABCD" + fake_netuid = 1 + fake_uids = [1, 2] + fake_values = [10, 20] + fake_version_key = 80000 + fake_salt = [1, 2] + + mocked_scale_bytes = mocker.patch.object(weight_utils, "ScaleBytes") + mocked_keypair = mocker.patch.object(weight_utils, "Keypair") + mocker_vec = mocker.patch.object(weight_utils, "Vec") + mocked_u16 = mocker.patch.object(weight_utils, "U16") + mocked_hasher = mocker.patch.object(weight_utils.hashlib, "blake2b") + + # Call + result = weight_utils.generate_weight_hash( + address=fake_address, + netuid=fake_netuid, + uids=fake_uids, + values=fake_values, + version_key=fake_version_key, + salt=fake_salt, + ) + + # Asserts + mocked_scale_bytes.assert_called() + mocked_keypair.assert_called() + mocker_vec.assert_called() + mocked_u16.assert_called() + assert ( + result + == mocked_hasher.return_value.hexdigest.return_value.__radd__.return_value + ) From 60994359f0c0b9914c2e9c897a75c2ca988a8d17 Mon Sep 17 00:00:00 2001 From: Roman Date: Tue, 24 Sep 2024 15:30:16 -0700 Subject: [PATCH 03/11] rename e2e tests utils file --- tests/e2e_tests/conftest.py | 2 +- tests/e2e_tests/test_axon.py | 2 +- tests/e2e_tests/test_commit_weights.py | 2 +- tests/e2e_tests/test_dendrite.py | 
2 +- tests/e2e_tests/test_incentive.py | 2 +- tests/e2e_tests/test_liquid_alpha.py | 2 +- tests/e2e_tests/test_metagraph.py | 2 +- tests/e2e_tests/test_subtensor_functions.py | 2 +- tests/e2e_tests/test_transfer.py | 2 +- tests/e2e_tests/utils/{test_utils.py => e2e_test_utils.py} | 0 10 files changed, 9 insertions(+), 9 deletions(-) rename tests/e2e_tests/utils/{test_utils.py => e2e_test_utils.py} (100%) diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py index 9fc9faec68..d82944afd5 100644 --- a/tests/e2e_tests/conftest.py +++ b/tests/e2e_tests/conftest.py @@ -9,7 +9,7 @@ from substrateinterface import SubstrateInterface from bittensor import logging -from tests.e2e_tests.utils.test_utils import ( +from tests.e2e_tests.utils.e2e_test_utils import ( clone_or_update_templates, install_templates, template_path, diff --git a/tests/e2e_tests/test_axon.py b/tests/e2e_tests/test_axon.py index bcf8650fd1..853719f85d 100644 --- a/tests/e2e_tests/test_axon.py +++ b/tests/e2e_tests/test_axon.py @@ -7,7 +7,7 @@ from bittensor import logging from bittensor.utils import networking from tests.e2e_tests.utils.chain_interactions import register_neuron, register_subnet -from tests.e2e_tests.utils.test_utils import ( +from tests.e2e_tests.utils.e2e_test_utils import ( setup_wallet, template_path, templates_repo, diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index 1974854b9b..ca9b0a0a2c 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -14,7 +14,7 @@ sudo_set_hyperparameter_values, wait_interval, ) -from tests.e2e_tests.utils.test_utils import setup_wallet +from tests.e2e_tests.utils.e2e_test_utils import setup_wallet @pytest.mark.asyncio diff --git a/tests/e2e_tests/test_dendrite.py b/tests/e2e_tests/test_dendrite.py index 3f02d021c0..e075326ca5 100644 --- a/tests/e2e_tests/test_dendrite.py +++ b/tests/e2e_tests/test_dendrite.py @@ -6,7 +6,7 @@ import bittensor from 
bittensor import logging, Subtensor -from tests.e2e_tests.utils.test_utils import ( +from tests.e2e_tests.utils.e2e_test_utils import ( setup_wallet, template_path, templates_repo, diff --git a/tests/e2e_tests/test_incentive.py b/tests/e2e_tests/test_incentive.py index 355bf44077..3e309f4f64 100644 --- a/tests/e2e_tests/test_incentive.py +++ b/tests/e2e_tests/test_incentive.py @@ -10,7 +10,7 @@ register_subnet, wait_epoch, ) -from tests.e2e_tests.utils.test_utils import ( +from tests.e2e_tests.utils.e2e_test_utils import ( setup_wallet, template_path, templates_repo, diff --git a/tests/e2e_tests/test_liquid_alpha.py b/tests/e2e_tests/test_liquid_alpha.py index 21492fba8d..d73162fbb4 100644 --- a/tests/e2e_tests/test_liquid_alpha.py +++ b/tests/e2e_tests/test_liquid_alpha.py @@ -7,7 +7,7 @@ sudo_set_hyperparameter_bool, sudo_set_hyperparameter_values, ) -from tests.e2e_tests.utils.test_utils import setup_wallet +from tests.e2e_tests.utils.e2e_test_utils import setup_wallet def liquid_alpha_call_params(netuid: int, alpha_values: str): diff --git a/tests/e2e_tests/test_metagraph.py b/tests/e2e_tests/test_metagraph.py index 60dc2826a3..ff16dde369 100644 --- a/tests/e2e_tests/test_metagraph.py +++ b/tests/e2e_tests/test_metagraph.py @@ -7,7 +7,7 @@ register_neuron, register_subnet, ) -from tests.e2e_tests.utils.test_utils import ( +from tests.e2e_tests.utils.e2e_test_utils import ( setup_wallet, ) diff --git a/tests/e2e_tests/test_subtensor_functions.py b/tests/e2e_tests/test_subtensor_functions.py index 5665e6e058..32d0f6e14d 100644 --- a/tests/e2e_tests/test_subtensor_functions.py +++ b/tests/e2e_tests/test_subtensor_functions.py @@ -9,7 +9,7 @@ register_neuron, register_subnet, ) -from tests.e2e_tests.utils.test_utils import ( +from tests.e2e_tests.utils.e2e_test_utils import ( setup_wallet, template_path, templates_repo, diff --git a/tests/e2e_tests/test_transfer.py b/tests/e2e_tests/test_transfer.py index 9ec501d5bd..b6be1cd6ae 100644 --- 
a/tests/e2e_tests/test_transfer.py +++ b/tests/e2e_tests/test_transfer.py @@ -1,6 +1,6 @@ from bittensor import Subtensor, logging from bittensor.core.subtensor import transfer_extrinsic -from tests.e2e_tests.utils.test_utils import setup_wallet +from tests.e2e_tests.utils.e2e_test_utils import setup_wallet def test_transfer(local_chain): diff --git a/tests/e2e_tests/utils/test_utils.py b/tests/e2e_tests/utils/e2e_test_utils.py similarity index 100% rename from tests/e2e_tests/utils/test_utils.py rename to tests/e2e_tests/utils/e2e_test_utils.py From f811bc437579b01bc3e11cdaa026b553cc773a39 Mon Sep 17 00:00:00 2001 From: Roman Date: Tue, 24 Sep 2024 15:58:58 -0700 Subject: [PATCH 04/11] fix axon.py typing.Tuple in import and blacklist_sig --- bittensor/core/axon.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bittensor/core/axon.py b/bittensor/core/axon.py index cd32ba4212..63b79d49c8 100644 --- a/bittensor/core/axon.py +++ b/bittensor/core/axon.py @@ -29,7 +29,7 @@ import uuid import warnings from inspect import signature, Signature, Parameter -from typing import Any, Awaitable, Callable, Optional +from typing import Any, Awaitable, Callable, Optional, Tuple import uvicorn from bittensor_wallet import Wallet @@ -556,7 +556,7 @@ async def endpoint(*args, **kwargs): ] if blacklist_fn: blacklist_sig = Signature( - expected_params, return_annotation=tuple[bool, str] + expected_params, return_annotation=Tuple[bool, str] ) assert ( signature(blacklist_fn) == blacklist_sig From 69f6f74cb97d305dae45229faa904b56f2c71ffd Mon Sep 17 00:00:00 2001 From: ibraheem-opentensor Date: Tue, 24 Sep 2024 18:29:16 -0700 Subject: [PATCH 05/11] Bumps version & changelog --- CHANGELOG.md | 10 ++++++++++ VERSION | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c76ed59957..cd4e53fe5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Changelog +## 8.0.0 /2024-09-24 + +## What's Changed + +* Removes 
Bittensor CLI and Wallet functionalities +* Changes the Bittensor SDK package to be light while maintaining backwards compatibility +* Bittensor SDK development by @RomanCh-OT in https://github.com/opentensor/bittensor/tree/btsdk + +**Full Changelog**: https://github.com/opentensor/bittensor/compare/v7.4.0...v8.0.0 + ## 7.4.0 /2024-08-29 ## What's Changed diff --git a/VERSION b/VERSION index 8b23b8d47c..fa5fce04b3 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -7.3.0 \ No newline at end of file +8.0.0 \ No newline at end of file From dfc55493920eef9f82c7d06eb758143e3f9f999d Mon Sep 17 00:00:00 2001 From: ibraheem-opentensor Date: Tue, 24 Sep 2024 18:31:03 -0700 Subject: [PATCH 06/11] Updates changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd4e53fe5d..f95a9dbf5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 8.0.0 /2024-09-24 +## 8.0.0 /2024-09-25 ## What's Changed From a9e3423a99121587be20092c01eef6818f8b39a3 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 25 Sep 2024 09:30:27 -0700 Subject: [PATCH 07/11] fix tests/unit_tests/test_axon.py --- tests/unit_tests/test_axon.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit_tests/test_axon.py b/tests/unit_tests/test_axon.py index 5df465e371..689d217379 100644 --- a/tests/unit_tests/test_axon.py +++ b/tests/unit_tests/test_axon.py @@ -19,7 +19,7 @@ import re import time from dataclasses import dataclass -from typing import Any, Optional +from typing import Any, Optional, Tuple from unittest import IsolatedAsyncioTestCase from unittest.mock import AsyncMock, MagicMock, patch @@ -56,7 +56,7 @@ class TestSynapse(Synapse): def forward_fn(synapse: TestSynapse) -> Any: pass - def blacklist_fn(synapse: TestSynapse) -> tuple[bool, str]: + def blacklist_fn(synapse: TestSynapse) -> Tuple[bool, str]: return True, "" def priority_fn(synapse: TestSynapse) -> float: From 
ca3b183ed469cfc6d2765589b8cd7264c20a9338 Mon Sep 17 00:00:00 2001 From: ibraheem-opentensor Date: Wed, 25 Sep 2024 11:32:58 -0700 Subject: [PATCH 08/11] Changelog updated --- CHANGELOG.md | 38 +++++++++++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f95a9dbf5d..011a1464d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,9 +4,41 @@ ## What's Changed -* Removes Bittensor CLI and Wallet functionalities -* Changes the Bittensor SDK package to be light while maintaining backwards compatibility -* Bittensor SDK development by @RomanCh-OT in https://github.com/opentensor/bittensor/tree/btsdk +Removes Bittensor CLI and Wallet functionalities and changes the Bittensor SDK package to be light while maintaining backwards compatibility + +* Update README.md by @rajkaramchedu in https://github.com/opentensor/bittensor/pull/2320 +* remove unused code (tensor.py-> class tensor), remove old tests, add new tests by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2311 +* Updating/improving/creating docstring codebase by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2310 +* README updates for SDK by @rajkaramchedu in https://github.com/opentensor/bittensor/pull/2309 +* Improved logic for concatenating message, prefix, and suffix in bittensor logging + test by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2306 +* BTSDK: Implementation of substrait custom errors handler for bittensor by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2305 +* btsdk cleanup by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2303 +* Fix mypy error for btlogging by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2299 +* Integrate `bt_decode` into BTSDK by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2298 +* BTSDK: Corrected arguments order in logging methods + test by @roman-opentensor in 
https://github.com/opentensor/bittensor/pull/2292 +* BTSDK: removed exit sys call for ConnectionRefusedError in _get_substrate by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2288 +* BTSDK: Move `do*` methods to related extrinsic by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2286 +* add reconnection logic for correctly closed connection by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2283 +* Move extrinsics, update `deprecated.py` module. by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2278 +* Add substrate reconnection logic by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2269 +* Prod requirements cleanup by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2266 +* Decoupling chain_data.py to sub-package by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2264 +* Increase Bittensor SDK test coverage by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2262 +* Increase SDK test coverage (Part3) by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2257 +* Increase bittensor SDK test coverage by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2256 +* Increase test coverage for subtensor.py by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2252 +* Adds e2e and fixes metagraph save()/load() by @ibraheem-opentensor in https://github.com/opentensor/bittensor/pull/2231 +* feat/roman/reafctoring-before-test-coverage by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2230 +* Enhance: Switch from format() to f-strings by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2228 +* Commit-reveal re-added & e2e coverage by @ibraheem-opentensor in https://github.com/opentensor/bittensor/pull/2224 +* Adds e2e setup & tests by @ibraheem-opentensor in https://github.com/opentensor/bittensor/pull/2221 +* Updates after review session by 
@roman-opentensor in https://github.com/opentensor/bittensor/pull/2220 +* Fix the usage of env vars in default settings. by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2218 +* Add dendrite reference to backwords compatibility by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2217 +* Bringing `btsdk` up-to-date with `staging` branch. by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2210 +* Part 3: Create new 'bittensor-sdk` package by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2206 +* Part 2: Redesign, fix namespace conflicts, remove btcli by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2204 +* Part1: Removing content related to the wallet. Start use the pip installable package. by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2191 **Full Changelog**: https://github.com/opentensor/bittensor/compare/v7.4.0...v8.0.0 From c1cf21cf819ee01bfa69a2173b6d5b17f2b4ffaa Mon Sep 17 00:00:00 2001 From: Watchmaker Date: Wed, 25 Sep 2024 10:39:18 -0700 Subject: [PATCH 09/11] Update README.md --- README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 08f060aaf6..541b8ec6bb 100644 --- a/README.md +++ b/README.md @@ -152,13 +152,11 @@ You can install using any of the below options: pip install bittensor[torch] ``` -- **Install SDK with `cubit`**: Install Bittensor SDK with [`cubit`](https://pytorch.org/docs/stable/torch.html). - - ```python - pip install bittensor[cubit] - ``` - +- **Install SDK with `cubit`**: Install Bittensor SDK with [`cubit`](https://github.com/opentensor/cubit). + 1. Install `cubit` first. See the [Install](https://github.com/opentensor/cubit?tab=readme-ov-file#install) section. **Only Python 3.9 and 3.10 versions are supported**. + 2. Then install SDK with `pip install bittensor`. 
+ --- ## Install on Windows From 6a2ad87f2cde341f3a3eaacb0d7448f16bf93589 Mon Sep 17 00:00:00 2001 From: Roman <167799377+roman-opentensor@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:28:22 -0700 Subject: [PATCH 10/11] Merge pull request #2321 from opentensor/feat/roman/logging-default-warning Set `WARNING` level ad default logging level --- bittensor/utils/btlogging/loggingmachine.py | 2 +- tests/unit_tests/test_logging.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bittensor/utils/btlogging/loggingmachine.py b/bittensor/utils/btlogging/loggingmachine.py index 056aa206cd..b2cfb2918e 100644 --- a/bittensor/utils/btlogging/loggingmachine.py +++ b/bittensor/utils/btlogging/loggingmachine.py @@ -302,7 +302,7 @@ def after_transition(self, event, state): def before_enable_default(self): """Logs status before enable Default.""" self._logger.info(f"Enabling default logging.") - self._logger.setLevel(stdlogging.INFO) + self._logger.setLevel(stdlogging.WARNING) for logger in all_loggers(): if logger.name in self._primary_loggers: continue diff --git a/tests/unit_tests/test_logging.py b/tests/unit_tests/test_logging.py index 2c5e593f0e..2aa43d1e70 100644 --- a/tests/unit_tests/test_logging.py +++ b/tests/unit_tests/test_logging.py @@ -119,7 +119,7 @@ def test_state_transitions(logging_machine, mock_config): logging_machine.enable_default() assert logging_machine.current_state_value == "Default" # main logger set to INFO - mocked_bt_logger.setLevel.assert_called_with(stdlogging.INFO) + mocked_bt_logger.setLevel.assert_called_with(stdlogging.WARNING) # 3rd party loggers should be disabled by setting to CRITICAL mocked_third_party_logger.setLevel.assert_called_with(stdlogging.CRITICAL) From 884e2d5c143dbc7d5b3926d3271c1eab2ba05b0b Mon Sep 17 00:00:00 2001 From: ibraheem-opentensor Date: Wed, 25 Sep 2024 12:51:07 -0700 Subject: [PATCH 11/11] Updates requirements --- requirements/btcli.txt | 2 +- requirements/prod.txt | 2 +- setup.py | 1 - 3 files 
changed, 2 insertions(+), 3 deletions(-) diff --git a/requirements/btcli.txt b/requirements/btcli.txt index 429461facf..6a86c87bcb 100644 --- a/requirements/btcli.txt +++ b/requirements/btcli.txt @@ -1 +1 @@ -git+https://github.com/opentensor/btcli.git@main#egg=bittensor-cli \ No newline at end of file +bittensor-cli \ No newline at end of file diff --git a/requirements/prod.txt b/requirements/prod.txt index fab144bf76..631f949be3 100644 --- a/requirements/prod.txt +++ b/requirements/prod.txt @@ -20,4 +20,4 @@ python-Levenshtein scalecodec==1.2.11 substrate-interface~=1.7.9 uvicorn -git+https://github.com/opentensor/btwallet.git#egg=bittensor-wallet \ No newline at end of file +bittensor-wallet==1.0.0 \ No newline at end of file diff --git a/setup.py b/setup.py index bb0db98080..290274bd84 100644 --- a/setup.py +++ b/setup.py @@ -76,7 +76,6 @@ def read_requirements(path): install_requires=requirements, extras_require={ "btcli": extra_requirements_btcli, - "cubit": extra_requirements_cubit, "dev": extra_requirements_dev, "torch": extra_requirements_torch, },