diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2138ea4..0473afe 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -28,7 +28,7 @@ jobs:
python -m venv env/
. env/bin/activate
python -m pip install --upgrade pip
- pip install black==23.7.0
+ pip install black
- save_cache:
name: Save cached black venv
diff --git a/.dependencies_installed b/.dependencies_installed
deleted file mode 100644
index e69de29..0000000
diff --git a/.devcontainer/compose.yml b/.devcontainer/compose.yml
new file mode 100644
index 0000000..8cfda90
--- /dev/null
+++ b/.devcontainer/compose.yml
@@ -0,0 +1,26 @@
+version: '3.8'
+services:
+ # Update this to the name of the service you want to work with in your docker-compose.yml file
+ bittensor-dev:
+ # Uncomment if you want to override the service's Dockerfile to one in the .devcontainer
+ # folder. Note that the path of the Dockerfile and context is relative to the *primary*
+ # docker-compose.yml file (the first in the devcontainer.json "dockerComposeFile"
+ # array). The sample below assumes your primary file is in the root of your project.
+ #
+ # build:
+ # context: .
+ # dockerfile: .devcontainer/Dockerfile
+
+ volumes:
+ # Update this to wherever you want VS Code to mount the folder of your project
+ - ..:/workspaces:cached
+
+ # Uncomment the next four lines if you will use a ptrace-based debugger like C++, Go, and Rust.
+ # cap_add:
+ # - SYS_PTRACE
+ # security_opt:
+ # - seccomp:unconfined
+
+ # Overrides default command so things don't shut down after the process ends.
+ command: /bin/sh -c "while sleep 1000; do :; done"
+
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000..a2b2d14
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,61 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose
+{
+ "name": "Existing Docker Compose (Extend)",
+
+ // Update the 'dockerComposeFile' list if you have more compose files or use different names.
+ // The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
+ "dockerComposeFile": [
+ "../compose.yml",
+ "compose.yml"
+ ],
+
+ // The 'service' property is the name of the service for the container that VS Code should
+ // use. Update this value and .devcontainer/compose.yml to the real service name.
+ "service": "bittensor-dev",
+
+ // The optional 'workspaceFolder' property is the path VS Code should open by default when
+ // connected. This is typically a file mount in .devcontainer/compose.yml
+ "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
+ "features": {
+ "ghcr.io/devcontainers/features/rust:1": {
+ "installTools": true,
+ "version": "latest"
+ },
+ "ghcr.io/devcontainers/features/python:1": {
+ "installTools": true,
+ "version": "latest"
+ },
+ "ghcr.io/devcontainers-contrib/features/black:2": {
+ "version": "latest"
+ },
+ "ghcr.io/devcontainers-contrib/features/flake8:2": {
+ "version": "latest",
+ "plugins": "flake8-black flake8-isort flake8-print flake8-bandit flake8-pylint flake8-builtins flake8-spellcheck flake8-pytest-style flake8-django flake8-fastapi"
+ },
+ "ghcr.io/devcontainers-contrib/features/isort:2": {
+ "version": "latest"
+ }
+ },
+
+ // Features to add to the dev container. More info: https://containers.dev/features.
+ // "features": {},
+
+ // Use 'forwardPorts' to make a list of ports inside the container available locally.
+ // "forwardPorts": [],
+
+ // Uncomment the next line if you want start specific services in your Docker Compose config.
+ // "runServices": [],
+
+ // Uncomment the next line if you want to keep your containers running after VS Code shuts down.
+ // "shutdownAction": "none",
+
+ // Uncomment the next line to run commands after the container is created.
+ // "postCreateCommand": "cat /etc/os-release",
+
+ // Configure tool-specific properties.
+ // "customizations": {},
+
+  // Connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
+ "remoteUser": "nonroot"
+}
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..f33a02c
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,12 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for more information:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+# https://containers.dev/guide/dependabot
+
+version: 2
+updates:
+ - package-ecosystem: "devcontainers"
+ directory: "/"
+ schedule:
+ interval: weekly
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..c6c3187
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,37 @@
+FROM pytorch/pytorch:2.2.1-cuda12.1-cudnn8-runtime
+
+# Create a non-root user
+RUN useradd --create-home nonroot
+
+# Install dependencies
+RUN apt-get update && apt-get install -y \
+ make \
+ build-essential \
+ git \
+ clang \
+ curl \
+ libssl-dev \
+ llvm \
+ libudev-dev \
+ protobuf-compiler \
+ python3 \
+ python3-pip \
+ && rm -rf /var/lib/apt/lists/*
+
+# Switch to non-root user
+USER nonroot
+WORKDIR /home/nonroot
+
+# Install Rust and add wasm target
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \
+ && . "$HOME/.cargo/env" \
+ && rustup update nightly \
+ && rustup update stable \
+ && rustup target add wasm32-unknown-unknown --toolchain nightly
+
+# Update PATH environment variable
+ENV PATH="/home/nonroot/.local/bin:${PATH}"
+
+# Upgrade pip and install bittensor
+RUN pip install --no-cache-dir --upgrade pip \
+    && pip install --no-cache-dir bittensor
diff --git a/README.md b/README.md
index 7083168..d78a181 100644
--- a/README.md
+++ b/README.md
@@ -1,54 +1,145 @@
-
+
# **Bittensor Subnet Template**
+
[![Discord Chat](https://img.shields.io/discord/308323056592486420.svg)](https://discord.gg/bittensor)
-[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+
+
+
+---
+
+# Quickstart using VSCode + Remote Containers (recommended)
+
+1. Install Docker
+2. Install [VSCode](https://code.visualstudio.com/)
+3. Install the [Remote Development](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.vscode-remote-extensionpack) extension
+4. Clone this repository
+5. Create your application within a container (see gif below)
+
+![Create application within a container](./docs/vscode-open-in-container.gif)
+
+Once the container is running inside VSCode, you can run the project locally as follows:
+
+## Subtensor
+
+1. Create the owner and the subnet
+
+```sh
+./scripts/setup_owner_subnet.sh
+```
+
+2. Create a miner and register to the subnet
+
+```sh
+./scripts/setup_miner.sh [wallet_name]
+
+# Example: ./scripts/setup_miner.sh miner
+```
+
+3. Create a validator and register to the subnet
+
+```sh
+./scripts/setup_validator.sh [wallet_name]
+
+# Example: ./scripts/setup_validator.sh validator
+```
+
+## Bittensor
+
+### Run the OpenAI miner
+
+```bash
+./scripts/run_openai_miner.sh [wallet_name]
+
+# Example: ./scripts/run_openai_miner.sh miner
+```
+
+### Run a validator's neuron
+
+```bash
+./scripts/run_validator.sh [wallet_name]
+
+# Example: ./scripts/run_validator.sh validator
+```
+
+
+
+# Installation
+
+This repository requires python3.8 or higher. To install, simply clone this repository and install the requirements.
+
+```bash
+git clone https://github.com/nanlabs/bittensor-subnet-example.git
+cd bittensor-subnet-example
+python -m pip install -r requirements.txt
+python -m pip install -e .
+```
+
+If you are running a specific miner or validator, you might need to install its specific requirements. For example, the Langchain-based miner requires the following:
+
+```bash
+cd neurons/miners/openai
+python -m pip install -r requirements.txt
+```
---
## The Incentivized Internet
[Discord](https://discord.gg/bittensor) • [Network](https://taostats.io/) • [Research](https://bittensor.com/whitepaper)
+
---
-- [Quickstarter template](#quickstarter-template)
-- [Introduction](#introduction)
- - [Example](#example)
+
+- [Quickstart using VSCode + Remote Containers (recommended)](#quickstart-using-vscode--remote-containers-recommended)
+ - [Subtensor](#subtensor)
+ - [Bittensor](#bittensor)
+ - [Run the OpenAI miner](#run-the-openai-miner)
+ - [Run a validator's neuron](#run-a-validators-neuron)
- [Installation](#installation)
- - [Before you proceed](#before-you-proceed)
- - [Install](#install)
-- [Writing your own incentive mechanism](#writing-your-own-incentive-mechanism)
-- [Writing your own subnet API](#writing-your-own-subnet-api)
+ - [Quickstarter template](#quickstarter-template)
+ - [In order to simplify the building of subnets, this template abstracts away the complexity of the underlying blockchain and other boilerplate code. While the default behavior of the template is sufficient for a simple subnet, you should customize the template in order to meet your specific requirements](#in-order-to-simplify-the-building-of-subnets-this-template-abstracts-away-the-complexity-of-the-underlying-blockchain-and-other-boilerplate-code-while-the-default-behavior-of-the-template-is-sufficient-for-a-simple-subnet-you-should-customize-the-template-in-order-to-meet-your-specific-requirements)
+ - [Introduction](#introduction)
+ - [Example](#example)
+ - [Installation](#installation-1)
+ - [Before you proceed](#before-you-proceed)
+ - [Install](#install)
+ - [Writing your own incentive mechanism](#writing-your-own-incentive-mechanism)
- [Subnet Links](#subnet-links)
-- [License](#license)
+ - [License](#license)
---
+
## Quickstarter template
This template contains all the required installation instructions, scripts, and files and functions for:
+
- Building Bittensor subnets.
-- Creating custom incentive mechanisms and running these mechanisms on the subnets.
+- Creating custom incentive mechanisms and running these mechanisms on the subnets.
-In order to simplify the building of subnets, this template abstracts away the complexity of the underlying blockchain and other boilerplate code. While the default behavior of the template is sufficient for a simple subnet, you should customize the template in order to meet your specific requirements.
----
+## In order to simplify the building of subnets, this template abstracts away the complexity of the underlying blockchain and other boilerplate code. While the default behavior of the template is sufficient for a simple subnet, you should customize the template in order to meet your specific requirements
## Introduction
-**IMPORTANT**: If you are new to Bittensor subnets, read this section before proceeding to [Installation](#installation) section.
+**IMPORTANT**: If you are new to Bittensor subnets, read this section before proceeding to [Installation](#installation) section.
The Bittensor blockchain hosts multiple self-contained incentive mechanisms called **subnets**. Subnets are playing fields in which:
+
- Subnet miners who produce value, and
- Subnet validators who produce consensus
-determine together the proper distribution of TAO for the purpose of incentivizing the creation of value, i.e., generating digital commodities, such as intelligence or data.
+determine together the proper distribution of TAO for the purpose of incentivizing the creation of value, i.e., generating digital commodities, such as intelligence or data.
Each subnet consists of:
+
- Subnet miners and subnet validators.
- A protocol using which the subnet miners and subnet validators interact with one another. This protocol is part of the incentive mechanism.
-- The Bittensor API using which the subnet miners and subnet validators interact with Bittensor's onchain consensus engine [Yuma Consensus](https://bittensor.com/documentation/validating/yuma-consensus). The Yuma Consensus is designed to drive these actors: subnet validators and subnet miners, into agreement on who is creating value and what that value is worth.
+- The Bittensor API using which the subnet miners and subnet validators interact with Bittensor's onchain consensus engine [Yuma Consensus](https://bittensor.com/documentation/validating/yuma-consensus). The Yuma Consensus is designed to drive these actors: subnet validators and subnet miners, into agreement on who is creating value and what that value is worth.
This starter template is split into three primary files. To write your own incentive mechanism, you should edit these files. These files are:
+
1. `template/protocol.py`: Contains the definition of the protocol used by subnet miners and subnet validators.
2. `neurons/miner.py`: Script that defines the subnet miner's behavior, i.e., how the subnet miner responds to requests from subnet validators.
3. `neurons/validator.py`: This script defines the subnet validator's behavior, i.e., how the subnet validator requests information from the subnet miners and determines the scores.
@@ -62,13 +153,14 @@ The Bittensor Subnet 1 for Text Prompting is built using this template. See [Bit
## Installation
### Before you proceed
-Before you proceed with the installation of the subnet, note the following:
-- Use these instructions to run your subnet locally for your development and testing, or on Bittensor testnet or on Bittensor mainnet.
+Before you proceed with the installation of the subnet, note the following:
+
+- Use these instructions to run your subnet locally for your development and testing, or on Bittensor testnet or on Bittensor mainnet.
- **IMPORTANT**: We **strongly recommend** that you first run your subnet locally and complete your development and testing before running the subnet on Bittensor testnet. Furthermore, make sure that you next run your subnet on Bittensor testnet before running it on the Bittensor mainnet.
-- You can run your subnet either as a subnet owner, or as a subnet validator or as a subnet miner.
+- You can run your subnet either as a subnet owner, or as a subnet validator or as a subnet miner.
- **IMPORTANT:** Make sure you are aware of the minimum compute requirements for your subnet. See the [Minimum compute YAML configuration](./min_compute.yml).
-- Note that installation instructions differ based on your situation: For example, installing for local development and testing will require a few additional steps compared to installing for testnet. Similarly, installation instructions differ for a subnet owner vs a validator or a miner.
+- Note that installation instructions differ based on your situation: For example, installing for local development and testing will require a few additional steps compared to installing for testnet. Similarly, installation instructions differ for a subnet owner vs a validator or a miner.
### Install
@@ -81,6 +173,7 @@ Before you proceed with the installation of the subnet, note the following:
## Writing your own incentive mechanism
As described in [Quickstarter template](#quickstarter-template) section above, when you are ready to write your own incentive mechanism, update this template repository by editing the following files. The code in these files contains detailed documentation on how to update the template. Read the documentation in each of the files to understand how to update the template. There are multiple **TODO**s in each of the files identifying sections you should update. These files are:
+
- `template/protocol.py`: Contains the definition of the wire-protocol used by miners and validators.
- `neurons/miner.py`: Script that defines the miner's behavior, i.e., how the miner responds to requests from validators.
- `neurons/validator.py`: This script defines the validator's behavior, i.e., how the validator requests information from the miners and determines the scores.
@@ -88,203 +181,22 @@ As described in [Quickstarter template](#quickstarter-template) section above, w
- `template/reward.py`: Contains the definition of how validators reward miner responses.
In addition to the above files, you should also update the following files:
+
- `README.md`: This file contains the documentation for your project. Update this file to reflect your project's documentation.
- `CONTRIBUTING.md`: This file contains the instructions for contributing to your project. Update this file to reflect your project's contribution guidelines.
- `template/__init__.py`: This file contains the version of your project.
- `setup.py`: This file contains the metadata about your project. Update this file to reflect your project's metadata.
- `docs/`: This directory contains the documentation for your project. Update this directory to reflect your project's documentation.
-__Note__
-The `template` directory should also be renamed to your project name.
----
-
-# Writing your own subnet API
-To leverage the abstract `SubnetsAPI` in Bittensor, you can implement a standardized interface. This interface is used to interact with the Bittensor network and can is used by a client to interact with the subnet through its exposed axons.
-
-What does Bittensor communication entail? Typically two processes, (1) preparing data for transit (creating and filling `synapse`s) and (2), processing the responses received from the `axon`(s).
-
-This protocol uses a handler registry system to associate bespoke interfaces for subnets by implementing two simple abstract functions:
-- `prepare_synapse`
-- `process_responses`
-
-These can be implemented as extensions of the generic `SubnetsAPI` interface. E.g.:
-
+**Note**
+The `template` directory should also be renamed to your project name.
-This is abstract, generic, and takes(`*args`, `**kwargs`) for flexibility. See the extremely simple base class:
-```python
-class SubnetsAPI(ABC):
- def __init__(self, wallet: "bt.wallet"):
- self.wallet = wallet
- self.dendrite = bt.dendrite(wallet=wallet)
-
- async def __call__(self, *args, **kwargs):
- return await self.query_api(*args, **kwargs)
-
- @abstractmethod
- def prepare_synapse(self, *args, **kwargs) -> Any:
- """
- Prepare the synapse-specific payload.
- """
- ...
-
- @abstractmethod
- def process_responses(self, responses: List[Union["bt.Synapse", Any]]) -> Any:
- """
- Process the responses from the network.
- """
- ...
-
-```
-
-
-Here is a toy example:
-
-```python
-from bittensor.subnets import SubnetsAPI
-from MySubnet import MySynapse
-
-class MySynapseAPI(SubnetsAPI):
- def __init__(self, wallet: "bt.wallet"):
- super().__init__(wallet)
- self.netuid = 99
-
- def prepare_synapse(self, prompt: str) -> MySynapse:
- # Do any preparatory work to fill the synapse
- data = do_prompt_injection(prompt)
-
- # Fill the synapse for transit
- synapse = StoreUser(
- messages=[data],
- )
- # Send it along
- return synapse
-
- def process_responses(self, responses: List[Union["bt.Synapse", Any]]) -> str:
- # Look through the responses for information required by your application
- for response in responses:
- if response.dendrite.status_code != 200:
- continue
- # potentially apply post processing
- result_data = postprocess_data_from_response(response)
- # return data to the client
- return result_data
-```
-
-You can use a subnet API to the registry by doing the following:
-1. Download and install the specific repo you want
-1. Import the appropriate API handler from bespoke subnets
-1. Make the query given the subnet specific API
-
-
-See a simplified example for subnet 21 (`FileTao` storage) below. See `examples/subnet21.py` file for a full implementation example to follow:
-
-```python
+---
-# Subnet 21 Interface Example
-
-class StoreUserAPI(SubnetsAPI):
- def __init__(self, wallet: "bt.wallet"):
- super().__init__(wallet)
- self.netuid = 21
-
- def prepare_synapse(
- self,
- data: bytes,
- encrypt=False,
- ttl=60 * 60 * 24 * 30,
- encoding="utf-8",
- ) -> StoreUser:
- data = bytes(data, encoding) if isinstance(data, str) else data
- encrypted_data, encryption_payload = (
- encrypt_data(data, self.wallet) if encrypt else (data, "{}")
- )
- expected_cid = generate_cid_string(encrypted_data)
- encoded_data = base64.b64encode(encrypted_data)
-
- synapse = StoreUser(
- encrypted_data=encoded_data,
- encryption_payload=encryption_payload,
- ttl=ttl,
- )
-
- return synapse
-
- def process_responses(
- self, responses: List[Union["bt.Synapse", Any]]
- ) -> str:
- for response in responses:
- if response.dendrite.status_code != 200:
- continue
- stored_cid = (
- response.data_hash.decode("utf-8")
- if isinstance(response.data_hash, bytes)
- else response.data_hash
- )
- bt.logging.debug("received data CID: {}".format(stored_cid))
- break
-
- return stored_cid
-
-
-class RetrieveUserAPI(SubnetsAPI):
- def __init__(self, wallet: "bt.wallet"):
- super().__init__(wallet)
- self.netuid = 21
-
- def prepare_synapse(self, cid: str) -> RetrieveUser:
- synapse = RetrieveUser(data_hash=cid)
- return synapse
-
- def process_responses(self, responses: List[Union["bt.Synapse", Any]]) -> bytes:
- success = False
- decrypted_data = b""
- for response in responses:
- if response.dendrite.status_code != 200:
- continue
- decrypted_data = decrypt_data_with_private_key(
- encrypted_data,
- response.encryption_payload,
- bytes(self.wallet.coldkey.private_key.hex(), "utf-8"),
- )
- return data
-
-
-Example usage of the `FileTao` interface, which can serve as an example for other subnets.
-
-# import the bespoke subnet API
-from storage import StoreUserAPI, RetrieveUserAPI
-
-wallet = bt.wallet(wallet="default", hotkey="default") # the wallet used for querying
-metagraph = bt.metagraph(netuid=21) # metagraph of the subnet desired
-query_axons = metagraph.axons... # define custom logic to retrieve desired axons (e.g. validator set, specific miners, etc)
-
-# Store the data on subnet 21
-bt.logging.info(f"Initiating store_handler: {store_handler}")
-cid = await StoreUserAPI(
- axons=query_axons, # the axons you wish to query
- # Below: Parameters passed to `prepare_synapse` for this API subclass
- data=b"Hello Bittensor!",
- encrypt=False,
- ttl=60 * 60 * 24 * 30,
- encoding="utf-8",
- uid=None,
-)
-# The Content Identifier that corresponds to the stored data
-print(cid)
-> "bafkreifv6hp4o6bllj2nkdtzbq6uh7iia6bgqgd3aallvfhagym2s757v4
-
-# Now retrieve data from SN21 (storage)
-data = await RetrieveUserAPI(
- axons=query_axons, # axons desired to query
- cid=cid, # the content identifier to fetch the data
-)
-print(data)
-> b"Hello Bittensor!"
-```
+# Subnet Links
+In order to see real-world examples of subnets in-action, see the `subnet_links.json` document or access them from inside the `template` package by:
-# Subnet Links
-In order to see real-world examples of subnets in-action, see the `subnet_links.py` document or access them from inside the `template` package by:
```python
import template
template.SUBNET_LINKS
@@ -298,7 +210,9 @@ template.SUBNET_LINKS
```
## License
+
This repository is licensed under the MIT License.
+
```text
# The MIT License (MIT)
# Copyright © 2023 Yuma Rao
diff --git a/compose.yml b/compose.yml
new file mode 100644
index 0000000..ddc842b
--- /dev/null
+++ b/compose.yml
@@ -0,0 +1,32 @@
+version: '3.8'
+
+volumes:
+  subtensor-volume:
+  bittensor-volume:
+
+services:
+  common-subtensor: &common-subtensor
+    build:
+      context: ./docker/subtensor
+      dockerfile: Dockerfile
+    cpu_count: 4
+    mem_limit: 40000000000
+    memswap_limit: 80000000000
+
+  subtensor:
+    <<: *common-subtensor
+    volumes:
+      - subtensor-volume:/tmp/blockchain
+
+  common-bittensor: &common-bittensor
+    build:
+      context: .
+      dockerfile: Dockerfile
+    cpu_count: 4
+    mem_limit: 40000000000
+    memswap_limit: 80000000000
+
+  bittensor-dev:
+    <<: *common-bittensor
+    volumes:
+      - bittensor-volume:/tmp/bittensor
diff --git a/docker/subtensor/Dockerfile b/docker/subtensor/Dockerfile
new file mode 100644
index 0000000..6424ca7
--- /dev/null
+++ b/docker/subtensor/Dockerfile
@@ -0,0 +1,79 @@
+FROM ubuntu:20.04 as subtensor-builder
+
+# Install dependencies
+RUN apt-get update && apt-get install -y \
+ curl \
+ build-essential \
+ protobuf-compiler \
+ clang \
+ git \
+ && rm -rf /var/lib/apt/lists/*
+
+# Install Rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+ENV PATH="/root/.cargo/bin:${PATH}"
+
+# Clone and build Subtensor
+RUN git clone https://github.com/opentensor/subtensor.git /subtensor && \
+ cd /subtensor && \
+ ./scripts/init.sh && \
+ cargo build --release --features pow-faucet
+
+# Start with the PyTorch base image
+FROM pytorch/pytorch:2.2.1-cuda12.1-cudnn8-runtime
+
+# Create a non-root user for better security
+RUN useradd --create-home nonroot
+
+# Install dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ curl \
+ python3-pip \
+ git \
+ make \
+ build-essential \
+ clang \
+ libssl-dev \
+ llvm \
+ libudev-dev \
+ protobuf-compiler \
+ && rm -rf /var/lib/apt/lists/*
+
+# Switch to non-root user
+USER nonroot
+WORKDIR /home/nonroot
+
+# Install Rust for the non-root user
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \
+ && . "$HOME/.cargo/env" \
+ && rustup update nightly \
+ && rustup update stable \
+ && rustup target add wasm32-unknown-unknown --toolchain nightly
+
+# Create necessary directories
+RUN mkdir -p /home/nonroot/scripts/subtensor \
+ && mkdir /home/nonroot/target \
+ && mkdir /home/nonroot/target/release
+
+# Copy necessary files from the builder stage
+COPY --chown=nonroot:nonroot --from=subtensor-builder /subtensor/target/release/node-subtensor /home/nonroot/target/release/
+COPY --chown=nonroot:nonroot --from=subtensor-builder /subtensor/scripts/* /home/nonroot/scripts/subtensor/
+COPY --chown=nonroot:nonroot --from=subtensor-builder /subtensor/Cargo.toml /home/nonroot/
+
+# Ensure scripts are executable
+RUN chmod +x /home/nonroot/scripts/subtensor/*
+
+# Set PATH environment variable
+ENV PATH="/home/nonroot/.local/bin:${PATH}"
+
+# Upgrade pip and install Python dependencies
+RUN pip install --no-cache-dir --upgrade pip \
+ && pip install --no-cache-dir bittensor
+
+# Copy the entrypoint script
+COPY --chown=nonroot:nonroot entrypoint.sh /home/nonroot
+
+RUN chmod +x /home/nonroot/entrypoint.sh
+
+# Set the ENTRYPOINT
+ENTRYPOINT ["/home/nonroot/entrypoint.sh"]
diff --git a/docker/subtensor/entrypoint.sh b/docker/subtensor/entrypoint.sh
new file mode 100644
index 0000000..638cfbd
--- /dev/null
+++ b/docker/subtensor/entrypoint.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+FEATURES='pow-faucet runtime-benchmarks' bash ./scripts/subtensor/localnet.sh
diff --git a/docs/vscode-open-in-container.gif b/docs/vscode-open-in-container.gif
new file mode 100644
index 0000000..eed3e75
Binary files /dev/null and b/docs/vscode-open-in-container.gif differ
diff --git a/neurons/__init__.py b/neurons/__init__.py
index e69de29..4600a7f 100644
--- a/neurons/__init__.py
+++ b/neurons/__init__.py
@@ -0,0 +1,19 @@
+# The MIT License (MIT)
+# Copyright © 2023 Yuma Rao
+# Copyright © 2024 nanlabs
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+from . import validator
diff --git a/neurons/miner.py b/neurons/miner.py
deleted file mode 100644
index d764a4e..0000000
--- a/neurons/miner.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# The MIT License (MIT)
-# Copyright © 2023 Yuma Rao
-# TODO(developer): Set your name
-# Copyright © 2023
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-# the Software.
-
-# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-import time
-import typing
-import bittensor as bt
-
-# Bittensor Miner Template:
-import template
-
-# import base miner class which takes care of most of the boilerplate
-from template.base.miner import BaseMinerNeuron
-
-
-class Miner(BaseMinerNeuron):
- """
- Your miner neuron class. You should use this class to define your miner's behavior. In particular, you should replace the forward function with your own logic. You may also want to override the blacklist and priority functions according to your needs.
-
- This class inherits from the BaseMinerNeuron class, which in turn inherits from BaseNeuron. The BaseNeuron class takes care of routine tasks such as setting up wallet, subtensor, metagraph, logging directory, parsing config, etc. You can override any of the methods in BaseNeuron if you need to customize the behavior.
-
- This class provides reasonable default behavior for a miner such as blacklisting unrecognized hotkeys, prioritizing requests based on stake, and forwarding requests to the forward function. If you need to define custom
- """
-
- def __init__(self, config=None):
- super(Miner, self).__init__(config=config)
-
- # TODO(developer): Anything specific to your use case you can do here
-
- async def forward(
- self, synapse: template.protocol.Dummy
- ) -> template.protocol.Dummy:
- """
- Processes the incoming 'Dummy' synapse by performing a predefined operation on the input data.
- This method should be replaced with actual logic relevant to the miner's purpose.
-
- Args:
- synapse (template.protocol.Dummy): The synapse object containing the 'dummy_input' data.
-
- Returns:
- template.protocol.Dummy: The synapse object with the 'dummy_output' field set to twice the 'dummy_input' value.
-
- The 'forward' function is a placeholder and should be overridden with logic that is appropriate for
- the miner's intended operation. This method demonstrates a basic transformation of input data.
- """
- # TODO(developer): Replace with actual implementation logic.
- synapse.dummy_output = synapse.dummy_input * 2
- return synapse
-
- async def blacklist(
- self, synapse: template.protocol.Dummy
- ) -> typing.Tuple[bool, str]:
- """
- Determines whether an incoming request should be blacklisted and thus ignored. Your implementation should
- define the logic for blacklisting requests based on your needs and desired security parameters.
-
- Blacklist runs before the synapse data has been deserialized (i.e. before synapse.data is available).
- The synapse is instead contructed via the headers of the request. It is important to blacklist
- requests before they are deserialized to avoid wasting resources on requests that will be ignored.
-
- Args:
- synapse (template.protocol.Dummy): A synapse object constructed from the headers of the incoming request.
-
- Returns:
- Tuple[bool, str]: A tuple containing a boolean indicating whether the synapse's hotkey is blacklisted,
- and a string providing the reason for the decision.
-
- This function is a security measure to prevent resource wastage on undesired requests. It should be enhanced
- to include checks against the metagraph for entity registration, validator status, and sufficient stake
- before deserialization of synapse data to minimize processing overhead.
-
- Example blacklist logic:
- - Reject if the hotkey is not a registered entity within the metagraph.
- - Consider blacklisting entities that are not validators or have insufficient stake.
-
- In practice it would be wise to blacklist requests from entities that are not validators, or do not have
- enough stake. This can be checked via metagraph.S and metagraph.validator_permit. You can always attain
- the uid of the sender via a metagraph.hotkeys.index( synapse.dendrite.hotkey ) call.
-
- Otherwise, allow the request to be processed further.
- """
- # TODO(developer): Define how miners should blacklist requests.
- uid = self.metagraph.hotkeys.index(synapse.dendrite.hotkey)
- if (
- not self.config.blacklist.allow_non_registered
- and synapse.dendrite.hotkey not in self.metagraph.hotkeys
- ):
- # Ignore requests from un-registered entities.
- bt.logging.trace(
- f"Blacklisting un-registered hotkey {synapse.dendrite.hotkey}"
- )
- return True, "Unrecognized hotkey"
-
- if self.config.blacklist.force_validator_permit:
- # If the config is set to force validator permit, then we should only allow requests from validators.
- if not self.metagraph.validator_permit[uid]:
- bt.logging.warning(
- f"Blacklisting a request from non-validator hotkey {synapse.dendrite.hotkey}"
- )
- return True, "Non-validator hotkey"
-
- bt.logging.trace(
- f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}"
- )
- return False, "Hotkey recognized!"
-
- async def priority(self, synapse: template.protocol.Dummy) -> float:
- """
- The priority function determines the order in which requests are handled. More valuable or higher-priority
- requests are processed before others. You should design your own priority mechanism with care.
-
- This implementation assigns priority to incoming requests based on the calling entity's stake in the metagraph.
-
- Args:
- synapse (template.protocol.Dummy): The synapse object that contains metadata about the incoming request.
-
- Returns:
- float: A priority score derived from the stake of the calling entity.
-
- Miners may recieve messages from multiple entities at once. This function determines which request should be
- processed first. Higher values indicate that the request should be processed first. Lower values indicate
- that the request should be processed later.
-
- Example priority logic:
- - A higher stake results in a higher priority value.
- """
- # TODO(developer): Define how miners should prioritize requests.
- caller_uid = self.metagraph.hotkeys.index(
- synapse.dendrite.hotkey
- ) # Get the caller index.
- prirority = float(
- self.metagraph.S[caller_uid]
- ) # Return the stake as the priority.
- bt.logging.trace(
- f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority
- )
- return prirority
-
-
-# This is the main function, which runs the miner.
-if __name__ == "__main__":
- with Miner() as miner:
- while True:
- bt.logging.info("Miner running...", time.time())
- time.sleep(5)
diff --git a/neurons/miners/openai/README.md b/neurons/miners/openai/README.md
new file mode 100644
index 0000000..200ddb3
--- /dev/null
+++ b/neurons/miners/openai/README.md
@@ -0,0 +1,34 @@
+# OpenAI Bittensor Miner
+
+This repository contains a Bittensor Miner that uses OpenAI's GPT-3.5-turbo model as its synapse. The miner connects to the Bittensor network, registers its wallet, and serves the GPT-3.5-turbo model to the network by attaching the prompt function to the axon.
+
+## Prerequisites
+
+- Python 3.8+
+- OpenAI Python API (install with `pip install openai`)
+
+## Installation
+
+1. Clone the repository
+2. Install the required packages with `pip install -r requirements.txt`
+3. Ensure that you have your OpenAI key in your os environment variable
+
+```bash
+# Sets your OpenAI API key as an OS environment variable
+export OPENAI_API_KEY='your_openai_key_here'
+
+# Verifies if openai key is set correctly
+echo "$OPENAI_API_KEY"
+```
+
+For more configuration options related to the wallet, axon, subtensor, logging, and metagraph, please refer to the Bittensor documentation.
+
+## Example Usage
+
+To run the OpenAI Bittensor Miner with default settings, use the following command:
+
+```bash
+python3 -m pip install -r ./neurons/miners/openai/requirements.txt
+export OPENAI_API_KEY='sk-yourkey'
+python3 ./neurons/miners/openai/miner.py
+```
diff --git a/neurons/miners/openai/miner.py b/neurons/miners/openai/miner.py
new file mode 100644
index 0000000..4db613d
--- /dev/null
+++ b/neurons/miners/openai/miner.py
@@ -0,0 +1,276 @@
+# The MIT License (MIT)
+# Copyright © 2023 Yuma Rao
+# Copyright © 2024 nanlabs
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import argparse
+import openai
+import bittensor as bt
+import os
+import time
+import typing
+
+# Bittensor Miner Template:
+import prompting
+
+# import base miner class which takes care of most of the boilerplate
+from prompting.base.miner import BaseMinerNeuron
+
+
+class OpenAIMiner(BaseMinerNeuron):
+ """Langchain-based miner which uses OpenAI's API as the LLM.
+
+ You should also install the dependencies for this miner, which can be found in the requirements.txt file in this directory.
+ """
+
+ @classmethod
+ def add_args(cls, parser: argparse.ArgumentParser):
+ """
+ Adds OpenAI-specific arguments to the command line parser.
+ """
+ super().add_args(parser)
+
+ def __init__(self, config=None):
+ super().__init__(config=config)
+
+ parser = argparse.ArgumentParser(description="OpenAI Miner")
+
+ parser.add_argument(
+ "--openai.api_key",
+ type=str,
+ default=None,
+ help="OpenAI API key for authenticating requests.",
+ )
+
+ parser.add_argument(
+ "--openai.suffix",
+ type=str,
+ default=None,
+ help="The suffix that comes after a completion of inserted text.",
+ )
+ parser.add_argument(
+ "--openai.max_tokens",
+ type=int,
+ default=100,
+ help="The maximum number of tokens to generate in the completion.",
+ )
+ parser.add_argument(
+ "--openai.temperature",
+ type=float,
+ default=0.4,
+ help="Sampling temperature to use, between 0 and 2.",
+ )
+ parser.add_argument(
+ "--openai.top_p",
+ type=float,
+ default=1,
+ help="Nucleus sampling parameter, top_p probability mass.",
+ )
+ parser.add_argument(
+ "--openai.n",
+ type=int,
+ default=1,
+ help="How many completions to generate for each prompt.",
+ )
+ parser.add_argument(
+ "--openai.presence_penalty",
+ type=float,
+ default=0.1,
+ help="Penalty for tokens based on their presence in the text so far.",
+ )
+ parser.add_argument(
+ "--openai.frequency_penalty",
+ type=float,
+ default=0.1,
+ help="Penalty for tokens based on their frequency in the text so far.",
+ )
+ parser.add_argument(
+ "--openai.model_name",
+ type=str,
+ default="gpt-3.5-turbo",
+ help="OpenAI model to use for completion.",
+ )
+
+ self.add_args(parser)
+
+ # Load the configuration for the miner
+ config = self.config
+
+ # Log the model being used for completion
+ bt.logging.info(f"Initializing with model {config.openai.model_name}")
+
+ api_key = config.openai.api_key # Fetch from configuration
+ if api_key is None:
+ api_key = os.getenv("OPENAI_API_KEY") # Fallback to environment variable
+ if api_key is None:
+ raise ValueError(
+ "OpenAI API key is required: the miner requires an `OPENAI_API_KEY` either passed directly to the constructor, defined in the configuration, or set in the environment variables."
+ )
+
+ # Additional configurations for wandb
+ if config.wandb.on:
+ self.wandb_run.tags = self.wandb_run.tags + ("openai_miner",)
+
+ # Set the OpenAI API key
+ openai.api_key = api_key
+
+ self.client = openai.OpenAI(api_key=api_key)
+
+ async def forward(
+ self, synapse: prompting.protocol.Prompting
+ ) -> prompting.protocol.Prompting:
+ """
+ Processes the incoming synapse by performing a predefined operation on the input data.
+ This method should be replaced with actual logic relevant to the miner's purpose.
+
+ Args:
+ synapse (Prompting): The synapse object containing the input data to be processed.
+
+ Returns:
+ Prompting: The synapse object with the processed data.
+
+ The 'forward' function is a placeholder and should be overridden with logic that is appropriate for
+ the miner's intended operation. This method demonstrates a basic transformation of input data.
+ """
+ try:
+ start_time = time.time()
+ bt.logging.debug(f"Message received, forwarding synapse: {synapse}")
+
+ messages = [
+ (
+ {
+ "role": message.name,
+ "content": self.append_criteria(
+ message.content + synapse.character_info, synapse.criteria
+ ),
+ }
+ if message.name == "system"
+ else {"role": message.name, "content": message.content}
+ )
+ for message in synapse.messages
+ ]
+ bt.logging.debug(f"messages: {messages}")
+
+ bt.logging.debug(f"💬 Querying openai with message: {messages}")
+ response = (
+ self.client.chat.completions.create(
+ model=self.config.openai.model_name,
+ messages=messages,
+ temperature=self.config.openai.temperature,
+ max_tokens=self.config.openai.max_tokens,
+ top_p=self.config.openai.top_p,
+ frequency_penalty=self.config.openai.frequency_penalty,
+ presence_penalty=self.config.openai.presence_penalty,
+ n=self.config.openai.n,
+ )
+ .choices[0]
+ .message.content
+ )
+ synapse.completion = response
+ synapse_latency = time.time() - start_time
+ # Log the time taken to process the request.
+ bt.logging.info(f"Processed synapse in {synapse_latency} seconds.")
+
+ bt.logging.debug(f"✅ Served Response: {response}")
+ return synapse
+ except Exception as e:
+ bt.logging.error(f"Error in forward: {e}")
+ synapse.completion = "Error: " + str(e)
+ finally:
+ return synapse
+
+ async def blacklist(
+ self, synapse: prompting.protocol.Prompting
+ ) -> typing.Tuple[bool, str]:
+ """
+ Determines whether an incoming request should be blacklisted and thus ignored. Your implementation should
+ define the logic for blacklisting requests based on your needs and desired security parameters.
+
+ Blacklist runs before the synapse data has been deserialized (i.e. before synapse.data is available).
+    The synapse is instead constructed via the headers of the request. It is important to blacklist
+ requests before they are deserialized to avoid wasting resources on requests that will be ignored.
+
+ Args:
+ synapse (Prompting): A synapse object constructed from the headers of the incoming request.
+
+ Returns:
+ Tuple[bool, str]: A tuple containing a boolean indicating whether the synapse's hotkey is blacklisted,
+ and a string providing the reason for the decision.
+
+ This function is a security measure to prevent resource wastage on undesired requests. It should be enhanced
+ to include checks against the metagraph for entity registration, validator status, and sufficient stake
+ before deserialization of synapse data to minimize processing overhead.
+
+ Example blacklist logic:
+ - Reject if the hotkey is not a registered entity within the metagraph.
+ - Consider blacklisting entities that are not validators or have insufficient stake.
+
+ In practice it would be wise to blacklist requests from entities that are not validators, or do not have
+ enough stake. This can be checked via metagraph.S and metagraph.validator_permit. You can always attain
+ the uid of the sender via a metagraph.hotkeys.index( synapse.dendrite.hotkey ) call.
+
+ Otherwise, allow the request to be processed further.
+ """
+ if synapse.dendrite.hotkey not in self.metagraph.hotkeys:
+ # Ignore requests from unrecognized entities.
+ bt.logging.trace(
+ f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}"
+ )
+ return True, "Unrecognized hotkey"
+
+ bt.logging.trace(
+ f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}"
+ )
+ return False, "Hotkey recognized!"
+
+ async def priority(self, synapse: prompting.protocol.Prompting) -> float:
+ """
+ The priority function determines the order in which requests are handled. More valuable or higher-priority
+ requests are processed before others. You should design your own priority mechanism with care.
+
+ This implementation assigns priority to incoming requests based on the calling entity's stake in the metagraph.
+
+ Args:
+ synapse (Prompting): The synapse object that contains metadata about the incoming request.
+
+ Returns:
+ float: A priority score derived from the stake of the calling entity.
+
+    Miners may receive messages from multiple entities at once. This function determines which request should be
+ processed first. Higher values indicate that the request should be processed first. Lower values indicate
+ that the request should be processed later.
+
+ Example priority logic:
+ - A higher stake results in a higher priority value.
+ """
+ caller_uid = self.metagraph.hotkeys.index(
+ synapse.dendrite.hotkey
+ ) # Get the caller index.
+ priority = float(
+ self.metagraph.S[caller_uid]
+ ) # Return the stake as the priority.
+ bt.logging.trace(
+ f"Prioritizing {synapse.dendrite.hotkey} with value: ", priority
+ )
+ return priority
+
+
+# This is the main function, which runs the miner.
+if __name__ == "__main__":
+ with OpenAIMiner() as miner:
+ while True:
+ bt.logging.info("Miner running...", time.time())
+ time.sleep(5)
diff --git a/neurons/miners/openai/requirements.txt b/neurons/miners/openai/requirements.txt
new file mode 100644
index 0000000..ec838c5
--- /dev/null
+++ b/neurons/miners/openai/requirements.txt
@@ -0,0 +1 @@
+openai
diff --git a/template/api/dummy.py b/neurons/validators/__init__.py
similarity index 55%
rename from template/api/dummy.py
rename to neurons/validators/__init__.py
index f6a433f..4600a7f 100644
--- a/template/api/dummy.py
+++ b/neurons/validators/__init__.py
@@ -1,7 +1,6 @@
# The MIT License (MIT)
-# Copyright © 2021 Yuma Rao
-# Copyright © 2023 Opentensor Foundation
-# Copyright © 2023 Opentensor Technologies Inc
+# Copyright © 2023 Yuma Rao
+# Copyright © 2024 nanlabs
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -17,28 +16,4 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-import bittensor as bt
-from typing import List, Optional, Union, Any, Dict
-from template.protocol import Dummy
-from bittensor.subnets import SubnetsAPI
-
-
-class DummyAPI(SubnetsAPI):
- def __init__(self, wallet: "bt.wallet"):
- super().__init__(wallet)
- self.netuid = 33
- self.name = "dummy"
-
- def prepare_synapse(self, dummy_input: int) -> Dummy:
- synapse.dummy_input = dummy_input
- return synapse
-
- def process_responses(
- self, responses: List[Union["bt.Synapse", Any]]
- ) -> List[int]:
- outputs = []
- for response in responses:
- if response.dendrite.status_code != 200:
- continue
- return outputs.append(response.dummy_output)
- return outputs
+from . import validator
diff --git a/neurons/validator.py b/neurons/validators/validator.py
similarity index 91%
rename from neurons/validator.py
rename to neurons/validators/validator.py
index 7b50202..4537908 100644
--- a/neurons/validator.py
+++ b/neurons/validators/validator.py
@@ -1,7 +1,6 @@
# The MIT License (MIT)
# Copyright © 2023 Yuma Rao
-# TODO(developer): Set your name
-# Copyright © 2023
+# Copyright © 2024 nanlabs
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -24,11 +23,11 @@
import bittensor as bt
# Bittensor Validator Template:
-import template
-from template.validator import forward
+import prompting
+from prompting.validator import forward
# import base validator class which takes care of most of the boilerplate
-from template.base.validator import BaseValidatorNeuron
+from prompting.base.validator import BaseValidatorNeuron
class Validator(BaseValidatorNeuron):
@@ -57,7 +56,6 @@ async def forward(self):
- Rewarding the miners
- Updating the scores
"""
- # TODO(developer): Rewrite this function based on your protocol definition.
return await forward(self)
diff --git a/template/__init__.py b/prompting/__init__.py
similarity index 86%
rename from template/__init__.py
rename to prompting/__init__.py
index cb07b8c..3f48e79 100644
--- a/template/__init__.py
+++ b/prompting/__init__.py
@@ -1,7 +1,6 @@
# The MIT License (MIT)
# Copyright © 2023 Yuma Rao
-# TODO(developer): Set your name
-# Copyright © 2023
+# Copyright © 2024 nanlabs
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -17,9 +16,8 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-# TODO(developer): Change this value when updating your code base.
# Define the version of the template module.
-__version__ = "0.0.0"
+__version__ = "0.1.0"
version_split = __version__.split(".")
__spec_version__ = (
(1000 * int(version_split[0]))
@@ -31,5 +29,10 @@
from . import protocol
from . import base
from . import validator
-from . import api
-from .subnet_links import SUBNET_LINKS
+
+import json
+
+SUBNET_LINKS = None
+with open("subnet_links.json") as f:
+ links_dict = json.load(f)
+ SUBNET_LINKS = links_dict.get("subnet_repositories", None)
diff --git a/template/api/__init__.py b/prompting/base/__init__.py
similarity index 100%
rename from template/api/__init__.py
rename to prompting/base/__init__.py
diff --git a/template/base/miner.py b/prompting/base/miner.py
similarity index 98%
rename from template/base/miner.py
rename to prompting/base/miner.py
index e906310..f67e96c 100644
--- a/template/base/miner.py
+++ b/prompting/base/miner.py
@@ -24,8 +24,8 @@
import bittensor as bt
-from template.base.neuron import BaseNeuron
-from template.utils.config import add_miner_args
+from prompting.base.neuron import BaseNeuron
+from prompting.utils.config import add_miner_args
class BaseMinerNeuron(BaseNeuron):
diff --git a/template/base/neuron.py b/prompting/base/neuron.py
similarity index 96%
rename from template/base/neuron.py
rename to prompting/base/neuron.py
index d3cbbc6..d5c2f8c 100644
--- a/template/base/neuron.py
+++ b/prompting/base/neuron.py
@@ -23,10 +23,10 @@
from abc import ABC, abstractmethod
# Sync calls set weights and also resyncs the metagraph.
-from template.utils.config import check_config, add_args, config
-from template.utils.misc import ttl_get_block
-from template import __spec_version__ as spec_version
-from template.mock import MockSubtensor, MockMetagraph
+from prompting.utils.config import check_config, add_args, config
+from prompting.utils.misc import ttl_get_block
+from prompting import __spec_version__ as spec_version
+from prompting.mock import MockSubtensor, MockMetagraph
class BaseNeuron(ABC):
diff --git a/template/base/validator.py b/prompting/base/validator.py
similarity index 98%
rename from template/base/validator.py
rename to prompting/base/validator.py
index 2c030db..67acd1b 100644
--- a/template/base/validator.py
+++ b/prompting/base/validator.py
@@ -1,7 +1,6 @@
# The MIT License (MIT)
# Copyright © 2023 Yuma Rao
-# TODO(developer): Set your name
-# Copyright © 2023
+# Copyright © 2024 nanlabs
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -28,9 +27,9 @@
from typing import List
from traceback import print_exception
-from template.base.neuron import BaseNeuron
-from template.mock import MockDendrite
-from template.utils.config import add_validator_args
+from prompting.base.neuron import BaseNeuron
+from prompting.mock import MockDendrite
+from prompting.utils.config import add_validator_args
class BaseValidatorNeuron(BaseNeuron):
diff --git a/template/mock.py b/prompting/mock.py
similarity index 95%
rename from template/mock.py
rename to prompting/mock.py
index 69eb78d..e916f86 100644
--- a/template/mock.py
+++ b/prompting/mock.py
@@ -88,13 +88,12 @@ async def single_axon_response(i, axon):
if process_time < timeout:
s.dendrite.process_time = str(time.time() - start_time)
# Update the status code and status message of the dendrite to match the axon
- # TODO (developer): replace with your own expected synapse data
- s.dummy_output = s.dummy_input * 2
+ s.completion = s.messages[0].content
s.dendrite.status_code = 200
s.dendrite.status_message = "OK"
synapse.dendrite.process_time = str(process_time)
else:
- s.dummy_output = 0
+ s.completion = ""
s.dendrite.status_code = 408
s.dendrite.status_message = "Timeout"
synapse.dendrite.process_time = str(timeout)
diff --git a/prompting/protocol.py b/prompting/protocol.py
new file mode 100644
index 0000000..d1a2c82
--- /dev/null
+++ b/prompting/protocol.py
@@ -0,0 +1,148 @@
+# The MIT License (MIT)
+# Copyright © 2023 Yuma Rao
+# Copyright © 2024 nanlabs
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+from pydantic import BaseModel, Field
+from typing import List
+import bittensor as bt
+
+
+class Message(BaseModel):
+ content: str = Field(
+ ..., title="Content", description="The content of the message."
+ )
+
+
+class PromptingMixin(BaseModel):
+ """
+ A Pydantic model representing a chat session between a single user and a large language model (LLM),
+ potentially extending functionality from a base class for integration into a broader system.
+
+ This model manages the chat session, including initializing the session with LLM details,
+ handling message exchange, and updating the chat's completion status.
+
+ Attributes:
+ character_info (str): Descriptive information about the LLM, such as its version or capabilities.
+ criteria (List[str]): Guidelines or criteria for the LLM's responses to ensure they meet certain standards or styles.
+ messages (List[Message]): Records the history of messages exchanged in the session.
+ completion (str): Tracks the latest LLM response or the overall completion status of the chat session.
+
+ Example of Usage:
+ ```python
+ # Initialize a chat session with LLM details and criteria
+ chat_session = PromptingMixin(
+ character_info="GPT-4, the latest language model.",
+ criteria=["Be informative and engaging."],
+ messages=[],
+ )
+
+ # Add a message to the session
+ chat_session.add_message("What is the weather like today?")
+
+ # Update the session's completion status after getting a response
+ chat_session.update_completion("It's sunny and warm outside.")
+ ```
+ """
+
+ class Config:
+ """
+ Pydantic model configuration class for Prompting. This class sets validation of attribute assignment as True.
+ validate_assignment set to True means the pydantic model will validate attribute assignments on the class.
+ """
+
+ validate_assignment = True
+
+ character_info: str = Field(
+ ...,
+ title="Character Info",
+ description="Information about the LLM.",
+ allow_mutation=False,
+ )
+ criteria: List[str] = Field(
+ ...,
+ title="Criteria",
+ description="Criteria guiding the LLM's responses.",
+ allow_mutation=False,
+ )
+ messages: List[Message] = Field(
+ ...,
+ title="Messages",
+ description="Dialogue history of the chat session.",
+ allow_mutation=False,
+ )
+ completion: str = Field(
+ "",
+ title="Completion",
+ description="Latest response or completion status of the chat.",
+ )
+
+ def add_message(self, content: str):
+ """
+ Adds a new message to the dialogue history.
+
+ Parameters:
+ content (str): The content of the message to be added.
+ """
+ self.messages.append(Message(content=content))
+
+ def update_completion(self, completion: str):
+ """
+ Updates the completion status of the chat.
+
+ Parameters:
+ completion (str): The new completion status or LLM's response.
+ """
+ self.completion = completion
+
+class Prompting(PromptingMixin, bt.Synapse):
+ """
+ The Prompting class encapsulates functionalities related to a simplified chat session
+ between a single user and an LLM, leveraging the infrastructure or methods provided by Synapse.
+
+    This class inherits from PromptingMixin to manage the chat details and from Synapse to incorporate
+ any additional functionalities or requirements specific to the underlying system or LLM interaction.
+
+ Methods such as `deserialize` from Synapse can be utilized or overridden here to suit the
+ deserialization needs of the Prompting instances, alongside any other methods that Synapse might offer.
+
+ Example of Usage:
+ ```python
+ # Assuming Synapse provides certain functionalities required for integration
+ prompting = Prompting(
+ character_info="GPT-4, for engaging and informative conversations.",
+ criteria=["Ensure accuracy.", "Maintain a friendly tone."],
+ messages=[],
+ )
+
+ # Interacting with the LLM
+ prompting.add_message("Tell me a joke.")
+ prompting.update_completion("Why did the computer go to the doctor? Because it had a virus!")
+
+ # Utilizing Synapse specific methods, if any
+ deserialized_prompting = prompting.deserialize()
+ ```
+ """
+
+ def deserialize(self) -> "Prompting":
+ """
+ Returns the instance of the current Prompting object, potentially utilizing
+ custom deserialization logic provided by Synapse or defined specifically for Prompting.
+
+ Returns:
+ Prompting: The current instance of the Prompting class.
+ """
+ return self
diff --git a/template/utils/__init__.py b/prompting/utils/__init__.py
similarity index 100%
rename from template/utils/__init__.py
rename to prompting/utils/__init__.py
diff --git a/template/utils/config.py b/prompting/utils/config.py
similarity index 100%
rename from template/utils/config.py
rename to prompting/utils/config.py
diff --git a/template/utils/misc.py b/prompting/utils/misc.py
similarity index 100%
rename from template/utils/misc.py
rename to prompting/utils/misc.py
diff --git a/template/utils/uids.py b/prompting/utils/uids.py
similarity index 100%
rename from template/utils/uids.py
rename to prompting/utils/uids.py
diff --git a/template/validator/__init__.py b/prompting/validator/__init__.py
similarity index 100%
rename from template/validator/__init__.py
rename to prompting/validator/__init__.py
diff --git a/template/validator/forward.py b/prompting/validator/forward.py
similarity index 81%
rename from template/validator/forward.py
rename to prompting/validator/forward.py
index e269023..039b906 100644
--- a/template/validator/forward.py
+++ b/prompting/validator/forward.py
@@ -1,7 +1,6 @@
# The MIT License (MIT)
# Copyright © 2023 Yuma Rao
-# TODO(developer): Set your name
-# Copyright © 2023
+# Copyright © 2024 nanlabs
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -19,9 +18,9 @@
import bittensor as bt
-from template.protocol import Dummy
-from template.validator.reward import get_rewards
-from template.utils.uids import get_random_uids
+from prompting.protocol import Prompting
+from prompting.validator.reward import get_rewards
+from prompting.utils.uids import get_random_uids
async def forward(self):
@@ -38,12 +37,21 @@ async def forward(self):
# get_random_uids is an example method, but you can replace it with your own.
miner_uids = get_random_uids(self, k=self.config.neuron.sample_size)
+ # Assuming Synapse provides certain functionalities required for integration
+ # TODO(developer): Define the Synapse instance for our use case.
+ prompting = Prompting(
+ character_info="GPT-4, for engaging and informative conversations.",
+ criteria=["Ensure accuracy.", "Maintain a friendly tone."],
+ messages=[],
+ )
+
+ prompting.add_message("Tell me a joke.")
+
# The dendrite client queries the network.
responses = await self.dendrite(
# Send the query to selected miner axons in the network.
axons=[self.metagraph.axons[uid] for uid in miner_uids],
- # Construct a dummy query. This simply contains a single integer.
- synapse=Dummy(dummy_input=self.step),
+ synapse=prompting,
# All responses have the deserialize function called on them before returning.
# You are encouraged to define your own deserialization function.
deserialize=True,
@@ -52,7 +60,6 @@ async def forward(self):
# Log the results for monitoring purposes.
bt.logging.info(f"Received responses: {responses}")
- # TODO(developer): Define how the validator scores responses.
# Adjust the scores based on responses from miners.
rewards = get_rewards(self, query=self.step, responses=responses)
diff --git a/template/validator/reward.py b/prompting/validator/reward.py
similarity index 76%
rename from template/validator/reward.py
rename to prompting/validator/reward.py
index ab2d435..91d6f1b 100644
--- a/template/validator/reward.py
+++ b/prompting/validator/reward.py
@@ -1,7 +1,7 @@
# The MIT License (MIT)
# Copyright © 2023 Yuma Rao
-# TODO(developer): Set your name
-# Copyright © 2023
+# Copyright © 2024 adanmauri
+# Copyright © 2024 ulises-jeremias
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -17,26 +17,32 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+from functools import reduce
+import textblob
import torch
from typing import List
-def reward(query: int, response: int) -> float:
+def reward(query: int, response: str) -> float:
"""
- Reward the miner response to the dummy request. This method returns a reward
+ Reward the miner response to the prompting request. This method returns a reward
value for the miner, which is used to update the miner's score.
Returns:
- float: The reward value for the miner.
"""
- return 1.0 if response == query * 2 else 0
+ blob = textblob.TextBlob(response)
+ sentiment_scores = [sentence.sentiment.polarity for sentence in blob.sentences]
+ sentiment_avg = sum(sentiment_scores) / len(sentiment_scores) if sentiment_scores else -1.0
+ sentiment_normalized = (sentiment_avg + 1) / 2
+ return sentiment_normalized
def get_rewards(
self,
query: int,
- responses: List[float],
+ responses: List[str],
) -> torch.FloatTensor:
"""
Returns a tensor of rewards for the given query and responses.
diff --git a/requirements.txt b/requirements.txt
index c1b866e..8ca8f33 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
bittensor
-torch
\ No newline at end of file
+torch
+textblob==0.18.0.post0
diff --git a/scripts/run_openai_miner.sh b/scripts/run_openai_miner.sh
new file mode 100644
index 0000000..d2ec0ce
--- /dev/null
+++ b/scripts/run_openai_miner.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+WALLET_NAME=${1:-"openai_miner"}
+WALLET_HOTKEY=${2:-"default"}
+NETUID=${3:-1}
+CHAIN_ENDPOINT=${4:-"ws://127.0.0.1:9946"}
+
+python -m pip install --upgrade --user -r ./neurons/miners/openai/requirements.txt
+
+python ./neurons/miners/openai/miner.py --netuid $NETUID --subtensor.chain_endpoint $CHAIN_ENDPOINT --wallet.name $WALLET_NAME --wallet.hotkey $WALLET_HOTKEY --logging.debug
diff --git a/scripts/run_validator.sh b/scripts/run_validator.sh
new file mode 100644
index 0000000..137c8a7
--- /dev/null
+++ b/scripts/run_validator.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+WALLET_NAME=${1:-"validator"}
+WALLET_HOTKEY=${2:-"default"}
+NETUID=${3:-1}
+CHAIN_ENDPOINT=${4:-"ws://127.0.0.1:9946"}
+
+python3 neurons/validators/validator.py --netuid $NETUID --subtensor.chain_endpoint $CHAIN_ENDPOINT --wallet.name $WALLET_NAME --wallet.hotkey $WALLET_HOTKEY --logging.debug
\ No newline at end of file
diff --git a/scripts/setup.sh b/scripts/setup.sh
new file mode 100644
index 0000000..5f586a0
--- /dev/null
+++ b/scripts/setup.sh
@@ -0,0 +1,23 @@
+if [ -z "$wallet_name" ]; then
+ echo "Please provide the wallet name as an argument"
+ exit 1
+fi
+
+if [[ ! -f ~/.bittensor/wallets/"${wallet_name}"/coldkeypub.txt ]]; then
+ echo "Coldkey for wallet $wallet_name not found. Creating a new coldkey."
+
+ btcli wallet new_coldkey --wallet.name "${wallet_name}" --no_password --no_prompt
+ btcli wallet new_hotkey --wallet.name "${wallet_name}" --wallet.hotkey default --no_prompt
+fi
+
+# Transfer tokens to coldkeys
+btcli wallet faucet --wallet.name "${wallet_name}" --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
+btcli wallet faucet --wallet.name "${wallet_name}" --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
+btcli wallet faucet --wallet.name "${wallet_name}" --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
+
+# Register wallet hotkeys to subnet
+btcli subnet register --wallet.name "${wallet_name}" --netuid 1 --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
+
+# Ensure both the miner and validator keys are successfully registered.
+btcli subnet list --subtensor.chain_endpoint ws://127.0.0.1:9946
+btcli wallet overview --wallet.name "${wallet_name}" --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
diff --git a/scripts/setup_miner.sh b/scripts/setup_miner.sh
new file mode 100644
index 0000000..6c8fff2
--- /dev/null
+++ b/scripts/setup_miner.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )"
+SCRIPTS_DIR="${ROOT}"/scripts
+
+wallet_name=$1
+
+source "${SCRIPTS_DIR}"/setup.sh
diff --git a/scripts/setup_owner_subnet.sh b/scripts/setup_owner_subnet.sh
new file mode 100644
index 0000000..a4fa4b5
--- /dev/null
+++ b/scripts/setup_owner_subnet.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+wallet_name=owner
+
+# Install the bittensor_subnet_prompting python package
+python -m pip install --upgrade --user -e .
+
+# Create and set up wallets
+# This section can be skipped if wallets are already set up
+if [ ! -f ~/.bittensor/wallets/"${wallet_name}"/coldkeypub.txt ]; then
+ btcli wallet new_coldkey --wallet.name "${wallet_name}" --no_password --no_prompt
+fi
+
+btcli wallet faucet --wallet.name "${wallet_name}" --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
+btcli wallet faucet --wallet.name "${wallet_name}" --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
+btcli wallet faucet --wallet.name "${wallet_name}" --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
+btcli wallet faucet --wallet.name "${wallet_name}" --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
+
+# Register a subnet (this needs to be run each time we start a new local chain)
+btcli subnet create --wallet.name "${wallet_name}" --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 --no_prompt
+
+btcli subnet list --subtensor.chain_endpoint ws://127.0.0.1:9946
diff --git a/scripts/setup_validator.sh b/scripts/setup_validator.sh
new file mode 100644
index 0000000..be4b51e
--- /dev/null
+++ b/scripts/setup_validator.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )"
+SCRIPTS_DIR="${ROOT}"/scripts
+
+wallet_name=$1
+
+source "${SCRIPTS_DIR}"/setup.sh
+
+# Add stake to the validator
+btcli stake add --wallet.name "${wallet_name}" --wallet.hotkey default --subtensor.chain_endpoint ws://127.0.0.1:9946 --amount 10000 --no_prompt
diff --git a/setup.py b/setup.py
index f76ec9b..8966567 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,6 @@
# The MIT License (MIT)
# Copyright © 2023 Yuma Rao
-# TODO(developer): Set your name
-# Copyright © 2023
+# Copyright © 2024 nanlabs
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -55,7 +54,7 @@ def read_requirements(path):
# loading version from setup.py
with codecs.open(
- os.path.join(here, "template/__init__.py"), encoding="utf-8"
+ os.path.join(here, "prompting/__init__.py"), encoding="utf-8"
) as init_file:
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]", init_file.read(), re.M
@@ -63,16 +62,16 @@ def read_requirements(path):
version_string = version_match.group(1)
setup(
- name="bittensor_subnet_template", # TODO(developer): Change this value to your module subnet name.
+ name="bittensor_subnet_prompting",
version=version_string,
- description="bittensor_subnet_template", # TODO(developer): Change this value to your module subnet description.
+ description="Bittensor Subnet Prompting created by NaNLabs",
long_description=long_description,
long_description_content_type="text/markdown",
- url="https://github.com/opentensor/bittensor-subnet-template", # TODO(developer): Change this url to your module subnet github url.
- author="bittensor.com", # TODO(developer): Change this value to your module subnet author name.
+ url="https://github.com/nanlabs/bittensor-subnet-prompting",
+ author="nan-labs.com",
packages=find_packages(),
include_package_data=True,
- author_email="", # TODO(developer): Change this value to your module subnet author email.
+ author_email="contact@nan-labs.com",
license="MIT",
python_requires=">=3.8",
install_requires=requirements,
diff --git a/subnet_links.json b/subnet_links.json
new file mode 100644
index 0000000..cb594d1
--- /dev/null
+++ b/subnet_links.json
@@ -0,0 +1,136 @@
+{
+ "subnet_repositories": [
+ {
+ "name": "sn0",
+ "url": ""
+ },
+ {
+ "name": "sn1",
+ "url": "https://github.com/opentensor/text-prompting/"
+ },
+ {
+ "name": "sn2",
+ "url": "https://github.com/bittranslateio/bittranslate/"
+ },
+ {
+ "name": "sn3",
+ "url": "https://github.com/gitphantomman/scraping_subnet/"
+ },
+ {
+ "name": "sn4",
+ "url": "https://github.com/manifold-inc/targon/"
+ },
+ {
+ "name": "sn5",
+ "url": "https://github.com/unconst/ImageSubnet/"
+ },
+ {
+ "name": "sn6",
+ "url": ""
+ },
+ {
+ "name": "sn7",
+ "url": "https://github.com/tensorage/tensorage/"
+ },
+ {
+ "name": "sn8",
+ "url": "https://github.com/taoshidev/time-series-prediction-subnet/"
+ },
+ {
+ "name": "sn9",
+ "url": "https://github.com/unconst/pretrain-subnet/"
+ },
+ {
+ "name": "sn10",
+ "url": "https://github.com/dream-well/map-reduce-subnet/"
+ },
+ {
+ "name": "sn11",
+ "url": "https://github.com/opentensor/text-prompting/"
+ },
+ {
+ "name": "sn12",
+ "url": ""
+ },
+ {
+ "name": "sn13",
+ "url": "https://github.com/RusticLuftig/data-universe/"
+ },
+ {
+ "name": "sn14",
+ "url": "https://github.com/ceterum1/llm-defender-subnet/"
+ },
+ {
+ "name": "sn15",
+ "url": "https://github.com/blockchain-insights/blockchain-data-subnet/"
+ },
+ {
+ "name": "sn16",
+ "url": "https://github.com/UncleTensor/AudioSubnet/"
+ },
+ {
+ "name": "sn17",
+ "url": "https://github.com/CortexLM/flavia/"
+ },
+ {
+ "name": "sn18",
+ "url": "https://github.com/corcel-api/cortex.t/"
+ },
+ {
+ "name": "sn19",
+ "url": "https://github.com/namoray/vision/"
+ },
+ {
+ "name": "sn20",
+ "url": "https://github.com/oracle-subnet/oracle-subnet/"
+ },
+ {
+ "name": "sn21",
+ "url": "https://github.com/ifrit98/storage-subnet/"
+ },
+ {
+ "name": "sn22",
+ "url": "https://github.com/surcyf123/smart-scrape/"
+ },
+ {
+ "name": "sn23",
+ "url": "https://github.com/NicheTensor/NicheImage/"
+ },
+ {
+ "name": "sn24",
+ "url": "https://github.com/eseckft/BitAds.ai/tree/main"
+ },
+ {
+ "name": "sn25",
+ "url": "https://github.com/KMFODA/DistributedTraining/"
+ },
+ {
+ "name": "sn26",
+ "url": "https://github.com/Supreme-Emperor-Wang/ImageAlchemy/"
+ },
+ {
+ "name": "sn27",
+ "url": "https://github.com/neuralinternet/compute-subnet/"
+ },
+ {
+ "name": "sn28",
+ "url": "https://github.com/zktensor/zktensor_subnet/"
+ },
+ {
+ "name": "sn29",
+ "url": "https://github.com/404-Repo/Subnet-29/"
+ },
+ {
+ "name": "sn30",
+ "url": ""
+ },
+ {
+ "name": "sn31",
+ "url": "https://github.com/bthealthcare/healthcare-subnet"
+ },
+ {
+ "name": "sn32",
+ "url": "https://github.com/RoyalTensor/roleplay/"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/template/api/examples/subnet21.py b/template/api/examples/subnet21.py
deleted file mode 100644
index 0d0bc0a..0000000
--- a/template/api/examples/subnet21.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# The MIT License (MIT)
-# Copyright © 2021 Yuma Rao
-# Copyright © 2023 Opentensor Foundation
-# Copyright © 2023 Opentensor Technologies Inc
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-# the Software.
-
-# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-import torch
-import base64
-import bittensor as bt
-from abc import ABC, abstractmethod
-from typing import Any, List, Union
-from bittensor.subnets import SubnetsAPI
-
-try:
- from storage.validator.cid import generate_cid_string
- from storage.validator.encryption import (
- encrypt_data,
- decrypt_data_with_private_key,
- )
-except:
- storage_url = "https://github.com/ifrit98/storage-subnet"
- bt.logging.error(
- f"Storage Subnet 21 not installed. Please visit: {storage_url} and install the package to use this example."
- )
-
-
-class StoreUserAPI(SubnetsAPI):
- def __init__(self, wallet: "bt.wallet"):
- super().__init__(wallet)
- self.netuid = 21
-
- def prepare_synapse(
- self,
- data: bytes,
- encrypt=False,
- ttl=60 * 60 * 24 * 30,
- encoding="utf-8",
- ) -> StoreUser:
- data = bytes(data, encoding) if isinstance(data, str) else data
- encrypted_data, encryption_payload = (
- encrypt_data(data, self.wallet) if encrypt else (data, "{}")
- )
- expected_cid = generate_cid_string(encrypted_data)
- encoded_data = base64.b64encode(encrypted_data)
-
- synapse = StoreUser(
- encrypted_data=encoded_data,
- encryption_payload=encryption_payload,
- ttl=ttl,
- )
-
- return synapse
-
- def process_responses(
- self, responses: List[Union["bt.Synapse", Any]]
- ) -> str:
- success = False
- failure_modes = {"code": [], "message": []}
- for response in responses:
- if response.dendrite.status_code != 200:
- failure_modes["code"].append(response.dendrite.status_code)
- failure_modes["message"].append(
- response.dendrite.status_message
- )
- continue
-
- stored_cid = (
- response.data_hash.decode("utf-8")
- if isinstance(response.data_hash, bytes)
- else response.data_hash
- )
- bt.logging.debug("received data CID: {}".format(stored_cid))
- success = True
- break
-
- if success:
- bt.logging.info(
- f"Stored data on the Bittensor network with CID {stored_cid}"
- )
- else:
- bt.logging.error(
- f"Failed to store data. Response failure codes & messages {failure_modes}"
- )
- stored_cid = ""
-
- return stored_cid
-
-
-class RetrieveUserAPI(SubnetsAPI):
- def __init__(self, wallet: "bt.wallet"):
- super().__init__(wallet)
- self.netuid = 21
-
- def prepare_synapse(self, cid: str) -> RetrieveUser:
- synapse = RetrieveUser(data_hash=cid)
- return synapse
-
- def process_responses(
- self, responses: List[Union["bt.Synapse", Any]]
- ) -> bytes:
- success = False
- decrypted_data = b""
- for response in responses:
- bt.logging.trace(f"response: {response.dendrite.dict()}")
- if (
- response.dendrite.status_code != 200
- or response.encrypted_data is None
- ):
- continue
-
- # Decrypt the response
- bt.logging.trace(
- f"encrypted_data: {response.encrypted_data[:100]}"
- )
- encrypted_data = base64.b64decode(response.encrypted_data)
- bt.logging.debug(
- f"encryption_payload: {response.encryption_payload}"
- )
- if (
- response.encryption_payload is None
- or response.encryption_payload == ""
- or response.encryption_payload == "{}"
- ):
- bt.logging.warning(
- "No encryption payload found. Unencrypted data."
- )
- decrypted_data = encrypted_data
- else:
- decrypted_data = decrypt_data_with_private_key(
- encrypted_data,
- response.encryption_payload,
- bytes(self.wallet.coldkey.private_key.hex(), "utf-8"),
- )
- bt.logging.trace(f"decrypted_data: {decrypted_data[:100]}")
- success = True
- break
-
- if success:
- bt.logging.info(
- f"Returning retrieved data: {decrypted_data[:100]}"
- )
- else:
- bt.logging.error("Failed to retrieve data.")
-
- return decrypted_data
-
-
-async def test_store_and_retrieve(
- netuid: int = 22, wallet: "bt.wallet" = None
-):
- # Example usage
- wallet = wallet or bt.wallet()
-
- # Instantiate the handler
- store_handler = StoreUserAPI(wallet)
-
- # Fetch the axons you want to query
- metagraph = bt.subtensor("test").metagraph(netuid=22)
- query_axons = metagraph.axons
-
- cid = await store_handler(
- axons=query_axons,
- # any arguments for the proper synapse
- data=b"some data",
- encrypt=True,
- ttl=60 * 60 * 24 * 30,
- encoding="utf-8",
- uid=None,
- )
- print("CID:", cid)
-
- retrieve_handler = RetrieveUserAPI(wallet)
- retrieve_response = await retrieve_handler(axons=query_axons, cid=cid)
diff --git a/template/api/get_query_axons.py b/template/api/get_query_axons.py
deleted file mode 100644
index b42cb7f..0000000
--- a/template/api/get_query_axons.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# The MIT License (MIT)
-# Copyright © 2021 Yuma Rao
-# Copyright © 2023 Opentensor Foundation
-# Copyright © 2023 Opentensor Technologies Inc
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-# the Software.
-
-# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-import torch
-import random
-import bittensor as bt
-
-
-async def ping_uids(dendrite, metagraph, uids, timeout=3):
- """
- Pings a list of UIDs to check their availability on the Bittensor network.
-
- Args:
- dendrite (bittensor.dendrite): The dendrite instance to use for pinging nodes.
- metagraph (bittensor.metagraph): The metagraph instance containing network information.
- uids (list): A list of UIDs (unique identifiers) to ping.
- timeout (int, optional): The timeout in seconds for each ping. Defaults to 3.
-
- Returns:
- tuple: A tuple containing two lists:
- - The first list contains UIDs that were successfully pinged.
- - The second list contains UIDs that failed to respond.
- """
- axons = [metagraph.axons[uid] for uid in uids]
- try:
- responses = await dendrite(
- axons,
- bt.Synapse(), # TODO: potentially get the synapses available back?
- deserialize=False,
- timeout=timeout,
- )
- successful_uids = [
- uid
- for uid, response in zip(uids, responses)
- if response.dendrite.status_code == 200
- ]
- failed_uids = [
- uid
- for uid, response in zip(uids, responses)
- if response.dendrite.status_code != 200
- ]
- except Exception as e:
- bt.logging.error(f"Dendrite ping failed: {e}")
- successful_uids = []
- failed_uids = uids
- bt.logging.debug("ping() successful uids:", successful_uids)
- bt.logging.debug("ping() failed uids :", failed_uids)
- return successful_uids, failed_uids
-
-
-async def get_query_api_nodes(dendrite, metagraph, n=0.1, timeout=3):
- """
- Fetches the available API nodes to query for the particular subnet.
-
- Args:
- wallet (bittensor.wallet): The wallet instance to use for querying nodes.
- metagraph (bittensor.metagraph): The metagraph instance containing network information.
- n (float, optional): The fraction of top nodes to consider based on stake. Defaults to 0.1.
- timeout (int, optional): The timeout in seconds for pinging nodes. Defaults to 3.
-
- Returns:
- list: A list of UIDs representing the available API nodes.
- """
- bt.logging.debug(
- f"Fetching available API nodes for subnet {metagraph.netuid}"
- )
- vtrust_uids = [
- uid.item()
- for uid in metagraph.uids
- if metagraph.validator_trust[uid] > 0
- ]
- top_uids = torch.where(metagraph.S > torch.quantile(metagraph.S, 1 - n))
- top_uids = top_uids[0].tolist()
- init_query_uids = set(top_uids).intersection(set(vtrust_uids))
- query_uids, _ = await ping_uids(
- dendrite, metagraph, init_query_uids, timeout=timeout
- )
- bt.logging.debug(
- f"Available API node UIDs for subnet {metagraph.netuid}: {query_uids}"
- )
- if len(query_uids) > 3:
- query_uids = random.sample(query_uids, 3)
- return query_uids
-
-
-async def get_query_api_axons(
- wallet, metagraph=None, n=0.1, timeout=3, uids=None
-):
- """
- Retrieves the axons of query API nodes based on their availability and stake.
-
- Args:
- wallet (bittensor.wallet): The wallet instance to use for querying nodes.
- metagraph (bittensor.metagraph, optional): The metagraph instance containing network information.
- n (float, optional): The fraction of top nodes to consider based on stake. Defaults to 0.1.
- timeout (int, optional): The timeout in seconds for pinging nodes. Defaults to 3.
- uids (Union[List[int], int], optional): The specific UID(s) of the API node(s) to query. Defaults to None.
-
- Returns:
- list: A list of axon objects for the available API nodes.
- """
- dendrite = bt.dendrite(wallet=wallet)
-
- if metagraph is None:
- metagraph = bt.metagraph(netuid=21)
-
- if uids is not None:
- query_uids = [uids] if isinstance(uids, int) else uids
- else:
- query_uids = await get_query_api_nodes(
- dendrite, metagraph, n=n, timeout=timeout
- )
- return [metagraph.axons[uid] for uid in query_uids]
diff --git a/template/base/__init__.py b/template/base/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/template/protocol.py b/template/protocol.py
deleted file mode 100644
index b7c50b9..0000000
--- a/template/protocol.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# The MIT License (MIT)
-# Copyright © 2023 Yuma Rao
-# TODO(developer): Set your name
-# Copyright © 2023
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-# the Software.
-
-# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-import typing
-import bittensor as bt
-
-# TODO(developer): Rewrite with your protocol definition.
-
-# This is the protocol for the dummy miner and validator.
-# It is a simple request-response protocol where the validator sends a request
-# to the miner, and the miner responds with a dummy response.
-
-# ---- miner ----
-# Example usage:
-# def dummy( synapse: Dummy ) -> Dummy:
-# synapse.dummy_output = synapse.dummy_input + 1
-# return synapse
-# axon = bt.axon().attach( dummy ).serve(netuid=...).start()
-
-# ---- validator ---
-# Example usage:
-# dendrite = bt.dendrite()
-# dummy_output = dendrite.query( Dummy( dummy_input = 1 ) )
-# assert dummy_output == 2
-
-
-class Dummy(bt.Synapse):
- """
- A simple dummy protocol representation which uses bt.Synapse as its base.
- This protocol helps in handling dummy request and response communication between
- the miner and the validator.
-
- Attributes:
- - dummy_input: An integer value representing the input request sent by the validator.
- - dummy_output: An optional integer value which, when filled, represents the response from the miner.
- """
-
- # Required request input, filled by sending dendrite caller.
- dummy_input: int
-
- # Optional request output, filled by recieving axon.
- dummy_output: typing.Optional[int] = None
-
- def deserialize(self) -> int:
- """
- Deserialize the dummy output. This method retrieves the response from
- the miner in the form of dummy_output, deserializes it and returns it
- as the output of the dendrite.query() call.
-
- Returns:
- - int: The deserialized response, which in this case is the value of dummy_output.
-
- Example:
- Assuming a Dummy instance has a dummy_output value of 5:
- >>> dummy_instance = Dummy(dummy_input=4)
- >>> dummy_instance.dummy_output = 5
- >>> dummy_instance.deserialize()
- 5
- """
- return self.dummy_output
diff --git a/template/subnet_links.py b/template/subnet_links.py
deleted file mode 100644
index ae22230..0000000
--- a/template/subnet_links.py
+++ /dev/null
@@ -1,59 +0,0 @@
-SUBNET_LINKS = [
- {"name": "sn0", "url": ""},
- {"name": "sn1", "url": "https://github.com/opentensor/text-prompting/"},
- {"name": "sn2", "url": "https://github.com/bittranslateio/bittranslate/"},
- {
- "name": "sn3",
- "url": "https://github.com/gitphantomman/scraping_subnet/",
- },
- {"name": "sn4", "url": "https://github.com/manifold-inc/targon/"},
- {"name": "sn5", "url": "https://github.com/unconst/ImageSubnet/"},
- {"name": "sn6", "url": ""},
- {"name": "sn7", "url": "https://github.com/tensorage/tensorage/"},
- {
- "name": "sn8",
- "url": "https://github.com/taoshidev/time-series-prediction-subnet/",
- },
- {"name": "sn9", "url": "https://github.com/unconst/pretrain-subnet/"},
- {
- "name": "sn10",
- "url": "https://github.com/dream-well/map-reduce-subnet/",
- },
- {"name": "sn11", "url": "https://github.com/opentensor/text-prompting/"},
- {"name": "sn12", "url": ""},
- {"name": "sn13", "url": "https://github.com/RusticLuftig/data-universe/"},
- {
- "name": "sn14",
- "url": "https://github.com/ceterum1/llm-defender-subnet/",
- },
- {
- "name": "sn15",
- "url": "https://github.com/blockchain-insights/blockchain-data-subnet/",
- },
- {"name": "sn16", "url": "https://github.com/UncleTensor/AudioSubnet/"},
- {"name": "sn17", "url": "https://github.com/CortexLM/flavia/"},
- {"name": "sn18", "url": "https://github.com/corcel-api/cortex.t/"},
- {"name": "sn19", "url": "https://github.com/namoray/vision/"},
- {"name": "sn20", "url": "https://github.com/oracle-subnet/oracle-subnet/"},
- {"name": "sn21", "url": "https://github.com/ifrit98/storage-subnet/"},
- {"name": "sn22", "url": "https://github.com/surcyf123/smart-scrape/"},
- {"name": "sn23", "url": "https://github.com/NicheTensor/NicheImage/"},
- {"name": "sn24", "url": "https://github.com/eseckft/BitAds.ai/tree/main"},
- {"name": "sn25", "url": "https://github.com/KMFODA/DistributedTraining/"},
- {
- "name": "sn26",
- "url": "https://github.com/Supreme-Emperor-Wang/ImageAlchemy/",
- },
- {
- "name": "sn27",
- "url": "https://github.com/neuralinternet/compute-subnet/",
- },
- {"name": "sn28", "url": "https://github.com/zktensor/zktensor_subnet/"},
- {"name": "sn29", "url": "https://github.com/404-Repo/Subnet-29/"},
- {"name": "sn30", "url": ""},
- {
- "name": "sn31",
- "url": "https://github.com/bthealthcare/healthcare-subnet",
- },
- {"name": "sn32", "url": "https://github.com/RoyalTensor/roleplay/"},
-]
diff --git a/tests/test_mock.py b/tests/test_mock.py
index e102a06..a80d0ed 100644
--- a/tests/test_mock.py
+++ b/tests/test_mock.py
@@ -82,11 +82,11 @@ async def run():
if dendrite.process_time >= timeout + 0.1:
assert dendrite.status_code == 408
assert dendrite.status_message == 'Timeout'
- assert synapse.dummy_output == synapse.dummy_input
+ assert synapse.content == ""
# check that responses which take less than timeout have 200 status code
elif dendrite.process_time < timeout:
assert dendrite.status_code == 200
assert dendrite.status_message == 'OK'
# check that outputs are not empty for successful responses
- assert synapse.dummy_output == synapse.dummy_input * 2
+ assert synapse.content == ""
# dont check for responses which take between timeout and max_time because they are not guaranteed to have a status code of 200 or 408
diff --git a/tests/test_template_validator.py b/tests/test_template_validator.py
index 5d0110a..e69fe76 100644
--- a/tests/test_template_validator.py
+++ b/tests/test_template_validator.py
@@ -21,14 +21,14 @@
import unittest
import bittensor as bt
-from neurons.validator import Neuron as Validator
-from neurons.miner import Neuron as Miner
+from neurons.validators import Neuron as Validator
+from neurons.miners.openai.miner import Neuron as Miner
-from template.protocol import Dummy
-from template.validator.forward import forward
-from template.utils.uids import get_random_uids
-from template.validator.reward import get_rewards
-from template.base.validator import BaseValidatorNeuron
+from prompting.protocol import Prompting
+from prompting.validator.forward import forward
+from prompting.utils.uids import get_random_uids
+from prompting.validator.reward import get_rewards
+from prompting.base.validator import BaseValidatorNeuron
class TemplateValidatorNeuronTestCase(unittest.TestCase):
@@ -61,16 +61,27 @@ def test_forward(self):
# TODO: Test that the forward function returns the correct value
pass
- def test_dummy_responses(self):
- # TODO: Test that the dummy responses are correctly constructed
+ def test_prompting_responses(self):
+ # TODO: Test that the prompting responses are correctly constructed
+
+ # Assuming Synapse provides certain functionalities required for integration
+ prompting = Prompting(
+ character_info="GPT-4, for engaging and informative conversations.",
+ criteria=["Ensure accuracy.", "Maintain a friendly tone."],
+ messages=[],
+ )
+
+ # Interacting with the LLM
+ prompting.add_message("Tell me a joke.")
+ prompting.update_completion("Why did the computer go to the doctor? Because it had a virus!")
responses = self.neuron.dendrite.query(
# Send the query to miners in the network.
axons=[
self.neuron.metagraph.axons[uid] for uid in self.miner_uids
],
- # Construct a dummy query.
- synapse=Dummy(dummy_input=self.neuron.step),
+ # Construct a synapse.
+ synapse=prompting,
# All responses have the deserialize function called on them before returning.
deserialize=True,
)
@@ -79,12 +90,23 @@ def test_dummy_responses(self):
self.assertEqual(response, self.neuron.step * 2)
def test_reward(self):
+ # Assuming Synapse provides certain functionalities required for integration
+ prompting = Prompting(
+ character_info="GPT-4, for engaging and informative conversations.",
+ criteria=["Ensure accuracy.", "Maintain a friendly tone."],
+ messages=[],
+ )
+
+ # Interacting with the LLM
+ prompting.add_message("Tell me a joke.")
+ prompting.update_completion("Why did the computer go to the doctor? Because it had a virus!")
+
# TODO: Test that the reward function returns the correct value
responses = self.dendrite.query(
# Send the query to miners in the network.
axons=[self.metagraph.axons[uid] for uid in self.miner_uids],
- # Construct a dummy query.
- synapse=Dummy(dummy_input=self.neuron.step),
+ # Construct a synapse.
+ synapse=prompting,
# All responses have the deserialize function called on them before returning.
deserialize=True,
)
@@ -94,13 +116,24 @@ def test_reward(self):
self.assertEqual(rewards, expected_rewards)
def test_reward_with_nan(self):
+ # Assuming Synapse provides certain functionalities required for integration
+ prompting = Prompting(
+ character_info="GPT-4, for engaging and informative conversations.",
+ criteria=["Ensure accuracy.", "Maintain a friendly tone."],
+ messages=[],
+ )
+
+ # Interacting with the LLM
+ prompting.add_message("Tell me a joke.")
+ prompting.update_completion("Why did the computer go to the doctor? Because it had a virus!")
+
# TODO: Test that NaN rewards are correctly sanitized
# TODO: Test that a bt.logging.warning is thrown when a NaN reward is sanitized
responses = self.dendrite.query(
# Send the query to miners in the network.
axons=[self.metagraph.axons[uid] for uid in self.miner_uids],
- # Construct a dummy query.
- synapse=Dummy(dummy_input=self.neuron.step),
+ # Construct a synapse.
+ synapse=prompting,
# All responses have the deserialize function called on them before returning.
deserialize=True,
)