diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 0fb1701e0..dbe5ab6dd 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -5,7 +5,7 @@ on:
     branches: [main]
 
 jobs:
-  build:
+  exchange:
     runs-on: ubuntu-latest
 
     steps:
@@ -19,9 +19,82 @@ jobs:
 
       - name: Ruff
         run: |
-          uvx ruff check
-          uvx ruff format --check
+          uvx ruff check packages/exchange
+          uvx ruff format packages/exchange --check
 
       - name: Run tests
+        working-directory: ./packages/exchange
         run: |
           uv run pytest tests -m 'not integration'
+
+  goose:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install UV
+        run: curl -LsSf https://astral.sh/uv/install.sh | sh
+
+      - name: Source Cargo Environment
+        run: source $HOME/.cargo/env
+
+      - name: Ruff
+        run: |
+          uvx ruff check src tests
+          uvx ruff format src tests --check
+
+      - name: Run tests
+        run: |
+          uv run pytest tests -m 'not integration'
+
+
+  # This runs integration tests of the OpenAI API, using Ollama to host models.
+  # This lets us test PRs from forks which can't access secrets like API keys.
+  ollama:
+    runs-on: ubuntu-latest
+
+    strategy:
+      matrix:
+        python-version:
+          # Only test the latest Python version.
+          - "3.12"
+        ollama-model:
+          # For quicker CI, use a smaller, tool-capable model than the default.
+          - "qwen2.5:0.5b"
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install UV
+        run: curl -LsSf https://astral.sh/uv/install.sh | sh
+
+      - name: Source Cargo Environment
+        run: source $HOME/.cargo/env
+
+      - name: Set up Python
+        run: uv python install ${{ matrix.python-version }}
+
+      - name: Install Ollama
+        run: curl -fsSL https://ollama.com/install.sh | sh
+
+      - name: Start Ollama
+        run: |
+          # Run in the background, in a way that survives into the next step
+          nohup ollama serve > ollama.log 2>&1 &
+
+          # Block until the ready endpoint responds
+          time curl --retry 5 --retry-connrefused --retry-delay 1 -sf http://localhost:11434
+
+      # Tests use OpenAI which does not have a mechanism to pull models. Run a
+      # simple prompt to (pull and) test the model first.
+      - name: Test Ollama model
+        run: ollama run $OLLAMA_MODEL hello || cat ollama.log
+        env:
+          OLLAMA_MODEL: ${{ matrix.ollama-model }}
+
+      - name: Run Ollama tests
+        run: uv run pytest tests -m integration -k ollama
+        working-directory: ./packages/exchange
+        env:
+          OLLAMA_MODEL: ${{ matrix.ollama-model }}
diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml
new file mode 100644
index 000000000..969ebb7e7
--- /dev/null
+++ b/.github/workflows/publish.yaml
@@ -0,0 +1,50 @@
+name: Publish
+
+# A release on goose will also publish exchange, if it has been updated
+# This means in some cases we may need to bump goose without other changes, just to release exchange
+on:
+  release:
+    types: [published]
+
+jobs:
+  publish:
+    permissions:
+      id-token: write
+      contents: read
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get current version from pyproject.toml
+        id: get_version
+        run: |
+          echo "VERSION=$(grep -m 1 'version =' "pyproject.toml" | awk -F'"' '{print $2}')" >> $GITHUB_ENV
+
+      - name: Extract tag version
+        id: extract_tag
+        run: |
+          TAG_VERSION=$(echo "${{ github.event.release.tag_name }}" | sed -E 's/v(.*)/\1/')
+          echo "TAG_VERSION=$TAG_VERSION" >> $GITHUB_ENV
+
+      - name: Check if tag matches version from pyproject.toml
+        id: check_tag
+        run: |
+          if [ "${{ env.TAG_VERSION }}" != "${{ env.VERSION }}" ]; then
+            echo "::error::Tag version (${{ env.TAG_VERSION }}) does not match version in pyproject.toml (${{ env.VERSION }})."
+            exit 1
+          fi
+
+      - name: Install the latest version of uv
+        uses: astral-sh/setup-uv@v1
+        with:
+          version: "latest"
+
+      - name: Build Package
+        run: |
+          uv build -o dist --package goose-ai
+          uv build -o dist --package ai-exchange
+
+      - name: Publish package to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          skip-existing: true
diff --git a/.github/workflows/pypi_release.yaml b/.github/workflows/pypi_release.yaml
deleted file mode 100644
index 98758fb96..000000000
--- a/.github/workflows/pypi_release.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-name: PYPI Release
-
-on:
-  push:
-    tags:
-      - 'v*'
-
-jobs:
-  pypi_release:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Install UV
-        run: curl -LsSf https://astral.sh/uv/install.sh | sh
-
-      - name: Source Cargo Environment
-        run: source $HOME/.cargo/env
-
-      - name: Build with UV
-        run: uvx --from build pyproject-build --installer uv
-
-      - name: Check version
-        id: check_version
-        run: |
-          PACKAGE_NAME=$(grep '^name =' pyproject.toml | sed -E 's/name = "(.*)"/\1/')
-          TAG_VERSION=$(echo "$GITHUB_REF" | sed -E 's/refs\/tags\/v(.+)/\1/')
-          CURRENT_VERSION=$(curl -s https://pypi.org/pypi/$PACKAGE_NAME/json | jq -r .info.version)
-          PROJECT_VERSION=$(grep '^version =' pyproject.toml | sed -E 's/version = "(.*)"/\1/')
-          if [ "$TAG_VERSION" != "$PROJECT_VERSION" ]; then
-            echo "Tag version does not match version in pyproject.toml"
-            exit 1
-          fi
-          if python -c "from packaging.version import parse as parse_version; exit(0 if parse_version('$TAG_VERSION') > parse_version('$CURRENT_VERSION') else 1)"; then
-            echo "new_version=true" >> $GITHUB_OUTPUT
-          else
-            exit 1
-          fi
-
-      - name: Publish
-        uses: pypa/gh-action-pypi-publish@v1.4.2
-        if: steps.check_version.outputs.new_version == 'true'
-        with:
-          user: __token__
-          password: ${{ secrets.PYPI_TOKEN_TEMP }}
-          packages_dir: ./dist/
diff --git a/packages/exchange/README.md b/packages/exchange/README.md
new file mode 100644
index 000000000..207030844
--- /dev/null
+++ b/packages/exchange/README.md
@@ -0,0 +1,95 @@
+[Example](#example) • [Plugins](#plugins)
+
+# Exchange - a uniform python SDK for message generation with LLMs
+
+- Provides a flexible layer for message handling and generation
+- Directly integrates python functions into tool calling
+- Persistently surfaces errors to the underlying models to support reflection
+
+## Example
+
+> [!NOTE]
+> Before you can run this example, you need to set up an API key with
+> `export OPENAI_API_KEY=your-key-here`
+
+``` python
+from exchange import Exchange, Message, Tool
+from exchange.providers import OpenAiProvider
+
+def word_count(text: str):
+    """Get the count of words in text
+
+    Args:
+        text (str): The text with words to count
+    """
+    return len(text.split(" "))
+
+ex = Exchange(
+    provider=OpenAiProvider.from_env(),
+    model="gpt-4o",
+    system="You are a helpful assistant.",
+    tools=[Tool.from_function(word_count)],
+)
+ex.add(Message.user("Count the number of words in this current message"))
+
+# The model sees it has a word count tool, and should use it along the way to answer
+# This will call all the tools as needed until the model replies with the final result
+reply = ex.reply()
+print(reply.text)
+
+# You can see all the tool calls in the message history
+print(ex.messages)
+```
+
+## Plugins
+
+*exchange* has a plugin mechanism to add support for additional providers and moderators. If you need a
+provider not supported here, we'd be happy to review [contributions][CONTRIBUTING]. But you
+can also consider building and using your own plugin.
+
+To create a `Provider` plugin, subclass `exchange.provider.Provider`. You will need to
+implement the `complete` method. For example, this is what we use as a mock in our tests.
+You can see a full implementation example in the [OpenAiProvider][openaiprovider]. We
+also generally recommend implementing a `from_env` classmethod to instantiate the provider.
+
+``` python
+from typing import List
+
+from exchange import Message, Tool
+from exchange.providers import Provider
+
+class MockProvider(Provider):
+    def __init__(self, sequence: List[Message]):
+        # We'll use init to provide a preplanned reply sequence
+        self.sequence = sequence
+        self.call_count = 0
+
+    def complete(
+        self, model: str, system: str, messages: List[Message], tools: List[Tool]
+    ) -> Message:
+        output = self.sequence[self.call_count]
+        self.call_count += 1
+        return output
+```
+
+Then use [python packaging's entrypoints][plugins] to register your plugin.
+
+``` toml
+[project.entry-points.'exchange.provider']
+example = 'path.to.plugin:ExampleProvider'
+```
+
+Your plugin will then be available in your application or other applications built on *exchange*
+through:
+
+``` python
+from exchange.providers import get_provider
+
+provider = get_provider('example').from_env()
+```
+
+[CONTRIBUTING]: CONTRIBUTING.md
+[openaiprovider]: src/exchange/providers/openai.py
+[plugins]: https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/
diff --git a/packages/exchange/pyproject.toml b/packages/exchange/pyproject.toml
new file mode 100644
index 000000000..83a9e3c25
--- /dev/null
+++ b/packages/exchange/pyproject.toml
@@ -0,0 +1,48 @@
+[project]
+name = "ai-exchange"
+version = "0.9.3"
+description = "a uniform python SDK for message generation with LLMs"
+readme = "README.md"
+requires-python = ">=3.10"
+authors = [{ name = "Block", email = "ai-oss-tools@block.xyz" }]
+dependencies = [
+    "griffe>=1.1.1",
+    "attrs>=24.2.0",
+    "jinja2>=3.1.4",
+    "tiktoken>=0.7.0",
+    "httpx>=0.27.0",
+    "tenacity>=9.0.0",
+]
+
+[tool.hatch.build.targets.wheel]
+packages = ["src/exchange"]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.uv]
+dev-dependencies = ["pytest>=8.3.2", "pytest-vcr>=1.0.2", "codecov>=2.1.13"]
+
+[project.entry-points."exchange.provider"]
+openai = "exchange.providers.openai:OpenAiProvider"
+azure = "exchange.providers.azure:AzureProvider"
+databricks = "exchange.providers.databricks:DatabricksProvider"
+anthropic = "exchange.providers.anthropic:AnthropicProvider"
+bedrock = "exchange.providers.bedrock:BedrockProvider"
+ollama = "exchange.providers.ollama:OllamaProvider"
+google = "exchange.providers.google:GoogleProvider"
+
+[project.entry-points."exchange.moderator"]
+passive = "exchange.moderators.passive:PassiveModerator"
+truncate = "exchange.moderators.truncate:ContextTruncate"
+summarize = "exchange.moderators.summarizer:ContextSummarizer"
+
+[project.entry-points."metadata.plugins"]
+ai-exchange = "exchange:module_name"
+
+[tool.pytest.ini_options]
+markers = [
+    "integration: marks tests that need to authenticate (deselect with '-m \"not integration\"')",
+]
diff --git a/packages/exchange/src/exchange/__init__.py b/packages/exchange/src/exchange/__init__.py
new file mode 100644
index 000000000..41adfcf3a
--- /dev/null
+++ b/packages/exchange/src/exchange/__init__.py
@@ -0,0 +1,9 @@
+"""Classes for interacting with the exchange API."""
+
+from exchange.tool import Tool  # noqa
+from exchange.content import Text, ToolResult, ToolUse  # noqa
+from exchange.message import Message  # noqa
+from exchange.exchange import Exchange  # noqa
+from exchange.checkpoint import CheckpointData, Checkpoint  # noqa
+
+module_name = "ai-exchange"
diff --git a/packages/exchange/src/exchange/checkpoint.py b/packages/exchange/src/exchange/checkpoint.py
new file mode 100644
index 000000000..f355dd0a2
--- /dev/null
+++ b/packages/exchange/src/exchange/checkpoint.py
@@ -0,0 +1,67 @@
+from copy import deepcopy
+from typing import List
+from attrs import define, field
+
+
+@define
+class Checkpoint:
+    """Checkpoint that counts the tokens in messages between the start and end index"""
+
+    start_index: int = field(default=0)  # inclusive
+    end_index: int = field(default=0)  # inclusive
+    token_count: int = field(default=0)
+
+    def __deepcopy__(self, _) -> "Checkpoint":  # noqa: ANN001
+        """
+        Returns a deep copy of the Checkpoint object.
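+
+        The memo dict is ignored, since Checkpoint only contains immutable ints.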
+ """ + return Checkpoint( + start_index=self.start_index, + end_index=self.end_index, + token_count=self.token_count, + ) + + +@define +class CheckpointData: + """Aggregates all information about checkpoints""" + + # the total number of tokens in the exchange. this is updated every time a checkpoint is + # added or removed + total_token_count: int = field(default=0) + + # in order list of individual checkpoints in the exchange + checkpoints: List[Checkpoint] = field(factory=list) + + # the offset to apply to the message index when calculating the last message index + # this is useful because messages on the exchange behave like a queue, where you can only + # pop from the left or right sides. This offset allows us to map the checkpoint indices + # to the correct message index, even if we have popped messages from the left side of + # the exchange in the past. we reset this offset to 0 when we empty the checkpoint data. + message_index_offset: int = field(default=0) + + def __deepcopy__(self, memo: dict) -> "CheckpointData": + """Returns a deep copy of the CheckpointData object.""" + return CheckpointData( + total_token_count=self.total_token_count, + checkpoints=deepcopy(self.checkpoints, memo), + message_index_offset=self.message_index_offset, + ) + + @property + def last_message_index(self) -> int: + if not self.checkpoints: + return -1 # we don't have enough information to know + return self.checkpoints[-1].end_index - self.message_index_offset + + def reset(self) -> None: + """Resets the checkpoint data to its initial state.""" + self.checkpoints = [] + self.message_index_offset = 0 + self.total_token_count = 0 + + def pop(self, index: int = -1) -> Checkpoint: + """Removes and returns the checkpoint at the given index.""" + popped_checkpoint = self.checkpoints.pop(index) + self.total_token_count = self.total_token_count - popped_checkpoint.token_count + return popped_checkpoint diff --git a/packages/exchange/src/exchange/content.py b/packages/exchange/src/exchange/content.py new file mode 100644 index 000000000..b9cc986fc --- /dev/null +++ b/packages/exchange/src/exchange/content.py @@ -0,0 +1,38 @@ +from typing import Any, Dict, Optional + +from attrs import define, asdict + + +CONTENT_TYPES = {} + + +class Content: + def __init_subclass__(cls, **kwargs: Dict[str, Any]) -> None: + super().__init_subclass__(**kwargs) + CONTENT_TYPES[cls.__name__] = cls + + def to_dict(self) -> Dict[str, Any]: + data = asdict(self, recurse=True) + data["type"] = self.__class__.__name__ + return data + + +@define +class Text(Content): + text: str + + +@define +class ToolUse(Content): + id: str + name: str + parameters: Any + is_error: bool = False + error_message: Optional[str] = None + + +@define +class ToolResult(Content): + tool_use_id: str + output: str + is_error: bool = False diff --git a/packages/exchange/src/exchange/exchange.py b/packages/exchange/src/exchange/exchange.py new file mode 100644 index 000000000..b2fdbc5ec --- /dev/null +++ b/packages/exchange/src/exchange/exchange.py @@ -0,0 +1,336 @@ +import json +import traceback +from copy import deepcopy +from typing import Any, Dict, List, Mapping, Tuple + +from attrs import define, evolve, field, Factory +from tiktoken import get_encoding + +from exchange.checkpoint import Checkpoint, CheckpointData +from exchange.content import Text, ToolResult, ToolUse +from exchange.message import Message +from exchange.moderators import Moderator +from exchange.moderators.truncate import ContextTruncate +from exchange.providers import Provider, Usage 
+from exchange.tool import Tool +from exchange.token_usage_collector import _token_usage_collector + + +def validate_tool_output(output: str) -> None: + """Validate tool output for the given model""" + max_output_chars = 2**20 + max_output_tokens = 16000 + encoder = get_encoding("cl100k_base") + if len(output) > max_output_chars or len(encoder.encode(output)) > max_output_tokens: + raise ValueError("This tool call created an output that was too long to handle!") + + +@define(frozen=True) +class Exchange: + """An exchange of messages with an LLM + + The exchange class is meant to be largely immutable, with only the message list + growing once constructed. Use .replace to alter the model, tools, etc. + + The exchange supports tool usage, calling tools and letting the model respond when + using the .reply method. It handles most forms of errors and sends those errors back + to the model, to let it attempt to recover. + """ + + provider: Provider + model: str + system: str + moderator: Moderator = field(default=ContextTruncate()) + tools: Tuple[Tool] = field(factory=tuple, converter=tuple) + messages: List[Message] = field(factory=list) + checkpoint_data: CheckpointData = field(factory=CheckpointData) + generation_args: dict = field(default=Factory(dict)) + + @property + def _toolmap(self) -> Mapping[str, Tool]: + return {tool.name: tool for tool in self.tools} + + def replace(self, **kwargs: Dict[str, Any]) -> "Exchange": + """Make a copy of the exchange, replacing any passed arguments""" + # TODO: ensure that the checkpoint data is updated correctly. aka, + # if we replace the messages, we need to update the checkpoint data + # if we change the model, we need to update the checkpoint data (?) + + if kwargs.get("messages") is None: + kwargs["messages"] = deepcopy(self.messages) + if kwargs.get("checkpoint_data") is None: + kwargs["checkpoint_data"] = deepcopy( + self.checkpoint_data, + ) + return evolve(self, **kwargs) + + def add(self, message: Message) -> None: + """Add a message to the history.""" + if self.messages and message.role == self.messages[-1].role: + raise ValueError("Messages in the exchange must alternate between user and assistant") + self.messages.append(message) + + def generate(self) -> Message: + """Generate the next message.""" + self.moderator.rewrite(self) + message, usage = self.provider.complete( + self.model, + self.system, + messages=self.messages, + tools=self.tools, + **self.generation_args, + ) + self.add(message) + self.add_checkpoints_from_usage(usage) # this has to come after adding the response + + # TODO: also call `rewrite` here, as this will make our + # messages *consistently* below the token limit. this currently + # is not the case because we could append a large message after calling + # `rewrite` above. + # self.moderator.rewrite(self) + + _token_usage_collector.collect(self.model, usage) + return message + + def reply(self, max_tool_use: int = 128) -> Message: + """Get the reply from the underlying model. + + This will process any requests for tool calls, calling them immediately, and + storing the intermediate tool messages in the queue. It will return after the + first response that does not request a tool use + + Args: + max_tool_use: The maximum number of tool calls to make before returning. Defaults to 128. 
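+
+        Raises:
+            ValueError: if max_tool_use is not a positive number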
+        """
+        if max_tool_use <= 0:
+            raise ValueError("max_tool_use must be greater than 0")
+        response = self.generate()
+        curr_iter = 1  # generate() already called once
+        while response.tool_use:
+            content = []
+            for tool_use in response.tool_use:
+                tool_result = self.call_function(tool_use)
+                content.append(tool_result)
+            self.add(Message(role="user", content=content))
+
+            # We've reached the limit of tool calls - break out of the loop
+            if curr_iter >= max_tool_use:
+                # At this point, the most recent message is `Message(role='user', content=ToolResult(...))`
+                response = Message.assistant(
+                    f"We've stopped executing additional tool calls because we reached the limit of {max_tool_use}",
+                )
+                self.add(response)
+                break
+            else:
+                response = self.generate()
+                curr_iter += 1
+
+        return response
+
+    def call_function(self, tool_use: ToolUse) -> ToolResult:
+        """Call the function indicated by the tool use"""
+        tool = self._toolmap.get(tool_use.name)
+
+        if tool is None or tool_use.is_error:
+            output = f"ERROR: Failed to use tool {tool_use.id}.\nDo NOT use the same tool name and parameters again - that will lead to the same error."  # noqa: E501
+
+            if tool_use.is_error:
+                output += f"\n{tool_use.error_message}"
+            elif tool is None:
+                valid_tool_names = ", ".join(self._toolmap.keys())
+                output += f"\nNo tool exists with the name '{tool_use.name}'. Valid tool names are: {valid_tool_names}"
+
+            return ToolResult(tool_use_id=tool_use.id, output=output, is_error=True)
+
+        try:
+            if isinstance(tool_use.parameters, dict):
+                output = json.dumps(tool.function(**tool_use.parameters))
+            elif isinstance(tool_use.parameters, list):
+                output = json.dumps(tool.function(*tool_use.parameters))
+            else:
+                raise ValueError(
+                    f"The provided tool parameters, {tool_use.parameters} could not be interpreted as a mapping of arguments."  # noqa: E501
+                )
+
+            validate_tool_output(output)
+
+            is_error = False
+        except Exception as e:
+            tb = traceback.format_exc()
+            output = str(tb) + "\n" + str(e)
+            is_error = True
+
+        return ToolResult(tool_use_id=tool_use.id, output=output, is_error=is_error)
+
+    def add_tool_use(self, tool_use: ToolUse) -> None:
+        """Manually add a tool use and corresponding result
+
+        This will call the implied function and add an assistant
+        message requesting the ToolUse and a user message with the ToolResult
+        """
+        tool_result = self.call_function(tool_use)
+        self.add(Message(role="assistant", content=[tool_use]))
+        self.add(Message(role="user", content=[tool_result]))
+
+    def add_checkpoints_from_usage(self, usage: Usage) -> None:
+        """
+        Add checkpoints to the exchange based on the token counts of the last two
+        groups of messages, as well as the current total token count of the exchange
+        """
+        # we know we just appended one message as the response from the LLM
+        # so we need to create two checkpoints as we know the token counts
+        # of the last two groups of messages:
+        #   1. from the last checkpoint to the most recent user message
+        #   2. the most recent assistant message
+        last_checkpoint_end_index = (
+            self.checkpoint_data.checkpoints[-1].end_index - self.checkpoint_data.message_index_offset
+            if len(self.checkpoint_data.checkpoints) > 0
+            else -1
+        )
+        new_start_index = last_checkpoint_end_index + 1
+
+        # here, our self.checkpoint_data.total_token_count is the previous total token count from the last time
+        # that we performed a request. if we subtract this value from the input_tokens from our
+        # latest response, we know how many tokens our **1** from above is.
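+        # For example: if the previous total_token_count was 1,000 and this
+        # response reports input_tokens=1,200 and output_tokens=50, then block
+        # **1** (the new user-side messages) accounts for 200 tokens and block
+        # **2** (the new assistant message) for 50.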
+        first_block_token_count = usage.input_tokens - self.checkpoint_data.total_token_count
+        second_block_token_count = usage.output_tokens
+
+        if len(self.messages) - new_start_index > 1:
+            # this will occur most of the time, as we will have one new user message and one
+            # new assistant message.
+
+            self.checkpoint_data.checkpoints.append(
+                Checkpoint(
+                    start_index=new_start_index + self.checkpoint_data.message_index_offset,
+                    # end index below is equivalent to the second last message. why? because
+                    # the last message is the assistant message that we add below. we need to also
+                    # track the token count of the user message sent.
+                    end_index=len(self.messages) - 2 + self.checkpoint_data.message_index_offset,
+                    token_count=first_block_token_count,
+                )
+            )
+        self.checkpoint_data.checkpoints.append(
+            Checkpoint(
+                start_index=len(self.messages) - 1 + self.checkpoint_data.message_index_offset,
+                end_index=len(self.messages) - 1 + self.checkpoint_data.message_index_offset,
+                token_count=second_block_token_count,
+            )
+        )
+
+        # TODO: check if the front of the checkpoints doesn't overlap with
+        # the first message. if so, we are missing checkpoint data from
+        # message[0] to message[checkpoint_data.checkpoints[0].start_index]
+        # we can fill in this data by performing an extra request and doing some math
+        self.checkpoint_data.total_token_count = usage.total_tokens
+
+    def pop_last_message(self) -> Message:
+        """Pop the last message from the exchange, handling checkpoints correctly"""
+        if (
+            len(self.checkpoint_data.checkpoints) > 0
+            and self.checkpoint_data.last_message_index > len(self.messages) - 1
+        ):
+            raise ValueError("Our checkpoint data is out of sync with our message data")
+        if (
+            len(self.checkpoint_data.checkpoints) > 0
+            and self.checkpoint_data.last_message_index == len(self.messages) - 1
+        ):
+            # remove the last checkpoint, because we no longer know the token count of its contents.
+            # note that this is not the same as reverting to the last checkpoint, as we want to
+            # keep the messages from the last checkpoint.
they will have a new checkpoint created for + # them when we call generate() again + self.checkpoint_data.pop() + self.messages.pop() + + def pop_first_message(self) -> Message: + """Pop the first message from the exchange, handling checkpoints correctly""" + if len(self.messages) == 0: + raise ValueError("There are no messages to pop") + if len(self.checkpoint_data.checkpoints) == 0: + raise ValueError("There must be at least one checkpoint to pop the first message") + + # get the start and end indexes of the first checkpoint, use these to remove message + first_checkpoint = self.checkpoint_data.checkpoints[0] + first_checkpoint_start_index = first_checkpoint.start_index - self.checkpoint_data.message_index_offset + + # check if the first message is part of the first checkpoint + if first_checkpoint_start_index == 0: + # remove this checkpoint, as it no longer has any messages + self.checkpoint_data.pop(0) + + self.messages.pop(0) + self.checkpoint_data.message_index_offset += 1 + + if len(self.checkpoint_data.checkpoints) == 0: + # we've removed all the checkpoints, so we need to reset the message index offset + self.checkpoint_data.message_index_offset = 0 + + def pop_last_checkpoint(self) -> Tuple[Checkpoint, List[Message]]: + """ + Reverts the exchange back to the last checkpoint, removing associated messages + """ + removed_checkpoint = self.checkpoint_data.checkpoints.pop() + # pop messages until we reach the start of the next checkpoint + messages = [] + while len(self.messages) > removed_checkpoint.start_index - self.checkpoint_data.message_index_offset: + messages.append(self.messages.pop()) + return removed_checkpoint, messages + + def pop_first_checkpoint(self) -> Tuple[Checkpoint, List[Message]]: + """ + Pop the first checkpoint from the exchange, removing associated messages + """ + if len(self.checkpoint_data.checkpoints) == 0: + raise ValueError("There are no checkpoints to pop") + first_checkpoint = self.checkpoint_data.pop(0) + + # remove messages until we reach the start of the next checkpoint + messages = [] + stop_at_index = first_checkpoint.end_index - self.checkpoint_data.message_index_offset + for _ in range(stop_at_index + 1): # +1 because it's inclusive + messages.append(self.messages.pop(0)) + self.checkpoint_data.message_index_offset += 1 + + if len(self.checkpoint_data.checkpoints) == 0: + # we've removed all the checkpoints, so we need to reset the message index offset + self.checkpoint_data.message_index_offset = 0 + return first_checkpoint, messages + + def prepend_checkpointed_message(self, message: Message, token_count: int) -> None: + """Prepend a message to the exchange, updating the checkpoint data""" + self.messages.insert(0, message) + new_index = max(0, self.checkpoint_data.message_index_offset - 1) + self.checkpoint_data.checkpoints.insert( + 0, + Checkpoint( + start_index=new_index, + end_index=new_index, + token_count=token_count, + ), + ) + self.checkpoint_data.message_index_offset = new_index + + def rewind(self) -> None: + if not self.messages: + return + + # we remove messages until we find the last user text message + while not (self.messages[-1].role == "user" and type(self.messages[-1].content[-1]) is Text): + self.pop_last_message() + + # now we remove that last user text message, putting us at a good point + # to ask the user for their input again + if self.messages: + self.pop_last_message() + + @property + def is_allowed_to_call_llm(self) -> bool: + """ + Returns True if the exchange is allowed to call the LLM, False otherwise + """ + # 
TODO: reconsider whether this function belongs here and whether it is necessary
+        # Some models will have different requirements than others, so it may be better for
+        # this to be a required method of the provider instead.
+        return len(self.messages) > 0 and self.messages[-1].role == "user"
+
+    def get_token_usage(self) -> Dict[str, Usage]:
+        return _token_usage_collector.get_token_usage_group_by_model()
diff --git a/packages/exchange/src/exchange/message.py b/packages/exchange/src/exchange/message.py
new file mode 100644
index 000000000..035c60345
--- /dev/null
+++ b/packages/exchange/src/exchange/message.py
@@ -0,0 +1,121 @@
+import inspect
+import time
+from pathlib import Path
+from typing import Any, Dict, List, Literal, Type
+
+from attrs import define, field
+from jinja2 import Environment, FileSystemLoader
+
+from exchange.content import CONTENT_TYPES, Content, Text, ToolResult, ToolUse
+from exchange.utils import create_object_id
+
+Role = Literal["user", "assistant"]
+
+
+def validate_role_and_content(instance: "Message", *_: Any) -> None:  # noqa: ANN401
+    if instance.role == "user":
+        if not (instance.text or instance.tool_result):
+            raise ValueError("User message must include a Text or ToolResult")
+        if instance.tool_use:
+            raise ValueError("User message does not support ToolUse")
+    elif instance.role == "assistant":
+        if not (instance.text or instance.tool_use):
+            raise ValueError("Assistant message must include a Text or ToolUse")
+        if instance.tool_result:
+            raise ValueError("Assistant message does not support ToolResult")
+
+
+def content_converter(contents: List[Dict[str, Any]]) -> List[Content]:
+    return [(CONTENT_TYPES[c.pop("type")](**c) if c.__class__ not in CONTENT_TYPES.values() else c) for c in contents]
+
+
+@define
+class Message:
+    """A message to or from a language model.
+
+    This supports several content types to extend to tool usage and (tbi) images.
+
+    We also provide shortcuts for simplified text usage; these two are identical:
+    ```
+    m = Message(role='user', content=[Text(text='abcd')])
+    assert m.content[0].text == 'abcd'
+
+    m = Message.user('abcd')
+    assert m.text == 'abcd'
+    ```
+    """
+
+    role: Role = field(default="user")
+    id: str = field(factory=lambda: str(create_object_id(prefix="msg")))
+    created: int = field(factory=lambda: int(time.time()))
+    content: List[Content] = field(factory=list, validator=validate_role_and_content, converter=content_converter)
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "role": self.role,
+            "id": self.id,
+            "created": self.created,
+            "content": [item.to_dict() for item in self.content],
+        }
+
+    @property
+    def text(self) -> str:
+        """The text content of this message."""
+        result = []
+        for content in self.content:
+            if isinstance(content, Text):
+                result.append(content.text)
+        return "\n".join(result)
+
+    @property
+    def tool_use(self) -> List[ToolUse]:
+        """All tool use content of this message."""
+        result = []
+        for content in self.content:
+            if isinstance(content, ToolUse):
+                result.append(content)
+        return result
+
+    @property
+    def tool_result(self) -> List[ToolResult]:
+        """All tool result content of this message."""
+        result = []
+        for content in self.content:
+            if isinstance(content, ToolResult):
+                result.append(content)
+        return result
+
+    @classmethod
+    def load(
+        cls: Type["Message"],
+        filename: str,
+        role: Role = "user",
+        **kwargs: Dict[str, Any],
+    ) -> "Message":
+        """Load the message from filename relative to where the load is called.
+
+        This only supports simplified content, with a single text entry.
+
+        This is meant to emulate importing code rather than a runtime filesystem. So
+        if you have a directory of code that contains example.py, and example.py has
+        a function that calls User.load('example.jinja'), it will look in the same
+        directory as example.py for the jinja file.
+        """
+        frm = inspect.stack()[1]
+        mod = inspect.getmodule(frm[0])
+
+        base_path = Path(mod.__file__).parent
+
+        env = Environment(loader=FileSystemLoader(base_path))
+        template = env.get_template(filename)
+        rendered_content = template.render(**kwargs)
+
+        return cls(role=role, content=[Text(text=rendered_content)])
+
+    @classmethod
+    def user(cls: Type["Message"], text: str) -> "Message":
+        return cls(role="user", content=[Text(text)])
+
+    @classmethod
+    def assistant(cls: Type["Message"], text: str) -> "Message":
+        return cls(role="assistant", content=[Text(text)])
diff --git a/packages/exchange/src/exchange/moderators/__init__.py b/packages/exchange/src/exchange/moderators/__init__.py
new file mode 100644
index 000000000..56b198a75
--- /dev/null
+++ b/packages/exchange/src/exchange/moderators/__init__.py
@@ -0,0 +1,13 @@
+from functools import cache
+from typing import Type
+
+from exchange.moderators.base import Moderator
+from exchange.utils import load_plugins
+from exchange.moderators.passive import PassiveModerator  # noqa
+from exchange.moderators.truncate import ContextTruncate  # noqa
+from exchange.moderators.summarizer import ContextSummarizer  # noqa
+
+
+@cache
+def get_moderator(name: str) -> Type[Moderator]:
+    return load_plugins(group="exchange.moderator")[name]
diff --git a/packages/exchange/src/exchange/moderators/base.py b/packages/exchange/src/exchange/moderators/base.py
new file mode 100644
index 000000000..d7c630c6a
--- /dev/null
+++ b/packages/exchange/src/exchange/moderators/base.py
@@ -0,0 +1,8 @@
+from abc import ABC, abstractmethod
+from typing import Type
+
+
+class Moderator(ABC):
+    @abstractmethod
+    def rewrite(self, exchange: Type["exchange.exchange.Exchange"]) -> None:  # noqa: F821
+        pass
diff --git a/packages/exchange/src/exchange/moderators/passive.py b/packages/exchange/src/exchange/moderators/passive.py
new file mode 100644
index 000000000..e3a24efbd
--- /dev/null
+++ b/packages/exchange/src/exchange/moderators/passive.py
@@ -0,0 +1,7 @@
+from typing import Type
+from exchange.moderators.base import Moderator
+
+
+class PassiveModerator(Moderator):
+    def rewrite(self, _: Type["exchange.exchange.Exchange"]) -> None:  # noqa: F821
+        pass
diff --git a/packages/exchange/src/exchange/moderators/summarizer.jinja b/packages/exchange/src/exchange/moderators/summarizer.jinja
new file mode 100644
index 000000000..00c29ed82
--- /dev/null
+++ b/packages/exchange/src/exchange/moderators/summarizer.jinja
@@ -0,0 +1,9 @@
+You are an expert technical summarizer.
+
+During your conversation with the user, you may be asked to summarize the content in your conversation history.
+When asked to summarize, you should concisely summarize the conversation, giving emphasis to newer content. Newer content will be towards the end of the conversation.
+Preferentially keep user supplied content in the summary.
+
+The summary *MUST* include filenames that were touched and/or modified. If the updates occurred more recently, keep the latest modifications made to the files in the summary. If the changes occurred earlier in the chat, briefly summarize the changes and don't include the changes in the summary.
+
+There will likely be json formatted blocks referencing ToolUse and ToolResults. You can ignore ToolUse references, but keep the ToolResult outputs, summarizing as needed and with the same guidelines as above.
diff --git a/packages/exchange/src/exchange/moderators/summarizer.py b/packages/exchange/src/exchange/moderators/summarizer.py
new file mode 100644
index 000000000..7e2dd5588
--- /dev/null
+++ b/packages/exchange/src/exchange/moderators/summarizer.py
@@ -0,0 +1,46 @@
+from typing import Type
+
+from exchange import Message
+from exchange.checkpoint import CheckpointData
+from exchange.moderators import ContextTruncate, PassiveModerator
+
+
+class ContextSummarizer(ContextTruncate):
+    def rewrite(self, exchange: Type["exchange.exchange.Exchange"]) -> None:  # noqa: F821
+        """Summarize the context history up to the last few messages in the exchange"""
+
+        self._update_system_prompt_token_count(exchange)
+
+        # TODO: use an offset for summarization
+        if exchange.checkpoint_data.total_token_count < self.max_tokens:
+            return
+
+        messages_to_summarize = self._get_messages_to_remove(exchange)
+        num_messages_to_remove = len(messages_to_summarize)
+
+        # the llm will throw an error if the last message isn't a user message
+        if messages_to_summarize[-1].role == "assistant" and (not messages_to_summarize[-1].tool_use):
+            messages_to_summarize.append(Message.user("Summarize the above conversation"))
+
+        summarizer_exchange = exchange.replace(
+            system=Message.load("summarizer.jinja").text,
+            moderator=PassiveModerator(),
+            model=self.model,
+            messages=messages_to_summarize,
+            checkpoint_data=CheckpointData(),
+        )
+
+        # get the summarized content and the tokens associated with this content
+        summary = summarizer_exchange.reply()
+        summary_checkpoint = summarizer_exchange.checkpoint_data.checkpoints[-1]
+
+        # remove the checkpoints that were summarized from the original exchange
+        for _ in range(num_messages_to_remove):
+            exchange.pop_first_message()
+
+        # insert summary as first message/checkpoint
+        if len(exchange.messages) == 0 or exchange.messages[0].role == "assistant":
+            summary_message = Message.user(summary.text)
+        else:
+            summary_message = Message.assistant(summary.text)
+        exchange.prepend_checkpointed_message(summary_message, summary_checkpoint.token_count)
diff --git a/packages/exchange/src/exchange/moderators/truncate.py b/packages/exchange/src/exchange/moderators/truncate.py
new file mode 100644
index 000000000..41115f663
--- /dev/null
+++ b/packages/exchange/src/exchange/moderators/truncate.py
@@ -0,0 +1,82 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, List, Optional
+
+from exchange.checkpoint import CheckpointData
+from exchange.message import Message
+from exchange.moderators import PassiveModerator
+from exchange.moderators.base import Moderator
+
+if TYPE_CHECKING:
+    from exchange.exchange import Exchange
+
+# currently this is the point at which we start to truncate,
+# so once we get to this token size the token count will exceed this
+# by a little bit.
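+# For example, with MAX_TOKENS = 100000 a reply generated while the running
+# total is at 99,000 tokens still gets appended, briefly pushing the context
+# past the limit until the next truncation pass.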
+# TODO: make this configurable for each provider +MAX_TOKENS = 100000 + + +class ContextTruncate(Moderator): + def __init__( + self, + model: Optional[str] = None, + max_tokens: int = MAX_TOKENS, + ) -> None: + self.model = model + self.system_prompt_token_count = 0 + self.max_tokens = max_tokens + self.last_system_prompt = None + + def rewrite(self, exchange: Exchange) -> None: + """Truncate the exchange messages with a FIFO strategy.""" + self._update_system_prompt_token_count(exchange) + + if exchange.checkpoint_data.total_token_count < self.max_tokens: + return + + messages_to_remove = self._get_messages_to_remove(exchange) + for _ in range(len(messages_to_remove)): + exchange.pop_first_message() + + def _update_system_prompt_token_count(self, exchange: Exchange) -> None: + is_different_system_prompt = False + if self.last_system_prompt != exchange.system: + is_different_system_prompt = True + self.last_system_prompt = exchange.system + + if not self.system_prompt_token_count or is_different_system_prompt: + # calculate the system prompt tokens (includes functions etc...) + # we use a placeholder message with one token, which we subtract later + # this ensures compatibility with providers that require a user message + _system_token_exchange = exchange.replace( + messages=[Message.user("a")], + checkpoint_data=CheckpointData(), + moderator=PassiveModerator(), + model=self.model if self.model else exchange.model, + ) + _system_token_exchange.generate() + last_system_prompt_token_count = self.system_prompt_token_count + self.system_prompt_token_count = _system_token_exchange.checkpoint_data.total_token_count - 1 + + exchange.checkpoint_data.total_token_count -= last_system_prompt_token_count + exchange.checkpoint_data.total_token_count += self.system_prompt_token_count + + def _get_messages_to_remove(self, exchange: Exchange) -> List[Message]: + # this keeps all the messages/checkpoints + throwaway_exchange = exchange.replace( + moderator=PassiveModerator(), + ) + + # get the messages that we want to remove + messages_to_remove = [] + while throwaway_exchange.checkpoint_data.total_token_count > self.max_tokens: + _, messages = throwaway_exchange.pop_first_checkpoint() + messages_to_remove.extend(messages) + + while len(throwaway_exchange.messages) > 0 and throwaway_exchange.messages[0].tool_result: + # we would need a corresponding tool use once we resume, so we pop this one off too + # and summarize it as well + _, messages = throwaway_exchange.pop_first_checkpoint() + messages_to_remove.extend(messages) + return messages_to_remove diff --git a/packages/exchange/src/exchange/providers/__init__.py b/packages/exchange/src/exchange/providers/__init__.py new file mode 100644 index 000000000..ac7ed07a0 --- /dev/null +++ b/packages/exchange/src/exchange/providers/__init__.py @@ -0,0 +1,17 @@ +from functools import cache +from typing import Type + +from exchange.providers.anthropic import AnthropicProvider # noqa +from exchange.providers.base import Provider, Usage # noqa +from exchange.providers.databricks import DatabricksProvider # noqa +from exchange.providers.openai import OpenAiProvider # noqa +from exchange.providers.ollama import OllamaProvider # noqa +from exchange.providers.azure import AzureProvider # noqa +from exchange.providers.google import GoogleProvider # noqa + +from exchange.utils import load_plugins + + +@cache +def get_provider(name: str) -> Type[Provider]: + return load_plugins(group="exchange.provider")[name] diff --git 
a/packages/exchange/src/exchange/providers/anthropic.py b/packages/exchange/src/exchange/providers/anthropic.py new file mode 100644 index 000000000..154ec5f79 --- /dev/null +++ b/packages/exchange/src/exchange/providers/anthropic.py @@ -0,0 +1,158 @@ +import os +from typing import Any, Dict, List, Tuple, Type + +import httpx + +from exchange import Message, Tool +from exchange.content import Text, ToolResult, ToolUse +from exchange.providers.base import Provider, Usage +from tenacity import retry, wait_fixed, stop_after_attempt +from exchange.providers.utils import retry_if_status +from exchange.providers.utils import raise_for_status + +ANTHROPIC_HOST = "https://api.anthropic.com/v1/messages" + +retry_procedure = retry( + wait=wait_fixed(2), + stop=stop_after_attempt(2), + retry=retry_if_status(codes=[429], above=500), + reraise=True, +) + + +class AnthropicProvider(Provider): + def __init__(self, client: httpx.Client) -> None: + self.client = client + + @classmethod + def from_env(cls: Type["AnthropicProvider"]) -> "AnthropicProvider": + url = os.environ.get("ANTHROPIC_HOST", ANTHROPIC_HOST) + try: + key = os.environ["ANTHROPIC_API_KEY"] + except KeyError: + raise RuntimeError("Failed to get ANTHROPIC_API_KEY from the environment") + client = httpx.Client( + base_url=url, + headers={ + "x-api-key": key, + "content-type": "application/json", + "anthropic-version": "2023-06-01", + }, + timeout=httpx.Timeout(60 * 10), + ) + return cls(client) + + @staticmethod + def get_usage(data: Dict) -> Usage: # noqa: ANN401 + usage = data.get("usage") + input_tokens = usage.get("input_tokens") + output_tokens = usage.get("output_tokens") + total_tokens = usage.get("total_tokens") + + if total_tokens is None and input_tokens is not None and output_tokens is not None: + total_tokens = input_tokens + output_tokens + + return Usage( + input_tokens=input_tokens, + output_tokens=output_tokens, + total_tokens=total_tokens, + ) + + @staticmethod + def anthropic_response_to_message(response: Dict) -> Message: + content_blocks = response.get("content", []) + content = [] + for block in content_blocks: + if block["type"] == "text": + content.append(Text(text=block["text"])) + elif block["type"] == "tool_use": + content.append( + ToolUse( + id=block["id"], + name=block["name"], + parameters=block["input"], + ) + ) + return Message(role="assistant", content=content) + + @staticmethod + def tools_to_anthropic_spec(tools: Tuple[Tool]) -> List[Dict[str, Any]]: + return [ + { + "name": tool.name, + "description": tool.description or "", + "input_schema": tool.parameters, + } + for tool in tools + ] + + @staticmethod + def messages_to_anthropic_spec(messages: List[Message]) -> List[Dict[str, Any]]: + messages_spec = [] + # if messages is empty - just make a default + for message in messages: + converted = {"role": message.role} + for content in message.content: + if isinstance(content, Text): + converted["content"] = [{"type": "text", "text": content.text}] + elif isinstance(content, ToolUse): + converted.setdefault("content", []).append( + { + "type": "tool_use", + "id": content.id, + "name": content.name, + "input": content.parameters, + } + ) + elif isinstance(content, ToolResult): + converted.setdefault("content", []).append( + { + "type": "tool_result", + "tool_use_id": content.tool_use_id, + "content": content.output, + } + ) + messages_spec.append(converted) + if len(messages_spec) == 0: + converted = { + "role": "user", + "content": [{"type": "text", "text": "Ignore"}], + } + messages_spec.append(converted) + 
return messages_spec
+
+    def complete(
+        self,
+        model: str,
+        system: str,
+        messages: List[Message],
+        tools: List[Tool] = [],
+        **kwargs: Dict[str, Any],
+    ) -> Tuple[Message, Usage]:
+        tools_set = set()
+        unique_tools = []
+        for tool in tools:
+            if tool.name not in tools_set:
+                unique_tools.append(tool)
+                tools_set.add(tool.name)
+
+        payload = dict(
+            system=system,
+            model=model,
+            max_tokens=4096,
+            messages=self.messages_to_anthropic_spec(messages),
+            tools=self.tools_to_anthropic_spec(tuple(unique_tools)),
+            **kwargs,
+        )
+        payload = {k: v for k, v in payload.items() if v}
+
+        response = self._post(payload)
+        message = self.anthropic_response_to_message(response)
+        usage = self.get_usage(response)
+
+        return message, usage
+
+    @retry_procedure
+    def _post(self, payload: dict) -> dict:
+        response = self.client.post(ANTHROPIC_HOST, json=payload)
+        return raise_for_status(response).json()
diff --git a/packages/exchange/src/exchange/providers/azure.py b/packages/exchange/src/exchange/providers/azure.py
new file mode 100644
index 000000000..7bacb9ddc
--- /dev/null
+++ b/packages/exchange/src/exchange/providers/azure.py
@@ -0,0 +1,45 @@
+import os
+from typing import Type
+
+import httpx
+
+from exchange.providers import OpenAiProvider
+
+
+class AzureProvider(OpenAiProvider):
+    """Provides chat completions for models hosted by the Azure OpenAI Service"""
+
+    def __init__(self, client: httpx.Client) -> None:
+        super().__init__(client)
+
+    @classmethod
+    def from_env(cls: Type["AzureProvider"]) -> "AzureProvider":
+        try:
+            url = os.environ["AZURE_CHAT_COMPLETIONS_HOST_NAME"]
+        except KeyError:
+            raise RuntimeError("Failed to get AZURE_CHAT_COMPLETIONS_HOST_NAME from the environment.")
+
+        try:
+            deployment_name = os.environ["AZURE_CHAT_COMPLETIONS_DEPLOYMENT_NAME"]
+        except KeyError:
+            raise RuntimeError("Failed to get AZURE_CHAT_COMPLETIONS_DEPLOYMENT_NAME from the environment.")
+
+        try:
+            api_version = os.environ["AZURE_CHAT_COMPLETIONS_DEPLOYMENT_API_VERSION"]
+        except KeyError:
+            raise RuntimeError("Failed to get AZURE_CHAT_COMPLETIONS_DEPLOYMENT_API_VERSION from the environment.")
+
+        try:
+            key = os.environ["AZURE_CHAT_COMPLETIONS_KEY"]
+        except KeyError:
+            raise RuntimeError("Failed to get AZURE_CHAT_COMPLETIONS_KEY from the environment.")
+
+        # Construct the base URL as {host}/openai/deployments/{deployment_name}/ with api-version as a query parameter
+        url = f"{url}/openai/deployments/{deployment_name}/"
+        client = httpx.Client(
+            base_url=url,
+            headers={"api-key": key, "Content-Type": "application/json"},
+            params={"api-version": api_version},
+            timeout=httpx.Timeout(60 * 10),
+        )
+        return cls(client)
diff --git a/packages/exchange/src/exchange/providers/base.py b/packages/exchange/src/exchange/providers/base.py
new file mode 100644
index 000000000..7b7ff88bc
--- /dev/null
+++ b/packages/exchange/src/exchange/providers/base.py
@@ -0,0 +1,30 @@
+from abc import ABC, abstractmethod
+from attrs import define, field
+from typing import List, Tuple, Type
+
+from exchange.message import Message
+from exchange.tool import Tool
+
+
+@define(hash=True)
+class Usage:
+    input_tokens: int = field(default=None)
+    output_tokens: int = field(default=None)
+    total_tokens: int = field(default=None)
+
+
+class Provider(ABC):
+    @classmethod
+    def from_env(cls: Type["Provider"]) -> "Provider":
+        return cls()
+
+    @abstractmethod
+    def complete(
+        self,
+        model: str,
+        system: str,
+        messages: List[Message],
+        tools: Tuple[Tool],
+    ) -> Tuple[Message, Usage]:
+        """Generate the next message using the specified model"""
+        pass
diff --git a/packages/exchange/src/exchange/providers/bedrock.py b/packages/exchange/src/exchange/providers/bedrock.py
new file mode 100644
index 000000000..2a5f53dc8
--- /dev/null
+++ b/packages/exchange/src/exchange/providers/bedrock.py
@@ -0,0 +1,328 @@
+import hashlib
+import hmac
+import json
+import logging
+import os
+from datetime import datetime, timezone
+from typing import Any, Dict, List, Optional, Tuple, Type
+from urllib.parse import quote, urlparse
+
+import httpx
+
+from exchange.content import Text, ToolResult, ToolUse
+from exchange.message import Message
+from exchange.providers import Provider, Usage
+from tenacity import retry, wait_fixed, stop_after_attempt
+from exchange.providers.utils import retry_if_status
+from exchange.providers.utils import raise_for_status
+from exchange.tool import Tool
+
+SERVICE = "bedrock-runtime"
+UTC = timezone.utc
+
+logger = logging.getLogger(__name__)
+
+retry_procedure = retry(
+    wait=wait_fixed(2),
+    stop=stop_after_attempt(2),
+    retry=retry_if_status(codes=[429], above=500),
+    reraise=True,
+)
+
+
+class AwsClient(httpx.Client):
+    def __init__(
+        self,
+        aws_region: str,
+        aws_access_key: str,
+        aws_secret_key: str,
+        aws_session_token: Optional[str] = None,
+        **kwargs: Dict[str, Any],
+    ) -> None:
+        self.region = aws_region
+        self.host = f"https://{SERVICE}.{aws_region}.amazonaws.com/"
+        self.access_key = aws_access_key
+        self.secret_key = aws_secret_key
+        self.session_token = aws_session_token
+        super().__init__(base_url=self.host, timeout=600, **kwargs)
+
+    def post(self, path: str, json: Dict, **kwargs: Dict[str, Any]) -> httpx.Response:
+        signed_headers = self.sign_and_get_headers(
+            method="POST",
+            url=path,
+            payload=json,
+            service="bedrock",
+        )
+        return super().post(url=path, json=json, headers=signed_headers, **kwargs)
+
+    def sign_and_get_headers(
+        self,
+        method: str,
+        url: str,
+        payload: dict,
+        service: str,
+    ) -> Dict[str, str]:
+        """
+        Sign the request and generate the necessary headers for AWS authentication.
+
+        The region, access key, secret key, and optional session token are taken
+        from this client instance.
+
+        Args:
+            method (str): HTTP method (e.g., 'GET', 'POST').
+            url (str): The request URL.
+            payload (dict): The request payload.
+            service (str): The AWS service name.
+
+        Returns:
+            Dict[str, str]: The headers required for the request.
+        """
+
+        def sign(key: bytes, msg: str) -> bytes:
+            return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
+
+        def get_signature_key(key: str, date_stamp: str, region_name: str, service_name: str) -> bytes:
+            k_date = sign(("AWS4" + key).encode("utf-8"), date_stamp)
+            k_region = sign(k_date, region_name)
+            k_service = sign(k_region, service_name)
+            k_signing = sign(k_service, "aws4_request")
+            return k_signing
+
+        # Convert payload to JSON string
+        request_parameters = json.dumps(payload)
+
+        # Create a date for headers and the credential string
+        t = datetime.now(UTC)
+        amz_date = t.strftime("%Y%m%dT%H%M%SZ")
+        date_stamp = t.strftime("%Y%m%d")  # Date w/o time, used in credential scope
+
+        # Create canonical URI and headers
+        parsedurl = urlparse(url)
+        canonical_uri = quote(parsedurl.path if parsedurl.path else "/", safe="/-_.~")
+        canonical_headers = f"host:{parsedurl.netloc}\nx-amz-date:{amz_date}\n"
+
+        # Create the list of signed headers.
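+        # e.g. this is "host;x-amz-date" for basic credentials; when a session
+        # token is present, ";x-amz-security-token" is appended below.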
+ signed_headers = "host;x-amz-date" + if self.session_token: + canonical_headers += "x-amz-security-token:" + self.session_token + "\n" + signed_headers += ";x-amz-security-token" + + # Create payload hash + payload_hash = hashlib.sha256(request_parameters.encode("utf-8")).hexdigest() + + # Canonical request + canonical_request = f"{method}\n{canonical_uri}\n\n{canonical_headers}\n{signed_headers}\n{payload_hash}" + + # Create the string to sign + algorithm = "AWS4-HMAC-SHA256" + credential_scope = f"{date_stamp}/{self.region}/{service}/aws4_request" + string_to_sign = ( + f"{algorithm}\n{amz_date}\n{credential_scope}\n" + f'{hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()}' + ) + + # Create the signing key + signing_key = get_signature_key(self.secret_key, date_stamp, self.region, service) + + # Sign the string_to_sign using the signing key + signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest() + + # Add signing information to the request + authorization_header = ( + f"{algorithm} Credential={self.access_key}/{credential_scope}, SignedHeaders={signed_headers}, " + f"Signature={signature}" + ) + + # Headers + headers = { + "Content-Type": "application/json", + "Authorization": authorization_header, + "X-Amz-date": amz_date.encode(), + "x-amz-content-sha256": payload_hash, + } + if self.session_token: + headers["X-Amz-Security-Token"] = self.session_token + + return headers + + +class BedrockProvider(Provider): + def __init__(self, client: AwsClient) -> None: + self.client = client + + @classmethod + def from_env(cls: Type["BedrockProvider"]) -> "BedrockProvider": + aws_region = os.environ.get("AWS_REGION", "us-east-1") + try: + aws_access_key = os.environ["AWS_ACCESS_KEY_ID"] + aws_secret_key = os.environ["AWS_SECRET_ACCESS_KEY"] + aws_session_token = os.environ.get("AWS_SESSION_TOKEN") + except KeyError: + raise RuntimeError("Failed to get AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY from the environment") + + client = AwsClient( + aws_region=aws_region, + aws_access_key=aws_access_key, + aws_secret_key=aws_secret_key, + aws_session_token=aws_session_token, + ) + return cls(client=client) + + def complete( + self, + model: str, + system: str, + messages: List[Message], + tools: Tuple[Tool], + **kwargs: Dict[str, Any], + ) -> Tuple[Message, Usage]: + """ + Generate a completion response from the Bedrock gateway. + + Args: + model (str): The model identifier. + system (str): The system prompt or configuration. + messages (List[Message]): A list of messages to be processed by the model. + tools (Tuple[Tool]): A tuple of tools to be used in the completion process. + **kwargs: Additional keyword arguments for inference configuration. + + Returns: + Tuple[Message, Usage]: A tuple containing the response message and usage data. 
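+
+        Note: the temperature, max_tokens, stop, and topP kwargs are mapped into
+        the Bedrock inferenceConfig block; any other kwargs pass through to the
+        request payload.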
+        """
+
+        inference_config = dict(
+            temperature=kwargs.pop("temperature", None),
+            maxTokens=kwargs.pop("max_tokens", None),
+            stopSequences=kwargs.pop("stop", None),
+            topP=kwargs.pop("topP", None),
+        )
+        inference_config = {k: v for k, v in inference_config.items() if v is not None} or None
+
+        converted_messages = [self.message_to_bedrock_spec(message) for message in messages]
+        converted_system = [dict(text=system)]
+        tool_config = self.tools_to_bedrock_spec(tools)
+        payload = dict(
+            system=converted_system,
+            inferenceConfig=inference_config,
+            messages=converted_messages,
+            toolConfig=tool_config,
+            **kwargs,
+        )
+        payload = {k: v for k, v in payload.items() if v}
+
+        path = f"{self.client.host}model/{model}/converse"
+        response = self._post(payload, path)
+        response_message = response["output"]["message"]
+
+        usage_data = response["usage"]
+        usage = Usage(
+            input_tokens=usage_data.get("inputTokens"),
+            output_tokens=usage_data.get("outputTokens"),
+            total_tokens=usage_data.get("totalTokens"),
+        )
+
+        return self.response_to_message(response_message), usage
+
+    @retry_procedure
+    def _post(self, payload: Any, path: str) -> dict:  # noqa: ANN401
+        response = self.client.post(path, json=payload)
+        return raise_for_status(response).json()
+
+    @staticmethod
+    def message_to_bedrock_spec(message: Message) -> dict:
+        bedrock_content = []
+        try:
+            for content in message.content:
+                if isinstance(content, Text):
+                    bedrock_content.append({"text": content.text})
+                elif isinstance(content, ToolUse):
+                    bedrock_content.append(
+                        {
+                            "toolUse": {
+                                "toolUseId": content.id,
+                                "name": content.name,
+                                "input": content.parameters,
+                            }
+                        }
+                    )
+                elif isinstance(content, ToolResult):
+                    # try to parse the output as json
+                    try:
+                        output = json.loads(content.output)
+                        if isinstance(output, dict):
+                            result_content = [{"json": output}]
+                        else:
+                            result_content = [{"text": str(output)}]
+                    except json.JSONDecodeError:
+                        result_content = [{"text": content.output}]
+
+                    bedrock_content.append(
+                        {
+                            "toolResult": {
+                                "toolUseId": content.tool_use_id,
+                                "content": result_content,
+                                **({"status": "error"} if content.is_error else {}),
+                            }
+                        }
+                    )
+            return {"role": message.role, "content": bedrock_content}
+
+        except AttributeError:
+            raise Exception("Invalid message")
+
+    @staticmethod
+    def response_to_message(response_message: dict) -> Message:
+        content = []
+        if response_message["role"] == "user":
+            for block in response_message["content"]:
+                if "text" in block:
+                    content.append(Text(block["text"]))
+                if "toolResult" in block:
+                    content.append(
+                        ToolResult(
+                            tool_use_id=block["toolResult"]["toolUseId"],
+                            output=block["toolResult"]["content"][0]["json"],
+                            is_error=block["toolResult"].get("status") == "error",
+                        )
+                    )
+            return Message(role="user", content=content)
+        elif response_message["role"] == "assistant":
+            for block in response_message["content"]:
+                if "text" in block:
+                    content.append(Text(block["text"]))
+                if "toolUse" in block:
+                    content.append(
+                        ToolUse(
+                            id=block["toolUse"]["toolUseId"],
+                            name=block["toolUse"]["name"],
+                            parameters=block["toolUse"]["input"],
+                        )
+                    )
+            return Message(role="assistant", content=content)
+        raise Exception("Invalid response")
+
+    @staticmethod
+    def tools_to_bedrock_spec(tools: Tuple[Tool]) -> Optional[dict]:
+        if len(tools) == 0:
+            return None  # API requires a non-empty tool config or None
+        tools_added = set()
+        tool_config_list = []
+        for tool in tools:
+            if tool.name in tools_added:
+
logging.warning(f"Tool {tool.name} already added to tool config. Skipping.") + continue + tool_config_list.append( + { + "toolSpec": { + "name": tool.name, + "description": tool.description, + "inputSchema": {"json": tool.parameters}, + } + } + ) + tools_added.add(tool.name) + tool_config = {"tools": tool_config_list} + return tool_config diff --git a/packages/exchange/src/exchange/providers/databricks.py b/packages/exchange/src/exchange/providers/databricks.py new file mode 100644 index 000000000..84dc7515c --- /dev/null +++ b/packages/exchange/src/exchange/providers/databricks.py @@ -0,0 +1,102 @@ +import os +from typing import Any, Dict, List, Tuple, Type + +import httpx + +from exchange.message import Message +from exchange.providers.base import Provider, Usage +from tenacity import retry, wait_fixed, stop_after_attempt +from exchange.providers.utils import raise_for_status, retry_if_status +from exchange.providers.utils import ( + messages_to_openai_spec, + openai_response_to_message, + tools_to_openai_spec, +) +from exchange.tool import Tool + + +retry_procedure = retry( + wait=wait_fixed(2), + stop=stop_after_attempt(2), + retry=retry_if_status(codes=[429], above=500), + reraise=True, +) + + +class DatabricksProvider(Provider): + """Provides chat completions for models on Databricks serving endpoints + + Models are expected to follow the llm/v1/chat "task". This includes support + for foundation and external model endpoints + https://docs.databricks.com/en/machine-learning/model-serving/create-foundation-model-endpoints.html#create-generative-ai-model-serving-endpoints + """ + + def __init__(self, client: httpx.Client) -> None: + super().__init__() + self.client = client + + @classmethod + def from_env(cls: Type["DatabricksProvider"]) -> "DatabricksProvider": + try: + url = os.environ["DATABRICKS_HOST"] + except KeyError: + raise RuntimeError( + "Failed to get DATABRICKS_HOST from the environment. See https://docs.databricks.com/en/dev-tools/auth/index.html#general-host-token-and-account-id-environment-variables-and-fields" + ) + try: + key = os.environ["DATABRICKS_TOKEN"] + except KeyError: + raise RuntimeError( + "Failed to get DATABRICKS_TOKEN from the environment. 
See https://docs.databricks.com/en/dev-tools/auth/index.html#general-host-token-and-account-id-environment-variables-and-fields" + ) + client = httpx.Client( + base_url=url, + auth=("token", key), + timeout=httpx.Timeout(60 * 10), + ) + return cls(client) + + @staticmethod + def get_usage(data: dict) -> Usage: + usage = data.pop("usage") + input_tokens = usage.get("prompt_tokens") + output_tokens = usage.get("completion_tokens") + total_tokens = usage.get("total_tokens") + if total_tokens is None and input_tokens is not None and output_tokens is not None: + total_tokens = input_tokens + output_tokens + + return Usage( + input_tokens=input_tokens, + output_tokens=output_tokens, + total_tokens=total_tokens, + ) + + def complete( + self, + model: str, + system: str, + messages: List[Message], + tools: Tuple[Tool], + **kwargs: Dict[str, Any], + ) -> Tuple[Message, Usage]: + payload = dict( + messages=[ + {"role": "system", "content": system}, + *messages_to_openai_spec(messages), + ], + tools=tools_to_openai_spec(tools) if tools else [], + **kwargs, + ) + payload = {k: v for k, v in payload.items() if v} + response = self._post(model, payload) + message = openai_response_to_message(response) + usage = self.get_usage(response) + return message, usage + + @retry_procedure + def _post(self, model: str, payload: dict) -> httpx.Response: + response = self.client.post( + f"serving-endpoints/{model}/invocations", + json=payload, + ) + return raise_for_status(response).json() diff --git a/packages/exchange/src/exchange/providers/google.py b/packages/exchange/src/exchange/providers/google.py new file mode 100644 index 000000000..426aa79d5 --- /dev/null +++ b/packages/exchange/src/exchange/providers/google.py @@ -0,0 +1,154 @@ +import os +from typing import Any, Dict, List, Tuple, Type + +import httpx + +from exchange import Message, Tool +from exchange.content import Text, ToolResult, ToolUse +from exchange.providers.base import Provider, Usage +from tenacity import retry, wait_fixed, stop_after_attempt +from exchange.providers.utils import retry_if_status +from exchange.providers.utils import raise_for_status + +GOOGLE_HOST = "https://generativelanguage.googleapis.com/v1beta" + +retry_procedure = retry( + wait=wait_fixed(2), + stop=stop_after_attempt(2), + retry=retry_if_status(codes=[429], above=500), + reraise=True, +) + + +class GoogleProvider(Provider): + def __init__(self, client: httpx.Client) -> None: + self.client = client + + @classmethod + def from_env(cls: Type["GoogleProvider"]) -> "GoogleProvider": + url = os.environ.get("GOOGLE_HOST", GOOGLE_HOST) + try: + key = os.environ["GOOGLE_API_KEY"] + except KeyError: + raise RuntimeError( + "Failed to get GOOGLE_API_KEY from the environment, see https://ai.google.dev/gemini-api/docs/api-key" + ) + + client = httpx.Client( + base_url=url, + headers={ + "Content-Type": "application/json", + }, + params={"key": key}, + timeout=httpx.Timeout(60 * 10), + ) + return cls(client) + + @staticmethod + def get_usage(data: Dict) -> Usage: # noqa: ANN401 + usage = data.get("usageMetadata") + input_tokens = usage.get("promptTokenCount") + output_tokens = usage.get("candidatesTokenCount") + total_tokens = usage.get("totalTokenCount") + + if total_tokens is None and input_tokens is not None and output_tokens is not None: + total_tokens = input_tokens + output_tokens + + return Usage( + input_tokens=input_tokens, + output_tokens=output_tokens, + total_tokens=total_tokens, + ) + + @staticmethod + def google_response_to_message(response: Dict) -> Message: + 
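+        # Convert a Gemini generateContent response into an exchange Message:
+        # "text" parts become Text content; "functionCall" parts become ToolUse.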
candidates = response.get("candidates", []) + if candidates: + # Only use first candidate for now + candidate = candidates[0] + content_parts = candidate.get("content", {}).get("parts", []) + content = [] + for part in content_parts: + if "text" in part: + content.append(Text(text=part["text"])) + elif "functionCall" in part: + content.append( + ToolUse( + id=part["functionCall"].get("name", ""), + name=part["functionCall"].get("name", ""), + parameters=part["functionCall"].get("args", {}), + ) + ) + return Message(role="assistant", content=content) + + # If no valid candidates were found, return an empty message + return Message(role="assistant", content=[]) + + @staticmethod + def tools_to_google_spec(tools: Tuple[Tool]) -> Dict[str, List[Dict[str, Any]]]: + if not tools: + return {} + converted_tools = [] + for tool in tools: + converted_tool: Dict[str, Any] = { + "name": tool.name, + "description": tool.description or "", + } + if tool.parameters["properties"]: + converted_tool["parameters"] = tool.parameters + converted_tools.append(converted_tool) + return {"functionDeclarations": converted_tools} + + @staticmethod + def messages_to_google_spec(messages: List[Message]) -> List[Dict[str, Any]]: + messages_spec = [] + for message in messages: + role = "user" if message.role == "user" else "model" + converted = {"role": role, "parts": []} + for content in message.content: + if isinstance(content, Text): + converted["parts"].append({"text": content.text}) + elif isinstance(content, ToolUse): + converted["parts"].append({"functionCall": {"name": content.name, "args": content.parameters}}) + elif isinstance(content, ToolResult): + converted["parts"].append( + {"functionResponse": {"name": content.tool_use_id, "response": {"content": content.output}}} + ) + messages_spec.append(converted) + + if not messages_spec: + messages_spec.append({"role": "user", "parts": [{"text": "Ignore"}]}) + + return messages_spec + + def complete( + self, + model: str, + system: str, + messages: List[Message], + tools: List[Tool] = [], + **kwargs: Dict[str, Any], + ) -> Tuple[Message, Usage]: + tools_set = set() + unique_tools = [] + for tool in tools: + if tool.name not in tools_set: + unique_tools.append(tool) + tools_set.add(tool.name) + + payload = dict( + system_instruction={"parts": [{"text": system}]}, + contents=self.messages_to_google_spec(messages), + tools=self.tools_to_google_spec(tuple(unique_tools)), + **kwargs, + ) + payload = {k: v for k, v in payload.items() if v} + response = self._post(payload, model) + message = self.google_response_to_message(response) + usage = self.get_usage(response) + return message, usage + + @retry_procedure + def _post(self, payload: dict, model: str) -> httpx.Response: + response = self.client.post("models/" + model + ":generateContent", json=payload) + return raise_for_status(response).json() diff --git a/packages/exchange/src/exchange/providers/ollama.py b/packages/exchange/src/exchange/providers/ollama.py new file mode 100644 index 000000000..acad89d9f --- /dev/null +++ b/packages/exchange/src/exchange/providers/ollama.py @@ -0,0 +1,45 @@ +import os +from typing import Type + +import httpx + +from exchange.providers.openai import OpenAiProvider + +OLLAMA_HOST = "http://localhost:11434/" +OLLAMA_MODEL = "mistral-nemo" + + +class OllamaProvider(OpenAiProvider): + """Provides chat completions for models hosted by Ollama""" + + __doc__ += f""" + +Here's an example profile configuration to try: + + ollama: + provider: ollama + processor: {OLLAMA_MODEL} + accelerator: 
{OLLAMA_MODEL}
+    moderator: passive
+    toolkits:
+    - name: developer
+      requires: {{}}
+"""
+
+    def __init__(self, client: httpx.Client) -> None:
+        print("PLEASE NOTE: the ollama provider is experimental, use with care")
+        super().__init__(client)
+
+    @classmethod
+    def from_env(cls: Type["OllamaProvider"]) -> "OllamaProvider":
+        ollama_url = os.environ.get("OLLAMA_HOST", OLLAMA_HOST)
+        timeout = httpx.Timeout(60 * 10)
+
+        # from_env is expected to fail if required ENV variables are not
+        # available. Since this provider can run with defaults, we substitute
+        # an Ollama health check (GET /) to determine if the service is ok.
+        httpx.get(ollama_url, timeout=timeout)
+
+        # When served by Ollama, the OpenAI API is available at the path "v1/".
+        client = httpx.Client(base_url=ollama_url + "v1/", timeout=timeout)
+        return cls(client)
diff --git a/packages/exchange/src/exchange/providers/openai.py b/packages/exchange/src/exchange/providers/openai.py
new file mode 100644
index 000000000..dbd293b47
--- /dev/null
+++ b/packages/exchange/src/exchange/providers/openai.py
@@ -0,0 +1,101 @@
+import os
+from typing import Any, Dict, List, Tuple, Type
+
+import httpx
+
+from exchange.message import Message
+from exchange.providers.base import Provider, Usage
+from exchange.providers.utils import (
+    messages_to_openai_spec,
+    openai_response_to_message,
+    openai_single_message_context_length_exceeded,
+    raise_for_status,
+    tools_to_openai_spec,
+)
+from exchange.tool import Tool
+from tenacity import retry, wait_fixed, stop_after_attempt
+from exchange.providers.utils import retry_if_status
+
+OPENAI_HOST = "https://api.openai.com/"
+
+retry_procedure = retry(
+    wait=wait_fixed(2),
+    stop=stop_after_attempt(2),
+    retry=retry_if_status(codes=[429], above=500),
+    reraise=True,
+)
+
+
+class OpenAiProvider(Provider):
+    """Provides chat completions for models hosted directly by OpenAI"""
+
+    def __init__(self, client: httpx.Client) -> None:
+        super().__init__()
+        self.client = client
+
+    @classmethod
+    def from_env(cls: Type["OpenAiProvider"]) -> "OpenAiProvider":
+        url = os.environ.get("OPENAI_HOST", OPENAI_HOST)
+        try:
+            key = os.environ["OPENAI_API_KEY"]
+        except KeyError:
+            raise RuntimeError(
+                "Failed to get OPENAI_API_KEY from the environment, see https://platform.openai.com/docs/api-reference/api-keys"
+            )
+        # The API expects a bearer token, matching the recorded cassettes
+        # ("authorization: Bearer ..."); an httpx auth tuple would send Basic auth.
+        client = httpx.Client(
+            base_url=url + "v1/",
+            headers={"Authorization": f"Bearer {key}"},
+            timeout=httpx.Timeout(60 * 10),
+        )
+        return cls(client)
+
+    @staticmethod
+    def get_usage(data: dict) -> Usage:
+        usage = data.pop("usage")
+        input_tokens = usage.get("prompt_tokens")
+        output_tokens = usage.get("completion_tokens")
+        total_tokens = usage.get("total_tokens")
+
+        if total_tokens is None and input_tokens is not None and output_tokens is not None:
+            total_tokens = input_tokens + output_tokens
+
+        return Usage(
+            input_tokens=input_tokens,
+            output_tokens=output_tokens,
+            total_tokens=total_tokens,
+        )
+
+    def complete(
+        self,
+        model: str,
+        system: str,
+        messages: List[Message],
+        tools: Tuple[Tool],
+        **kwargs: Dict[str, Any],
+    ) -> Tuple[Message, Usage]:
+        system_message = [] if model.startswith("o1") else [{"role": "system", "content": system}]
+        payload = dict(
+            messages=system_message + messages_to_openai_spec(messages),
+            model=model,
+            tools=tools_to_openai_spec(tools) if tools else [],
+            **kwargs,
+        )
+        payload = {k: v for k, v in payload.items() if v}
+        response = self._post(payload)
+
+        # Check for context_length_exceeded error for single, long input message
+        if "error" in response and
len(messages) == 1: + openai_single_message_context_length_exceeded(response["error"]) + + message = openai_response_to_message(response) + usage = self.get_usage(response) + return message, usage + + @retry_procedure + def _post(self, payload: dict) -> dict: + # Note: While OpenAI and Ollama mount the API under "v1", this is + # conventional and not a strict requirement. For example, Azure OpenAI + # mounts the API under the deployment name, and "v1" is not in the URL. + # See https://github.com/openai/openai-openapi/blob/master/openapi.yaml + response = self.client.post("chat/completions", json=payload) + return raise_for_status(response).json() diff --git a/packages/exchange/src/exchange/providers/utils.py b/packages/exchange/src/exchange/providers/utils.py new file mode 100644 index 000000000..4be7ac31e --- /dev/null +++ b/packages/exchange/src/exchange/providers/utils.py @@ -0,0 +1,185 @@ +import base64 +import json +import re +from typing import Any, Callable, Dict, List, Optional, Tuple + +import httpx +from exchange.content import Text, ToolResult, ToolUse +from exchange.message import Message +from exchange.tool import Tool +from tenacity import retry_if_exception + + +def retry_if_status(codes: Optional[List[int]] = None, above: Optional[int] = None) -> Callable: + codes = codes or [] + + def predicate(exc: Exception) -> bool: + if isinstance(exc, httpx.HTTPStatusError): + if exc.response.status_code in codes: + return True + if above and exc.response.status_code >= above: + return True + return False + + return retry_if_exception(predicate) + + +def raise_for_status(response: httpx.Response) -> httpx.Response: + """Raise with reason text.""" + try: + response.raise_for_status() + return response + except httpx.HTTPStatusError as e: + response.read() + if response.text: + raise httpx.HTTPStatusError(f"{e}\n{response.text}", request=e.request, response=e.response) + else: + raise e + + +def encode_image(image_path: str) -> str: + with open(image_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode("utf-8") + + +def messages_to_openai_spec(messages: List[Message]) -> List[Dict[str, Any]]: + messages_spec = [] + for message in messages: + converted = {"role": message.role} + output = [] + for content in message.content: + if isinstance(content, Text): + converted["content"] = content.text + elif isinstance(content, ToolUse): + sanitized_name = re.sub(r"[^a-zA-Z0-9_-]", "_", content.name) + converted.setdefault("tool_calls", []).append( + { + "id": content.id, + "type": "function", + "function": { + "name": sanitized_name, + "arguments": json.dumps(content.parameters), + }, + } + ) + elif isinstance(content, ToolResult): + if content.output.startswith('"image:'): + image_path = content.output.replace('"image:', "").replace('"', "") + output.append( + { + "role": "tool", + "content": [ + { + "type": "text", + "text": "This tool result included an image that is uploaded in the next message.", + }, + ], + "tool_call_id": content.tool_use_id, + } + ) + # Note: it is possible to only do this when message == messages[-1] + # but it doesn't seem to hurt too much with tokens to keep this. 
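+                        # The follow-up user message below carries the actual
+                        # image as a base64 image_url content part.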
+                    output.append(
+                        {
+                            "role": "user",
+                            "content": [
+                                {
+                                    "type": "image_url",
+                                    "image_url": {"url": f"data:image/jpeg;base64,{encode_image(image_path)}"},
+                                }
+                            ],
+                        }
+                    )
+
+                else:
+                    output.append(
+                        {
+                            "role": "tool",
+                            "content": content.output,
+                            "tool_call_id": content.tool_use_id,
+                        }
+                    )
+
+        if "content" in converted or "tool_calls" in converted:
+            output = [converted] + output
+        messages_spec.extend(output)
+    return messages_spec
+
+
+def tools_to_openai_spec(tools: Tuple[Tool]) -> List[Dict[str, Any]]:
+    tools_names = set()
+    result = []
+    for tool in tools:
+        if tool.name in tools_names:
+            # we should never allow duplicate tools
+            raise ValueError(f"Duplicate tool name: {tool.name}")
+        result.append(
+            {
+                "type": "function",
+                "function": {
+                    "name": tool.name,
+                    "description": tool.description,
+                    "parameters": tool.parameters,
+                },
+            }
+        )
+        tools_names.add(tool.name)
+    return result
+
+
+def openai_response_to_message(response: dict) -> Message:
+    original = response["choices"][0]["message"]
+    content = []
+    text = original.get("content")
+    if text:
+        content.append(Text(text=text))
+
+    tool_calls = original.get("tool_calls")
+    if tool_calls:
+        for tool_call in tool_calls:
+            try:
+                function_name = tool_call["function"]["name"]
+                # We occasionally see the model generate an invalid function name;
+                # sending it back to OpenAI raises a validation error.
+                if not re.match(r"^[a-zA-Z0-9_-]+$", function_name):
+                    content.append(
+                        ToolUse(
+                            id=tool_call["id"],
+                            name=function_name,
+                            parameters=tool_call["function"]["arguments"],
+                            is_error=True,
+                            error_message=f"The provided function name '{function_name}' had invalid characters, it must match this regex [a-zA-Z0-9_-]+",  # noqa: E501
+                        )
+                    )
+                else:
+                    content.append(
+                        ToolUse(
+                            id=tool_call["id"],
+                            name=function_name,
+                            parameters=json.loads(tool_call["function"]["arguments"]),
+                        )
+                    )
+            except json.JSONDecodeError:
+                content.append(
+                    ToolUse(
+                        id=tool_call["id"],
+                        name=tool_call["function"]["name"],
+                        parameters=tool_call["function"]["arguments"],
+                        is_error=True,
+                        error_message=f"Could not interpret tool use parameters for id {tool_call['id']}: {tool_call['function']['arguments']}",  # noqa: E501
+                    )
+                )
+
+    return Message(role="assistant", content=content)
+
+
+def openai_single_message_context_length_exceeded(error_dict: dict) -> None:
+    code = error_dict.get("code")
+    if code == "context_length_exceeded" or code == "string_above_max_length":
+        raise InitialMessageTooLargeError(f"Input message too long.
Message: {error_dict.get('message')}") + + +class InitialMessageTooLargeError(Exception): + """Custom error raised when the first input message in an exchange is too large.""" + + pass diff --git a/packages/exchange/src/exchange/token_usage_collector.py b/packages/exchange/src/exchange/token_usage_collector.py new file mode 100644 index 000000000..8f0801062 --- /dev/null +++ b/packages/exchange/src/exchange/token_usage_collector.py @@ -0,0 +1,27 @@ +from collections import defaultdict +from typing import Dict + +from exchange.providers.base import Usage + + +class _TokenUsageCollector: + def __init__(self) -> None: + self.usage_data = [] + + def collect(self, model: str, usage: Usage) -> None: + self.usage_data.append((model, usage)) + + def get_token_usage_group_by_model(self) -> Dict[str, Usage]: + usage_group_by_model = defaultdict(lambda: Usage(0, 0, 0)) + for model, usage in self.usage_data: + usage_by_model = usage_group_by_model[model] + if usage is not None and usage.input_tokens is not None: + usage_by_model.input_tokens += usage.input_tokens + if usage is not None and usage.output_tokens is not None: + usage_by_model.output_tokens += usage.output_tokens + if usage is not None and usage.total_tokens is not None: + usage_by_model.total_tokens += usage.total_tokens + return usage_group_by_model + + +_token_usage_collector = _TokenUsageCollector() diff --git a/packages/exchange/src/exchange/tool.py b/packages/exchange/src/exchange/tool.py new file mode 100644 index 000000000..4ce9e7c50 --- /dev/null +++ b/packages/exchange/src/exchange/tool.py @@ -0,0 +1,55 @@ +import inspect +from typing import Any, Callable, Type + +from attrs import define + +from exchange.utils import json_schema, parse_docstring + + +@define +class Tool: + """A tool that can be used by a model. + + Attributes: + name (str): The name of the tool + description (str): A description of what the tool does + parameters dict[str, Any]: A json schema of the function signature + function (Callable): The python function that powers the tool + """ + + name: str + description: str + parameters: dict[str, Any] + function: Callable + + @classmethod + def from_function(cls: Type["Tool"], func: Any) -> "Tool": # noqa: ANN401 + """Create a tool instance from a function and its docstring + + The function must have a docstring - we require it to load the description + and parameter descriptions. This also supports a class instance with a __call__ + method. 
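+
+        Example (mirroring the read_file fixture in tests/conftest.py):
+            def read_file(filename: str) -> str:
+                '''Read the contents of the file.
+
+                Args:
+                    filename (str): The path to the file.
+                '''
+                ...
+
+            tool = Tool.from_function(read_file)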
+ """ + if inspect.isfunction(func) or inspect.ismethod(func): + name = func.__name__ + else: + name = func.__class__.__name__.lower() + func = func.__call__ + + description, param_descriptions = parse_docstring(func) + schema = json_schema(func) + + # Set the 'description' field of the schema to the arg's docstring description + for arg in param_descriptions: + arg_name, arg_description = arg["name"], arg["description"] + + if arg_name not in schema["properties"]: + raise ValueError(f"Argument {arg_name} found in docstring but not in schema") + schema["properties"][arg_name]["description"] = arg_description + + return cls( + name=name, + description=description, + parameters=schema, + function=func, + ) diff --git a/packages/exchange/src/exchange/utils.py b/packages/exchange/src/exchange/utils.py new file mode 100644 index 000000000..04d5ffa18 --- /dev/null +++ b/packages/exchange/src/exchange/utils.py @@ -0,0 +1,155 @@ +import inspect +import uuid +from importlib.metadata import entry_points +from typing import Any, Callable, Dict, List, Type, get_args, get_origin + +from griffe import ( + Docstring, + DocstringSection, + DocstringSectionParameters, + DocstringSectionText, +) + + +def create_object_id(prefix: str) -> str: + return f"{prefix}_{uuid.uuid4().hex[:24]}" + + +def compact(content: str) -> str: + """Replace any amount of whitespace with a single space""" + return " ".join(content.split()) + + +def parse_docstring(func: Callable) -> tuple[str, List[Dict]]: + """Get description and parameters from function docstring""" + function_args = list(inspect.signature(func).parameters.keys()) + text = str(func.__doc__) + docstring = Docstring(text) + + for style in ["google", "numpy", "sphinx"]: + parsed = docstring.parse(style) + + if not _check_section_is_present(parsed, DocstringSectionText): + continue + + if function_args and not _check_section_is_present(parsed, DocstringSectionParameters): + continue + break + else: # if we did not find a valid style in the for loop + raise ValueError( + f"Attempted to load from a function {func.__name__} with an invalid docstring. Parameter docs are required if the function has parameters. 
https://mkdocstrings.github.io/griffe/reference/docstrings/#docstrings" # noqa: E501 + ) + + description = None + parameters = [] + + for section in parsed: + if isinstance(section, DocstringSectionText): + description = compact(section.value) + elif isinstance(section, DocstringSectionParameters): + parameters = [arg.as_dict() for arg in section.value] + + docstring_args = [d["name"] for d in parameters] + if description is None: + raise ValueError("Docstring must include a description.") + + if not docstring_args == function_args: + extra_docstring_args = ", ".join(sorted(set(docstring_args) - set(function_args))) + extra_function_args = ", ".join(sorted(set(function_args) - set(docstring_args))) + if extra_docstring_args and extra_function_args: + raise ValueError( + f"Docstring args must match function args: docstring had extra {extra_docstring_args}; function had extra {extra_function_args}" # noqa: E501 + ) + elif extra_function_args: + raise ValueError(f"Docstring args must match function args: function had extra {extra_function_args}") + elif extra_docstring_args: + raise ValueError(f"Docstring args must match function args: docstring had extra {extra_docstring_args}") + else: + raise ValueError("Docstring args must match function args") + + return description, parameters + + +def _check_section_is_present( + parsed_docstring: List[DocstringSection], section_type: Type[DocstringSectionText] +) -> bool: + for section in parsed_docstring: + if isinstance(section, section_type): + return True + return False + + +def json_schema(func: Any) -> dict[str, Any]: # noqa: ANN401 + """Get the json schema for a function""" + signature = inspect.signature(func) + parameters = signature.parameters + + schema = { + "type": "object", + "properties": {}, + "required": [], + } + + for param_name, param in parameters.items(): + param_schema = {} + + if param.annotation is not inspect.Parameter.empty: + param_schema = _map_type_to_schema(param.annotation) + + if param.default is not inspect.Parameter.empty: + param_schema["default"] = param.default + + schema["properties"][param_name] = param_schema + + if param.default is inspect.Parameter.empty: + schema["required"].append(param_name) + + return schema + + +def _map_type_to_schema(py_type: Type) -> Dict[str, Any]: # noqa: ANN401 + origin = get_origin(py_type) + args = get_args(py_type) + + if origin is list or origin is tuple: + return {"type": "array", "items": _map_type_to_schema(args[0] if args else Any)} + elif origin is dict: + return { + "type": "object", + "additionalProperties": _map_type_to_schema(args[1] if len(args) > 1 else Any), + } + elif py_type is int: + return {"type": "integer"} + elif py_type is bool: + return {"type": "boolean"} + elif py_type is float: + return {"type": "number"} + elif py_type is str: + return {"type": "string"} + else: + return {"type": "string"} + + +def load_plugins(group: str) -> dict: + """ + Load plugins based on a specified entry point group. + + This function iterates through all entry points registered under a specified group + + Args: + group (str): The entry point group to load plugins from. This should match the group specified + in the package setup where plugins are defined. + + Returns: + dict: A dictionary where each key is the entry point name, and the value is the loaded plugin object. + + Raises: + Exception: Propagates exceptions raised by entry point loading, which might occur if a plugin + is not found or if there are issues with the plugin's code. 
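+
+    Example:
+        # Group name is illustrative; use whichever group your package registers.
+        providers = load_plugins(group="exchange.provider")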
+ """ + plugins = {} + # Access all entry points for the specified group and load each. + for entrypoint in entry_points(group=group): + plugin = entrypoint.load() # Load the plugin. + plugins[entrypoint.name] = plugin # Store the loaded plugin in the dictionary. + return plugins diff --git a/packages/exchange/tests/.ruff.toml b/packages/exchange/tests/.ruff.toml new file mode 100644 index 000000000..cddf42337 --- /dev/null +++ b/packages/exchange/tests/.ruff.toml @@ -0,0 +1,2 @@ +lint.select = ["E", "W", "F", "N"] +line-length = 120 \ No newline at end of file diff --git a/packages/exchange/tests/__init__.py b/packages/exchange/tests/__init__.py new file mode 100644 index 000000000..c2b89ac6d --- /dev/null +++ b/packages/exchange/tests/__init__.py @@ -0,0 +1 @@ +"""Tests for exchange.""" diff --git a/packages/exchange/tests/conftest.py b/packages/exchange/tests/conftest.py new file mode 100644 index 000000000..684a446d7 --- /dev/null +++ b/packages/exchange/tests/conftest.py @@ -0,0 +1,36 @@ +import pytest + +from exchange.providers.base import Usage + + +@pytest.fixture +def dummy_tool(): + def _dummy_tool() -> str: + """An example tool""" + return "dummy response" + + return _dummy_tool + + +@pytest.fixture +def usage_factory(): + def _create_usage(input_tokens=100, output_tokens=200, total_tokens=300): + return Usage(input_tokens=input_tokens, output_tokens=output_tokens, total_tokens=total_tokens) + + return _create_usage + + +def read_file(filename: str) -> str: + """ + Read the contents of the file. + + Args: + filename (str): The path to the file, which can be relative or + absolute. If it is a plain filename, it is assumed to be in the + current working directory. + + Returns: + str: The contents of the file. + """ + assert filename == "test.txt" + return "hello exchange" diff --git a/packages/exchange/tests/providers/__init__.py b/packages/exchange/tests/providers/__init__.py new file mode 100644 index 000000000..4e13a800d --- /dev/null +++ b/packages/exchange/tests/providers/__init__.py @@ -0,0 +1 @@ +"""Tests for chat completion providers.""" diff --git a/packages/exchange/tests/providers/cassettes/test_azure_complete.yaml b/packages/exchange/tests/providers/cassettes/test_azure_complete.yaml new file mode 100644 index 000000000..3ac8a4fc0 --- /dev/null +++ b/packages/exchange/tests/providers/cassettes/test_azure_complete.yaml @@ -0,0 +1,68 @@ +interactions: +- request: + body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello"}], "model": "gpt-4o-mini"}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + api-key: + - test_azure_api_key + connection: + - keep-alive + content-length: + - '139' + content-type: + - application/json + host: + - test.openai.azure.com + user-agent: + - python-httpx/0.27.2 + method: POST + uri: https://test.openai.azure.com/openai/deployments/test-azure-deployment/chat/completions?api-version=2024-05-01-preview + response: + body: + string: '{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"content":"Hello! 
+ How can I assist you today?","role":"assistant"}}],"created":1727230065,"id":"chatcmpl-ABBjN3AoYlxkP7Vg2lBvUhYeA6j5K","model":"gpt-4-32k","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":9,"prompt_tokens":18,"total_tokens":27}} + + ' + headers: + Cache-Control: + - no-cache, must-revalidate + Content-Length: + - '825' + Content-Type: + - application/json + Date: + - Wed, 25 Sep 2024 02:07:45 GMT + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + access-control-allow-origin: + - '*' + apim-request-id: + - 82e66ef8-ac07-4a43-b60f-9aecec1d8c81 + azureml-model-session: + - d145-20240919052126 + openai-organization: test_openai_org_key + x-accel-buffering: + - 'no' + x-content-type-options: + - nosniff + x-ms-client-request-id: + - 82e66ef8-ac07-4a43-b60f-9aecec1d8c81 + x-ms-rai-invoked: + - 'true' + x-ms-region: + - Switzerland North + x-ratelimit-remaining-requests: + - '79' + x-ratelimit-remaining-tokens: + - '79984' + x-request-id: + - 38db9001-8b16-4efe-84c9-620e10f18c3c + status: + code: 200 + message: OK +version: 1 diff --git a/packages/exchange/tests/providers/cassettes/test_azure_tools.yaml b/packages/exchange/tests/providers/cassettes/test_azure_tools.yaml new file mode 100644 index 000000000..9da479790 --- /dev/null +++ b/packages/exchange/tests/providers/cassettes/test_azure_tools.yaml @@ -0,0 +1,74 @@ +interactions: +- request: + body: '{"messages": [{"role": "system", "content": "You are a helpful assistant. + Expect to need to read a file using read_file."}, {"role": "user", "content": + "What are the contents of this file? test.txt"}], "model": "gpt-4o-mini", "tools": + [{"type": "function", "function": {"name": "read_file", "description": "Read + the contents of the file.", "parameters": {"type": "object", "properties": {"filename": + {"type": "string", "description": "The path to the file, which can be relative + or\nabsolute. 
If it is a plain filename, it is assumed to be in the\ncurrent + working directory."}}, "required": ["filename"]}}}]}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + api-key: + - test_azure_api_key + connection: + - keep-alive + content-length: + - '608' + content-type: + - application/json + host: + - test.openai.azure.com + user-agent: + - python-httpx/0.27.2 + method: POST + uri: https://test.openai.azure.com/openai/deployments/test-azure-deployment/chat/completions?api-version=2024-05-01-preview + response: + body: + string: '{"choices":[{"content_filter_results":{},"finish_reason":"tool_calls","index":0,"logprobs":null,"message":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\n \"filename\": + \"test.txt\"\n}","name":"read_file"},"id":"call_a47abadDxlGKIWjvYYvGVAHa","type":"function"}]}}],"created":1727256650,"id":"chatcmpl-ABIeABbq5WVCq0e0AriGFaYDSih3P","model":"gpt-4-32k","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":16,"prompt_tokens":109,"total_tokens":125}} + + ' + headers: + Cache-Control: + - no-cache, must-revalidate + Content-Length: + - '769' + Content-Type: + - application/json + Date: + - Wed, 25 Sep 2024 09:30:50 GMT + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + access-control-allow-origin: + - '*' + apim-request-id: + - 8c0e3372-8ffd-4ff5-a5d1-0b962c4ea339 + azureml-model-session: + - d145-20240919052126 + openai-organization: test_openai_org_key + x-accel-buffering: + - 'no' + x-content-type-options: + - nosniff + x-ms-client-request-id: + - 8c0e3372-8ffd-4ff5-a5d1-0b962c4ea339 + x-ms-rai-invoked: + - 'true' + x-ms-region: + - Switzerland North + x-ratelimit-remaining-requests: + - '79' + x-ratelimit-remaining-tokens: + - '79824' + x-request-id: + - 401bd803-b790-47b7-b098-98708d44f060 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/exchange/tests/providers/cassettes/test_ollama_complete.yaml b/packages/exchange/tests/providers/cassettes/test_ollama_complete.yaml new file mode 100644 index 000000000..88bc206ff --- /dev/null +++ b/packages/exchange/tests/providers/cassettes/test_ollama_complete.yaml @@ -0,0 +1,68 @@ +interactions: +- request: + body: '' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + host: + - localhost:11434 + user-agent: + - python-httpx/0.27.2 + method: GET + uri: http://localhost:11434/ + response: + body: + string: Ollama is running + headers: + Content-Length: + - '17' + Content-Type: + - text/plain; charset=utf-8 + Date: + - Sun, 22 Sep 2024 23:40:13 GMT + Set-Cookie: test_set_cookie + openai-organization: test_openai_org_key + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello"}], "model": "mistral-nemo"}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '140' + content-type: + - application/json + host: + - localhost:11434 + user-agent: + - python-httpx/0.27.2 + method: POST + uri: http://localhost:11434/v1/chat/completions + response: + body: + string: 
"{\"id\":\"chatcmpl-429\",\"object\":\"chat.completion\",\"created\":1727048416,\"model\":\"mistral-nemo\",\"system_fingerprint\":\"fp_ollama\",\"choices\":[{\"index\":0,\"message\":{\"role\":\"assistant\",\"content\":\"Hello! + I'm here to help. How can I assist you today? Let's chat. \U0001F60A\"},\"finish_reason\":\"stop\"}],\"usage\":{\"prompt_tokens\":10,\"completion_tokens\":23,\"total_tokens\":33}}\n" + headers: + Content-Length: + - '356' + Content-Type: + - application/json + Date: + - Sun, 22 Sep 2024 23:40:16 GMT + Set-Cookie: test_set_cookie + openai-organization: test_openai_org_key + status: + code: 200 + message: OK +version: 1 diff --git a/packages/exchange/tests/providers/cassettes/test_ollama_tools.yaml b/packages/exchange/tests/providers/cassettes/test_ollama_tools.yaml new file mode 100644 index 000000000..7271bf227 --- /dev/null +++ b/packages/exchange/tests/providers/cassettes/test_ollama_tools.yaml @@ -0,0 +1,75 @@ +interactions: +- request: + body: '' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + host: + - localhost:11434 + user-agent: + - python-httpx/0.27.2 + method: GET + uri: http://localhost:11434/ + response: + body: + string: Ollama is running + headers: + Content-Length: + - '17' + Content-Type: + - text/plain; charset=utf-8 + Date: + - Wed, 25 Sep 2024 09:23:08 GMT + Set-Cookie: test_set_cookie + openai-organization: test_openai_org_key + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "system", "content": "You are a helpful assistant. + Expect to need to read a file using read_file."}, {"role": "user", "content": + "What are the contents of this file? test.txt"}], "model": "mistral-nemo", "tools": + [{"type": "function", "function": {"name": "read_file", "description": "Read + the contents of the file.", "parameters": {"type": "object", "properties": {"filename": + {"type": "string", "description": "The path to the file, which can be relative + or\nabsolute. 
If it is a plain filename, it is assumed to be in the\ncurrent + working directory."}}, "required": ["filename"]}}}]}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '609' + content-type: + - application/json + host: + - localhost:11434 + user-agent: + - python-httpx/0.27.2 + method: POST + uri: http://localhost:11434/v1/chat/completions + response: + body: + string: '{"id":"chatcmpl-245","object":"chat.completion","created":1727256190,"model":"mistral-nemo","system_fingerprint":"fp_ollama","choices":[{"index":0,"message":{"role":"assistant","content":"","tool_calls":[{"id":"call_z6fgu3z3","type":"function","function":{"name":"read_file","arguments":"{\"filename\":\"test.txt\"}"}}]},"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":112,"completion_tokens":21,"total_tokens":133}} + + ' + headers: + Content-Length: + - '425' + Content-Type: + - application/json + Date: + - Wed, 25 Sep 2024 09:23:10 GMT + Set-Cookie: test_set_cookie + openai-organization: test_openai_org_key + status: + code: 200 + message: OK +version: 1 diff --git a/packages/exchange/tests/providers/cassettes/test_openai_complete.yaml b/packages/exchange/tests/providers/cassettes/test_openai_complete.yaml new file mode 100644 index 000000000..1a92eb36b --- /dev/null +++ b/packages/exchange/tests/providers/cassettes/test_openai_complete.yaml @@ -0,0 +1,80 @@ +interactions: +- request: + body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello"}], "model": "gpt-4o-mini"}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '139' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - python-httpx/0.27.2 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-AAQTYi3DXJnltAfd5sUH1Wnzh69t3\",\n \"object\": + \"chat.completion\",\n \"created\": 1727048416,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\",\n + \ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 18,\n \"completion_tokens\": + 9,\n \"total_tokens\": 27,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": + 0\n }\n },\n \"system_fingerprint\": \"fp_1bb46167f9\"\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8c762399feb55739-SYD + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Sun, 22 Sep 2024 23:40:17 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + content-length: + - '593' + openai-organization: test_openai_org_key + openai-processing-ms: + - '560' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15552000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '200000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '199973' + x-ratelimit-reset-requests: + - 8.64s + x-ratelimit-reset-tokens: + - 8ms + x-request-id: + - req_22e26c840219cde3152eaba1ce89483b + status: + code: 200 + message: OK +version: 1 diff --git a/packages/exchange/tests/providers/cassettes/test_openai_tools.yaml b/packages/exchange/tests/providers/cassettes/test_openai_tools.yaml new file mode 100644 index 000000000..30496fcb8 --- /dev/null +++ b/packages/exchange/tests/providers/cassettes/test_openai_tools.yaml @@ -0,0 +1,90 @@ +interactions: +- request: + body: '{"messages": [{"role": "system", "content": "You are a helpful assistant. + Expect to need to read a file using read_file."}, {"role": "user", "content": + "What are the contents of this file? test.txt"}], "model": "gpt-4o-mini", "tools": + [{"type": "function", "function": {"name": "read_file", "description": "Read + the contents of the file.", "parameters": {"type": "object", "properties": {"filename": + {"type": "string", "description": "The path to the file, which can be relative + or\nabsolute. 
If it is a plain filename, it is assumed to be in the\ncurrent + working directory."}}, "required": ["filename"]}}}]}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '608' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - python-httpx/0.27.2 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-ABIV2aZWVKQ774RAQ8KHYdNwkI5N7\",\n \"object\": + \"chat.completion\",\n \"created\": 1727256084,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_xXYlw4A7Ud1qtCopuK5gEJrP\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"read_file\",\n + \ \"arguments\": \"{\\\"filename\\\":\\\"test.txt\\\"}\"\n }\n + \ }\n ],\n \"refusal\": null\n },\n \"logprobs\": + null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": + {\n \"prompt_tokens\": 107,\n \"completion_tokens\": 15,\n \"total_tokens\": + 122,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n + \ }\n },\n \"system_fingerprint\": \"fp_1bb46167f9\"\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8c89f19fed997e43-SYD + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Sep 2024 09:21:25 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + content-length: + - '844' + openai-organization: test_openai_org_key + openai-processing-ms: + - '266' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '200000' + x-ratelimit-remaining-requests: + - '9991' + x-ratelimit-remaining-tokens: + - '199952' + x-ratelimit-reset-requests: + - 1m9.486s + x-ratelimit-reset-tokens: + - 14ms + x-request-id: + - req_ff6b5d65c24f40e1faaf049c175e718d + status: + code: 200 + message: OK +version: 1 diff --git a/packages/exchange/tests/providers/cassettes/test_openai_vision.yaml b/packages/exchange/tests/providers/cassettes/test_openai_vision.yaml new file mode 100644 index 000000000..1b9691d29 --- /dev/null +++ b/packages/exchange/tests/providers/cassettes/test_openai_vision.yaml @@ -0,0 +1,86 @@ +interactions: +- request: + body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What does the first entry in the menu say?"}, {"role": + "assistant", "tool_calls": [{"id": "xyz", "type": "function", "function": {"name": + "screenshot", "arguments": "{}"}}]}, {"role": "tool", "content": [{"type": "text", + "text": "This tool result included an image that is uploaded in the next message."}], + "tool_call_id": "xyz"}, {"role": "user", "content": [{"type": "image_url", "image_url": + {"url": 
"data:image/jpeg;base64,iVBORw0KGgoAAAANSUhEUgAAAj0AAAEJCAYAAAB/kpYnAAAAAXNSR0IArs4c6QAAAGJlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAABJKGAAcAAAASAAAAUKABAAMAAAABAAEAAKACAAQAAAABAAACPaADAAQAAAABAAABCQAAAABBU0NJSQAAAFNjcmVlbnNob3SjhiuVAAAB1mlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNi4wLjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDxleGlmOlBpeGVsWURpbWVuc2lvbj4yNjU8L2V4aWY6UGl4ZWxZRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+NTczPC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6VXNlckNvbW1lbnQ+U2NyZWVuc2hvdDwvZXhpZjpVc2VyQ29tbWVudD4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+Cr+mE4cAAEAASURBVHgB7F0HYBXF0/8leemN9ISSAIHQexGko4igoiKIvYvdz4KCFbvyt/eGCiJ2QREVBCwU6b33ECCk916/mbl37917eS+N0MIOvNyW2dnd393tzc3O7brExMRUQlGtEYicMBohFwyEq7sJ2259ApWlpZayzW6+AoH9umHHHU9b0hoiEDpqMELO64+Cg0cQ0KOjiNx+2xMNIdqpjNcnvIKDaYfw3pIPnfKcDRkzZsxAfn4+7rnnnrOhu6qPCgGFgEKgUSNgatS9OwGdqygqRuY/a5C2YKmNwsNV5WzaidKsnAavNXfjDnhGhcM9KABHv/gJ2Ss3NXgd9gKX7PwbaXnp9slnXfymm2466/qsOqwQUAgoBBorAi7K0tNYT63ql0JAIaAQUAgoBBQCRgRcjREVVggoBBQCCgGFgEJAIdBYEVBKT2M9s6pfCgGFgEJAIaAQUAjYIKCUHhs4VEQhoBBQCCgEFAIKgcaKgFJ6GuuZVf1SCCgEFAIKAYWAQsAGAVcXFxd4+/jC5O5uk6EiCgGFgEJAIaAQUAgoBBoTAqZLxt8IFxfN4JOVkYYNq5ciNyerMfVR9UUhoBBQCCgEFAIKAYUA3MrLip/Zsn4lDh3YjeiWbRAYFIKjCQcVNAoBhYBCQCGgEFAIKAQaFQKuacnHUF5WhoL8PKSlJCEkLLJRdVB1RiGgEFAIKAQUAgoBhQAjICsyt47rhCZBwWgeE4vtm9cqZBQCCgGFgEJAIaAQUAg0OgRE6WlByk5AYBDKyOKTkZba6DqpOqQQUAgoBBQCCgGFgELAsg0Ff8XV85zBCI9qjj/mzlbIKAQUAgoBhYBCQCGgEGhUCFjW6amsrMSxo4fg4eEJL2+fRtVJ1RmFgEJAIaAQUAgoBBQCrmERTeFmMiGwSTDiOnRDZWUFiosKFTIKAYWAQkAhoBBQCCgEGhUCpnOHXmjpUFZmGlYtXUSKT6UlTQUUAgoBhYBCQCGgEFAINAYEXFq1al3p7eODkuJilJaWNIY+qT4oBBQCCgGFgEJAIaAQqIKAqaKiHPl5uVUyVIJCQCGgEFAIKAQUAgqBxoSAxZG5MXVK9UUhoBBQCCgEFAIKAYWAPQJK6bFHRMUVAgoBhYBCQCGgEGiUCCilp1GeVtUphYBCQCGgEFAIKATsEVBKjz0iKq4QUAgoBBQCCgGFQKNEQCk9jfK0qk4pBBQCCgGFgEJAIWCPgFJ67BFRcYWAQkAhoBBQCCgEGiUCZ4zS4x4UCTdvP4cnwc3HH+5Nwh3mqcTTBwG/kHCcP/ExuNIK4MdLY8cOQpcurWzEuLq64oYbRiAoyN8m/XSNjBpYgN3zEmAynZmLgUZ36YO+Y29ucHhHjeqDfv06NLjc+gocObI3+vfvWGPxl859HmNaX1wjX10ZWnbvj/Nun4x2A0bYFHVxdcPgG/4PvkGhNunMO+r+5+THPCeDzmsxHG8Nft1hVZ1iS7B3fgIiQ8od5qtEhcDJRMDy9Onc/RzEtuuENSv+wrEj8SetDQHdhqH1w19o9dEWGAWHtiNl/kfIWvObpHmENkPcs7/C5B8s8bydK7H/f9ejsrwMLm4mtHn8W/i27S15pdmp2PP0JSgvzEXXT7ZrMu3+lmYmY/v/nWOXao026XsRWt77viRUlBajMH4r4t+7F6WZSZIWN/Vn+MR2txag0NGvX0Dqguno+NpSeIRHS155QS5ytvyDI188Ru3Js+E/GyI3vPEtdi1fiDVzzOeWOh3RugP6T5iI5d98gKLc7OOC4eabR2LZsq3YuvWgRY63twduvPEC7N59GKtX77Kkn66BqXdlYM1WL5SVuZyuTZR2tR90IYbc+CA+vm2kTTu7j56ADgMvtDnHNgz1jPA5PHo0HatW7aynhPoVu+uuSxAaGojnn//KRgC3JykpEytX7rBJN0baB7VDz/AeeGntK8bk4w5f9+pXaNmjP3JSjsHVzQ27VyyyyPSgPRKH3PgAEndvwb7Vf1vSm7brioDwZghuFoO/pv8PxQUnfvxZkfgfHun1ELqHdcOm1M2WtnBg+34PZOa44pUH0nHTU+rl1AYcFTnpCIilJyAwCDGxcSe9cqmQdndn2vfSBOybdh34zSTmzrdEoeH0mDvepLA7tt3XG/sp369Df4SefwNnIWzU7aLw7HvxSlJk+sHN0wfRE19DRVE+dk05X35Jc94U3j1Tx0h87/NjJe70j7k9e58fh4SPH4JnRCu0f3mRtIvLHHx7osjhOnK3LZNw+j/faOLI0lBwYDN2PjoMSXPfRFC/SxA2eqLTqhpzRlRcF1Jy2p/ULhYUFOOBBz7Apk37T2q99alseN9CefN9+oOg+hQ/qWWCmkYjrGWbk1rnqaisffsW4F996NZON2NRwhLklebXp7jTMmxNW/3jZ3jnmoFY9NFLNnwlBfmY+cCVOLRplU36rEnX4s8PnrNJO9GRovIizDswHxM73+qwqg+/C8SQ3oUIDqhwmK8SFQInCwGx9PQ5dzi2b1qDbr0HnKx6q9RTknYEJWlHcXTWM2jzxPcI6D4c2ev/hG9cb6Qu/AJl2WnIzV6O4qSDCB40jtI+R/CAsSg6uhd5u9eIvIzlPyFk2DUSLkrcJ8eS9KNanPgqSmq/kWrxsf3I37tO6mv3wu8I6n8pMlbMQWlWivzY0lSWlwW9HqmE/rC
ViduYSr+QoVcjZNB4JP30hp7d4EcXUrRa3HMtmvTvQcohKV37E3DgpY9QnlcgdTU5tweirh0Dj7BgVJaV49jXvyJ1vvWt0FmDbv94PkwenmgS2QImdw8k7tmKGf83HuW0VUmbc4Zh7JPvwNPHTzaozTh6CF8+dBXy0lMwbuoHiOt/HtyoTLcLx6HL+ZehKC8Hb4zrY6lq7BNvo3XvQSimlcD/eHcqti35xZLnLBATE4E337wLgYG+yMjIhbu7rdn+ww//D9HR2lvk5MnTsW2b1QLUs2dbvPjiLZgzZxmuuGIQTSe54c8/1+N///sOv/zynFiGunZtDRdSeOfN+w/vvz9PmnHrraMwfvxgqstE1phyrFu3B08++YXsTffrr88TjwvYurRnzxHExTWX4913vyNleUrkvvsuo3xPFBWV4I03fsSSJRst3RszNB+FxS44eNTdksYBU4AfWj9+J7xbNSdrZgUyl63F4Y++BVUKF3d3tJp8Oyn+1FYKl2ZmY+ddz6CyovoHCWN91QvTkRK/B1FtOqGMzuG8aZOw/Z/5Uvf1r3+N6C59xZJQVlKMZbPexfKv34eXfyAe/H41pZsIG1c8vmC38P878y2s+OZDCbu4uuDumX8hpFlLZCQm4Punbkfqob2SV92f11+/U6Yn3eiaLSkpw6xZi/H110ssRfhc/vHHy3Ke9+8/Rli+K3zM8PjjV2PAgM7w8vKQtKefnoG1a3ejd+84vPDCzYiPT0abNk1pW51yTJv2Lf75ZzN8fLwwY8YjCA72l/Ocm1uAZ5+dhY0b9+Gii86Rc8XXBdOCBS/LceLEN5GQkCLh0NAAuVZ8fb3o2orHQw99hAoD7l1CO+O3db8Lb0P8ueW9uYhs01Huo75jb0Gfy27E7v8W46fn7hHxt304D6HRsRKePflGHN62rlbVssyrXvwM/iERqCgvx6YF3+O3N5+oVdmamNalrMflsZfCla6VCrLaG+mHRb5gy+awcwrw0yLHbgpGfhVWCJwoBFxbt9Xmqg8dqHmgOlGN0OW6engheOhVEs3fux5uPgH0XHElxWaPzoLilATy34mQuCkwFMXJ8ZY8VkB4yovlNBQVJuygB04FvFt2rpNIr2Zx8IxsJZafmgq++OKLmD9/fpXf7NmzayqKkJGDEDSoN9L+WIqjn/9E7WyOaFKCdGo+cQIKDxzG7gdfwqG3Z5LyWLstR9hPIDS6DbYu+Rlr5s5AU7Lc9LpEk+vh5Y2dS/8QRee31x8nxag5rnz2Y6lyySev4OspN4Efngc3/Cfh7+hBaKRIevByOX74Dr/1EWOW0/CkSePlIff++7+YlR7LzKyUefvtOXj11e+Fx9fX00YOKx4eHiZReBYv3oC3356LtDRteo0fYt26xeKzzxZg+/Z4XH75QHkosgBXeqB//fVf9EB8D99887f4mVx33fkim2Vu2rQPycmZ9IBthnff/Rnt2rVAq1aRYAXt0Ucn4MiRNDz33Cw6puKRR660aVNcTCkysm0VN2aIvv8GeLWIxJHpPyD9z+UIHtYPIcP7SdnIcSPh16ktDrz8CXZPmoas/0iJqsXMGCunrIQ2iWiG3956AoU5mbjoYe3BzoKL80n5fPtJsRrww3PYrZMQ0ryVKKXfPHYzNv/5oyhdfF75t2nBD9Ie/uPu5YOCrHT8/s7TIv/cq++y5FUXyM8vpPMwRyxzrKDeeuuFaN48zFKElZO//tqIH39citjYKDl3nMmKzXnn9ZT0u+56Gz/88C8pN2VSzsfHUxTUiIggvPXWHOTk5OPhh8dJnpubiygwU6fOxBNPfI7i4lJRhDmTp0mnTJku+VlZeRLm+LFjGVKW/zRrFipK6+zZS0RZ69PHahkP8QqBm4sbDucesfAfb+DXVx8VrFnZ3bzwRwn/9ek0i9jf334Kv746WfD39K2dDxtb0W986we6Fjwxj+RvWTQHPS++BmExbS1yjydwxNz/Zn7NqojJK3AFvTegVwe1v2MVcFTCSUXA1LlHXyxdzG98lSe1YvvKOrz6rzalRQrGoY8eQFlOuigNzGf0iakoLrAoNa40nVVRZJ2v5iknJje/IFRkHJNwQ/xh357aOkr7dxqIbl9oyhdbro58+XSNTZg+fToNqlUHipycnBrLBg3qJW/8ibN+Fl6/zm0R0FNTZNkK5OpJCgBZMMrI8lO02nauvSbhhblZmP/aFGHrMHgU2g8cKf4bO/79HTuXLRTHytCYWHqIZqFJlDYtkHksAfzjt8iclETEb1pZpZpFH72IrYt/Jr+DKAy6/r4q+Y4SWLFgS8mcOcvNx2ds2HbtOozDh1Nt0uwjc+euwMdkwbIn9v/hhyc//GbPfgw9erTBhg178emnv4uFYMSInqRMuYu1p1275pbi3B5+eHp6upMV4D+xFjRtGoJhwzSfr82b94sitH37IVGMhgzpin//3SLlw4LLkZhaVenx69gG2as3IX3RCuFrMrAXggb3QfoSwtE89coZxUeSkDhzrvDU9s/fn7+ODfO/QX5mOq587mNxgM3PTMP3T98himvn8y5DZuIhtOo5AM079kT6kYNy/qLadaHRodLhueS6v3rkOlFyu184Hi0696pVc55+eiYiI4NEgUlMTAdb4zp2jBEFkQXk5RWKEsvhwYO7imWHFU8jsVL5+ecLjEkS5rT581chMzOPlM4bxbE9MzMXkyZ9jA4dokUWK6vsv8OUk1MgU6Js/eFz6Wh6lMu/846G95VXDsWgQV0sfmORvtpLWFpRushriD9sLdMtZmkJ+6tgn7hrM9IPH6hTVWyBZT+gw9vWiqLD9215WSmG3PQgfnz27jrJcsScWpgmyZE+EaQAHq7CUlTsiuio0irpKkEhcDIRMOXlZJO53x2h9ABiahIUjKyMVBTSfPHJpCMzn4Krly+aTpiCwF4jkfnfz6T4aDeR8astVnTKSfFhEgXIy8/STC7PVJ5rfUOzZB5HwJXejHRH5prE8HRb0pw3EHbhrfBt0xMuJo+aiiA3N5csBslV+AoLa56OMwX6o+hwkqVswb5DCDynm8R52iNxxhxEXTcGnT59QZSjQ2/NRP7O/Rb+6gJZSdY3V34ARsRqX9R0Hj4Glz/+llhqclKTaJrLlwZP7W27Onl63v61/0owm5QinjapiZo08RVLzc6dh7Ry2fmigNRUzj6fH4SOSJ8KS0rSrhu2MjBNnjwBI0b0Qn5+EdgCwFMx/FDUiaeteGqmnKahdPL09KCHOU0l0hu68QsktvawJUKn1Aw3hDWxlpN0UmpcaIolf4/WT04rPpoMU5D2cE797R8E9OiI2KdpioPkZy5fj4R3Z+kiazwe3LBcePQjK7Hrf52NO6YvQHjLOOSRAqQ7vXrQOa0NsV8JW/WYWIEKDG9am2KYPv0htGwZKYpJQUGRlDHiw47DOjF2HTrESJSnGHlKii1u/OM8ns7Uzx0zscJqPA4c2JmmM9dh7txnZWqTrXxs+WPiI5/DmujoUW0sYr7CwmIEBFjxScrX7t0w71BkF2sWxJrknYp83ceuSVQ0vaRESxP4HmflpyGI+8
+UlG8dj4xyvTzphTbRev8Y81RYIXCyEDB50eDWd+B5lvra0FtdSXEx9u/Zbkk7GYHcbUvFp6eyrATNb3gO3jGdUEhfcvHUklfzdpYmeNGUURn51TCxn49nVGtLnneL9vJVF1tmGorkSy16MBcc3ForkaVZycha+weyNyxC5/fWI4ZM8Hufv6LaspMnT6a3XM06Y2QsKCggn5LxxqQq4XKaJvCM0AYbzvRqESW+Ozpj2sJl4J9fpzaIeeAmtLjzauz6vxf0bNx///0IDw/HwoULydKxzJLOAZ731ykgrCmyk49KdMSdTyAz6TDeu26IxO+bvQxefgE6q3akh7IrKdOOyOgLYZ/vqD1ZWZqSww9JJlY8dP8L+/LVxfUpLXseamoVYvms8CxYsBavvfYDTXW50oPzFcvUV5UChgT2OWK6+WZyqjf4fRhYsPuQOy4coCnvlnRqCPvxeEVrLyCc7kHntjRDeyjx1OTuR6aRJdMH4ZeeR7/zkfnPGuRu3S0iOnfujKuuuooeyoU0dfOiRaweaNquG9j/io9MCVvWgB3Ow1u1E2vNwQ0rENtnMK55ZSbNKhuUUWoXqWO6mFof3cnvaOrUqcL/3nvvkWKiPQzZ/6lVqyia8vtEFJQ+fdrhlVduk+lEXTj70OjE01VGpYYtNuxnddllA3DHHRdh4sSLZBpR5+dpRlZSdKvcli0H5Ks+PqcXX/yEKDk8/ch+V+zHpRNfB8a4ns7H8nIHF4mZIZ0sPOWV5Wjh1xz7smr3QmGUfSLCpUXaC5Onr59FkWULLNPC956F/uLRkHW38NesvUfzE6uI9fOpALtMbdhV80tglcIqQSHQgAi4/j7nK/Dvj7mzRey6Vf+edIXH2J+0xV+iPD9bFB9Oz9uzDiFDJsj0UkDXIfJJeMayH6UIOy57RcXCr+O5cA+OEmdj/qS9IcgrugOCB16B2EkzpT2Zq+aJWI+QpvCO7ihTcSb/IAnzOkH2xI7Oid9Po6/LeoGVserosccew7hx46r8rr322uqKSV7u5p2ESTAC+3QR59eA3p1JWdSUE1eyOoSOGmyxHpSkZtB0oK1C2LdvX3qL7kB+Cl2q1OVHfj1dR1wu01hBTVtgDzlSMhXQmyE7ObOvSO9LbxBfDvvCGYnxaE3TJOzvU5e1Qpy15/DhFAwf3l2clXU/Dfs6GzLO1hv+8Rs9O8FOmTLB6QPRvt7fflslvE88cY04Xvv7e+Oeey4lWT4W1t+W+sLbsxKtmtma+3naqkm/bqT4NCXn9O50XQcid9NOKccWPE5nJ/WcddpLiSu1TafWrVvLuezRo4eeZHPkqUQ+H8Nve1SsMzJ9Yn7mB4RGUl4LjLxHU1KMBfet/kem1vqOvalOayx50tQqX1v8CwsLs4jU9QyeXmKrGGNjT+ywfsEFvcVaFhUVYvl8vVu31qKMsh8P+/zwOTJaiFjO9defL1Nnt902WhScQ4eSxUrHPlpsxePpS55qtCde/oDbxFNgrOTWhbambaNP1nvWpcgJ5T20eTUZAytwPr2g8PpY7NPF9y9PO4+8d6p8WcnrZfE5je0zpEHa0ov6z0qfvRMzCx8/Ip+Nk/h7tfUeaJBKlRCFQB0R0Gy8dSzUoOx8J9hR0i/voNk1T4lCk/Dxg4h7bj46vbNGuPhLrbQlmkk/deFnNBV2AdpM+Vry2A8o4dNH7KTp0ar16Dk2R3N7WKas00OfoMe/f69MJzBfy3s/sKzTw/47/GWXvk4PvdbbiOJP2Ztd/QSaXfcM9r18lU2eMVJGU0P8qw8l/7gQAb06o+Ujt0nx8oJCJLz3lYRd6G246Q2Xo9nNmqWpPL8A8a9+5rAano6xp9LiQlw65Q1JZv+etT/PlPA/X7yGK556D4/+ulVwyU1PFodKY/nls9/H6AdeBFuB2G/gpZFxlK3VUdPXRizHvj3Tp/8hX+Z88cUjMrVlnFJi59Zp0263VP/SS7dKmJ2TjV8E2cvUC9inc5x//CUXOzbzl1o8BcLTGo54dTl8ZMsO+wj9/PMKXHrpuRg6VLOq8Ndf7BSt0+JV3kjNdMPUOzNt1i45RNNVbZ9/AO1emyyshfFHkTxXUzb9u3dAy4dvsVyLuZt3kf/PZl1klbZZMswBv+BwOR8cXfbVe5J6bPdWsH/ImMmvSTyLLHhMxnPEylHSvu2iELFStPL7T7H445cs7ZACDv4YrVxG3HbvPgL2weLpQybdilNRYb4+6MBTh3p+bm6h+HIxb3R0BDk/jyUlVLufeNqRLXFGYsVm9uzHJemrrzTs+MiWHT2dLUHsnGxsF/tlsWL97rv3itJ6++1v4MCBY8TDorS2Weuxjc/Y8SVeH/w/fLjlIxSUaVYWK2/DhvhrvGunfWkRevVLn0v4789ek6/uOMIKz6ofPhOlptPQi7Hi6w/w12ev4pdXHpJzPfHTP6QM87FD9PGSh5sHLmo5Cs+sft6hqDuuzMaKTV7IoPV6FCkETiUCLjExMbZ376lsTTV1e4S1EItLeUFV514TOS670lck7DhcG2IfHWdUQdNr5lHOGctpmc6+Pa5enihJtvoeSEPptdojPITt84RPZq3b/sD3q5B2aB9+pE9kA8OikHxgV5Wy/BUWp1dWlFfJOxEJPD3BX/Ls3ZvodNqooevlaZQWLcLk4VdX2WwtiItrRj5bhTLdYl/+ihF5ePWhdMRdEk2KnHWahfk8I0PJgb+4ytd2bt5ecCfLXvGRZJoKqx3u7ITOSwm8dGE7cmBtQ87KCZYpD71N/LUeL03AflYni3jlbF56ICXFsU8Jf1nHU1useBiJ/XBiY5uCHaCzyb9Lp8GDu9B02g248MLH6Au6cMrPgO4vpPPwp/Dp6Tnip6WnNdTxg2HvYMnhv/HTvrmkNLmKNbQ+stlHipWRE0n8xRZbf/T7ly23Tv3r2DSnaX5VmsRfYPL9PypmJK6MG4+bF2kvX0bGrnEl+OXtYxh8czMcTjr179nGtqnw2YfAGaP0NNSpMQWEiK+NM3kH37pd/HGc5Z8t6brSw1/mKDqzETAqPbzOUmMlo9Kjf8Z+qvo66Lp7MfTmh+tV/bz/PSKfqdercD0LTfl9B9w9vR2WZqXG2RT13lV/4dsnNMuqw8IqUSFwmiFw1qndZfRl185JQ5yehtp+peVUQCPJ2PLnHOhTHY2kS2dtN9hixw+ninpOoZ4pwB06lCK+PzyVeKppzZwZtGXEnw6bwc7Sxmk1e6bMRG2K0T79RMY/uX2UU8uUiZShMprqdkT8xZ8ihcCZhMBZZ+k5k06OaqtCQCGgEFAIKAQUAg2HgPIqazgslSSFgEJAIaAQUAgoBE5jBJTScxqfHNU0hYBCQCGgEFAIKAQaDgGl9DQclkqSQkAhoBBQCCgEFAKnMQJK6TmNT45qmkJAIaAQUAgoBBQCDYfAGaP0uAdFwrgHV8NBUD9Jp1t76teLxl+KV6M9f+JjdVpJ2B4V3jn9+utH2CfXKn7xxf1kI81aMRuYAv0qsOfXBJzbvciQWnOQVwd3tjmui5sJnuExNQs5wzkiAv0wtk9H+NO6VSeaArw9c
fzyxcarDGkFLA15YIrxsh0XFyXzkhN0tYa2rxyjdR7MVlzvMn3R9pDn8V7+1F7qAw7Q/sHBCCVvlxLPkpTjKSJiGWG9QzKLyUlii07hTSlJQoIpeXkF+O657+3/K597jtc8+w3uOaZb/A7KzxMpKwUFZeSCLL+sD8PyzMqc6TLkFiugsgc4Hz5aUlaJucxjyKFgEJAIaAQUAicmQiYBgwYIGvj/Pzzz6dlD9j5uEvfXpa2Pfr6SxL+4LlXkJedI+FDe/aJ781FV4+Th/Vn097EAbKcbFi+Ej0H9BMnaGbkqahVZD2pDW1YsYqmprrh+vvvFJlfvPaOxbmafYLKysajhKw+ls/rSWtgpYQVFf5Kq0vf3uL4vPyPRVL+AFmENpJMbk+Hnt2FT9rzF7WHFIzu/c/BwJHnU9MqpZ3rl66gOsopT5K0JrMiwtqJC/84qRJFJZoliNP5nwutVyTaiegoFGdG4dX4rWFm07QYYzZz6ema4iOCJFniWkj9VQgoBBQCCgGFwBmHgEtMTIz25DtFTT/RG47ytFdk86YoKii0fDrOX0k5ozKyhlh8XxwxkcXEx98f90x9TL4KW/a71Yl5zA1Xo2W7NjRN9SLCm0YhJTFJm1oTRYV1CVJCqD1RzZuJo3RWWoZZISG1g/ICg4PI8uMrvkA8TSX8rOiwpkL1stKhpWnzUazkMLFiIwoP84koXdnhskSG+jkqihAHmIhl7dS34edVhOJSE7o/PYnYK6gqvS6WwUK1+os2fiN5UtbJH2u7nTAcR3JtZNeGp75NqI3s2vCo+uuHQG2wrQ1P/WpXpRQCCoEzHQHL11tnekectZ/9eY4lHLFk84KDdz092RK3D/z+7Y82jslavq4XuuAC+gS9Y4/uVquR6BX8h9QOYXORz9f5U3lOsCgjlCfKibk9nM7kUsnKhaZkZNHXc/wTvxwqK/wi1MwrJeiPUa4IIU6NhSVKPkQvIkWF5bD1R5dnrtdYf++p92tKjuRpbbHw29Wv+wzpTanpWN0DSM/Tj45k2eep+uWCcwSVwzR7/IxMep5+NObpYfu8xoS/3kd1VAgoBM4eBBq90mN/KvNycvD5q2/bJ1vi2aR02BA/9PXnDIVLyT9m7/btWLWInIx5akmUDU3j2LdtG/JJvpBeTldGRBexKiBczoUWMeSHiugpZmWEHyrieE1HvVq9PfIAooiusGj6DZVnGUxy1EppvCRLy6C/HOb6zXVytfWoX8RJVQZZeiLXQvKdkfEBagzrZfQ0PjIZ03WZxjQO67z2+XrceNTlc5ox7EymMV2XY0xT9TcO/PVzqx+N14aepo4KAYVA40DgrFN6eEBLS7KuYeP8NPKDV3+A68dK/D3vN34ai4KhKxGsWXCYHap3btomIkUx0Z7dkgfTRhHiAABAAElEQVSZHaKHRAX56bBksr5wW3jayMXFzfLwZquP1KtXTzzCK5WIVHP93AT2+WErDkukP9QusQ5xZWxB4iMrO6JEaPn6l2z1rd81tC0q0vZyhZY2S4T+6AqBfZ5Wv5ZfU1jP12Xy0ahccL5ejz2vnq7qtz03Ok5GHJ2Fdd6zAX/9OrHHwngdGXEw8tunq7hCQCFwZiBAT0xFDhFgRUL8WDhXIvz0pZ89tzVBc2RmRYNIitADmgtwmP5YFBqOsWxRRlgpobCIEUZLWFeQLLxcv6UBzGutmyIiT6tDIpJkZSF+c30aa/3qN0XQNhuhcZps+mt8QPAD0/6haZ+vFzTycdjIp4d1Hv1on86y9DQOM5/Oy3Em+3wtVeM1ho18eliXpR/t0x3J13l12XoZjhvz7MNGPj2s8+hH+3RVf9Xzq2NVG/x1Xj7aY8tper4uyx5vY7oKN0IE3DzoK9qzzi7QCE+kbZdOuSPz5Zdfjt8Petm26lTGaLCjEdCuBawwWJNoOCRdgi0p5kQ+cBEuqwWszFVCxCNsdnWY62WlRXxwpBwzMhGvyNZiepImh2K6PJGhxw28ej4nEY+0X//KS2cTHs7mB4CuC6v6NXgU/mfe9Wd3f+nXOd8sxvvBkm4O8PXvSpZXC5/tHW2Vag7xQZcn958lwV6yip+BCJSn7kZZElnvy7UFZ8/ALqgm2yGgP93sks/iKCs8PHjxT0YzGv5kMNMwEaVEFBvONvOZxz+J62HJNltTRBazU5zKagOquaxFhlYvv3EKn1iCWIhejsIctbTFIEfL0vLMYSlnXYCHS2pydYVO6pVkqUMUPapL1a/wbxzXH13vvCip3Ed0YevXv7P7T79v5P6jaWO5Z7kcB/klRyO5/zjGfHKTUbo5U+LCpnObC6nDGYuAW1g7mCI7n7HtVw2vioCy3VkwYe3CEpFBTQY+Gtx4eJM8URjMgx2zclyUBy5IP47zQMtjnkTN+SyBfHnEgqLzEy8PksIqxc0y+MDFzbLF7E5lyEWHeOWP7SAszDy4U4BlSv0sVWuOJo7+sgJklqn3S9Wv8D9rrz9y4pf7gO9Nvg/p9tDuB+3ekRtT7iLtD99HMhJwMeY3xyWXMzlN/OvM9xnFuYSiMx8BVnzKjlq3Gjrze3R298Bi6fH08j4rkYjzDMXM6HH0hTdBwUqB/iNFY2BkJXqFWAcv3cdGgJK3SBntKEpHDrJyImH9yEkUFgsPhytswqwgaW+OUliTI8J5ZNXeUsVZmdNYTgWvqMx12YY5u11gJc6P0vIsZaUcyeG26u1i52cpT0euX8/TOsAliKz1V5CTde6ISSj3Da1Sf/sAc51cxNwu7Whuu6QfX/217X9N9Yd5VWJ0i0r4utWt/1z/uRFAj2BbzI3ngrtZU/2O8G/nX4EuTcznxgn+de2/dh3UHX9XeuozPtGyoLn1/Ne1fkfnv39YBaJ9zddtHa+/+tTfPqCC7gW6to3nxRy23Ivme0+zagknWA+6KLoSMf6G8yn3hxY33v/6BwEs1qrbaHIkzfLHUZolUwJtA4BuIbZpxlhras9F0UDHJsZULX5hc9u02sS4n2NigJbczwagViRnAN0jJ4oGRQK9Q6tKL23RA3mDaPFYBxRu8sU3MRPg50p+OYoUAgYETB269ETruE60KaY7LZiXhy3rVyIp8bCBpXEHbw3ujaV58WQI4YcPkdkaQo843NDWFalFFViXZh7XKO85Whw61p9WTSZ2Hjwq6GH/9X5gwWEe3CiBSFdkWA8QMsvUIhofy5epJBpU+dNxflhIDovgMP2s6fxmyRkaH8sRXqlAkzeMFJ7eYcDioxTX/tORrEvMbKnfXIdentLZ+ZqzndVf0rIvXApz4JbL22nY1j+0aSV6klK4+KgIoHymhq1fa1fN/Zd6q6m/DZ2zK2LoCztakSDPvNKAhn/1/ef6x7esRHapCzam2fbfxYB/TfWzHObhvzpaI1u4oIl7JbZkctbxnX+9fr4OetIDYtFRlln7828ipWdsDOBNtt+E3Lpff3r9ctQuAUv9V8e64J/ESiTk2fa/tvhzqZquf2P9Q6JA1yVjYN9/wkSIzwCRfl/wfUD3vwdhcGUrV3i7VSI+h68Lfic0d4Z5DfxSXv/DJ1VIP7MWAKzJZ
g5Hh4tJoYmi3Wg2p1fNva8T0C8cOJoPuJH4HVlWnhGk8PSle37BEWtabULu1K1rYgEfOtfxubUpUT0PKyUDSelZUZuPYqsX5TD3+jZABrnU8DhsJFPidhT0HIeysFiYUmkQNlBKWT6OluZgfJMu+CJjvSFHBc92BExxHbtjw+plOJpwAD36DkTXXueS0vPdWYFLrEcwOnmF4+Xkf6xPIlF+aIiVgVAbzcSsbVaK3thCgwUt6PxsTxfsyarAV/uAzCJ+SOiQsfLEg58+ANKwSWV1GSxXH6D1qSjtDZLTNV6tLIclgf7woEwWCinLXMRL//W3VlK9SPlyEUVMS+N8YZCHrAiSOLWNBnJpKv0RXkp3Wj9Zv4piB8Bn6zyNx67+ctL8+EXYUic3zYyfF43QvcMqsZy2Eat3/XXovwBiqN++//zYLJONXuvQf3P9lYRtKW1hwkeNGF+t3/ywCvKooIeROc/cf/v6tbgt/p/vriCEq8G/Hv0vpxPC14K5gbU//3RueQaUz6neT+P116FJOTJLXHCsgBGw9l/DQ7/mKeag/8+soz3jyjXFok7XXz36z/Vr7ZcTZNd/VmQYG7PyJR3U2qWlateIhp+Zi3g05YysciEu9PB1QSIpIHw+9fuHMdCUMq2MhC33CldHHTHGuYCBvtgDsCLiiHqRAvvdAWDeIUe5JyZtWFNgFSkwhTTknM7kQs7FHgdWoajTKPj9816Vpn6btQVPhQ/HN1mbUcRWckUKAUKAdH0gNfkoPbzKkZ6ahGbRrWivS1eK80DWuKmnT1OklRUgq7xQG5S0kU/rtGVA5JGXM/jggqziCvoBZQQPWwwS8ytRnJOC4uxktC2Px7GMLJTQooVl9DgzuVSgmAaOLHpL4YEzyKNSBjcWzVIziytRVkmjHSV4meit35MePIR7Ccn2oGR+yJSWV8r5CHCvEJ4KejKVUj7L5Ae5JskFr67iMpVIKdKTKE8GW4oLGeJ6lyRfq5/7JoO7PLTMRdzcUbHmZ2QVZZNexSOgQQbFXlvpYq6T20FE8nhsZ7EmUnqyqL/SHsrSHhyUwaxO6o/wcUEJAcsPAJaTX+aCvBKSSddjIPWfZfB3NW6Uz7hmEz66FSDIk+pkxwwixo95WRnVT10q4fu3qZIwBWFKTNIOzq+m/+b+3r3Wlc53BdKLzZ2jgxSnv2kkkxW8DNmrV6tf65+VVxpllsUHHzfAjxTnAgKqnK4TrZ0a/qGeFXJteRAPPzxzSytRUM5rOlXARBZBtgxx/+mykE4USD63hojkvboS8KT8lCLn9TvCn62KN62m+gi8PPPzgesXfAmjXFLsSivM54NFS4VcL4es9UtQklwQSOef28JGJz6X+dRWPT+MZtP52vYy0VhD7S4gpYh5uJ8sLZBmJfie4Oud76VMsrjy9c4takJ5jA8LKyEZnM73g1x4VHgaYeDj7oIkujcpy0wU4AuT49I+nV87/3yNcdY1KytRQsNigXsQfMJawisgDN5BUXSeaEoo2gWbMypF6aGm0b2ryeRrjUlrO/eB7wMt7yqyqLAlh4mtNM1ISb7nPy3O01pTumvhnZT3Gr1Q6TS5G9ChCWFA/byyFTC2JfDvMYAVpBNBdBrkumPZN7UFdlF7CknBNaZXVy+fj3f6A+F0XlNpDHqOXGD4yP2Ipekvf7reuY9tA7Wx66l12vHTQWRZzAPaU1/59Px8CPjxYHU12ea5p+xBSey5dF75PNKNb6AthUlkHXNBG48QbCsiLU6RQoAQMOXlZKPvgPPIupOA1m074cCeHadE4XE3ucmbuJeniXYSJ23CQF6e7iguKQPzyE7hhrzqyjFbdfnRpiZkAqUHOo9ZfMcxmQcrjn53oJIGam0wkzy6qfSHLL9R8+7luUf3oSDtEMKDAxHdsSfCTf7ihMzDs0YsV394aGkyKOq55vq0RjAr8ehtMfNUexB+A4f0hUQYkrQgpTAvt8tevpG5Sv1a+23EmeuQNAM/J2tYmo/cCilurlPqpzxj/eapPS4qZJFnbJQlkwJ6e8z5Fn4zjyUurbHld1S/hd++vDluc9A7bu4P5zkrb6xL5zGm6XL1PGdxS3/NDIydXkaXx1mSrvPQUe++OUnMODZpOr56n3RGPlIeZ0uWuT5LNj9YKENvh56ui5O4TcTMQWmsJOgy9XIO45zJMiTTcDSkyzWky+N0JipjLGJOcpgm/PRH55f6OFF78cgrKELCUdIykg5jTGQMdrnTopyVHtLt0TStdEWrSkwmZTiNFGGtKZpKxvJ4axkmfom5NKYS/6WQspReiYntSWnXsiT/MFmM/reZFC2avgm3W7WDlRs/eiV9sTfwawLtj5eqKQlS0Pzn70TgUD2mp/iFbW48sN4wXfRgZyCM2vDGVk04K1s8tdYlGJi4zFir4zDz76V3R7ZK3daeFKc44FVS4kI8gRwaQ1fS7PhIwu3jXcCNpFQNjATmU79YGYojRWj6bqBrEDCuFfD7YXohMBhmvj9oGze2wDVP60SFXwhccwkkA5WQdZwtPK08gpTSY8DlbA+aEuL3omPX3vAPbEI+Kq5ISTp6SjDx8jAhKMAHOflFVZQeD1J2woP8kF9YjPRsw91ALa2uHHekuvymHgFIKMkkLh6JWJ2hgUvGWi3+VyIrK0z0l0c2y4BF1hd64GzZuwflZOXp2aUjPYdcsS+1APn5aaS88VSIVlKKV/PHymURXg23s6yqZa1ynZUxp9easQY5lF2vVlQt1PANq7npdedoQNyqr9yuIruobdlqMqtcj8547dNriptbYMNmE7EyOEo232HWfjhkomxDuiFoLcchpxm2bA5jVJb+s4XGZDLBx8cbzcKDUekWiQW7DmFkuwq0C+hMlgq2hgGvbyWFhywZ1jq1C1l85MhqxOPFOeRvU0Yy393GgskZPkTzz9GrLyLz0W5652LLsTdZgIyUQgoE6QlCSRQ+4EC52UL+afyrK7GVkJUTI72zHbiOlK/XztGsOy+QsrWH2vbEWiNX9eHXSWHiPrFj8/CmVt4dNMSuJn2ElR5W1EbRMZiUIZ1WUUc5/R/6sf9SH8KNrVo6La7mkeRaSI0kqvAJqqL0cHp6eQEiTH4cVKQQEARcWeH5e+HP+O2nWdi9YxP6DxlJ0ymktjd6IjN1SQ5ZZsw3BA1KYp2ho8zVU//lSAOEEA2GMqARFys0OxOOYe+hw+jUvg350tDWFumZyMrOQWlZDbu0m8Wpg0JAIXD6IcD3dmlpKbLpXk5LS6c16QoR2jwGy3cfwc4jPF0CstxUYHcWPd1FkaQEJhonxILL44R5mqUTWS5yaApWLHM0bhykaZzTlXi6mBWPbFLomLhrf1Gcp6hqQ6zs8I9pX47mJO1tfoyw4sdTygIX5XOcp+912k/8TJRM07lAZ8KttlThTWYiItd8fnmtSiFuPkgqO42Br9pklXKCEXAtKytFTpb2upB0lOyNRE2C6JXkJFMRTV8dTs5CcamtJYebUVJWLnm5BeY70tC26soxm/N8F8SXZKCZO900dDfaWGb47mOiAUybJ6YEvmMlqt3Z+4+l
wCeAbzjyJ6ABsrikattEhvqjEFAInJEI8D2dlZmFijJymPULwKqDqfjraAW6Bbvg/XNp6oanpEjBYcvGg515+kofOCidxo7d9DD3J180/Wnf3MeQf4IR6d+pBS7qF4fgAHKyqQXxlNPzZN1ZTIoOK0D8RerNcZrlRy/OX8U93AW4JFpPsR55ekufumvuq8nQHaF1ZcfKbRtifp18abrroAOrlp5vf6zwJ7MQkWt+un0WfY3nBi/aRiJerPm22W2bhwg+cS1O/rPOtiUqdrIRcOVP1Zu2aAk3Mum2atuB/FXpK40MbZ70ZDamlBQbVjzs/Xm4Dew7w3n2/jycV105p/l8F9JvY+ExhJp8EOTmTWOU9uqhfVFFJZlHJwrzP07jgY7XHNm4PwHhzaJRUFCI4mKl8OhQqaNCoDEhwIpPXn4+TMFR2Lg3HsmFlXh8bSU+2kVO7OIo70LOybxchAv5qZitPgwAjRUrk3iqiBzE27mQolSJvuGG/BMM0g0ju+HaEV3RuRXNF9WCllFb71mh+fow+0bSIW5fBszaZy3MSyHw1JOzNXnYaZv9goaScsRTY7Wlc6iJTWl6b3QLTXHi6a7aUmlYW7hlk6Zm58TM5bt6R5LDfyX2lVRViMac207wGXNu+9pWpfgaCQKm+P270eucIXB1c0MRuetvXv8f3a/0dtKYiS04RAdKM7C7OA2XBnbEjMz1Wo/NeRKx4MD8pPRQ3p3tK9A6ALh1WSbCuvgjv+DkK4haQ9VfhYBC4GQgwC82TZt6YXtGNpIL+GWIXnrktmefP22JAP4CjX/02JYm8UtSRokrFh+pIF8WF1zQFDhCZaPE8KKNP6/0gc0Cgd8O16Z3WNk4XvJ0lw9zkZZNn2DVgow+Q6ywFJLBnX1/bNYO4u4R2T8duNvsHM2+OmwF4vDb2zRe/a+5qB7l0dRCHH6jnxbdQLhqX0Jasp0H3EzgdcR818xyyDM+sAt+zdnp8HN1X28PKZOVRw5Tis4qBEyb162gBQn/A6/IzErP2UHWW44XrpoaMQwz09dbtnqwKn2asmONV+KFjYxQBQ7nlqMLTW2VkQ+PIoWAQqDxIsD3uBtZbDIKSsEPZb7/eVNSeRGioeS/FFf6cTqPF2aVwPzy9PleF8wgawm/P93fxYWmjqxjz5RaOAlf/TfLrRsF+HjKBxzpOQXYEU8exHWkFzc5LvCmnSKjc7E1iH/8ISavW8VrOZGbo9Cja3QuQO/L43b95i/VdpJLDiOXUwejeUnz7nAtzIIpmQTYETsvt/cMw0sp/9jlaNGWkU3o5Z4Wll1M3teKzioE5HWAT/7Zo/Dw+aW7k0chmtLaRms5jI//Rkvi9w/rmERh87QWD2D6OjXCoDkrMiu/5ShSCCgEGi8CPD5axgUO83ggVmA60pda/FJEIRlPeCDhL8D06fDX+1XSZ+WVCKQFi3gbiY92nnic+nZoLpV89tuGE1+ZoQZWdHjl6PqQrLNUx4Ieh9aBf44omZyXL413bAHyoSVQPOlr4aWb4lFArhOKzi4ENBvo2dRnfdBiHx6jxiKKjfktzYyHDFwcFj4e1swDnizcaF2Jx8xe5wNJs1DTyHB0btcSO/YewNFjaVSlnivDqYXPcUDn1XNrKGPPrhc7jqNWYz0E2xQxR2povk0zbcrb5DiNNI2MQOf2jrB2WkROfTW5x5lVi07UyFINg+Va0pvpjNdZul7OybHGYsTgkMc+0T7O9dml2UWtLXKaYWWxCdnx20VtWDmiX5M8TgiZj6zwkMMsjxWs6jDJvWvm+y3BRVZy5jV5PtxRQX5AVE4XIdwN/2fx+v3g35lA/Dn64byT21JWdK59/seTW6mq7bRB4OxTemTA4cGJBx/66Q8EOup77WgKh7bYmK746G9uPAhbp7sa5jxG+BVj0o0D0SK2BzKzsvDM618gMVl3vnM0GjsYNW3YbCJ1aKQDuZbS9ZVpEWAO1EJOLVjspdY2LljfZMaavsyxxbq2Uox8J7CxXI1D8Q4TjY3Swvq1bclxVs5Run2afdxR2+x5zHH7ZGmPfWI1cfssS384UG1m9fk1FTXWI7z0Ryw8lGFRfsyWHvOHENZ0GSnos28X+lE54acXLVkIjIVVd68ZK27cYV6U8HSn8tQzoJGnO4inUfu0T5ZOowad+KawomOuxfxQYIVG/tGApmWZ/1Z5aFA5PU0/Sgnmr9/P5EaOjtGpCMjeQnPi5WgWGoBWLegziWrFUSbXb/yZW67jV78htbpKdcm1OR6vnOrK1z9PsI4xYB1GWEfzJ6v1l1kbNGrkqa56KWzP4EiimcfmmtD59PJ6XD86SrdPs4+by3KyDdknUNxhUftE+zgLNZTlbIfkqJzOqOc5KFxNll7a4ZFvKCqrW2D5qEnnv+a7TWQLEzNSKqWTsqNzyv3KaVpBh9WoxNMLAVZ4ypKcODOdXk1VraklAmefpccCjHXk0SeqZHDih4YMTGz5oQGLfXlkwDPz8xub8BAbJ1nFWCRbAlLOEnMYKKc9hxYcCMe/SfG4xmUbBvfridxc/qLAkWAWWB1ZyzQJDMC0Z6dgydL/8N2c+U4K2cq75MLzcOet12DCTffLZ7pVC1nlc56npwcGnNMLvbp3QVREOOITjmD5yrXYuGV71aI2KbZybLJOYKSc9o6qinUtV187nnZV6W6VhHpIN8gwBKsKcpZpn24fZ0kO0qokVUnQyjlKriLPEROlOUqWjjnNkFyHBWsqYi5pe3BQiJWckjyUbP9ZWGVvLdtCMl5wkowbrPSIdceOSUUVAgqBU4rA2an0GBUXY5hPBcfJZ0dTgLS4TGcJH6WLPw8NihyviRyMnfZFWH9KKXSnTRS90KZ1K2TSlEtKmrNFLpwJrNqW84YOQKuYFrjt+gn4fu5vljdU2/pt5XmSg587rdukPTxs82zL0Ye5tMQBK1Ud27Ul2RXIzslFl47tcPHI4fj4i68xZ94fWpGqTbMXddLidcPaWbMMuBiCzrgbLt2uMruotR6nGWYW+3z7OLM5SnOUbM9HcfskqdVRon2as7LVtEdkO8i3F23hc8Brk1dNpIbr2KjgGK1BxvRqpKsshYBC4CQhcPZNb/GTj388iJHiwg9szVTNIyUlSp4+wmmjp1iCKF0bzLQ05uM1OrRRntPq+6OiROcP6I64Ni2xYsNWpGfVYUlSKV217guGDaIcLb1rR1pa1aZ9UkgsNVWKc0JVcXZplbjwvMGk8LTBrj37cN1t/4cJN96DSU+8QAtIkpPghMvg4cHKk1UWK1O8t5sj2ZwuyhbXK2TbAFbGpCCfG7ufJ9djl1ZT3IL1+i1Iz6Rlc+tUXmuK9ENr7HH8te2n9NERQHplRnap1Zigh501xz7fPs7lHKU5SnbER2mcbENO+OwZBX+bguaIo/I6n55HR0PQKto+UY/r5et45OJERoVGD2s5tn9Z2VEKjy0mKqYQOB0QEEsP35xnzTo9uj5jVnDaeYXh8fChuDnhR1JhaGTjAZgHLDo7/SO0Rbo2pmkjHjs62yg+wlW/02geQ6lwpWy0OmpYX3h4eSGQlrv
39/VCRnaenWBLw+3Sq0ZjW8WQlac5Fv29HGzxOZ8UoC3bdlmeB5dfcgHGXToaYaHBSExKwZff/IS//v1P2qI/QUy0yevkB+5Al04d8PLr72Pztp02FfXr0wMlJaV4+Y0PZN8xztyyfRdmzP4B4y67CK1b0s7UpBBxWybecg06d2iHwsJCrFq7Ee9+PINWsS4WpeueiTeif5+e8PX1wbYde/DpjK+xd/9BqevyS0biCmpneGiIoZ0rJG/0BcMwdswotGjWFPsOxGPm1z9izXonC4xICe0Pb2prwdo/kLD2doC1oUC1QetZrJbteDJtqrCJ1FKqfRn7OItxlGYWXyXLPoHi9klS1FGifVpdyprbY6zMXpzTevWyx3GkMcGejEoNjwv2ceY3ptmXV3GFgELg5CPg2rFbH4weex1GjrkKF156DfwDaDGJxkqs0IiiQgOYWbm5JagXlubFawoP91sGN43vqtbAmBjNEsQj+6hmlfhwAG2W56JZeFwsX2Iwf3U/FuycOrVtgSaB/vJhyLBB/fD8pFtw1/UXo0Nsc5hoGkkjg3xue5UfcZlZzh86UIp8N+dXbN66A4PP7SsKBg/bbVq3xF23XocD8Ql49e1PyH8oD4/+3x3w87VugMO+Os889gAGUTlWJuwVHhYe16YVdv4/e8cBHkXRfUBCGhBI6ISEXqT3jvQiAlIEUfntotgQBKwIiiBKkWbDAqEXQQRFkV6k995bgIQQOiSBBP73Zm/2Zvd273avJcDOl7udefPavLvMvnvzZufQUTiPTpNYflu4BHq88CY6PEcgd+6cMHzwQCgWHQVTZ/0GK9dugFbNGsEH/XozkgF9XseI0aOwev0miJ3xG8REF2H4EXly2/TsCSeYnt/DtevXoT/TMxQa1qsJfXq/hDvdrsKkKTNYVGnQ++9ATNEiyFewk0bdwdb90db/Q1uXKoK2psCnc3plvzhyD+oOn6Xw+Sr0MSJDrT+n4XDepqsWTABTt1zUuNhmessItooaj8AaME1ajmtjpbjYeHBWdFUU3qEAerHhIFDBW8u5IZizaJCCgdWwLGBZwC8WCChdrhJsXPsvXLmUCHUbtYLqdRvD6qV/+EV4hgmhCRcnpBKBeeCR4Pzw5YXV6ArZdlmwPkkzmrTYkoxtGWtT4j32mPVKeODgMcJ3Pg8Kw1MiKluAkZ4w9sTXu/h0r9AcYVC9WlUoV7YMtGhUBw4eOQlrNuyCgyfOsNOfE5Okk9wF5raqxDUbPiyt2aP14QhGP06fOQvLMNpTrXIFqI8JxysxqTlXTsm5uYu5SXHnzsOAT4ZDrlw5FInLX332ARSNKgRDhn8D6zfZjucQBAajU5Qndzhs2EwPP5PkvvP6i1Ae83t4+Sl2JoSFhjJn7tNho224ACEYzaLoEzlZFC1avmodTPjhV0Z2/OQpGPpJf6hSqTw75ZqA6UzPeBj4yTDUMyfTkyJXVH6OnYXRptsQn3ABBg3sA7VqVIFTZ+JYn95bnnBXtt5ps3UaJCZdxbPd0vRY2eDqT1MD3QCKBpUBkBHGWjhaMBSnCdYCIkwL7ADUQNL9p9HAFflpdYv9BqzlEkUhQ9HA8araZC6EaTk7XI6rfo5nXS0LWBbwnwUCUlNSIOHcGSbx+JH9UL1OYwgIDIQ0zM144IocxcGR4YRVI6wIJN65CVfupuD0SZMavugJq7RjC/sx3YdNbCw4dC8LJKXchRt4D6yEhwfSHg4W8HacC02b7ebNFEjBz4EexE7OyO076XgAbCDkzV8A6kXkgWpVyrMA1JEjx2Do2Blw4dIVmwzHkHvN6lUgD+7cotd33wyD0BA8ARBLz6c6M6dnNy5TLf57OYu41KtdHZecUuCvpSvhpyn4VGpbIYeHyjF0QphNWMv+loJLU9dv3IS8kXlk4E6MKF25dg3owX9NG9UDitYUyJeX9UvLVZKhjhw7zmSTYxOIh9ySc8YLX9aKxojNNIz+kJ4tMTJUv3YNuMX0XAE/o57UpjLuqyGclF2LFkG9NW5OItJNPGvHsK2/mS7YWuSSEXUzXzQ9XB24JlgLaIM5dDkA0EAqGGuqYLIZteA2mFaXmrfMx1lFYCRUnVEo+xz/18R+LQeHR3qcOUYiD6tuWcCygO8tgP4NJYlKJTBQOoQtOCQUbtzR20HEsb17DcQckrT0uxAcFOBw0nowJrKm3k7Dm2Q2h5PWndGRhg79NOGR84M3x+iA3HAu7ark2PCbJXo60iSVBeafugu36Ic+9dn691+6B2XDaQIkRvyFVbdLFth79AxcSIiH/PkLQkhYTrh14wZGOi7jIbCYcoWigoOC4BIedjj3r7WYf4KJt3Ih+crSsmlDBvhjyb+4E0z6DJ97uitEFS4IkehA0S6rX6bOxh1W06FcmVLw9JMdMW+mLWzcyg4VY7RTZ81n8M8/fg/6DBwCN286nsl2GJ8cXbNaZbakdBq3qq/GpavVa/Eg1v89xXicOXMOHWcpShIdVRguXrzE4NFRRdCxu4f5O4dYFIf6uG1ZHbHOYQSKLPzz1Jmo51QoV5r0fILlIW3ash02oa4UVer58tvII519XhR5uoZLda6K+7Z2xdlsv+NnZ5aDhG+EjwaOBkifnw3ZgcYBgCxUMNZUweSBasFtMIcuB4DMxbGCuGbQHRloQBwZis6MWBeJCa7lEIk4Vt2ygGUB/1kgW91GzQZnxehGAP7qpvweivKcPXUcIwBuHqJiUvfy5cvDkSsBEBacHQpE5oR0dHxS0MERSyj1ReRkvkqy6qwUZ3TEw6GfRXuwA++qXcIrQDye0bLpFi2J8EkNO9DBoYnq5I0seJYMwfGFYLoR04GBDTHBec5/e6F02fJwIZE/ORk73Sy3klOhQJ4QyJcnDAKzB2FSeRAKywpXL1+EVPwckm/egDmLlsOydTvkg/y0RNGSUZ83XoHtu/bCl6Mnwh5MLKbXIXRQmj3agOXA0HEXY0cMhlw5csDZs+cx4Tgal7IKw4JFS/BZO/mgBjoyQ4aNgfgLidCmRRMoXbIYrMJlMbKHWE6ho9OudXNo2rgeLrulsaToJx5vjcnFbeDw0eMwHR2n6+i8tW3VjPGkXXK1MApFicmUZ7T472VQEbe4N6pXmzktpMdLz/VgW+F/xC3vFM0ZO2II5MyRE86iE1QcE6PJKVqw6G8WnXq0YV3MAYpiB762f6wlDOjTG87jMtfRYydFNR3qzNYRBmy9lmytHLOSmbM+Jab3WmZlqvBVTaVeWp02mFaX/P8ichEQWVVoi2isru6ztdVgTTkOzGwAJHag18M1B8+fPz8cPrgfcuD/DXdw+FXkpIZRWw0T8a26ZQHLAv61QDZM3hwcU6IMFC9ZDuLQ2ckTmQ8O79+FNzITx916oDN3eoICAyCEIjp4A1U7PXQ4HEV77qSlg9rpcUZHain6ucPEIj24TBVcCCIDQmHFzeP2Edh+mTEPi+XyoP9B3g7etOkemJSMT1DGhOaFm/dC8dLlIfGiOadHb06Oi78E2e+lwK2riXD5UhKeb3obrl66ABdw6XH99v2waMV2B8fDrrRUq1
OzGjRrXB+mzpzHEoAlNw0gHpON27VpATlz5oBfp86BQrgE1QKXjSgqFBkZAdNmLYA1uJxUumRxFr2ZMed3tvOKcnIo/4a2oVNkRiyX8HlCe/YfZA4NySQnpEypErg7azuMGPMt3Lx1C27dSob9Bw9D9aoVmdP1CO7g2rlnHwz7ejzLxdmEO7lK4O6uFk0aQY3qleEKJiZ/PfZ7OHbiFDo68ahnfqYn5fBIes6HtagnLYNlyZoFGtSthUtljdEhioZFGNmaPW+hSxvRGOLwiA/J1he1bb18myE+oj3cr+t9I8xydMLHSZe2l2Aj0KTTAqpgrKmCycPRgiPMAewAkDkoK1q0Sgz3Wkr5+fPjvKhyejhf7tTwqxrO29bVsoBlgYy3QJaYmBj5v7tIUbzp1W8Ki+ZOZrkl/lCvU6dO8NeJYMdlKEG4d5e3cLjM6bkHnTHS83iucrhdfS5KY54NXskcFJJmCT3ylTk9pBPm3PSrdBc+/XkGNGrb2cEZIBRnBVMfdbtzhARCtRK5oWQBTADOngI374bC7nN3Yev+MywCpkvoRgdF9oJw2YycE3UUxww7Gg3l74SGhOAzby6zKIwWPe0IIzm0zV1d6Jk+9JGkpjo62qQnLe9p6Uk3mXDMXaIdaOnpmIdlosi2LhgGuQPR1vfI1ulu2Fr+9zEh3RNUg/Jcomkh2GBaXY5eCQ5ChciaKphiqOo+bKtBjgAFB3tDi9beq8FY7DRdr1jhEVj8+zwoUKAAflft/8O8zq9ajJ31aeFbMMsClgV8Z4GA/AWLwMXEeIwChEOl6vUw2nPMbw6POCyK4lBJTnG8KabYIjS3bTkiRukIT8kXJ0phwtqVEg/PRVSHPBjtuZyebPN3eCKzJIWmN2lexho5Qlh2XMRt67S72YkDQ73axTbL2y4ch9RKS02GLEknIelmFgjLfQuOJ+WETafz21DsEy2n8eSahruS6KVSwy2WFPWhl365x57Lo9dPO7D0Cul4Q2cHFTlRFB2yfXC2qx4nO5zZ+jba+tIpSLol2jqfHSlDah5+GobItZAQpgVmNtDqUMFYUwVT2E+jj8KmDkULJiLZ+nXRdDtEJl6ri84M/+EgwqjO4V4TajGyLGBZwCMLBFBkhycwJyUmwO5tGzximLmJyXGxTYw4IR27nQQHUy7AExjx+TVpq6Q6OjZs+zo9g0f6Y7/s+ORFEaCtF2mdHrsxIVf/ZmHOEgFZ70KVfIlQIg9uSYfskDsMnbDL9mfnuC/Iu86S46hs9nTscBPiLj/jdEpb4xEgYZiofTnUTX1FMuM6iFQe1Q2L1EDk/wuaCmjga33ZzfLQxNeSxZUS+oQq75Wuuh1KNC+3aE5QOzmiCHW/2GfVLQtYFsgYCwQsWTADQsNy4C/x5Adzm7poV5pwyVthBev4F3t5J3xaoCnEXtoO6fdsSySEg8tYFNkhbMnhsU1wlGB85x6el0WsaLKll7HiDPMOHjy6Kb4AvgoaY2YYy5lUKVblHMOwIAOI/pMkKaMtz25rzEhXFG18BUpGN0yrqCJgTRVMMSatPg2YpvPCGanwWVMF46i6/z8CvlCVyXTp7BhWzbKAZQHLAmoLBFDk4uYNcRu0GuUBarPwjG0GZc7PPdiXkgBPnsJn1PBJnLrR02FHTtDEytAlGu78MIfIraUtI7aUZBnB1Mfhjp0+hjd6lFK8obeolbf5ibzvs7phU+ggymC5ojKACTj/P1FxkJoqPnJTrghUWjDebevTRNEEckLb1QiOisRl087TiuC4NJaFYFkg01qAnb2VabXztmI80qOYuKXJTDpXC6M76AzxJGa7eMJhMR8p6kMOE/7JTpAd0WBN6S4YJDKBZp+gjRBJIxMxRf20eYkYIqXrujY/13QPOIYhsxhCkgwlo8oVDQPq9WnAFf8zalYqfLkpVwQCLRjvxj7Nbk0gJxKuRvEEEjeq4pIWJ+f5O+o+y0HiFrKulgUyhwUeLqeHRXfUhpdu+czRsc24DEK4zElCfNtcyiY07hQhWD3BqTmr25IkgnpjciZu3it23Yinc/30JTun85629xknp2Zx2ml8oIyNUV56eCq4S55m8FW4ipFhn2a3JlBBqUOowvFuU8uR0YKZnR+8q6XFzbKAZQEtC9wXTs+91OuQJSinlv5uwGwTKTo00tETEguaoOQfs8zhoZwe6qM3ySWQHCOq44tyfjK0GLkhGFVQ342xc/CmPDtXj2t+Vcv+XfBYb7cZ2AZsetx6BCq43JQrGpqq+uSmXBFotGC2bvkfTkBnVSc0MqoLHBfdMhujFYGfGWdGyxkyKtLCsyxgWcD7Fshwp4ee0aNXyNlJTzwEd6+chntpqXpoHsH5pERXsThrU9+9O45HM4j091ddOXZnuhtxj5zRG+4zrhKyNIVsWAVtRF/IMsDTAIo5fQWGQtW5LRWIwo8CLckqXBFF9b9m73JCw5Bc9SOSARS7PN/W1HOIb6VZ3C0LWBYwYoEMd3qcKZmedBTSLx5xhuJxnxThcZwp9eDiROZI5bE6GcqAHBq9MZlydvSYmB6d1xiZlqwk8IMeHokwSizgCVX9T51bQYHsA2eH5KhkcNG6cBlBh1SPn0BnoipGhU2QmV4CN8PbwrUsYFnAvAUytdNz99IJ8yMyQUEODA9V86vo1LhiZcoRcMUsk/TzMYk/xjnMcxW9eyNy1MfX/B0lGoJ4RS13mQh0QlXS2wGgGo7QL1flij6u2MPQTdIwej0aG3PNbk2gqI3bdfF/QJw3RIZacC2YSGPVLQtYFvCvBWSnJzsedHn7tm+WkFwN6dUmG6Fv61UMbcjC1jBzYzVW99WSFteHOzrUVjs76janUV5pkvXWRCtOq0opPmkZUNucRnaGgXho7R08r4vsGxISzM7gcjUGOqaCTl8nOmWR+ObIEQY3bhg4BNeuhpJNpmr5SkkVX1XT2HdVIBKq2rQKBKWFWZdevx6cWDjp0+zSBCp18XJLnDdE1mo4tY3NIyIXq25ZwLKALy0QUCS6ONSo+yjeoLJCSvItWLfiL78/t+en1XVg6voasOOzURAUoDxh3ZeDJ978l5h6cuITmBruTX0KFsgP9fHQzOpVKkNWPECTDtJcvGSp6UNMuU7kbNzF4zzSTSVZS65N6xZNoVSJYowVjTkOTzbfiweKnjx1Bp0RStp2fXMpXbIEfPJBXyiIhzOO++5noINAH2vVHF596z04E3fWpqYjn6xZs0LspAmQdOky9O4zgOGxseB5WunoCFWtXBG+GjoIfomdCbPmLrDxcedCsmm8jjq4w83/NC701uzWBKpUF3CEqoTkAECwFkwE6/Tr0TFBOjSaYE2gpK6P3t2R6Mu5w0fDtNhaFnjgLRBQs15TOLRvJ5w5eQQaNmuHB442gdVL//DrwO/eywLJdwL9KpML484Nb4tXrUlL/PUmrfObmQ7tsZOS6GCM/GIwhIWGwK3kFBYVqVmtCnMSBg76HI7jSeOsGGQfnisnzJ32E/wydZZJx0AS0LB+baBT2ulA0GzZsuGLHS4GO3bthU8+H8FORRdto1Vv364Vc3i+/
zkW1m/YxA6RPR0XBzdvUoRGZyAIvpt+F44dP4nnd11maGws03+WnZzLeK7XmbhzcP58goZYHb4amBLILL4uIy91mNTHKbrTTpW+NlxdEq0OLRiylcFyRZClBePdOn2aYE0gZ6S6msFVkeo07f+5OFxhWVwHXQY7m19kJKtiWcCygN8swJa3jhzYDUHBwRAcEspe2YOC4XZqit+UyGhBWs6Nnk4irjgR6uEr4fbJ+NUXejKHZ8ac+UAv2iXfvWsneLZ7F3jyifYwYvR4mZQiIeSIOC79yChY4bzpyutSvyO9tuYU3XnxtT7sVPMa1avAS889DdWqVITOHR6DWfN+V/Cl5Sj1qeh5IyIg7ux5mP/7YiZ4wR9/Ab3UhW4EdLK6SP/hp1+o0Wzte3Dq9Bl46fV3HPqlcQXo2kVLjto2DkwzGqD86DS0cYmgQcNBNlpdFnodGnAZJFe4ENtVD07dOn0KsKKh4q3VNIuvxcMYTM+REZ0hjsPnC942JsHCsixgWcBXFgigPJ709DSoXudRiD97GgoWicY8jNCHyukh49KkxCcoavMJTIQRXFFonjU515K7EYwOZtVKFWDPvgMwZfpsWe60mXOhQvmyUKJ4DBNDjsUbvV6C+nVqQShGhGi56adfp8Hho8cUauSNjICJY0YwWI8nO0GLpo3h5d7vglF6BTMcUAo6vBSpOXXqNIwf/SU0ql9Hjh61a9MSOndsB0WjCsORo8dR/zmweet2GDl8CFSuWJ6xmhX7I4wZ/z3kz5cPnnmqC/R8qTfkyxsJo0d8DvsPHoJHypWFiDy54fCRYzBk+EhITLwIw4Z8DPEJCTBj9m8w8Zuv7GNp1hg+GDQUxo8aDuMm/gj/bdqiHFdYKOxFO/7061Rml8KFCqKcoRpyvmZylGP1c0v3u6Lb4QUFbbw1RWgCBZlCv1CVEBwANjqTcAd0B4Cgj7p6D3LmzAGFCxVSdNBSakpKCoOH4ffjJH6PU1NTIRL/TwrkLwABAdkwP+wGc6bTcQnVWHGtl+jYiPOGCDcmy8KyLGBZwFcWyJqedgcKFy0OOXKGw+7tG5mcwKAgX8nLlHzVDg8pqQXzhvI0dZYqWZzxX7lmvezwEG/KnRn48WfQC3NgqAx49y1og7k2q9f9B7Ez5kBM0Sh0Dj6CyIg8rJ+/JeMEP2f+QtYkR4rnvRil53yYA0cK2l4UtSGHpjg6YdmzZ4dGDerCO2+8gstQV2ASOl8EG/The1AsJgrmLVjIlqAoL+eXKdNxee4k5MqVgzk3xD8Yv1Pk6DSsV4flLS1ZugLKlC4Jj7VuwcTTmPJGRgIby2/CWOYsgCCMChEtOX6k3IC+aJeWzSS7oNMYE12UOU2RGGlSyvkHlixdLsgRBscH6emVtrkZfenKYibw4E1nXEwvZjKBt4grgOWq0C9U5W55DHaIVOPIBuEO6A4ANSNscxx+BfYdDA8PxyXUW3Dt2nX2Sse8toqPPMKcnvBc4RCQLQC/Q0FQsngJ5vxcuJCIDlBeyVnirFxetaOjopKio0Nwy9kRrWPVLQtkDgtkDQoKgaq1GsD2TatZMi2pdQd/FT1MRT1ZGR+7y5kSWSlxMJ4EhQpJp3ufPXeO9QdjROdbjG78MH4ke1GddivVrV0Dlq9aC+O/m4SOzHwYNe5byIW/bKtglEjkSzkzy1asQhiwaNCylashMDBASY9LaKPGCvRKtRit1tu58/F408iGUZtIFkEinJ+nTINt23eiIzYbsqOcWjWqwcbN2yABbyZJSZdg6fJVusnYRDNt5hyMBH2LS2HnoDbSSmMhzvdY/o99LAdg2cpV1CEXSnCuW7um0i5jJ6CDlVOyC93oscSiMzQNHcUxaDNZjnrM3mjLmvm6oqGs7GyhbI1uSSN1h5aeNhzRSSKQonA+CiA2tOAc5sBEhe4Ej4kR+zV4EY4N5Tx+T/krFSOVFMk5cvQo40JI9DT1M3Fn4OTJkxjxSwT6sUffa5kBZ+T0KrHTmy+0nBwtmE0p62JZwLJABlggICv+4yecj4ML8WfZ0hbpkHzLwNbgDFDWmyJp4vJ0QtKZhl2qSb80qeTLl5dd0zDEvnHLVtxXlAUa4lJS8ZhoKILLNIEBAbij6zjDobejuLuLStGoIuwqv9kmfta21QsXtNHjEhSbx7FTSa/Wnrf5VeIejbIouTk+4QLUq1OTAceNHCZ12t4lfWyCGYzzEK9SPQ4TknmhZYhCBSUHkMOcX+9BYcRndqElPpuDc/ToCUZGepw8eYrV487y3WLAdo4Zk8P1da6FY6/rKICdxoUMF912PuqaGUIBV6iqOUptPQQtuBbMxlXRpWioxOr06YA5ceVKFeEuOjZU9uzZi5HGExAWFsa7WSI+OUWUB1a6VCmgeY+WU40XFwpoMBKdI0/nGg32FsiygGUBNyzAEplv3riGibIBUKZ8Fbh6OQkniAc/0iNOQrwuTlJ6tiRcjifd6sxMhhLFCbwx30lLg47t2sLylWsgDesUmaDlInJ6aImIknfTcVdTTFSU7LSQA0LlHCYcc0dGrSflK1BJQCeF0eOSGC/RRQV6DnRyrYwRpTq1arDcHdJxEy51lS9TGnq+3Bt5p6PTCJAnd264dv2GEy7KLm47JZRaNjvy6AVC2C9xoU1Y0rjS2VIftalER0tjPHce7WIrkhz1Z6Nuc2xPryb5mkS3a+cuoYpO1bTz5zVnCFp9WjAbL7lLrnAhGlcBR6hqICKII0jXU6dPs+8kOcLSIxYcqbjDkxOjgkcOH4Hk5GRHJF2I3bHl84UuqtAh4tJ3UmwLaFbVsoBlAT9ZIGD7prWYxNwISpWrhLtgbsPaZYv9JDpzidG/GXtTT2mCvnbtGubA/AE9nuzMEoAX//UP253VtlULdjP/depMloi5c/ceaNOqGTpBl+A6hus7dWiHE3UK7Nq7D5Xik76kH+XC0G6oRvXrwr79B3Cr+R4wQ09cIiJysxwbSrQuV6YUW0ZKvX0bvp30M5O3YeNm3NZeHd7v9zaswGW36lUrQ1vMyaGk5SVLl0mKaL6TrlxfvJIjIxZVWxpLKssh2ofJ24kXL8rYKbj0unP3XrRLc8ku6HB16vi4ZBf8hZ8jNFTG9bxCetpvdob5qYanT2cYUZ+FQ48Tnk667J+PA0MbQI9YA64AKRo6zFU4qqaSSLvz6tWrktOjRJZb5PCUKV0Kl0HDIQ4fo0DPsgrB7zl914wVu1x3nRfL4TFmaQvLsoAvLRBAz+c5e/oY26p+66bxX+zeVOqN5uvh7ZZrGUv71OJNCc55mXF4zODqSaUxTpk+i4Xcn+raGQb2fZuh3kYHg3JeZs+THsA37Ksx8P5770C3rk+wX4iUXzP4ixHAl8dE/kQ7f+Fi6Nq5A3z2yQfQvuvTYIY+7U4ahIaEQJ83ezG2lAezau16WLh4CT5D5wSD/fXPMrYk93jbVrjUVQtoWY76ly5fyfopGpSGOwH5DfQOLotRRIuew3MH+VNJwyRTXhi+rU11Xuxj6QifDXofXundh3VxnGEjRsH7/d9Fu3SS7IKRr8FDv2R2CbJFwzgu
EYpyuAyuo72tV9P4RmqA9Kh9AzehgFNUp5021fVwNOAKkKKhYwYNHA2Qs89K5S8r5PD/VbqSI08OD5Uoip5iuXbtKhw8dJjVXb+54fzamJJ8y+FxbWELw7KAPyyQJSYmRnOa8YdwkhFUtYeuqNSdM3X7vN3BJ0iRrwhT1xMwH6B9lx6YOHxAJHFet1laNDg9BDBf3rws0kN5M1pbaGnZi5aSxOfa6AkKwDwgmmDFZ/qYodfjK8KJf3h4LnYsRBomhMpFHJgMdL9iH8ttTSa642J6eFkZTQ18AXRTb0NkhpBwUM7wVH2qpnNaspcDgWRETbAmUML383vFCo/A4j9+hwIFpBw07sTwK1dHbFPdcni4ZayrZYHMYQH57K3MoY5dC1+fu2WXJNX4BMXhopPDYXTVg4s4DnXV3E2/GTmI8m7I2XFWKPJhtCicEBuRlKPl/i9Vu2xJa7LBlStX7IOwI3hY41aR2GiNRRRwG5fz7EVJa4ebrXmLj1m5LvBNqWUK2SbYFY2t3ymas04nfQ5dDgAXxvFHt/P/H9HZ4dpYDg+3hHW1LJB5LJDhTk964iHIlq+sg0V8fcI6F6jnxKidII4vXmkbrEN+ioigU8+YKd0LUr3Awm1PSSFb0dCxsh7YE1o9nl6Am1LLFLKGcibp0cl1Xpz1O+lz6HIAOBer2+stPqIAO08jDg6fP/j8okUjcrfqlgUsC/jHAhnu9KTF72Uj5Y4PRXjuXsGdGElH/WIBPhnxycmcUOe//vR4iZEePZxMA7fP9W6o5CaxA5kDwIAu7tAYYOsJikuVXCK4Id0NnozECJ0zHGd9OAxFt6LhxhiJxBs83BStQcbnEz6/aKBYIMsClgUywAIZ7vRA+m1IO7udvTJg/IZE8glMjUx5Nu6WTOv4uH3vcJtQuF+5y8NdOqOfnhP+TrqMcvcMzxsKIA/TbPQI9OC2UcrdcsWz4TtTXBThwf+qWkGaD8iZER0aqnO4iM/nDhFX7LfqlgUsC/jXAhnv9Ph3vJrS+GTFJyiOJE5kvI/DCMdl1J8zUl3FuVjVlTFN0wqZJlCOi5F7wsMTWpsqMgu5otQxU7R8oZuNp1usnRE560Njyt1yxQML6/DQATNBzvq4JgYdI+7A8HmDyMU6Z0dXjivCrLplAcsCGWcBy+lB24uODP8ouJPD21o4BudIzkK+ej/KY2RGl8VLFQUJ10hvRApkFSMDTXe9Q5m1CfmaqJpAmbv3Kv6S40xjJzo46XLGUfBYNNBcMcV+VygaXB1BGkw0QI50JiAG+ek5OGpJfM6wHB+1Zay2ZYGMs4Dl9KDt1Q4OfRx8wuIfDcfhVw5398rdC2meNTjbuiuM0+mK4R38ygncvHrk5JjQQRdVt8PAgDyhNcDebRQTeplAda6OM0bO+pCrR98BUStBjlAVMfTrnID/t+ljuuzhrBDRjBPD5xEzNC51sRAsC1gWcNsCARF5C0D5yjUgb76CsAdPWT9+ZL/bzO5XQj4xmdefZkJhNjTPgFHwOIub5M7JPFfPOX8+frflmCR0QHcAuNCXd7tLx+nNXH0gy2sszTLSwZfBcsWMgVS4Nh5usdIi0oKpRGo2zTlL6ggQ/4FkOTyaxrWAlgUyxAJZGzVvx05Vp+3X1j+nuc/A3alULcVbfGS+xJC/ZKA3Kpyp7Uq/5jnIEHuOLF5dEIqoVJcF8g4X9A74RunUfDmdO1c1L4NtZ6IMsnC0l5qpUUY2OjU5b8t2NspPjccZ4ZVX1Si6bU5AV28Wka82b+7YkFT1/EltNcyb2lm8LAtYFjBvgaz/Lp4Dm9cv13wSsHl29yeFOHGZGYG534HanImH+NLGIqh6AsY2czo0rlq4XoGp1NBXVugR9RbAelURneoOeusRcrgDA97h4qqmU7ddkLvTrRahbit4qjuNthVMzDUU3y8kJZEOn03ZhwAAQABJREFUhevh0GEAwGkFxkLVNQNO7xrTmxjifOHMqSE8EdebOli8LAtYFnDPAgEZdd6We+r6hoomLqOTkwIX6bxT7DM952iH2CQ4ALwj2TAXQ/INITmKVJApGo64DhBH/KCgIHbStngUh5LMkUbZb2u5QuMfFqHr4up2aIr0D1Clk6rpWgfTBM4MJIkzzNIwouthuInBHR1+5WxoDhFhirmCI1lXywKWBTLUAg99IrN6ohI/Da1JS3SO2BOZRQKXdeWEXRDP8alftw47rTwLngJ99OgxWLzkbzxVPAmy2FCVFM4FBAYG4uGe6ewEaeeYjr2tWzaHUiWKsw4aYxwe4rl37344eeo0cyCUFGa0kihHDPsMbuCJ6J8P/0rDQTDDzxGXjfsujhuP9KDTtGN/+ZGdwN77LemgUqXuQsuRldCpVxWIhKoetm/gbgh2g8RRd7NMDOAbQJH0MIzoqLaPIOq5Q3R4SKS630dqWGwtC1gWMGGBh97p4Y4Nv4q2Ex0cDhfxcMWeg51ctSfrkuhgfD18KISFhsCt5BTGqWb1qtC2TSt4/6NP4fiJk4wnSdDmoBQZnisXzJ0xGX6JnQ6z5vym7DTQali/HtSpVR1Pfr/DDj+lg1Cp7Ni5Gz4Z8gU7Ed4AG12UYIy+yCtVDMvIqDg7fVw27plT4ZcpU3Hc89BBS8dT4Y/DpUuXObExA9qxbTV9mQ6oHgO8KMuLrKRhucPQAI0BFDc/OJ3Pz8j/qscfpMXAsoBlgUxuAenOlsmV9LV6oiPjSpbSEaKZ29VLm+OrLz7PHJ6ZeKPu/uzz0L3n8zB91lwIz5UTunbuqCCi6AVFM4wXpU5Zs2ZBevJvlXB1O+7sOXi8czfo1O1pGPLFCKB2taqVoXPH9g6iaQlJr5CupLN24TpIiZ90UrpeyZ49EB0wOx/iK7Yd6Yg3wIefDIaRY8bah4cwVzak70BQEOli1494eVY4L1dXz6TIKkvD94CZlp5m2RlQwiUK18OsbI6vJcBTnpw3fkMwEmoVywKWBe5PCwQEh4RCQADepLJkhSCs58gZDpTnQ7+YH5bCw9BmJzN3p76Q4GCoUrkS7Nm3H6ZMmylPotNmzoYK5ctBieLFmenpJvzGa69AvTp1IBQjQnv3H4Cffo2Fw0eU55LljYyEiWNHMpoeT3aBFk2bwMuvv4k38SBGX79ObaQPtdFPcaBXfM44qJSUVFj/30Y4hUtb48eMhEYN68GsuVL0qF3b1tD5iQ5QNKowHMHlONJ/85ZtjEWJ4sWg1ysvQsVHykMqngy/YeMm+PaHn+HmzZsKEaQL4TVqUB/CwkIwqnUKJnz7A+zD8RHvZ5/uDnvRNrVq1oBpM2bBps1bcByvQtmyZSAtLQ22bNsOY8dNBHKYJo4bzXj36P4ktGjeFF7u1RuGfT4Y4uMTYNyE7yQbvN4L6tcjG6INkO9PP//KbFC4UCEYPfJL2H/gIDyCdo/IkwfhR2DI0OGQmJio0Nl1w91vg2vOCgxTYkwhK8SYbxiQ5RTFaacJdVzxEftNRH8EVHKQ1YX/cFL38blFjW+1LQtYFsgYC2St06gFNH+sC2TNlg1Kl6vE6nki82aMNhko1azDQ6o6Tn3
GBlCyZAlMeARYtWad7PAQ5d27d2Hgx5/Ca5SLgv0D+vYByrVZs249xOLNPya6KAwbMggiIyIEQfcgOSUZ5vy2gMHIkZo1dx6rE30bpF/tlF5gpapSpGfz1q1QvFgx5mCQk/LOm6+xpaNJv0yB7IHZYdCH70OxmGjInTscvhw6BEoWLwYzMGK1avVaaIlOyJuvv0o/jRWcu3TqAG1btYAVq1bDr1Om43jywAcD+jGcnDlzQI6wMIgqUgSmTJ0O23fshIHv9YXo6GjEnQpLly2HRrgU91T3rtK4581ndHv27oNZs+eyQE1kRCSQI0hlwHvvQhuUtRptHTtthmTDoYMhMjIPBAdnZ44OLe0t/nMJLPlnKZQpXRoeQ8fOeaHxqF/OKUz3qtnztoIRB+pdFcg+bJB8F0UXhevugt5Qt64QHWoTsgXWWnOFlnNDDpDaCdJRxAJbFrAs4CcLBKxe+oefRGVeMXxi0prM1FoTrhE8NZ3YLlyoIGueRaeCCuW7jP5qGC7BZGNtirIN+GgQ1KlTCx2DNTAeoyBUjh8/CUMHf4RRoorMYWBAfKNIyrLlK6HXS8+zaM6yFavYcljdOjVhuYL+BNJ/zKJM5HAYKefOx2MkMBvkz58PWjRrwkh+nhzLcnzi4y/AJx/2x4hMdYyMXETHJxdbFqMoEZVDh49oLkfN/30ROi8r0K/LAjlyhEFdHOcj5cqyaBYjxLdPBn8GZzGZmsoH6AiS3YNDgtmSYId2j0H5cuVs414BvV5+kUVwyAZioeWwuhjlWr5yFYyf+C3rOo75PkOHfIo2qAwnT55kMHKGps2YyeqVKlaA2hhhmhI7DdvCnY71+ujNpRiXCD5SzBVbA3ppomgCXQlz0u8pP06PvzQMFD5fGEBlc4UZfCM8LRzLApYF3LdAgPukDwYl/4WmdmTUbW+O9sIFaekkXz4popaGO642bt7KbuwN69fFyEo00NJLIDobR44el0UfPSbVi0YVkWF6FXKsAgMC2BIUxzFDz2mio6JYcjMtF9WrW4uBx40ewbvZtSji0JIdlcPo6HBn4Z9/l/Eq6+NvVSpVhHffeQPzl3JxkMOVnC1eunfrAo+1ac3Gw2H58kqRHN7Wuko2JBvYlwOPHjvGUIsWjZKdnri4OJn8zJk4KMScUn4jlLu8U3Fg6wDwjhyfctHQmUDkM2h02VVx2mlHM1zzFT8N50cD5EpNX84hrmRb/ZYFLAtoW+Chd3r0Ijd6cHEic3fKPXHyFNxJS4eOj7fDKMRqlqcSO30mW0IipycJdx6dPn2GbcGOxpszLzHRUv3ceSkCwuHiNQCXKakkJFxg9DECfbQBepFXZXRO6tSuyRwnyqXZhLk75TGvpueLvdjDLMlGeXLnhmvXryNeDUZavHgMbrm/yOo1q1fDsaXDzl27RbbwwnPPsl/A7w38EI4ePwEv/O9Z6Ni+nQKHN2JwWYvstGHTZsxnmgzkME7+6Qc4feaMdIO1fQh83JyOrgkX4nH7fjrEFC0qg6Nt9XO2KBt14CPk2LuM5LUK8nX3S+I1HTxkRPrLzoyLweh263a4oZw3eemJd5Qh/t/rUWnBrSiPllUsmGWBjLPAQ+300ETGJyV+NTO5ufHjj33SV69dg9/m/w5PYQRjJG5bp2fzZENnpW2rlkBOzuSpMyAlNRV27t7D8lGSLl2C6+hY0C6qZNzevmvPXodvTHJKCqSm3sbk4HosIXgHOho7d+9G+pbsmTXX8Rk5nZzQE0NK5H0Mt8wHY9SmXNnSULd2LZaQ/O0PPzF5GzZuxm3tNeB9zJOh5bHqVavgFvuWMAaTirds3Y7LTbfg3bfewF1oc1gU53/P9oB1uNSldnrS0tOYg1ewYAEoh8tatE1fr6Sn32FduXLmhBLFisEzPbqznKYTtqUpadypmGzdQBr3zp2IL920KCF7J265b9O6ld0GmChNOVBkwxyYRG24EEv55q+mcrxJqjHum7bWUPD/xL3iLp0ozRs8RH7u1cX/dXHeELlpwbVgIo1VtyxgWcC/FsiWO3fuwf4VmXmkcUdHSyNXzg/l0ZQtXwEuYC6LO2U3Jt5S4jIlBz/aqAE+pLA2hIeHw3RMxqWdUiSfdkVRYnDzpk2gOj7D58qVq2wrNkVH1CUdl8jIWaH8mCaNGwFthSf6Evg8IKKvYaP/GrdyH9Ogb9ywPpQsUYzR16xRDfN4AjCJeTvbVcWXxWi3FiW8N6hXF1q2aArF0QlZhAnAszGZmOxxAHdBVcVcmVYtm7G8oV2798I3Yyeio5YMrZo3YzsCly1fAQm4VFYfeTTEsVM06UJiInOSaNxly5TGfJ2ymAw9mw2RokghuHONcGlctCONlgPP4/IXJSfL40b7sXHPngNt0ckhmStXr0EbbJFs0Kwp2qAaXLl6Bb4eNQaO4TJXDkya7tj+cViJDhwta1Fp1KAB7igLhT//+pu17W+Z4+Zr18dVzYi+eCt326FxJt+IbGf01OcNHq5kGO/Pnz8/Lt0ewhy0HPIPJaIW5xCxLvap4calWpiWBSwLeNsCWWJiYjLX7OLtEbrgx3+J6Tk5HK6+JiQkQHt8pg1tI/ek0HNn8uXNxxJ+49mSlOOjAuh5NTRxpmLkghe9D42cFcIVj2Cgrd2MHqNHukWPoQOBFB0jB+3GjRtsaU6NQo4JqgApyVyeI3PSh5wLik6Juurd7CgpOTQ4hC2l8c9Ckivx1hq3qJdsQ2c2IALGzlFfkVfmqruhqxskxsbsKWNP6Y1p6Q5WhQoV4M9Ff0ABfIo6fXd50avzfrqKOCLcqlsWsCzgfws81MtbZG5nE5Ly5ip9OITP4d6YounohHh0oJyV2/jMG6OFcm/UxSW9oYHYkWj8V65cUYuR26myY2GnkTttFeJx44by+T16Dg85InfwSdFX8aUsdv5a4xb5ObWBzEauKMVkmpab+rlJZnzYngrwlN64pu5iCn4O+/93Nm+IMoziiTRW3bKAZQHfWeChd3rItNyJMWJmEdf+e88Ipbs4wg2BBNqaQtVdxgbpBPkGKZiSpsh0kHXAshGc6qNLrKTyyfKOUoS5llG9zXH1PrZBPZ0K9gYPpwJ80qnnyNDcwPv4lc8XvO0ThSymlgUsCxi2AHN6gnDZIB0jBGlp6l/Shvnc94g0KfEJigbDJzARluGDVN0jfOv4qIQZHrwZOie4ul26HTYNXfVzNIN4hsdtFNGFXBfdRqX4Ds8bCnqDh4sRchHe+mXC+TkRKzo24rwhwp2QW12WBSwL+MECAe269GTHUJCspMQE2LllHZ6GfdUPojOPCLXDQ5ppwTJcY194OV6LdAh3BaGqbTMXCJrdmkCBvat+G6rXxiuIVlQN6kE0JlAVIpw23GWq5x24y09LSW/y0uJvg4lieF1veE7YKLoM0PMfSpwuU84hXDnralngIbVAwIHd2+Ds6eMQmD0I6jdpDeUqVYet/618qMwh/iozO3BXcyGfc+18RYh5avW2ac5B5GqXpa4Zw1JTOW+b5WkWn6S7onHVb+NhBE13sB4RS1y9wEKpnjcZepOXUkvXn58a38
22syHwPv4PY1YEp0c6tXPDWWlFdLRgHN+6WhawLOB/CwQcP7KfSU1NTYEzJ49BqbIV/a5FobKVoOugiZC7gPTwvbj92+HXt7syPXLmLQj/Gz0LIgpHsy3ex7ashtkfv4ITz11o9vIAKN+4LUz8X1OG+3i/L6FQmUowqZf0oLvi1RvAU1/8DJvn/wp1urwI2QICYec/82DR1wMwlJMVOn80FsrUb8G2Yd9OvgVzBvWCU7s2QoES5aDr4G8hNFcEW/bbs2w+/PPtUA270EwozIYaGDTH6mNo9Tin0BDhBKTF3wm6qS4d3jpgZ1ZQiHWgdwAo0F3ztdG7YqPgagpZQaloeImNxNOrzBRq+q7hJ53NiBFx3XWADBpM/DFlOT8GjWahWRbwsQUUicwFCkXBpaQLPhbpyP6xd4ZCYHAoxPbrAdlDwqBkzcYyUvehk9AZKgKrJo+GfMXKQIWm7aFBj9dh3YyJkCMyP+TIk0/GzRGRD3IijBfiFYARLHJ4di9bAOcO7IJc+Qux7rZvD2EO05GNK2ATOkUVm7VHXnnRF8oGz46cAWnoBP41bhBEPVIdKrfqClsXzYCkM8fdWvYy58bQrOyCQqNbA8TNkAmu4p3GjDrO6Jz1kQyhX6i6lm4K2ZGdh+QSQ68wcdTNLxA/6+6JOKKlfxyTxYwDI+LqRYhMirfQLQtYFvDAArLTU7JsBQjPEwnL/pRO6PaAp2nS0PA86ExkhTvoaJzatQnIEeGlQInyLPqydtoEBipdpxlUaNaBOT0cx9V184IpsOyHYQq0is07wPWL8RjdeZXBT+74j13L1G8JgUHBcGbvFsgXXQpSrl/BaM8dqN+9FywaOVDBQ5oxjc2adqdEa5bW4mGnUAmVmpxEi51M4IIH4RlAkdnJFadCZaxMUTGlqilk5fA8IFU4aEqu91HLIwPcR+PET0vYpWVGcdEBMkNn4VoWsCzgPQtkJVYFi0RDxap1YPP6FXDzxjXvcTfI6e/xgzHCkgVemvg7fPjPYWjyfF9GSY4QPQE4bv8OmdOVhDggJ8lM2b54ugM6RYFO79nC4GIYOj9Gk6iEFywKJWs3gVJ1m8HVC+fQ+ZHsIuIyRNNv5GWoX3pMCM9FsaHo33K4LCd8DIhRUpsmUJIbaumPyDW5J7SuuTtgmBJHyOqXA8f7BCCOIwNUNmV3Df388TVGsZ7PGRq6WyDLApYF3LJAQGS+AlCnYQvYtfU/OB930i0mnhJRZGfkE9WgYKlHoOP7o6HRs2/C+pnfYeQnmeXu5C9eVhaRM28BuHX1Mmun4RlK2fBJvbzkKRzDq4rr9YuOS3ZES/Ko0C8wPjFdjj8NWdDRWvb9MDi+ba3Mh/eLuHKnwQrNsZ7O05qifMZYU1omABq0okE0aUCmkO02cEnmEsHOyys1V/K8cad3JcMrA7nvmNAcIUZz+Fwhwu67QVkKWxZ4wCyQtUHTtrh76wRcvHAezyIKZy9/j7Fxz7chV76CkHDsIJw7uIslLFOiMpWrCeegRI0GLLemRodnISRnbji1cwPrO/TfMnR6skOZes2hNEZkIosWZ3Ajb6d2b4a8RUuyZStKaq7Q5HEoVq0eHN24EtLv3IHmvd4HivpkxWMdqrV7CvvqM7bc+TEiw5844hNjteW6uNm56Fby9PVNzxP+Ei0d4EovKnRekvMi0dDxGXTchV5x6NdVkzr4S4+bt+FG5XE89dWVPiK+K1w/9JM6maiQY6N2btROUCZS11LFssBDa4EAWkIqEl2cvbgVFs2dzBwP3vb1tUb7Z+DR59+lODDcxcMktyyYDGm3pXObFnzxDvQcNQNeGP8bU+NK/BlYattJdXzrWriWeB66D8VTwJH25pUkTVW5AyV2LhjWB14cPx+avNgPHn2hL1twmv/F25B66wb8Oep9aNtnKJNJc2v6nduwZNynIrlUJ0fBmbOgMTETOgcXxHN86tetg6eVV0a/KyscxQM96cT1xIviOEQKRxVESCCe0XU3LR3S8SBT7aLNK1++vNCtcydtEoTOnjsfLiYZ1ElbhC5vsaN1qxZQqkQJBqIbRtzZs7B33344efKU4e8jHbr68YcfsMNF9+BJ6jNmzoavvhwGv0yeArPwMFLHIn0aWdH+sZN/gaSkS9D7zbcYGjlAdCgsHWrq0M8/RJmhA0Du8V3FmzK9yct3I2acM1hVtXNDOqkdHMIhGL2oaNGwDuvNsoBlAb9aIGDh7F/8KlBL2JhudSB3wSgIwVyd84f2KFBo+/rwtuXYNvJkTCq+lhgv95MzM/ap+pA3uiRcPn+GOSdyJ1YOrV8KnzcvLoLkeurN6/Ddiy0hGKNbeTFhOeHYfqBt61T2rVrMXpFRJdjy2YWTR9AZS8OdYNmxF+/qOI8ZmsTIAaCimqQJTKeffz18KIThCeK3klOY71QTT0Jv26YVvP/xp3D8xEmiNFzCc+WCOdOnwC+x0/GUdkxGV8m0MyLpys6owkXYyeSEQwdz8nLbdtbVvAULOch2VdKrOt1uNqxfH+rUqgl0Tla2AIzU4E46Kjt27oJPBn/G4GrdlcLuQUU8GLJ2zZqwZu1amDp9Bus+ExeHp7KfV6KqWuTcHDt2HC5dusR6yJ5zZ8+UnSVFPxu+b2ygUkujmVFyNVTJCFBGDZ//Lxscs+XsGDSUhWZZwM8WkHdv+Vmug7gr8XFAL72ScPygXhdcPH1Mt89VRwo+fTpu3zb5F5mIf/GMxJdPYK/98i8mVgdgFOo2jH6uFToqBmdCjiZM2K+++DxzeGbOmQczZs9DJwqge9cu8MxTT0LXTh3hq9FjZVUoykBLNcrTyOVulxVHelLIrsyOXbvg8c5PMj4N69eDQR8OhHf7fwD7dE+QV9JzBUhHcgbT7jgeeko4tDxkP4yUUymvcWfPwYuv9ILg4CCM1lSHl154DqpVrQKdn+gAs9BWYqHT49UHiebNG8lQZsyaDadOnWb1l5CfupCe5OCJ+nz48SdqNEX7w494v912jrZVkDB7kJ6iHCWG0ZZdplGKBw4vI00gyKb5gL4/4g8fqnO4aHc+d4i4Yr9VtyxgWcC/Fsg0To9/h62UxicrPkHxXnEio75p7z3DIj9Up+3uGLzmqMauNl8hBJdgqlSuBHtw6WbKtJmywzUNl2IqlC8HJYoXZ/yCgrLDG71egXp16kBoWAjs23cAfvp1Chw+elQhL29kJEz4ZhSD9XiyC7Ro1gRefv1NCMJnFL3x2itQv05tCA0Nhb3oxDD6I0p6BTNVg5Z4Jk/6HjZu3gLjv/2e9ZYtUxqGDPoIJv38K1BE5OmnusH6DRuhedMmeH5bGqz/bwOMnfCd7JC0a9sGOqMjV7RIETiCuk+ZNgM2b9mqksSbkk1TUlIZn1OnTsH4sWOgUcMG6PTMZUgUJXvt1VegUsUK7LT3pf8uh8mxsfBEh47wv57PMJxvRo2E1RjtiZ06ndGPGz+BLZONHvU17D9wAB5BO0fkyQOHjxyBIZ9/AYmJiTBs6OcQHx+PS2KzYOKEcYxPj+7doUXz5vAyOk7DvpD6iRc5cG/0fh3q16sn2XbfP
vjpp58Zv8KFC8FolL8f7f3II+UlOYdJzlAmh4/U9dXk98s1w/sTI5OZgTswfN4go4p10cgcV4RZdcsClgUyzgJsy3rGic8ckrlzI2pDkxi9eCEcikQlnTnBXvTsHrcKOj4lS5XAX4kAq9asU8igJZSBuLT12lt9GOsBfftA65bNYc269bhUMwuio4vCF0MGQWREhEJ0ckoKzP1tAYORI8UjIgP69YE2SL8a6WNnzIIYpB8m05MH5qTYuim6dOr0aXRoHsXICC3vAatH5MmNTtR+yJkrJ+TCV53atWA6RlfWb9gALZs3g7d6v8ZwGzWsD++82ZstG036ZTLjMeijD6BYTAzrd/VGkR9ykIqjI0jyw8PDYdjnn0F00aIwecpU2Lt3P/To3g26P9kV83/2wcaNmxhLWtr6868lzDkh54acPsr3oTotoy3+8y9Y8s9SKFO6NDyGThmVyMgIyJs3L5A959ASIZY9e/fCLBwXRcZYPzqYVAb0fw/atG4Fq9esgdhp0yAmJhqdoqGIEwnB+JwnJqcByln8Jyz5+x8og47iY23bMlr9N/q+iS99zIeih5siMwyW/mFtRZwXOEzrqjWvaOFZMMsClgX8ZwHL6UFba01i6l9oHIdfPfmIChcqyMjP4g2dSjBGDb4dOwq+H/8Ne1E9R44w5kisWLUGxn/3A8vTGT12PHMwKEoklps3b8KyFSsZiKI5y1auYruQ6tauCcuJ/lukx6WhUd9o04u8tOp/L12GTkMI1EXHJlu2rNC4UUPYvmMXJCTYHwUwcvQ3MHP2XPgal+W2bt+BEZC6jFWLZk3Z9edfY2Hb9u0Qi85Idowe1apZQ0uUJuwc5uME4NJZ/vz5WISMHIr5C36HLVu3ol3mss+vOcqhKNKu3VJO2Np16+DgwUOa/MhJmYZ6jPlmLEuWro15RGJh9ly2nIHIkVq2XKpzHIp+1cUE9OVo8/ETJjKnaNToMdJnU6UyR8Mok3M5SgeH7vBWkS0g/OCQYRlZEfRRzw3O1LIcH2fWsfosC/jfAtbyFtrc3YnJ/tvP3Ad34UIiI8ifPy/Li07D3UEbN29lejSsXxeKF4uGwoUKQSAm8x45etzGPAscxURbKkWjithg+hdyrAJxu/0R3BHGiyM9jcDJzdbWvWHTZrh69Ro0bdIYbty4gVGM3PD9j7hjTigHDx+WWySzZvVqkAfx6uHSGpVxo7+W+6lStGiUou2sER0VxZbK4uMToOmjjzLUl198AejFS0x0DK+6uOKuMExs5uXMmTgoZHNCOczVlZavJNvalwlp5x2VolFF4eSJk6yulHMGChWkI1Cc2JtRWW/oxdqMwK/cJvSFzLyFfhCJDhH/gSTCMq/2lmaWBR4OC1hOjwefs3pKNsrqOG7BTsOt5R3atYPlK1dDGu4Mi50xEyMg2XHppS4kXboMp0/jbrT0u7iMY3cOeP3sOf2dSAHoKFGhKAzRxxC9zXmJjpZ4UeTEcEFaWuJatmIVdHj8MUZ2/foNlsMj8qDlKtpeToWW0W4lJ2O+zVXYhEtT5cuVhZ4vvMy2ftMNIE/u3HDt+nWRXLdeGaNatHRGURzKF+K6f4BJx3v27mN0OXOG4dZy/DRcfiASAr8Z6QplHRIuRZjUxW7bonIXLT1SOXfurAyT5LhUSsZ/6CsuTeUSwWZC7hwRPq+rrKsDVmG5bIqOjtq5UbddMrMQLAtYFvC5BSynB01s7Cbovc/i2rVrMA+XZ57CpGPatr74r7/ZFu22rVoyJ2fy1BmQkpoKO3Gppg0+uyYJt1FfxwhL5w7tIRm3t+/GPBN1oTyU1NTb0Ah3X9Guqx27diP9bqRvKdN3stHvwufX2AvN/q5vJn8v/Re6dOoADerVgd//WOywk2xAv3dhwcI/WE5MfcRZu34Ds+sGzLGhbejv9+8HK3DZrXo13JaPuTBjxk1gOTV2PaRaBOYrPdamNQSHBEO5smWgLkaKUnG33Lff/8gQdqNNkvFp2u+89SbQDi1aGnzh+f+xpOEP5N1Vaq5m2/eYnWnHVSNcytuHztyOnTtlJilo6527dkIb1JN9NujAder0BKPZhTbPERYm41oVgxaQv4JyxSChFprIg9e95OWgOGeOjqgNn1cs50e0ilW3LJCxFsCHE2bBG0wo3sRu41ZjN5NzM3YMHksnG/AJyhUzEdfwlnUNprHTZ+KSzR3cpt4ZBvR9h2FQmxKOZ/82n7WHfzUK3n/vXYZDmc/0rJkhX0wAvjwmsqWt2wsWLoIunTvCZ598BO27dodhjL4vdMOt8EiOUZJ4GDx0uCY950UP4qNCN3axnMLI05m4s2xpjXJ81IWWiV5/laI5d1m+zxjMP6Ly199LgR5++PhjbdlSFy3lLVy0GJbacmZEPmmYHB4aEgJ93n6TgenhhKtWr0X8RewZOgRMvHgRBn82FHq/3gveffsttpyw78B+3C02gdFQNEgsxJMKXe/Y+ijKxgvh87ZIS/akvKGuaLvPhnwK7Tt2UmzFHzZ8BLw/cAB06/Yk0+EcRt8GDxmCtr0AQZhkTYUieLzQNn6xzeG+u3r/Zu8zXZmqXF9fSRH5e+YAcSeGX7nGojNEMHGu4DjW1bKAZYGMtUCWt/p/eo+eykzlyqWLsH3TGrh+7YrftWr1+seQA8/Vmv+59DRcfynAJyq109P85QEQFpkfFn7Zj6nC+/k1ISEB2nfphlGVg05UFSdabbRs+AyefPny4fN/skI8W5Ky35B5AIZ2LZHTQpEcqejzzYZ5PDTZis/0keizSM+K0STVBCoUzhsRCd9P+AZoae2dfgOwT6J5DreIP4Nb1ls//gSEhYWyZaZbt/AhjwqWUq4D7byinCDRubALsREo6HivJpA9y4eek8PkEao2mrMOLkC4IhOBT4CGPQVktqNM+mxSRXAG1QXFdTXw7Iavy9atDqWt3WLhLhEzg3Fb0EMvFy/6AwrgU9RFZ0evztUS+znMuloWsCyQcRYI+G/1P3A5KRG39gZD3UYtoXzlGrB53XK/a1S4XBWgw0T9XWhS0nJ8CpapCKG5Ix3U4fhSh6tJk/qd34joyIh4dKCoOGDayNUP4GPIOm9aDoWCXlMlTaAsgY7KGPzxBywiMhmfe6NXbty4qdEljYpsfOWKd51pepaPbDUH42mo4hLkyETLniKb27bjUkRY5q6rx0iffUYUtR4ZoYMl07KAZYGHzQIBFxOkpNZbaTfw0NF4KBJTwq82qI6HebZ5awjmtEjHH3z49yEm/8dXH2NPWm72Un+o++TL7GDRu7gUcWzrGpj98SvoqEjnS7V+YxCUqtMUDyY9yw4MJeLfh/eFvcsXQvv+X0GV1l3gHia50jETdNTFyE7VGX86Yb370EmQI09+dsTEjiVz8HytQVC1TVdo/caneBZWNqBn8fT7bStGL9Jhcp9ucCnupOFlMCaEvTl3KOx4cr6xCDJdNy7NOGta1hr65dfsCce0zCUWyvWhYyJ4BEzsk+omNTKJLstzl05mIFS8yUtga1XJ0SHj3t+F/0i6v0dhaW9Z4OG0AEtkLlGmAuTOEwFR
MSVh364tfrXEgbX/wKWzp6DN20MgNBc+f2Xo20w+naVFhQ7iXDfjW6DDRUvVbgKNer4FjZ59E9ZMlZ6YG5YnL0QUKQYhuXLD6slj0DkKgnuYN0IOTtU2T8KJ7evh2JbV0OzlgchN+nVJDs3/xsyBtNQUWISHixatUAPo0NOtC6fCof+Ww6Vzp6Hlax9BcI5c8MdXA5iDRae9u188uIt6QOq+vkpKOruKXspQlKQY7WQSn9ejpPRzKxPYys8jFsS5GznxhyMiylDpeb98ZqSnrWgtWfEIsLrPcpC41ayrZYHMYQHm9BRFZycXHvZJofxLF6VnyPhLveRrl+Hkzg1AZ2AF4hIb1cWyfNIICArNAZVadoZAfKIuRXsKlbU/AI7jTu3bA8TzuZq80I85K9P6P8tQyGEqVrUeq5eu2wx5hsGZvVsgX0xpoINM6VT3huhM0anup3dvBjqQNBs+hO70ns2MRj+SwTVwdTU2u3sTy5VGPu03NhBtFTyh1eZoAKq6GRugUHmBhigyJ5LolHhTQ9GmYl0lI0M+b5UOrpqC+lqOjBZM7QC5EmH1WxawLOB7CzCnZ/W/f7DkvOp1GkPdxi1hyQL9vA3fq6SU0GHgSKiCDk8KOiE3ryRBFkz4JedILOSwiA4P9RWrWpc5Uhwv/ug+2ekpUKIcW7IKLxAF9KJyNfEcpN64xtGl5Rr5IWkymNnJfQfIP7O7SymaCJpA+8Af+Jpq/KrmAz98FsajQZst3Bvw0GBcNGdnVg0/4ptxZrScIT+qaomyLGBZQGUB5vQQjP45z+MyEy1x0Rb2lGTcgePHIk0OylMxsuLOGXJ4dvw9BxaPfJ/l2Xy09LBi9wSpSE6PupzZuxWiKkj5O9SXv3g5yZHB+pX40wz93++GwtHNqzQdmXuYYJw1m5RnxJA13vg8rdHlBOT65uCA4QBwwt6XXT7VQ8Vc1TQ8LHfplGt3hsU9WIjkcZAB9Qr3SLSMzPv0aA3CtVgbJPUpmjOz6Ah2/8eRDkMLbFnAsoDHFsiar0BhTCIOgPDcEVCmfBW2JJSKD3/zdzmzZwvu3ioIRcpXZc4Nyb+Hz3xJx2edhOaKYEtcHd8fiQ6K0jHS03PHX7MYbudPxkPFZh0gBiM/PPn5MObtUHJzK0yCLliyPEaPskGtTv+DEjUby+zOHtgBOXDLeqEylWR9mE5C9OceTYTOXjI3X1TcmIV9ocaDzNOpib10k8909lOPS90mhbVgXhwI/5/yIktPWQn/9qZYmYkKmWJsIVsWsCzglgUC6jeRTpgm6iuXL8LGNf/KERG3OLpJtAWTiCs0aw8vjscH8+E28h9facuWrLb9MQ0dkudgwKI9LKJzGyNQRn5BUXL0wXX/QIVH27HX1cTzQEnPVFJv3cDn7/SFx98bAa/88Cdb6qKJfPGoD2Ttty2eCeUatYFnRsSyZOpf3+oCiaeOyP1UcXpP5Aia9wei1OwgKlZcY3BM7atLek0ETaC2AK9o6YQ1dZlVR5edB4yIlIrzj0vCybB3byvH+WXw4DNYvPhxclUIJkWlRYiEqQXXgol8rbplAcsC/rVAluLFS9wLCQ2F2/jIfXoqc2Ys2fBMqrxFSzjk7RjRlXZq4SwFz46cBjkjC8C3zzdXkFEic1bcLk85QfdwazovaseK2qQHPYWZbgln407bHk54gJPoX/k9xAFDt4NhOvS6BpiT4MCPyDWBdr4O3Q4AO67MzgWOTKGD5/Rnth6NzNRW0cGT0Vz1I6KMIldk6oytZDZ9fGQNbw6T+SyOjoue5u48nJCiPJbTo2dRC25ZIGMsEEDPoLl5w9jhjxmjIkA6OmPqRGUjurw7dzMknTkGQWG52DLWvz8OdyC7cFLKEVI7OTwsLcJf//VfzPMJYM7RqJ4tHXjpAmhu9eaELQvyGWNZgncq94ueLkb7gAzDxSgzb3cG2t+df19x7si8RrU0syzwcFlATmR+EIe95fdYKF69PiTFHYffh/VxWJ6iMXPnRmv86klr2ntPS88BwgcjXrlwVovEJMz5LO6815go4kFFc9LWFKAJlJjQu4vuQNzmLx6BYSf0oOZCpgeczZNmJl3Ma3//U2SQ/emoEV5oXnA2b3A8uhrFE2msumUBywK+s8AD7fSsmz4B6OWqqJ0bPfwr8WflfCJ6XpCpojFZF8RzfOrXrQ3VqlaGrJigfeTYMVi85G+4eDFJm7UGDxGRHA6K3NGhn2ZL65bNoVQJ/jTue+xU99179+Fp4rtdsJKUKl2qJHzy4ftQsEB+GDfxezw5folE50JnF8wzYTe6j5lqTJrubCa0m3GV6HucP39+dnirpgOdwfbXc2REZ4jj8LmFt41bwcK0LGBZwBcWeKCdHjMGo0mJT1BExycwEWaGnyvckiWKw9fDPodQzKe6lZxMudtQo3pVaNu6FXzwyadw/MRJVyxs/dIdIDxXLpgzfQr8GjsdZs2d50Dr6j7RsH49qF2zBiSjLnTIZvbs0nb9nyfHwux50qnvDkwFQPt2jzGH5/tJv8D6DcoHTApo7ld1B6Db4b4sI5QklsqD53NI48qgd3J4ypUrh2cBBkFkRAQcOHhQO3Lo74/dwOcsOjbivCHCM8islljLApYFbBYwtv/7ATeX2uGh4WrBPDYDv1Eio1defJ45PDPnzIOnej7PXjNmz4XwXDmhS6eOsigioZPE6WbgbuH0gng7KwF49tw5eKLb09Cx61MweOhwPL08WaELJ6IbkrrkjYyEuLPnYP7vC+HyZfvBotnoYZJOdKcT4LPhIwN4obojvl1J6hPxOR1d+TgZzE7Cmtp8WRfj5yhT6nP5rpLjEt9C0LUAOdvk8NCT4Xft2gV38FqubFmN74MuC991GPicRUeHFLGcHd99HBZnywLuWsCK9KDl1JOVu8Y0RIeTZwg+UbpKpYqwd99+iJ0+0yb/HkybORseKV8OShYvzlgFBWWH3r1egXp16qCDFAL79h+An36NhcNHjipEkcMx4ZtRDPbUk12gRbMm8PLrb7Jfy28w+toQGhYK+/YR/RQ4dFRJr2CGDTr5/b+Nm2Dr9u3QuGEDiMiTBy5dvgzt2rSGzk90gKJRReDI0WMwZdoM2LxlG4z8chhUrlSBsZk1bQqMGTseduCy2Juv94IG9eqiHtlh2/adMHbCRLh06TLj8ezTPXD8+6AWRpemzZgFc39bAG/2tuFnD4JtO3YI+B3hmR7dYcu2bdCwfgN2M1n6778wdvxEJpOcsN6vIS1Gq8LCwuDgoUPw3fc/wuHDRzBilR3efON11KOeTQ/kO34C6nEJcuTIAW/0fh2XGOtCIEa29uHnMW7CBDhzRjr3TW0X3bAO3RANRAIc+XkKyRChniqtSU8OT/ny5fGQ3zT83A7jEm06HMLPsSw6PeXRETpw4ABzghTEaPecOXJC4UKFFGA6Jy4lJYXB6ftw8tQpSMXdqZH4f1IAl5QD0LG+cfMG0OG5JMdQEUxN84WWQ2MUZkiehWRZwLKATyzw0EZ6/OroqD66kqVKsElz1Zp1CofrLjob73/8Kbz2Vh9G0f/dPtC6RXN
Ys249TEXHILpoUfhiyCAW9hdZJqekMqeBYORIzcLoEZUBfZEec3UY/XSkj5bo8+KyAd2nnZVcOXNC8WLF4Nq163D16lVo1KA+vPPm68z5mfTLFMiO2/cHYQ5PsZhomDf/dzyQ9CwkJV2CXyZPYUtz7737NrRp1QKWr1wFc3B5rEb1avD54EFMZE7knQNvRlFFomDK1OmwfccueO/dd6BNy5awfAXh/4b41RH/Uxmfbm7FY4pB7NRpcBSdtnZt20KJ4sVY/4B+fXFZsDWsRTtNnzETb3aFYfjQz/HJ4sHwXt93UY9WyHcFzMFlvxo1kO+QwYyu25NdoXnTJihvHvw4aRKUKV0aHaTerM/0myuDmmb48BAwh8cW4SGHlTsi9P9Ajg/l9ZRDh0grGkdObXju3HDz5k24dv06exE9bTEvXKQI6yP+5BiXLFmSOT8XEhMhMm9eKFy4sE+NTHMMf/lUkMXcsoBlAcMWeGgjPeKvMl434ggRrhE8Z59A4YIFWXccLidRCcYJedSIYbjMIvmgNGkP/HgQ1K1dC1asWgMTvvuBBRIoz+fzQR9BlcoVGZwR4xtN+MtWrIRXX3oe9mI0aBk6GnSDqFO7JsMbj/RUjp84AZ9/+jFUrlwJ4asZjL3ZbthRRQrDxG9Goh4BEIMOEulDy28U+aHoERXK8bmdehvi4xMwcXkAi9RQlKbD449BLlyaW7psBZPdpHEjFpH6e+m/jI4cNooaFcBEZ14+GfwZ0JIa6SrhH4G/ly6V8FF+44YNFfhffDkCTmMUhiJQ308Yj7JrMmerbt06sHL1ahbBIWLqr1GtGls+bPJoY9QD+f5DetxDxy8aGjcivgWAHDsqdFPdsmUrrFq9hrXdfhMdHyEy4DY/XUKfMteV6qsOiuaQU3IBIzQR6JBfvHhRFkVtijJGRUVB2TJlWHRQ7qSKzebn4+NlZ4nAN27cwO9GHJRGR5YK/c9SBO/ChQusXbhwIRbxYQ2Tb3y+MEIm4pIOYtsIvYVjWcCygHctIDs9FavWgZJlK8Dm9SvgfNxJ70rxITc6gb3XT3/D7I9fdutZPlw1tSPT/OWBEBqRF/4Y8R5H8dqVfmlSyZ9PekJ0Gjo5m7ZsQUgWXL6py6InFLIPCMgGR48dZ7j0xutF8QbgqhQuVBAC8RcuLUPxwumjo4rwe4W0KmO7h16/cRN27t4LucNzYRQlBpeTtmNi9DRGXq9ObXYdN+orzo5dJV2UN2G+3FCmdCl0Tsba8REtf758cvvc+fOsbscvzZwZGQErIj7dxKicxmUJKuQsEW1gQCBzbBgQ3w4ePMReMejgUKEIzvff0i4+u57ElyI/MTEx8PKLL7DX+fPxMOnnn2Hd+vWMzqM3uhnbxXnE6kEnvo4RGnI8aZmWlqNEp6cYfj6UXH/92jVIva3/8FRy5O/i0TJU9uzZwxx84sXLbaQ9j983yvsqXaoUPm8rG8QnJPBuU1d3nRfL4TFlZgvZsoBPLMCcnlzheSCmZBmfCPA102x44wsvUARCc0e6LUrt8BCjQmUrQSjaRV20cNU4rtrHT55iyZodcMfT8pWrWT0Wl58oVE9OTxLmvdCNnbaeFy1qd3CibfWz5yRnQUtONnSUqCQkXGD0nIZgvK5HT8tYk36ZTKjMmaBoSYnixdkNZBPm7pQvWwZ6vvQq+0VNT6bOg8sKtKSgLgkXpJvJEoyuTMTcGipBQYEQFhIGCfhLu3q1qgoSGR+jPBNtUSnKAwoL1cYXienGlYbnsxUvVkwGk83I0dm4aTODLfnnH5j47fdYv8ciCmGY30T2CQ4Oho8+GcRgNWtUg5defBH6vP22d5wekmyLQnjX+XnwPKnTp0+zzyk8PBzKYDRHXSgaeOWKPTle3U/tU6dO2yI999D5uauFIjs8OXGn4xHMGyJnyh/FXSfJH7pZMiwLPGwWYE5PrfrNYN/OzVClZoMMGX+Jmo2gyycTIDhHLvb05VWTx8B/s+gmBfDCuHl4KnocLMCHC1KhqA4dTvrX2E/grelr2NESBH96+GQMYd+FU7s3w/QBPfHw0Ebw1NCfgJ64XKhUBUi7cxvP2+oH+1f/Sejw3vztsBRPWd/973wWch6waDf89vlbkCMiH7TuPQgxpGUseqoz3Swnv9MNLp09yXA9dXyu4a/WeQsWAiUdfz38c3ymzT8sqkM5MOSYUIJwCiZe7tq9B/NcWrCk22vXb0Dnju0hGRM0d+/dS0NQFIKn4rJTI0zm3Y9LXJRIvHP3bsxnaYlO1CX23J3OHZA+WZtewQwbtPV8/Jiv4M3XXoW+Az+ADZjYXKdWDXj/vXfZkln1KlWgbZuWMGbcRFjyj7QkxXmkYI4R5elQPlFi4kWgZbxuXTphBCsGejz7HEeTrwx/507EbyHhnz0L3bp2seH/T8aTK9yZQAAlqG7fsRNatWiBOUVJ7Nd7j+7d2VLbps1bsG8HtEYbJGJ0LY7x7QrFiqEeTz8LfTFnqjo6dhO/+x5zl66wmyYlwHq9cH098lc8Ivb6kHzBkDkHyJgSzHnJgpEZI4Ucdp4LpIVPER5yhHOhYxWHEUNasg3BnC/6f/BWUTs3FNlRw7wly+JjWcCygHsWCChR+hFGeer4kQxxeujU9O6fT4I7qcnw55gPodYTz0HzVwbC4f/+hYunj7GT12lHBy85IvLj6ecFWHPeZ29A7gJR0PXTb2Hlr6Pg3MFdcOPSBdZHy150VlZujAL9+c1H0Ljn2/B4v+Gy0xMUlhNCwiM4WwgMCoVgPK7i0PplcPnsaWj+2gcQEhYOf3w9gDlTVy9I+TeeOjxcIO3aopB7966dMeH4HQam9tQZs+Xn4gz/ehQMRCejW5fO6INlYeH5IV9MxLwEaXmM86Ir0S5YuAi6dO4IQz75CDp07Q7DvxqFTkpflNGFRR1o+WbIF8MV9Px+TNuExXII82CWrVjFcnmqVqkMf6Fjkw+XhB5v2xp3k9XG6FQ6LFz0J8vhITqiJxgvw0Z8De/37wc9uj/JQBQR+vLrkXAVHT7CTb2dym4IMv6XX8H7A95D/G42/Gvw5Vf6+JyOrsNHIG3/93DsnZhTSlGBocO+ZHkdw0aMQL79ocdT3SW+mJj9JeKTHrHTpkEhXAbs2+dt1kcRhdFjvmF1n7yRsd3yXdwi8skQfMmUIi+3cZnrEUxapu87Ffpe37p1S1ess/9H3kdXiuqRw0OF8oMoCnft6jW200+XuYkOraUrkqsFN8HWQrUsYFnAyxbI8lb/T++tWbYYrl5Ogg7dXvB7Tk/RijXh+bFzYcm4T2HrwlhcpoqAfr9tg03zfmaRmLdnrGORnti+T7Gh98MIzZm9W2HOoFdZm5agCDat/7NwYrs9F6N847bMGfrrm49h26LpULZBK+j22Q8wuktNuHklCT5eehSWTRoBG+ZIyy8fLT0CC0f0g73L/2B8n/lqKjuV/YeXpVPoxQmUEBJwWaV9l+6wD7fSulVs97Fs+AuUnImsmDQsLUmR46C8yQXishfdAyiSw4qy2yZeAtJOFZpoKUeCF1o2IxhFRTQLkmqy1IASH1
qGoETRtDvcUdKmJlkkm3S6deumJFoXVeqw44s3Og0iBUhqUI4PbUemqJe6SHyzSTdQBS2wPBKKEjq7ubpQXi3OeVsl34vIzlk9qL2m7GkzAvOpJMfKiFnMHjhK/yd8ziD+1LaKZQHLAhlvgaw3rl3Fm1Ig5M1fiGmTO08EhGAuhb9KJJ6eTuX41tXseuvKJXxWxx0IL4i/xrxQTmxfx7jQlSahcg1bS1zZpHSXTUbShKSclKSWi9lUSeKWthRmp7yUc5inYw/PE2M78zv4a1d2eFxIoSiK6PAQOv1a1nV4XPBTd5MNKZKijgyp8Xjb1S91jsevhvFl89g/Ixq3lsNDvBV8ZVpJKu1+c+3wEK6KUCI3/26YjX1s5oVYFF61gOHPTJLKHR6aWyyHx6ufhMXMsoBHFsgajA5O7YbNoVaDZoxRKUzgLRxVzCOmZoivJpxl6EXKV2PX7CGhkA2dsJuXpCUcysUJCrOv8VPej1jwHswKLWVplcJlqzAwuyLyacwHokKOVWiu3FJfOcJR3mColQWkfAI+gTFk4c3kPChQ+qKaUdoo7aY9MgEno9RUK5bRejiVT/YSbKbW3Wr73wLCx8HnA9GZoTqHi8oRTAsu4lh1ywKWBfxngax/zZ8G9FqyYDqTunXjajh2eJ/fNIjbtx0nhbvQ6Nm3IDdGd9r3/5rJ3r/6L3aN278d8sWUhsio4tDkhX5sq6moXPK1yyz5uXan59A5yom/qpSJj416SnybvTwA7txOkU9av3k5Eco3bsfyg1q8+oHDr7G4fdsgF0a/CpfFw0BxyYQXcaIT5kHe7eWr0zujl2VlVnZOrOypeTyl99RkGS3fU/0fUno+B4jODNU5XDQLwbTgIo5VtyxgWcB/FlB6CP6TK0uiBOZ/vx/GnJq3pq+FRx59DA6sWQInd25gONv+mAZ3cUty7ykroF63VyANE2DVv4J3/DWb7dYa8MdueHWS5CxxAZT4THyLlK+KeUK/cDBsXjAFIqJi4J2Z6yBXvoLs1xiPGhHStkUz4OqFs/DMiFiWY5S/mONWWpmZHyq+vj/6mr8fTOSeiIweeEbLd89qDx8VOi+8iM4Oh2ldydkxiqtFb8EsC1gW8L4FsuDD2Zz8lPa+QD2OWTHZtVDpinDx1FFIvXXDAa1AiXKmHj7IE5mHtSmLkaJScPncaQe+AXjGU658hSAp7oSDPAKIExbVaQlNmsgAzsWdhse7dGPnYWkSuwKasrqELJPIFbUQ3Q41orJtI3OkdoQoCbEleooOnRygwccB5ACwEevBBd6uUDiqsyUjQ+MgRoaFyVINVWS2csUQmYWEFnDHZMyHsTsyruxoNpGZ+PEID80dvO5KjtVvWcCygG8twJ7T41sRxrjfxQTcswd26iInHD+o2+eqI/7ofk0UihqZefZO78nLIEvWbBh5SodRPVt4K61VUzctIE3R7szvWry8AvOaMl5j5P6wMp1x3R/KQ0WZCb463N5q54b/aLIcHm4h62pZIOMtkGmcHm+bgiJGRzauAHKmvFWm9nsasmKSNbke1xLPZy4HxFuDNMPHE0fBE1q1jt7i5S0+av2MtJnsTHQHN6KzhcOiwdyp4VduFnWbw62rZQHLAhlngQfW6Uk8dQRmffSSIcvyX2SukOnJ0ByXdn/5r2Tk3djTUXpyI/eE1k2972dTuzlki8ycBcSIjjPHhs8VznDMSbawLQtYFvDUAhmeyOzpALxBb2ZSEnHp/CmrZBILePOj8CYvM+bJKLlmdLRw5fwccS4gs3Anh5tI3c/h1tWygGWBjLPAQ+/0qCcq8aPQmrREfNpq778i3RGzYPDjgbk3ensg3uRHvLzJz39fFEuSnywgzgUkUj1fUL8a5ifVLDGWBSwL6FjgoXd6aFLSm5zUkxrZUDGJCdtYdex7X4GV93gjS0tGcO4rEzgq6zfnx2ZL5YfgqI8FsSxgWcCygGUBty3wwOb0mLEId3yM0Gg5QkboRBw6b2vsyK/wEMQgdho58TwTdxY2bt4MF5Muiagm61ng8cdaw2uvvAQ9er7ATlY3xIButA7+iyZQxc4IjorEaloW8IMFChQoAHS8CJ0R5+2i9yPJ23IsfpYFLAt43wKZyumh7eCVWzwB+fBBgLv+mSc/Pdn7w1Zy5JOYaYfGwVFQ8tVr0QGcpUqWYN0REXjWGZ4ATY5X9yc7Q7+BH8GFROkIDj16Z2KD8NlDgcjfneKMry4/y+/RNY3bHZZN3TYdJ5w4cQI7RX3AwIFw8eJFDnb/Sp+JrSiivQKMzyMcj65aMLHfqlsWsCzgXwsEdOz+okLiwb3b4dA+/eflKJC93Hhv/jYIDA6B60kJcAaPgaAdWP4qph0eUkyYCN3Rc+aceTBl2gwokD8/PNujO7Rs3hTatGoBsdNnyuyCgrIrDxsVbojZ8GT2rOgoqg8YlYmtimWBh9QCM2bMgF6v9oKRX38F/QcMhNcBQ1EAACLcSURBVEQXPyRcmkn4RaDlyGjBtJwjl3IsBMsClgV8agEWEli3/E9ITr7FBN3G86kyotD5WsE5w2FSr3ag9zBBX+nFJycjjg/hGsEzo2vChQvw/U8/Q7MmjaFM6VKM9LE2raBzx/YQVaQIHDl6DKbOmAWbt25lfdmzZ4c3X3sF6tetA0FBQbBtx04YN/F7uHT5skJsYGAg9H/3HahU4RH4ctQY2LV7j6Jf0RCcKQXcavjBAsIdlUuzPg9uCbeu8+cvwIeI3oXevV+HkSO/hgH9BwD9n3mj8PnCCC8tZ8gInYVjWcCygG8swJye5OSbkHzrptdv5kZV7r9wF/y/veuAj6Lo4n8gJJBCKgQiJEAgdJBmoURAUFGwgISqAiLy0QQpFkCkaRAFUaRYEJGqoiIoCoIIgnSQICI91ACBhCSkQMr33lz2srfZq7lcAs78crdT3rx5+/Yy8983b2bcy3gK8v5zvhPXdbPfwP51K0V8wLwfkBx/CcHhdeBbIQR88vrsni3oMNBG6DL+A3h4epPs2bS7ciwWv9wDKVcvg/kE0fETHmW9cOnUvygfWgOpyYn4fMhTSLx0TvDq+dZCePoGCH+W/etW4CdqUwnmgE3Ptz8XB5B+OrqvY4YenfGN2yzn44NSdLBpamoqWrW4H8MHD8LBmEP4ecNGdGjXFhNeG4uhL49GbOwZjHppGNpEtsT3a34kn4Ub6P50F0yZOB5DRoxSxBdg6JVRI9G0SWMCRPMsAx6qpS+WHHmNCi2KCKufg/7DMZTJb7Ma+H71amTS5qRDhw7FDAI+o0aNdtziozwLs63lLzDXh+SnlDlSA1IDrtKAAD0dOkWJ9q4RWNi9bRPSc60+rhKCNxEMa3gv2j4/GqumDEPGjWSTc7a8A8qjUkQDxJ85TsBkAkIbNCcfmJIElMqKw0kPrl+FgJCq6DhiCqImLcDCoU+B6zCfE7u3iENMN336DvjE9WZPPIONFO/7/td06noG1s58HVXqNUHjR3ti93eLcYXa4GDOosMgqWw5f3Fie0HHoqiuTwmQU5GcLu+7t7lod8sf29CeQA6HhYuX0NRWBuLiL
mH8q2PQvGkTnL9wUQCeo8dO4Of1vwq60CqVEdmqhZgmExn0FT11MqpUDsGkadHYvmOnkm35KjGOZf0UVal8Lg5r/seffkL79g+iTt26aNGiBVYTEHIk2Hw0m4a5PVYhTVWZlBqQGigEDbj9tWc7EgjsBNJJ43UaNkVE3UY4uNdwwnkhtKfL8uyhPXAva7D0xB7cifTk6/noeAfkef06iPy9a5aK6+Hff8I/W39BrZYdyKoTjrSkRPhVqmKsy2d5Hdq4WoCebcvnofFjPeAdWAE172snfIe4LZ5WS0u6juzsTET2HYFVk4ca6+tF+O0tPeU6UhOvOWbpUTFly87jjz0qLGxswflk4SL8sX0HAZyxgur9GdEqahCIuQshlSqKvIia4Zj/4SyT8vLlg4xpBjwcTp46La7y6zbXgAQ+dj/AkrRKcuyYMahTpy727d2LdevW2c1DqcDqV4K5KSu9fL08hY+8Sg1IDbheA26nTxgO8rxOg7gHWU6qR9RFzL4dRTbVZU4FV04fzVdUv93jeOr198V0V9KVOJrm8kKW6qytzFsZyGQfpdzXtJzsHAF2+MR2Dv4hYfC/qypysrPoFPazBGTyfGK4s9IPdO5WfJwoMkehX49yNRUUR2Yt/a49e1G7VgSeGzAIWXS4KW8H5O/nj+vJSeKwU6b/ef1GfLTgE1HVg3x8vLxoGo98Fmrl+gR9uWwlenZ/Wkx7jRjzqli+q23HtrQcbW3Tkwuo5KOwWckMeF577VVEto7E7j27MWnSZKc5/Juz3mjzzVmLbb4JSSg1IDXgdA2I6S2F6y3ylXGjAzVLUIeRQ4NtcQp61p8Og8YhIe4s5vR5QIg6bOlWlPEulye2BmTkZGcLMJdIddhMs37uFBzftVl3KkvpwLTgh/O9/cvntVEIsT937sI9zZrilVEjsGnzFjS+uyE6PtQB78+Zi3XrN2DfgYN4qH07XI6/gvPnLyKq65MICwtFr2fzzhpbvWYtrl69ihHDBmPcK6MxYdJUAaD0xDVRk90DK1XgbaJNmOi1IvP0NWCn4ux+Pvqt3um5r736CiIjI7GTpnanTJ1aYMBj51MS6tX2HXe6zuX9SQ3cDhpwCywfjMRrV+EfVB7hEfWQwlM9xQzwmFNkKk1nlS3nKxyZG3ToAr/gu5BOfjzmggBz5PB8dPuvdPp6Fh4aPIEcpOMQf/YEmnbqRddTOLlni7G6XqfFPMp4lhMWIyOhLRFVr5lNFqdsAmDsZKkX1v2yAeWDgvBYx4dx3z3NkUnP44e1P2H9xk2C/O0Z7xEgGomeUd1EOjkpGdPfnYXrSUkC2GTRqhXmzQApNLSKWAXWrctTWPH1N3rNFTBPdWMF5GR/9aJs235pnVZDAh+rqgwNC8PWrVsRHT3d7P+ZVSYqAra2KoH7BeWlSMkzd7WVzlx9mS81IDXgXA24tWr3mJFjWmoKdm3baEwXp4geANn8+bvoOmEOxq6hpdjUEfH+PqVzV4Epsqvr5eSQ9YrGyQy6zx/eGY1Oo6dj4CfrCABlCp+eH2eOU6qZvTIgzM68CS+/QLM01gp4X51Hn3zaIhnv1cPL1H19y4kVWgxicnLNKUnJyRj35mS4l3Yny5ybcIZWmPGKLv4oppcFny4Ef8wFXdhg16BqF7E5MZyTr3szCmuLhQrR7XMtRmovjkp78cVBhSaWOSCjBkMKjdL/KOlCE0oylhqQGrBJAyXCw2vklCEnYp7aykhPs6lScSOqWKOeWO3Fvjn2hiBayl6KwMPlk0cI+GShtHsZXrqV59NEYEoZLjNptVe/Od/i6tmT+PjlPujUpRv+PvyPbU0qTGyj1qUysMhlZJWfVQLRhlkqkwKThI5sVG6WxGyBgY8oNkdjLl8RQVOuSSpUqoZMs0xSFiur7s8KnQlPWxIF5FfA6rZIWKxpHL1/YbkRXzbdXv169bB2zQ/g4y3UAMZcXM1UTaPOl3GpAakB12vALZNWRaXorJZyvSiOtxh3/G+HK/MyeA78RlaOVrANWbxZpBkAsQWI89kXiNM/fzCRKRkTiTxBaMuXox2zJd7yTd+Sdv47ZfJ34JpnbQM+Ult6WCgGO4qlxzVCylakBqQGrGnAxJHZGvGdWq50TMnxl/HZ/zoTrCGUYvhjNGQAPnRNjDsP34qVxeaGrtaFATfZg56sj4YWuVmv7moV3IHtWXwCtt+vfFa268pRStWj0oIbhaWeRUcvT6GXV6kBqQHXa+A/C3r0Oi7e1fny6WPGp6CAIc5Q4rzfj11B1VnaVc8WYpcNdi5ryJa7ljR6GpCPSE8rRZqn9BkshAQ/RfooZONSA0YNlDTG/mMRdSfEcXXakipspTPy4MHIacGpzJwmVdExcoE+GLQWJnAtOuXJlh3UgD19gLpvUYMgB5uW1aQGpAYKqIH/LOjR6u227ZAsjvtcaJFAqwbTdAGqmjIqrFQho5FCZu9UrdxOsjr1xl3PzNG+wh6w5Pq7ki1KDfw3NCBBDz1nezoxe2id+RPKwx95MSN/nSxjmYhYJTAltztlib+lMrsbkhX0NCABj55Wik1eUfUZxUYBUhCpgWKkAQl66GHY8wZmD63xOReLcd9UiDtunHT2DTmbn/HH4OTI7SKnk2+7OLLTghvuKzjPoT6jON6glElq4A7QgBH08Llb/CmKcHfHKNRu/UhRNG1sU9sxic6qZCnw+V6Rz76EoCrVBa22YzMycGnEFMCIpnWyLIlkJ7klVrKsqDQgAU9RaT5fu9x/6PYhlC+D1IDUQPHRgJtfQBDuf+BhuLt7CKliTx7Fgd1/uFRCBhUp8ZdwZOvPLm2XGzMHYrgDG750C9w8yiDpykXwie3xtCmhSbBn0OG+zx56k4b0Eo4wzKtjvyh5dfWkEa5D9jPVZVXkmbfDfdwOMhb5gywcAbTghlvRWnSYhvOU/kWvTuFIJ7lKDUgNWNKAW8u2HXHp4jkc3LNd0HmpD+y0VPMOKVM6I6VzUm4rqEo4PLx88MWIKMNuz9SB5QuMA+wJubghjM7DcjSYSmGaEjx1svK3ZSCyiZQrGwmNkfwsTei0xRbqiSJz5ebyFf6qclVUKTW9WiOwVm7KzTkpB9t0sJpzZC5mXBzQRWxsrP03Yef/utKfKP2L/Q3KGlIDUgOFoQG3UqVKYf+ureKQUf5HvXntSmG0YzPP8OaR6PbmPOxctRC/LXxPAI/e0xcjpHZDMiaUwIWjMQKI8JEQHDqNjkZDOmy0FJ0On3L1MpaM6YMrscdQvVlr9Jj6Ke27cxSV6JiKTDpmY3X0KBz+nc+lshxGfr1LHEfBmxT2mv4F6SYbGz+ORszG79ErehE7AeHzVwc4vC4q9gyd8u5A0O/fdXJ1skybE9svmmZZShn5GSP61HrA0Ehppq7INlOWh7aMXEwjqnqqqCmNkrJGkFtujcyqTEp7tlytNpafiQNV8jO5g3JcpQ9VO9xPMphRAxqOK/lq7Urwo9aGjEsNFL0GSqanpaFlm454PKofWrR5BAFBFYpMqpr3tUPPtz/Hqf1/CsDDgjDIqFSzPn5fNAubPpsBPmcr8pnh
QsZWvYagccfuOLRxNQGal+kMrdJ48vVZoszD01ucqcUnr//4/jikJSXg0ZeniTLtl7az+nriIGxd+hFKkE/P2vdex7dTh+PYrs2imqdfAAIrV8fNtFTHhj873xjVsupX1c9V18sfd6ROfi4mOapBwSTfFQmrbVslIClt1YmtdNZu3BaZNDwcqKLhIJNO0IACdhRAwyy1fYjSjBYcKfnyKjUgNVA0GihZ1tMLqTeSsWXDGvGP2/S+B4pEkoAq1YRl5tS+7Vg5foBRhsp1GiMpPg4enj4o6+OHGwlX0OhhwwnlzZ7oA7b4pCZeQ4VqtXH51FFUDK8LdzpAVQlsLdq3djnW0blZZb194eUfpBQZr8pbmpJx7vA+xB2PofO1snD20G7xyUi5nltcAukpiUi9fk0hL35XG8ZlG0h07stCLQtFOow0WQWqrOHlaJIQhUtBhZ337FLZHNXhHVyPLDlKUIMdJU/vqu1X9GhkntSA1IBrNSCOoYjZv4NOWE/HkZh9iOzQGWXJSpKWmuJSSRjQZGdloXxYDWO7vhVCxFSSBwGziJYdRP5NOgk+PSlRxMv4+AoTs1LGmVfPn4anb6Ao569T+wxO2XzlzqpWy4cIBC0zlnNErxMrUYIWtrHJOpfSSEM8kuiMrgIF7j8VxgVipFRWOmQVU6ttWCVQmNtxLQye5ppX3as5ErvybZXdWe3awccOUrtuWRLbrgHV1K1i6bGlsgJ87KljC19JIzUgNeCYBgToycrMErWzsjLFlf18XB3YSvMTTUP1nf01Oo95B2tmjCVwcUmIcfrADnwzaXA+kW6m3hCnnc99rl2+skoR9UVeSK1GuHY+FnzljudszO58tErHpC5gkMPAh4dCdWBvGO+A8iJLW6amK5q4rQO3ndIVEls7pdCQO1kom4GFs9q1gY/NMmlUI5NFogFDn8HP1RCUFyUJeBSNyKvUQNFrgMwZQETdhihd2h3hteohnXxVUpKVqRzXCXgr7QZNI+0RVpi7H+kmHJF5eikx7ixq3POAsNCwNOzofG/X/kKwYzs2wTuwAu7r9gJKurkhKDQcHQa9biJ062eGwY9ORm83YCxupqcKJ2cTAnMJ6ru4fW3Iyc6BZzl/mkLzKpixJq9v1DZRwLRtjA3jqW20BRQot7qz27rdEYEV+a0UO+eZSC4F1YACbJiPFtxwWptX0PZkfakBqYGCacDtwO5tuLt5S9Ss00j49hzc+2fBOBaw9o+zxqHGvW0RNXkB3n2yMZa/1g/Pzlop0gprdlzmwA7KFarVEkBHATsMkjbMf0shJatMBQxbulWkty750Jivjqg7LmM+DTolSpbMB2xKlCxBfkTpNIXmbyQtfhEGGHwDhouefAoEcf7YaqFRPUEcznNiO85Xgg13ZUH+IpHHBpElidCA2qJjCdQo/YolGqlSqQGpAddqwC325L84c+oYypQtS348N1zbem5rH/RsadLu7B4tjOn4Mycws2szYdHxC65MzspHxMopJsjOzMRnQ54UjsvB5MB87fxpcnSON9blyMynmws/oYQLZ5CR66dU2sN052mlc2J6dozOyckmX6BtmN7JMEXG+UrgDuzisb9x/fIFwhQKdFBK7bxaGPfMcbK9inVKw9hqnc5UFnvpTWs7L3W7IwMz8pvJdp7eJKeCakABMcpV4acGQ5zH5eq+RaGTV6kBqYGi04Dw6eFBvqgAj623znvw8Ecv8PJxnhozF+KOHzYWlSsfjGHLtqEkLUfnDomBU0nyYcqmqSxOr3n3FcRs+E7Q63VaObRnj8jPzhbgyMjYRRH7xsSiAigubNeqQqwQWCl20WM1NFOcZHHpjd+ejemBHPWdaMvVZTIuNSA1UDQaEKCnaJou3FbjY4+DfX4Y1KhDcvwVfDLwUV6ylW/qivMSaHdqJXCnpQ1H/lhPVp5cGnqTK3CwEx/YSc6vm+Je1XLmvyu7uarZybjdGsj/BPL/GO1mKitIDUgNSA1IDVjRwB0LenhX5hXjns93+2zV4pVi2qAHcLQ0nN62fK5edsHyNJjDw8OdLE85uHXrVsH4mqnNzXHQGXoNBXrfJjKaJPSobcpji5kbOaA7dp9OkMEuBdh0SzYSaWQvMjlsFFeSmWhAWnBM1CETUgO3lQbuWNBjz1NQOjFbgY+RtxiseAAzF/RHs1LkID2g33Mw2RqASFNu3MCS5SvxxSfzcfVaAoaMGGWOsdn8Rzq0x9BBL6B3vxdwPSnJQKcZY/38/RA9+U1s+n0LVn7zbS6vEvD398WXn32C2XPmYsOm3zB96mSkpKRgSvQ7Ju2x/C883xd85cB6uxh3Cbt278H5CxdFnv5XniDdunZB7x7d4Um+ZN169iFZXb9iUF9Ga7n6z9RaLdNyFQ9V1JTm9kvx75lBLIcs2nMrU2VldXd3N65kyqapYUtAl3kEBwfj8uXLFulEQ3k/KZG06cvSv6w5Bqo6DNa1QZkK15YpfYuWXqalBqQGikYDEvTk6t1uwMP1dDo/08eYv3Pkcjc6LuOpxzsL0ps3bxqrJCQmYvHSFThx8jSuJSRQPte3b1QsXdqNth8obRx8DMyJTwnik8vqwTYPoFrVUDxftQ++WvWdAC1Mx4ONO9XnAYpDmTIe4qr9Yvm7PGGQPzU1TdC7uZXCs316Ysyr43H8hOY0ei0DSvfqHiXu8cO585BKR6EUXjCjQ/vUWnji3UGcGzakrS8E6OHfG20MeuoUrly5gvLly9Pvrarh/4WnjKksJuYQ0nSeO/92a9euDQ8PDwQGBOCfI0ecC3yobYeC6veiB2T08rQAyKF2ZSWpAakBp2pAgh5Sp9I52QJ8lDc6ZzyF5V+vwhdLluVjNe7NKaq8vF66DE17pWcYDlpVEYgoT4llZOQBKG25Ot2hXVtjskH9ejhIA5Ah5LVlJLAQWbbyayz6cil8vL3RvVtXRHV9Co881AFz5i2gWswrh6xZJYXTuPrN3tPTE15enli9di02/rbZCMa4KQZcaiDIeepQllcZqgZLa/RlypRBOu3ibRJyBzAeYNkiwZYHbbDEl8v4fmz5vWj5mqRVA6lJ/m2aOHz4sBFsV77rLlSpXBnXrl1DfHw8UlNTjXeVQ/pWP0OlQAE8WWQh+ovATnh4OOoQAHIa8Mn38+YHkC9TEcfsVekvzBKoCvTAkKpYRqUGpAZcrIH/POhROiXtAKZN6z0X+7tLPS6aPGI6beIExF26hA/nfYzp0yaBd8z28/NF9WpVkXj9OuUvwB/bd4iK9evVxcD+z6FWzZq4dPkKzp47p2GoJIkxWXvCq1UDW3l+3fQ7Hmz7ANrTJw/0KLS2Xg0aSKYpMAZvT5H1J7x6VVGZgcHQ/w1Ey/vvo7d2d+zddwCzP5pLlqTSmD1zhqB5snNntGvTBn2fH4iqJNOgFwaAQVhi4nWs//VXLFr8pQAW77z9lgBP/n7+4MH0+UGDhDXLQF+f6BOxfoOKPvqtXJ35kTzVSWeJ+GDOR/hj2zbRbnXKe3HgANSvV4+AYgb+/HMH5s5fgBs3UsBlgwYORIMGxDeB+P66AYu+WCzkqF2
rFoYOGYyaNWrgBg3iv23ejAUff2wRpIkGTb5ykc4dBnj4FlmX/OFw5uxZ1K9fX0xTXbhwgXRreTsMtjLWrlVbTIn9+++/AogePXoUtSIiLAIfHx8fhFSqJNoUX6RX/h9Ip2N1ON/LywunY2ORQVtRBAYGCnncaBqOp21jz5wRoNcm4GP4qee1Y0PMlj7EBjaSRGpAasCJGjA4ZTiRoSOs7u4YhdqtH3GkaoHrmLPcmHubU3dkBR23yvl4I7hCBeOHLSAcAgP9RQct4gH+aNK4EXXcZ7B0xVeivO8zvcU1KDAAk8a9iork/8Cg48DBg2jWpLEoM/fVPtfKs+KbVUQfg8hWLQUoMUdvkm+h469TK0JMbZw5YwBdo0cOJ6tPe2HJ+Yr8hpqSXFMmviHe+JcsWyHYxhw6hCXLloMHrrcmT0JolSoEdJbg0N9/o2dUFFmPDAfLBtJ9NqhXXwxmH86j6bAbqUQ/mehDBSA5dIjou3fPow8IpPaaiMFuyTKDJa3fc8+JNv18yZ/prakE/qpj2fIV2Lz5d3Ro/yCBmf/B19cXb02dgtBQkuOLL0iOQ7l8u4m6r70yVky5zHz/faq3GY927IjI1q1NVGQ+ofq1qKLm6W/vErbkJJCVp2LFiqa+azq3xYCHLTqZdAyOAniYjC1w/xLwuUlWNZ7yYjptYHDt60cHEROoSkpORlJKsgAyDGhDCCBzmRtN2fJ0GVuOGJRdpim3wKAghISEaNmZTess5DRLqy4w14+oaWRcakBqwHUacHuie/98ra1f85VLDxyNfPYlpNA5W0e2/pxPlsLMYACjdErKVQ1qrLVtAQNYqyrKH3vkYfBHCavX/oh5nyw0JFXMGfC8M3O2yOf9hXpGdRVAp07tCHjT1NKb06Lx585dopwBRIt771FYmlxLlXJD2wda49iJU/Qmfk4AksaNGghrzKbNW0xobUl06vgwmjdtLGSoVDGYLB638MvGjcIK0yayFY4eO46f128QrBjQRLZuSYfZlhXtDhs8CDEEbtb/uhGRLVsiwN8fn36+CHv27qUPHXzbuhVZotpixVdfi/onTp3ExMmGab/IVq0M9As/N9DvY/rWeJAAnUJ/OvY0ps94V9QtSQ7XvchpmsEhW2v8CNxMmjIN27ZvF+X/Hj1GA3MJNCKfFCHHwoXYs0eRg/m2I75fwZt0y9M0bCVYtHgxgablwvlcMLH4xSiHHyhdc3KvFunvjMLzZOHxJ78cBvYXLpp3cGdrDoOSy2ShYf3HX71qVECAfwASyL+tMk2V1a5dCwxw9cLFuLhcq42hNIWsdmfPn0NNsoBy4P/rs2R9YudoDmwFYouPrUH17yh4Kf2Fur66P1Hy9fKUMnmVGpAacL0G3DasNQwq3HTdhk0REBTsUsDj+lvOa1HdcWnBjjadVysvVtAXdl49tebHdUaGV8j3wSTk9rTnzl8wZivTV+z/wFM9HI4dP24sP0ZAwxzoadbkbvjTNBl/5n84C+wfw6EPAYL8oEfdzRvZm0Qu0QDC1qL6deuCQc+sDz/C4X+OIIwADoeImjUwf44BrCkVKwSVR3JyipIU17CwUHEd0K8vrWrraywLCzXkc8YF1aowI33/fhhAHyWEhYYpUbIKnVfFz4o4ny/H4IsDT50o4Zf16ymag2d6GyxoA/r3J755LwNKex/NnSvyJ06YIAY+tqy9N3OmmEpReJle1b8QBjxcqs4zpb7TUmztYZ+eigQw+LfC/lN6gadHb5EfD08/8XSUGvRUrVaV+qNU+s0kW9Az0LBBA2TnmmNiYmJwkpyomZcS2E/sIgEvBsA8PckbkvIUsiNB3W+o62vzOW1LP6LmIeNSA1IDhasBt9QbyaIF/gcNDqmCY/8cLNwWrXDnA0W7vTkPO1ctxG8L34OHlw96T1+MkNoN6V25BC4cjcEXI6LEcRHMqtPoaDTs0AWl3EqLHZuXjOkjDhWt3qw1ekz9FJdPH0WlGvWQeesmVkePwuHffzSRQHkTU3dOhvbupo6xpDjyYtvy+djxzaeiHp8Jdv3SeSx+Y4gJH0cS7IPzz795g685HjrbKArSixfjxJWnY+KvXhPxMIqbC4aprRJYs+5nGox4dRjwXO8eBJ4qIYgGHHvD7r37hSNzxeAK+GzBR+j+dBdsJiDHAxyHdb9swEfzPxZx9uvx8vSisktGsCUK6EuxArw24Q3E5L7Js3N0Nu2ppBcUAPTa+PF59DRVyHsbGYOZ+QgFDFWrVg0KyGzWtInwJTHKMY75Gpy72XKmODqzFWrL1q24i8Bm2zZtyHrUAz1oGu6DOXOMzcqIqQbYn4dXYfESdI7rhTPkW8OBpxcjyOqjDWwxYr8tS8Hon0PPXXleWnoF8PiUK4djBHrznKn5d2MZ5Kt+WVq2ZtPqPsUskSyQGpAacKkGSiqtBVeqQnPmpRF70vogrNRx9rXmfe3Q8+3PcWr/nwLwMP9e0YtQqWZ9/L5oFjZ9NgMVCcBEPjNcNN2q1xA07tgdfADp6uiXUYqsH0++PkuUeXh6U9odfsF3iYNJ05ISBEAShaov7dsZF6WlkCPt3MlYMuYZnP/nAFr3GYqAkDBRyycwGJ5+QSoOLorq9Mkxf/8jfBRGDh1CS+A7of9zfdAmsrWuQN7eXrifpr327NsvHKSXrvxK+AiNnzRN0LdrE6lTT6dRHSp+Y/7u+zWoShabzo89Sm/kGdi3/y88THsGRdF+PC3ImXn6tClY+PE8lCMQoQ3sSJ1GjqcvDRuCduRY/ShN+S2kvYrGjnpZSyrSB+lNPo1WZL00bBjRtyH6R4j+Y7P06gGNrQDshDzypeHoRLL27tUT06ZMQefOnXCQLDdpaSTHcObblnx2mO8nGDt6FHzL+eArms569513xNRIEjmUc+C9lWwKuqOmWr96cXWeTa0UOyIGFldzfXvYkhNAAMiP/Gz0/u9YeL5jnrL1JisNXwWdGQCrvtnr9DzER9mbSl1IcQY8ETTVxT4+DL6yyF9IsXRqSHWT9E5oDPaAGXP3aWQmI1IDUgMu1YAR9FSPqIurVy4hQ7u810XiBFSpJiwzp/Ztx8rxA4ytVq7TGEnxcfDw9EFZH3JYTLiCRg8/LcqbPdFHWHxSE6/Raeu1xU7LFengUfeynsb6bC3at3Y51n0wEe5laKm0f37Aou3Evp0yDKcP/InQBs2RcCGWeOUgpFZDwTObTPTcgRYksEWC30bVm7ep+bGpXwkmNNTxpmekiyJens4WlbfffZ9M6NkYNKA/HqdB/DAt9eXATqHq0KRRI+FovFHju7N3/wFa3ZSEe5s3FSueDHUN0xBZGh6CH8nAsvM9qGVbRr43CbTqqnePKKGft6a/S8DnADkCd8OYkS/Rxof+iJ7xntgwkZcsMw8GRxzY4vIm+djwPY0kwDH4xYE0PXEas2m6jEPmLdN7MdBPFStyRg4fjsGDXhTTGbNzLS5qubg+AxkO7MTKUycT35yEZHJ4HTZkCO0t1Ad/EdiZO2++QQ5ykBZyECgaTKvEeJpk9odzhNxzaHorLCwMkyZOxAsvvIAdO3di1a
pVgnf+LxXKUUVN6ZQCHlE5zld13JT6dk2dI18dntoKD6+OGjS1xNNLvJWANrC/VAZNQ9Wl6VLDp47YGsDSPk7a/101T6WMr9xeObIkcWD/IOavnj5V17MWNwdklPa4PtPwh/PU+dZ4y3KpAamBwtXA/wELdrh0vDV44gAAAABJRU5ErkJggg=="}}]}], + "model": "gpt-4o-mini"}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '78932' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - python-httpx/0.27.2 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-ABIA0YzOHlhqb02K8Ay4Jwsw6xOpk\",\n \"object\": + \"chat.completion\",\n \"created\": 1727254780,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The first entry in the menu says \\\"Ask + Goose.\\\"\",\n \"refusal\": null\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 14230,\n \"completion_tokens\": 11,\n \"total_tokens\": 14241,\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e9627b5346\"\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8c89d1c45d98a883-SYD + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Sep 2024 08:59:41 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + content-length: + - '613' + openai-organization: test_openai_org_key + openai-processing-ms: + - '1289' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '200000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '199177' + x-ratelimit-reset-requests: + - 8.64s + x-ratelimit-reset-tokens: + - 246ms + x-request-id: + - req_9503b21e31db78c4ebd2b71b304cea72 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/exchange/tests/providers/conftest.py b/packages/exchange/tests/providers/conftest.py new file mode 100644 index 000000000..2b35958fb --- /dev/null +++ b/packages/exchange/tests/providers/conftest.py @@ -0,0 +1,129 @@ +import os +import re +from typing import Type, Tuple + +import pytest + +from exchange import Message, ToolUse, ToolResult, Tool +from exchange.providers import Usage, Provider +from tests.conftest import read_file + +OPENAI_API_KEY = "test_openai_api_key" +OPENAI_ORG_ID = "test_openai_org_key" +OPENAI_PROJECT_ID = "test_openai_project_id" + + +@pytest.fixture +def default_openai_env(monkeypatch): + """ + This fixture prevents OpenAIProvider.from_env() from erring on missing + environment variables. + + When running VCR tests for the first time or after deleting a cassette + recording, set required environment variables, so that real requests don't + fail. Subsequent runs use the recorded data, so don't need them. 
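+
+    When replaying, the fixture injects a placeholder OPENAI_API_KEY via
+    monkeypatch, which is enough because recorded cassettes never hit the
+    real API.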
+ """ + if "OPENAI_API_KEY" not in os.environ: + monkeypatch.setenv("OPENAI_API_KEY", OPENAI_API_KEY) + + +AZURE_ENDPOINT = "https://test.openai.azure.com" +AZURE_DEPLOYMENT_NAME = "test-azure-deployment" +AZURE_API_VERSION = "2024-05-01-preview" +AZURE_API_KEY = "test_azure_api_key" + + +@pytest.fixture +def default_azure_env(monkeypatch): + """ + This fixture prevents AzureProvider.from_env() from erring on missing + environment variables. + + When running VCR tests for the first time or after deleting a cassette + recording, set required environment variables, so that real requests don't + fail. Subsequent runs use the recorded data, so don't need them. + """ + if "AZURE_CHAT_COMPLETIONS_HOST_NAME" not in os.environ: + monkeypatch.setenv("AZURE_CHAT_COMPLETIONS_HOST_NAME", AZURE_ENDPOINT) + if "AZURE_CHAT_COMPLETIONS_DEPLOYMENT_NAME" not in os.environ: + monkeypatch.setenv("AZURE_CHAT_COMPLETIONS_DEPLOYMENT_NAME", AZURE_DEPLOYMENT_NAME) + if "AZURE_CHAT_COMPLETIONS_DEPLOYMENT_API_VERSION" not in os.environ: + monkeypatch.setenv("AZURE_CHAT_COMPLETIONS_DEPLOYMENT_API_VERSION", AZURE_API_VERSION) + if "AZURE_CHAT_COMPLETIONS_KEY" not in os.environ: + monkeypatch.setenv("AZURE_CHAT_COMPLETIONS_KEY", AZURE_API_KEY) + + +@pytest.fixture(scope="module") +def vcr_config(): + """ + This scrubs sensitive data and gunzips bodies when in recording mode. + + Without this, you would leak cookies and auth tokens in the cassettes. + Also, depending on the request, some responses would be binary encoded + while others plain json. This ensures all bodies are human-readable. + """ + return { + "decode_compressed_response": True, + "filter_headers": [ + ("authorization", "Bearer " + OPENAI_API_KEY), + ("openai-organization", OPENAI_ORG_ID), + ("openai-project", OPENAI_PROJECT_ID), + ("cookie", None), + ], + "before_record_request": scrub_request_url, + "before_record_response": scrub_response_headers, + } + + +def scrub_request_url(request): + """ + This scrubs sensitive request data in provider-specific way. Note that headers + are case-sensitive! + """ + if "openai.azure.com" in request.uri: + request.uri = re.sub(r"https://[^/]+", AZURE_ENDPOINT, request.uri) + request.uri = re.sub(r"/deployments/[^/]+", f"/deployments/{AZURE_DEPLOYMENT_NAME}", request.uri) + request.headers["host"] = AZURE_ENDPOINT.replace("https://", "") + request.headers["api-key"] = AZURE_API_KEY + + return request + + +def scrub_response_headers(response): + """ + This scrubs sensitive response headers. Note they are case-sensitive! + """ + response["headers"]["openai-organization"] = OPENAI_ORG_ID + response["headers"]["Set-Cookie"] = "test_set_cookie" + return response + + +def complete(provider_cls: Type[Provider], model: str) -> Tuple[Message, Usage]: + provider = provider_cls.from_env() + system = "You are a helpful assistant." + messages = [Message.user("Hello")] + return provider.complete(model=model, system=system, messages=messages, tools=None) + + +def tools(provider_cls: Type[Provider], model: str) -> Tuple[Message, Usage]: + provider = provider_cls.from_env() + system = "You are a helpful assistant. Expect to need to read a file using read_file." + messages = [Message.user("What are the contents of this file? test.txt")] + return provider.complete(model=model, system=system, messages=messages, tools=(Tool.from_function(read_file),)) + + +def vision(provider_cls: Type[Provider], model: str) -> Tuple[Message, Usage]: + provider = provider_cls.from_env() + system = "You are a helpful assistant." 
+ messages = [ + Message.user("What does the first entry in the menu say?"), + Message( + role="assistant", + content=[ToolUse(id="xyz", name="screenshot", parameters={})], + ), + Message( + role="user", + content=[ToolResult(tool_use_id="xyz", output='"image:tests/test_image.png"')], + ), + ] + return provider.complete(model=model, system=system, messages=messages, tools=None) diff --git a/packages/exchange/tests/providers/test_anthropic.py b/packages/exchange/tests/providers/test_anthropic.py new file mode 100644 index 000000000..a6f5bc689 --- /dev/null +++ b/packages/exchange/tests/providers/test_anthropic.py @@ -0,0 +1,174 @@ +import os +from unittest.mock import patch + +import httpx +import pytest +from exchange import Message, Text +from exchange.content import ToolResult, ToolUse +from exchange.providers.anthropic import AnthropicProvider +from exchange.tool import Tool + + +def example_fn(param: str) -> None: + """ + Testing function. + + Args: + param (str): Description of param1 + """ + pass + + +@pytest.fixture +@patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test_api_key"}) +def anthropic_provider(): + return AnthropicProvider.from_env() + + +def test_anthropic_response_to_text_message() -> None: + response = { + "content": [{"type": "text", "text": "Hello from Claude!"}], + } + message = AnthropicProvider.anthropic_response_to_message(response) + assert message.content[0].text == "Hello from Claude!" + + +def test_anthropic_response_to_tool_use_message() -> None: + response = { + "content": [ + { + "type": "tool_use", + "id": "1", + "name": "example_fn", + "input": {"param": "value"}, + } + ], + } + message = AnthropicProvider.anthropic_response_to_message(response) + assert message.content[0].id == "1" + assert message.content[0].name == "example_fn" + assert message.content[0].parameters == {"param": "value"} + + +def test_tools_to_anthropic_spec() -> None: + tools = (Tool.from_function(example_fn),) + expected_spec = [ + { + "name": "example_fn", + "description": "Testing function.", + "input_schema": { + "type": "object", + "properties": {"param": {"type": "string", "description": "Description of param1"}}, + "required": ["param"], + }, + } + ] + result = AnthropicProvider.tools_to_anthropic_spec(tools) + assert result == expected_spec + + +def test_message_text_to_anthropic_spec() -> None: + messages = [Message.user("Hello, Claude")] + expected_spec = [ + { + "role": "user", + "content": [{"type": "text", "text": "Hello, Claude"}], + } + ] + result = AnthropicProvider.messages_to_anthropic_spec(messages) + assert result == expected_spec + + +def test_messages_to_anthropic_spec() -> None: + messages = [ + Message(role="user", content=[Text(text="Hello, Claude")]), + Message( + role="assistant", + content=[ToolUse(id="1", name="example_fn", parameters={"param": "value"})], + ), + Message(role="user", content=[ToolResult(tool_use_id="1", output="Result")]), + ] + actual_spec = AnthropicProvider.messages_to_anthropic_spec(messages) + # != + expected_spec = [ + {"role": "user", "content": [{"type": "text", "text": "Hello, Claude"}]}, + { + "role": "assistant", + "content": [ + { + "type": "tool_use", + "id": "1", + "name": "example_fn", + "input": {"param": "value"}, + } + ], + }, + { + "role": "user", + "content": [{"type": "tool_result", "tool_use_id": "1", "content": "Result"}], + }, + ] + assert actual_spec == expected_spec + + +@patch("httpx.Client.post") +@patch("logging.warning") +@patch("logging.error") +def test_anthropic_completion(mock_error, mock_warning, 
mock_post, anthropic_provider): + mock_response = { + "content": [{"type": "text", "text": "Hello from Claude!"}], + "usage": {"input_tokens": 10, "output_tokens": 25}, + } + + # First attempts fail with status code 429, 2nd succeeds + def create_response(status_code, json_data=None): + response = httpx.Response(status_code) + response._content = httpx._content.json_dumps(json_data or {}).encode() + response._request = httpx.Request("POST", "https://api.anthropic.com/v1/messages") + return response + + mock_post.side_effect = [ + create_response(429), # 1st attempt + create_response(200, mock_response), # Final success + ] + + model = "claude-3-5-sonnet-20240620" + system = "You are a helpful assistant." + messages = [Message.user("Hello, Claude")] + + reply_message, reply_usage = anthropic_provider.complete(model=model, system=system, messages=messages) + + assert reply_message.content == [Text(text="Hello from Claude!")] + assert reply_usage.total_tokens == 35 + assert mock_post.call_count == 2 + mock_post.assert_any_call( + "https://api.anthropic.com/v1/messages", + json={ + "system": system, + "model": model, + "max_tokens": 4096, + "messages": [ + *[ + { + "role": msg.role, + "content": [{"type": "text", "text": msg.content[0].text}], + } + for msg in messages + ], + ], + }, + ) + + +@pytest.mark.integration +def test_anthropic_integration(): + provider = AnthropicProvider.from_env() + model = "claude-3-5-sonnet-20240620" # updated model to a known valid model + system = "You are a helpful assistant." + messages = [Message.user("Hello, Claude")] + + # Run the completion + reply = provider.complete(model=model, system=system, messages=messages) + + assert reply[0].content is not None + print("Completion content from Anthropic:", reply[0].content) diff --git a/packages/exchange/tests/providers/test_azure.py b/packages/exchange/tests/providers/test_azure.py new file mode 100644 index 000000000..adafabedb --- /dev/null +++ b/packages/exchange/tests/providers/test_azure.py @@ -0,0 +1,48 @@ +import os + +import pytest + +from exchange import Text, ToolUse +from exchange.providers.azure import AzureProvider +from .conftest import complete, tools + +AZURE_MODEL = os.getenv("AZURE_MODEL", "gpt-4o-mini") + + +@pytest.mark.vcr() +def test_azure_complete(default_azure_env): + reply_message, reply_usage = complete(AzureProvider, AZURE_MODEL) + + assert reply_message.content == [Text(text="Hello! 
How can I assist you today?")] + assert reply_usage.total_tokens == 27 + + +@pytest.mark.integration +def test_azure_complete_integration(): + reply = complete(AzureProvider, AZURE_MODEL) + + assert reply[0].content is not None + print("Completion content from Azure:", reply[0].content) + + +@pytest.mark.vcr() +def test_azure_tools(default_azure_env): + reply_message, reply_usage = tools(AzureProvider, AZURE_MODEL) + + tool_use = reply_message.content[0] + assert isinstance(tool_use, ToolUse), f"Expected ToolUse, but was {type(tool_use).__name__}" + assert tool_use.id == "call_a47abadDxlGKIWjvYYvGVAHa" + assert tool_use.name == "read_file" + assert tool_use.parameters == {"filename": "test.txt"} + assert reply_usage.total_tokens == 125 + + +@pytest.mark.integration +def test_azure_tools_integration(): + reply = tools(AzureProvider, AZURE_MODEL) + + tool_use = reply[0].content[0] + assert isinstance(tool_use, ToolUse), f"Expected ToolUse, but was {type(tool_use).__name__}" + assert tool_use.id is not None + assert tool_use.name == "read_file" + assert tool_use.parameters == {"filename": "test.txt"} diff --git a/packages/exchange/tests/providers/test_bedrock.py b/packages/exchange/tests/providers/test_bedrock.py new file mode 100644 index 000000000..2525f650b --- /dev/null +++ b/packages/exchange/tests/providers/test_bedrock.py @@ -0,0 +1,228 @@ +import logging +import os +from unittest.mock import patch + +import pytest +from exchange.content import Text, ToolResult, ToolUse +from exchange.message import Message +from exchange.providers.bedrock import BedrockProvider +from exchange.tool import Tool + +logger = logging.getLogger(__name__) + + +@pytest.fixture +@patch.dict( + os.environ, + { + "AWS_REGION": "us-east-1", + "AWS_ACCESS_KEY_ID": "fake-access-key", + "AWS_SECRET_ACCESS_KEY": "fake-secret-key", + "AWS_SESSION_TOKEN": "fake-session-token", + }, +) +def bedrock_provider(): + return BedrockProvider.from_env() + + +@patch("time.time", return_value=1624250000) +def test_sign_and_get_headers(mock_time, bedrock_provider): + # Create sample values + method = "POST" + url = "https://bedrock-runtime.us-east-1.amazonaws.com/some/path" + payload = {"key": "value"} + service = "bedrock" + # Generate headers + headers = bedrock_provider.client.sign_and_get_headers( + method, + url, + payload, + service, + ) + # Assert that headers contain expected keys + assert "Authorization" in headers + assert "Content-Type" in headers + assert "X-Amz-date" in headers + assert "x-amz-content-sha256" in headers + assert "X-Amz-Security-Token" in headers + + +@patch("httpx.Client.post") +def test_complete(mock_post, bedrock_provider): + # Mocked response from the server + mock_response = { + "output": {"message": {"role": "assistant", "content": [{"text": "Hello, world!"}]}}, + "usage": {"inputTokens": 10, "outputTokens": 15, "totalTokens": 25}, + } + mock_post.return_value.json.return_value = mock_response + + model = "test-model" + system = "You are a helpful assistant." + messages = [Message.user("Hello")] + tools = () + + reply_message, reply_usage = bedrock_provider.complete(model=model, system=system, messages=messages, tools=tools) + + # Assertions for reply message + assert reply_message.content[0].text == "Hello, world!" 
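+    # Usage mirrors the mocked payload: 10 input + 15 output = 25 total tokens.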
+ assert reply_usage.total_tokens == 25 + + +def test_message_to_bedrock_spec_text(bedrock_provider): + message = Message(role="user", content=[Text("Hello, world!")]) + expected = {"role": "user", "content": [{"text": "Hello, world!"}]} + assert bedrock_provider.message_to_bedrock_spec(message) == expected + + +def test_message_to_bedrock_spec_tool_use(bedrock_provider): + tool_use = ToolUse(id="tool-1", name="WordCount", parameters={"text": "Hello, world!"}) + message = Message(role="assistant", content=[tool_use]) + expected = { + "role": "assistant", + "content": [ + { + "toolUse": { + "toolUseId": "tool-1", + "name": "WordCount", + "input": {"text": "Hello, world!"}, + } + } + ], + } + assert bedrock_provider.message_to_bedrock_spec(message) == expected + + +def test_message_to_bedrock_spec_tool_result(bedrock_provider): + message = Message( + role="assistant", + content=[ToolUse(id="tool-1", name="WordCount", parameters={"text": "Hello, world!"})], + ) + expected = { + "role": "assistant", + "content": [ + { + "toolUse": { + "toolUseId": "tool-1", + "name": "WordCount", + "input": {"text": "Hello, world!"}, + } + } + ], + } + assert bedrock_provider.message_to_bedrock_spec(message) == expected + + +def test_message_to_bedrock_spec_tool_result_text(bedrock_provider): + tool_result = ToolResult(tool_use_id="tool-1", output="Error occurred", is_error=True) + message = Message(role="user", content=[tool_result]) + expected = { + "role": "user", + "content": [ + { + "toolResult": { + "toolUseId": "tool-1", + "content": [{"text": "Error occurred"}], + "status": "error", + } + } + ], + } + assert bedrock_provider.message_to_bedrock_spec(message) == expected + + +def test_message_to_bedrock_spec_invalid(bedrock_provider): + with pytest.raises(Exception): + bedrock_provider.message_to_bedrock_spec(Message(role="user", content=[])) + + +def test_response_to_message_text(bedrock_provider): + response = {"role": "user", "content": [{"text": "Hello, world!"}]} + message = bedrock_provider.response_to_message(response) + assert message.role == "user" + assert message.content[0].text == "Hello, world!" 
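+    # response_to_message preserves the role and text content verbatim.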
+ + +def test_response_to_message_tool_use(bedrock_provider): + response = { + "role": "assistant", + "content": [ + { + "toolUse": { + "toolUseId": "tool-1", + "name": "WordCount", + "input": {"text": "Hello, world!"}, + } + } + ], + } + message = bedrock_provider.response_to_message(response) + assert message.role == "assistant" + assert message.content[0].name == "WordCount" + assert message.content[0].parameters == {"text": "Hello, world!"} + + +def test_response_to_message_tool_result(bedrock_provider): + response = { + "role": "user", + "content": [ + { + "toolResult": { + "toolResultId": "tool-1", + "content": [{"json": {"result": 2}}], + } + } + ], + } + message = bedrock_provider.response_to_message(response) + assert message.role == "user" + assert message.content[0].tool_use_id == "tool-1" + assert message.content[0].output == {"result": 2} + + +def test_response_to_message_invalid(bedrock_provider): + with pytest.raises(Exception): + bedrock_provider.response_to_message({}) + + +def test_tools_to_bedrock_spec(bedrock_provider): + def word_count(text: str): + return len(text.split()) + + tool = Tool( + name="WordCount", + description="Counts words.", + parameters={"text": "string"}, + function=word_count, + ) + expected = { + "tools": [ + { + "toolSpec": { + "name": "WordCount", + "description": "Counts words.", + "inputSchema": {"json": {"text": "string"}}, + } + } + ] + } + assert bedrock_provider.tools_to_bedrock_spec((tool,)) == expected + + +def test_tools_to_bedrock_spec_duplicate(bedrock_provider): + def word_count(text: str): + return len(text.split()) + + tool = Tool( + name="WordCount", + description="Counts words.", + parameters={"text": "string"}, + function=word_count, + ) + tool_duplicate = Tool( + name="WordCount", + description="Counts words.", + parameters={"text": "string"}, + function=word_count, + ) + tools = bedrock_provider.tools_to_bedrock_spec((tool, tool_duplicate)) + assert set(tool["toolSpec"]["name"] for tool in tools["tools"]) == {"WordCount"} diff --git a/packages/exchange/tests/providers/test_databricks.py b/packages/exchange/tests/providers/test_databricks.py new file mode 100644 index 000000000..3c1421146 --- /dev/null +++ b/packages/exchange/tests/providers/test_databricks.py @@ -0,0 +1,49 @@ +import os +from unittest.mock import patch + +import pytest +from exchange import Message, Text +from exchange.providers.databricks import DatabricksProvider + + +@pytest.fixture +@patch.dict( + os.environ, + {"DATABRICKS_HOST": "http://test-host", "DATABRICKS_TOKEN": "test_token"}, +) +def databricks_provider(): + return DatabricksProvider.from_env() + + +@patch("httpx.Client.post") +@patch("time.sleep", return_value=None) +@patch("logging.warning") +@patch("logging.error") +def test_databricks_completion(mock_error, mock_warning, mock_sleep, mock_post, databricks_provider): + mock_response = { + "choices": [{"message": {"role": "assistant", "content": "Hello!"}}], + "usage": {"prompt_tokens": 10, "completion_tokens": 25, "total_tokens": 35}, + } + mock_post.return_value.json.return_value = mock_response + + model = "my-databricks-model" + system = "You are a helpful assistant." 
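+    # Databricks serving endpoints accept an OpenAI-style chat payload, so the
+    # system prompt travels as the first message (asserted on the request below).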
+ messages = [Message.user("Hello")] + tools = () + + reply_message, reply_usage = databricks_provider.complete( + model=model, system=system, messages=messages, tools=tools + ) + + assert reply_message.content == [Text(text="Hello!")] + assert reply_usage.total_tokens == 35 + assert mock_post.call_count == 1 + mock_post.assert_called_once_with( + "serving-endpoints/my-databricks-model/invocations", + json={ + "messages": [ + {"role": "system", "content": system}, + {"role": "user", "content": "Hello"}, + ] + }, + ) diff --git a/packages/exchange/tests/providers/test_google.py b/packages/exchange/tests/providers/test_google.py new file mode 100644 index 000000000..47ad46b43 --- /dev/null +++ b/packages/exchange/tests/providers/test_google.py @@ -0,0 +1,147 @@ +import os +from unittest.mock import patch + +import httpx +import pytest +from exchange import Message, Text +from exchange.content import ToolResult, ToolUse +from exchange.providers.google import GoogleProvider +from exchange.tool import Tool + + +def example_fn(param: str) -> None: + """ + Testing function. + + Args: + param (str): Description of param1 + """ + pass + + +@pytest.fixture +@patch.dict(os.environ, {"GOOGLE_API_KEY": "test_api_key"}) +def google_provider(): + return GoogleProvider.from_env() + + +def test_google_response_to_text_message() -> None: + response = {"candidates": [{"content": {"parts": [{"text": "Hello from Gemini!"}], "role": "model"}}]} + message = GoogleProvider.google_response_to_message(response) + assert message.content[0].text == "Hello from Gemini!" + + +def test_google_response_to_tool_use_message() -> None: + response = { + "candidates": [ + { + "content": { + "parts": [{"functionCall": {"name": "example_fn", "args": {"param": "value"}}}], + "role": "model", + } + } + ] + } + + message = GoogleProvider.google_response_to_message(response) + assert message.content[0].name == "example_fn" + assert message.content[0].parameters == {"param": "value"} + + +def test_tools_to_google_spec() -> None: + tools = (Tool.from_function(example_fn),) + expected_spec = { + "functionDeclarations": [ + { + "name": "example_fn", + "description": "Testing function.", + "parameters": { + "type": "object", + "properties": {"param": {"type": "string", "description": "Description of param1"}}, + "required": ["param"], + }, + } + ] + } + result = GoogleProvider.tools_to_google_spec(tools) + assert result == expected_spec + + +def test_message_text_to_google_spec() -> None: + messages = [Message.user("Hello, Gemini")] + expected_spec = [{"role": "user", "parts": [{"text": "Hello, Gemini"}]}] + result = GoogleProvider.messages_to_google_spec(messages) + assert result == expected_spec + + +def test_messages_to_google_spec() -> None: + messages = [ + Message(role="user", content=[Text(text="Hello, Gemini")]), + Message( + role="assistant", + content=[ToolUse(id="1", name="example_fn", parameters={"param": "value"})], + ), + Message(role="user", content=[ToolResult(tool_use_id="1", output="Result")]), + ] + actual_spec = GoogleProvider.messages_to_google_spec(messages) + # != + expected_spec = [ + {"role": "user", "parts": [{"text": "Hello, Gemini"}]}, + {"role": "model", "parts": [{"functionCall": {"name": "example_fn", "args": {"param": "value"}}}]}, + {"role": "user", "parts": [{"functionResponse": {"name": "1", "response": {"content": "Result"}}}]}, + ] + + assert actual_spec == expected_spec + + +@patch("httpx.Client.post") +@patch("logging.warning") +@patch("logging.error") +def test_google_completion(mock_error, 
mock_warning, mock_post, google_provider): + mock_response = { + "candidates": [{"content": {"parts": [{"text": "Hello from Gemini!"}], "role": "model"}}], + "usageMetadata": {"promptTokenCount": 3, "candidatesTokenCount": 10, "totalTokenCount": 13}, + } + + # First attempts fail with status code 429, 2nd succeeds + def create_response(status_code, json_data=None): + response = httpx.Response(status_code) + response._content = httpx._content.json_dumps(json_data or {}).encode() + response._request = httpx.Request("POST", "https://generativelanguage.googleapis.com/v1beta/") + return response + + mock_post.side_effect = [ + create_response(429), # 1st attempt + create_response(200, mock_response), # Final success + ] + + model = "gemini-1.5-flash" + system = "You are a helpful assistant." + messages = [Message.user("Hello, Gemini")] + + reply_message, reply_usage = google_provider.complete(model=model, system=system, messages=messages) + + assert reply_message.content == [Text(text="Hello from Gemini!")] + assert reply_usage.total_tokens == 13 + assert mock_post.call_count == 2 + mock_post.assert_any_call( + "models/gemini-1.5-flash:generateContent", + json={ + "system_instruction": {"parts": [{"text": "You are a helpful assistant."}]}, + "contents": [{"role": "user", "parts": [{"text": "Hello, Gemini"}]}], + }, + ) + + +@pytest.mark.integration +def test_google_integration(): + provider = GoogleProvider.from_env() + model = "gemini-1.5-flash" # updated model to a known valid model + system = "You are a helpful assistant." + messages = [Message.user("Hello, Gemini")] + + # Run the completion + reply = provider.complete(model=model, system=system, messages=messages) + + assert reply[0].content is not None + print("Completion content from Google:", reply[0].content) diff --git a/packages/exchange/tests/providers/test_ollama.py b/packages/exchange/tests/providers/test_ollama.py new file mode 100644 index 000000000..5a66c482c --- /dev/null +++ b/packages/exchange/tests/providers/test_ollama.py @@ -0,0 +1,48 @@ +import os + +import pytest + +from exchange import Text, ToolUse +from exchange.providers.ollama import OllamaProvider, OLLAMA_MODEL +from .conftest import complete, tools + +OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", OLLAMA_MODEL) + + +@pytest.mark.vcr() +def test_ollama_complete(): + reply_message, reply_usage = complete(OllamaProvider, OLLAMA_MODEL) + + assert reply_message.content == [Text(text="Hello! I'm here to help. How can I assist you today? Let's chat. 
😊")] + assert reply_usage.total_tokens == 33 + + +@pytest.mark.integration +def test_ollama_complete_integration(): + reply = complete(OllamaProvider, OLLAMA_MODEL) + + assert reply[0].content is not None + print("Completion content from OpenAI:", reply[0].content) + + +@pytest.mark.vcr() +def test_ollama_tools(): + reply_message, reply_usage = tools(OllamaProvider, OLLAMA_MODEL) + + tool_use = reply_message.content[0] + assert isinstance(tool_use, ToolUse), f"Expected ToolUse, but was {type(tool_use).__name__}" + assert tool_use.id == "call_z6fgu3z3" + assert tool_use.name == "read_file" + assert tool_use.parameters == {"filename": "test.txt"} + assert reply_usage.total_tokens == 133 + + +@pytest.mark.integration +def test_ollama_tools_integration(): + reply = tools(OllamaProvider, OLLAMA_MODEL) + + tool_use = reply[0].content[0] + assert isinstance(tool_use, ToolUse), f"Expected ToolUse, but was {type(tool_use).__name__}" + assert tool_use.id is not None + assert tool_use.name == "read_file" + assert tool_use.parameters == {"filename": "test.txt"} diff --git a/packages/exchange/tests/providers/test_openai.py b/packages/exchange/tests/providers/test_openai.py new file mode 100644 index 000000000..45bc62050 --- /dev/null +++ b/packages/exchange/tests/providers/test_openai.py @@ -0,0 +1,63 @@ +import os + +import pytest + +from exchange import Text, ToolUse +from exchange.providers.openai import OpenAiProvider +from .conftest import complete, vision, tools + +OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini") + + +@pytest.mark.vcr() +def test_openai_complete(default_openai_env): + reply_message, reply_usage = complete(OpenAiProvider, OPENAI_MODEL) + + assert reply_message.content == [Text(text="Hello! How can I assist you today?")] + assert reply_usage.total_tokens == 27 + + +@pytest.mark.integration +def test_openai_complete_integration(): + reply = complete(OpenAiProvider, OPENAI_MODEL) + + assert reply[0].content is not None + print("Completion content from OpenAI:", reply[0].content) + + +@pytest.mark.vcr() +def test_openai_tools(default_openai_env): + reply_message, reply_usage = tools(OpenAiProvider, OPENAI_MODEL) + + tool_use = reply_message.content[0] + assert isinstance(tool_use, ToolUse), f"Expected ToolUse, but was {type(tool_use).__name__}" + assert tool_use.id == "call_xXYlw4A7Ud1qtCopuK5gEJrP" + assert tool_use.name == "read_file" + assert tool_use.parameters == {"filename": "test.txt"} + assert reply_usage.total_tokens == 122 + + +@pytest.mark.integration +def test_openai_tools_integration(): + reply = tools(OpenAiProvider, OPENAI_MODEL) + + tool_use = reply[0].content[0] + assert isinstance(tool_use, ToolUse), f"Expected ToolUse, but was {type(tool_use).__name__}" + assert tool_use.id is not None + assert tool_use.name == "read_file" + assert tool_use.parameters == {"filename": "test.txt"} + + +@pytest.mark.vcr() +def test_openai_vision(default_openai_env): + reply_message, reply_usage = vision(OpenAiProvider, OPENAI_MODEL) + + assert reply_message.content == [Text(text='The first entry in the menu says "Ask Goose."')] + assert reply_usage.total_tokens == 14241 + + +@pytest.mark.integration +def test_openai_vision_integration(): + reply = vision(OpenAiProvider, OPENAI_MODEL) + + assert "ask goose" in reply[0].text.lower() diff --git a/packages/exchange/tests/providers/test_provider_utils.py b/packages/exchange/tests/providers/test_provider_utils.py new file mode 100644 index 000000000..5ad0135ea --- /dev/null +++ 
b/packages/exchange/tests/providers/test_provider_utils.py @@ -0,0 +1,245 @@ +from copy import deepcopy +import json +from unittest.mock import Mock +from attrs import asdict +import httpx +import pytest +from unittest.mock import patch + +from exchange.content import Text, ToolResult, ToolUse +from exchange.message import Message +from exchange.providers.utils import ( + messages_to_openai_spec, + openai_response_to_message, + raise_for_status, + tools_to_openai_spec, +) +from exchange.tool import Tool + +OPEN_AI_TOOL_USE_RESPONSE = response = { + "choices": [ + { + "role": "assistant", + "message": { + "tool_calls": [ + { + "id": "1", + "function": { + "name": "example_fn", + "arguments": json.dumps( + { + "param": "value", + } + ), + # TODO: should this handle dict's as well? + }, + } + ], + }, + } + ], + "usage": { + "input_tokens": 10, + "output_tokens": 25, + "total_tokens": 35, + }, +} + + +def example_fn(param: str) -> None: + """ + Testing function. + + Args: + param (str): Description of param1 + """ + pass + + +def example_fn_two() -> str: + """ + Second testing function + + Returns: + str: Description of return value + """ + pass + + +def test_raise_for_status_success() -> None: + response = Mock(spec=httpx.Response) + response.status_code = 200 + + result = raise_for_status(response) + + assert result == response + + +def test_raise_for_status_failure_with_text() -> None: + response = Mock(spec=httpx.Response) + response.status_code = 404 + response.text = "Not Found: John Cena" + + try: + raise_for_status(response) + except httpx.HTTPStatusError as e: + assert e.response == response + assert str(e) == "404 Not Found: John Cena" + assert e.request is None + + +def test_raise_for_status_failure_without_text() -> None: + response = Mock(spec=httpx.Response) + response.status_code = 500 + response.text = "" + + try: + raise_for_status(response) + except httpx.HTTPStatusError as e: + assert e.response == response + assert str(e) == "500 Internal Server Error" + assert e.request is None + + +def test_messages_to_openai_spec() -> None: + messages = [ + Message(role="assistant", content=[Text("Hello!")]), + Message(role="user", content=[Text("How are you?")]), + Message( + role="assistant", + content=[ToolUse(id=1, name="tool1", parameters={"param1": "value1"})], + ), + Message(role="user", content=[ToolResult(tool_use_id=1, output="Result")]), + ] + + spec = messages_to_openai_spec(messages) + + assert spec == [ + {"role": "assistant", "content": "Hello!"}, + {"role": "user", "content": "How are you?"}, + { + "role": "assistant", + "tool_calls": [ + { + "id": 1, + "type": "function", + "function": { + "name": "tool1", + "arguments": '{"param1": "value1"}', + }, + } + ], + }, + { + "role": "tool", + "content": "Result", + "tool_call_id": 1, + }, + ] + + +def test_tools_to_openai_spec() -> None: + tools = (Tool.from_function(example_fn), Tool.from_function(example_fn_two)) + assert len(tools_to_openai_spec(tools)) == 2 + + +def test_tools_to_openai_spec_duplicate() -> None: + tools = (Tool.from_function(example_fn), Tool.from_function(example_fn)) + with pytest.raises(ValueError): + tools_to_openai_spec(tools) + + +def test_tools_to_openai_spec_single() -> None: + tools = Tool.from_function(example_fn) + expected_spec = [ + { + "type": "function", + "function": { + "name": "example_fn", + "description": "Testing function.", + "parameters": { + "type": "object", + "properties": { + "param": { + "type": "string", + "description": "Description of param1", + } + }, + "required": 
["param"], + }, + }, + }, + ] + result = tools_to_openai_spec((tools,)) + assert result == expected_spec + + +def test_tools_to_openai_spec_empty() -> None: + tools = () + expected_spec = [] + assert tools_to_openai_spec(tools) == expected_spec + + +def test_openai_response_to_message_text() -> None: + response = { + "choices": [ + { + "role": "assistant", + "message": {"content": "Hello from John Cena!"}, + } + ], + "usage": { + "input_tokens": 10, + "output_tokens": 25, + "total_tokens": 35, + }, + } + + message = openai_response_to_message(response) + + actual = asdict(message) + expect = asdict( + Message( + role="assistant", + content=[Text("Hello from John Cena!")], + ) + ) + actual.pop("id") + expect.pop("id") + assert actual == expect + + +def test_openai_response_to_message_valid_tooluse() -> None: + response = deepcopy(OPEN_AI_TOOL_USE_RESPONSE) + message = openai_response_to_message(response) + actual = asdict(message) + expect = asdict( + Message( + role="assistant", + content=[ToolUse(id=1, name="example_fn", parameters={"param": "value"})], + ) + ) + actual.pop("id") + actual["content"][0].pop("id") + expect.pop("id") + expect["content"][0].pop("id") + assert actual == expect + + +def test_openai_response_to_message_invalid_func_name() -> None: + response = deepcopy(OPEN_AI_TOOL_USE_RESPONSE) + response["choices"][0]["message"]["tool_calls"][0]["function"]["name"] = "invalid fn" + message = openai_response_to_message(response) + assert message.content[0].name == "invalid fn" + assert json.loads(message.content[0].parameters) == {"param": "value"} + assert message.content[0].is_error + assert message.content[0].error_message.startswith("The provided function name") + + +@patch("json.loads", side_effect=json.JSONDecodeError("error", "doc", 0)) +def test_openai_response_to_message_json_decode_error(mock_json) -> None: + response = deepcopy(OPEN_AI_TOOL_USE_RESPONSE) + message = openai_response_to_message(response) + assert message.content[0].name == "example_fn" + assert message.content[0].is_error + assert message.content[0].error_message.startswith("Could not interpret tool use") diff --git a/packages/exchange/tests/test_exchange.py b/packages/exchange/tests/test_exchange.py new file mode 100644 index 000000000..f01ef4694 --- /dev/null +++ b/packages/exchange/tests/test_exchange.py @@ -0,0 +1,763 @@ +from typing import List, Tuple + +import pytest + +from exchange.checkpoint import Checkpoint, CheckpointData +from exchange.content import Text, ToolResult, ToolUse +from exchange.exchange import Exchange +from exchange.message import Message +from exchange.moderators import PassiveModerator +from exchange.providers import Provider, Usage +from exchange.tool import Tool + + +def dummy_tool() -> str: + """An example tool""" + return "dummy response" + + +too_long_output = "x" * (2**20 + 1) +too_long_token_output = "word " * 128000 + + +def no_overlapping_checkpoints(exchange: Exchange) -> bool: + """Assert that there are no overlapping checkpoints in the exchange.""" + for i, checkpoint in enumerate(exchange.checkpoint_data.checkpoints): + for other_checkpoint in exchange.checkpoint_data.checkpoints[i + 1 :]: + if not checkpoint.end_index < other_checkpoint.start_index: + return False + return True + + +def checkpoint_to_index_pairs(checkpoints: List[Checkpoint]) -> List[Tuple[int, int]]: + return [(checkpoint.start_index, checkpoint.end_index) for checkpoint in checkpoints] + + +class MockProvider(Provider): + def __init__(self, sequence: List[Message], usage_dicts: List[dict]): 
+ # We'll use init to provide a preplanned reply sequence + self.sequence = sequence + self.call_count = 0 + self.usage_dicts = usage_dicts + + @staticmethod + def get_usage(data: dict) -> Usage: + usage = data.pop("usage") + input_tokens = usage.get("input_tokens") + output_tokens = usage.get("output_tokens") + total_tokens = usage.get("total_tokens") + + if total_tokens is None and input_tokens is not None and output_tokens is not None: + total_tokens = input_tokens + output_tokens + + return Usage( + input_tokens=input_tokens, + output_tokens=output_tokens, + total_tokens=total_tokens, + ) + + def complete(self, model: str, system: str, messages: List[Message], tools: List[Tool]) -> Message: + output = self.sequence[self.call_count] + usage = self.get_usage(self.usage_dicts[self.call_count]) + self.call_count += 1 + return (output, usage) + + +def test_reply_with_unsupported_tool(): + ex = Exchange( + provider=MockProvider( + sequence=[ + Message( + role="assistant", + content=[ToolUse(id="1", name="unsupported_tool", parameters={})], + ), + Message( + role="assistant", + content=[Text(text="Here is the completion after tool call")], + ), + ], + usage_dicts=[ + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + ], + ), + model="gpt-4o-2024-05-13", + system="You are a helpful assistant.", + tools=(Tool.from_function(dummy_tool),), + moderator=PassiveModerator(), + ) + + ex.add(Message(role="user", content=[Text(text="test")])) + + ex.reply() + + content = ex.messages[-2].content[0] + assert isinstance(content, ToolResult) and content.is_error and "no tool exists" in content.output.lower() + + +def test_invalid_tool_parameters(): + """Test handling of invalid tool parameters response""" + ex = Exchange( + provider=MockProvider( + sequence=[ + Message( + role="assistant", + content=[ToolUse(id="1", name="dummy_tool", parameters="invalid json")], + ), + Message( + role="assistant", + content=[Text(text="Here is the completion after tool call")], + ), + ], + usage_dicts=[ + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + ], + ), + model="gpt-4o-2024-05-13", + system="You are a helpful assistant.", + tools=[Tool.from_function(dummy_tool)], + moderator=PassiveModerator(), + ) + + ex.add(Message(role="user", content=[Text(text="test invalid parameters")])) + + ex.reply() + + content = ex.messages[-2].content[0] + assert isinstance(content, ToolResult) and content.is_error and "invalid json" in content.output.lower() + + +def test_max_tool_use_when_limit_reached(): + """Test the max_tool_use parameter in the reply method.""" + ex = Exchange( + provider=MockProvider( + sequence=[ + Message( + role="assistant", + content=[ToolUse(id="1", name="dummy_tool", parameters={})], + ), + Message( + role="assistant", + content=[ToolUse(id="2", name="dummy_tool", parameters={})], + ), + Message( + role="assistant", + content=[ToolUse(id="3", name="dummy_tool", parameters={})], + ), + ], + usage_dicts=[ + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + ], + ), + model="gpt-4o-2024-05-13", + system="You are a helpful assistant.", + tools=[Tool.from_function(dummy_tool)], + moderator=PassiveModerator(), + ) + + ex.add(Message(role="user", content=[Text(text="test max tool use")])) + + response = ex.reply(max_tool_use=3) + + assert ex.provider.call_count == 3 + assert 
"reached the limit" in response.content[0].text.lower() + + assert isinstance(ex.messages[-2].content[0], ToolResult) and ex.messages[-2].content[0].tool_use_id == "3" + + assert ex.messages[-1].role == "assistant" + + +def test_tool_output_too_long_character_error(): + """Test tool handling when output exceeds character limit.""" + + def long_output_tool_char() -> str: + return too_long_output + + ex = Exchange( + provider=MockProvider( + sequence=[ + Message( + role="assistant", + content=[ToolUse(id="1", name="long_output_tool_char", parameters={})], + ), + Message( + role="assistant", + content=[Text(text="Here is the completion after tool call")], + ), + ], + usage_dicts=[ + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + ], + ), + model="gpt-4o-2024-05-13", + system="You are a helpful assistant.", + tools=[Tool.from_function(long_output_tool_char)], + moderator=PassiveModerator(), + ) + + ex.add(Message(role="user", content=[Text(text="test long output char")])) + + ex.reply() + + content = ex.messages[-2].content[0] + assert ( + isinstance(content, ToolResult) + and content.is_error + and "output that was too long to handle" in content.output.lower() + ) + + +def test_tool_output_too_long_token_error(): + """Test tool handling when output exceeds token limit.""" + + def long_output_tool_token() -> str: + return too_long_token_output + + ex = Exchange( + provider=MockProvider( + sequence=[ + Message( + role="assistant", + content=[ToolUse(id="1", name="long_output_tool_token", parameters={})], + ), + Message( + role="assistant", + content=[Text(text="Here is the completion after tool call")], + ), + ], + usage_dicts=[ + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + ], + ), + model="gpt-4o-2024-05-13", + system="You are a helpful assistant.", + tools=[Tool.from_function(long_output_tool_token)], + moderator=PassiveModerator(), + ) + + ex.add(Message(role="user", content=[Text(text="test long output token")])) + + ex.reply() + + content = ex.messages[-2].content[0] + assert ( + isinstance(content, ToolResult) + and content.is_error + and "output that was too long to handle" in content.output.lower() + ) + + +@pytest.fixture(scope="function") +def normal_exchange() -> Exchange: + ex = Exchange( + provider=MockProvider( + sequence=[ + Message(role="assistant", content=[Text(text="Message 1")]), + Message(role="assistant", content=[Text(text="Message 2")]), + Message(role="assistant", content=[Text(text="Message 3")]), + Message(role="assistant", content=[Text(text="Message 4")]), + Message(role="assistant", content=[Text(text="Message 5")]), + ], + usage_dicts=[ + {"usage": {"total_tokens": 10, "input_tokens": 5, "output_tokens": 5}}, + {"usage": {"total_tokens": 28, "input_tokens": 10, "output_tokens": 18}}, + {"usage": {"total_tokens": 33, "input_tokens": 28, "output_tokens": 5}}, + {"usage": {"total_tokens": 40, "input_tokens": 32, "output_tokens": 8}}, + {"usage": {"total_tokens": 50, "input_tokens": 40, "output_tokens": 10}}, + ], + ), + model="gpt-4o-2024-05-13", + system="You are a helpful assistant.", + tools=(Tool.from_function(dummy_tool),), + moderator=PassiveModerator(), + checkpoint_data=CheckpointData(), + ) + return ex + + +@pytest.fixture(scope="function") +def resumed_exchange() -> Exchange: + messages = [ + Message(role="user", content=[Text(text="User message 1")]), + Message(role="assistant", content=[Text(text="Assistant Message 1")]), + 
Message(role="user", content=[Text(text="User message 2")]), + Message(role="assistant", content=[Text(text="Assistant Message 2")]), + Message(role="user", content=[Text(text="User message 3")]), + Message(role="assistant", content=[Text(text="Assistant Message 3")]), + ] + provider = MockProvider( + sequence=[ + Message(role="assistant", content=[Text(text="Assistant Message 4")]), + ], + usage_dicts=[ + {"usage": {"total_tokens": 40, "input_tokens": 32, "output_tokens": 8}}, + ], + ) + ex = Exchange( + provider=provider, + messages=messages, + tools=[], + model="gpt-4o-2024-05-13", + system="You are a helpful assistant.", + checkpoint_data=CheckpointData(), + moderator=PassiveModerator(), + ) + return ex + + +def test_checkpoints_on_exchange(normal_exchange): + """Test checkpoints on an exchange.""" + ex = normal_exchange + ex.add(Message(role="user", content=[Text(text="User message")])) + ex.reply() + ex.add(Message(role="user", content=[Text(text="User message")])) + ex.reply() + ex.add(Message(role="user", content=[Text(text="User message")])) + ex.reply() + + # Check if checkpoints are created correctly + checkpoints = ex.checkpoint_data.checkpoints + assert len(checkpoints) == 6 + for i in range(len(ex.messages)): + # asserting that each message has a corresponding checkpoint + assert checkpoints[i].start_index == i + assert checkpoints[i].end_index == i + + # Check if the messages are ordered correctly + assert [msg.content[0].text for msg in ex.messages] == [ + "User message", + "Message 1", + "User message", + "Message 2", + "User message", + "Message 3", + ] + assert no_overlapping_checkpoints(ex) + + +def test_checkpoints_on_resumed_exchange(resumed_exchange) -> None: + ex = resumed_exchange + ex.pop_last_message() + ex.reply() + + checkpoints = ex.checkpoint_data.checkpoints + assert len(checkpoints) == 2 + assert len(ex.messages) == 6 + assert checkpoints[0].token_count == 32 + assert checkpoints[0].start_index == 0 + assert checkpoints[0].end_index == 4 + assert checkpoints[1].token_count == 8 + assert checkpoints[1].start_index == 5 + assert checkpoints[1].end_index == 5 + assert no_overlapping_checkpoints(ex) + + +def test_pop_last_checkpoint_on_resumed_exchange(resumed_exchange) -> None: + ex = resumed_exchange + ex.add(Message(role="user", content=[Text(text="Assistant Message 4")])) + ex.reply() + ex.pop_last_checkpoint() + + assert len(ex.messages) == 7 + assert len(ex.checkpoint_data.checkpoints) == 1 + + ex.pop_last_checkpoint() + assert len(ex.messages) == 0 + assert len(ex.checkpoint_data.checkpoints) == 0 + assert no_overlapping_checkpoints(ex) + + +def test_pop_last_checkpoint_on_normal_exchange(normal_exchange) -> None: + ex = normal_exchange + ex.add(Message(role="user", content=[Text(text="User message")])) + ex.reply() + ex.add(Message(role="user", content=[Text(text="User message")])) + ex.reply() + ex.pop_last_checkpoint() + ex.pop_last_checkpoint() + + assert len(ex.messages) == 2 + assert len(ex.checkpoint_data.checkpoints) == 2 + assert no_overlapping_checkpoints(ex) + ex.add(Message(role="user", content=[Text(text="User message")])) + ex.pop_last_checkpoint() + assert len(ex.messages) == 1 + assert len(ex.checkpoint_data.checkpoints) == 1 + ex.reply() + assert len(ex.messages) == 2 + assert len(ex.checkpoint_data.checkpoints) == 2 + assert no_overlapping_checkpoints(ex) + + +def test_pop_first_message_no_messages(): + ex = Exchange( + provider=MockProvider(sequence=[], usage_dicts=[]), + model="gpt-4o-2024-05-13", + system="You are a helpful 
assistant.", + tools=[Tool.from_function(dummy_tool)], + moderator=PassiveModerator(), + ) + + with pytest.raises(ValueError) as e: + ex.pop_first_message() + assert str(e.value) == "There are no messages to pop" + + +def test_pop_first_message_checkpoint_with_many_messages(resumed_exchange): + ex = resumed_exchange + ex.pop_last_message() + ex.reply() + + assert len(ex.messages) == 6 + assert len(ex.checkpoint_data.checkpoints) == 2 + assert ex.checkpoint_data.checkpoints[0].start_index == 0 + assert ex.checkpoint_data.checkpoints[0].end_index == 4 + assert ex.checkpoint_data.checkpoints[1].start_index == 5 + assert ex.checkpoint_data.checkpoints[1].end_index == 5 + assert ex.checkpoint_data.message_index_offset == 0 + assert no_overlapping_checkpoints(ex) + + ex.pop_first_message() + + assert len(ex.messages) == 5 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert ex.checkpoint_data.checkpoints[0].start_index == 5 + assert ex.checkpoint_data.checkpoints[0].end_index == 5 + assert ex.checkpoint_data.message_index_offset == 1 + assert no_overlapping_checkpoints(ex) + + ex.pop_first_message() + + assert len(ex.messages) == 4 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert ex.checkpoint_data.checkpoints[0].start_index == 5 + assert ex.checkpoint_data.checkpoints[0].end_index == 5 + assert ex.checkpoint_data.message_index_offset == 2 + assert no_overlapping_checkpoints(ex) + + ex.pop_first_message() + + assert len(ex.messages) == 3 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert ex.checkpoint_data.checkpoints[0].start_index == 5 + assert ex.checkpoint_data.checkpoints[0].end_index == 5 + assert ex.checkpoint_data.message_index_offset == 3 + assert no_overlapping_checkpoints(ex) + + ex.pop_first_message() + + assert len(ex.messages) == 2 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert ex.checkpoint_data.checkpoints[0].start_index == 5 + assert ex.checkpoint_data.checkpoints[0].end_index == 5 + assert ex.checkpoint_data.message_index_offset == 4 + assert no_overlapping_checkpoints(ex) + + ex.pop_first_message() + + assert len(ex.messages) == 1 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert ex.checkpoint_data.checkpoints[0].start_index == 5 + assert ex.checkpoint_data.checkpoints[0].end_index == 5 + assert ex.checkpoint_data.message_index_offset == 5 + assert no_overlapping_checkpoints(ex) + + ex.pop_first_message() + + assert len(ex.messages) == 0 + assert len(ex.checkpoint_data.checkpoints) == 0 + assert ex.checkpoint_data.message_index_offset == 0 + assert no_overlapping_checkpoints(ex) + + with pytest.raises(ValueError) as e: + ex.pop_first_message() + + assert str(e.value) == "There are no messages to pop" + + +def test_varied_message_manipulation(normal_exchange): + ex = normal_exchange + ex.add(Message(role="user", content=[Text(text="User message 1")])) + ex.reply() + + ex.pop_first_message() + + ex.add(Message(role="user", content=[Text(text="User message 2")])) + ex.reply() + + assert len(ex.messages) == 3 + assert len(ex.checkpoint_data.checkpoints) == 3 + assert ex.checkpoint_data.message_index_offset == 1 + # (start, end) + # (1, 1), (2, 2), (3, 3) + # actual_index_in_messages_arr = any checkpoint index - offset + assert no_overlapping_checkpoints(ex) + for i in range(3): + assert ex.checkpoint_data.checkpoints[i].start_index == i + 1 + assert ex.checkpoint_data.checkpoints[i].end_index == i + 1 + + ex.pop_last_message() + + assert len(ex.messages) == 2 + assert len(ex.checkpoint_data.checkpoints) == 2 + assert 
ex.checkpoint_data.message_index_offset == 1 + assert no_overlapping_checkpoints(ex) + for i in range(2): + assert ex.checkpoint_data.checkpoints[i].start_index == i + 1 + assert ex.checkpoint_data.checkpoints[i].end_index == i + 1 + + ex.add(Message(role="assistant", content=[Text(text="Assistant message")])) + ex.add(Message(role="user", content=[Text(text="User message 3")])) + ex.reply() + + assert len(ex.messages) == 5 + assert len(ex.checkpoint_data.checkpoints) == 4 + assert ex.checkpoint_data.message_index_offset == 1 + assert no_overlapping_checkpoints(ex) + assert checkpoint_to_index_pairs(ex.checkpoint_data.checkpoints) == [(1, 1), (2, 2), (3, 4), (5, 5)] + + ex.pop_last_checkpoint() + + assert len(ex.messages) == 4 + assert len(ex.checkpoint_data.checkpoints) == 3 + assert ex.checkpoint_data.message_index_offset == 1 + assert no_overlapping_checkpoints(ex) + assert checkpoint_to_index_pairs(ex.checkpoint_data.checkpoints) == [(1, 1), (2, 2), (3, 4)] + + ex.pop_first_message() + + assert len(ex.messages) == 3 + assert len(ex.checkpoint_data.checkpoints) == 2 + assert ex.checkpoint_data.message_index_offset == 2 + assert no_overlapping_checkpoints(ex) + assert checkpoint_to_index_pairs(ex.checkpoint_data.checkpoints) == [(2, 2), (3, 4)] + + ex.pop_last_message() + + assert len(ex.messages) == 2 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert ex.checkpoint_data.message_index_offset == 2 + assert no_overlapping_checkpoints(ex) + assert checkpoint_to_index_pairs(ex.checkpoint_data.checkpoints) == [(2, 2)] + + ex.pop_last_message() + assert len(ex.messages) == 1 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert ex.checkpoint_data.message_index_offset == 2 + assert no_overlapping_checkpoints(ex) + assert checkpoint_to_index_pairs(ex.checkpoint_data.checkpoints) == [(2, 2)] + + ex.add(Message(role="assistant", content=[Text(text="Assistant message")])) + ex.add(Message(role="user", content=[Text(text="User message 5")])) + ex.pop_last_checkpoint() + + assert len(ex.messages) == 0 + assert len(ex.checkpoint_data.checkpoints) == 0 + + ex.add(Message(role="user", content=[Text(text="User message 6")])) + ex.reply() + + assert len(ex.messages) == 2 + assert len(ex.checkpoint_data.checkpoints) == 2 + assert ex.checkpoint_data.message_index_offset == 2 + assert no_overlapping_checkpoints(ex) + assert checkpoint_to_index_pairs(ex.checkpoint_data.checkpoints) == [(2, 2), (3, 3)] + + ex.pop_last_message() + + assert len(ex.messages) == 1 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert ex.checkpoint_data.message_index_offset == 2 + assert no_overlapping_checkpoints(ex) + assert checkpoint_to_index_pairs(ex.checkpoint_data.checkpoints) == [(2, 2)] + + ex.pop_first_message() + + assert len(ex.messages) == 0 + assert len(ex.checkpoint_data.checkpoints) == 0 + assert ex.checkpoint_data.message_index_offset == 0 + + ex.add(Message(role="user", content=[Text(text="User message 7")])) + ex.pop_last_message() + + assert len(ex.messages) == 0 + assert len(ex.checkpoint_data.checkpoints) == 0 + assert ex.checkpoint_data.message_index_offset == 0 + + +def test_pop_last_message_when_no_checkpoints_but_messages_present(normal_exchange): + ex = normal_exchange + ex.add(Message(role="user", content=[Text(text="User message")])) + + ex.pop_last_message() + + assert len(ex.messages) == 0 + assert len(ex.checkpoint_data.checkpoints) == 0 + assert ex.checkpoint_data.message_index_offset == 0 + + +def test_pop_first_message_when_no_checkpoints_but_message_present(normal_exchange): + 
ex = normal_exchange + ex.add(Message(role="user", content=[Text(text="User message")])) + + with pytest.raises(ValueError) as e: + ex.pop_first_message() + + assert str(e.value) == "There must be at least one checkpoint to pop the first message" + + +def test_pop_first_checkpoint_size_n(resumed_exchange): + ex = resumed_exchange + ex.pop_last_message() # needed because the last message is an assistant message + ex.reply() + + ex.pop_first_checkpoint() + assert ex.checkpoint_data.message_index_offset == 5 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert len(ex.messages) == 1 + + ex.pop_first_checkpoint() + assert ex.checkpoint_data.message_index_offset == 0 + assert len(ex.checkpoint_data.checkpoints) == 0 + assert len(ex.messages) == 0 + + +def test_pop_first_checkpoint_size_1(normal_exchange): + ex = normal_exchange + ex.add(Message(role="user", content=[Text(text="User message")])) + ex.reply() + + ex.pop_first_checkpoint() + assert ex.checkpoint_data.message_index_offset == 1 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert len(ex.messages) == 1 + + ex.pop_first_checkpoint() + assert ex.checkpoint_data.message_index_offset == 0 + assert len(ex.checkpoint_data.checkpoints) == 0 + assert len(ex.messages) == 0 + + +def test_pop_first_checkpoint_no_checkpoints(normal_exchange): + ex = normal_exchange + + with pytest.raises(ValueError) as e: + ex.pop_first_checkpoint() + + assert str(e.value) == "There are no checkpoints to pop" + + +def test_prepend_checkpointed_message_empty_exchange(normal_exchange): + ex = normal_exchange + ex.prepend_checkpointed_message(Message(role="assistant", content=[Text(text="Assistant message")]), 10) + + assert ex.checkpoint_data.message_index_offset == 0 + assert len(ex.checkpoint_data.checkpoints) == 1 + assert ex.checkpoint_data.checkpoints[0].start_index == 0 + assert ex.checkpoint_data.checkpoints[0].end_index == 0 + + ex.add(Message(role="user", content=[Text(text="User message")])) + ex.reply() + + assert ex.checkpoint_data.message_index_offset == 0 + assert len(ex.checkpoint_data.checkpoints) == 3 + assert len(ex.messages) == 3 + assert no_overlapping_checkpoints(ex) + + ex.pop_first_checkpoint() + + assert ex.checkpoint_data.message_index_offset == 1 + assert len(ex.checkpoint_data.checkpoints) == 2 + assert len(ex.messages) == 2 + assert no_overlapping_checkpoints(ex) + + ex.prepend_checkpointed_message(Message(role="assistant", content=[Text(text="Assistant message")]), 10) + assert ex.checkpoint_data.message_index_offset == 0 + assert len(ex.checkpoint_data.checkpoints) == 3 + assert len(ex.messages) == 3 + assert no_overlapping_checkpoints(ex) + + +def test_generate_successful_response_on_first_try(normal_exchange): + ex = normal_exchange + ex.add(Message(role="user", content=[Text("Hello")])) + ex.generate() + + +def test_rewind_in_normal_exchange(normal_exchange): + ex = normal_exchange + ex.rewind() + + assert len(ex.messages) == 0 + assert len(ex.checkpoint_data.checkpoints) == 0 + + ex.add(Message(role="user", content=[Text("Hello")])) + ex.generate() + ex.add(Message(role="user", content=[Text("Hello")])) + + # testing if it works with a user text message at the end + ex.rewind() + + assert len(ex.messages) == 2 + assert len(ex.checkpoint_data.checkpoints) == 2 + + ex.add(Message(role="user", content=[Text("Hello")])) + ex.generate() + + # testing if it works with a non-user text message at the end + ex.rewind() + + assert len(ex.messages) == 2 + assert len(ex.checkpoint_data.checkpoints) == 2 + + +def 
test_rewind_with_tool_usage(): + # simulating a real exchange with tool usage + ex = Exchange( + provider=MockProvider( + sequence=[ + Message.assistant("Hello!"), + Message( + role="assistant", + content=[ToolUse(id="1", name="dummy_tool", parameters={})], + ), + Message( + role="assistant", + content=[ToolUse(id="2", name="dummy_tool", parameters={})], + ), + Message.assistant("Done!"), + ], + usage_dicts=[ + {"usage": {"input_tokens": 12, "output_tokens": 23}}, + {"usage": {"input_tokens": 27, "output_tokens": 44}}, + {"usage": {"input_tokens": 50, "output_tokens": 56}}, + {"usage": {"input_tokens": 60, "output_tokens": 76}}, + ], + ), + model="gpt-4o-2024-05-13", + system="You are a helpful assistant.", + tools=[Tool.from_function(dummy_tool)], + moderator=PassiveModerator(), + ) + ex.add(Message(role="user", content=[Text(text="test")])) + ex.reply() + ex.add(Message(role="user", content=[Text(text="kick it off!")])) + ex.reply() + + # removing the last message to simulate not getting a response + ex.pop_last_message() + + # calling rewind to last user message + ex.rewind() + + assert len(ex.messages) == 2 + assert len(ex.checkpoint_data.checkpoints) == 2 + assert no_overlapping_checkpoints(ex) + assert ex.messages[0].content[0].text == "test" + assert type(ex.messages[1].content[0]) is Text + assert ex.messages[1].role == "assistant" diff --git a/packages/exchange/tests/test_exchange_collect_usage.py b/packages/exchange/tests/test_exchange_collect_usage.py new file mode 100644 index 000000000..590dc709b --- /dev/null +++ b/packages/exchange/tests/test_exchange_collect_usage.py @@ -0,0 +1,33 @@ +from unittest.mock import MagicMock +from exchange.exchange import Exchange +from exchange.message import Message +from exchange.moderators.passive import PassiveModerator +from exchange.providers.base import Provider +from exchange.tool import Tool +from exchange.token_usage_collector import _TokenUsageCollector + +MODEL_NAME = "test-model" + + +def create_exchange(mock_provider, dummy_tool): + return Exchange( + provider=mock_provider, + model=MODEL_NAME, + system="test-system", + tools=(Tool.from_function(dummy_tool),), + messages=[], + moderator=PassiveModerator(), + ) + + +def test_exchange_generate_collect_usage(usage_factory, dummy_tool, monkeypatch): + mock_provider = MagicMock(spec=Provider) + mock_usage_collector = MagicMock(spec=_TokenUsageCollector) + usage = usage_factory() + mock_provider.complete.return_value = (Message.assistant("msg"), usage) + exchange = create_exchange(mock_provider, dummy_tool) + + monkeypatch.setattr("exchange.exchange._token_usage_collector", mock_usage_collector) + exchange.generate() + + mock_usage_collector.collect.assert_called_once_with(MODEL_NAME, usage) diff --git a/packages/exchange/tests/test_exchange_frozen.py b/packages/exchange/tests/test_exchange_frozen.py new file mode 100644 index 000000000..a3095b3a3 --- /dev/null +++ b/packages/exchange/tests/test_exchange_frozen.py @@ -0,0 +1,48 @@ +import pytest +from attr.exceptions import FrozenInstanceError +from exchange.content import Text +from exchange.exchange import Exchange +from exchange.moderators import PassiveModerator +from exchange.message import Message +from exchange.providers import Provider, Usage +from exchange.tool import Tool + + +class MockProvider(Provider): + def complete(self, model, system, messages, tools=None): + return Message(role="assistant", content=[Text(text="This is a mock response.")]), Usage.from_dict( + {"total_tokens": 35} + ) + + +def 
test_exchange_immutable(dummy_tool): + # Create an instance of Exchange + provider = MockProvider() + # intentionally pass a list instead of a tuple for tools; it should be converted to a tuple + exchange = Exchange( + provider=provider, + model="test-model", + system="test-system", + tools=[Tool.from_function(dummy_tool)], + messages=[Message(role="user", content=[Text(text="Hello!")])], + moderator=PassiveModerator(), + ) + + # Try to directly modify a field (should raise an error) + with pytest.raises(FrozenInstanceError): + exchange.system = "" + + with pytest.raises(AttributeError): + exchange.tools.append("anything") + + # The replace method should return a new instance with a deep copy of the messages + new_exchange = exchange.replace(system="changed") + + assert new_exchange.system == "changed" + assert len(exchange.messages) == 1 + assert len(new_exchange.messages) == 1 + + # Ensure that the messages are deep copied + new_exchange.messages[0].content[0].text = "Changed!" + assert exchange.messages[0].content[0].text == "Hello!" + assert new_exchange.messages[0].content[0].text == "Changed!" diff --git a/packages/exchange/tests/test_image.png b/packages/exchange/tests/test_image.png new file mode 100644 index 000000000..3488b8a51 Binary files /dev/null and b/packages/exchange/tests/test_image.png differ diff --git a/packages/exchange/tests/test_integration.py b/packages/exchange/tests/test_integration.py new file mode 100644 index 000000000..1eb198082 --- /dev/null +++ b/packages/exchange/tests/test_integration.py @@ -0,0 +1,90 @@ +import os +import pytest +from exchange.exchange import Exchange +from exchange.message import Message +from exchange.moderators import ContextTruncate +from exchange.providers import get_provider +from exchange.providers.ollama import OLLAMA_MODEL +from exchange.tool import Tool +from tests.conftest import read_file + +too_long_chars = "x" * (2**20 + 1) + +cases = [ + # Set seed and temperature for more determinism, to avoid flakes + (get_provider("ollama"), os.getenv("OLLAMA_MODEL", OLLAMA_MODEL), dict(seed=3, temperature=0.1)), + (get_provider("openai"), os.getenv("OPENAI_MODEL", "gpt-4o-mini"), dict()), + (get_provider("azure"), os.getenv("AZURE_MODEL", "gpt-4o-mini"), dict()), + (get_provider("databricks"), "databricks-meta-llama-3-70b-instruct", dict()), + (get_provider("bedrock"), "anthropic.claude-3-5-sonnet-20240620-v1:0", dict()), + (get_provider("google"), "gemini-1.5-flash", dict()), +] + + +@pytest.mark.integration +@pytest.mark.parametrize("provider,model,kwargs", cases) +def test_simple(provider, model, kwargs): + provider = provider.from_env() + + ex = Exchange( + provider=provider, + model=model, + moderator=ContextTruncate(model), + system="You are a helpful assistant.", + generation_args=kwargs, + ) + + ex.add(Message.user("Who is the most famous wizard from the lord of the rings")) + + response = ex.reply() + + # This could be flaky in principle, but in our experience so far it hasn't been + assert "gandalf" in response.text.lower() + + +@pytest.mark.integration +@pytest.mark.parametrize("provider,model,kwargs", cases) +def test_tools(provider, model, kwargs, tmp_path): + provider = provider.from_env() + + ex = Exchange( + provider=provider, + model=model, + moderator=ContextTruncate(model), + system="You are a helpful assistant. Expect to need to read a file using read_file.", + tools=(Tool.from_function(read_file),), + generation_args=kwargs, + ) + + ex.add(Message.user("What are the contents of this file? test.txt")) +
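+ # the model is expected to call the read_file tool and answer from its output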
test.txt")) + + response = ex.reply() + + assert "hello exchange" in response.text.lower() + + +@pytest.mark.integration +@pytest.mark.parametrize("provider,model,kwargs", cases) +def test_tool_use_output_chars(provider, model, kwargs): + provider = provider.from_env() + + def get_password() -> str: + """Return the password for authentication""" + return too_long_chars + + ex = Exchange( + provider=provider, + model=model, + moderator=ContextTruncate(model), + system="You are a helpful assistant. Expect to need to authenticate using get_password.", + tools=(Tool.from_function(get_password),), + generation_args=kwargs, + ) + + ex.add(Message.user("Can you authenticate this session by responding with the password")) + + ex.reply() + + # Without our error handling, this would raise + # string too long. Expected a string with maximum length 1048576, but got a string with length ... diff --git a/packages/exchange/tests/test_integration_vision.py b/packages/exchange/tests/test_integration_vision.py new file mode 100644 index 000000000..20f165ade --- /dev/null +++ b/packages/exchange/tests/test_integration_vision.py @@ -0,0 +1,44 @@ +import os + +import pytest +from exchange.content import ToolResult, ToolUse +from exchange.exchange import Exchange +from exchange.message import Message +from exchange.moderators import ContextTruncate +from exchange.providers import get_provider + +cases = [ + (get_provider("openai"), os.getenv("OPENAI_MODEL", "gpt-4o-mini")), +] + + +@pytest.mark.integration # skipped in CI/CD +@pytest.mark.parametrize("provider,model", cases) +def test_simple(provider, model): + provider = provider.from_env() + + ex = Exchange( + provider=provider, + model=model, + moderator=ContextTruncate(model), + system="You are a helpful assistant.", + ) + + ex.add(Message.user("What does the first entry in the menu say?")) + ex.add( + Message( + role="assistant", + content=[ToolUse(id="xyz", name="screenshot", parameters={})], + ) + ) + ex.add( + Message( + role="user", + content=[ToolResult(tool_use_id="xyz", output='"image:tests/test_image.png"')], + ) + ) + + response = ex.reply() + + # It's possible this can be flakey, but in experience so far haven't seen it + assert "ask goose" in response.text.lower() diff --git a/packages/exchange/tests/test_message.py b/packages/exchange/tests/test_message.py new file mode 100644 index 000000000..d5442eb75 --- /dev/null +++ b/packages/exchange/tests/test_message.py @@ -0,0 +1,96 @@ +import subprocess +from pathlib import Path +import pytest + +from exchange.message import Message +from exchange.content import Text, ToolUse, ToolResult + + +def test_user_message(): + user_message = Message.user("abcd") + assert user_message.role == "user" + assert user_message.text == "abcd" + + +def test_assistant_message(): + assistant_message = Message.assistant("abcd") + assert assistant_message.role == "assistant" + assert assistant_message.text == "abcd" + + +def test_message_tool_use(): + from exchange.content import ToolUse + + tu1 = ToolUse(id="1", name="tool", parameters={}) + tu2 = ToolUse(id="2", name="tool", parameters={}) + message = Message(role="assistant", content=[tu1, tu2]) + assert len(message.tool_use) == 2 + assert message.tool_use[0].name == "tool" + + +def test_message_tool_result(): + from exchange.content import ToolResult + + tr1 = ToolResult(tool_use_id="1", output="result") + tr2 = ToolResult(tool_use_id="2", output="result") + message = Message(role="user", content=[tr1, tr2]) + assert len(message.tool_result) == 2 + assert 
message.tool_result[0].output == "result" + + +def test_message_load(tmpdir): + # To emulate the expected relative lookup, we need to create a mock code dir + # and run the load in a subprocess + test_dir = Path(tmpdir) + + # Create a temporary Jinja template file in the test_dir + template_content = "hello {{ name }} {% include 'relative.jinja' %}" + template_path = test_dir / "template.jinja" + template_path.write_text(template_content) + + relative_content = "and {{ name2 }}" + relative_path = test_dir / "relative.jinja" + relative_path.write_text(relative_content) + + # Create a temporary Python file in the sub_dir that calls the load method with a relative path + python_file_content = """ +from exchange.message import Message + +def test_function(): + message = Message.load('template.jinja', name="a", name2="b") + assert message.text == "hello a and b" + assert message.role == "user" + +test_function() +""" + python_file_path = test_dir / "test_script.py" + python_file_path.write_text(python_file_content) + + # Execute the temporary Python file to test the relative lookup functionality + result = subprocess.run(["python3", str(python_file_path)]) + + assert result.returncode == 0 + + +def test_message_validation(): + # Valid user message + message = Message(role="user", content=[Text(text="Hello")]) + assert message.text == "Hello" + + # Valid assistant message + message = Message(role="assistant", content=[Text(text="Hello")]) + assert message.text == "Hello" + + # Invalid message: user with tool_use + with pytest.raises(ValueError): + Message( + role="user", + content=[Text(text=""), ToolUse(id="1", name="tool", parameters={})], + ) + + # Invalid message: assistant with tool_result + with pytest.raises(ValueError): + Message( + role="assistant", + content=[Text(text=""), ToolResult(tool_use_id="1", output="result")], + ) diff --git a/packages/exchange/tests/test_summarizer.py b/packages/exchange/tests/test_summarizer.py new file mode 100644 index 000000000..fa7281920 --- /dev/null +++ b/packages/exchange/tests/test_summarizer.py @@ -0,0 +1,227 @@ +import pytest +from exchange import Exchange, Message +from exchange.content import ToolResult, ToolUse +from exchange.moderators.passive import PassiveModerator +from exchange.moderators.summarizer import ContextSummarizer +from exchange.providers import Usage + + +class MockProvider: + def complete(self, model, system, messages, tools): + assistant_message_text = "Summarized content here." + output_tokens = len(assistant_message_text) + total_input_tokens = sum(len(msg.text) for msg in messages) + if not messages or messages[-1].role == "assistant": + message = Message.user(assistant_message_text) + else: + message = Message.assistant(assistant_message_text) + total_tokens = total_input_tokens + output_tokens + usage = Usage( + input_tokens=total_input_tokens, + output_tokens=output_tokens, + total_tokens=total_tokens, + ) + return message, usage + + +@pytest.fixture +def exchange_instance(): + ex = Exchange( + provider=MockProvider(), + model="test-model", + system="test-system", + messages=[ + Message.user("Hi, can you help me with my homework?"), + Message.assistant("Of course! What do you need help with?"), + Message.user("I need help with math problems."), + Message.assistant("Sure, I can help with that. Let's get started."), + Message.user("Can you also help with my science homework?"), + Message.assistant("Yes, I can help with science too."), + Message.user("That's great! 
How about history?"), + Message.assistant("Of course, I can help with history as well."), + Message.user("Thanks! You're very helpful."), + Message.assistant("You're welcome! I'm here to help."), + ], + moderator=PassiveModerator(), + ) + return ex + + +@pytest.fixture +def summarizer_instance(): + return ContextSummarizer(max_tokens=300) + + +def test_context_summarizer_rewrite(exchange_instance: Exchange, summarizer_instance: ContextSummarizer): + # Pre-checks + assert len(exchange_instance.messages) == 10 + + exchange_instance.generate() + + # the exchange instance has a PassiveModerator so the messages are not truncated nor summarized + assert len(exchange_instance.messages) == 11 + assert len(exchange_instance.checkpoint_data.checkpoints) == 2 + + # we now tell the summarizer to summarize the exchange + summarizer_instance.rewrite(exchange_instance) + + assert exchange_instance.checkpoint_data.total_token_count <= 200 + assert len(exchange_instance.messages) == 2 + + # Assert that summarized content is the first message + first_message = exchange_instance.messages[0] + assert first_message.role == "user" or first_message.role == "assistant" + assert any("summarized" in content.text.lower() for content in first_message.content) + + # Ensure roles alternate in the output + for i in range(1, len(exchange_instance.messages)): + assert ( + exchange_instance.messages[i - 1].role != exchange_instance.messages[i].role + ), "Messages must alternate between user and assistant" + + +MESSAGE_SEQUENCE = [ + Message.user("Hi, can you help me with my homework?"), + Message.assistant("Of course! What do you need help with?"), + Message.user("I need help with math problems."), + Message.assistant("Sure, I can help with that. Let's get started."), + Message.user("What is 2 + 2, 3*3, 9/5, 2*20, 14/2?"), + Message( + role="assistant", + content=[ToolUse(id="1", name="add", parameters={"a": 2, "b": 2})], + ), + Message(role="user", content=[ToolResult(tool_use_id="1", output="4")]), + Message( + role="assistant", + content=[ToolUse(id="2", name="multiply", parameters={"a": 3, "b": 3})], + ), + Message(role="user", content=[ToolResult(tool_use_id="2", output="9")]), + Message( + role="assistant", + content=[ToolUse(id="3", name="divide", parameters={"a": 9, "b": 5})], + ), + Message(role="user", content=[ToolResult(tool_use_id="3", output="1.8")]), + Message( + role="assistant", + content=[ToolUse(id="4", name="multiply", parameters={"a": 2, "b": 20})], + ), + Message(role="user", content=[ToolResult(tool_use_id="4", output="40")]), + Message( + role="assistant", + content=[ToolUse(id="5", name="divide", parameters={"a": 14, "b": 2})], + ), + Message(role="user", content=[ToolResult(tool_use_id="5", output="7")]), + Message.assistant("I'm done calculating the answers to your math questions."), + Message.user("Can you also help with my science homework?"), + Message.assistant("Yes, I can help with science too."), + Message.user("What is the speed of light? The frequency of a photon? 
The mass of an electron?"), + Message( + role="assistant", + content=[ToolUse(id="6", name="speed_of_light", parameters={})], + ), + Message(role="user", content=[ToolResult(tool_use_id="6", output="299,792,458 m/s")]), + Message( + role="assistant", + content=[ToolUse(id="7", name="photon_frequency", parameters={})], + ), + Message(role="user", content=[ToolResult(tool_use_id="7", output="2.418 x 10^14 Hz")]), + Message(role="assistant", content=[ToolUse(id="8", name="electron_mass", parameters={})]), + Message( + role="user", + content=[ToolResult(tool_use_id="8", output="9.10938356 x 10^-31 kg")], + ), + Message.assistant("I'm done calculating the answers to your science questions."), + Message.user("That's great! How about history?"), + Message.assistant("Of course, I can help with history as well."), + Message.user("Thanks! You're very helpful."), + Message.assistant("You're welcome! I'm here to help."), +] + + +class AnotherMockProvider: + def __init__(self): + self.sequence = MESSAGE_SEQUENCE + self.current_index = 1 + self.summarize_next = False + self.summarized_count = 0 + + def complete(self, model, system, messages, tools): + system_prompt_tokens = 100 + input_token_count = system_prompt_tokens + + message = self.sequence[self.current_index] + if self.summarize_next: + text = "Summary message here" + self.summarize_next = False + self.summarized_count += 1 + return Message.assistant(text=text), Usage( + # in this case, input tokens can really be whatever + input_tokens=40, + output_tokens=len(text) * 2, + total_tokens=40 + len(text) * 2, + ) + + if len(messages) > 0 and type(messages[0].content[0]) is ToolResult: + raise ValueError("ToolResult should not be the first message") + + if len(messages) == 1 and messages[0].text == "a": + # adding a +1 for the "a" + return Message.assistant("Getting system prompt size"), Usage( + input_tokens=80 + 1, output_tokens=20, total_tokens=system_prompt_tokens + 1 + ) + + for i in range(len(messages)): + if type(messages[i].content[0]) in (ToolResult, ToolUse): + input_token_count += 10 + else: + input_token_count += len(messages[i].text) * 2 + + if type(message.content[0]) in (ToolResult, ToolUse): + output_tokens = 10 + else: + output_tokens = len(message.text) * 2 + + total_tokens = input_token_count + output_tokens + if total_tokens > 300: + self.summarize_next = True + usage = Usage( + input_tokens=input_token_count, + output_tokens=output_tokens, + total_tokens=total_tokens, + ) + self.current_index += 2 + return message, usage + + +@pytest.fixture +def conversation_exchange_instance(): + ex = Exchange( + provider=AnotherMockProvider(), + model="test-model", + system="test-system", + moderator=ContextSummarizer(max_tokens=300), + # TODO: make it work with an offset so we don't have to send off requests basically + # at every generate step + ) + return ex + + +def test_summarizer_generic_conversation(conversation_exchange_instance: Exchange): + i = 0 + while i < len(MESSAGE_SEQUENCE): + next_message = MESSAGE_SEQUENCE[i] + conversation_exchange_instance.add(next_message) + message = conversation_exchange_instance.generate() + if message.text != "Summary message here": + i += 2 + checkpoints = conversation_exchange_instance.checkpoint_data.checkpoints + assert conversation_exchange_instance.checkpoint_data.total_token_count == 570 + assert len(checkpoints) == 10 + assert len(conversation_exchange_instance.messages) == 10 + assert checkpoints[0].start_index == 20 + assert checkpoints[0].end_index == 20 + assert checkpoints[-1].start_index 
== 29 + assert checkpoints[-1].end_index == 29 + assert conversation_exchange_instance.checkpoint_data.message_index_offset == 20 + assert conversation_exchange_instance.provider.summarized_count == 12 + assert conversation_exchange_instance.moderator.system_prompt_token_count == 100 diff --git a/packages/exchange/tests/test_token_usage_collector.py b/packages/exchange/tests/test_token_usage_collector.py new file mode 100644 index 000000000..d277f63e9 --- /dev/null +++ b/packages/exchange/tests/test_token_usage_collector.py @@ -0,0 +1,24 @@ +from exchange.token_usage_collector import _TokenUsageCollector + + +def test_collect(usage_factory): + usage_collector = _TokenUsageCollector() + usage_collector.collect("model1", usage_factory(100, 1000, 1100)) + usage_collector.collect("model1", usage_factory(200, 2000, 2200)) + usage_collector.collect("model2", usage_factory(400, 4000, 4400)) + usage_collector.collect("model3", usage_factory(500, 5000, 5500)) + usage_collector.collect("model3", usage_factory(600, 6000, 6600)) + assert usage_collector.get_token_usage_group_by_model() == { + "model1": usage_factory(300, 3000, 3300), + "model2": usage_factory(400, 4000, 4400), + "model3": usage_factory(1100, 11000, 12100), + } + + +def test_collect_with_non_input_or_output_token(usage_factory): + usage_collector = _TokenUsageCollector() + usage_collector.collect("model1", usage_factory(100, None, None)) + usage_collector.collect("model1", usage_factory(None, 2000, None)) + assert usage_collector.get_token_usage_group_by_model() == { + "model1": usage_factory(100, 2000, 0), + } diff --git a/packages/exchange/tests/test_tool.py b/packages/exchange/tests/test_tool.py new file mode 100644 index 000000000..847e79fb5 --- /dev/null +++ b/packages/exchange/tests/test_tool.py @@ -0,0 +1,161 @@ +import attrs +from exchange.tool import Tool + + +def get_current_weather(location: str) -> None: + """Get the current weather in a given location + + Args: + location (str): The city and state, e.g. San Francisco, CA + """ + pass + + +def test_load(): + tool = Tool.from_function(get_current_weather) + + expected = { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + }, + "required": ["location"], + }, + "function": get_current_weather, + } + + assert attrs.asdict(tool) == expected + + +def another_function( + param1: int, + param2: str, + param3: bool, + param4: float, + param5: list[int], + param6: dict[str, float], +) -> None: + """ + This is another example function with various types + + Args: + param1 (int): Description for param1 + param2 (str): Description for param2 + param3 (bool): Description for param3 + param4 (float): Description for param4 + param5 (list[int]): Description for param5 + param6 (dict[str, float]): Description for param6 + """ + pass + + +def test_load_types(): + tool = Tool.from_function(another_function) + expected_schema = { + "type": "object", + "properties": { + "param1": {"type": "integer", "description": "Description for param1"}, + "param2": {"type": "string", "description": "Description for param2"}, + "param3": {"type": "boolean", "description": "Description for param3"}, + "param4": {"type": "number", "description": "Description for param4"}, + "param5": { + "type": "array", + "items": {"type": "integer"}, + "description": "Description for param5", + }, + "param6": { + "type": "object", + "additionalProperties": {"type": "number"}, + "description": "Description for param6", + }, + }, + "required": ["param1", "param2", "param3", "param4", "param5", "param6"], + } + assert tool.parameters == expected_schema + + +def numpy_function(param1: int, param2: str) -> None: + """ + This function uses numpy style docstrings + + Parameters + ---------- + param1 : int + Description for param1 + param2 : str + Description for param2 + """ + pass + + +def test_load_numpy_style(): + tool = Tool.from_function(numpy_function) + expected_schema = { + "type": "object", + "properties": { + "param1": {"type": "integer", "description": "Description for param1"}, + "param2": {"type": "string", "description": "Description for param2"}, + }, + "required": ["param1", "param2"], + } + assert tool.parameters == expected_schema + + +def sphinx_function(param1: int, param2: str, param3: bool) -> None: + """ + This function uses sphinx style docstrings + + :param param1: Description for param1 + :type param1: int + :param param2: Description for param2 + :type param2: str + :param param3: Description for param3 + :type param3: bool + """ + pass + + +def test_load_sphinx_style(): + tool = Tool.from_function(sphinx_function) + expected_schema = { + "type": "object", + "properties": { + "param1": {"type": "integer", "description": "Description for param1"}, + "param2": {"type": "string", "description": "Description for param2"}, + "param3": {"type": "boolean", "description": "Description for param3"}, + }, + "required": ["param1", "param2", "param3"], + } + assert tool.parameters == expected_schema + + +class FunctionLike: + def __init__(self, state: int) -> None: + self.state = state + + def __call__(self, param1: int) -> int: + """Example + + Args: + param1 (int): Description for param1 + """ + return self.state + param1 + + +def test_load_stateful_class(): + tool = Tool.from_function(FunctionLike(1)) + expected_schema = { + "type": "object", + "properties": { + "param1": {"type": "integer", "description": "Description for param1"}, + }, + "required": ["param1"], + } + assert tool.parameters == expected_schema + assert tool.function(2) == 3 diff --git a/packages/exchange/tests/test_truncate.py b/packages/exchange/tests/test_truncate.py new file mode 100644 index 000000000..3875303e7 --- /dev/null +++ 
b/packages/exchange/tests/test_truncate.py @@ -0,0 +1,132 @@ +import pytest +from exchange import Exchange +from exchange.content import ToolResult, ToolUse +from exchange.message import Message +from exchange.moderators.truncate import ContextTruncate +from exchange.providers import Provider, Usage + +MAX_TOKENS = 300 +SYSTEM_PROMPT_TOKENS = 100 + +MESSAGE_SEQUENCE = [ + Message.user("Hi, can you help me with my homework?"), + Message.assistant("Of course! What do you need help with?"), + Message.user("I need help with math problems."), + Message.assistant("Sure, I can help with that. Let's get started."), + Message.user("What is 2 + 2, 3*3, 9/5, 2*20, 14/2?"), + Message( + role="assistant", + content=[ToolUse(id="1", name="add", parameters={"a": 2, "b": 2})], + ), + Message(role="user", content=[ToolResult(tool_use_id="1", output="4")]), + Message( + role="assistant", + content=[ToolUse(id="2", name="multiply", parameters={"a": 3, "b": 3})], + ), + Message(role="user", content=[ToolResult(tool_use_id="2", output="9")]), + Message( + role="assistant", + content=[ToolUse(id="3", name="divide", parameters={"a": 9, "b": 5})], + ), + Message(role="user", content=[ToolResult(tool_use_id="3", output="1.8")]), + Message( + role="assistant", + content=[ToolUse(id="4", name="multiply", parameters={"a": 2, "b": 20})], + ), + Message(role="user", content=[ToolResult(tool_use_id="4", output="40")]), + Message( + role="assistant", + content=[ToolUse(id="5", name="divide", parameters={"a": 14, "b": 2})], + ), + Message(role="user", content=[ToolResult(tool_use_id="5", output="7")]), + Message.assistant("I'm done calculating the answers to your math questions."), + Message.user("Can you also help with my science homework?"), + Message.assistant("Yes, I can help with science too."), + Message.user("What is the speed of light? The frequency of a photon? The mass of an electron?"), + Message( + role="assistant", + content=[ToolUse(id="6", name="speed_of_light", parameters={})], + ), + Message(role="user", content=[ToolResult(tool_use_id="6", output="299,792,458 m/s")]), + Message( + role="assistant", + content=[ToolUse(id="7", name="photon_frequency", parameters={})], + ), + Message(role="user", content=[ToolResult(tool_use_id="7", output="2.418 x 10^14 Hz")]), + Message(role="assistant", content=[ToolUse(id="8", name="electron_mass", parameters={})]), + Message( + role="user", + content=[ToolResult(tool_use_id="8", output="9.10938356 x 10^-31 kg")], + ), + Message.assistant("I'm done calculating the answers to your science questions."), + Message.user("That's great! How about history?"), + Message.assistant("Of course, I can help with history as well."), + Message.user("Thanks! You're very helpful."), + Message.assistant("You're welcome! 
I'm here to help."), +] + + +class TruncateLinearProvider(Provider): + def __init__(self): + self.sequence = MESSAGE_SEQUENCE + self.current_index = 1 + self.summarize_next = False + self.summarized_count = 0 + + def complete(self, model, system, messages, tools): + input_token_count = SYSTEM_PROMPT_TOKENS + + message = self.sequence[self.current_index] + + if len(messages) > 0 and type(messages[0].content[0]) is ToolResult: + raise ValueError("ToolResult should not be the first message") + + if len(messages) == 1 and messages[0].text == "a": + # adding a +1 for the "a" + return Message.assistant("Getting system prompt size"), Usage( + input_tokens=80 + 1, output_tokens=20, total_tokens=SYSTEM_PROMPT_TOKENS + 1 + ) + + for i in range(len(messages)): + if type(messages[i].content[0]) in (ToolResult, ToolUse): + input_token_count += 10 + else: + input_token_count += len(messages[i].text) * 2 + + if type(message.content[0]) in (ToolResult, ToolUse): + output_tokens = 10 + else: + output_tokens = len(message.text) * 2 + + total_tokens = input_token_count + output_tokens + usage = Usage( + input_tokens=input_token_count, + output_tokens=output_tokens, + total_tokens=total_tokens, + ) + self.current_index += 2 + return message, usage + + +@pytest.fixture +def conversation_exchange_instance(): + ex = Exchange( + provider=TruncateLinearProvider(), + model="test-model", + system="test-system", + moderator=ContextTruncate(max_tokens=500), + ) + return ex + + +def test_truncate_on_generic_conversation(conversation_exchange_instance: Exchange): + i = 0 + while i < len(MESSAGE_SEQUENCE): + next_message = MESSAGE_SEQUENCE[i] + conversation_exchange_instance.add(next_message) + message = conversation_exchange_instance.generate() + if message.text != "Summary message here": + i += 2 + # ensure the total token count is not anything exhorbitant + assert conversation_exchange_instance.checkpoint_data.total_token_count < 700 + assert conversation_exchange_instance.moderator.system_prompt_token_count == 100 diff --git a/packages/exchange/tests/test_utils.py b/packages/exchange/tests/test_utils.py new file mode 100644 index 000000000..6bc00f9e0 --- /dev/null +++ b/packages/exchange/tests/test_utils.py @@ -0,0 +1,125 @@ +import pytest +from exchange import utils +from unittest.mock import patch +from exchange.message import Message +from exchange.content import Text, ToolResult +from exchange.providers.utils import messages_to_openai_spec, encode_image + + +def test_encode_image(): + image_path = "tests/test_image.png" + encoded_image = encode_image(image_path) + + # Adjust this string based on the actual initial part of your base64-encoded image. + expected_start = "iVBORw0KGgo" + assert encoded_image.startswith(expected_start) + + +def test_create_object_id() -> None: + prefix = "test" + object_id = utils.create_object_id(prefix) + assert object_id.startswith(prefix + "_") + assert len(object_id) == len(prefix) + 1 + 24 # prefix + _ + 24 chars + + +def test_compact() -> None: + content = "This is \n\n a test" + compacted = utils.compact(content) + assert compacted == "This is a test" + + +def test_parse_docstring() -> None: + def dummy_func(a, b, c): + """ + This function does something. + + Args: + a (int): The first parameter. + b (str): The second parameter. + c (list): The third parameter. + """ + pass + + description, parameters = utils.parse_docstring(dummy_func) + assert description == "This function does something." 
+ assert parameters == [ + {"name": "a", "annotation": "int", "description": "The first parameter."}, + {"name": "b", "annotation": "str", "description": "The second parameter."}, + {"name": "c", "annotation": "list", "description": "The third parameter."}, + ] + + +def test_parse_docstring_no_description() -> None: + def dummy_func(a, b, c): + """ + Args: + a (int): The first parameter. + b (str): The second parameter. + c (list): The third parameter. + """ + pass + + with pytest.raises(ValueError) as e: + utils.parse_docstring(dummy_func) + + assert "Attempted to load from a function" in str(e.value) + + +def test_json_schema() -> None: + def dummy_func(a: int, b: str, c: list) -> None: + pass + + schema = utils.json_schema(dummy_func) + + assert schema == { + "type": "object", + "properties": { + "a": {"type": "integer"}, + "b": {"type": "string"}, + "c": {"type": "string"}, + }, + "required": ["a", "b", "c"], + } + + +def test_load_plugins() -> None: + class DummyEntryPoint: + def __init__(self, name, plugin): + self.name = name + self.plugin = plugin + + def load(self): + return self.plugin + + with patch("exchange.utils.entry_points") as entry_points_mock: + entry_points_mock.return_value = [ + DummyEntryPoint("plugin1", object()), + DummyEntryPoint("plugin2", object()), + ] + + plugins = utils.load_plugins("dummy_group") + + assert "plugin1" in plugins + assert "plugin2" in plugins + assert len(plugins) == 2 + + +def test_messages_to_openai_spec(): + # Use the provided test image + png_path = "tests/test_image.png" + + # Create a list of messages as input + messages = [ + Message(role="user", content=[Text(text="Hello, Assistant!")]), + Message(role="assistant", content=[Text(text="Here is a text with tool usage")]), + Message( + role="tool", + content=[ToolResult(tool_use_id="1", output=f'"image:{png_path}"')], + ), + ] + + # Call the function + output = messages_to_openai_spec(messages) + + assert "This tool result included an image that is uploaded in the next message." in str(output) + assert "{'role': 'user', 'content': [{'type': 'image_url'" in str(output) diff --git a/pyproject.toml b/pyproject.toml index 0e55a9a6c..4db2570f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,10 +5,10 @@ version = "0.9.3" readme = "README.md" requires-python = ">=3.10" dependencies = [ + "ai-exchange", "attrs>=23.2.0", "rich>=13.7.1", "ruamel-yaml>=0.18.6", - "ai-exchange>=0.9.3", "click>=8.1.7", "prompt-toolkit>=3.0.47", ] @@ -53,7 +53,6 @@ dev-dependencies = [ "mkdocs-gen-files>=0.5.0", "mkdocs-git-authors-plugin>=0.9.0", "mkdocs-git-committers-plugin>=0.2.3", - "mkdocs-git-revision-date-localized-plugin", "mkdocs-git-revision-date-localized-plugin>=1.2.9", "mkdocs-glightbox>=0.4.0", "mkdocs-include-markdown-plugin>=6.2.2", @@ -66,3 +65,10 @@ dev-dependencies = [ "pytest-mock>=3.14.0", "pytest>=8.3.2" ] + +[tool.uv.sources] +ai-exchange = { workspace = true } + +[tool.uv.workspace] +members = ["packages/*"]
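+# with this workspace config, uv resolves ai-exchange from packages/exchange during development instead of from PyPI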