299 ollama client does not work with stream #309
Changes from all commits:
@@ -7,7 +7,16 @@
 import os
 from pathlib import Path

-from typing import Any, Dict, Optional, Union, Callable, Tuple, List
+from typing import (
+    Any,
+    Dict,
+    Optional,
+    Union,
+    Callable,
+    Tuple,
+    List,
+    Generator as GeneratorType,
+)
 import logging

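The only functional change in this hunk is the new `Generator as GeneratorType` alias, which the rewritten `_post_call` below uses for an `isinstance` check. As a standalone sketch (not part of the PR), this is why the alias works for that purpose:

```python
# typing.Generator aliases collections.abc.Generator, so the unparameterized
# form supports isinstance() checks against live generator objects.
from typing import Generator as GeneratorType

def stream():
    yield "chunk"

assert isinstance(stream(), GeneratorType)           # generator object: True
assert not isinstance("full text", GeneratorType)    # plain string: False
```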
@@ -304,24 +313,54 @@ def _extra_repr(self) -> str:
         s = f"model_kwargs={self.model_kwargs}, model_type={self.model_type}"
         return s

+    def _process_chunk(self, chunk: Any) -> GeneratorOutput:
+        """Process a single chunk of data using the output processors.
+
+        Args:
+            chunk: Raw chunk data to process
+
+        Returns:
+            Any: Processed chunk
+            str: Error string in case of an exception
+        """
+        if not chunk or not self.output_processors:
+            return chunk, None
+
+        try:
+            processed_data = self.output_processors(chunk)
+            return processed_data, None
+        except Exception as e:
+            log.error(f"Error processing chunk using the output processors: {e}")
+            return None, str(e)
+
     def _post_call(self, completion: Any) -> GeneratorOutput:
-        r"""Get string completion and process it with the output_processors."""
-        # parse chat completion will only fill the raw_response
-        output: GeneratorOutput = self.model_client.parse_chat_completion(completion)
-        # Now adding the data filed to the output
-        data = output.raw_response
-        if self.output_processors:
-            if data:
-                try:
-                    data = self.output_processors(data)
-                    output.data = data
-                except Exception as e:
-                    log.error(f"Error processing the output processors: {e}")
-                    output.error = str(e)
-        else:
-            output.data = data
+        """Process completion output, handling both streaming and non-streaming cases.
+
+        Args:
+            completion: Raw completion data from the llm provider
+
+        Returns:
+            GeneratorOutput containing processed data or generator type
+        """
+        # Parse chat completion will only fill the raw_response
+        output = self.model_client.parse_chat_completion(completion)
+        # Handle streaming case
+        if isinstance(output, GeneratorType):
+
+            def process_stream():
+                try:
+                    for out in output:
+                        log.debug(f"Processing raw chunk: {out.raw_response}")
+                        out.data, out.error = self._process_chunk(out.raw_response)
+                        yield out
+                except Exception as e:
+                    log.error(f"Error in stream processing: {e}")
+                    yield GeneratorOutput(error=str(e))
+
+            return GeneratorOutput(data=process_stream(), raw_response=output)
Review comment (on the `return GeneratorOutput(data=process_stream(), raw_response=output)` line): don't separate the code, it changed too much, and it's better to minimize the change, so just add the initial code back to the …

Reply: Understood. I'll proceed with this approach then. It seems there's no longer a need for the additional …
+
+        # Handle non-streaming case
+        output.data, output.error = self._process_chunk(output.raw_response)
+        return output

     def _pre_call(self, prompt_kwargs: Dict, model_kwargs: Dict) -> Dict[str, Any]:
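To make the new control flow concrete, here is a consumption sketch. Only the `GeneratorOutput` fields (`data`, `error`, `raw_response`) come from the diff above; the `generator(...)` call shape is an illustrative assumption, not verified adalflow API:

```python
# Hypothetical caller, assuming the PR's behavior: when the model client
# streams, output.data is itself a generator of per-chunk GeneratorOutput
# objects; otherwise output.data holds the fully processed completion.
output = generator(prompt_kwargs={"input_str": "hello"})  # assumed call shape

if isinstance(output.data, GeneratorType):
    for chunk in output.data:    # each chunk was filled in by _process_chunk
        if chunk.error:
            print("chunk failed:", chunk.error)
        else:
            print(chunk.data, end="", flush=True)
else:
    print(output.data)           # non-streaming: a single processed result
```

Note the design choice under review here: the streaming branch wraps the chunk generator in an outer `GeneratorOutput`, so callers get a uniform return type whether or not the client streams.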
Review comment (on `_process_chunk`'s return annotation): it specifies only one output, but you returned a tuple. Ensure we add code linting (@fm1320) so developers catch these basics.
Reply: Thanks for the review. Yes, that's my fault. I will fix this.
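For reference, a minimal sketch of the annotation fix being discussed (an assumption about the follow-up commit, not code from this PR):

```python
from typing import Any, Optional, Tuple

# Hypothetical corrected signature: _process_chunk returns a
# (processed_data, error_message) pair, with error_message None on success.
def _process_chunk(self, chunk: Any) -> Tuple[Any, Optional[str]]:
    ...
```

A type checker such as mypy would flag the original `-> GeneratorOutput` annotation against the tuple returns, which is the kind of automated check the reviewer is asking for.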