diff --git a/README.md b/README.md
index cf781bc..eb2a7de 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
 [![AssemblyAI Twitter](https://img.shields.io/twitter/follow/AssemblyAI?label=%40AssemblyAI&style=social)](https://twitter.com/AssemblyAI)
 [![AssemblyAI YouTube](https://img.shields.io/youtube/channel/subscribers/UCtatfZMf-8EkIwASXM4ts0A)](https://www.youtube.com/@AssemblyAI)
 [![Discord](https://img.shields.io/discord/875120158014853141?logo=discord&label=Discord&link=https%3A%2F%2Fdiscord.com%2Fchannels%2F875120158014853141&style=social)
-](https://assemblyai.com/discord)
+](https://discord.gg/5aQNZyq3)
 
 # AssemblyAI's Python SDK
 
@@ -266,37 +266,6 @@ print(result.response)
-
-<details>
-  <summary>Use LeMUR to with Input Text</summary>
-
-```python
-import assemblyai as aai
-
-transcriber = aai.Transcriber()
-config = aai.TranscriptionConfig(
-  speaker_labels=True,
-)
-transcript = transcriber.transcribe("https://example.org/customer.mp3", config=config)
-
-# Example converting speaker label utterances into LeMUR input text
-text = ""
-
-for utt in transcript.utterances:
-    text += f"Speaker {utt.speaker}:\n{utt.text}\n"
-
-result = aai.Lemur().task(
-    "You are a helpful coach. Provide an analysis of the transcript "
-    "and offer areas to improve with exact quotes. Include no preamble. "
-    "Start with an overall summary then get into the examples with feedback.",
-    input_text=text
-)
-
-print(result.response)
-```
-
-</details>
 <details>
   <summary>Delete data previously sent to LeMUR</summary>
 
@@ -524,7 +493,7 @@ transcript = transcriber.transcribe(
 for entity in transcript.entities:
     print(entity.text) # i.e. "Dan Gilbert"
-    print(entity.entity_type) # i.e. EntityType.person
+    print(entity.type) # i.e. EntityType.person
     print(f"Timestamp: {entity.start} - {entity.end}")
 ```
diff --git a/assemblyai/lemur.py b/assemblyai/lemur.py
index c81ab34..5debd48 100644
--- a/assemblyai/lemur.py
+++ b/assemblyai/lemur.py
@@ -14,11 +14,7 @@ def __init__(
     ) -> None:
         self._client = client
 
-        self._sources = (
-            [types.LemurSourceRequest.from_lemur_source(s) for s in sources]
-            if sources is not None
-            else []
-        )
+        self._sources = [types.LemurSourceRequest.from_lemur_source(s) for s in sources]
 
     def question(
         self,
@@ -28,7 +24,6 @@ def question(
         final_model: Optional[types.LemurModel],
         max_output_size: Optional[int],
         temperature: Optional[float],
-        input_text: Optional[str],
     ) -> types.LemurQuestionResponse:
         response = api.lemur_question(
             client=self._client.http_client,
@@ -39,7 +34,6 @@ def question(
                 final_model=final_model,
                 max_output_size=max_output_size,
                 temperature=temperature,
-                input_text=input_text,
             ),
             http_timeout=timeout,
         )
@@ -54,7 +48,6 @@ def summarize(
         max_output_size: Optional[int],
         timeout: Optional[float],
         temperature: Optional[float],
-        input_text: Optional[str],
     ) -> types.LemurSummaryResponse:
         response = api.lemur_summarize(
             client=self._client.http_client,
@@ -65,7 +58,6 @@ def summarize(
                 final_model=final_model,
                 max_output_size=max_output_size,
                 temperature=temperature,
-                input_text=input_text,
             ),
             http_timeout=timeout,
         )
@@ -80,7 +72,6 @@ def action_items(
         max_output_size: Optional[int],
         timeout: Optional[float],
         temperature: Optional[float],
-        input_text: Optional[str],
     ) -> types.LemurActionItemsResponse:
         response = api.lemur_action_items(
             client=self._client.http_client,
@@ -91,7 +82,6 @@ def action_items(
                 final_model=final_model,
                 max_output_size=max_output_size,
                 temperature=temperature,
-                input_text=input_text,
             ),
             http_timeout=timeout,
         )
@@ -105,7 +95,6 @@ def task(
         max_output_size: Optional[int],
         timeout: Optional[float],
         temperature: Optional[float],
-        input_text: Optional[str],
     ):
         response = api.lemur_task(
             client=self._client.http_client,
@@ -115,7 +104,6 @@ def task(
                 final_model=final_model,
                 max_output_size=max_output_size,
                 temperature=temperature,
-                input_text=input_text,
             ),
             http_timeout=timeout,
         )
@@ -133,7 +121,7 @@ class Lemur:
 
     def __init__(
         self,
-        sources: Optional[List[types.LemurSource]] = None,
+        sources: List[types.LemurSource],
         client: Optional[_client.Client] = None,
     ) -> None:
         """
@@ -159,7 +147,6 @@ def question(
         max_output_size: Optional[int] = None,
         timeout: Optional[float] = None,
         temperature: Optional[float] = None,
-        input_text: Optional[str] = None,
     ) -> types.LemurQuestionResponse:
         """
         Question & Answer allows you to ask free form questions about one or many transcripts.
@@ -191,7 +178,6 @@ def question(
             max_output_size=max_output_size,
             timeout=timeout,
             temperature=temperature,
-            input_text=input_text,
         )
 
     def summarize(
@@ -202,7 +188,6 @@ def summarize(
         max_output_size: Optional[int] = None,
         timeout: Optional[float] = None,
         temperature: Optional[float] = None,
-        input_text: Optional[str] = None,
     ) -> types.LemurSummaryResponse:
         """
         Summary allows you to distill a piece of audio into a few impactful sentences.
@@ -229,7 +214,6 @@ def summarize(
             max_output_size=max_output_size,
             timeout=timeout,
             temperature=temperature,
-            input_text=input_text,
         )
 
     def action_items(
@@ -240,7 +224,6 @@ def action_items(
         max_output_size: Optional[int] = None,
         timeout: Optional[float] = None,
         temperature: Optional[float] = None,
-        input_text: Optional[str] = None,
     ) -> types.LemurActionItemsResponse:
         """
         Action Items allows you to generate action items from one or many transcripts.
@@ -268,7 +251,6 @@ def action_items(
             max_output_size=max_output_size,
             timeout=timeout,
             temperature=temperature,
-            input_text=input_text,
         )
 
     def task(
@@ -278,7 +260,6 @@ def task(
         max_output_size: Optional[int] = None,
         timeout: Optional[float] = None,
         temperature: Optional[float] = None,
-        input_text: Optional[str] = None,
     ) -> types.LemurTaskResponse:
         """
         Task feature allows you to submit a custom prompt to the model.
@@ -301,7 +282,6 @@ def task(
             max_output_size=max_output_size,
             timeout=timeout,
             temperature=temperature,
-            input_text=input_text,
         )
 
     @classmethod
diff --git a/assemblyai/transcriber.py b/assemblyai/transcriber.py
index 72f83b3..996471c 100644
--- a/assemblyai/transcriber.py
+++ b/assemblyai/transcriber.py
@@ -983,7 +983,7 @@ def __init__(
         client: _client.Client,
     ) -> None:
         self._client = client
-        self._websocket: Optional[websockets.sync.client.ClientConnection] = None
+        self._websocket: Optional[websockets_client.ClientConnection] = None
 
         self._on_open = on_open
         self._on_data = on_data
diff --git a/assemblyai/types.py b/assemblyai/types.py
index f34243c..ef10d82 100644
--- a/assemblyai/types.py
+++ b/assemblyai/types.py
@@ -835,7 +835,7 @@ def auto_chapters(self, enable: Optional[bool]) -> None:
         "Enable Auto Chapters."
 
         # Validate required params are also set
-        if enable and self.punctuate is False:
+        if enable and self.punctuate == False:
             raise ValueError(
                 "If `auto_chapters` is enabled, then `punctuate` must not be disabled"
             )
@@ -1146,11 +1146,11 @@ def set_summarize(
             return self
 
         # Validate that required parameters are also set
-        if self._raw_transcription_config.punctuate is False:
+        if self._raw_transcription_config.punctuate == False:
             raise ValueError(
                 "If `summarization` is enabled, then `punctuate` must not be disabled"
             )
-        if self._raw_transcription_config.format_text is False:
+        if self._raw_transcription_config.format_text == False:
             raise ValueError(
                 "If `summarization` is enabled, then `format_text` must not be disabled"
             )
@@ -1666,7 +1666,7 @@ def __init__(
         """
         from . import Transcript
 
-        if isinstance(transcript, str):
+        if type(transcript) == str:
             transcript = Transcript(transcript_id=transcript)
 
         super().__init__(transcript)
@@ -1773,7 +1773,6 @@ class BaseLemurRequest(BaseModel):
     final_model: Optional[LemurModel]
     max_output_size: Optional[int]
    temperature: Optional[float]
-    input_text: Optional[str]
 
 
 class LemurTaskRequest(BaseLemurRequest):
diff --git a/setup.py b/setup.py
index 84e81c1..5ff7e4f 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
 
 setup(
     name="assemblyai",
-    version="0.20.0",
+    version="0.18.0",
     description="AssemblyAI Python SDK",
     author="AssemblyAI",
     author_email="engineering.sdk@assemblyai.com",
diff --git a/tests/unit/test_auto_chapters.py b/tests/unit/test_auto_chapters.py
index e3ba39b..8b03965 100644
--- a/tests/unit/test_auto_chapters.py
+++ b/tests/unit/test_auto_chapters.py
@@ -68,7 +68,7 @@ def test_auto_chapters_enabled(httpx_mock: HTTPXMock):
     )
 
     # Check that request body was properly defined
-    assert request_body.get("auto_chapters") is True
+    assert request_body.get("auto_chapters") == True
 
     # Check that transcript was properly parsed from JSON response
     assert transcript.error is None
diff --git a/tests/unit/test_auto_highlights.py b/tests/unit/test_auto_highlights.py
index 4648760..8472faf 100644
--- a/tests/unit/test_auto_highlights.py
+++ b/tests/unit/test_auto_highlights.py
@@ -64,7 +64,7 @@ def test_auto_highlights_enabled(httpx_mock: HTTPXMock):
     )
 
     # Check that request body was properly defined
-    assert request_body.get("auto_highlights") is True
+    assert request_body.get("auto_highlights") == True
 
     # Check that transcript was properly parsed from JSON response
     assert transcript.error is None
diff --git a/tests/unit/test_content_safety.py b/tests/unit/test_content_safety.py
index 2dc6fa6..4b76978 100644
--- a/tests/unit/test_content_safety.py
+++ b/tests/unit/test_content_safety.py
@@ -98,7 +98,7 @@ def test_content_safety_enabled(httpx_mock: HTTPXMock):
     )
 
     # Check that request body was properly defined
-    assert request_body.get("content_safety") is True
+    assert request_body.get("content_safety") == True
 
     # Check that transcript was properly parsed from JSON response
     assert transcript.error is None
@@ -202,7 +202,7 @@ def test_content_safety_with_confidence_threshold(httpx_mock: HTTPXMock):
         ),
     )
 
-    assert request.get("content_safety") is True
+    assert request.get("content_safety") == True
     assert request.get("content_safety_confidence") == confidence
 
diff --git a/tests/unit/test_entity_detection.py b/tests/unit/test_entity_detection.py
index 4c44bab..73e1a61 100644
--- a/tests/unit/test_entity_detection.py
+++ b/tests/unit/test_entity_detection.py
@@ -52,7 +52,7 @@ def test_entity_detection_enabled(httpx_mock: HTTPXMock):
     )
 
     # Check that request body was properly defined
-    assert request_body.get("entity_detection") is True
+    assert request_body.get("entity_detection") == True
 
     # Check that transcript was properly parsed from JSON response
     assert transcript.error is None
diff --git a/tests/unit/test_lemur.py b/tests/unit/test_lemur.py
index d273112..04ff316 100644
--- a/tests/unit/test_lemur.py
+++ b/tests/unit/test_lemur.py
@@ -14,7 +14,7 @@
 aai.settings.api_key = "test"
 
 
-def test_lemur_single_question_succeeds_transcript(httpx_mock: HTTPXMock):
+def test_lemur_single_question_succeeds(httpx_mock: HTTPXMock):
     """
     Tests whether asking a single question succeeds.
     """
@@ -64,54 +64,7 @@ def test_lemur_single_question_succeeds_transcript(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
-def test_lemur_single_question_succeeds_input_text(httpx_mock: HTTPXMock):
-    """
-    Tests whether asking a single question succeeds with input text.
-    """
-
-    # create a mock response of a LemurQuestionResponse
-    mock_lemur_answer = factories.generate_dict_factory(
-        factories.LemurQuestionResponse
-    )()
-
-    # we only want to mock one answer
-    mock_lemur_answer["response"] = [mock_lemur_answer["response"][0]]
-
-    # mock the specific endpoints
-    httpx_mock.add_response(
-        url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/question-answer",
-        status_code=httpx.codes.OK,
-        method="POST",
-        json=mock_lemur_answer,
-    )
-
-    # prepare the question to be asked
-    question = aai.LemurQuestion(
-        question="Which cars do the callers want to buy?",
-        context="Callers are interested in buying cars",
-        answer_options=["Toyota", "Honda", "Ford", "Chevrolet"],
-    )
-    # test input_text input
-    # mimic the usage of the SDK
-    lemur = aai.Lemur()
-    result = lemur.question(
-        question, input_text="This transcript is a test transcript."
-    )
-
-    # check whether answer is not a list
-    assert isinstance(result, aai.LemurQuestionResponse)
-
-    answers = result.response
-
-    # check the response
-    assert answers[0].question == mock_lemur_answer["response"][0]["question"]
-    assert answers[0].answer == mock_lemur_answer["response"][0]["answer"]
-
-    # check whether we mocked everything
-    assert len(httpx_mock.get_requests()) == 1
-
-
-def test_lemur_multiple_question_succeeds_transcript(httpx_mock: HTTPXMock):
+def test_lemur_multiple_question_succeeds(httpx_mock: HTTPXMock):
     """
     Tests whether asking multiple questions succeeds.
     """
@@ -164,59 +117,6 @@ def test_lemur_multiple_question_succeeds_transcript(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
-def test_lemur_multiple_question_succeeds_input_text(httpx_mock: HTTPXMock):
-    """
-    Tests whether asking multiple questions succeeds.
-    """
-
-    # create a mock response of a LemurQuestionResponse
-    mock_lemur_answer = factories.generate_dict_factory(
-        factories.LemurQuestionResponse
-    )()
-
-    # prepare the questions to be asked
-    questions = [
-        aai.LemurQuestion(
-            question="Which cars do the callers want to buy?",
-        ),
-        aai.LemurQuestion(
-            question="What price range are the callers looking for?",
-        ),
-    ]
-
-    # update the mock questions with the questions
-    mock_lemur_answer["response"][0]["question"] = questions[0].question
-    mock_lemur_answer["response"][1]["question"] = questions[1].question
-
-    # mock the specific endpoints
-    httpx_mock.add_response(
-        url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/question-answer",
-        status_code=httpx.codes.OK,
-        method="POST",
-        json=mock_lemur_answer,
-    )
-
-    # test input_text input
-    # mimic the usage of the SDK
-    lemur = aai.Lemur()
-    result = lemur.question(
-        questions, input_text="This transcript is a test transcript."
-    )
-    assert isinstance(result, aai.LemurQuestionResponse)
-
-    answers = result.response
-    # check whether answers is a list
-    assert isinstance(answers, list)
-
-    # check the response
-    for idx, answer in enumerate(answers):
-        assert answer.question == mock_lemur_answer["response"][idx]["question"]
-        assert answer.answer == mock_lemur_answer["response"][idx]["answer"]
-
-    # check whether we mocked everything
-    assert len(httpx_mock.get_requests()) == 1
-
-
 def test_lemur_question_fails(httpx_mock: HTTPXMock):
     """
     Tests whether asking a question fails.
     """
@@ -249,7 +149,7 @@ def test_lemur_question_fails(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
-def test_lemur_summarize_succeeds_transcript(httpx_mock: HTTPXMock):
+def test_lemur_summarize_succeeds(httpx_mock: HTTPXMock):
     """
     Tests whether summarizing a transcript via LeMUR succeeds.
     """
@@ -284,41 +184,6 @@ def test_lemur_summarize_succeeds_transcript(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
-def test_lemur_summarize_succeeds_input_text(httpx_mock: HTTPXMock):
-    """
-    Tests whether summarizing a transcript via LeMUR succeeds with input text.
-    """
-
-    # create a mock response of a LemurSummaryResponse
-    mock_lemur_summary = factories.generate_dict_factory(
-        factories.LemurSummaryResponse
-    )()
-
-    # mock the specific endpoints
-    httpx_mock.add_response(
-        url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/summary",
-        status_code=httpx.codes.OK,
-        method="POST",
-        json=mock_lemur_summary,
-    )
-
-    # test input_text input
-    lemur = aai.Lemur()
-    result = lemur.summarize(
-        context="Callers asking for cars", answer_format="TLDR", input_text="Test test"
-    )
-
-    assert isinstance(result, aai.LemurSummaryResponse)
-
-    summary = result.response
-
-    # check the response
-    assert summary == mock_lemur_summary["response"]
-
-    # check whether we mocked everything
-    assert len(httpx_mock.get_requests()) == 1
-
-
 def test_lemur_summarize_fails(httpx_mock: HTTPXMock):
     """
     Tests whether summarizing a transcript via LeMUR fails.
@@ -344,7 +209,7 @@ def test_lemur_summarize_fails(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
-def test_lemur_action_items_succeeds_transcript(httpx_mock: HTTPXMock):
+def test_lemur_action_items_succeeds(httpx_mock: HTTPXMock):
     """
     Tests whether generating action items for a transcript via LeMUR succeeds.
     """
@@ -382,43 +247,6 @@ def test_lemur_action_items_succeeds_transcript(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
-def test_lemur_action_items_succeeds_input_text(httpx_mock: HTTPXMock):
-    """
-    Tests whether generating action items for a transcript via LeMUR succeeds.
-    """
-
-    # create a mock response of a LemurActionItemsResponse
-    mock_lemur_action_items = factories.generate_dict_factory(
-        factories.LemurActionItemsResponse
-    )()
-
-    # mock the specific endpoints
-    httpx_mock.add_response(
-        url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/action-items",
-        status_code=httpx.codes.OK,
-        method="POST",
-        json=mock_lemur_action_items,
-    )
-
-    # test input_text input
-    lemur = aai.Lemur()
-    result = lemur.action_items(
-        context="Customers asking for help with resolving their problem",
-        answer_format="Three bullet points",
-        input_text="Test test",
-    )
-
-    assert isinstance(result, aai.LemurActionItemsResponse)
-
-    action_items = result.response
-
-    # check the response
-    assert action_items == mock_lemur_action_items["response"]
-
-    # check whether we mocked everything
-    assert len(httpx_mock.get_requests()) == 1
-
-
 def test_lemur_action_items_fails(httpx_mock: HTTPXMock):
     """
     Tests whether generating action items for a transcript via LeMUR fails.
@@ -447,7 +275,7 @@ def test_lemur_action_items_fails(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
-def test_lemur_task_succeeds_transcript(httpx_mock: HTTPXMock):
+def test_lemur_task_succeeds(httpx_mock: HTTPXMock):
     """
     Tests whether creating a task request succeeds.
     """
@@ -482,38 +310,6 @@ def test_lemur_task_succeeds_transcript(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
-def test_lemur_task_succeeds_input_text(httpx_mock: HTTPXMock):
-    """
-    Tests whether creating a task request succeeds.
-    """
-
-    # create a mock response of a LemurSummaryResponse
-    mock_lemur_task_response = factories.generate_dict_factory(
-        factories.LemurTaskResponse
-    )()
-
-    # mock the specific endpoints
-    httpx_mock.add_response(
-        url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/task",
-        status_code=httpx.codes.OK,
-        method="POST",
-        json=mock_lemur_task_response,
-    )
-    # test input_text input
-    lemur = aai.Lemur()
-    result = lemur.task(
-        prompt="Create action items of the meeting", input_text="Test test"
-    )
-
-    # check the response
-    assert isinstance(result, aai.LemurTaskResponse)
-
-    assert result.response == mock_lemur_task_response["response"]
-
-    # check whether we mocked everything
-    assert len(httpx_mock.get_requests()) == 1
-
-
 def test_lemur_ask_coach_fails(httpx_mock: HTTPXMock):
     """
     Tests whether creating a task request fails.
@@ -589,7 +385,7 @@ def test_lemur_purge_request_data_fails(httpx_mock: HTTPXMock):
         json=mock_lemur_purge_response,
     )
 
-    with pytest.raises(aai.LemurError):
+    with pytest.raises(aai.LemurError) as error:
         aai.Lemur.purge_request_data(mock_request_id)
 
     assert len(httpx_mock.get_requests()) == 1
diff --git a/tests/unit/test_realtime_transcriber.py b/tests/unit/test_realtime_transcriber.py
index 64d3f20..dbd3fbd 100644
--- a/tests/unit/test_realtime_transcriber.py
+++ b/tests/unit/test_realtime_transcriber.py
@@ -1,6 +1,7 @@
 import datetime
 import json
 import uuid
+from typing import Optional
 from unittest.mock import MagicMock
 from urllib.parse import urlencode
 
diff --git a/tests/unit/test_sentiment_analysis.py b/tests/unit/test_sentiment_analysis.py
index e8fdfd9..aeeda95 100644
--- a/tests/unit/test_sentiment_analysis.py
+++ b/tests/unit/test_sentiment_analysis.py
@@ -47,7 +47,7 @@ def test_sentiment_analysis_enabled(httpx_mock: HTTPXMock):
     )
 
     # Check that request body was properly defined
-    assert request_body.get("sentiment_analysis") is True
+    assert request_body.get("sentiment_analysis") == True
 
     # Check that transcript was properly parsed from JSON response
     assert transcript.error is None
diff --git a/tests/unit/test_summarization.py b/tests/unit/test_summarization.py
index 020487f..53d5e55 100644
--- a/tests/unit/test_summarization.py
+++ b/tests/unit/test_summarization.py
@@ -5,6 +5,7 @@
 import tests.unit.factories as factories
 import tests.unit.unit_test_utils as test_utils
 import assemblyai as aai
+from tests.unit import factories
 
 aai.settings.api_key = "test"
 
@@ -73,9 +74,9 @@ def test_default_summarization_params(httpx_mock: HTTPXMock):
     )
 
     # Check that request body was properly defined
-    assert request_body.get("summarization") is True
-    assert request_body.get("summary_model") is None
-    assert request_body.get("summary_type") is None
+    assert request_body.get("summarization") == True
+    assert request_body.get("summary_model") == None
+    assert request_body.get("summary_type") == None
 
     # Check that transcript was properly parsed from JSON response
     assert transcript.error is None
@@ -105,7 +106,7 @@ def test_summarization_with_params(httpx_mock: HTTPXMock):
     )
 
     # Check that request body was properly defined
-    assert request_body.get("summarization") is True
+    assert request_body.get("summarization") == True
     assert request_body.get("summary_model") == summary_model
     assert request_body.get("summary_type") == summary_type