Skip to content

Commit

Permalink
fix(python/sdk): Fix README for sentiment analysis example (#2472)
Browse files Browse the repository at this point in the history
GitOrigin-RevId: c2d97c625499679492c8d3c9083a39bc74211c83
  • Loading branch information
ploeber authored and jhazenaai committed Dec 14, 2023
1 parent e4d3379 commit 60137e7
Show file tree
Hide file tree
Showing 13 changed files with 28 additions and 282 deletions.
35 changes: 2 additions & 33 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
[![AssemblyAI Twitter](https://img.shields.io/twitter/follow/AssemblyAI?label=%40AssemblyAI&style=social)](https://twitter.com/AssemblyAI)
[![AssemblyAI YouTube](https://img.shields.io/youtube/channel/subscribers/UCtatfZMf-8EkIwASXM4ts0A)](https://www.youtube.com/@AssemblyAI)
[![Discord](https://img.shields.io/discord/875120158014853141?logo=discord&label=Discord&link=https%3A%2F%2Fdiscord.com%2Fchannels%2F875120158014853141&style=social)
](https://assemblyai.com/discord)
](https://discord.gg/5aQNZyq3)

# AssemblyAI's Python SDK

Expand Down Expand Up @@ -266,37 +266,6 @@ print(result.response)

</details>


<details>
<summary>Use LeMUR with Input Text</summary>

```python
import assemblyai as aai

transcriber = aai.Transcriber()
config = aai.TranscriptionConfig(
speaker_labels=True,
)
transcript = transcriber.transcribe("https://example.org/customer.mp3", config=config)

# Example converting speaker label utterances into LeMUR input text
text = ""

for utt in transcript.utterances:
text += f"Speaker {utt.speaker}:\n{utt.text}\n"

result = aai.Lemur().task(
"You are a helpful coach. Provide an analysis of the transcript "
"and offer areas to improve with exact quotes. Include no preamble. "
"Start with an overall summary then get into the examples with feedback.",
input_text=text
)

print(result.response)
```

</details>

<details>
<summary>Delete data previously sent to LeMUR</summary>

Expand Down Expand Up @@ -524,7 +493,7 @@ transcript = transcriber.transcribe(

for entity in transcript.entities:
print(entity.text) # i.e. "Dan Gilbert"
print(entity.entity_type) # i.e. EntityType.person
print(entity.type) # i.e. EntityType.person
print(f"Timestamp: {entity.start} - {entity.end}")
```

Expand Down
24 changes: 2 additions & 22 deletions assemblyai/lemur.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,7 @@ def __init__(
) -> None:
self._client = client

self._sources = (
[types.LemurSourceRequest.from_lemur_source(s) for s in sources]
if sources is not None
else []
)
self._sources = [types.LemurSourceRequest.from_lemur_source(s) for s in sources]

def question(
self,
Expand All @@ -28,7 +24,6 @@ def question(
final_model: Optional[types.LemurModel],
max_output_size: Optional[int],
temperature: Optional[float],
input_text: Optional[str],
) -> types.LemurQuestionResponse:
response = api.lemur_question(
client=self._client.http_client,
Expand All @@ -39,7 +34,6 @@ def question(
final_model=final_model,
max_output_size=max_output_size,
temperature=temperature,
input_text=input_text,
),
http_timeout=timeout,
)
Expand All @@ -54,7 +48,6 @@ def summarize(
max_output_size: Optional[int],
timeout: Optional[float],
temperature: Optional[float],
input_text: Optional[str],
) -> types.LemurSummaryResponse:
response = api.lemur_summarize(
client=self._client.http_client,
Expand All @@ -65,7 +58,6 @@ def summarize(
final_model=final_model,
max_output_size=max_output_size,
temperature=temperature,
input_text=input_text,
),
http_timeout=timeout,
)
Expand All @@ -80,7 +72,6 @@ def action_items(
max_output_size: Optional[int],
timeout: Optional[float],
temperature: Optional[float],
input_text: Optional[str],
) -> types.LemurActionItemsResponse:
response = api.lemur_action_items(
client=self._client.http_client,
Expand All @@ -91,7 +82,6 @@ def action_items(
final_model=final_model,
max_output_size=max_output_size,
temperature=temperature,
input_text=input_text,
),
http_timeout=timeout,
)
Expand All @@ -105,7 +95,6 @@ def task(
max_output_size: Optional[int],
timeout: Optional[float],
temperature: Optional[float],
input_text: Optional[str],
):
response = api.lemur_task(
client=self._client.http_client,
Expand All @@ -115,7 +104,6 @@ def task(
final_model=final_model,
max_output_size=max_output_size,
temperature=temperature,
input_text=input_text,
),
http_timeout=timeout,
)
Expand All @@ -133,7 +121,7 @@ class Lemur:

def __init__(
self,
sources: Optional[List[types.LemurSource]] = None,
sources: List[types.LemurSource],
client: Optional[_client.Client] = None,
) -> None:
"""
Expand All @@ -159,7 +147,6 @@ def question(
max_output_size: Optional[int] = None,
timeout: Optional[float] = None,
temperature: Optional[float] = None,
input_text: Optional[str] = None,
) -> types.LemurQuestionResponse:
"""
Question & Answer allows you to ask free form questions about one or many transcripts.
Expand Down Expand Up @@ -191,7 +178,6 @@ def question(
max_output_size=max_output_size,
timeout=timeout,
temperature=temperature,
input_text=input_text,
)

def summarize(
Expand All @@ -202,7 +188,6 @@ def summarize(
max_output_size: Optional[int] = None,
timeout: Optional[float] = None,
temperature: Optional[float] = None,
input_text: Optional[str] = None,
) -> types.LemurSummaryResponse:
"""
Summary allows you to distill a piece of audio into a few impactful sentences.
Expand All @@ -229,7 +214,6 @@ def summarize(
max_output_size=max_output_size,
timeout=timeout,
temperature=temperature,
input_text=input_text,
)

def action_items(
Expand All @@ -240,7 +224,6 @@ def action_items(
max_output_size: Optional[int] = None,
timeout: Optional[float] = None,
temperature: Optional[float] = None,
input_text: Optional[str] = None,
) -> types.LemurActionItemsResponse:
"""
Action Items allows you to generate action items from one or many transcripts.
Expand Down Expand Up @@ -268,7 +251,6 @@ def action_items(
max_output_size=max_output_size,
timeout=timeout,
temperature=temperature,
input_text=input_text,
)

def task(
Expand All @@ -278,7 +260,6 @@ def task(
max_output_size: Optional[int] = None,
timeout: Optional[float] = None,
temperature: Optional[float] = None,
input_text: Optional[str] = None,
) -> types.LemurTaskResponse:
"""
Task feature allows you to submit a custom prompt to the model.
Expand All @@ -301,7 +282,6 @@ def task(
max_output_size=max_output_size,
timeout=timeout,
temperature=temperature,
input_text=input_text,
)

@classmethod
Expand Down
2 changes: 1 addition & 1 deletion assemblyai/transcriber.py
Original file line number Diff line number Diff line change
Expand Up @@ -983,7 +983,7 @@ def __init__(
client: _client.Client,
) -> None:
self._client = client
self._websocket: Optional[websockets.sync.client.ClientConnection] = None
self._websocket: Optional[websockets_client.ClientConnection] = None

self._on_open = on_open
self._on_data = on_data
Expand Down
9 changes: 4 additions & 5 deletions assemblyai/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -835,7 +835,7 @@ def auto_chapters(self, enable: Optional[bool]) -> None:
"Enable Auto Chapters."

# Validate required params are also set
if enable and self.punctuate is False:
if enable and self.punctuate == False:
raise ValueError(
"If `auto_chapters` is enabled, then `punctuate` must not be disabled"
)
Expand Down Expand Up @@ -1146,11 +1146,11 @@ def set_summarize(
return self

# Validate that required parameters are also set
if self._raw_transcription_config.punctuate is False:
if self._raw_transcription_config.punctuate == False:
raise ValueError(
"If `summarization` is enabled, then `punctuate` must not be disabled"
)
if self._raw_transcription_config.format_text is False:
if self._raw_transcription_config.format_text == False:
raise ValueError(
"If `summarization` is enabled, then `format_text` must not be disabled"
)
Expand Down Expand Up @@ -1666,7 +1666,7 @@ def __init__(
"""
from . import Transcript

if isinstance(transcript, str):
if type(transcript) == str:
transcript = Transcript(transcript_id=transcript)

super().__init__(transcript)
Expand Down Expand Up @@ -1773,7 +1773,6 @@ class BaseLemurRequest(BaseModel):
final_model: Optional[LemurModel]
max_output_size: Optional[int]
temperature: Optional[float]
input_text: Optional[str]


class LemurTaskRequest(BaseLemurRequest):
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

setup(
name="assemblyai",
version="0.20.0",
version="0.18.0",
description="AssemblyAI Python SDK",
author="AssemblyAI",
author_email="[email protected]",
Expand Down
2 changes: 1 addition & 1 deletion tests/unit/test_auto_chapters.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ def test_auto_chapters_enabled(httpx_mock: HTTPXMock):
)

# Check that request body was properly defined
assert request_body.get("auto_chapters") is True
assert request_body.get("auto_chapters") == True

# Check that transcript was properly parsed from JSON response
assert transcript.error is None
Expand Down
2 changes: 1 addition & 1 deletion tests/unit/test_auto_highlights.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ def test_auto_highlights_enabled(httpx_mock: HTTPXMock):
)

# Check that request body was properly defined
assert request_body.get("auto_highlights") is True
assert request_body.get("auto_highlights") == True

# Check that transcript was properly parsed from JSON response
assert transcript.error is None
Expand Down
4 changes: 2 additions & 2 deletions tests/unit/test_content_safety.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ def test_content_safety_enabled(httpx_mock: HTTPXMock):
)

# Check that request body was properly defined
assert request_body.get("content_safety") is True
assert request_body.get("content_safety") == True

# Check that transcript was properly parsed from JSON response
assert transcript.error is None
Expand Down Expand Up @@ -202,7 +202,7 @@ def test_content_safety_with_confidence_threshold(httpx_mock: HTTPXMock):
),
)

assert request.get("content_safety") is True
assert request.get("content_safety") == True
assert request.get("content_safety_confidence") == confidence


Expand Down
2 changes: 1 addition & 1 deletion tests/unit/test_entity_detection.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def test_entity_detection_enabled(httpx_mock: HTTPXMock):
)

# Check that request body was properly defined
assert request_body.get("entity_detection") is True
assert request_body.get("entity_detection") == True

# Check that transcript was properly parsed from JSON response
assert transcript.error is None
Expand Down
Loading

0 comments on commit 60137e7

Please sign in to comment.