From be4bdabdf465c2f84747e56d1b33592db075f855 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aleix=20Conchillo=20Flaqu=C3=A9?= Date: Fri, 18 Oct 2024 23:23:36 -0700 Subject: [PATCH 1/4] examples: use OpenAILLMContext in all the examples --- CHANGELOG.md | 5 +++++ examples/canonical-metrics/bot.py | 13 +++++------- examples/chatbot-audio-recording/bot.py | 13 +++++------- examples/deployment/flyio-example/bot.py | 13 +++++------- examples/dialin-chatbot/bot_daily.py | 13 +++++------- examples/dialin-chatbot/bot_twilio.py | 13 +++++------- .../foundational/06-listen-and-respond.py | 14 ++++++------- examples/foundational/06a-image-sync.py | 13 +++++------- examples/foundational/07-interruptible-vad.py | 21 ++++++++----------- examples/foundational/07-interruptible.py | 13 +++++------- .../07c-interruptible-deepgram.py | 13 +++++------- .../07d-interruptible-elevenlabs.py | 13 +++++------- .../foundational/07e-interruptible-playht.py | 13 +++++------- .../foundational/07f-interruptible-azure.py | 13 +++++------- .../07g-interruptible-openai-tts.py | 13 +++++------- .../07h-interruptible-openpipe.py | 14 ++++++------- .../foundational/07i-interruptible-xtts.py | 13 +++++------- .../foundational/07j-interruptible-gladia.py | 13 +++++------- .../foundational/07k-interruptible-lmnt.py | 13 +++++------- .../07l-interruptible-together.py | 2 +- .../foundational/07m-interruptible-aws.py | 13 +++++------- .../foundational/07n-interruptible-google.py | 13 +++++------- examples/foundational/10-wake-phrase.py | 16 +++++++------- examples/foundational/11-sound-effects.py | 13 +++++------- .../16-gpu-container-local-bot.py | 15 ++++++------- examples/foundational/17-detect-user-idle.py | 13 +++++------- examples/moondream-chatbot/bot.py | 8 ++++--- examples/simple-chatbot/bot.py | 13 +++++------- examples/storytelling-chatbot/src/bot.py | 13 +++++------- examples/studypal/studypal.py | 13 +++++------- examples/twilio-chatbot/bot.py | 13 +++++------- examples/websocket-server/bot.py | 13 +++++------- .../processors/frameworks/langchain.py | 2 +- 33 files changed, 166 insertions(+), 243 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 903f85688..596435f85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Renamed `OpenAILLMServiceRealtimeBeta` to `OpenAIRealtimeBetaLLMService` to match other services. +### Deprecated + +- `LLMUserResponseAggregator` and `LLMAssistantResponseAggregator` are + mostly deprecated; use `OpenAILLMContext` instead. + - The `vad` package is now deprecated and `audio.vad` should be used instead. The `vad` package will get removed in a future release. 
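The same migration pattern repeats in every bot below; a minimal sketch, assuming `messages`, `transport`, `llm`, and `tts` are the message list, transport, and services a given example already sets up:

    from pipecat.pipeline.pipeline import Pipeline
    from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext

    # Before: two separate aggregators shared a plain messages list.
    #   tma_in = LLMUserResponseAggregator(messages)
    #   tma_out = LLMAssistantResponseAggregator(messages)

    # After: one context object, with the user/assistant aggregator pair
    # created by the LLM service itself.
    context = OpenAILLMContext(messages)
    context_aggregator = llm.create_context_aggregator(context)

    pipeline = Pipeline(
        [
            transport.input(),               # Transport user input
            context_aggregator.user(),       # Aggregates user speech into the context
            llm,                             # LLM
            tts,                             # TTS
            transport.output(),              # Transport bot output
            context_aggregator.assistant(),  # Aggregates bot responses into the context
        ]
    )
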
diff --git a/examples/canonical-metrics/bot.py b/examples/canonical-metrics/bot.py index e61dd375d..746d67dae 100644 --- a/examples/canonical-metrics/bot.py +++ b/examples/canonical-metrics/bot.py @@ -19,10 +19,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.processors.audio.audio_buffer_processor import AudioBufferProcessor from pipecat.services.canonical import CanonicalMetricsService from pipecat.services.elevenlabs import ElevenLabsTTSService @@ -92,8 +89,8 @@ async def main(): }, ] - user_response = LLMUserResponseAggregator() - assistant_response = LLMAssistantResponseAggregator() + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) """ CanonicalMetrics uses AudioBufferProcessor under the hood to buffer the audio. On @@ -113,13 +110,13 @@ async def main(): pipeline = Pipeline( [ transport.input(), # microphone - user_response, + context_aggregator.user(), llm, tts, transport.output(), audio_buffer_processor, # captures audio into a buffer canonical, # uploads audio buffer to Canonical AI for metrics - assistant_response, + context_aggregator.assistant(), ] ) diff --git a/examples/chatbot-audio-recording/bot.py b/examples/chatbot-audio-recording/bot.py index 4cec2a996..b04f6d695 100644 --- a/examples/chatbot-audio-recording/bot.py +++ b/examples/chatbot-audio-recording/bot.py @@ -18,10 +18,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.processors.audio.audio_buffer_processor import AudioBufferProcessor from pipecat.services.elevenlabs import ElevenLabsTTSService from pipecat.services.openai import OpenAILLMService @@ -90,19 +87,19 @@ async def main(): }, ] - user_response = LLMUserResponseAggregator() - assistant_response = LLMAssistantResponseAggregator() + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) audiobuffer = AudioBufferProcessor() pipeline = Pipeline( [ transport.input(), # microphone - user_response, + context_aggregator.user(), llm, tts, transport.output(), audiobuffer, # used to buffer the audio in the pipeline - assistant_response, + context_aggregator.assistant(), ] ) diff --git a/examples/deployment/flyio-example/bot.py b/examples/deployment/flyio-example/bot.py index 079f88d95..77de3f3ab 100644 --- a/examples/deployment/flyio-example/bot.py +++ b/examples/deployment/flyio-example/bot.py @@ -7,11 +7,8 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) from pipecat.frames.frames import LLMMessagesFrame, EndFrame +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.openai import OpenAILLMService from pipecat.services.elevenlabs 
import ElevenLabsTTSService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -60,17 +57,17 @@ async def main(room_url: str, token: str): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), - tma_in, + context_aggregator.user(), llm, tts, transport.output(), - tma_out, + context_aggregator.assistant(), ] ) diff --git a/examples/dialin-chatbot/bot_daily.py b/examples/dialin-chatbot/bot_daily.py index f5939b4df..dee0589fb 100644 --- a/examples/dialin-chatbot/bot_daily.py +++ b/examples/dialin-chatbot/bot_daily.py @@ -7,11 +7,8 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) from pipecat.frames.frames import LLMMessagesFrame, EndFrame +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.elevenlabs import ElevenLabsTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import DailyParams, DailyTransport, DailyDialinSettings @@ -66,17 +63,17 @@ async def main(room_url: str, token: str, callId: str, callDomain: str): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), - tma_in, + context_aggregator.user(), llm, tts, transport.output(), - tma_out, + context_aggregator.assistant(), ] ) diff --git a/examples/dialin-chatbot/bot_twilio.py b/examples/dialin-chatbot/bot_twilio.py index 1cf32afdf..95fef893e 100644 --- a/examples/dialin-chatbot/bot_twilio.py +++ b/examples/dialin-chatbot/bot_twilio.py @@ -7,11 +7,8 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) from pipecat.frames.frames import LLMMessagesFrame, EndFrame +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.elevenlabs import ElevenLabsTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -69,17 +66,17 @@ async def main(room_url: str, token: str, callId: str, sipUri: str): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), - tma_in, + context_aggregator.user(), llm, tts, transport.output(), - tma_out, + context_aggregator.assistant(), ] ) diff --git a/examples/foundational/06-listen-and-respond.py b/examples/foundational/06-listen-and-respond.py index 928473056..75b9b4953 100644 --- a/examples/foundational/06-listen-and-respond.py +++ b/examples/foundational/06-listen-and-respond.py @@ -20,10 +20,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import 
PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.processors.frame_processor import FrameDirection, FrameProcessor from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.openai import OpenAILLMService @@ -92,18 +89,19 @@ async def main(): "content": "You are a helpful LLM in a WebRTC call. Your goal is to demonstrate your capabilities in a succinct way. Your output will be converted to audio so don't include special characters in your answers. Respond to what the user said in a creative and helpful way.", }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), - tma_in, + context_aggregator.user(), llm, tts, ml, transport.output(), - tma_out, + context_aggregator.assistant(), ] ) diff --git a/examples/foundational/06a-image-sync.py b/examples/foundational/06a-image-sync.py index b63c42674..5cb625729 100644 --- a/examples/foundational/06a-image-sync.py +++ b/examples/foundational/06a-image-sync.py @@ -16,10 +16,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.processors.frame_processor import FrameDirection, FrameProcessor from pipecat.services.cartesia import CartesiaHttpTTSService from pipecat.services.openai import OpenAILLMService @@ -105,8 +102,8 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) image_sync_aggregator = ImageSyncAggregator( os.path.join(os.path.dirname(__file__), "assets", "speaking.png"), @@ -117,11 +114,11 @@ async def main(): [ transport.input(), image_sync_aggregator, - tma_in, + context_aggregator.user(), llm, tts, transport.output(), - tma_out, + context_aggregator.assistant(), ] ) diff --git a/examples/foundational/07-interruptible-vad.py b/examples/foundational/07-interruptible-vad.py index 753a4477c..8de28ff6d 100644 --- a/examples/foundational/07-interruptible-vad.py +++ b/examples/foundational/07-interruptible-vad.py @@ -13,11 +13,8 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) from pipecat.processors.audio.vad.silero import SileroVAD +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -65,18 +62,18 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = 
Pipeline( [ - transport.input(), # Transport user input + transport.input(), vad, - tma_in, # User responses - llm, # LLM - tts, # TTS - transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.user(), + llm, + tts, + transport.output(), + context_aggregator.assistant(), ] ) diff --git a/examples/foundational/07-interruptible.py b/examples/foundational/07-interruptible.py index 458edc8ed..ed2891d22 100644 --- a/examples/foundational/07-interruptible.py +++ b/examples/foundational/07-interruptible.py @@ -14,10 +14,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -64,17 +61,17 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07c-interruptible-deepgram.py b/examples/foundational/07c-interruptible-deepgram.py index d232ad973..36bdb320a 100644 --- a/examples/foundational/07c-interruptible-deepgram.py +++ b/examples/foundational/07c-interruptible-deepgram.py @@ -18,10 +18,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.deepgram import DeepgramSTTService, DeepgramTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -61,18 +58,18 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input stt, # STT - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07d-interruptible-elevenlabs.py b/examples/foundational/07d-interruptible-elevenlabs.py index fd6a5a1d5..524b50fd6 100644 --- a/examples/foundational/07d-interruptible-elevenlabs.py +++ b/examples/foundational/07d-interruptible-elevenlabs.py @@ -11,6 +11,7 @@ import aiohttp from dotenv import load_dotenv from loguru import logger +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from 
runner import configure from pipecat.audio.vad.silero import SileroVADAnalyzer @@ -18,10 +19,6 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) from pipecat.services.elevenlabs import ElevenLabsTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -62,17 +59,17 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07e-interruptible-playht.py b/examples/foundational/07e-interruptible-playht.py index 520fb40bd..cd0c0505d 100644 --- a/examples/foundational/07e-interruptible-playht.py +++ b/examples/foundational/07e-interruptible-playht.py @@ -18,10 +18,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.openai import OpenAILLMService from pipecat.services.playht import PlayHTTTSService from pipecat.transcriptions.language import Language @@ -66,17 +63,17 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07f-interruptible-azure.py b/examples/foundational/07f-interruptible-azure.py index eb7745df0..1de3a5b0d 100644 --- a/examples/foundational/07f-interruptible-azure.py +++ b/examples/foundational/07f-interruptible-azure.py @@ -14,10 +14,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.azure import AzureLLMService, AzureSTTService, AzureTTSService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -74,18 +71,18 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ 
transport.input(), # Transport user input stt, # STT - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07g-interruptible-openai-tts.py b/examples/foundational/07g-interruptible-openai-tts.py index 56f94d568..49ee08595 100644 --- a/examples/foundational/07g-interruptible-openai-tts.py +++ b/examples/foundational/07g-interruptible-openai-tts.py @@ -11,6 +11,7 @@ import aiohttp from dotenv import load_dotenv from loguru import logger +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from runner import configure from pipecat.audio.vad.silero import SileroVADAnalyzer @@ -18,10 +19,6 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) from pipecat.services.openai import OpenAILLMService, OpenAITTSService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -59,17 +56,17 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07h-interruptible-openpipe.py b/examples/foundational/07h-interruptible-openpipe.py index afe378f47..186ea2fc1 100644 --- a/examples/foundational/07h-interruptible-openpipe.py +++ b/examples/foundational/07h-interruptible-openpipe.py @@ -14,10 +14,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.openpipe import OpenPipeLLMService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -70,17 +67,18 @@ async def main(): "content": "You are a helpful LLM in a WebRTC call. Your goal is to demonstrate your capabilities in a succinct way. Your output will be converted to audio so don't include special characters in your answers. 
Respond to what the user said in a creative and helpful way.", }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07i-interruptible-xtts.py b/examples/foundational/07i-interruptible-xtts.py index f51487ec2..46cc86562 100644 --- a/examples/foundational/07i-interruptible-xtts.py +++ b/examples/foundational/07i-interruptible-xtts.py @@ -14,10 +14,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.openai import OpenAILLMService from pipecat.services.xtts import XTTSService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -66,17 +63,17 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07j-interruptible-gladia.py b/examples/foundational/07j-interruptible-gladia.py index f2d90761a..df5906922 100644 --- a/examples/foundational/07j-interruptible-gladia.py +++ b/examples/foundational/07j-interruptible-gladia.py @@ -14,10 +14,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.gladia import GladiaSTTService from pipecat.services.openai import OpenAILLMService @@ -69,18 +66,18 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input stt, # STT - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07k-interruptible-lmnt.py b/examples/foundational/07k-interruptible-lmnt.py index 9056437ef..f898c237a 100644 --- a/examples/foundational/07k-interruptible-lmnt.py +++ 
b/examples/foundational/07k-interruptible-lmnt.py @@ -14,10 +14,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.lmnt import LmntTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -62,17 +59,17 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07l-interruptible-together.py b/examples/foundational/07l-interruptible-together.py index 0010b9643..cb143dc88 100644 --- a/examples/foundational/07l-interruptible-together.py +++ b/examples/foundational/07l-interruptible-together.py @@ -52,7 +52,7 @@ async def main(): llm = TogetherLLMService( api_key=os.getenv("TOGETHER_API_KEY"), - model=os.getenv("TOGETHER_MODEL"), + model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", params=TogetherLLMService.InputParams( temperature=1.0, top_p=0.9, diff --git a/examples/foundational/07m-interruptible-aws.py b/examples/foundational/07m-interruptible-aws.py index 7cc1440ef..298c8bbd5 100644 --- a/examples/foundational/07m-interruptible-aws.py +++ b/examples/foundational/07m-interruptible-aws.py @@ -18,10 +18,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.aws import AWSTTSService from pipecat.services.deepgram import DeepgramSTTService from pipecat.services.openai import OpenAILLMService @@ -69,18 +66,18 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input stt, # STT - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/07n-interruptible-google.py b/examples/foundational/07n-interruptible-google.py index b25ad185f..e5f8f7063 100644 --- a/examples/foundational/07n-interruptible-google.py +++ b/examples/foundational/07n-interruptible-google.py @@ -18,10 +18,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - 
LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.deepgram import DeepgramSTTService from pipecat.services.google import GoogleTTSService from pipecat.services.openai import OpenAILLMService @@ -66,18 +63,18 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input stt, # STT - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/10-wake-phrase.py b/examples/foundational/10-wake-phrase.py index 9bc9a0b9e..eeec2d4f3 100644 --- a/examples/foundational/10-wake-phrase.py +++ b/examples/foundational/10-wake-phrase.py @@ -10,14 +10,11 @@ import sys from pipecat.audio.vad.silero import SileroVADAnalyzer -from pipecat.processors.filters.wake_check_filter import WakeCheckFilter from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext +from pipecat.processors.filters.wake_check_filter import WakeCheckFilter from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -65,18 +62,19 @@ async def main(): ] hey_robot_filter = WakeCheckFilter(["hey robot", "hey, robot"]) - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input hey_robot_filter, # Filter out speech not directed at the robot - tma_in, # User responses + context_aggregator.user(), # User responses llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), # Assistant spoken responses ] ) diff --git a/examples/foundational/11-sound-effects.py b/examples/foundational/11-sound-effects.py index c6c486368..a921fb9c9 100644 --- a/examples/foundational/11-sound-effects.py +++ b/examples/foundational/11-sound-effects.py @@ -20,10 +20,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMUserResponseAggregator, - LLMAssistantResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.processors.frame_processor import FrameDirection, FrameProcessor from pipecat.processors.logger import FrameLogger from pipecat.services.cartesia import CartesiaHttpTTSService @@ -113,8 +110,8 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = 
llm.create_context_aggregator(context) out_sound = OutboundSoundEffectWrapper() in_sound = InboundSoundEffectWrapper() fl = FrameLogger("LLM Out") @@ -123,7 +120,7 @@ async def main(): pipeline = Pipeline( [ transport.input(), - tma_in, + context_aggregator.user(), in_sound, fl2, llm, @@ -131,7 +128,7 @@ async def main(): tts, out_sound, transport.output(), - tma_out, + context_aggregator.assistant(), ] ) diff --git a/examples/foundational/16-gpu-container-local-bot.py b/examples/foundational/16-gpu-container-local-bot.py index ce4b923d5..9d154fd51 100644 --- a/examples/foundational/16-gpu-container-local-bot.py +++ b/examples/foundational/16-gpu-container-local-bot.py @@ -18,10 +18,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.deepgram import DeepgramTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import ( @@ -75,17 +72,17 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Transport user input - tma_in, # User responses + context_aggregator.user(), llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), ] ) @@ -123,7 +120,7 @@ async def on_app_message(transport, message, sender): ) ) # And push to the pipeline for the Daily transport.output to send - await tma_in.push_frame( + await task.queue_frame( DailyTransportMessageFrame( message={"latency-pong-pipeline-delivery": {"ts": ts}}, participant_id=sender, diff --git a/examples/foundational/17-detect-user-idle.py b/examples/foundational/17-detect-user-idle.py index 79ea712ab..6acad8800 100644 --- a/examples/foundational/17-detect-user-idle.py +++ b/examples/foundational/17-detect-user-idle.py @@ -14,10 +14,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.processors.user_idle_processor import UserIdleProcessor from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.openai import OpenAILLMService @@ -65,8 +62,8 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) async def user_idle_callback(user_idle: UserIdleProcessor): messages.append( @@ -83,11 +80,11 @@ async def user_idle_callback(user_idle: UserIdleProcessor): [ transport.input(), # Transport user input user_idle, # Idle user check-in - tma_in, # User responses + context_aggregator.user(), llm, # LLM tts, # TTS transport.output(), # Transport bot output - tma_out, # Assistant spoken responses + context_aggregator.assistant(), ] ) diff --git 
a/examples/moondream-chatbot/bot.py b/examples/moondream-chatbot/bot.py index 182dceb65..f481473fe 100644 --- a/examples/moondream-chatbot/bot.py +++ b/examples/moondream-chatbot/bot.py @@ -28,7 +28,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineTask -from pipecat.processors.aggregators.llm_response import LLMUserResponseAggregator +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.processors.aggregators.sentence import SentenceAggregator from pipecat.processors.aggregators.vision_image_frame import VisionImageFrameAggregator from pipecat.processors.frame_processor import FrameDirection, FrameProcessor @@ -182,17 +182,19 @@ async def main(): }, ] - ura = LLMUserResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), - ura, + context_aggregator.user(), llm, ParallelPipeline([sa, ir, va, moondream], [tf, imgf]), tts, ta, transport.output(), + context_aggregator.assistant(), ] ) diff --git a/examples/simple-chatbot/bot.py b/examples/simple-chatbot/bot.py index 8ca764454..f0396b9ae 100644 --- a/examples/simple-chatbot/bot.py +++ b/examples/simple-chatbot/bot.py @@ -15,10 +15,6 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) from pipecat.frames.frames import ( OutputImageRawFrame, SpriteFrame, @@ -27,6 +23,7 @@ TTSAudioRawFrame, TTSStoppedFrame, ) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.processors.frame_processor import FrameDirection, FrameProcessor from pipecat.services.elevenlabs import ElevenLabsTTSService from pipecat.services.openai import OpenAILLMService @@ -143,20 +140,20 @@ async def main(): }, ] - user_response = LLMUserResponseAggregator() - assistant_response = LLMAssistantResponseAggregator() + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) ta = TalkingAnimation() pipeline = Pipeline( [ transport.input(), - user_response, + context_aggregator.user(), llm, tts, ta, transport.output(), - assistant_response, + context_aggregator.assistant(), ] ) diff --git a/examples/storytelling-chatbot/src/bot.py b/examples/storytelling-chatbot/src/bot.py index 5fbe76cc1..82c55e013 100644 --- a/examples/storytelling-chatbot/src/bot.py +++ b/examples/storytelling-chatbot/src/bot.py @@ -14,10 +14,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.elevenlabs import ElevenLabsTTSService from pipecat.services.fal import FalImageGenService from pipecat.services.openai import OpenAILLMService @@ -82,8 +79,8 @@ async def main(room_url, token=None): story_pages = [] # We need aggregators to keep track of user and LLM responses - llm_responses = LLMAssistantResponseAggregator(message_history) - user_responses = LLMUserResponseAggregator(message_history) + context = OpenAILLMContext(message_history) 
+ context_aggregator = llm_service.create_context_aggregator(context) # -------------- Processors ------------- # @@ -126,13 +123,13 @@ async def on_first_participant_joined(transport, participant): main_pipeline = Pipeline( [ transport.input(), - user_responses, + context_aggregator.user(), llm_service, story_processor, image_processor, tts_service, transport.output(), - llm_responses, + context_aggregator.assistant(), ] ) diff --git a/examples/studypal/studypal.py b/examples/studypal/studypal.py index 310eb4051..fb4c7fead 100644 --- a/examples/studypal/studypal.py +++ b/examples/studypal/studypal.py @@ -13,10 +13,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.openai import OpenAILLMService from pipecat.transports.services.daily import DailyParams, DailyTransport @@ -150,17 +147,17 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), - tma_in, + context_aggregator.user(), llm, tts, transport.output(), - tma_out, + context_aggregator.assistant(), ] ) diff --git a/examples/twilio-chatbot/bot.py b/examples/twilio-chatbot/bot.py index 32d8317ba..5e6d91910 100644 --- a/examples/twilio-chatbot/bot.py +++ b/examples/twilio-chatbot/bot.py @@ -6,10 +6,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineParams, PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.openai import OpenAILLMService from pipecat.services.deepgram import DeepgramSTTService @@ -58,18 +55,18 @@ async def run_bot(websocket_client, stream_sid): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Websocket input from client stt, # Speech-To-Text - tma_in, # User responses + context_aggregator.user(), llm, # LLM tts, # Text-To-Speech transport.output(), # Websocket output to client - tma_out, # LLM responses + context_aggregator.assistant(), ] ) diff --git a/examples/websocket-server/bot.py b/examples/websocket-server/bot.py index deb6a31a2..2faaea4ad 100644 --- a/examples/websocket-server/bot.py +++ b/examples/websocket-server/bot.py @@ -13,10 +13,7 @@ from pipecat.pipeline.pipeline import Pipeline from pipecat.pipeline.runner import PipelineRunner from pipecat.pipeline.task import PipelineTask -from pipecat.processors.aggregators.llm_response import ( - LLMAssistantResponseAggregator, - LLMUserResponseAggregator, -) +from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext from pipecat.services.cartesia import CartesiaTTSService from pipecat.services.deepgram import 
DeepgramSTTService from pipecat.services.openai import OpenAILLMService @@ -62,18 +59,18 @@ async def main(): }, ] - tma_in = LLMUserResponseAggregator(messages) - tma_out = LLMAssistantResponseAggregator(messages) + context = OpenAILLMContext(messages) + context_aggregator = llm.create_context_aggregator(context) pipeline = Pipeline( [ transport.input(), # Websocket input from client stt, # Speech-To-Text - tma_in, # User responses + context_aggregator.user(), llm, # LLM tts, # Text-To-Speech transport.output(), # Websocket output to client - tma_out, # LLM responses + context_aggregator.assistant(), ] ) diff --git a/src/pipecat/processors/frameworks/langchain.py b/src/pipecat/processors/frameworks/langchain.py index c49dbaa76..c0b657244 100644 --- a/src/pipecat/processors/frameworks/langchain.py +++ b/src/pipecat/processors/frameworks/langchain.py @@ -39,7 +39,7 @@ async def process_frame(self, frame: Frame, direction: FrameDirection): await super().process_frame(frame, direction) if isinstance(frame, LLMMessagesFrame): - # Messages are accumulated by the `LLMUserResponseAggregator` in a list of messages. + # Messages are accumulated on the context as a list of messages. # The last one by the human is the one we want to send to the LLM. logger.debug(f"Got transcription frame {frame}") text: str = frame.messages[-1]["content"] From 320f62225556c9b0235074f52d464e2c7581d32c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aleix=20Conchillo=20Flaqu=C3=A9?= Date: Sat, 19 Oct 2024 16:26:09 -0700 Subject: [PATCH 2/4] examples: upgrade storytelling frontend packages --- .../frontend/package-lock.json | 335 +++++++++--------- .../frontend/package.json | 2 +- 2 files changed, 173 insertions(+), 164 deletions(-) diff --git a/examples/storytelling-chatbot/frontend/package-lock.json b/examples/storytelling-chatbot/frontend/package-lock.json index 614315448..7b491d0f3 100644 --- a/examples/storytelling-chatbot/frontend/package-lock.json +++ b/examples/storytelling-chatbot/frontend/package-lock.json @@ -299,9 +299,9 @@ } }, "node_modules/@next/env": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.14.tgz", - "integrity": "sha512-/0hWQfiaD5//LvGNgc8PjvyqV50vGK0cADYzaoOOGN8fxzBn3iAiaq3S0tCRnFBldq0LVveLcxCTi41ZoYgAgg==" + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.15.tgz", + "integrity": "sha512-S1qaj25Wru2dUpcIZMjxeMVSwkt8BK4dmWHHiBuRstcIyOsMapqT4A4jSB6onvqeygkSSmOkyny9VVx8JIGamQ==" }, "node_modules/@next/eslint-plugin-next": { "version": "14.1.4", @@ -313,9 +313,9 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.14.tgz", - "integrity": "sha512-bsxbSAUodM1cjYeA4o6y7sp9wslvwjSkWw57t8DtC8Zig8aG8V6r+Yc05/9mDzLKcybb6EN85k1rJDnMKBd9Gw==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.15.tgz", + "integrity": "sha512-Rvh7KU9hOUBnZ9TJ28n2Oa7dD9cvDBKua9IKx7cfQQ0GoYUwg9ig31O2oMwH3wm+pE3IkAQ67ZobPfEgurPZIA==", "cpu": [ "arm64" ], @@ -328,9 +328,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.14.tgz", - "integrity": "sha512-cC9/I+0+SK5L1k9J8CInahduTVWGMXhQoXFeNvF0uNs3Bt1Ub0Azb8JzTU9vNCr0hnaMqiWu/Z0S1hfKc3+dww==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.15.tgz", + "integrity": 
"sha512-5TGyjFcf8ampZP3e+FyCax5zFVHi+Oe7sZyaKOngsqyaNEpOgkKB3sqmymkZfowy3ufGA/tUgDPPxpQx931lHg==", "cpu": [ "x64" ], @@ -343,9 +343,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.14.tgz", - "integrity": "sha512-RMLOdA2NU4O7w1PQ3Z9ft3PxD6Htl4uB2TJpocm+4jcllHySPkFaUIFacQ3Jekcg6w+LBaFvjSPthZHiPmiAUg==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.15.tgz", + "integrity": "sha512-3Bwv4oc08ONiQ3FiOLKT72Q+ndEMyLNsc/D3qnLMbtUYTQAmkx9E/JRu0DBpHxNddBmNT5hxz1mYBphJ3mfrrw==", "cpu": [ "arm64" ], @@ -358,9 +358,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.14.tgz", - "integrity": "sha512-WgLOA4hT9EIP7jhlkPnvz49iSOMdZgDJVvbpb8WWzJv5wBD07M2wdJXLkDYIpZmCFfo/wPqFsFR4JS4V9KkQ2A==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.15.tgz", + "integrity": "sha512-k5xf/tg1FBv/M4CMd8S+JL3uV9BnnRmoe7F+GWC3DxkTCD9aewFRH1s5rJ1zkzDa+Do4zyN8qD0N8c84Hu96FQ==", "cpu": [ "arm64" ], @@ -373,9 +373,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.14.tgz", - "integrity": "sha512-lbn7svjUps1kmCettV/R9oAvEW+eUI0lo0LJNFOXoQM5NGNxloAyFRNByYeZKL3+1bF5YE0h0irIJfzXBq9Y6w==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.15.tgz", + "integrity": "sha512-kE6q38hbrRbKEkkVn62reLXhThLRh6/TvgSP56GkFNhU22TbIrQDEMrO7j0IcQHcew2wfykq8lZyHFabz0oBrA==", "cpu": [ "x64" ], @@ -388,9 +388,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.14.tgz", - "integrity": "sha512-7TcQCvLQ/hKfQRgjxMN4TZ2BRB0P7HwrGAYL+p+m3u3XcKTraUFerVbV3jkNZNwDeQDa8zdxkKkw2els/S5onQ==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.15.tgz", + "integrity": "sha512-PZ5YE9ouy/IdO7QVJeIcyLn/Rc4ml9M2G4y3kCM9MNf1YKvFY4heg3pVa/jQbMro+tP6yc4G2o9LjAz1zxD7tQ==", "cpu": [ "x64" ], @@ -403,9 +403,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.14.tgz", - "integrity": "sha512-8i0Ou5XjTLEje0oj0JiI0Xo9L/93ghFtAUYZ24jARSeTMXLUx8yFIdhS55mTExq5Tj4/dC2fJuaT4e3ySvXU1A==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.15.tgz", + "integrity": "sha512-2raR16703kBvYEQD9HNLyb0/394yfqzmIeyp2nDzcPV4yPjqNUG3ohX6jX00WryXz6s1FXpVhsCo3i+g4RUX+g==", "cpu": [ "arm64" ], @@ -418,9 +418,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.14.tgz", - "integrity": "sha512-2u2XcSaDEOj+96eXpyjHjtVPLhkAFw2nlaz83EPeuK4obF+HmtDJHqgR1dZB7Gb6V/d55FL26/lYVd0TwMgcOQ==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.15.tgz", + "integrity": "sha512-fyTE8cklgkyR1p03kJa5zXEaZ9El+kDNM5A+66+8evQS5e/6v0Gk28LqA0Jet8gKSOyP+OTm/tJHzMlGdQerdQ==", "cpu": [ "ia32" ], @@ -433,9 
+433,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.14.tgz", - "integrity": "sha512-MZom+OvZ1NZxuRovKt1ApevjiUJTcU2PmdJKL66xUPaJeRywnbGGRWUlaAOwunD6dX+pm83vj979NTC8QXjGWg==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.15.tgz", + "integrity": "sha512-SzqGbsLsP9OwKNUG9nekShTwhj6JSB9ZLMWQ8g1gG6hdE5gQLncbnbymrwy2yVmH9nikSLYRYxYMFu78Ggp7/g==", "cpu": [ "x64" ], @@ -990,83 +990,83 @@ "dev": true }, "node_modules/@sentry-internal/feedback": { - "version": "7.119.0", - "resolved": "https://registry.npmjs.org/@sentry-internal/feedback/-/feedback-7.119.0.tgz", - "integrity": "sha512-om8TkAU5CQGO8nkmr7qsSBVkP+/vfeS4JgtW3sjoTK0fhj26+DljR6RlfCGWtYQdPSP6XV7atcPTjbSnsmG9FQ==", + "version": "7.119.2", + "resolved": "https://registry.npmjs.org/@sentry-internal/feedback/-/feedback-7.119.2.tgz", + "integrity": "sha512-bnR1yJWVBZfXGx675nMXE8hCXsxluCBfIFy9GQT8PTN/urxpoS9cGz+5F7MA7Xe3Q06/7TT0Mz3fcDvjkqTu3Q==", "dependencies": { - "@sentry/core": "7.119.0", - "@sentry/types": "7.119.0", - "@sentry/utils": "7.119.0" + "@sentry/core": "7.119.2", + "@sentry/types": "7.119.2", + "@sentry/utils": "7.119.2" }, "engines": { "node": ">=12" } }, "node_modules/@sentry-internal/replay-canvas": { - "version": "7.119.0", - "resolved": "https://registry.npmjs.org/@sentry-internal/replay-canvas/-/replay-canvas-7.119.0.tgz", - "integrity": "sha512-NL02VQx6ekPxtVRcsdp1bp5Tb5w6vnfBKSIfMKuDRBy5A10Uc3GSoy/c3mPyHjOxB84452A+xZSx6bliEzAnuA==", + "version": "7.119.2", + "resolved": "https://registry.npmjs.org/@sentry-internal/replay-canvas/-/replay-canvas-7.119.2.tgz", + "integrity": "sha512-Lqo8IFyeKkdOrOGRqm9jCEqeBl8kINe5+c2VqULpkO/I6ql6ISwPSYnmG6yL8cCVIaT1893CLog/pS4FxCv8/Q==", "dependencies": { - "@sentry/core": "7.119.0", - "@sentry/replay": "7.119.0", - "@sentry/types": "7.119.0", - "@sentry/utils": "7.119.0" + "@sentry/core": "7.119.2", + "@sentry/replay": "7.119.2", + "@sentry/types": "7.119.2", + "@sentry/utils": "7.119.2" }, "engines": { "node": ">=12" } }, "node_modules/@sentry-internal/tracing": { - "version": "7.119.0", - "resolved": "https://registry.npmjs.org/@sentry-internal/tracing/-/tracing-7.119.0.tgz", - "integrity": "sha512-oKdFJnn+56f0DHUADlL8o9l8jTib3VDLbWQBVkjD9EprxfaCwt2m8L5ACRBdQ8hmpxCEo4I8/6traZ7qAdBUqA==", + "version": "7.119.2", + "resolved": "https://registry.npmjs.org/@sentry-internal/tracing/-/tracing-7.119.2.tgz", + "integrity": "sha512-V2W+STWrafyGJhQv3ulMFXYDwWHiU6wHQAQBShsHVACiFaDrJ2kPRet38FKv4dMLlLlP2xN+ss2e5zv3tYlTiQ==", "dependencies": { - "@sentry/core": "7.119.0", - "@sentry/types": "7.119.0", - "@sentry/utils": "7.119.0" + "@sentry/core": "7.119.2", + "@sentry/types": "7.119.2", + "@sentry/utils": "7.119.2" }, "engines": { "node": ">=8" } }, "node_modules/@sentry/browser": { - "version": "7.119.0", - "resolved": "https://registry.npmjs.org/@sentry/browser/-/browser-7.119.0.tgz", - "integrity": "sha512-WwmW1Y4D764kVGeKmdsNvQESZiAn9t8LmCWO0ucBksrjL2zw9gBPtOpRcO6l064sCLeSxxzCN+kIxhRm1gDFEA==", + "version": "7.119.2", + "resolved": "https://registry.npmjs.org/@sentry/browser/-/browser-7.119.2.tgz", + "integrity": "sha512-Wb2RzCsJBTNCmS9KPmbVyV5GGzFXjFdUThAN9xlnN5GgemMBwdQjGu/tRYr8yJAVsRb0EOFH8IuJBNKKNnO49g==", "dependencies": { - "@sentry-internal/feedback": "7.119.0", - "@sentry-internal/replay-canvas": "7.119.0", - "@sentry-internal/tracing": "7.119.0", - "@sentry/core": "7.119.0", - 
"@sentry/integrations": "7.119.0", - "@sentry/replay": "7.119.0", - "@sentry/types": "7.119.0", - "@sentry/utils": "7.119.0" + "@sentry-internal/feedback": "7.119.2", + "@sentry-internal/replay-canvas": "7.119.2", + "@sentry-internal/tracing": "7.119.2", + "@sentry/core": "7.119.2", + "@sentry/integrations": "7.119.2", + "@sentry/replay": "7.119.2", + "@sentry/types": "7.119.2", + "@sentry/utils": "7.119.2" }, "engines": { "node": ">=8" } }, "node_modules/@sentry/core": { - "version": "7.119.0", - "resolved": "https://registry.npmjs.org/@sentry/core/-/core-7.119.0.tgz", - "integrity": "sha512-CS2kUv9rAJJEjiRat6wle3JATHypB0SyD7pt4cpX5y0dN5dZ1JrF57oLHRMnga9fxRivydHz7tMTuBhSSwhzjw==", + "version": "7.119.2", + "resolved": "https://registry.npmjs.org/@sentry/core/-/core-7.119.2.tgz", + "integrity": "sha512-hQr3d2yWq/2lMvoyBPOwXw1IHqTrCjOsU1vYKhAa6w9vGbJZFGhKGGE2KEi/92c3gqGn+gW/PC7cV6waCTDuVA==", "dependencies": { - "@sentry/types": "7.119.0", - "@sentry/utils": "7.119.0" + "@sentry/types": "7.119.2", + "@sentry/utils": "7.119.2" }, "engines": { "node": ">=8" } }, "node_modules/@sentry/integrations": { - "version": "7.119.0", - "resolved": "https://registry.npmjs.org/@sentry/integrations/-/integrations-7.119.0.tgz", - "integrity": "sha512-OHShvtsRW0A+ZL/ZbMnMqDEtJddPasndjq+1aQXw40mN+zeP7At/V1yPZyFaURy86iX7Ucxw5BtmzuNy7hLyTA==", + "version": "7.119.2", + "resolved": "https://registry.npmjs.org/@sentry/integrations/-/integrations-7.119.2.tgz", + "integrity": "sha512-dCuXKvbUE3gXVVa696SYMjlhSP6CxpMH/gl4Jk26naEB8Xjsn98z/hqEoXLg6Nab73rjR9c/9AdKqBbwVMHyrQ==", "dependencies": { - "@sentry/core": "7.119.0", - "@sentry/types": "7.119.0", - "@sentry/utils": "7.119.0", + "@sentry/core": "7.119.2", + "@sentry/types": "7.119.2", + "@sentry/utils": "7.119.2", "localforage": "^1.8.1" }, "engines": { @@ -1074,33 +1074,33 @@ } }, "node_modules/@sentry/replay": { - "version": "7.119.0", - "resolved": "https://registry.npmjs.org/@sentry/replay/-/replay-7.119.0.tgz", - "integrity": "sha512-BnNsYL+X5I4WCH6wOpY6HQtp4MgVt0NVlhLUsEyrvMUiTs0bPkDBrulsgZQBUKJsbOr3l9nHrFoNVB/0i6WNLA==", + "version": "7.119.2", + "resolved": "https://registry.npmjs.org/@sentry/replay/-/replay-7.119.2.tgz", + "integrity": "sha512-nHDsBt0mlJXTWAHjzQdCzDbhV2fv8B62PPB5mu5SpI+G5h+ir3r5lR0lZZrMT8eurVowb/HnLXAs+XYVug3blg==", "dependencies": { - "@sentry-internal/tracing": "7.119.0", - "@sentry/core": "7.119.0", - "@sentry/types": "7.119.0", - "@sentry/utils": "7.119.0" + "@sentry-internal/tracing": "7.119.2", + "@sentry/core": "7.119.2", + "@sentry/types": "7.119.2", + "@sentry/utils": "7.119.2" }, "engines": { "node": ">=12" } }, "node_modules/@sentry/types": { - "version": "7.119.0", - "resolved": "https://registry.npmjs.org/@sentry/types/-/types-7.119.0.tgz", - "integrity": "sha512-27qQbutDBPKGbuJHROxhIWc1i0HJaGLA90tjMu11wt0E4UNxXRX+UQl4Twu68v4EV3CPvQcEpQfgsViYcXmq+w==", + "version": "7.119.2", + "resolved": "https://registry.npmjs.org/@sentry/types/-/types-7.119.2.tgz", + "integrity": "sha512-ydq1tWsdG7QW+yFaTp0gFaowMLNVikIqM70wxWNK+u98QzKnVY/3XTixxNLsUtnAB4Y+isAzFhrc6Vb5GFdFeg==", "engines": { "node": ">=8" } }, "node_modules/@sentry/utils": { - "version": "7.119.0", - "resolved": "https://registry.npmjs.org/@sentry/utils/-/utils-7.119.0.tgz", - "integrity": "sha512-ZwyXexWn2ZIe2bBoYnXJVPc2esCSbKpdc6+0WJa8eutXfHq3FRKg4ohkfCBpfxljQGEfP1+kfin945lA21Ka+A==", + "version": "7.119.2", + "resolved": "https://registry.npmjs.org/@sentry/utils/-/utils-7.119.2.tgz", + "integrity": 
"sha512-TLdUCvcNgzKP0r9YD7tgCL1PEUp42TObISridsPJ5rhpVGQJvpr+Six0zIkfDUxerLYWZoK8QMm9KgFlPLNQzA==", "dependencies": { - "@sentry/types": "7.119.0" + "@sentry/types": "7.119.2" }, "engines": { "node": ">=8" @@ -1151,9 +1151,9 @@ "dev": true }, "node_modules/@types/node": { - "version": "20.16.10", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.16.10.tgz", - "integrity": "sha512-vQUKgWTjEIRFCvK6CyriPH3MZYiYlNy0fKiEYHWbcoWLEgs4opurGGKlebrTLqdSMIbXImH6XExNiIyNUv3WpA==", + "version": "20.16.13", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.16.13.tgz", + "integrity": "sha512-GjQ7im10B0labo8ZGXDGROUl9k0BNyDgzfGpb4g/cl+4yYDWVKcozANF4FGr4/p0O/rAkQClM6Wiwkije++1Tg==", "dev": true, "dependencies": { "undici-types": "~6.19.2" @@ -1176,9 +1176,9 @@ } }, "node_modules/@types/react-dom": { - "version": "18.3.0", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.0.tgz", - "integrity": "sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-qW1Mfv8taImTthu4KoXgDfLuk4bydU6Q/TkADnDWWHwi4NX4BR+LWfTp2sVmTqRrsHvyDDTelgelxJ+SsejKKQ==", "devOptional": true, "dependencies": { "@types/react": "*" @@ -1318,9 +1318,9 @@ "dev": true }, "node_modules/acorn": { - "version": "8.12.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.1.tgz", - "integrity": "sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.13.0.tgz", + "integrity": "sha512-8zSiw54Oxrdym50NlZ9sUusyO1Z1ZchgRLWRaK6c86XJFClyCgFKetdowBg5bKxyp/u+CDBJG4Mpp0m3HLZl9w==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -1642,9 +1642,9 @@ } }, "node_modules/axe-core": { - "version": "4.10.0", - "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.0.tgz", - "integrity": "sha512-Mr2ZakwQ7XUAjp7pAwQWRhhK8mQQ6JAaNWSjmjxil0R8BPioMtQsTLOolGYkji1rcL++3dCqZA3zWqpT+9Ew6g==", + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.1.tgz", + "integrity": "sha512-qPC9o+kD8Tir0lzNGLeghbOrWMr3ZJpaRlCIb6Uobt/7N4FiEDvqUMnxzCHRHmg8vOg14kr5gVNyScRmbMaJ9g==", "dev": true, "engines": { "node": ">=4" @@ -1781,9 +1781,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001666", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001666.tgz", - "integrity": "sha512-gD14ICmoV5ZZM1OdzPWmpx+q4GyefaK06zi8hmfHV5xe4/2nOQX3+Dw5o+fSqOws2xVwL9j+anOPFwHzdEdV4g==", + "version": "1.0.30001669", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001669.tgz", + "integrity": "sha512-DlWzFDJqstqtIVx1zeSpIMLjunf5SmwOw0N2Ck/QSQdS8PLS4+9HrLaYei4w8BIAL7IB/UEDu889d8vhCTPA0w==", "funding": [ { "type": "opencollective", @@ -2140,9 +2140,9 @@ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" }, "node_modules/electron-to-chromium": { - "version": "1.5.31", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.31.tgz", - "integrity": "sha512-QcDoBbQeYt0+3CWcK/rEbuHvwpbT/8SV9T3OSgs6cX1FlcUAkgrkqbg9zLnDrMM/rLamzQwal4LYFCiWk861Tg==", + "version": "1.5.41", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.41.tgz", + "integrity": 
"sha512-dfdv/2xNjX0P8Vzme4cfzHqnPm5xsZXwsolTYr0eyW18IUmNyG08vL+fttvinTfhKfIKdRoqkDIC9e9iWQCNYQ==", "dev": true }, "node_modules/emoji-regex": { @@ -2265,9 +2265,9 @@ } }, "node_modules/es-iterator-helpers": { - "version": "1.0.19", - "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.0.19.tgz", - "integrity": "sha512-zoMwbCcH5hwUkKJkT8kDIBZSz9I6mVG//+lDCinLCGov4+r7NIy0ld8o03M0cJxl2spVf6ESYVS6/gpIfq1FFw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.1.0.tgz", + "integrity": "sha512-/SurEfycdyssORP/E+bj4sEu1CWw4EmLDsHynHwSXQ7utgbrMRWW195pTrCjFgFCddf/UkYm3oqKPRq5i8bJbw==", "dev": true, "dependencies": { "call-bind": "^1.0.7", @@ -2277,12 +2277,12 @@ "es-set-tostringtag": "^2.0.3", "function-bind": "^1.1.2", "get-intrinsic": "^1.2.4", - "globalthis": "^1.0.3", + "globalthis": "^1.0.4", "has-property-descriptors": "^1.0.2", "has-proto": "^1.0.3", "has-symbols": "^1.0.3", "internal-slot": "^1.0.7", - "iterator.prototype": "^1.1.2", + "iterator.prototype": "^1.1.3", "safe-array-concat": "^1.1.2" }, "engines": { @@ -2366,6 +2366,7 @@ "version": "8.57.1", "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", @@ -2525,9 +2526,9 @@ } }, "node_modules/eslint-plugin-import": { - "version": "2.30.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.30.0.tgz", - "integrity": "sha512-/mHNE9jINJfiD2EKkg1BKyPyUk4zdnT54YgbOgfjSakWT5oyX/qQLVNTkehyfpcMxZXMy1zyonZ2v7hZTX43Yw==", + "version": "2.31.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.31.0.tgz", + "integrity": "sha512-ixmkI62Rbc2/w8Vfxyh1jQRTdRTF52VxwRVHl/ykPAmqG+Nb7/kNn+byLP0LxPgI7zWA16Jt82SybJInmMia3A==", "dev": true, "dependencies": { "@rtsao/scc": "^1.1.0", @@ -2538,7 +2539,7 @@ "debug": "^3.2.7", "doctrine": "^2.1.0", "eslint-import-resolver-node": "^0.3.9", - "eslint-module-utils": "^2.9.0", + "eslint-module-utils": "^2.12.0", "hasown": "^2.0.2", "is-core-module": "^2.15.1", "is-glob": "^4.0.3", @@ -2547,13 +2548,14 @@ "object.groupby": "^1.0.3", "object.values": "^1.2.0", "semver": "^6.3.1", + "string.prototype.trimend": "^1.0.8", "tsconfig-paths": "^3.15.0" }, "engines": { "node": ">=4" }, "peerDependencies": { - "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8" + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8 || ^9" } }, "node_modules/eslint-plugin-import/node_modules/debug": { @@ -2649,9 +2651,9 @@ } }, "node_modules/eslint-plugin-react-hooks": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", - "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", + "version": "5.0.0-canary-7118f5dd7-20230705", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.0.0-canary-7118f5dd7-20230705.tgz", + "integrity": "sha512-AZYbMo/NW9chdL7vk6HQzQhT+PvTAEVqWk9ziruUoW2kAOcN5qNyelv70e0F1VNQAbvutOC9oc+xfWycI9FxDw==", "dev": true, "engines": { "node": ">=10" @@ -2941,9 +2943,9 @@ } }, "node_modules/framer-motion": { - "version": "11.9.0", - "resolved": 
"https://registry.npmjs.org/framer-motion/-/framer-motion-11.9.0.tgz", - "integrity": "sha512-nCfGxvsQecVLjjYDu35G2F5ls+ArE3FBfhxV0RSiisMaUKqteq5DMBFNRKwMyVj+VqKTNhawt+BV480YCHKFlQ==", + "version": "11.11.9", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-11.11.9.tgz", + "integrity": "sha512-XpdZseuCrZehdHGuW22zZt3SF5g6AHJHJi7JwQIigOznW4Jg1n0oGPMJQheMaKLC+0rp5gxUKMRYI6ytd3q4RQ==", "dependencies": { "tslib": "^2.4.0" }, @@ -3766,9 +3768,9 @@ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" }, "node_modules/iterator.prototype": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.2.tgz", - "integrity": "sha512-DR33HMMr8EzwuRL8Y9D3u2BMj8+RqSE850jfGu59kS7tbmPLzGkZmVSfyCFSDxuZiEY6Rzt3T2NA/qU+NwVj1w==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.3.tgz", + "integrity": "sha512-FW5iMbeQ6rBGm/oKgzq2aW4KvAGpxPzYES8N4g4xNXUKpL1mclMvOe+76AcLDTvD+Ze+sOpVhgdAQEKF4L9iGQ==", "dev": true, "dependencies": { "define-properties": "^1.2.1", @@ -3776,6 +3778,9 @@ "has-symbols": "^1.0.3", "reflect.getprototypeof": "^1.0.4", "set-function-name": "^2.0.1" + }, + "engines": { + "node": ">= 0.4" } }, "node_modules/jackspeak": { @@ -4065,11 +4070,11 @@ "dev": true }, "node_modules/next": { - "version": "14.2.14", - "resolved": "https://registry.npmjs.org/next/-/next-14.2.14.tgz", - "integrity": "sha512-Q1coZG17MW0Ly5x76shJ4dkC23woLAhhnDnw+DfTc7EpZSGuWrlsZ3bZaO8t6u1Yu8FVfhkqJE+U8GC7E0GLPQ==", + "version": "14.2.15", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.15.tgz", + "integrity": "sha512-h9ctmOokpoDphRvMGnwOJAedT6zKhwqyZML9mDtspgf4Rh3Pn7UTYKqePNoDvhsWBAO5GoPNYshnAUGIazVGmw==", "dependencies": { - "@next/env": "14.2.14", + "@next/env": "14.2.15", "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", @@ -4084,15 +4089,15 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.2.14", - "@next/swc-darwin-x64": "14.2.14", - "@next/swc-linux-arm64-gnu": "14.2.14", - "@next/swc-linux-arm64-musl": "14.2.14", - "@next/swc-linux-x64-gnu": "14.2.14", - "@next/swc-linux-x64-musl": "14.2.14", - "@next/swc-win32-arm64-msvc": "14.2.14", - "@next/swc-win32-ia32-msvc": "14.2.14", - "@next/swc-win32-x64-msvc": "14.2.14" + "@next/swc-darwin-arm64": "14.2.15", + "@next/swc-darwin-x64": "14.2.15", + "@next/swc-linux-arm64-gnu": "14.2.15", + "@next/swc-linux-arm64-musl": "14.2.15", + "@next/swc-linux-x64-gnu": "14.2.15", + "@next/swc-linux-x64-musl": "14.2.15", + "@next/swc-win32-arm64-msvc": "14.2.15", + "@next/swc-win32-ia32-msvc": "14.2.15", + "@next/swc-win32-x64-msvc": "14.2.15" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", @@ -4421,9 +4426,9 @@ } }, "node_modules/picocolors": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", - "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==" }, "node_modules/picomatch": { "version": "2.3.1", @@ -4817,15 +4822,15 @@ "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" }, "node_modules/regexp.prototype.flags": { - "version": "1.5.2", - 
"resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz", - "integrity": "sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==", + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.3.tgz", + "integrity": "sha512-vqlC04+RQoFalODCbCumG2xIOvapzVMHwsyIGM/SIE8fRhFFsXeH8/QQ+s0T0kDAhKc4k30s73/0ydkHQz6HlQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.6", + "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-errors": "^1.3.0", - "set-function-name": "^2.0.1" + "set-function-name": "^2.0.2" }, "engines": { "node": ">= 0.4" @@ -5169,13 +5174,17 @@ } }, "node_modules/string.prototype.includes": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.0.tgz", - "integrity": "sha512-E34CkBgyeqNDcrbU76cDjL5JLcVrtSdYq0MEh/B10r17pRP4ciHLwTgnuLV8Ay6cgEMLkcBkFCKyFZ43YldYzg==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz", + "integrity": "sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==", "dev": true, "dependencies": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3" + }, + "engines": { + "node": ">= 0.4" } }, "node_modules/string.prototype.matchall": { @@ -5374,18 +5383,18 @@ } }, "node_modules/tailwind-merge": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-2.5.2.tgz", - "integrity": "sha512-kjEBm+pvD+6eAwzJL2Bi+02/9LFLal1Gs61+QB7HvTfQQ0aXwC5LGT8PEt1gS0CWKktKe6ysPTAy3cBC5MeiIg==", + "version": "2.5.4", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-2.5.4.tgz", + "integrity": "sha512-0q8cfZHMu9nuYP/b5Shb7Y7Sh1B7Nnl5GqNr1U+n2p6+mybvRtayrQ+0042Z5byvTA8ihjlP8Odo8/VnHbZu4Q==", "funding": { "type": "github", "url": "https://github.com/sponsors/dcastil" } }, "node_modules/tailwindcss": { - "version": "3.4.13", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.13.tgz", - "integrity": "sha512-KqjHOJKogOUt5Bs752ykCeiwvi0fKVkr5oqsFNt/8px/tA8scFPIlkygsf6jXrfCqGHz7VflA6+yytWuM+XhFw==", + "version": "3.4.14", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.14.tgz", + "integrity": "sha512-IcSvOcTRcUtQQ7ILQL5quRDg7Xs93PdJEk1ZLbhhvJc7uj/OAhYOnruEiwnGgBvUtaUAJ8/mhSw1o8L2jCiENA==", "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", @@ -5501,9 +5510,9 @@ } }, "node_modules/tslib": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.7.0.tgz", - "integrity": "sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==" + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.0.tgz", + "integrity": "sha512-jWVzBLplnCmoaTr13V9dYbiQ99wvZRd0vNWaDRg+aVYRcjDF3nDksxFDE/+fkXnKhpnUUkmx5pK/v8mCtLVqZA==" }, "node_modules/type-check": { "version": "0.4.0", @@ -5603,9 +5612,9 @@ } }, "node_modules/typescript": { - "version": "5.6.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.2.tgz", - "integrity": "sha512-NW8ByodCSNCwZeghjN3o+JX5OFH0Ojg6sadjEKY4huZ52TqbJTJnDo5+Tw98lSy63NZvi4n+ez5m2u5d4PkZyw==", + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz", + "integrity": 
"sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -5917,9 +5926,9 @@ "dev": true }, "node_modules/yaml": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.5.1.tgz", - "integrity": "sha512-bLQOjaX/ADgQ20isPJRvF0iRUHIxVhYvr53Of7wGcWlO2jvtUlH5m87DsmulFVxRpNLOnI4tB6p/oh8D7kpn9Q==", + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.6.0.tgz", + "integrity": "sha512-a6ae//JvKDEra2kdi1qzCyrJW/WZCgFi8ydDV+eXExl95t+5R+ijnqHJbz9tmMh8FUjx3iv2fCQ4dclAQlO2UQ==", "bin": { "yaml": "bin.mjs" }, diff --git a/examples/storytelling-chatbot/frontend/package.json b/examples/storytelling-chatbot/frontend/package.json index 358bc3dd5..7d6319181 100644 --- a/examples/storytelling-chatbot/frontend/package.json +++ b/examples/storytelling-chatbot/frontend/package.json @@ -17,7 +17,7 @@ "class-variance-authority": "^0.7.0", "clsx": "^2.1.1", "framer-motion": "^11.9.0", - "next": "^14.2.14", + "next": "^14.2.15", "react": "^18.3.1", "react-dom": "^18.3.1", "recoil": "^0.7.7", From 3815e9dec3ceac298db165068dbc6501bb5c7802 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aleix=20Conchillo=20Flaqu=C3=A9?= Date: Sat, 19 Oct 2024 16:26:27 -0700 Subject: [PATCH 3/4] examples: fix dialin-chatbot python arguments --- examples/dialin-chatbot/bot_runner.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/examples/dialin-chatbot/bot_runner.py b/examples/dialin-chatbot/bot_runner.py index d29adc34e..ac2ce79bf 100644 --- a/examples/dialin-chatbot/bot_runner.py +++ b/examples/dialin-chatbot/bot_runner.py @@ -108,11 +108,9 @@ async def _create_daily_room(room_url, callId, callDomain=None, vendor="daily"): # Spawn a new agent, and join the user session # Note: this is mostly for demonstration purposes (refer to 'deployment' in docs) if vendor == "daily": - bot_proc = f"python3 - m bot_daily - u {room.url} - t {token} - i { - callId} - d {callDomain}" + bot_proc = f"python3 -m bot_daily -u {room.url} -t {token} -i {callId} -d {callDomain}" else: - bot_proc = f"python3 - m bot_twilio - u {room.url} - t { - token} - i {callId} - s {room.config.sip_endpoint}" + bot_proc = f"python3 -m bot_twilio -u {room.url} -t {token} -i {callId} -s {room.config.sip_endpoint}" try: subprocess.Popen( From f971dbe027afd77d02d77014232d7d7faddd3949 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aleix=20Conchillo=20Flaqu=C3=A9?= Date: Sat, 19 Oct 2024 16:26:49 -0700 Subject: [PATCH 4/4] examples(audio-recording): record audio into a file --- examples/chatbot-audio-recording/bot.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/examples/chatbot-audio-recording/bot.py b/examples/chatbot-audio-recording/bot.py index b04f6d695..2542a9e40 100644 --- a/examples/chatbot-audio-recording/bot.py +++ b/examples/chatbot-audio-recording/bot.py @@ -9,6 +9,8 @@ import sys import aiohttp +import datetime +import wave from dotenv import load_dotenv from loguru import logger from runner import configure @@ -30,6 +32,20 @@ logger.add(sys.stderr, level="DEBUG") +async def save_audio(audiobuffer): + if audiobuffer.has_audio(): + merged_audio = audiobuffer.merge_audio_buffers() + filename = f"conversation_recording{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.wav" + with wave.open(filename, "wb") as wf: + wf.setnchannels(2) + wf.setsampwidth(2) + wf.setframerate(audiobuffer._sample_rate) + wf.writeframes(merged_audio) + print(f"Merged audio saved to {filename}") + else: + 
print("No audio data to save") + + async def main(): async with aiohttp.ClientSession() as session: (room_url, token) = await configure(session) @@ -114,11 +130,7 @@ async def on_first_participant_joined(transport, participant): async def on_participant_left(transport, participant, reason): print(f"Participant left: {participant}") await task.queue_frame(EndFrame()) - - @transport.event_handler("on_call_state_updated") - async def on_call_state_updated(transport, state): - if state == "left": - await task.queue_frame(EndFrame()) + await save_audio(audiobuffer) runner = PipelineRunner()