From 2b90c0c3138231dd0a50d5aa28848ddfe0ca93e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jacques=20Verr=C3=A9?= Date: Thu, 12 Sep 2024 17:30:25 +0100 Subject: [PATCH] Update to docs (#230) --- .../evaluate_hallucination_metric.ipynb | 39 +++--- .../cookbook/evaluate_hallucination_metric.md | 55 ++++---- .../cookbook/evaluate_moderation_metric.ipynb | 127 +++++++++++++----- .../cookbook/evaluate_moderation_metric.md | 93 +++++++++---- .../docs/cookbook/langchain.ipynb | 4 +- .../documentation/docs/cookbook/langchain.md | 24 ++-- .../docs/cookbook/llama-index.ipynb | 22 +-- .../docs/cookbook/llama-index.md | 16 ++- .../documentation/docs/cookbook/openai.ipynb | 31 +---- .../documentation/docs/cookbook/openai.md | 22 ++- .../documentation/docs/cookbook/ragas.md | 9 +- .../docs/tracing/integrations/langchain.md | 7 + .../docs/tracing/integrations/llama_index.md | 7 + .../docs/tracing/integrations/openai.md | 15 ++- .../docs/tracing/integrations/ragas.md | 7 + .../documentation/docs/tracing/log_traces.md | 21 +++ .../documentation/docusaurus.config.ts | 22 ++- .../documentation/package-lock.json | 127 ++++++++++++++++++ .../documentation/package.json | 5 +- 19 files changed, 457 insertions(+), 196 deletions(-) diff --git a/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.ipynb b/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.ipynb index d270ec588c..9982ed2ade 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.ipynb @@ -134,7 +134,12 @@ "source": [ "## Evaluating the hallucination metric\n", "\n", - "We can use the Opik SDK to compute a hallucination score for each item in the dataset:" + "In order to evaluate the performance of the Opik hallucination metric, we will define:\n", + "\n", + "- Evaluation task: Our evaluation task will use the data in the Dataset to return a hallucination score computed using the Opik hallucination metric.\n", + "- Scoring metric: We will use the `Equals` metric to check if the hallucination score computed matches the expected output.\n", + "\n", + "By defining the evaluation task in this way, we will be able to understand how well Opik's hallucination metric is able to detect hallucinations in the dataset." 
] }, { @@ -143,26 +148,11 @@ "metadata": {}, "outputs": [], "source": [ - "from opik.evaluation.metrics import Hallucination\n", + "from opik.evaluation.metrics import Hallucination, Equals\n", "from opik.evaluation import evaluate\n", - "from opik.evaluation.metrics import base_metric, score_result\n", "from opik import Opik, DatasetItem\n", - "import pandas as pd\n", - "\n", - "client = Opik()\n", - "\n", - "class CheckHallucinated(base_metric.BaseMetric):\n", - " def __init__(self, name: str):\n", - " self.name = name\n", - "\n", - " def score(self, hallucination_score, expected_hallucination_score, **kwargs):\n", - " return score_result.ScoreResult(\n", - " value= None if hallucination_score is None else hallucination_score == expected_hallucination_score,\n", - " name=self.name,\n", - " reason=f\"Got the hallucination score of {hallucination_score} and expected {expected_hallucination_score}\",\n", - " scoring_failed=hallucination_score is None\n", - " )\n", "\n", + "# Define the evaluation task\n", "def evaluation_task(x: DatasetItem):\n", " metric = Hallucination()\n", " try:\n", @@ -179,18 +169,23 @@ " hallucination_reason = str(e)\n", " \n", " return {\n", - " \"hallucination_score\": \"FAIL\" if hallucination_score == 1 else \"PASS\",\n", + " \"output\": \"FAIL\" if hallucination_score == 1 else \"PASS\",\n", " \"hallucination_reason\": hallucination_reason,\n", - " \"expected_hallucination_score\": x.expected_output[\"expected_output\"]\n", + " \"reference\": x.expected_output[\"expected_output\"]\n", " }\n", "\n", + "# Get the dataset\n", + "client = Opik()\n", "dataset = client.get_dataset(name=\"HaluBench\")\n", "\n", + "# Define the scoring metric\n", + "check_hallucinated_metric = Equals(name=\"Correct hallucination score\")\n", + "\n", "res = evaluate(\n", - " experiment_name=\"Check Comet Metric\",\n", + " experiment_name=\"Evaluate Opik hallucination metric\",\n", " dataset=dataset,\n", " task=evaluation_task,\n", - " scoring_metrics=[CheckHallucinated(name=\"Detected hallucination\")]\n", + " scoring_metrics=[check_hallucinated_metric]\n", ")" ] }, diff --git a/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.md b/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.md index 659ac16845..6bd4983301 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.md +++ b/apps/opik-documentation/documentation/docs/cookbook/evaluate_hallucination_metric.md @@ -13,8 +13,10 @@ For this guide we will be evaluating the Hallucination metric included in the LL import os import getpass -os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") -os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") +if "OPIK_API_KEY" not in os.environ: + os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") +if "OPIK_WORKSPACE" not in os.environ: + os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") ``` If you are running the Opik platform locally, simply set: @@ -31,16 +33,16 @@ First, we will install the necessary libraries, configure the OpenAI API key and ```python -%pip install pyarrow fsspec huggingface_hub --quiet +%pip install opik pyarrow fsspec huggingface_hub --upgrade --quiet ``` ```python -# Configure OpenAI import os import getpass -os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API key: ") +if "OPENAI_API_KEY" not in os.environ: + os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your 
OpenAI API key: ") ``` We will be using the [HaluBench dataset](https://huggingface.co/datasets/PatronusAI/HaluBench?library=pandas) which according to this [paper](https://arxiv.org/pdf/2407.08488) GPT-4o detects 87.9% of hallucinations. The first step will be to create a dataset in the platform so we can keep track of the results of the evaluation. @@ -59,7 +61,7 @@ try: # Insert items into dataset df = pd.read_parquet("hf://datasets/PatronusAI/HaluBench/data/test-00000-of-00001.parquet") - df = df.sample(n=500, random_state=42) + df = df.sample(n=50, random_state=42) dataset_records = [ DatasetItem( @@ -81,30 +83,20 @@ except Exception as e: ## Evaluating the hallucination metric -We can use the Opik SDK to compute a hallucination score for each item in the dataset: +In order to evaluate the performance of the Opik hallucination metric, we will define: + +- Evaluation task: Our evaluation task will use the data in the Dataset to return a hallucination score computed using the Opik hallucination metric. +- Scoring metric: We will use the `Equals` metric to check if the hallucination score computed matches the expected output. + +By defining the evaluation task in this way, we will be able to understand how well Opik's hallucination metric is able to detect hallucinations in the dataset. ```python -from opik.evaluation.metrics import Hallucination +from opik.evaluation.metrics import Hallucination, Equals from opik.evaluation import evaluate -from opik.evaluation.metrics import base_metric, score_result from opik import Opik, DatasetItem -import pandas as pd - -client = Opik() - -class CheckHallucinated(base_metric.BaseMetric): - def __init__(self, name: str): - self.name = name - - def score(self, hallucination_score, expected_hallucination_score, **kwargs): - return score_result.ScoreResult( - value= None if hallucination_score is None else hallucination_score == expected_hallucination_score, - name=self.name, - reason=f"Got the hallucination score of {hallucination_score} and expected {expected_hallucination_score}", - scoring_failed=hallucination_score is None - ) +# Define the evaluation task def evaluation_task(x: DatasetItem): metric = Hallucination() try: @@ -121,23 +113,28 @@ def evaluation_task(x: DatasetItem): hallucination_reason = str(e) return { - "hallucination_score": "FAIL" if hallucination_score == 1 else "PASS", + "output": "FAIL" if hallucination_score == 1 else "PASS", "hallucination_reason": hallucination_reason, - "expected_hallucination_score": x.expected_output["expected_output"] + "reference": x.expected_output["expected_output"] } +# Get the dataset +client = Opik() dataset = client.get_dataset(name="HaluBench") +# Define the scoring metric +check_hallucinated_metric = Equals(name="Correct hallucination score") + res = evaluate( - experiment_name="Check Comet Metric", + experiment_name="Evaluate Opik hallucination metric", dataset=dataset, task=evaluation_task, - scoring_metrics=[CheckHallucinated(name="Detected hallucination")] + scoring_metrics=[check_hallucinated_metric] ) ``` We can see that the hallucination metric is able to detect ~80% of the hallucinations contained in the dataset and we can see the specific items where hallucinations were not detected. 
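To make the scoring step concrete, here is a minimal sketch of the `Equals` metric on its own. It assumes the metric's `score` method accepts `output` and `reference` keyword arguments, matching the keys returned by the evaluation task above:

```python
from opik.evaluation.metrics import Equals

check = Equals(name="Correct hallucination score")

# The evaluation task returns {"output": "PASS", "reference": "PASS"} for a
# correctly classified item, so the metric scores it 1.0; a mismatch scores 0.0.
result = check.score(output="PASS", reference="PASS")
print(result.value, result.name)
```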
-![Hallucination Evaluation](/img/cookbook/hallucination_metric_cookbook.png) +![Hallucination Evaluation](https://raw.githubusercontent.com/comet-ml/opik/main/apps/opik-documentation/documentation/static/img/cookbook/hallucination_metric_cookbook.png) diff --git a/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.ipynb b/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.ipynb index 4dbda3bd97..fcda92c93b 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.ipynb @@ -24,7 +24,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -46,7 +46,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -65,16 +65,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], "source": [ "%pip install opik --upgrade --quiet" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -94,9 +102,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "status_code: 409, body: {'errors': ['Dataset already exists']}\n" + ] + } + ], "source": [ "# Create dataset\n", "from opik import Opik, DatasetItem\n", @@ -145,36 +161,75 @@ "source": [ "## Evaluating the moderation metric\n", "\n", + "In order to evaluate the performance of the Opik moderation metric, we will define:\n", + "\n", + "- Evaluation task: Our evaluation task will use the data in the Dataset to return a moderation score computed using the Opik moderation metric.\n", + "- Scoring metric: We will use the `Equals` metric to check if the moderation score computed matches the expected output.\n", + "\n", + "By defining the evaluation task in this way, we will be able to understand how well Opik's moderation metric is able to detect moderation violations in the dataset.\n", + "\n", "We can use the Opik SDK to compute a moderation score for each item in the dataset:" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Evaluation: 100%|██████████| 50/50 [00:06<00:00, 8.09it/s]\n" + ] + }, + { + "data": { + "text/html": [ + "
+       "╭─ OpenAIModerationDataset (50 samples) ─╮\n",
+       "│                                        │\n",
+       "│ Total time:        00:00:06            │\n",
+       "│ Number of samples: 50                  │\n",
+       "│                                        │\n",
+       "│ Correct moderation score: 0.8400 (avg) │\n",
+       "│                                        │\n",
+       "╰────────────────────────────────────────╯\n",
+       "
\n" + ], + "text/plain": [ + "╭─ OpenAIModerationDataset (50 samples) ─╮\n", + "│ │\n", + "│ \u001b[1mTotal time: \u001b[0m 00:00:06 │\n", + "│ \u001b[1mNumber of samples:\u001b[0m 50 │\n", + "│ │\n", + "│ \u001b[1;32mCorrect moderation score: 0.8400 (avg)\u001b[0m │\n", + "│ │\n", + "╰────────────────────────────────────────╯\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
+       "Uploading results to Opik ... \n",
+       "
\n" + ], + "text/plain": [ + "Uploading results to Opik \u001b[33m...\u001b[0m \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ - "from opik.evaluation.metrics import Moderation\n", + "from opik.evaluation.metrics import Moderation, Equals\n", "from opik.evaluation import evaluate\n", - "from opik.evaluation.metrics import base_metric, score_result\n", "from opik import Opik, DatasetItem\n", "\n", - "client = Opik()\n", - "\n", - "class CheckModerated(base_metric.BaseMetric):\n", - " def __init__(self, name: str):\n", - " self.name = name\n", - "\n", - " def score(self, moderation_score, moderation_reason, expected_moderation_score, **kwargs):\n", - " moderation_score = \"moderated\" if moderation_score > 0.5 else \"not_moderated\"\n", - "\n", - " return score_result.ScoreResult(\n", - " value= None if moderation_score is None else moderation_score == expected_moderation_score,\n", - " name=self.name,\n", - " reason=f\"Got the moderation score of {moderation_score} and expected {expected_moderation_score}\",\n", - " scoring_failed=moderation_score is None\n", - " )\n", - "\n", + "# Define the evaluation task\n", "def evaluation_task(x: DatasetItem):\n", " metric = Moderation()\n", " try:\n", @@ -188,19 +243,27 @@ " moderation_score = None\n", " moderation_reason = str(e)\n", " \n", + " moderation_score = \"moderated\" if metric_score.value > 0.5 else \"not_moderated\"\n", + "\n", " return {\n", - " \"moderation_score\": moderation_score,\n", - " \"moderation_reason\": moderation_reason,\n", - " \"expected_moderation_score\": x.expected_output[\"expected_output\"]\n", + " \"output\": moderation_score,\n", + " \"moderation_score\": metric_score.value,\n", + " \"moderation_reason\": metric_score.reason,\n", + " \"reference\": x.expected_output[\"expected_output\"]\n", " }\n", "\n", + "# Get the dataset\n", + "client = Opik()\n", "dataset = client.get_dataset(name=\"OpenAIModerationDataset\")\n", "\n", + "# Define the scoring metric\n", + "moderation_metric = Equals(name=\"Correct moderation score\")\n", + "\n", "res = evaluate(\n", - " experiment_name=\"Check Comet Metric\",\n", + " experiment_name=\"Evaluate Opik moderation metric\",\n", " dataset=dataset,\n", " task=evaluation_task,\n", - " scoring_metrics=[CheckModerated(name=\"Detected Moderation\")]\n", + " scoring_metrics=[moderation_metric]\n", ")" ] }, diff --git a/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.md b/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.md index b22d474375..66b262badd 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.md +++ b/apps/opik-documentation/documentation/docs/cookbook/evaluate_moderation_metric.md @@ -15,8 +15,10 @@ For this guide we will be evaluating the Moderation metric included in the LLM E import os import getpass -os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") -os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") +if "OPIK_API_KEY" not in os.environ: + os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") +if "OPIK_WORKSPACE" not in os.environ: + os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") ``` If you are running the Opik platform locally, simply set: @@ -32,11 +34,20 @@ If you are running the Opik platform locally, simply set: First, we will install the necessary libraries and configure the OpenAI API key and download a reference moderation 
dataset. +```python +%pip install opik --upgrade --quiet +``` + + Note: you may need to restart the kernel to use updated packages. + + + ```python import os import getpass -os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API key: ") +if "OPENAI_API_KEY" not in os.environ: + os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ") ``` We will be using the [OpenAI Moderation API Release dataset](https://github.com/openai/moderation-api-release/tree/main/data) which according to this [blog post](https://openai.com/index/using-gpt-4-for-content-moderation/) GPT-4o detects ~60~% of hallucinations. The first step will be to create a dataset in the platform so we can keep track of the results of the evaluation. @@ -59,7 +70,7 @@ try: response = requests.get(url) df = pd.read_json(BytesIO(response.content), lines=True, compression='gzip') - df = df.sample(n=500, random_state=42) + df = df.sample(n=50, random_state=42) dataset_records = [] for x in df.to_dict(orient="records"): @@ -84,33 +95,27 @@ except Exception as e: print(e) ``` + status_code: 409, body: {'errors': ['Dataset already exists']} + + ## Evaluating the moderation metric +In order to evaluate the performance of the Opik moderation metric, we will define: + +- Evaluation task: Our evaluation task will use the data in the Dataset to return a moderation score computed using the Opik moderation metric. +- Scoring metric: We will use the `Equals` metric to check if the moderation score computed matches the expected output. + +By defining the evaluation task in this way, we will be able to understand how well Opik's moderation metric is able to detect moderation violations in the dataset. + We can use the Opik SDK to compute a moderation score for each item in the dataset: ```python -from opik.evaluation.metrics import Moderation +from opik.evaluation.metrics import Moderation, Equals from opik.evaluation import evaluate -from opik.evaluation.metrics import base_metric, score_result from opik import Opik, DatasetItem -client = Opik() - -class CheckModerated(base_metric.BaseMetric): - def __init__(self, name: str): - self.name = name - - def score(self, moderation_score, moderation_reason, expected_moderation_score, **kwargs): - moderation_score = "moderated" if moderation_score > 0.5 else "not_moderated" - - return score_result.ScoreResult( - value= None if moderation_score is None else moderation_score == expected_moderation_score, - name=self.name, - reason=f"Got the moderation score of {moderation_score} and expected {expected_moderation_score}", - scoring_failed=moderation_score is None - ) - +# Define the evaluation task def evaluation_task(x: DatasetItem): metric = Moderation() try: @@ -124,22 +129,54 @@ def evaluation_task(x: DatasetItem): moderation_score = None moderation_reason = str(e) + moderation_score = "moderated" if metric_score.value > 0.5 else "not_moderated" + return { - "moderation_score": moderation_score, - "moderation_reason": moderation_reason, - "expected_moderation_score": x.expected_output["expected_output"] + "output": moderation_score, + "moderation_score": metric_score.value, + "moderation_reason": metric_score.reason, + "reference": x.expected_output["expected_output"] } +# Get the dataset +client = Opik() dataset = client.get_dataset(name="OpenAIModerationDataset") +# Define the scoring metric +moderation_metric = Equals(name="Correct moderation score") + res = evaluate( - experiment_name="Check Comet Metric", + experiment_name="Evaluate Opik moderation metric", dataset=dataset, 
task=evaluation_task, - scoring_metrics=[CheckModerated(name="Detected Moderation")] + scoring_metrics=[moderation_metric] ) ``` + Evaluation: 100%|██████████| 50/50 [00:06<00:00, 8.09it/s] + + + +
+╭─ OpenAIModerationDataset (50 samples) ─╮
+│                                        │
+│ Total time:        00:00:06            │
+│ Number of samples: 50                  │
+│                                        │
+│ Correct moderation score: 0.8400 (avg) │
+│                                        │
+╰────────────────────────────────────────╯
+
+ + + + +
+Uploading results to Opik ... 
+
+ + + We are able to detect ~85% of moderation violations, this can be improved further by providing some additional examples to the model. We can view a breakdown of the results in the Opik UI: -![Moderation Evaluation](/img/cookbook/moderation_metric_cookbook.png) +![Moderation Evaluation](https://raw.githubusercontent.com/comet-ml/opik/main/apps/opik-documentation/documentation/static/img/cookbook/moderation_metric_cookbook.png) + + diff --git a/apps/opik-documentation/documentation/docs/cookbook/langchain.ipynb b/apps/opik-documentation/documentation/docs/cookbook/langchain.ipynb index 14eacdc970..ce37e7e378 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/langchain.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/langchain.ipynb @@ -50,7 +50,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -91,7 +91,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ diff --git a/apps/opik-documentation/documentation/docs/cookbook/langchain.md b/apps/opik-documentation/documentation/docs/cookbook/langchain.md index 37e2b98099..7162456aca 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/langchain.md +++ b/apps/opik-documentation/documentation/docs/cookbook/langchain.md @@ -19,8 +19,10 @@ We will highlight three different parts of the workflow: import os import getpass -os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") -os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") +if "OPIK_API_KEY" not in os.environ: + os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") +if "OPIK_WORKSPACE" not in os.environ: + os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") ``` If you are running the Opik platform locally, simply set: @@ -41,6 +43,15 @@ First, we will install the necessary libraries, download the Chinook database an ``` +```python +import os +import getpass + +if "OPENAI_API_KEY" not in os.environ: + os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ") +``` + + ```python # Download the relevant data import os @@ -66,13 +77,6 @@ if not os.path.exists(filename): db = SQLDatabase.from_uri(f"sqlite:///{filename}") ``` - -```python -import os -import getpass -os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key: ") -``` - ## Creating a synthetic dataset In order to create our synthetic dataset, we will be using the OpenAI API to generate 20 different questions that a user might ask based on the Chinook database. @@ -215,6 +219,6 @@ res = evaluate( The evaluation results are now uploaded to the Opik platform and can be viewed in the UI. 
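For reference, the SQL query chain built in this cookbook can also be traced while it runs. The sketch below is illustrative: this guide's integration page names the LangChain callback `CometTracer`, while newer opik releases export it as `OpikTracer`, so the import shown is an assumption to adjust to your installed version:

```python
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI
from langchain.chains import create_sql_query_chain
from opik.integrations.langchain import OpikTracer  # assumed import path

db = SQLDatabase.from_uri("sqlite:///Chinook_Sqlite.sqlite")
chain = create_sql_query_chain(ChatOpenAI(model="gpt-3.5-turbo"), db)

# Passing the tracer as a callback logs every step of the chain to Opik
tracer = OpikTracer()
sql_query = chain.invoke(
    {"question": "Which artist has the most albums?"},
    config={"callbacks": [tracer]},
)
print(sql_query)
```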
-![LangChain Evaluation](/img/cookbook/langchain_cookbook.png) +![LangChain Evaluation](https://raw.githubusercontent.com/comet-ml/opik/main/apps/opik-documentation/documentation/static/img/cookbook/langchain_cookbook.png) diff --git a/apps/opik-documentation/documentation/docs/cookbook/llama-index.ipynb b/apps/opik-documentation/documentation/docs/cookbook/llama-index.ipynb index 4ea4a7dd78..0900a80171 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/llama-index.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/llama-index.ipynb @@ -52,7 +52,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -107,7 +107,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -142,7 +142,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -172,7 +172,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -192,17 +192,9 @@ }, { "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The author worked on writing short stories and programming, starting with early attempts on an IBM 1401 in 9th grade, using an early version of Fortran. Later, the author transitioned to working with microcomputers, building a TRS-80 and writing simple games and programs. Despite enjoying programming, the author initially planned to study philosophy in college but eventually switched to AI due to a lack of interest in philosophy courses.\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "response = query_engine.query(\"What did the author do growing up?\")\n", "print(response)" diff --git a/apps/opik-documentation/documentation/docs/cookbook/llama-index.md b/apps/opik-documentation/documentation/docs/cookbook/llama-index.md index 5be3664ce9..bf1218fd4a 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/llama-index.md +++ b/apps/opik-documentation/documentation/docs/cookbook/llama-index.md @@ -21,8 +21,10 @@ For this guide we will be downloading the essays from Paul Graham and use them a import os import getpass -os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") -os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") +if "OPIK_API_KEY" not in os.environ: + os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") +if "OPIK_WORKSPACE" not in os.environ: + os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") ``` If you are running the Opik platform locally, simply set: @@ -39,7 +41,7 @@ First, we will install the necessary libraries, download the Chinook database an ```python -%pip install opik llama-index llama-index-agent-openai llama-index-llms-openai --quiet +%pip install opik llama-index llama-index-agent-openai llama-index-llms-openai --upgrade --quiet ``` And configure the required environment variables: @@ -49,7 +51,8 @@ And configure the required environment variables: import os import getpass -os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") +if "OPENAI_API_KEY" not in os.environ: + os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ") ``` In addition, we will download the Paul Graham essays: @@ -108,9 +111,8 @@ response = 
query_engine.query("What did the author do growing up?") print(response) ``` - The author worked on writing short stories and programming, starting with early attempts on an IBM 1401 in 9th grade, using an early version of Fortran. Later, the author transitioned to working with microcomputers, building a TRS-80 and writing simple games and programs. Despite enjoying programming, the author initially planned to study philosophy in college but eventually switched to AI due to a lack of interest in philosophy courses. +You can now go to the Opik app to see the trace: +![LlamaIndex trace in Opik](https://raw.githubusercontent.com/comet-ml/opik/main/apps/opik-documentation/documentation/static/img/cookbook/llamaIndex_cookbook.png) -You can now go to the Opik app to see the trace: -![LlamaIndex trace in Opik](/img/cookbook/llamaIndex_cookbook.png) diff --git a/apps/opik-documentation/documentation/docs/cookbook/openai.ipynb b/apps/opik-documentation/documentation/docs/cookbook/openai.ipynb index e698c045e1..fb2d27c4c4 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/openai.ipynb +++ b/apps/opik-documentation/documentation/docs/cookbook/openai.ipynb @@ -44,7 +44,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -72,7 +72,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -94,17 +94,9 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Opik was a mischievous little elf who loved pulling pranks on his friends in the enchanted forest. One day, his antics went too far and he accidentally turned himself into a fluffy pink bunny.\n" - ] - } - ], + "outputs": [], "source": [ "from opik.integrations.openai import track_openai\n", "from openai import OpenAI\n", @@ -148,20 +140,9 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\"Opik was a young wizard who lived in the small village of Mithos, where magic was both feared and revered. From a young age, Opik had shown a natural talent for magic, much to the dismay of his parents who were simple farmers. They feared the power that their son possessed and did everything they could to suppress it.\\n\\nDespite his parents' efforts, Opik continued to practice his magic in secret, honing his skills and learning all he could about the ancient art. He longed to become a powerful wizard, respected and feared by all who knew him. But as he grew older, he also began to realize that his thirst for power was beginning to consume him, turning him into a dark and reckless mage.\\n\\nOne day, a mysterious figure approached Opik in the village square, offering him a chance to join a secret society of powerful wizards. Intrigued by the offer, Opik accepted and was soon initiated into the group, which called themselves the Arcanum.\\n\\nUnder the guidance of the Arcanum, Opik's power grew exponentially. He could wield spells of immense power, bending reality to his will with a mere flick of his wrist. But as his power grew, so did his arrogance and greed. He began to see himself as above all others, using his magic to manipulate and control those around him.\\n\\nOne day, a great evil swept across the land, threatening to destroy everything in its path. 
The Arcanum tasked Opik with defeating this evil, seeing it as a chance for him to prove his worth and redeem himself. But as he faced the darkness head-on, Opik realized that true power lay not in domination and control, but in compassion and selflessness.\\n\\nIn a moment of clarity, Opik cast aside his dark ambitions and embraced the light within him. With newfound resolve, he fought against the evil that threatened his home, using his magic not to destroy, but to protect and heal. In the end, it was not his raw power that saved the day, but his courage and heart.\\n\\nAnd so, Opik returned to his village a changed man, no longer seeking power for power's sake, but striving to use his magic for the good of all. The villagers welcomed him back with open arms, seeing in him a hero and a protector. And as he walked among them, a new journey unfolded before him - a journey of redemption, compassion, and true magic.\"" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from opik import track\n", "from opik.integrations.openai import track_openai\n", diff --git a/apps/opik-documentation/documentation/docs/cookbook/openai.md b/apps/opik-documentation/documentation/docs/cookbook/openai.md index da12fca378..eaabd50ffe 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/openai.md +++ b/apps/opik-documentation/documentation/docs/cookbook/openai.md @@ -14,8 +14,10 @@ Opik integrates with OpenAI to provide a simple way to log traces for all OpenAI import os import getpass -os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") -os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") +if "OPIK_API_KEY" not in os.environ: + os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") +if "OPIK_WORKSPACE" not in os.environ: + os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") ``` If you are running the Opik platform locally, simply set: @@ -39,7 +41,9 @@ First, we will install the necessary libraries and set up our OpenAI API keys. ```python import os import getpass -os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key: ") + +if "OPENAI_API_KEY" not in os.environ: + os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ") ``` ## Logging traces @@ -70,9 +74,6 @@ completion = openai_client.chat.completions.create( print(completion.choices[0].message.content) ``` - Opik was a mischievous little elf who loved pulling pranks on his friends in the enchanted forest. One day, his antics went too far and he accidentally turned himself into a fluffy pink bunny. - - The prompt and response messages are automatically logged to Opik and can be viewed in the UI. ![OpenAI Integration](/img/cookbook/openai_trace_cookbook.png) @@ -123,13 +124,8 @@ generate_opik_story() ``` +The trace can now be viewed in the UI: +![OpenAI Integration](https://raw.githubusercontent.com/comet-ml/opik/main/apps/opik-documentation/documentation/static/img/cookbook/openai_trace_decorator_cookbook.png) - "Opik was a young wizard who lived in the small village of Mithos, where magic was both feared and revered. From a young age, Opik had shown a natural talent for magic, much to the dismay of his parents who were simple farmers. 
They feared the power that their son possessed and did everything they could to suppress it.\n\nDespite his parents' efforts, Opik continued to practice his magic in secret, honing his skills and learning all he could about the ancient art. He longed to become a powerful wizard, respected and feared by all who knew him. But as he grew older, he also began to realize that his thirst for power was beginning to consume him, turning him into a dark and reckless mage.\n\nOne day, a mysterious figure approached Opik in the village square, offering him a chance to join a secret society of powerful wizards. Intrigued by the offer, Opik accepted and was soon initiated into the group, which called themselves the Arcanum.\n\nUnder the guidance of the Arcanum, Opik's power grew exponentially. He could wield spells of immense power, bending reality to his will with a mere flick of his wrist. But as his power grew, so did his arrogance and greed. He began to see himself as above all others, using his magic to manipulate and control those around him.\n\nOne day, a great evil swept across the land, threatening to destroy everything in its path. The Arcanum tasked Opik with defeating this evil, seeing it as a chance for him to prove his worth and redeem himself. But as he faced the darkness head-on, Opik realized that true power lay not in domination and control, but in compassion and selflessness.\n\nIn a moment of clarity, Opik cast aside his dark ambitions and embraced the light within him. With newfound resolve, he fought against the evil that threatened his home, using his magic not to destroy, but to protect and heal. In the end, it was not his raw power that saved the day, but his courage and heart.\n\nAnd so, Opik returned to his village a changed man, no longer seeking power for power's sake, but striving to use his magic for the good of all. The villagers welcomed him back with open arms, seeing in him a hero and a protector. And as he walked among them, a new journey unfolded before him - a journey of redemption, compassion, and true magic." - - - -The trace can now be viewed in the UI: - -![OpenAI Integration](/img/cookbook/openai_trace_decorator_cookbook.png) diff --git a/apps/opik-documentation/documentation/docs/cookbook/ragas.md b/apps/opik-documentation/documentation/docs/cookbook/ragas.md index f5eb32b320..6c32796a41 100644 --- a/apps/opik-documentation/documentation/docs/cookbook/ragas.md +++ b/apps/opik-documentation/documentation/docs/cookbook/ragas.md @@ -18,8 +18,10 @@ There are two main ways to use Opik with Ragas: import os import getpass -os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") -os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") +if "OPIK_API_KEY" not in os.environ: + os.environ["OPIK_API_KEY"] = getpass.getpass("Opik API Key: ") +if "OPIK_WORKSPACE" not in os.environ: + os.environ["OPIK_WORKSPACE"] = input("Comet workspace (often the same as your username): ") ``` If you are running the Opik platform locally, simply set: @@ -41,7 +43,8 @@ First, we will install the necessary libraries and configure the OpenAI API key. 
import os import getpass -os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ") +if "OPENAI_API_KEY" not in os.environ: + os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ") ``` ## Integrating Opik with Ragas diff --git a/apps/opik-documentation/documentation/docs/tracing/integrations/langchain.md b/apps/opik-documentation/documentation/docs/tracing/integrations/langchain.md index b82c12af27..d69550394c 100644 --- a/apps/opik-documentation/documentation/docs/tracing/integrations/langchain.md +++ b/apps/opik-documentation/documentation/docs/tracing/integrations/langchain.md @@ -7,6 +7,13 @@ sidebar_label: LangChain Comet provides seamless integration with LangChain, allowing you to easily log and trace your LangChain-based applications. By using the `CometTracer` callback, you can automatically capture detailed information about your LangChain runs, including inputs, outputs, and metadata for each step in your chain. +
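As a quick preview before the setup steps, here is a minimal sketch of attaching the tracer as a callback on a single LLM call. The class name and import path are assumptions: depending on your opik version the callback may be exported as `CometTracer` or `OpikTracer`:

```python
from langchain_openai import ChatOpenAI
from opik.integrations.langchain import OpikTracer  # may be named CometTracer

llm = ChatOpenAI(model="gpt-3.5-turbo")
tracer = OpikTracer()

# Each step of the run is captured, including inputs, outputs and metadata
response = llm.invoke("Hello, world!", config={"callbacks": [tracer]})
print(response.content)
```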
+ You can check out the Colab Notebook if you'd like to jump straight to the code: + + Open In Colab + +
+ ## Getting Started To use the `CometTracer` with LangChain, you'll need to have both the `opik` and `langchain` packages installed. You can install them using pip: diff --git a/apps/opik-documentation/documentation/docs/tracing/integrations/llama_index.md b/apps/opik-documentation/documentation/docs/tracing/integrations/llama_index.md index 2e43fe5225..96d36f8fd1 100644 --- a/apps/opik-documentation/documentation/docs/tracing/integrations/llama_index.md +++ b/apps/opik-documentation/documentation/docs/tracing/integrations/llama_index.md @@ -14,6 +14,13 @@ sidebar_label: LlamaIndex - Provides an advanced retrieval/query interface over your data: Feed in any LLM input prompt, get back retrieved context and knowledge-augmented output. - Allows easy integrations with your outer application framework (e.g. with LangChain, Flask, Docker, ChatGPT, anything else). +
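As a preview of the setup described below, here is a minimal sketch of registering Opik's callback handler globally so that indexing and querying are both traced. The handler class and import path are assumptions based on the Opik SDK, and `./data` is a placeholder directory:

```python
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.callbacks import CallbackManager
from opik.integrations.llama_index import LlamaIndexCallbackHandler  # assumed path

# Register the handler globally so every LlamaIndex operation is logged to Opik
Settings.callback_manager = CallbackManager([LlamaIndexCallbackHandler()])

documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)

response = index.as_query_engine().query("What did the author do growing up?")
print(response)
```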
+ You can check out the Colab Notebook if you'd like to jump straight to the code: + + Open In Colab + +
+ ## Getting Started To use the Opik integration with LlamaIndex, you'll need to have both the `opik` and `llama_index` packages installed. You can install them using pip: diff --git a/apps/opik-documentation/documentation/docs/tracing/integrations/openai.md b/apps/opik-documentation/documentation/docs/tracing/integrations/openai.md index 491d786864..36d2f6eeeb 100644 --- a/apps/opik-documentation/documentation/docs/tracing/integrations/openai.md +++ b/apps/opik-documentation/documentation/docs/tracing/integrations/openai.md @@ -5,7 +5,14 @@ sidebar_label: OpenAI # OpenAI -This guide explains how to integrate Comet Opik with the OpenAI Python SDK. By using the `openai_wrapper` method provided by opik, you can easily track and evaluate your OpenAI API calls within your Comet projects as Comet will automatically log the input prompt, model used, token usage, and response generated. +This guide explains how to integrate Comet Opik with the OpenAI Python SDK. By using the `track_openai` method provided by opik, you can easily track and evaluate your OpenAI API calls within your Comet projects as Comet will automatically log the input prompt, model used, token usage, and response generated. + +
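As a quick preview of the steps below, here is a minimal sketch of the wrapped client using the chat completions interface from the v1+ OpenAI Python SDK (the model name is illustrative). Note that `Completion.create` no longer exists in openai>=1.0, so prefer `chat.completions.create`:

```python
from openai import OpenAI
from opik.integrations.openai import track_openai

openai_client = track_openai(OpenAI())

# Calls made through the wrapped client are automatically logged to Opik
response = openai_client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, world!"}],
)
print(response.choices[0].message.content)
```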
+ You can check out the Colab Notebook if you'd like to jump straight to the code: + + Open In Colab + +
## Integration Steps @@ -18,11 +25,11 @@ pip install opik openai 2. Import the necessary modules and wrap the OpenAI client: ```python -from opik.integrations.openai import openai_wrapper +from opik.integrations.openai import track_openai from openai import OpenAI openai_client = OpenAI() -openai_client = openai_wrapper(openai_client) +openai_client = track_openai(openai_client) response = openai_client.Completion.create( model="gpt-3.5-turbo", @@ -35,6 +42,6 @@ response = openai_client.Completion.create( ) ``` -The `openai_wrapper` will automatically track and log the API call, including the input prompt, model used, and response generated. You can view these logs in your Comet project dashboard. +The `track_openai` will automatically track and log the API call, including the input prompt, model used, and response generated. You can view these logs in your Comet project dashboard. By following these steps, you can seamlessly integrate Comet Opik with the OpenAI Python SDK and gain valuable insights into your model's performance and usage. diff --git a/apps/opik-documentation/documentation/docs/tracing/integrations/ragas.md b/apps/opik-documentation/documentation/docs/tracing/integrations/ragas.md index 6e12919561..8d4fac4a83 100644 --- a/apps/opik-documentation/documentation/docs/tracing/integrations/ragas.md +++ b/apps/opik-documentation/documentation/docs/tracing/integrations/ragas.md @@ -98,6 +98,13 @@ In the Opik UI, you will be able to see the full trace including the score calcu We recommend using the Opik [evaluation framework](/evaluation/evaluate_your_llm) to evaluate your RAG pipeline. It shares similar concepts with the Ragas `evaluate` functionality but has a tighter integration with Opik. +
+ You can check out the Colab Notebook if you'd like to jump straight to the code: + + Open In Colab + +
+ ::: If you are using the Ragas `evaluate` functionality, you can use the `OpikTracer` callback to keep track of the score calculation in Opik. This will track as traces the computation of each evaluation metric: diff --git a/apps/opik-documentation/documentation/docs/tracing/log_traces.md b/apps/opik-documentation/documentation/docs/tracing/log_traces.md index f90c227ce6..e5e13c6ee4 100644 --- a/apps/opik-documentation/documentation/docs/tracing/log_traces.md +++ b/apps/opik-documentation/documentation/docs/tracing/log_traces.md @@ -23,6 +23,8 @@ Opik has a number of integrations for popular LLM frameworks like LangChain or O ## Log using function decorators +### Logging traces and spans + If you are manually defining your LLM chains and not using LangChain for example, you can use the `track` function decorators to track LLM calls: ```python @@ -64,6 +66,25 @@ print(result) If the `track` function decorators are used in conjunction with the `track_openai` or `CometTracer` callbacks, the LLM calls will be automatically logged to the corresponding trace. ::: +### Capturing inputs and ouputs + +By default the `track` decorator will capture the input and output of the function you are decorating and will map these to the input and output of the span. You can control what is captured using the `capture_input` and `capture_output` parameters. + +For example, if you don't want to capture the input and output of the function you can do: + +```python +from opik import track +from opik.opik_context import update_current_span + +@track(capture_input=False, capture_output=False) +def my_function(input_text): + res = input_text.upper() + + # Manually update the span with the input and output + update_current_span(input={"text": input_text}, output={"text": input_text.upper()}) + return res +``` + ## Log traces and spans manually If you wish to log traces and spans manually, you can use the `Comet` client: diff --git a/apps/opik-documentation/documentation/docusaurus.config.ts b/apps/opik-documentation/documentation/docusaurus.config.ts index 50a4bf259f..5fa11bc1ba 100644 --- a/apps/opik-documentation/documentation/docusaurus.config.ts +++ b/apps/opik-documentation/documentation/docusaurus.config.ts @@ -3,12 +3,13 @@ import type {Config} from '@docusaurus/types'; import type * as Preset from '@docusaurus/preset-classic'; const config: Config = { - title: 'My Site', - tagline: 'Dinosaurs are cool', + title: 'Opik Documentation', + tagline: 'Open source LLM evaluation platform', favicon: 'img/favicon.ico', // Set the production url of your site here - url: 'http://146.190.72.83/', + url: 'https://www.comet.com', + // Set the // pathname under which your site is served // For GitHub pages deployment, it is often '//' baseUrl: '/docs/opik/', @@ -53,7 +54,18 @@ const config: Config = { ], ], - plugins: ['docusaurus-plugin-sass'], + plugins: [ + 'docusaurus-plugin-sass', + [ + require.resolve("docusaurus-plugin-search-local"), + { + hashed: true, + indexPages: true, + searchResultLimits: 25, + docsRouteBasePath: "/docs/opik" + }, + ] + ], themeConfig: { // Replace with your project's social card @@ -77,10 +89,12 @@ const config: Config = { }, ], }, + prism: { theme: prismThemes.github, darkTheme: prismThemes.dracula, }, + } satisfies Preset.ThemeConfig, }; diff --git a/apps/opik-documentation/documentation/package-lock.json b/apps/opik-documentation/documentation/package-lock.json index cdda454925..8994e1d108 100644 --- a/apps/opik-documentation/documentation/package-lock.json +++ 
b/apps/opik-documentation/documentation/package-lock.json @@ -13,6 +13,7 @@ "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "docusaurus-plugin-sass": "^0.2.5", + "docusaurus-plugin-search-local": "^2.0.1", "prism-react-renderer": "^2.3.0", "react": "^18.0.0", "react-dom": "^18.0.0", @@ -2225,6 +2226,12 @@ "node": ">=6.9.0" } }, + "node_modules/@braintree/sanitize-url": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz", + "integrity": "sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==", + "license": "MIT" + }, "node_modules/@colors/colors": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", @@ -6085,6 +6092,59 @@ "sass": "^1.30.0" } }, + "node_modules/docusaurus-plugin-search-local": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/docusaurus-plugin-search-local/-/docusaurus-plugin-search-local-2.0.1.tgz", + "integrity": "sha512-vS6LpTjHg4tkHLbnJqUH2v2sOM0/1HPXrQ1hXtxYalV/NCgaOLD2T+vjVvGbIkqxf91/h+Rzk2HydnyeAsa5Xw==", + "license": "MIT", + "workspaces": [ + "./website" + ], + "dependencies": { + "@braintree/sanitize-url": "^6.0.0", + "cheerio": "^1.0.0-rc.11", + "clsx": "^1.1.1", + "debug": "^4.3.4", + "fs-extra": "^10.1.0", + "klaw-sync": "^6.0.0", + "lunr": "^2.3.9", + "mark.js": "^8.11.1", + "validate-peer-dependencies": "^2.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@docusaurus/core": ">=3.0.0", + "@docusaurus/utils": ">=3.0.0", + "@docusaurus/utils-validation": ">=3.0.0", + "react": ">=18.2.0", + "react-dom": ">=18.2.0" + } + }, + "node_modules/docusaurus-plugin-search-local/node_modules/clsx": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", + "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/docusaurus-plugin-search-local/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/dom-converter": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", @@ -8720,6 +8780,15 @@ "node": ">=0.10.0" } }, + "node_modules/klaw-sync": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", + "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.1.11" + } + }, "node_modules/kleur": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", @@ -8904,6 +8973,18 @@ "yallist": "^3.0.2" } }, + "node_modules/lunr": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz", + "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==", + "license": "MIT" + }, + "node_modules/mark.js": { + "version": "8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": 
"sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==", + "license": "MIT" + }, "node_modules/markdown-extensions": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", @@ -11868,6 +11949,27 @@ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", "license": "MIT" }, + "node_modules/path-root": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/path-root/-/path-root-0.1.1.tgz", + "integrity": "sha512-QLcPegTHF11axjfojBIoDygmS2E3Lf+8+jI6wOVmNVenrKSo3mFdSGiIgdSHenczw3wPtlVMQaFVwGmM7BJdtg==", + "license": "MIT", + "dependencies": { + "path-root-regex": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-root-regex": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/path-root-regex/-/path-root-regex-0.1.2.tgz", + "integrity": "sha512-4GlJ6rZDhQZFE0DPVKh0e9jmZ5egZfxTkp7bcRDuPlJXbAwhxcl2dINPUAsjLdejqaLsCeg8axcLjIbvBjN4pQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/path-to-regexp": { "version": "1.8.0", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", @@ -13617,6 +13719,18 @@ "node": ">=4" } }, + "node_modules/resolve-package-path": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/resolve-package-path/-/resolve-package-path-4.0.3.tgz", + "integrity": "sha512-SRpNAPW4kewOaNUt8VPqhJ0UMxawMwzJD8V7m1cJfdSTK9ieZwS6K7Dabsm4bmLFM96Z5Y/UznrpG5kt1im8yA==", + "license": "MIT", + "dependencies": { + "path-root": "^0.1.1" + }, + "engines": { + "node": ">= 12" + } + }, "node_modules/resolve-pathname": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", @@ -15485,6 +15599,19 @@ "uuid": "dist/bin/uuid" } }, + "node_modules/validate-peer-dependencies": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/validate-peer-dependencies/-/validate-peer-dependencies-2.2.0.tgz", + "integrity": "sha512-8X1OWlERjiUY6P6tdeU9E0EwO8RA3bahoOVG7ulOZT5MqgNDUO/BQoVjYiHPcNe+v8glsboZRIw9iToMAA2zAA==", + "license": "MIT", + "dependencies": { + "resolve-package-path": "^4.0.3", + "semver": "^7.3.8" + }, + "engines": { + "node": ">= 12" + } + }, "node_modules/value-equal": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", diff --git a/apps/opik-documentation/documentation/package.json b/apps/opik-documentation/documentation/package.json index 63d122063c..736b9c4ec8 100644 --- a/apps/opik-documentation/documentation/package.json +++ b/apps/opik-documentation/documentation/package.json @@ -21,6 +21,7 @@ "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "docusaurus-plugin-sass": "^0.2.5", + "docusaurus-plugin-search-local": "^2.0.1", "prism-react-renderer": "^2.3.0", "react": "^18.0.0", "react-dom": "^18.0.0", @@ -30,9 +31,9 @@ "@docusaurus/module-type-aliases": "3.4.0", "@docusaurus/tsconfig": "3.4.0", "@docusaurus/types": "3.4.0", - "typescript": "~5.2.2", + "concurrently": "^8.2.0", "nodemon": "^2.0.22", - "concurrently": "^8.2.0" + "typescript": "~5.2.2" }, "browserslist": { "production": [