diff --git a/docs/sphinx/source/examples/colpali-document-retrieval-vision-language-models-cloud.ipynb b/docs/sphinx/source/examples/colpali-document-retrieval-vision-language-models-cloud.ipynb index ebb48c5f..bf857580 100644 --- a/docs/sphinx/source/examples/colpali-document-retrieval-vision-language-models-cloud.ipynb +++ b/docs/sphinx/source/examples/colpali-document-retrieval-vision-language-models-cloud.ipynb @@ -50,6 +50,9 @@ "\n", "For a simpler example where we use one vespa document = One PDF page, see [simplified-retrieval-with-colpali](https://pyvespa.readthedocs.io/en/latest/examples/simplified-retrieval-with-colpali-vlm_Vespa-cloud.html).\n", "\n", + "Consider following the [ColQwen2](https://pyvespa.readthedocs.io/en/latest/examples/pdf-retrieval-with-ColQwen2-vlm_Vespa-cloud.html) notebook instead as it\n", + "uses a better model with improved performance (both accuracy and speed).\n", + "\n", "We also store the base64 encoded image, and page meta data like title and url so that we can display it in the result page, but also\n", "use it for RAG with powerful LLMs with vision capabilities. \n", "\n", diff --git a/docs/sphinx/source/examples/pdf-retrieval-with-ColQwen2-vlm_Vespa-cloud.ipynb b/docs/sphinx/source/examples/pdf-retrieval-with-ColQwen2-vlm_Vespa-cloud.ipynb index 52c57570..aee9dfe9 100644 --- a/docs/sphinx/source/examples/pdf-retrieval-with-ColQwen2-vlm_Vespa-cloud.ipynb +++ b/docs/sphinx/source/examples/pdf-retrieval-with-ColQwen2-vlm_Vespa-cloud.ipynb @@ -796,24 +796,9 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setting application...\n", - "Running: vespa config set application samples.visionrag5\n", - "Setting target cloud...\n", - "Running: vespa config set target cloud\n", - "\n", - "No api-key found for control plane access. 
Using access token.\n", - "Checking for access token in auth.json...\n", - "Successfully obtained access token for control plane access.\n" - ] - } - ], + "outputs": [], "source": [ "from vespa.deployment import VespaCloud\n", "import os\n", @@ -821,7 +806,7 @@ "os.environ['TOKENIZERS_PARALLELISM'] = \"false\"\n", "\n", "# Replace with your tenant name from the Vespa Cloud Console\n", - "tenant_name = \"samples\" \n", + "tenant_name = \"vespa-team\" \n", "\n", "key = os.getenv(\"VESPA_TEAM_API_KEY\", None)\n", "if key is not None:\n", diff --git a/docs/sphinx/source/examples/simplified-retrieval-with-colpali-vlm_Vespa-cloud.ipynb b/docs/sphinx/source/examples/simplified-retrieval-with-colpali-vlm_Vespa-cloud.ipynb index 778e7bef..f096320f 100644 --- a/docs/sphinx/source/examples/simplified-retrieval-with-colpali-vlm_Vespa-cloud.ipynb +++ b/docs/sphinx/source/examples/simplified-retrieval-with-colpali-vlm_Vespa-cloud.ipynb @@ -17,6 +17,9 @@ "This notebook demonstrates how to represent [ColPali](https://huggingface.co/vidore/colpali) in Vespa\n", "and to scale to large collections. Also see the blog post: [Scaling ColPali to billions of PDFs with Vespa](https://blog.vespa.ai/scaling-colpali-to-billions/)\n", "\n", + "Consider following the [ColQwen2](https://pyvespa.readthedocs.io/en/latest/examples/pdf-retrieval-with-ColQwen2-vlm_Vespa-cloud.html) notebook instead as it\n", + "uses a better model with improved performance (both accuracy and speed).\n", + "\n", "ColPali is a powerful visual language model that can generate embeddings for images (screenshots of PDF pages) and text queries.\n", "\n", "In this notebook, we will use ColPali to generate embeddings for images of PDF _pages_ and store the embeddings in Vespa. \n", @@ -1276,7 +1279,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.11.10" }, "widgets": { "application/vnd.jupyter.widget-state+json": {