diff --git a/examples/README.md b/examples/README.md index b4df46ad..d261d179 100644 --- a/examples/README.md +++ b/examples/README.md @@ -67,7 +67,7 @@ are listed in [the last section of this file](#customize). - [MistralAI](./customize/llms/mistalai_llm.py) - [Cohere](./customize/llms/cohere_llm.py) - [Anthropic (Claude)](./customize/llms/anthropic_llm.py) -- [Ollama]() +- [Ollama](./customize/llms/ollama_llm.py) - [Custom LLM](./customize/llms/custom_llm.py) @@ -91,6 +91,7 @@ are listed in [the last section of this file](#customize). - [End to end example with explicit components and text input](./customize/build_graph/pipeline/kg_builder_from_text.py) - [End to end example with explicit components and PDF input](./customize/build_graph/pipeline/kg_builder_from_pdf.py) +- [Process multiple documents](./customize/build_graph/pipeline/kg_builder_two_documents_entity_resolution.py) #### Components diff --git a/examples/customize/answer/custom_prompt.py b/examples/customize/answer/custom_prompt.py index f67cf33e..4089c703 100644 --- a/examples/customize/answer/custom_prompt.py +++ b/examples/customize/answer/custom_prompt.py @@ -54,7 +54,10 @@ rag = GraphRAG(retriever=retriever, llm=llm, prompt_template=template) -result = rag.search("Tell me more about Avatar movies") +result = rag.search( + "Tell me more about Avatar movies", + return_context=True, +) print(result.answer) driver.close() diff --git a/examples/customize/answer/langchain_compatiblity.py b/examples/customize/answer/langchain_compatiblity.py index d3382849..858dd12e 100644 --- a/examples/customize/answer/langchain_compatiblity.py +++ b/examples/customize/answer/langchain_compatiblity.py @@ -40,7 +40,10 @@ llm=llm, # type: ignore[arg-type, unused-ignore] ) -result = rag.search("Tell me more about Avatar movies") +result = rag.search( + "Tell me more about Avatar movies", + return_context=False, +) print(result.answer) driver.close() diff --git 
a/examples/old/pipeline/kg_builder_two_documents_entity_resolution.py b/examples/customize/build_graph/pipeline/kg_builder_two_documents_entity_resolution.py similarity index 100% rename from examples/old/pipeline/kg_builder_two_documents_entity_resolution.py rename to examples/customize/build_graph/pipeline/kg_builder_two_documents_entity_resolution.py diff --git a/examples/customize/llms/ollama_llm.py b/examples/customize/llms/ollama_llm.py new file mode 100644 index 00000000..56148042 --- /dev/null +++ b/examples/customize/llms/ollama_llm.py @@ -0,0 +1,12 @@ +from neo4j_graphrag.llm import LLMResponse, OpenAILLM + +# not used but needs to be provided +api_key = "ollama" + +llm = OpenAILLM( + base_url="http://localhost:11434/v1", + model_name="<model_name>", + api_key=api_key, +) +res: LLMResponse = llm.invoke("What is the additive color model?") +print(res.content) diff --git a/examples/old/pipeline/__init__.py b/examples/old/pipeline/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/old/pipeline/kg_builder_example.py b/examples/old/pipeline/kg_builder_example.py deleted file mode 100644 index 03acb1ac..00000000 --- a/examples/old/pipeline/kg_builder_example.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) "Neo4j" -# Neo4j Sweden AB [https://neo4j.com] -# # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# # -# https://www.apache.org/licenses/LICENSE-2.0 -# # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from __future__ import annotations - -import asyncio -import logging - -import neo4j -from neo4j_graphrag.embeddings import OpenAIEmbeddings -from neo4j_graphrag.experimental.pipeline.kg_builder import SimpleKGPipeline -from neo4j_graphrag.llm.openai_llm import OpenAILLM - -logging.basicConfig(level=logging.INFO) - - -async def main(neo4j_driver: neo4j.Driver) -> None: - # Instantiate Entity and Relation objects - entities = ["PERSON", "ORGANIZATION", "HORCRUX", "LOCATION"] - relations = ["SITUATED_AT", "INTERACTS", "OWNS", "LED_BY"] - potential_schema = [ - ("PERSON", "SITUATED_AT", "LOCATION"), - ("PERSON", "INTERACTS", "PERSON"), - ("PERSON", "OWNS", "HORCRUX"), - ("ORGANIZATION", "LED_BY", "PERSON"), - ] - - # Instantiate the LLM - llm = OpenAILLM( - model_name="gpt-4o", - model_params={ - "max_tokens": 2000, - "response_format": {"type": "json_object"}, - }, - ) - - # Use OpenAIEmbeddings as embedder - embedder = OpenAIEmbeddings() - - # Create an instance of the SimpleKGPipeline - kg_builder_pdf = SimpleKGPipeline( - llm=llm, - driver=neo4j_driver, - embedder=embedder, - entities=entities, - relations=relations, - potential_schema=potential_schema, - from_pdf=True, - on_error="RAISE", - ) - - # Run the knowledge graph building process asynchronously - pdf_file_path = "examples/pipeline/Harry Potter and the Death Hallows Summary.pdf" - pdf_result = await kg_builder_pdf.run_async(file_path=pdf_file_path) - print(f"PDF Processing Result: {pdf_result}") - - # Create an instance of the SimpleKGPipeline for text input - kg_builder_text = SimpleKGPipeline( - llm=llm, - driver=neo4j_driver, - embedder=embedder, - entities=entities, - relations=relations, - potential_schema=potential_schema, - from_pdf=False, - on_error="RAISE", - ) - - # Run the knowledge graph building process with text input - text_input = "John Doe lives in New York City." 
- text_result = await kg_builder_text.run_async(text=text_input) - print(f"Text Processing Result: {text_result}") - - await llm.async_client.close() - - -if __name__ == "__main__": - with neo4j.GraphDatabase.driver( - "bolt://localhost:7687", auth=("neo4j", "password") - ) as driver: - asyncio.run(main(driver)) diff --git a/examples/question_answering/graphrag.py b/examples/question_answering/graphrag.py index ad4aef61..c20f622d 100644 --- a/examples/question_answering/graphrag.py +++ b/examples/question_answering/graphrag.py @@ -52,7 +52,11 @@ def formatter(record: neo4j.Record) -> RetrieverResultItem: rag = GraphRAG(retriever=retriever, llm=llm) -result = rag.search("Tell me more about Avatar movies") +result = rag.search( + "Tell me more about Avatar movies", + return_context=True, +) print(result.answer) +# print(result.retriever_result) driver.close()