
Commit

Removed unneeded references to LangChain (#185)
* Removed unneeded references to LangChain

* Remove unneeded langchain deps
alexthomas93 authored Oct 16, 2024
1 parent 06d9889 commit 789fb35
Showing 13 changed files with 1,195 additions and 1,120 deletions.
2 changes: 1 addition & 1 deletion docs/source/index.rst
@@ -152,8 +152,8 @@ While the library has more retrievers than shown here, the following examples sh
.. code:: python
from neo4j import GraphDatabase
+from neo4j_graphrag.embeddings.openai import OpenAIEmbeddings
from neo4j_graphrag.retrievers import VectorRetriever
-from langchain_openai import OpenAIEmbeddings
URI = "neo4j://localhost:7687"
AUTH = ("neo4j", "password")
5 changes: 2 additions & 3 deletions docs/source/user_guide_rag.rst
@@ -15,7 +15,7 @@ To perform a GraphRAG query using the `neo4j-graphrag` package, a few components

1. A Neo4j driver: used to query your Neo4j database.
2. A Retriever: the `neo4j-graphrag` package provides some implementations (see the :ref:`dedicated section <retriever-configuration>`) and lets you write your own if none of the provided implementations matches your needs (see :ref:`how to write a custom retriever <custom-retriever>`).
-3. An LLM: to generate the answer, we need to call an LLM model. The neo4j-graphrag package currently only provides implementation for the OpenAI LLMs, but its interface is compatible with LangChain and let developers write their own interface if needed.
+3. An LLM: to generate the answer, we need to call an LLM model. The neo4j-graphrag package's LLM interface is compatible with LangChain. Developers can also write their own interface if needed.

In practice, it's done with only a few lines of code:
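The documented snippet itself is collapsed in this diff view. As a hedged sketch assembled from the examples changed in this commit (the index name, the query text, and the `rag.search(query_text=...)` keyword are assumptions, and an `OPENAI_API_KEY` environment variable is expected):

.. code:: python

    from neo4j import GraphDatabase
    from neo4j_graphrag.embeddings.openai import OpenAIEmbeddings
    from neo4j_graphrag.generation import GraphRAG
    from neo4j_graphrag.llm import OpenAILLM
    from neo4j_graphrag.retrievers import VectorRetriever

    # Placeholder connection details and index name.
    URI = "neo4j://localhost:7687"
    AUTH = ("neo4j", "password")
    INDEX_NAME = "my-vector-index"

    with GraphDatabase.driver(URI, auth=AUTH) as driver:
        embedder = OpenAIEmbeddings(model="text-embedding-3-large")
        retriever = VectorRetriever(driver, INDEX_NAME, embedder)
        llm = OpenAILLM(model_name="gpt-4o", model_params={"temperature": 0})
        rag = GraphRAG(retriever=retriever, llm=llm)

        # The keyword is assumed to be query_text, matching the retriever
        # calls used elsewhere in this commit.
        response = rag.search(query_text="Tell me about biology")
        print(response.answer)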

@@ -223,8 +223,7 @@ Its interface is compatible with our `GraphRAG` interface, facilitating integrat
print(response.answer)
-It is however not mandatory to use LangChain. The alternative is to implement
-a custom model.
+It is however not mandatory to use LangChain.

Using a Custom Model
--------------------
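The body of this section is collapsed here. Purely as an illustrative sketch, not the documented API, a custom model could subclass the package's `LLMInterface` and return an `LLMResponse`; the `EchoLLM` name, the `model_name` constructor argument, and the exact method signatures are assumptions:

.. code:: python

    from neo4j_graphrag.llm import LLMInterface, LLMResponse

    class EchoLLM(LLMInterface):
        """Toy model that simply echoes the prompt back."""

        def invoke(self, input: str) -> LLMResponse:
            return LLMResponse(content=f"echo: {input}")

        async def ainvoke(self, input: str) -> LLMResponse:
            return self.invoke(input)

    # Assumed: the base class accepts a model_name, as the OpenAILLM example
    # in this commit does.
    llm = EchoLLM(model_name="echo")

Such a stub is mainly useful for wiring and testing a `GraphRAG` pipeline without calling a paid API.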
6 changes: 3 additions & 3 deletions examples/graphrag_with_langchain_llm.py
@@ -10,9 +10,9 @@
import logging

import neo4j
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
+from neo4j_graphrag.embeddings.openai import OpenAIEmbeddings
from neo4j_graphrag.generation import GraphRAG
+from neo4j_graphrag.llm import OpenAILLM
from neo4j_graphrag.retrievers import VectorCypherRetriever
from neo4j_graphrag.types import RetrieverResultItem

@@ -48,7 +48,7 @@ def formatter(record: neo4j.Record) -> RetrieverResultItem:
embedder=embedder, # type: ignore
)

-llm = ChatOpenAI(model_name="gpt-4o", temperature=0) # type: ignore
+llm = OpenAILLM(model_name="gpt-4o", model_params={"temperature": 0})

rag = GraphRAG(retriever=retriever, llm=llm)

4 changes: 2 additions & 2 deletions examples/openai_search.py
@@ -1,7 +1,7 @@
from random import random

-from langchain_openai import OpenAIEmbeddings
from neo4j import GraphDatabase
+from neo4j_graphrag.embeddings.openai import OpenAIEmbeddings
from neo4j_graphrag.indexes import create_vector_index
from neo4j_graphrag.retrievers import VectorRetriever

@@ -19,7 +19,7 @@
embedder = OpenAIEmbeddings(model="text-embedding-3-large")

# Initialize the retriever
-retriever = VectorRetriever(driver, INDEX_NAME, embedder) # type: ignore
+retriever = VectorRetriever(driver, INDEX_NAME, embedder)

# Creating the index
create_vector_index(
9 changes: 5 additions & 4 deletions examples/pinecone/text_search.py
@@ -1,5 +1,7 @@
-from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from neo4j import GraphDatabase
+from neo4j_graphrag.embeddings.sentence_transformers import (
+    SentenceTransformerEmbeddings,
+)
from neo4j_graphrag.retrievers import PineconeNeo4jRetriever
from pinecone import Pinecone

@@ -11,14 +13,13 @@
def main() -> None:
with GraphDatabase.driver(NEO4J_URL, auth=NEO4J_AUTH) as neo4j_driver:
pc_client = Pinecone(PC_API_KEY)
-embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

+embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
retriever = PineconeNeo4jRetriever(
driver=neo4j_driver,
client=pc_client,
index_name="jeopardy",
id_property_neo4j="id",
-embedder=embedder, # type: ignore
+embedder=embedder,
)

res = retriever.search(query_text="biology", top_k=2)
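The replacement embedder is a drop-in for the retriever's embedder argument. A brief standalone sketch, assuming the optional sentence-transformers dependency is installed and that the embedder exposes an `embed_query(text)` method returning a sequence of floats:

.. code:: python

    from neo4j_graphrag.embeddings.sentence_transformers import (
        SentenceTransformerEmbeddings,
    )

    # Same model name as in the example above.
    embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")

    # embed_query is assumed to be the Embedder interface method that the
    # retrievers call internally when given query_text.
    vector = embedder.embed_query("biology")
    print(len(vector))  # all-MiniLM-L6-v2 yields 384-dimensional vectors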
8 changes: 5 additions & 3 deletions examples/qdrant/text_search.py
@@ -1,5 +1,7 @@
-from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from neo4j import GraphDatabase
+from neo4j_graphrag.embeddings.sentence_transformers import (
+    SentenceTransformerEmbeddings,
+)
from neo4j_graphrag.retrievers import QdrantNeo4jRetriever
from qdrant_client import QdrantClient

@@ -9,14 +11,14 @@

def main() -> None:
with GraphDatabase.driver(NEO4J_URL, auth=NEO4J_AUTH) as neo4j_driver:
-embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
+embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
retriever = QdrantNeo4jRetriever(
driver=neo4j_driver,
client=QdrantClient(url="http://localhost:6333"),
collection_name="Jeopardy",
id_property_external="neo4j_id",
id_property_neo4j="id",
-embedder=embedder, # type: ignore
+embedder=embedder,
)

res = retriever.search(query_text="biology", top_k=2)
8 changes: 5 additions & 3 deletions examples/weaviate/text_search_local_embedder.py
@@ -1,5 +1,7 @@
-from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from neo4j import GraphDatabase
+from neo4j_graphrag.embeddings.sentence_transformers import (
+    SentenceTransformerEmbeddings,
+)
from neo4j_graphrag.retrievers import WeaviateNeo4jRetriever
from weaviate.connect.helpers import connect_to_local

@@ -10,14 +12,14 @@
def main() -> None:
with GraphDatabase.driver(NEO4J_URL, auth=NEO4J_AUTH) as neo4j_driver:
with connect_to_local() as w_client:
-embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
+embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
retriever = WeaviateNeo4jRetriever(
driver=neo4j_driver,
client=w_client,
collection="Jeopardy",
id_property_external="neo4j_id",
id_property_neo4j="id",
-embedder=embedder, # type: ignore
+embedder=embedder,
)

res = retriever.search(query_text="biology", top_k=2)