Add reranking functionality to retrieval process #101

Merged 2 commits on Apr 17, 2024

Changes from all commits
1 change: 1 addition & 0 deletions llm-complete-guide/requirements.txt
@@ -15,6 +15,7 @@ tiktoken
umap-learn
matplotlib
pyarrow
rerankers[all]

# optional requirements for S3 artifact store
# s3fs>2022.3.0
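
For a quick local check that the new dependency resolves, a minimal sketch (assumes the
updated requirements have been installed, e.g. via pip install -r requirements.txt):

# Import check for the new dependency; "cross-encoder" matches the default
# model string used in llm_utils.py below.
from rerankers import Reranker

ranker = Reranker("cross-encoder")
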
39 changes: 35 additions & 4 deletions llm-complete-guide/utils/llm_utils.py
@@ -32,6 +32,7 @@
from constants import EMBEDDINGS_MODEL, MODEL_NAME_MAP, OPENAI_MODEL
from pgvector.psycopg2 import register_vector
from psycopg2.extensions import connection
from rerankers import Reranker
from sentence_transformers import SentenceTransformer
from structures import Document

@@ -353,17 +354,40 @@ def get_embeddings(text):
    return model.encode(text)


def rerank_documents(
    query: str, documents: List[Tuple], reranker_model: str = "cross-encoder"
) -> List[str]:
    """Reranks the given documents based on the given query.

    Args:
        query (str): The query to use for reranking.
        documents (List[Tuple]): The documents to rerank; the first element of
            each tuple is the document text.
        reranker_model (str, optional): The model identifier passed to
            Reranker. Defaults to "cross-encoder".

    Returns:
        List[str]: The reranked content.
    """
    ranker = Reranker(reranker_model)
    docs_texts = [doc[0] for doc in documents]
    results = ranker.rank(query=query, docs=docs_texts)
    return [document.text for document in results.results]
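
For illustration, a minimal sketch of calling this helper on its own (the query and the
document tuples are made up; only each tuple's first element is read):

# Hypothetical standalone use of rerank_documents.
docs = [
    ("ZenML pipelines are defined with the @pipeline decorator.", "doc-1"),
    ("An artifact store persists step outputs between runs.", "doc-2"),
]
top_texts = rerank_documents("How do I define a pipeline?", docs)
print(top_texts[0])  # highest-ranked document text for this query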


def process_input_with_retrieval(
-    input: str, model: str = OPENAI_MODEL, n_items_retrieved: int = 5
+    input: str,
+    model: str = OPENAI_MODEL,
+    n_items_retrieved: int = 20,
+    use_reranking: bool = True,
) -> str:
"""Process the input with retrieval.

Args:
input (str): The input to process.
model (str, optional): The model to use for completion. Defaults to
OPENAI_MODEL.
OPENAI_MODEL.
n_items_retrieved (int, optional): The number of items to retrieve from
the database. Defaults to 5.
the database. Defaults to 5.
use_reranking (bool, optional): Whether to use reranking. Defaults to
True.

Returns:
str: The processed output.
@@ -375,6 +399,13 @@ def process_input_with_retrieval(
        get_embeddings(input), get_db_conn(), n=n_items_retrieved
    )

    if use_reranking:
        # Rerank the documents based on the input
        # and take the top 5 only
        context_content = rerank_documents(input, related_docs)[:5]
    else:
        # Otherwise keep the similarity-search order and take the top 5
        context_content = [doc[0] for doc in related_docs[:5]]

    # Step 2: Get completion from OpenAI API
    # Set system message to help set appropriate tone and context for model
    system_message = f"""
@@ -398,7 +429,7 @@ def process_input_with_retrieval(
        {
            "role": "assistant",
            "content": f"Relevant ZenML documentation: \n"
-            + "\n".join(doc[0] for doc in related_docs),
+            + "\n".join(context_content),
        },
    ]
    logger.debug("CONTEXT USED\n\n", messages[2]["content"], "\n\n")
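
As a quick end-to-end check of the new flag, a sketch under assumptions (the question is
made up; a reachable database and OpenAI credentials are assumed to be configured):

# Hypothetical smoke test comparing answers with and without reranking.
question = "How do I configure an artifact store in ZenML?"
answer_reranked = process_input_with_retrieval(question, use_reranking=True)
answer_baseline = process_input_with_retrieval(question, use_reranking=False)
print(answer_reranked)
print(answer_baseline)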