Skip to content

Commit

Permalink
Fix llm config (#139)
Browse files Browse the repository at this point in the history
  • Loading branch information
moria97 authored Aug 1, 2024
1 parent 25e2ffb commit b37a44d
Showing 1 changed file with 1 addition and 19 deletions.
20 changes: 1 addition & 19 deletions src/pai_rag/modules/llm/llm_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,6 @@
from llama_index.llms.openai import OpenAI
from llama_index.llms.azure_openai import AzureOpenAI

import os

# from llama_index.llms.dashscope import DashScope
from pai_rag.integrations.llms.dashscope.base import MyDashScope
from pai_rag.integrations.llms.paieas.base import PaiEAS
Expand All @@ -24,23 +22,7 @@ def _create_new_instance(self, new_params: Dict[str, Any]):
config = new_params[MODULE_PARAM_CONFIG]
source = config["source"].lower()

# Get DASHSCOPE KEY, will use dashscope LLM
# Since we might have already configured key for dashscope embedding and multi-modal
# Will refine later.
if os.getenv("DASHSCOPE_API_KEY", None):
model_name = "qwen-turbo"
logger.info(
f"""
[Parameters][LLM:DashScope]
model = {model_name}
"""
)
llm = MyDashScope(
model_name=model_name,
temperature=config.get("temperature", 0.1),
max_tokens=2000,
)
elif source == "openai":
if source == "openai":
logger.info(
f"""
[Parameters][LLM:OpenAI]
Expand Down

0 comments on commit b37a44d

Please sign in to comment.