diff --git a/deepsearcher/agent/deep_search.py b/deepsearcher/agent/deep_search.py
index f3bf0ad..ce55f29 100644
--- a/deepsearcher/agent/deep_search.py
+++ b/deepsearcher/agent/deep_search.py
@@ -52,14 +52,14 @@ Query Questions: {query}
 
 Retrieved Chunks: {retrieved_chunks}
 
-Respond with a list of "YES" or "NO" values, one for each chunk, in the same order as the chunks are listed. For example a list of chunks of three: ["YES", "NO", "YES"]"""
+Respond with a list of "YES" or "NO" values, one for each chunk, in the same order as the chunks are listed. For example, for a list of three chunks: ["YES", "NO", "YES"]
+"""
 
 REFLECT_PROMPT = """
 Determine whether additional search queries are needed based on the original query, previous sub queries, and all retrieved document chunks.
-If further research is required, provide a Python list of up to 3 search queries. If no further research is required, return an empty list.
-
-If the original query is to write a report, then you prefer to generate some further queries, instead return an empty list.
+If further research is needed (which is preferred, but use your own judgment), provide a Python list of more queries.
+If no further research is needed, return an empty list.
 
 Original Query: {question}
 
diff --git a/deepsearcher/embedding/openai_embedding.py b/deepsearcher/embedding/openai_embedding.py
index f33df42..9bd6158 100644
--- a/deepsearcher/embedding/openai_embedding.py
+++ b/deepsearcher/embedding/openai_embedding.py
@@ -1,5 +1,3 @@
-import os
-
 from openai import OpenAI
 from openai._types import NOT_GIVEN
 
@@ -37,8 +35,6 @@ class OpenAIEmbedding(BaseEmbedding):
 
         if "base_url" in kwargs:
             base_url = kwargs.pop("base_url")
-        else:
-            base_url = os.getenv("OPENAI_BASE_URL")
 
         if "model_name" in kwargs:
             model = kwargs.pop("model_name")
diff --git a/deepsearcher/llm/openai_llm.py b/deepsearcher/llm/openai_llm.py
index ea94ca5..93336b0 100644
--- a/deepsearcher/llm/openai_llm.py
+++ b/deepsearcher/llm/openai_llm.py
@@ -1,5 +1,3 @@
-import os
-
 from deepsearcher.llm.base import BaseLLM
 
 
@@ -30,12 +28,8 @@ class OpenAILLM(BaseLLM):
         self.model = model
         if "api_key" in kwargs:
            api_key = kwargs.pop("api_key")
-        else:
-            api_key = os.getenv("OPENAI_API_KEY")
         if "base_url" in kwargs:
             base_url = kwargs.pop("base_url")
-        else:
-            base_url = os.getenv("OPENAI_BASE_URL")
         self.client = OpenAI(api_key=api_key, base_url=base_url, **kwargs)
 
     def chat(self, messages: list[dict], stream_callback = None) -> str:
@@ -50,18 +44,27 @@
         Returns:
             response (str)
         """
-        completion = self.client.chat.completions.create(
+        with self.client.chat.completions.create(
             model=self.model,
             messages=messages,
-            stream=True
-        )
-        response = ""
-        for chunk in completion:
-            stream_response = chunk.choices[0].delta.content
-            if stream_response:
-                print(stream_response, end="", flush=True)
-                response += stream_response
-                if stream_callback:
-                    stream_callback(stream_response)
+            stream=True,
+        ) as stream:
+            content = ""
+            reasoning_content = ""
+            for chunk in stream:
+                if not chunk.choices:
+                    continue
+                else:
+                    delta = chunk.choices[0].delta
+                    if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
+                        print(delta.reasoning_content, end='', flush=True)
+                        reasoning_content += delta.reasoning_content
+                        if stream_callback:
+                            stream_callback(delta.reasoning_content)
+                    if hasattr(delta, 'content') and delta.content is not None:
+                        print(delta.content, end="", flush=True)
+                        content += delta.content
+                        if stream_callback:
+                            stream_callback(delta.content)
         print("\n")
-        return response
+        return content
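A minimal usage sketch of the reworked streaming `chat` (illustrative, not part of the patch): it assumes `OpenAILLM.__init__` accepts a `model` argument, as the constructor hunk suggests, and that `OPENAI_API_KEY` / `OPENAI_BASE_URL` are set in the environment, which the `openai` client resolves on its own now that the explicit `os.getenv` fallbacks are removed.

```python
# Illustrative sketch only: the model name and callback below are
# assumptions, not part of this patch.
from deepsearcher.llm.openai_llm import OpenAILLM

pieces = []

def collect(delta_text: str) -> None:
    # Receives reasoning deltas and content deltas in arrival order.
    pieces.append(delta_text)

llm = OpenAILLM(model="gpt-4o-mini")  # hypothetical model choice
answer = llm.chat(
    messages=[{"role": "user", "content": "Say hello."}],
    stream_callback=collect,
)
# chat() returns only the accumulated content; any reasoning_content is
# printed and forwarded to the callback but not included in the return value.
print(answer)
```

Iterating the completion inside a `with ... as stream:` block also guarantees the underlying HTTP response is closed once streaming finishes or an error is raised.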