
Fix capturing the chain-of-thought (reasoning content) from LLM streaming responses

tanxing committed 1 week ago · branch: main · commit 779173d0a5
3 changed files:
  1. deepsearcher/agent/deep_search.py (8 changes)
  2. deepsearcher/embedding/openai_embedding.py (4 changes)
  3. deepsearcher/llm/openai_llm.py (39 changes)

deepsearcher/agent/deep_search.py (8 changes)

@@ -52,14 +52,14 @@ Query Questions: {query}
 Retrieved Chunks:
 {retrieved_chunks}
-Respond with a list of "YES" or "NO" values, one for each chunk, in the same order as the chunks are listed. For example a list of chunks of three: ["YES", "NO", "YES"]"""
+Respond with a list of "YES" or "NO" values, one for each chunk, in the same order as the chunks are listed. For example a list of chunks of three: ["YES", "NO", "YES"]
+"""

 REFLECT_PROMPT = """
 Determine whether additional search queries are needed based on the original query, previous sub queries, and all retrieved document chunks.
-If further research is required, provide a Python list of up to 3 search queries. If no further research is required, return an empty list.
+If further research is needed (which is preferred, but use your own judgment), provide a Python list of more queries.
+If no further research is needed, return an empty list.
+If the original query is to write a report, prefer to generate further queries rather than returning an empty list.
 Original Query: {question}
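The updated REFLECT_PROMPT asks the model for a Python-style list of follow-up queries. A minimal sketch of how such a reply might be parsed on the caller's side; the helper below is hypothetical and not part of this commit (deep_search.py's actual parsing code is not shown in this diff):

import ast

def parse_reflect_reply(reply: str) -> list[str]:
    # Hypothetical helper: pull the first bracketed list out of the
    # model's reply and parse it safely with ast.literal_eval.
    start, end = reply.find("["), reply.rfind("]")
    if start == -1 or end == -1:
        return []  # no list found -> treat as "no further research needed"
    try:
        queries = ast.literal_eval(reply[start:end + 1])
    except (ValueError, SyntaxError):
        return []
    if not isinstance(queries, list):
        return []
    return [q for q in queries if isinstance(q, str)]

# parse_reflect_reply('Sure: ["query a", "query b"]') -> ["query a", "query b"]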

deepsearcher/embedding/openai_embedding.py (4 changes)

@@ -1,5 +1,3 @@
-import os
-
 from openai import OpenAI
 from openai._types import NOT_GIVEN

@@ -37,8 +35,6 @@ class OpenAIEmbedding(BaseEmbedding):
         if "base_url" in kwargs:
             base_url = kwargs.pop("base_url")
-        else:
-            base_url = os.getenv("OPENAI_BASE_URL")
         if "model_name" in kwargs:
             model = kwargs.pop("model_name")
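With the os.getenv fallback removed, the embedding client no longer picks up OPENAI_BASE_URL from the environment. A minimal usage sketch, assuming the constructor otherwise keeps its existing kwargs handling; the model name and endpoint below are placeholders:

from deepsearcher.embedding.openai_embedding import OpenAIEmbedding

# After this change, base_url must be supplied explicitly as a kwarg;
# it is no longer read from the OPENAI_BASE_URL environment variable.
embedding = OpenAIEmbedding(
    model_name="text-embedding-3-small",    # placeholder model name
    base_url="https://api.example.com/v1",  # placeholder endpoint
)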

deepsearcher/llm/openai_llm.py (39 changes)

@@ -1,5 +1,3 @@
-import os
-
 from deepsearcher.llm.base import BaseLLM

@@ -30,12 +28,8 @@ class OpenAILLM(BaseLLM):
         self.model = model
         if "api_key" in kwargs:
             api_key = kwargs.pop("api_key")
-        else:
-            api_key = os.getenv("OPENAI_API_KEY")
         if "base_url" in kwargs:
             base_url = kwargs.pop("base_url")
-        else:
-            base_url = os.getenv("OPENAI_BASE_URL")
         self.client = OpenAI(api_key=api_key, base_url=base_url, **kwargs)

     def chat(self, messages: list[dict], stream_callback = None) -> str:

@@ -50,18 +44,27 @@ class OpenAILLM(BaseLLM):
         Returns:
             response (str)
         """
-        completion = self.client.chat.completions.create(
+        with self.client.chat.completions.create(
             model=self.model,
             messages=messages,
-            stream=True
-        )
-        response = ""
-        for chunk in completion:
-            stream_response = chunk.choices[0].delta.content
-            if stream_response:
-                print(stream_response, end="", flush=True)
-                response += stream_response
-                if stream_callback:
-                    stream_callback(stream_response)
+            stream=True,
+        ) as stream:
+            content = ""
+            reasoning_content = ""
+            for chunk in stream:
+                if not chunk.choices:
+                    continue
+                else:
+                    delta = chunk.choices[0].delta
+                    if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
+                        print(delta.reasoning_content, end='', flush=True)
+                        reasoning_content += delta.reasoning_content
+                        if stream_callback:
+                            stream_callback(delta.reasoning_content)
+                    if hasattr(delta, 'content') and delta.content is not None:
+                        print(delta.content, end="", flush=True)
+                        content += delta.content
+                        if stream_callback:
+                            stream_callback(delta.content)
         print("\n")
-        return response
+        return content
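A minimal usage sketch of the reworked chat() method. The model name, key, and endpoint are placeholders; what the diff does establish is that the callback receives both reasoning and answer deltas interleaved in arrival order, while the return value holds only the final answer content:

from deepsearcher.llm.openai_llm import OpenAILLM

llm = OpenAILLM(
    model="deepseek-reasoner",               # placeholder: any model emitting reasoning_content deltas
    api_key="sk-...",                        # now required as a kwarg (env fallback removed)
    base_url="https://api.deepseek.com/v1",  # placeholder endpoint
)

deltas = []
answer = llm.chat(
    [{"role": "user", "content": "Why is the sky blue?"}],
    stream_callback=deltas.append,  # called for every reasoning_content and content delta
)
# `answer` is the accumulated content only; the reasoning chain is printed
# and forwarded to the callback but not included in the return value.

One consequence of this design: a single stream_callback cannot tell reasoning deltas apart from answer deltas, so a UI that wants to render the chain of thought separately would need a richer callback signature.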
