Commit e0256a3fe0 · pd · 3 days ago
Changed files:
  1. deepsearcher/agent/deep_search.py (114 changes)
  2. deepsearcher/config.yaml (6 changes)
  3. deepsearcher/llm/openai_llm.py (6 changes)
  4. deepsearcher/online_query.py (3 changes)
  5. deepsearcher/templates/html/index.html (6 changes)
  6. deepsearcher/templates/static/js/app.js (4 changes)
  7. deepsearcher/vector_db/milvus.py (4 changes)
  8. docs/intro_docs/milvus_benchmark.md (16 changes)
  9. main.py (49 changes)
  10. test.py (4 changes)

deepsearcher/agent/deep_search.py (114 changes)

@@ -23,9 +23,11 @@ COLLECTION_ROUTE_PROMPT = """
 SUB_QUERY_PROMPT = """
 To answer this question comprehensively, try to split or expand the original question into several sub-questions.
-The number should be neither too many nor too few; decide it by the complexity of the question.
+Think about how to split the question in two directions, top-down and bottom-up.
+The number of sub-questions should be neither too many nor too few and should guarantee that the answer is comprehensive; decide it by the complexity of the question.
 If the original question is itself very simple and there is no need to split it, keep and output the original question as-is.
-Each sub-question must be specific, clear, and indivisible (atomic); finally, return a list of strings.
+Each sub-question must be specific, clear, and indivisible (atomic), i.e. it may not contain further sub-questions. Sub-questions must not contain imperative phrases such as "please answer", "please summarize", or "please analyze".
+You must finally return a list of strings.

 Original question: {original_query}
@@ -36,9 +38,10 @@ SUB_QUERY_PROMPT = """
 Example output (the number of items in the example is not a requirement):
 [
     "What is machine learning?",
-    "What is the purpose of using machine learning?",
-    "What is the difference between machine learning and deep learning?",
-    "The historical evolution of machine learning?"
+    "The purpose of using machine learning",
+    "Common machine learning algorithms",
+    "The historical evolution of machine learning",
+    "What is the difference between machine learning and deep learning?"
 ]
 </EXAMPLE>
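The prompt ends by demanding a bare Python list of strings, which the agent then parses. Below is a minimal sketch of such defensive parsing, assuming the model may wrap the list in prose or a code fence; extract_list is a hypothetical helper for illustration, not the repo's llm.literal_eval.

import ast
import re

def extract_list(response: str) -> list[str]:
    # Grab the first [...] span so surrounding prose or fences don't break parsing
    match = re.search(r"\[.*\]", response, re.DOTALL)
    if match is None:
        return []
    try:
        parsed = ast.literal_eval(match.group(0))
    except (ValueError, SyntaxError):
        return []
    return [item for item in parsed if isinstance(item, str)]

print(extract_list('["What is machine learning?", "Common machine learning algorithms"]'))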
@@ -49,26 +52,27 @@ SUB_QUERY_PROMPT = """
 RERANK_PROMPT = """
-Based on the current question and the retrieved document chunks,
-give a quick judgment on whether each retrieved document helps answer the question (directly or indirectly, fully or partially, either counts).
-For each chunk you should return only "YES" or "NO"; mind the order and the count.
+Based on the current question and the retrieved document chunks (each chunk is wrapped in <reference></reference> and <chunk></chunk> tags and carries a corresponding consecutive id),
+give a quick judgment on whether each retrieved chunk helps answer the question (directly or indirectly, fully or partially, either counts, but it must contain actually useful content).
+For each chunk you should return only "True" or "False"; mind the order and the count.

 Question: {query}
 Retrieved document chunks:
 {chunks}

-For example, given 4 chunks (the actual number retrieved is not necessarily 4), return: ["YES", "NO", "YES", "YES"]
+For example, if 4 chunks were given (the actual number retrieved is not necessarily that many), return 4 "True" or "False" values (note this is only an example, not an actual judgment): ["True", "False", "True", "True"]
 Use the same language as the question.
-What you must return is a python list of str without any additional content:
+What you must return is a python list of str(bool) without any additional content:
 """

 REFLECT_PROMPT = """
-Based on the original question, the sub-questions, and the retrieved document chunks, decide whether more questions need to be generated.
-If the chunks obtained so far fail to cover all the sub-questions, that means those documents cannot be retrieved.
-You may generate similar but slightly different questions to retry retrieval, or critically reflect on the retrieved chunks and generate new questions to keep the answer to the original question accurate and comprehensive.
-If there is no real need to keep researching (this is your call), return an empty list.
+Based on the original question, the sub-questions, and the retrieved document chunks, decide whether more questions need to be generated; these questions will be used for further reasoning and searching.
+You should critically reflect on the chunks obtained so far and generate other, new questions to keep the answer to the original question accurate and comprehensive; think about how to generate new questions in two directions, top-down and bottom-up.
+If the chunks obtained so far fail to cover all the sub-questions, that means documents about those questions cannot be retrieved; fill the gap by reasoning from your own knowledge.
+Each new question must be specific, clear, and indivisible (atomic), must not duplicate an earlier question, and must not contain imperative phrases such as "please answer", "please summarize", or "please analyze".
+If there is no real need to keep researching (this is your call), return an empty list.

 Original question: {original_query}
@@ -83,23 +87,19 @@ REFLECT_PROMPT = """
 SUMMARY_PROMPT = """
-You are a content-analysis expert. Based on the provided questions and the retrieved information, generate a thorough, long-form answer.
-If the retrieved information is insufficient to answer the question, or extra information must be added to answer it, you should supplement it from your own knowledge.
-In that case, information you provide yourself must be cited like "your knowledge here[^0]"; note that the index 0 in "[^0]" is fixed and denotes your own knowledge (an end-of-text citation example appears below).
-You should also generate in-text citations and an end-of-text reference list from the provided information; citations of document-chunk references start from [^1].
-If several chunks share the same source, or one chunk answers several questions, the in-text citation may be used repeatedly, but the source is cited only once at the end, i.e. the end-of-text reference list must not contain duplicate sources.
+You are a content-analysis expert.
+Synthesize the questions already asked and the retrieved information, and generate a detailed, accurate, well-structured, and as-long-as-possible answer centered on the original question.
+If the retrieved information is insufficient to answer the question, you should extend and supplement it from your own knowledge.
+Note: do not answer the questions one by one; instead, synthesize all questions and information into a single complete answer.
+You should also generate in-text citations "[^index]" (markdown in-text citations) from the provided information.
+Information you provide yourself must be cited with "[^0]", i.e. your own information uses the fixed index=0.
+Citation indices for the <reference> a <chunk> came from start at index=1; the source must match the "href" in the earlier <reference>.
+Do not assign a citation to every <chunk>; <chunk>s of the same <reference> share one citation.
+Also, if a sentence in the answer needs to cite several <reference>s, append multiple [^index] markers at the end of the sentence.
+Example:

 <EXAMPLE>
-In-text citation example (using markdown footnotes):
-"XGBoost is a very powerful ensemble learning model.[^2]"
-(You must use "[^index]", where index is the id of the corresponding <reference>.)
-End-of-text citation example (must match the href of the earlier reference; do not assign a citation to every chunk, each reference shares one citation):
-[^0]: AI Generated
-[^2]: files/docs/chap_001_003_models.md
+"XGBoost is a very powerful ensemble learning model.[^1] However, a drawback of XGBoost is its high computational complexity, which demands substantial computing resources.[^0]"
 </EXAMPLE>
@@ -241,7 +241,7 @@ class DeepSearch(BaseAgent):
                 continue

             # Format all chunks for batch processing
-            chunks = self._format_chunks(retrieved_results)
+            chunks, _ = self._format_chunks(retrieved_results)

             # Batch process all chunks with a single LLM call
             content = self.llm.chat(
@@ -262,14 +262,14 @@ class DeepSearch(BaseAgent):
             relevance_list = self.llm.literal_eval(content)
             if not isinstance(relevance_list, list):
                 raise ValueError("Response is not a list")
-        except (ValueError, SyntaxError):
+        except Exception as _:
             # Fallback: if parsing fails, treat all chunks as relevant
             log.color_print(f"Warning: Failed to parse relevance response. Treating all chunks as relevant. Response was: {content}")
-            relevance_list = ["YES"] * len(retrieved_results)
+            relevance_list = ["True"] * len(retrieved_results)

         # Ensure we have enough relevance judgments for all chunks
         while len(relevance_list) < len(retrieved_results):
-            relevance_list.append("YES")  # Default to relevant if no judgment provided
+            relevance_list.append("True")  # Default to relevant if no judgment provided

         # Filter relevant chunks based on LLM response
         accepted_chunk_num = 0
@@ -278,9 +278,9 @@ class DeepSearch(BaseAgent):
             # Check if we have a relevance judgment for this chunk
             is_relevant = (
                 i < len(relevance_list) and
-                "YES" in relevance_list[i].upper() and
-                "NO" not in relevance_list[i].upper()) if i < len(relevance_list
-            ) else True
+                "True" in relevance_list[i] and
+                "False" not in relevance_list[i]
+            ) if i < len(relevance_list) else True
             if is_relevant:
                 all_retrieved_results.append(retrieved_result)
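The substring checks above ("True" in ..., "False" not in ...) deliberately tolerate judgments that come back with extra quoting or whitespace, such as '"True"' or ' True '. A standalone sketch of the same acceptance rule on toy data, illustrative only rather than the repo's code:

def filter_relevant(chunks: list[str], judgments: list[str]) -> list[str]:
    kept = []
    for i, chunk in enumerate(chunks):
        # Missing judgments default to relevant, mirroring the fallback above
        relevant = ("True" in judgments[i] and "False" not in judgments[i]) if i < len(judgments) else True
        if relevant:
            kept.append(chunk)
    return kept

print(filter_relevant(["a", "b", "c"], ['"True"', ' False ', "True"]))  # -> ['a', 'c']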
@@ -296,7 +296,7 @@ class DeepSearch(BaseAgent):
     def _generate_more_sub_queries(
         self, original_query: str, all_sub_queries: list[str], all_retrieved_results: list[RetrievalResult]
     ) -> list[str]:
-        chunks = self._format_chunks(all_retrieved_results)
+        chunks, _ = self._format_chunks(all_retrieved_results)
         reflect_prompt = REFLECT_PROMPT.format(
             original_query=original_query,
             all_sub_queries=all_sub_queries,
@@ -394,7 +394,7 @@ class DeepSearch(BaseAgent):
         if not all_retrieved_results or len(all_retrieved_results) == 0:
             send_info(f"No further information found for '{original_query}'!")
             return "", []
-        chunks = self._format_chunks(all_retrieved_results)
+        chunks, refs = self._format_chunks(all_retrieved_results)
         send_info(f"Summarizing {len(all_retrieved_results)} retrieved document chunks")
         summary_prompt = SUMMARY_PROMPT.format(
             original_query=original_query,
@@ -402,25 +402,33 @@ class DeepSearch(BaseAgent):
             chunks=chunks
         )
         response = self.llm.chat([{"role": "user", "content": summary_prompt}])
-        final_answer = self.llm.remove_think(response)
-        send_answer(final_answer)
-        return self.llm.remove_think(response), all_retrieved_results
+        response = self.llm.remove_think(response) + refs
+        send_answer(response)
+        return response, all_retrieved_results

-    def _format_chunks(self, retrieved_results: list[RetrievalResult]):
+    def _format_chunks(self, retrieved_results: list[RetrievalResult]) -> tuple[str, str]:
         # Group chunks into a dict keyed by reference
-        references = defaultdict(list)
+        ref_dict = defaultdict(list)
         for result in retrieved_results:
-            references[result.reference].append(result.text)
-        chunks = []
+            ref_dict[result.reference].append(result.text)
+        formated_chunks = []
+        formated_refs = ["\n\n[^0]: AI Generated\n"]
         chunk_count = 0
-        for i, reference in enumerate(references):
-            formated = f"<reference id='{i + 1}' href='{reference}'>\n" + "".join(
-                [
-                    f"<chunk id='{j + 1 + chunk_count}'>\n{chunk}\n</chunk id='{j + 1 + chunk_count}'>\n"
-                    for j, chunk in enumerate(references[reference])
-                ]
-            ) + f"</reference id='{i + 1}'>\n"
-            print(formated)
-            chunks.append(formated)
-            chunk_count += len(references[reference])
-        return "".join(chunks)
+        for i, reference in enumerate(ref_dict):
+            formated_chunk = "".join(
+                [
+                    (
+                        f"<reference id='{i + 1}' href='{reference}'>" +
+                        f"<chunk id='{j + 1 + chunk_count}'>\n{chunk}\n</chunk id='{j + 1 + chunk_count}'>" +
+                        f"</reference id='{i + 1}'>\n"
+                    )
+                    for j, chunk in enumerate(ref_dict[reference])
+                ]
+            )
+            print(formated_chunk)
+            formated_chunks.append(formated_chunk)
+            chunk_count += len(ref_dict[reference])
+            formated_refs.append(f"[^{i + 1}]: " + str(reference) + "\n")
+        formated_chunks = "".join(formated_chunks)
+        formated_refs = "".join(formated_refs)
+        return formated_chunks, formated_refs
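A toy illustration of the new return shape, using hypothetical (reference, text) pairs in place of RetrievalResult objects: chunks of the same reference share one footnote index, and the refs string is exactly what the remove_think(response) + refs line appends to the final answer.

from collections import defaultdict

# Hypothetical (reference, text) pairs standing in for RetrievalResult objects
results = [("docs/milvus.md", "Milvus is a vector database."),
           ("docs/milvus.md", "It supports hybrid search."),
           ("docs/faiss.md", "FAISS is a similarity-search library.")]

ref_dict = defaultdict(list)
for reference, text in results:
    ref_dict[reference].append(text)

chunks, refs, count = [], ["\n\n[^0]: AI Generated\n"], 0
for i, reference in enumerate(ref_dict):
    for j, chunk in enumerate(ref_dict[reference]):
        chunks.append(f"<reference id='{i + 1}' href='{reference}'>"
                      f"<chunk id='{j + 1 + count}'>\n{chunk}\n</chunk id='{j + 1 + count}'>"
                      f"</reference id='{i + 1}'>\n")
    count += len(ref_dict[reference])
    refs.append(f"[^{i + 1}]: {reference}\n")

print("".join(chunks))  # the {chunks} slot of RERANK/SUMMARY prompts
print("".join(refs))    # appended verbatim to the summarized answer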

deepsearcher/config.yaml (6 changes)

@@ -2,7 +2,7 @@ provide_settings:
   llm:
     provider: "OpenAILLM"
     config:
-      model: "Qwen/Qwen3-30B-A3B-Thinking-2507"
+      model: "Qwen/Qwen3-32B"
       api_key: "sk-fpzwvagjkhwysjsozfybvtjzongatcwqdihdxzuijnfdrjzt"
       base_url: "https://api.siliconflow.cn/v1"
@@ -83,5 +83,5 @@ query_settings:
   max_iter: 3
 load_settings:
-  chunk_size: 1024
-  chunk_overlap: 512
+  chunk_size: 2048
+  chunk_overlap: 1024
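With chunk_size 2048 and chunk_overlap 1024, the effective stride between windows is chunk_size - chunk_overlap = 1024 characters, so every position in a document is covered by roughly two chunks. A quick sketch of that arithmetic (a generic sliding window, not the project's actual loader):

def window_bounds(text_len: int, chunk_size: int = 2048, chunk_overlap: int = 1024):
    stride = chunk_size - chunk_overlap  # 1024 with the new settings
    starts = range(0, max(text_len - chunk_overlap, 1), stride)
    return [(s, min(s + chunk_size, text_len)) for s in starts]

print(window_bounds(5000))  # [(0, 2048), (1024, 3072), (2048, 4096), (3072, 5000)]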

deepsearcher/llm/openai_llm.py (6 changes)

@@ -48,9 +48,9 @@ class OpenAILLM(BaseLLM):
             model=self.model,
             messages=messages,
             stream=True,
-            temperature=0.6,
-            top_p=0.8,
-            presence_penalty=1.2
+            temperature=0.8,
+            top_p=0.9,
+            presence_penalty=1.4
         ) as stream:
             # Stream to the console for testing
             content = ""

deepsearcher/online_query.py (3 changes)

@@ -3,7 +3,7 @@ from deepsearcher import configuration
 from deepsearcher.vector_db.base import RetrievalResult

-def query(original_query: str, max_iter: int | None = None) -> tuple[str, list[RetrievalResult]]:
+def query(original_query: str, **kwargs) -> tuple[str, list[RetrievalResult]]:
     """
     Query the knowledge base with a question and get an answer.
@@ -20,6 +20,7 @@ def query(original_query: str, max_iter: int | None = None) -> tuple[str, list[R
         - A list of retrieval results that were used to generate the answer
     """
     default_searcher = configuration.default_searcher
+    max_iter = kwargs.get("max_iter", 3)
     return default_searcher.query(original_query, max_iter=max_iter)
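Moving max_iter into **kwargs keeps the public signature stable while the default, kwargs.get("max_iter", 3), lives in one place. A usage sketch:

from deepsearcher.online_query import query

answer, sources = query("Write a report about Milvus.")              # default max_iter=3
answer, sources = query("Write a report about Milvus.", max_iter=1)  # single pass, no reflection rounds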

deepsearcher/templates/html/index.html (6 changes)

@@ -23,9 +23,9 @@
 <body>
     <div class="container">
         <header>
-            <h1>DeepSearcher Intelligent Search System</h1>
+            <h1>DeepSearcher Intelligent Deep Search System</h1>
             <p class="app-description">
-                An enterprise knowledge management system built on large language models and a vector database, supporting private-data search and online content integration, delivering accurate answers and comprehensive reports.
+                A knowledge management system built on large language models and a vector database, supporting private-data search and online content integration, delivering accurate answers and comprehensive reports.
             </p>
         </header>
@@ -204,7 +204,7 @@
         </main>
         <footer>
-            <p>DeepSearcher © 2025 | Enterprise Knowledge Management and Intelligent Q&A System</p>
+            <p>DeepSearcher © 2025 | Intelligent Deep Search System</p>
         </footer>
     </div>

deepsearcher/templates/static/js/app.js (4 changes)

@@ -276,7 +276,7 @@ document
             .filter((path) => path);

         setButtonLoading(button, true);
-        showStatus('loadStatus', 'Loading files...', 'loading');
+        showStatus('loadStatus', ' Loading files...', 'loading');
         hideResult();
         hideProcessResult();
@@ -363,7 +363,7 @@ document
             .filter((url) => url);

         setButtonLoading(button, true);
-        showStatus('webLoadStatus', 'Loading website content...', 'loading');
+        showStatus('webLoadStatus', ' Loading website...', 'loading');
         hideResult();
         hideProcessResult();

deepsearcher/vector_db/milvus.py (4 changes)

@@ -152,7 +152,7 @@ class Milvus(BaseVectorDB):
         self,
         collection: str,
         vector: np.ndarray | list[float],
-        top_k: int = 3,
+        top_k: int = 4,
         query_text: str = None,
         *args,
         **kwargs,
@@ -163,7 +163,7 @@ class Milvus(BaseVectorDB):
         Args:
             collection (Optional[str]): Collection name. If None, uses default_collection.
             vector (Union[np.array, List[float]]): Query vector for similarity search.
-            top_k (int, optional): Number of results to return. Defaults to 5.
+            top_k (int, optional): Number of results to return. Defaults to 4.
             query_text (Optional[str], optional): Original query text for hybrid search. Defaults to None.
             *args: Variable length argument list.
             **kwargs: Arbitrary keyword arguments.
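The per-query candidate count rises from 3 to 4, and the docstring now matches the code (it previously claimed a default of 5). For orientation, a hedged pymilvus sketch of a search honoring that default; the collection name, vector dimension, and output fields are placeholders, not values confirmed by this diff:

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")
query_vector = [0.0] * 768  # placeholder embedding
hits = client.search(
    collection_name="default",
    data=[query_vector],                  # one query vector -> one list of hits
    limit=4,                              # mirrors the new top_k default
    output_fields=["text", "reference"],  # placeholder field names
)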

docs/intro_docs/milvus_benchmark.md (16 changes)

File diff suppressed because one or more lines are too long

main.py (49 changes)

@@ -112,7 +112,7 @@ def load_files(
         examples=[256],
     ),
     force_rebuild: bool = Body(
-        False,
+        True,
         description="Whether to force rebuild the collection if it already exists.",
         examples=[False],
     ),
@@ -141,7 +141,7 @@ def load_files(
             batch_size=batch_size if batch_size is not None else 8,
             force_rebuild=force_rebuild,
         )
-        return {"message": "Files loaded successfully."}
+        return {"message": "Loaded successfully"}
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
@@ -198,48 +198,7 @@ def load_website(
             batch_size=batch_size if batch_size is not None else 8,
             force_rebuild=force_rebuild,
         )
-        return {"message": "Website loaded successfully."}
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-@app.get("/query/")
-def perform_query(
-    original_query: str = Query(
-        ...,
-        description="Your question here.",
-        examples=["Write a report about Milvus."],
-    ),
-    max_iter: int = Query(
-        3,
-        description="The maximum number of iterations for reflection.",
-        ge=1,
-        examples=[3],
-    ),
-):
-    """
-    Perform a query against the loaded data.
-
-    Args:
-        original_query (str): The user's question or query.
-        max_iter (int, optional): Maximum number of iterations for reflection. Defaults to 3.
-
-    Returns:
-        dict: A dictionary containing the query result and token consumption.
-
-    Raises:
-        HTTPException: If the query fails.
-    """
-    try:
-        # Clear previous messages
-        message_stream = get_message_stream()
-        message_stream.clear_messages()
-        result_text, _ = query(original_query, max_iter)
-        return {
-            "result": result_text,
-            "messages": message_stream.get_messages_as_dicts()
-        }
+        return {"message": "Loaded successfully"}
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
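With the blocking GET /query/ endpoint removed, perform_query_stream (next hunk) becomes the only query path. A hedged client sketch; the /query-stream/ route, its parameter names, and the line-delimited stream format are all assumptions, since the route decorator sits outside this diff:

import requests

# Hypothetical route and parameters; the actual path is defined outside this diff
with requests.get(
    "http://localhost:8000/query-stream/",
    params={"original_query": "Write a report about Milvus.", "max_iter": 3},
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if line:
            print(line.decode("utf-8"))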
@@ -297,7 +256,7 @@ async def perform_query_stream(
     def run_query():
         try:
             print(f"Starting query: {original_query} with max_iter: {max_iter}")
-            result_text, retrieval_results = query(original_query, max_iter)
+            result_text, retrieval_results = query(original_query, max_iter=max_iter)
             print(f"Query completed with result length: {len(result_text) if result_text else 0}")
             print(f"Retrieved {len(retrieval_results) if retrieval_results else 0} documents")
             return result_text, None

test.py (4 changes)

@@ -16,7 +16,7 @@ load_from_local_files(
     paths_or_directory="docs",
     collection_name="default",
     collection_description="a general collection for all documents",
-    force_rebuild=True, batch_size=8
+    force_rebuild=True, batch_size=16
 )
@@ -25,4 +25,4 @@ load_from_local_files(
 # load_from_website(urls=website_url)

 # Query
-result = query("Write a comprehensive report about Milvus.")  # Your question here
+result = query("Write a comprehensive report about Milvus.", max_iter=1)  # Your question here
