优化摘要配置加载与文档输出兼容性,并补充本地配置忽略规则。

通过引入 config.yaml.example 和环境变量覆盖提升可配置性,同时统一 Word 默认中文字体并忽略本地 config.yaml,避免敏感信息误提交。

Made-with: Cursor
This commit is contained in:
Oo
2026-03-26 09:39:07 +08:00
parent dbe9ba3629
commit d257cbaed3
4 changed files with 86 additions and 11 deletions

View File

@@ -3,12 +3,49 @@
使用大模型生成文档摘要
"""
import os
import re
from openai import OpenAI
# API configuration (legacy hard-coded values).
# SECURITY NOTE(review): this API key is committed to version control in
# plain text and must be treated as compromised — revoke and rotate it,
# and rely on the config.yaml / environment-variable loading instead.
API_BASE_URL = "https://yiming.zeroerr.team/v1"
API_KEY = "sk-LX1g8KkG61S6eUaVD567C0C187D4452c90F9E6985cDf3586"
MODEL = "Yiming"
def _load_llm_config() -> dict:
    """Load LLM settings from config.yaml, with environment-variable overrides.

    Resolution order, lowest to highest priority:
    built-in defaults -> the ``llm:`` section of the project-root
    ``config.yaml`` -> ``ZEROERR_LLM_*`` environment variables.

    Returns:
        dict with keys ``base_url``, ``api_key``, ``model`` (str) and
        ``max_tokens`` (int). Never raises: any config-file or env-var
        problem degrades to the defaults.
    """
    config = {
        "base_url": "https://yiming.zeroerr.team/v1",
        "api_key": "",
        "model": "minimax-2.5",
        "max_tokens": 40960,
    }
    # Try config.yaml at the project root (same layout convention as 0209).
    config_path = os.path.join(os.path.dirname(__file__), "..", "config.yaml")
    if os.path.exists(config_path):
        try:
            # Local import: PyYAML is only required when the file exists.
            import yaml
            with open(config_path, "r", encoding="utf-8") as f:
                data = yaml.safe_load(f) or {}
            # A bare "llm:" key yields None; a scalar value is not usable —
            # only merge when we actually got a mapping.
            llm = data.get("llm") or {}
            if isinstance(llm, dict):
                # Skip empty/None values so defaults survive a sparse config.
                config.update({k: v for k, v in llm.items() if v})
        except Exception:
            # Best-effort: silently fall back to defaults / env vars on any
            # read or parse error.
            pass
    # Environment variables take precedence over file values and defaults.
    config["base_url"] = os.environ.get("ZEROERR_LLM_BASE_URL", config["base_url"])
    config["api_key"] = os.environ.get("ZEROERR_LLM_API_KEY", config["api_key"])
    config["model"] = os.environ.get("ZEROERR_LLM_MODEL", config["model"])
    try:
        config["max_tokens"] = int(
            os.environ.get("ZEROERR_LLM_MAX_TOKENS", config["max_tokens"])
        )
    except (TypeError, ValueError):
        # A malformed override must not crash module import; keep the default.
        config["max_tokens"] = 40960
    return config


_LLM_CONFIG = _load_llm_config()
API_BASE_URL = _LLM_CONFIG["base_url"]
API_KEY = _LLM_CONFIG["api_key"]
MODEL = _LLM_CONFIG["model"]
MAX_TOKENS = _LLM_CONFIG["max_tokens"]
def generate_abstract(all_pages: list[dict], category_name: str, index_url: str = None) -> str:
@@ -26,6 +63,10 @@ def generate_abstract(all_pages: list[dict], category_name: str, index_url: str
if not all_pages:
return ""
if not API_KEY:
print(" 警告: 未设置 ZEROERR_LLM_API_KEY跳过摘要生成")
return ""
try:
# 构建文档内容(用于生成摘要)
# 只使用标题和部分内容,避免内容过长
@@ -62,12 +103,13 @@ def generate_abstract(all_pages: list[dict], category_name: str, index_url: str
response = client.chat.completions.create(
model=MODEL,
temperature=0.3, # 使用较低的温度值,保证摘要的准确性
messages=[
{"role": "user", "content": prompt}
]
max_tokens=MAX_TOKENS,
messages=[{"role": "user", "content": prompt}],
)
abstract_text = response.choices[0].message.content.strip()
# 过滤掉 <think>...</think> 推理过程
abstract_text = re.sub(r"<think>.*?</think>\s*", "", abstract_text, flags=re.DOTALL).strip()
# 构建链接列表
links_section = "\n\n**相关链接:**\n\n"