Import 9 alphaear finance skills

- alphaear-deepear-lite: DeepEar Lite API integration
- alphaear-logic-visualizer: Draw.io XML finance diagrams
- alphaear-news: Real-time finance news (10+ sources)
- alphaear-predictor: Kronos time-series forecasting
- alphaear-reporter: Professional financial reports
- alphaear-search: Web search + local RAG
- alphaear-sentiment: FinBERT/LLM sentiment analysis
- alphaear-signal-tracker: Signal evolution tracking
- alphaear-stock: A-Share/HK/US stock data

Updates:
- All scripts updated to use the universal .env path
- Added JINA_API_KEY, LLM_*, and DEEPSEEK_API_KEY to .env.example (illustrative excerpt below)
- Updated load_dotenv() to use ~/.config/opencode/.env
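
For reference, a minimal sketch of the new .env.example entries. The placeholder values are illustrative; only LLM_PROVIDER, LLM_MODEL, and LLM_HOST (plus the REASONING_MODEL_* / TOOL_MODEL_* overrides in router.py) are names confirmed by the code below:

# ~/.config/opencode/.env (copied from .env.example)
JINA_API_KEY=<your-jina-key>
DEEPSEEK_API_KEY=<your-deepseek-key>
LLM_PROVIDER=ust        # default used by capability.py if unset
LLM_MODEL=Qwen
LLM_HOST=               # optional provider base URL
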
Author: Kunthawat Greethong
Date: 2026-03-27 10:11:37 +07:00
Parent: 7edf5bc4d0
Commit: 58f9380ec4
149 changed files with 26867 additions and 0 deletions


@@ -0,0 +1,85 @@
import os
from typing import Dict
from agno.agent import Agent
from agno.models.base import Model
from loguru import logger
from ..llm.factory import get_model


def test_tool_call_support(model: Model) -> bool:
    """
    Test whether the model supports native tool calls (function calling)
    by trying to run a simple weather-lookup tool.
    """
    def get_current_weather(location: str):
        """Get the weather for the given location."""
        return f"The weather in {location} is sunny, 25°C."
test_agent = Agent(
model=model,
tools=[get_current_weather],
        instructions="Call the tool to check the weather in Beijing and return the tool output directly.",
)
try:
        # Run a simple task and observe whether a tool call is triggered.
        response = test_agent.run("What's the weather like in Beijing?")
        # Check whether the response contains tool_calls. Agno's RunResponse
        # carries the message history, so inspect the messages.
has_tool_call = False
for msg in response.messages:
if hasattr(msg, "tool_calls") and msg.tool_calls:
has_tool_call = True
break
if has_tool_call:
logger.info(f"✅ Model {model.id} supports native tool calling.")
return True
else:
            # No tool_calls, but a correct answer may mean the model simulated the
            # tool call in plain text (ReAct style) or skipped the tool entirely.
            # To count as native support we require the structured tool_calls field.
logger.warning(
f"⚠️ Model {model.id} did NOT use native tool calling structure."
)
return False
except Exception as e:
logger.error(f"❌ Error testing tool call for {model.id}: {e}")
return False


class ModelCapabilityRegistry:
    """
    Registry of model capabilities; caches and manages capability-test
    results per model.
    """
    _cache: Dict[str, Dict[str, bool]] = {}
@classmethod
def get_capabilities(
cls, provider: str, model_id: str, **kwargs
) -> Dict[str, bool]:
key = f"{provider}:{model_id}"
if key not in cls._cache:
logger.info(f"🔍 Testing capabilities for {key}...")
model = get_model(provider, model_id, **kwargs)
supports_tool_call = test_tool_call_support(model)
cls._cache[key] = {"supports_tool_call": supports_tool_call}
return cls._cache[key]
if __name__ == "__main__":
    from dotenv import load_dotenv

    load_dotenv(os.path.expanduser("~/.config/opencode/.env"))
    # Probe the currently configured model.
p = os.getenv("LLM_PROVIDER", "ust")
m = os.getenv("LLM_MODEL", "Qwen")
print(f"Testing {p}/{m}...")
res = ModelCapabilityRegistry.get_capabilities(p, m)
print(f"Result: {res}")
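
A quick interactive check of the registry. The import path is an assumption based on the relative imports above, and the provider/model pair is just the default from the __main__ block:

import os
from dotenv import load_dotenv

load_dotenv(os.path.expanduser("~/.config/opencode/.env"))

# Adjust the import to your package layout; "llm.capability" is assumed here.
from llm.capability import ModelCapabilityRegistry

caps = ModelCapabilityRegistry.get_capabilities("ust", "Qwen")  # runs the live probe
caps = ModelCapabilityRegistry.get_capabilities("ust", "Qwen")  # served from _cache, no new probe
print(caps)  # e.g. {'supports_tool_call': True}
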


@@ -0,0 +1,114 @@
import os
from agno.models.openai import OpenAIChat
from agno.models.ollama import Ollama
from agno.models.dashscope import DashScope
from agno.models.deepseek import DeepSeek
from agno.models.openrouter import OpenRouter
def get_model(model_provider: str, model_id: str, **kwargs):
"""
Factory to get the appropriate LLM model.
    Args:
        model_provider: "openai", "ollama", "deepseek", "dashscope",
            "openrouter", "zai", or "ust"
        model_id: The specific model ID (e.g., "gpt-4o", "llama3", "deepseek-chat")
        **kwargs: Additional arguments for the model constructor
"""
if model_provider == "openai":
return OpenAIChat(id=model_id, **kwargs)
elif model_provider == "ollama":
return Ollama(id=model_id, **kwargs)
elif model_provider == "deepseek":
# DeepSeek is OpenAI compatible
api_key = os.getenv("DEEPSEEK_API_KEY")
if not api_key:
print("Warning: DEEPSEEK_API_KEY not set.")
return DeepSeek(
id=model_id,
api_key=api_key,
**kwargs
)
elif model_provider == "dashscope":
api_key = os.getenv("DASHSCOPE_API_KEY")
if not api_key:
print("Warning: DASHSCOPE_API_KEY not set.")
return DashScope(
id=model_id,
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
api_key=api_key,
**kwargs
)
    elif model_provider == "openrouter":
        api_key = os.getenv("OPENROUTER_API_KEY")
        if not api_key:
            print("Warning: OPENROUTER_API_KEY not set.")
        return OpenRouter(
            id=model_id,
            api_key=api_key,
            **kwargs
        )
    elif model_provider == "zai":
        api_key = os.getenv("ZAI_KEY_API")
        if not api_key:
            print("Warning: ZAI_KEY_API not set.")
        # As with "ust" below, some endpoints expect the standard OpenAI role
        # names rather than Agno's default mapping (which maps "system" ->
        # "developer"). Provide an explicit role_map to ensure compatibility.
default_role_map = {
"system": "system",
"user": "user",
"assistant": "assistant",
"tool": "tool",
"model": "assistant",
}
# Allow callers to override role_map via kwargs, otherwise use default
role_map = kwargs.pop("role_map", default_role_map)
return OpenAIChat(
id=model_id,
base_url="https://api.z.ai/api/paas/v4",
api_key=api_key,
timeout=60,
role_map=role_map,
            extra_body={"enable_thinking": False},  # TODO: expose a setting to toggle thinking
**kwargs
)
    elif model_provider == "ust":
        api_key = os.getenv("UST_KEY_API")
        if not api_key:
            print("Warning: UST_KEY_API not set.")
# Some UST-compatible endpoints expect the standard OpenAI role names
# (e.g. "system", "user", "assistant") rather than Agno's default
# mapping which maps "system" -> "developer". Provide an explicit
# role_map to ensure compatibility.
default_role_map = {
"system": "system",
"user": "user",
"assistant": "assistant",
"tool": "tool",
"model": "assistant",
}
# Allow callers to override role_map via kwargs, otherwise use default
role_map = kwargs.pop("role_map", default_role_map)
return OpenAIChat(
id=model_id,
api_key=api_key,
base_url=os.getenv("UST_URL"),
role_map=role_map,
            extra_body={"enable_thinking": False},  # TODO: expose a setting to toggle thinking
**kwargs
)
else:
raise ValueError(f"Unknown model provider: {model_provider}")
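
For illustration, a minimal usage sketch of the factory. The model IDs are the docstring examples, and each branch expects its API key in the environment:

# Assumes DEEPSEEK_API_KEY is set in the environment.
chat_model = get_model("deepseek", "deepseek-chat")

# Extra kwargs are forwarded to the model constructor, e.g. an Ollama host.
local_model = get_model("ollama", "llama3", host="http://localhost:11434")

# Unknown providers raise ValueError.
try:
    get_model("not-a-provider", "whatever")
except ValueError as e:
    print(e)  # Unknown model provider: not-a-provider
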


@@ -0,0 +1,81 @@
import os
from agno.models.base import Model
from loguru import logger
from dotenv import load_dotenv
from ..llm.factory import get_model
from ..llm.capability import ModelCapabilityRegistry
# Load environment variables from universal .env
load_dotenv(os.path.expanduser("~/.config/opencode/.env"))


class ModelRouter:
    """
    Model router.

    Responsibilities:
    1. Manage a "reasoning/writing" model and a "tool-calling" model.
    2. Automatically select the appropriate model for each task.
    """
def __init__(self):
        # Defaults come from environment variables.
self.reasoning_provider = os.getenv(
"REASONING_MODEL_PROVIDER", os.getenv("LLM_PROVIDER", "openai")
)
self.reasoning_id = os.getenv(
"REASONING_MODEL_ID", os.getenv("LLM_MODEL", "gpt-4o")
)
self.reasoning_host = os.getenv("REASONING_MODEL_HOST", os.getenv("LLM_HOST"))
self.tool_provider = os.getenv("TOOL_MODEL_PROVIDER", self.reasoning_provider)
self.tool_id = os.getenv("TOOL_MODEL_ID", self.reasoning_id)
self.tool_host = os.getenv("TOOL_MODEL_HOST", self.reasoning_host)
self._reasoning_model = None
self._tool_model = None
logger.info(
f"🤖 ModelRouter initialized: Reasoning={self.reasoning_id} ({self.reasoning_host or 'default'}), Tool={self.tool_id} ({self.tool_host or 'default'})"
)
def get_reasoning_model(self, **kwargs) -> Model:
if not self._reasoning_model:
            # Prefer the host configured on the router.
if self.reasoning_host and "host" not in kwargs:
kwargs["host"] = self.reasoning_host
self._reasoning_model = get_model(
self.reasoning_provider, self.reasoning_id, **kwargs
)
return self._reasoning_model
def get_tool_model(self, **kwargs) -> Model:
if not self._tool_model:
            # Prefer the host configured on the router.
            if self.tool_host and "host" not in kwargs:
                kwargs["host"] = self.tool_host
            # Verify that the tool model actually supports native tool calls.
caps = ModelCapabilityRegistry.get_capabilities(
self.tool_provider, self.tool_id, **kwargs
)
if not caps["supports_tool_call"]:
logger.warning(
f"⚠️ Configured tool model {self.tool_id} might not support native tool calls! Consider using ReAct mode or a different model."
)
self._tool_model = get_model(self.tool_provider, self.tool_id, **kwargs)
return self._tool_model
    def get_model_for_agent(self, has_tools: bool = False, **kwargs) -> Model:
        """
        Return the appropriate model depending on whether the Agent has tools.
        """
if has_tools:
return self.get_tool_model(**kwargs)
return self.get_reasoning_model(**kwargs)
# Global singleton.
router = ModelRouter()
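
A minimal sketch of how an agent might consume the router, assuming the environment variables above are configured; lookup_price is a hypothetical placeholder tool:

from agno.agent import Agent

def lookup_price(symbol: str):
    """Hypothetical placeholder tool for illustration."""
    return f"{symbol}: 100.0"

# Agents with tools get the capability-checked tool model;
# plain writing agents get the reasoning model.
tool_agent = Agent(
    model=router.get_model_for_agent(has_tools=True),
    tools=[lookup_price],
)
writer_agent = Agent(model=router.get_model_for_agent(has_tools=False))
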