Compare commits

10 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 52284ce13c | |
| | 7eaf4d995f | |
| | da84ef43aa | |
| | 90b23e72f5 | |
| | 570644d939 | |
| | 99789f9cd1 | |
| | a879868396 | |
| | 0013415378 | |
| | 0fdfd35867 | |
| | e994e56c23 | |
**.python-version** (new file, 1 line)

```diff
@@ -0,0 +1 @@
+3.10
```
**README.md** (27 lines changed)

````diff
@@ -11,6 +11,18 @@
 <a href="https://github.com/TauricResearch/" target="_blank"><img alt="Community" src="https://img.shields.io/badge/Join_GitHub_Community-TauricResearch-14C290?logo=discourse"/></a>
 </div>
+
+<div align="center">
+<!-- Keep these links. Translations will automatically update with the README. -->
+<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=de">Deutsch</a> | 
+<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=es">Español</a> | 
+<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=fr">français</a> | 
+<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ja">日本語</a> | 
+<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ko">한국어</a> | 
+<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=pt">Português</a> | 
+<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ru">Русский</a> | 
+<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=zh">中文</a>
+</div>
+
+---
+
 # TradingAgents: Multi-Agents LLM Financial Trading Framework
@@ -19,6 +31,16 @@
 >
 > So we decided to fully open-source the framework. Looking forward to building impactful projects with you!
 
+<div align="center">
+<a href="https://www.star-history.com/#TauricResearch/TradingAgents&Date">
+  <picture>
+    <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date&theme=dark" />
+    <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" />
+    <img alt="TradingAgents Star History" src="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" style="width: 80%; height: auto;" />
+  </picture>
+</a>
+</div>
+
 <div align="center">
 
 🚀 [TradingAgents](#tradingagents-framework) | ⚡ [Installation & CLI](#installation-and-cli) | 🎬 [Demo](https://www.youtube.com/watch?v=90gr5lwjIho) | 📦 [Package Usage](#tradingagents-package) | 🤝 [Contributing](#contributing) | 📄 [Citation](#citation)
@@ -92,7 +114,7 @@ pip install -r requirements.txt
 
 ### Required APIs
 
-You will also need the FinnHub API and EODHD API for financial data. All of our code is implemented with the free tier.
+You will also need the FinnHub API for financial data. All of our code is implemented with the free tier.
 
 ```bash
 export FINNHUB_API_KEY=$YOUR_FINNHUB_API_KEY
 ```
@@ -136,8 +158,9 @@ To use TradingAgents inside your code, you can import the `tradingagents` module
 
 ```python
 from tradingagents.graph.trading_graph import TradingAgentsGraph
+from tradingagents.default_config import DEFAULT_CONFIG
 
-ta = TradingAgentsGraph(debug=True, config=config)
+ta = TradingAgentsGraph(debug=True, config=DEFAULT_CONFIG.copy())
 
 # forward propagate
 _, decision = ta.propagate("NVDA", "2024-05-10")
````
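The README fix matters because the old snippet referenced a `config` variable that was never defined. A minimal end-to-end sketch of the corrected usage, assuming the package is installed and `FINNHUB_API_KEY` plus `OPENAI_API_KEY` are exported:

```python
# Minimal sketch of the corrected README usage; assumes the package is
# installed and the API keys above are set in the environment.
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

# Copy the defaults so edits don't mutate the shared DEFAULT_CONFIG dict.
config = DEFAULT_CONFIG.copy()

ta = TradingAgentsGraph(debug=True, config=config)

# Forward propagate: returns the final state and the trade decision.
_, decision = ta.propagate("NVDA", "2024-05-10")
print(decision)
```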
**cli/main.py** (67 lines changed)

```diff
@@ -97,7 +97,7 @@ class MessageBuffer:
             if content is not None:
                 latest_section = section
                 latest_content = content
 
         if latest_section and latest_content:
             # Format the current section for display
             section_titles = {
@@ -295,10 +295,27 @@ def update_display(layout, spinner_text=None):
 
     # Add regular messages
     for timestamp, msg_type, content in message_buffer.messages:
+        # Convert content to string if it's not already
+        content_str = content
+        if isinstance(content, list):
+            # Handle list of content blocks (Anthropic format)
+            text_parts = []
+            for item in content:
+                if isinstance(item, dict):
+                    if item.get('type') == 'text':
+                        text_parts.append(item.get('text', ''))
+                    elif item.get('type') == 'tool_use':
+                        text_parts.append(f"[Tool: {item.get('name', 'unknown')}]")
+                else:
+                    text_parts.append(str(item))
+            content_str = ' '.join(text_parts)
+        elif not isinstance(content_str, str):
+            content_str = str(content)
+
         # Truncate message content if too long
-        if isinstance(content, str) and len(content) > 200:
-            content = content[:197] + "..."
-        all_messages.append((timestamp, msg_type, content))
+        if len(content_str) > 200:
+            content_str = content_str[:197] + "..."
+        all_messages.append((timestamp, msg_type, content_str))
 
     # Sort by timestamp
     all_messages.sort(key=lambda x: x[0])
```
```diff
@@ -444,20 +461,30 @@ def get_user_selections():
         )
     )
     selected_research_depth = select_research_depth()
 
-    # Step 5: Thinking agents
+    # Step 5: OpenAI backend
     console.print(
         create_question_box(
-            "Step 5: Thinking Agents", "Select your thinking agents for analysis"
+            "Step 5: OpenAI backend", "Select which service to talk to"
        )
    )
-    selected_shallow_thinker = select_shallow_thinking_agent()
-    selected_deep_thinker = select_deep_thinking_agent()
+    selected_llm_provider, backend_url = select_llm_provider()
+
+    # Step 6: Thinking agents
+    console.print(
+        create_question_box(
+            "Step 6: Thinking Agents", "Select your thinking agents for analysis"
+        )
+    )
+    selected_shallow_thinker = select_shallow_thinking_agent(selected_llm_provider)
+    selected_deep_thinker = select_deep_thinking_agent(selected_llm_provider)
 
     return {
         "ticker": selected_ticker,
         "analysis_date": analysis_date,
         "analysts": selected_analysts,
         "research_depth": selected_research_depth,
+        "llm_provider": selected_llm_provider.lower(),
+        "backend_url": backend_url,
         "shallow_thinker": selected_shallow_thinker,
         "deep_thinker": selected_deep_thinker,
     }
```
```diff
@@ -683,6 +710,24 @@ def update_research_team_status(status):
     for agent in research_team:
         message_buffer.update_agent_status(agent, status)
 
+def extract_content_string(content):
+    """Extract string content from various message formats."""
+    if isinstance(content, str):
+        return content
+    elif isinstance(content, list):
+        # Handle Anthropic's list format
+        text_parts = []
+        for item in content:
+            if isinstance(item, dict):
+                if item.get('type') == 'text':
+                    text_parts.append(item.get('text', ''))
+                elif item.get('type') == 'tool_use':
+                    text_parts.append(f"[Tool: {item.get('name', 'unknown')}]")
+            else:
+                text_parts.append(str(item))
+        return ' '.join(text_parts)
+    else:
+        return str(content)
+
 def run_analysis():
     # First get all user selections
```
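A quick sanity check of the helper's behavior on the three shapes it handles (plain string, Anthropic-style block list, anything else); the sample payload below is illustrative, not taken from a real API response, and assumes `extract_content_string` from the patch above is in scope:

```python
# Illustrative inputs only; the block list mimics Anthropic's content format.
assert extract_content_string("BUY") == "BUY"

blocks = [
    {"type": "text", "text": "Checking fundamentals..."},
    {"type": "tool_use", "name": "get_fundamentals_openai", "id": "t1", "input": {}},
]
# Tool-use blocks are rendered as a short tag instead of their raw payload.
assert extract_content_string(blocks) == "Checking fundamentals... [Tool: get_fundamentals_openai]"

# Anything that is neither a string nor a list falls through to str().
assert extract_content_string(42) == "42"
```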
```diff
@@ -694,6 +739,8 @@ def run_analysis():
     config["max_risk_discuss_rounds"] = selections["research_depth"]
     config["quick_think_llm"] = selections["shallow_thinker"]
     config["deep_think_llm"] = selections["deep_thinker"]
+    config["backend_url"] = selections["backend_url"]
+    config["llm_provider"] = selections["llm_provider"].lower()
 
     # Initialize the graph
     graph = TradingAgentsGraph(
```
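After this wiring, every CLI selection is reflected in the config that `TradingAgentsGraph` consumes. A sketch of the resulting keys for a hypothetical Anthropic session (values are illustrative examples, not defaults):

```python
# Example of what `config` carries after the CLI wiring above,
# for a hypothetical Anthropic run; all values are illustrative.
config = {
    "max_debate_rounds": 1,
    "max_risk_discuss_rounds": 1,
    "quick_think_llm": "claude-3-5-haiku-latest",
    "deep_think_llm": "claude-3-7-sonnet-latest",
    "backend_url": "https://api.anthropic.com/",
    "llm_provider": "anthropic",
}
```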
```diff
@@ -754,14 +801,14 @@ def run_analysis():
 
             # Extract message content and type
             if hasattr(last_message, "content"):
-                content = last_message.content
+                content = extract_content_string(last_message.content)  # Use the helper function
                 msg_type = "Reasoning"
             else:
                 content = str(last_message)
                 msg_type = "System"
 
             # Add message to buffer
-            message_buffer.add_message(msg_type, content)
+            message_buffer.add_message(msg_type, content)
 
             # If it's a tool call, add it to tool calls
             if hasattr(last_message, "tool_calls"):
```
**cli/utils.py** (119 lines changed)

```diff
@@ -122,22 +122,43 @@ def select_research_depth() -> int:
     return choice
 
 
-def select_shallow_thinking_agent() -> str:
+def select_shallow_thinking_agent(provider) -> str:
     """Select shallow thinking llm engine using an interactive selection."""
 
     # Define shallow thinking llm engine options with their corresponding model names
-    SHALLOW_AGENT_OPTIONS = [
-        ("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
-        ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
-        ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
-        ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
-    ]
+    SHALLOW_AGENT_OPTIONS = {
+        "openai": [
+            ("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
+            ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
+            ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
+            ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
+        ],
+        "anthropic": [
+            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
+            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
+            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
+            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
+        ],
+        "google": [
+            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
+            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
+            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
+        ],
+        "openrouter": [
+            ("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
+            ("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
+            ("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"),
+        ],
+        "ollama": [
+            ("llama3.2 local", "llama3.2"),
+        ]
+    }
 
     choice = questionary.select(
         "Select Your [Quick-Thinking LLM Engine]:",
         choices=[
             questionary.Choice(display, value=value)
-            for display, value in SHALLOW_AGENT_OPTIONS
+            for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(
```
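Note that `SHALLOW_AGENT_OPTIONS[provider.lower()]` raises a bare `KeyError` for an unrecognized provider. A defensive variant (an assumption about how one might harden this, not what the patch does) would surface the valid choices instead:

```python
# Hedged sketch: guard the provider lookup rather than letting KeyError escape.
# SHALLOW_AGENT_OPTIONS is the dict defined above; this wrapper is hypothetical.
def options_for(provider: str) -> list[tuple[str, str]]:
    try:
        return SHALLOW_AGENT_OPTIONS[provider.lower()]
    except KeyError:
        raise ValueError(
            f"Unknown LLM provider {provider!r}; "
            f"expected one of {sorted(SHALLOW_AGENT_OPTIONS)}"
        ) from None
```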
```diff
@@ -158,25 +179,47 @@ def select_shallow_thinking_agent() -> str:
     return choice
 
 
-def select_deep_thinking_agent() -> str:
+def select_deep_thinking_agent(provider) -> str:
     """Select deep thinking llm engine using an interactive selection."""
 
     # Define deep thinking llm engine options with their corresponding model names
-    DEEP_AGENT_OPTIONS = [
-        ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
-        ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
-        ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
-        ("o4-mini - Specialized reasoning model (compact)", "o4-mini"),
-        ("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
-        ("o3 - Full advanced reasoning model", "o3"),
-        ("o1 - Premier reasoning and problem-solving model", "o1"),
-    ]
-
+    DEEP_AGENT_OPTIONS = {
+        "openai": [
+            ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
+            ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
+            ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
+            ("o4-mini - Specialized reasoning model (compact)", "o4-mini"),
+            ("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
+            ("o3 - Full advanced reasoning model", "o3"),
+            ("o1 - Premier reasoning and problem-solving model", "o1"),
+        ],
+        "anthropic": [
+            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
+            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
+            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
+            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
+            ("Claude Opus 4 - Most powerful Anthropic model", " claude-opus-4-0"),
+        ],
+        "google": [
+            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
+            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
+            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
+            ("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
+        ],
+        "openrouter": [
+            ("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
+            ("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
+        ],
+        "ollama": [
+            ("qwen3", "qwen3"),
+        ]
+    }
+
     choice = questionary.select(
         "Select Your [Deep-Thinking LLM Engine]:",
         choices=[
             questionary.Choice(display, value=value)
-            for display, value in DEEP_AGENT_OPTIONS
+            for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(
```
```diff
@@ -193,3 +236,39 @@ def select_deep_thinking_agent() -> str:
         exit(1)
 
     return choice
+
+def select_llm_provider() -> tuple[str, str]:
+    """Select the OpenAI api url using interactive selection."""
+    # Define OpenAI api options with their corresponding endpoints
+    BASE_URLS = [
+        ("OpenAI", "https://api.openai.com/v1"),
+        ("Anthropic", "https://api.anthropic.com/"),
+        ("Google", "https://generativelanguage.googleapis.com/v1"),
+        ("Openrouter", "https://openrouter.ai/api/v1"),
+        ("Ollama", "http://localhost:11434/v1"),
+    ]
+
+    choice = questionary.select(
+        "Select your LLM Provider:",
+        choices=[
+            questionary.Choice(display, value=(display, value))
+            for display, value in BASE_URLS
+        ],
+        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
+        style=questionary.Style(
+            [
+                ("selected", "fg:magenta noinherit"),
+                ("highlighted", "fg:magenta noinherit"),
+                ("pointer", "fg:magenta noinherit"),
+            ]
+        ),
+    ).ask()
+
+    if choice is None:
+        console.print("\n[red]no OpenAI backend selected. Exiting...[/red]")
+        exit(1)
+
+    display_name, url = choice
+    print(f"You selected: {display_name}\tURL: {url}")
+
+    return display_name, url
```
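Because each `questionary.Choice` carries the whole `(display, value)` tuple as its value, one `.ask()` returns both the provider name and its endpoint. A sketch of the call site, mirroring `get_user_selections` in cli/main.py above:

```python
# Sketch of the call site; assumes select_llm_provider from the patch is in scope.
provider, backend_url = select_llm_provider()
# e.g. provider == "Anthropic", backend_url == "https://api.anthropic.com/"
```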
**main.py** (6 lines changed)

```diff
@@ -3,8 +3,10 @@ from tradingagents.default_config import DEFAULT_CONFIG
 
 # Create a custom config
 config = DEFAULT_CONFIG.copy()
-config["deep_think_llm"] = "gpt-4.1-nano"  # Use a different model
-config["quick_think_llm"] = "gpt-4.1-nano"  # Use a different model
+config["llm_provider"] = "google"  # Use a different model
+config["backend_url"] = "https://generativelanguage.googleapis.com/v1"  # Use a different backend
+config["deep_think_llm"] = "gemini-2.0-flash"  # Use a different model
+config["quick_think_llm"] = "gemini-2.0-flash"  # Use a different model
 config["max_debate_rounds"] = 1  # Increase debate rounds
 config["online_tools"] = True  # Increase debate rounds
```
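For this Google path, the credential comes from the environment rather than from `backend_url`: nothing in the diff passes the URL or a key to `ChatGoogleGenerativeAI`. A hedged setup sketch; the env-var name is langchain-google-genai's convention, not something set anywhere in this changeset:

```python
import os

# Assumption: langchain-google-genai reads GOOGLE_API_KEY from the environment;
# this diff itself configures neither the key nor a Google base URL.
os.environ.setdefault("GOOGLE_API_KEY", "<your-key-here>")
```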
**pyproject.toml** (new file, 34 lines)

```diff
@@ -0,0 +1,34 @@
+[project]
+name = "tradingagents"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "akshare>=1.16.98",
+    "backtrader>=1.9.78.123",
+    "chainlit>=2.5.5",
+    "chromadb>=1.0.12",
+    "eodhd>=1.0.32",
+    "feedparser>=6.0.11",
+    "finnhub-python>=2.4.23",
+    "langchain-anthropic>=0.3.15",
+    "langchain-experimental>=0.3.4",
+    "langchain-google-genai>=2.1.5",
+    "langchain-openai>=0.3.23",
+    "langgraph>=0.4.8",
+    "pandas>=2.3.0",
+    "parsel>=1.10.0",
+    "praw>=7.8.1",
+    "pytz>=2025.2",
+    "questionary>=2.1.0",
+    "redis>=6.2.0",
+    "requests>=2.32.4",
+    "rich>=14.0.0",
+    "setuptools>=80.9.0",
+    "stockstats>=0.6.5",
+    "tqdm>=4.67.1",
+    "tushare>=1.4.21",
+    "typing-extensions>=4.14.0",
+    "yfinance>=0.2.63",
+]
```
Fundamentals analyst:

```diff
@@ -51,9 +51,14 @@ def create_fundamentals_analyst(llm, toolkit):
 
         result = chain.invoke(state["messages"])
 
+        report = ""
+
+        if len(result.tool_calls) == 0:
+            report = result.content
+
         return {
             "messages": [result],
-            "fundamentals_report": result.content,
+            "fundamentals_report": report,
         }
 
     return fundamentals_analyst_node
```
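The same guard lands in all four analyst nodes below: while the LLM is still issuing tool calls, `result.content` is empty or partial, so the report is only captured on the final, tool-free turn. A condensed sketch of the pattern:

```python
def final_report(result) -> str:
    """Return the analyst's report only when the model issued no tool calls.

    Sketch of the guard added to each analyst node; `result` stands in for the
    AIMessage returned by chain.invoke(...). On intermediate tool-calling turns
    the report stays empty, so a non-final message never overwrites the state.
    """
    return result.content if len(result.tool_calls) == 0 else ""
```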
Market analyst:

```diff
@@ -76,9 +76,14 @@ Volume-Based Indicators:
 
         result = chain.invoke(state["messages"])
 
+        report = ""
+
+        if len(result.tool_calls) == 0:
+            report = result.content
+
         return {
             "messages": [result],
-            "market_report": result.content,
+            "market_report": report,
         }
 
     return market_analyst_node
```
News analyst:

```diff
@@ -47,9 +47,14 @@ def create_news_analyst(llm, toolkit):
         chain = prompt | llm.bind_tools(tools)
         result = chain.invoke(state["messages"])
 
+        report = ""
+
+        if len(result.tool_calls) == 0:
+            report = result.content
+
         return {
             "messages": [result],
-            "news_report": result.content,
+            "news_report": report,
         }
 
     return news_analyst_node
```
Social media analyst:

```diff
@@ -47,9 +47,14 @@ def create_social_media_analyst(llm, toolkit):
 
         result = chain.invoke(state["messages"])
 
+        report = ""
+
+        if len(result.tool_calls) == 0:
+            report = result.content
+
         return {
             "messages": [result],
-            "sentiment_report": result.content,
+            "sentiment_report": report,
         }
 
     return social_media_analyst_node
```
Agent message cleanup (`create_msg_delete`):

```diff
@@ -12,14 +12,22 @@ from dateutil.relativedelta import relativedelta
 from langchain_openai import ChatOpenAI
 import tradingagents.dataflows.interface as interface
 from tradingagents.default_config import DEFAULT_CONFIG
+from langchain_core.messages import HumanMessage
 
 
 def create_msg_delete():
     def delete_messages(state):
-        """To prevent message history from overflowing, regularly clear message history after a stage of the pipeline is done"""
+        """Clear messages and add placeholder for Anthropic compatibility"""
         messages = state["messages"]
-        return {"messages": [RemoveMessage(id=m.id) for m in messages]}
+
+        # Remove all messages
+        removal_operations = [RemoveMessage(id=m.id) for m in messages]
+
+        # Add a minimal placeholder message
+        placeholder = HumanMessage(content="Continue")
+
+        return {"messages": removal_operations + [placeholder]}
 
     return delete_messages
```
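The placeholder matters because Anthropic's Messages API rejects an empty message list (and expects the conversation to open with a user turn), so wiping history completely would break the next call. A minimal sketch of what the node returns, runnable without the graph:

```python
from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage

# Sketch: what delete_messages(state) produces for a two-message history.
state = {"messages": [HumanMessage(content="hi", id="m1"),
                      AIMessage(content="analysis...", id="m2")]}

removal_operations = [RemoveMessage(id=m.id) for m in state["messages"]]
placeholder = HumanMessage(content="Continue")
update = {"messages": removal_operations + [placeholder]}
# LangGraph's add_messages reducer applies the removals, then appends the
# placeholder, so the next turn never starts from an empty history.
```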
`FinancialSituationMemory`:

```diff
@@ -1,19 +1,23 @@
 import chromadb
 from chromadb.config import Settings
 from openai import OpenAI
 import numpy as np
 
 
 class FinancialSituationMemory:
-    def __init__(self, name):
-        self.client = OpenAI()
+    def __init__(self, name, config):
+        if config["backend_url"] == "http://localhost:11434/v1":
+            self.embedding = "nomic-embed-text"
+        else:
+            self.embedding = "text-embedding-3-small"
+        self.client = OpenAI()
         self.chroma_client = chromadb.Client(Settings(allow_reset=True))
         self.situation_collection = self.chroma_client.create_collection(name=name)
 
     def get_embedding(self, text):
         """Get OpenAI embedding for a text"""
 
         response = self.client.embeddings.create(
-            model="text-embedding-ada-002", input=text
+            model=self.embedding, input=text
        )
         return response.data[0].embedding
```
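A usage sketch under the new signature. Note that only the embedding model name switches on `backend_url` here: the `OpenAI()` client itself still reads `OPENAI_API_KEY`/`OPENAI_BASE_URL` from the environment, so an Ollama run would also need the base URL exported (an observation about this diff, not something it configures):

```python
from tradingagents.default_config import DEFAULT_CONFIG

config = DEFAULT_CONFIG.copy()
config["backend_url"] = "http://localhost:11434/v1"  # selects "nomic-embed-text"

# Requires a reachable embedding backend; the client still comes from OpenAI()
# and is not pointed at backend_url by this change.
memory = FinancialSituationMemory("bull_memory", config)
vector = memory.get_embedding("Rising rates with slowing consumer spending")
```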
Dataflows interface (`get_stock_news_openai`, `get_global_news_openai`, `get_fundamentals_openai`):

```diff
@@ -703,6 +703,7 @@ def get_YFin_data(
 
 
 def get_stock_news_openai(ticker, curr_date):
+    config = get_config()
     client = OpenAI()
 
     response = client.responses.create(
@@ -713,7 +714,7 @@ def get_stock_news_openai(ticker, curr_date):
                 "content": [
                     {
                         "type": "input_text",
-                        "text": f"Can you search Social Media for {ticker} on TSLA from 7 days before {curr_date} to {curr_date}? Make sure you only get the data posted during that period.",
+                        "text": f"Can you search Social Media for {ticker} from 7 days before {curr_date} to {curr_date}? Make sure you only get the data posted during that period.",
                     }
                 ],
             }
@@ -737,6 +738,7 @@ def get_stock_news_openai(ticker, curr_date):
 
 
 def get_global_news_openai(curr_date):
+    config = get_config()
     client = OpenAI()
 
     response = client.responses.create(
@@ -771,6 +773,7 @@ def get_global_news_openai(curr_date):
 
 
 def get_fundamentals_openai(ticker, curr_date):
+    config = get_config()
     client = OpenAI()
 
     response = client.responses.create(
```
`DEFAULT_CONFIG`:

```diff
@@ -8,8 +8,10 @@ DEFAULT_CONFIG = {
         "dataflows/data_cache",
     ),
     # LLM settings
+    "llm_provider": "openai",
     "deep_think_llm": "o4-mini",
     "quick_think_llm": "gpt-4o-mini",
+    "backend_url": "https://api.openai.com/v1",
     # Debate and discussion settings
     "max_debate_rounds": 1,
     "max_risk_discuss_rounds": 1,
```
`TradingAgentsGraph` initialization:

```diff
@@ -7,6 +7,9 @@ from datetime import date
 from typing import Dict, Any, Tuple, List, Optional
 
 from langchain_openai import ChatOpenAI
+from langchain_anthropic import ChatAnthropic
+from langchain_google_genai import ChatGoogleGenerativeAI
+
 from langgraph.prebuilt import ToolNode
 
 from tradingagents.agents import *
@@ -55,18 +58,26 @@ class TradingAgentsGraph:
         )
 
         # Initialize LLMs
-        self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"])
-        self.quick_thinking_llm = ChatOpenAI(
-            model=self.config["quick_think_llm"], temperature=0.1
-        )
+        if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
+            self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
+            self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
+        elif self.config["llm_provider"].lower() == "anthropic":
+            self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
+            self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
+        elif self.config["llm_provider"].lower() == "google":
+            self.deep_thinking_llm = ChatGoogleGenerativeAI(model=self.config["deep_think_llm"])
+            self.quick_thinking_llm = ChatGoogleGenerativeAI(model=self.config["quick_think_llm"])
+        else:
+            raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}")
 
         self.toolkit = Toolkit(config=self.config)
 
         # Initialize memories
-        self.bull_memory = FinancialSituationMemory("bull_memory")
-        self.bear_memory = FinancialSituationMemory("bear_memory")
-        self.trader_memory = FinancialSituationMemory("trader_memory")
-        self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory")
-        self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory")
+        self.bull_memory = FinancialSituationMemory("bull_memory", self.config)
+        self.bear_memory = FinancialSituationMemory("bear_memory", self.config)
+        self.trader_memory = FinancialSituationMemory("trader_memory", self.config)
+        self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config)
+        self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config)
 
         # Create tool nodes
         self.tool_nodes = self._create_tool_nodes()
```
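With this dispatch, switching providers becomes a pure config change. A hedged construction sketch for the Anthropic branch (model names are examples from the CLI option lists above; assumes `ANTHROPIC_API_KEY` is set in the environment):

```python
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

config = DEFAULT_CONFIG.copy()
config["llm_provider"] = "anthropic"
config["backend_url"] = "https://api.anthropic.com/"
config["deep_think_llm"] = "claude-3-7-sonnet-latest"
config["quick_think_llm"] = "claude-3-5-haiku-latest"

# Hits the ChatAnthropic branch of the dispatch; assumes ANTHROPIC_API_KEY is set.
ta = TradingAgentsGraph(debug=True, config=config)
```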