Merge branch 'TauricResearch:main' into save_results

This commit is contained in:
Huijae Lee
2025-06-25 08:43:19 +09:00
committed by GitHub
17 changed files with 5674 additions and 56 deletions

View File

@@ -22,7 +22,7 @@ def create_fundamentals_analyst(llm, toolkit):
system_message = (
"You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
- " Make sure to append a Makrdown table at the end of the report to organize key points in the report, organized and easy to read.",
+ " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.",
)
prompt = ChatPromptTemplate.from_messages(
@@ -51,9 +51,14 @@ def create_fundamentals_analyst(llm, toolkit):
result = chain.invoke(state["messages"])
report = ""
if len(result.tool_calls) == 0:
report = result.content
return {
"messages": [result],
"fundamentals_report": result.content,
"fundamentals_report": report,
}
return fundamentals_analyst_node

View File

@@ -76,9 +76,14 @@ Volume-Based Indicators:
result = chain.invoke(state["messages"])
report = ""
if len(result.tool_calls) == 0:
report = result.content
return {
"messages": [result],
"market_report": result.content,
"market_report": report,
}
return market_analyst_node

View File

@@ -47,9 +47,14 @@ def create_news_analyst(llm, toolkit):
chain = prompt | llm.bind_tools(tools)
result = chain.invoke(state["messages"])
report = ""
if len(result.tool_calls) == 0:
report = result.content
return {
"messages": [result],
"news_report": result.content,
"news_report": report,
}
return news_analyst_node

View File

@@ -47,9 +47,14 @@ def create_social_media_analyst(llm, toolkit):
result = chain.invoke(state["messages"])
report = ""
if len(result.tool_calls) == 0:
report = result.content
return {
"messages": [result],
"sentiment_report": result.content,
"sentiment_report": report,
}
return social_media_analyst_node

View File

@@ -12,14 +12,22 @@ from dateutil.relativedelta import relativedelta
from langchain_openai import ChatOpenAI
import tradingagents.dataflows.interface as interface
from tradingagents.default_config import DEFAULT_CONFIG
from langchain_core.messages import HumanMessage
def create_msg_delete():
def delete_messages(state):
"""To prevent message history from overflowing, regularly clear message history after a stage of the pipeline is done"""
"""Clear messages and add placeholder for Anthropic compatibility"""
messages = state["messages"]
return {"messages": [RemoveMessage(id=m.id) for m in messages]}
# Remove all messages
removal_operations = [RemoveMessage(id=m.id) for m in messages]
# Add a minimal placeholder message
placeholder = HumanMessage(content="Continue")
return {"messages": removal_operations + [placeholder]}
return delete_messages

View File

@@ -1,19 +1,23 @@
import chromadb
from chromadb.config import Settings
from openai import OpenAI
import numpy as np
class FinancialSituationMemory:
def __init__(self, name):
self.client = OpenAI()
def __init__(self, name, config):
if config["backend_url"] == "http://localhost:11434/v1":
self.embedding = "nomic-embed-text"
else:
self.embedding = "text-embedding-3-small"
self.client = OpenAI()
self.chroma_client = chromadb.Client(Settings(allow_reset=True))
self.situation_collection = self.chroma_client.create_collection(name=name)
def get_embedding(self, text):
"""Get OpenAI embedding for a text"""
response = self.client.embeddings.create(
model="text-embedding-ada-002", input=text
model=self.embedding, input=text
)
return response.data[0].embedding

View File

@@ -703,6 +703,7 @@ def get_YFin_data(
def get_stock_news_openai(ticker, curr_date):
config = get_config()
client = OpenAI()
response = client.responses.create(
@@ -713,7 +714,7 @@ def get_stock_news_openai(ticker, curr_date):
"content": [
{
"type": "input_text",
"text": f"Can you search Social Media for {ticker} on TSLA from 7 days before {curr_date} to {curr_date}? Make sure you only get the data posted during that period.",
"text": f"Can you search Social Media for {ticker} from 7 days before {curr_date} to {curr_date}? Make sure you only get the data posted during that period.",
}
],
}
@@ -737,6 +738,7 @@ def get_stock_news_openai(ticker, curr_date):
def get_global_news_openai(curr_date):
config = get_config()
client = OpenAI()
response = client.responses.create(
@@ -771,6 +773,7 @@ def get_global_news_openai(curr_date):
def get_fundamentals_openai(ticker, curr_date):
config = get_config()
client = OpenAI()
response = client.responses.create(

View File

@@ -9,8 +9,10 @@ DEFAULT_CONFIG = {
"dataflows/data_cache",
),
# LLM settings
"llm_provider": "openai",
"deep_think_llm": "o4-mini",
"quick_think_llm": "gpt-4o-mini",
"backend_url": "https://api.openai.com/v1",
# Debate and discussion settings
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,

View File

@@ -7,6 +7,9 @@ from datetime import date
from typing import Dict, Any, Tuple, List, Optional
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.prebuilt import ToolNode
from tradingagents.agents import *
@@ -55,18 +58,26 @@ class TradingAgentsGraph:
)
# Initialize LLMs
self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"])
self.quick_thinking_llm = ChatOpenAI(
model=self.config["quick_think_llm"], temperature=0.1
)
if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
elif self.config["llm_provider"].lower() == "anthropic":
self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
elif self.config["llm_provider"].lower() == "google":
self.deep_thinking_llm = ChatGoogleGenerativeAI(model=self.config["deep_think_llm"])
self.quick_thinking_llm = ChatGoogleGenerativeAI(model=self.config["quick_think_llm"])
else:
raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}")
self.toolkit = Toolkit(config=self.config)
# Initialize memories
self.bull_memory = FinancialSituationMemory("bull_memory")
self.bear_memory = FinancialSituationMemory("bear_memory")
self.trader_memory = FinancialSituationMemory("trader_memory")
self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory")
self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory")
self.bull_memory = FinancialSituationMemory("bull_memory", self.config)
self.bear_memory = FinancialSituationMemory("bear_memory", self.config)
self.trader_memory = FinancialSituationMemory("trader_memory", self.config)
self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config)
self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config)
# Create tool nodes
self.tool_nodes = self._create_tool_nodes()