chore(release): v0.1.0 – initial public release of TradingAgents
tradingagents/dataflows/__init__.py (new file, 46 lines)
@@ -0,0 +1,46 @@
from .finnhub_utils import get_data_in_range
from .googlenews_utils import getNewsData
from .yfin_utils import YFinanceUtils
from .reddit_utils import fetch_top_from_category
from .stockstats_utils import StockstatsUtils

from .interface import (
    # News and sentiment functions
    get_finnhub_news,
    get_finnhub_company_insider_sentiment,
    get_finnhub_company_insider_transactions,
    get_google_news,
    get_reddit_global_news,
    get_reddit_company_news,
    # Financial statements functions
    get_simfin_balance_sheet,
    get_simfin_cashflow,
    get_simfin_income_statements,
    # Technical analysis functions
    get_stock_stats_indicators_window,
    get_stockstats_indicator,
    # Market data functions
    get_YFin_data_window,
    get_YFin_data,
)

__all__ = [
    # News and sentiment functions
    "get_finnhub_news",
    "get_finnhub_company_insider_sentiment",
    "get_finnhub_company_insider_transactions",
    "get_google_news",
    "get_reddit_global_news",
    "get_reddit_company_news",
    # Financial statements functions
    "get_simfin_balance_sheet",
    "get_simfin_cashflow",
    "get_simfin_income_statements",
    # Technical analysis functions
    "get_stock_stats_indicators_window",
    "get_stockstats_indicator",
    # Market data functions
    "get_YFin_data_window",
    "get_YFin_data",
]
tradingagents/dataflows/config.py (new file, 34 lines)
@@ -0,0 +1,34 @@
import tradingagents.default_config as default_config
from typing import Dict, Optional

# Use default config but allow it to be overridden
_config: Optional[Dict] = None
DATA_DIR: Optional[str] = None


def initialize_config():
    """Initialize the configuration with default values."""
    global _config, DATA_DIR
    if _config is None:
        _config = default_config.DEFAULT_CONFIG.copy()
        DATA_DIR = _config["data_dir"]


def set_config(config: Dict):
    """Update the configuration with custom values."""
    global _config, DATA_DIR
    if _config is None:
        _config = default_config.DEFAULT_CONFIG.copy()
    _config.update(config)
    DATA_DIR = _config["data_dir"]


def get_config() -> Dict:
    """Get the current configuration."""
    if _config is None:
        initialize_config()
    return _config.copy()


# Initialize with default config
initialize_config()
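A minimal sketch of the override flow this module enables (assuming DEFAULT_CONFIG carries a "data_dir" key, as the code above reads; the path below is hypothetical):

    from tradingagents.dataflows.config import get_config, set_config

    set_config({"data_dir": "/tmp/tradingagents_data"})  # hypothetical override
    assert get_config()["data_dir"] == "/tmp/tradingagents_data"

One caveat: modules that do `from .config import DATA_DIR` bind the value at import time, so a later set_config() rebinds this module's DATA_DIR but not copies already imported elsewhere.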
tradingagents/dataflows/finnhub_utils.py (new file, 36 lines)
@@ -0,0 +1,36 @@
import json
import os


def get_data_in_range(ticker, start_date, end_date, data_type, data_dir, period=None):
    """
    Gets Finnhub data saved and processed on disk.

    Args:
        ticker (str): Ticker symbol of the company.
        start_date (str): Start date in YYYY-MM-DD format.
        end_date (str): End date in YYYY-MM-DD format.
        data_type (str): Type of data from Finnhub to fetch. Can be insider_trans, SEC_filings, news_data, insider_senti, or fin_as_reported.
        data_dir (str): Directory where the data is saved.
        period (str): Defaults to None; if specified, should be annual or quarterly.
    """

    if period:
        data_path = os.path.join(
            data_dir,
            "finnhub_data",
            data_type,
            f"{ticker}_{period}_data_formatted.json",
        )
    else:
        data_path = os.path.join(
            data_dir, "finnhub_data", data_type, f"{ticker}_data_formatted.json"
        )

    # use a context manager so the file handle is closed after loading
    with open(data_path, "r") as f:
        data = json.load(f)

    # filter keys (date, str in format YYYY-MM-DD) by the date range (str, str in format YYYY-MM-DD)
    filtered_data = {}
    for key, value in data.items():
        if start_date <= key <= end_date and len(value) > 0:
            filtered_data[key] = value
    return filtered_data
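A usage sketch, assuming data was pre-fetched to disk in the layout the function expects, i.e. <data_dir>/finnhub_data/<data_type>/<ticker>_data_formatted.json (the "/data" path here is hypothetical):

    news = get_data_in_range("AAPL", "2024-01-01", "2024-01-15", "news_data", "/data")
    # keys are "YYYY-MM-DD" strings; only days with non-empty entries survive the filter

Because both keys and bounds are ISO-formatted strings, the plain string comparison in the filter is equivalent to comparing dates.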
tradingagents/dataflows/googlenews_utils.py (new file, 108 lines)
@@ -0,0 +1,108 @@
import json
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import time
import random
from tenacity import (
    retry,
    stop_after_attempt,
    wait_exponential,
    retry_if_exception_type,
    retry_if_result,
)


def is_rate_limited(response):
    """Check if the response indicates rate limiting (status code 429)."""
    return response.status_code == 429


@retry(
    retry=(retry_if_result(is_rate_limited)),
    wait=wait_exponential(multiplier=1, min=4, max=60),
    stop=stop_after_attempt(5),
)
def make_request(url, headers):
    """Make a request with retry logic for rate limiting."""
    # Random delay before each request to avoid detection
    time.sleep(random.uniform(2, 6))
    response = requests.get(url, headers=headers)
    return response


def getNewsData(query, start_date, end_date):
    """
    Scrape Google News search results for a given query and date range.

    query: str - search query
    start_date: str - start date in the format yyyy-mm-dd or mm/dd/yyyy
    end_date: str - end date in the format yyyy-mm-dd or mm/dd/yyyy
    """
    if "-" in start_date:
        start_date = datetime.strptime(start_date, "%Y-%m-%d")
        start_date = start_date.strftime("%m/%d/%Y")
    if "-" in end_date:
        end_date = datetime.strptime(end_date, "%Y-%m-%d")
        end_date = end_date.strftime("%m/%d/%Y")

    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/101.0.4951.54 Safari/537.36"
        )
    }

    news_results = []
    page = 0
    while True:
        offset = page * 10
        url = (
            f"https://www.google.com/search?q={query}"
            f"&tbs=cdr:1,cd_min:{start_date},cd_max:{end_date}"
            f"&tbm=nws&start={offset}"
        )

        try:
            response = make_request(url, headers)
            soup = BeautifulSoup(response.content, "html.parser")
            results_on_page = soup.select("div.SoaBEf")

            if not results_on_page:
                break  # No more results found

            for el in results_on_page:
                try:
                    link = el.find("a")["href"]
                    title = el.select_one("div.MBeuO").get_text()
                    snippet = el.select_one(".GI74Re").get_text()
                    date = el.select_one(".LfVVr").get_text()
                    source = el.select_one(".NUnG9d span").get_text()
                    news_results.append(
                        {
                            "link": link,
                            "title": title,
                            "snippet": snippet,
                            "date": date,
                            "source": source,
                        }
                    )
                except Exception as e:
                    print(f"Error processing result: {e}")
                    # If one of the fields is not found, skip this result
                    continue

            # Check for the "Next" link (pagination)
            next_link = soup.find("a", id="pnnext")
            if not next_link:
                break

            page += 1

        except Exception as e:
            print(f"Failed after multiple retries: {e}")
            break

    return news_results
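A usage sketch (the div.SoaBEf / div.MBeuO selectors target Google's current News markup and may break when it changes):

    articles = getNewsData("AAPL earnings", "2024-01-01", "2024-01-15")
    for a in articles[:3]:
        print(a["date"], a["source"], a["title"])

If every attempt keeps returning HTTP 429, tenacity gives up after five tries and raises RetryError, which the outer try/except in getNewsData catches, so the function returns whatever it has collected so far.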
tradingagents/dataflows/interface.py (new file, 804 lines)
@@ -0,0 +1,804 @@
from typing import Annotated, Dict
from .reddit_utils import fetch_top_from_category
from .yfin_utils import *
from .stockstats_utils import *
from .googlenews_utils import *
from .finnhub_utils import get_data_in_range
from dateutil.relativedelta import relativedelta
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
import json
import os
import pandas as pd
from tqdm import tqdm
import yfinance as yf
from openai import OpenAI
from .config import get_config, set_config, DATA_DIR


def get_finnhub_news(
    ticker: Annotated[
        str,
        "Ticker symbol of the company, e.g. 'AAPL', 'TSM'",
    ],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "how many days to look back"],
):
    """
    Retrieve news about a company within a time frame.

    Args:
        ticker (str): ticker for the company you are interested in
        curr_date (str): current date in yyyy-mm-dd format
        look_back_days (int): number of days to look back from curr_date
    Returns:
        str: formatted string containing the news of the company in the time frame

    """

    start_date = datetime.strptime(curr_date, "%Y-%m-%d")
    before = start_date - relativedelta(days=look_back_days)
    before = before.strftime("%Y-%m-%d")

    result = get_data_in_range(ticker, before, curr_date, "news_data", DATA_DIR)

    if len(result) == 0:
        return ""

    combined_result = ""
    for day, data in result.items():
        if len(data) == 0:
            continue
        for entry in data:
            current_news = (
                "### " + entry["headline"] + f" ({day})" + "\n" + entry["summary"]
            )
            combined_result += current_news + "\n\n"

    return f"## {ticker} News, from {before} to {curr_date}:\n" + str(combined_result)


def get_finnhub_company_insider_sentiment(
    ticker: Annotated[str, "ticker symbol for the company"],
    curr_date: Annotated[
        str,
        "current date you are trading at, yyyy-mm-dd",
    ],
    look_back_days: Annotated[int, "number of days to look back"],
):
    """
    Retrieve insider sentiment about a company (retrieved from public SEC information) for the past look_back_days days.

    Args:
        ticker (str): ticker symbol of the company
        curr_date (str): current date you are trading on, yyyy-mm-dd
        look_back_days (int): number of days to look back from curr_date
    Returns:
        str: a report of the sentiment in the look_back_days days ending at curr_date
    """

    date_obj = datetime.strptime(curr_date, "%Y-%m-%d")
    before = date_obj - relativedelta(days=look_back_days)
    before = before.strftime("%Y-%m-%d")

    data = get_data_in_range(ticker, before, curr_date, "insider_senti", DATA_DIR)

    if len(data) == 0:
        return ""

    result_str = ""
    seen_dicts = []
    for date, senti_list in data.items():
        for entry in senti_list:
            if entry not in seen_dicts:
                result_str += f"### {entry['year']}-{entry['month']}:\nChange: {entry['change']}\nMonthly Share Purchase Ratio: {entry['mspr']}\n\n"
                seen_dicts.append(entry)

    return (
        f"## {ticker} Insider Sentiment Data for {before} to {curr_date}:\n"
        + result_str
        + "The change field refers to the net buying/selling from all insiders' transactions. The mspr field refers to monthly share purchase ratio."
    )


def get_finnhub_company_insider_transactions(
    ticker: Annotated[str, "ticker symbol"],
    curr_date: Annotated[
        str,
        "current date you are trading at, yyyy-mm-dd",
    ],
    look_back_days: Annotated[int, "how many days to look back"],
):
    """
    Retrieve insider transaction information about a company (retrieved from public SEC information) for the past look_back_days days.

    Args:
        ticker (str): ticker symbol of the company
        curr_date (str): current date you are trading at, yyyy-mm-dd
        look_back_days (int): number of days to look back from curr_date
    Returns:
        str: a report of the company's insider transaction/trading information in the past look_back_days days
    """

    date_obj = datetime.strptime(curr_date, "%Y-%m-%d")
    before = date_obj - relativedelta(days=look_back_days)
    before = before.strftime("%Y-%m-%d")

    data = get_data_in_range(ticker, before, curr_date, "insider_trans", DATA_DIR)

    if len(data) == 0:
        return ""

    result_str = ""

    seen_dicts = []
    for date, senti_list in data.items():
        for entry in senti_list:
            if entry not in seen_dicts:
                result_str += f"### Filing Date: {entry['filingDate']}, {entry['name']}:\nChange:{entry['change']}\nShares: {entry['share']}\nTransaction Price: {entry['transactionPrice']}\nTransaction Code: {entry['transactionCode']}\n\n"
                seen_dicts.append(entry)

    return (
        f"## {ticker} insider transactions from {before} to {curr_date}:\n"
        + result_str
        + "The change field reflects the variation in share count, where a negative number indicates a reduction in holdings, while share specifies the total number of shares involved. The transactionPrice denotes the per-share price at which the trade was executed, and transactionDate marks when the transaction occurred. The name field identifies the insider making the trade, and transactionCode (e.g., S for sale) clarifies the nature of the transaction. FilingDate records when the transaction was officially reported, and the unique id links to the specific SEC filing, as indicated by the source. Additionally, the symbol ties the transaction to a particular company, isDerivative flags whether the trade involves derivative securities, and currency notes the currency context of the transaction."
    )


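Both insider helpers (and get_finnhub_news above) share one shape: compute before = curr_date minus look_back_days, then read the offline Finnhub dump via get_data_in_range. A sketch, assuming the dumps exist under DATA_DIR:

    senti = get_finnhub_company_insider_sentiment("AAPL", "2024-05-01", look_back_days=15)
    trans = get_finnhub_company_insider_transactions("AAPL", "2024-05-01", look_back_days=15)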
def get_simfin_balance_sheet(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[
        str,
        "reporting frequency of the company's financial history: annual / quarterly",
    ],
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
):
    data_path = os.path.join(
        DATA_DIR,
        "fundamental_data",
        "simfin_data_all",
        "balance_sheet",
        "companies",
        "us",
        f"us-balance-{freq}.csv",
    )
    df = pd.read_csv(data_path, sep=";")

    # Convert date strings to datetime objects and remove any time components
    df["Report Date"] = pd.to_datetime(df["Report Date"], utc=True).dt.normalize()
    df["Publish Date"] = pd.to_datetime(df["Publish Date"], utc=True).dt.normalize()

    # Convert the current date to datetime and normalize
    curr_date_dt = pd.to_datetime(curr_date, utc=True).normalize()

    # Filter the DataFrame for the given ticker and for reports that were published on or before the current date
    filtered_df = df[(df["Ticker"] == ticker) & (df["Publish Date"] <= curr_date_dt)]

    # Check if there are any available reports; if not, return a notification
    if filtered_df.empty:
        print("No balance sheet available before the given current date.")
        return ""

    # Get the most recent balance sheet by selecting the row with the latest Publish Date
    latest_balance_sheet = filtered_df.loc[filtered_df["Publish Date"].idxmax()]

    # Drop the SimFinId column
    latest_balance_sheet = latest_balance_sheet.drop("SimFinId")

    return (
        f"## {freq} balance sheet for {ticker} released on {str(latest_balance_sheet['Publish Date'])[0:10]}: \n"
        + str(latest_balance_sheet)
        + "\n\nThis includes metadata like reporting dates and currency, share details, and a breakdown of assets, liabilities, and equity. Assets are grouped as current (liquid items like cash and receivables) and noncurrent (long-term investments and property). Liabilities are split between short-term obligations and long-term debts, while equity reflects shareholder funds such as paid-in capital and retained earnings. Together, these components ensure that total assets equal the sum of liabilities and equity."
    )


def get_simfin_cashflow(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[
        str,
        "reporting frequency of the company's financial history: annual / quarterly",
    ],
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
):
    data_path = os.path.join(
        DATA_DIR,
        "fundamental_data",
        "simfin_data_all",
        "cash_flow",
        "companies",
        "us",
        f"us-cashflow-{freq}.csv",
    )
    df = pd.read_csv(data_path, sep=";")

    # Convert date strings to datetime objects and remove any time components
    df["Report Date"] = pd.to_datetime(df["Report Date"], utc=True).dt.normalize()
    df["Publish Date"] = pd.to_datetime(df["Publish Date"], utc=True).dt.normalize()

    # Convert the current date to datetime and normalize
    curr_date_dt = pd.to_datetime(curr_date, utc=True).normalize()

    # Filter the DataFrame for the given ticker and for reports that were published on or before the current date
    filtered_df = df[(df["Ticker"] == ticker) & (df["Publish Date"] <= curr_date_dt)]

    # Check if there are any available reports; if not, return a notification
    if filtered_df.empty:
        print("No cash flow statement available before the given current date.")
        return ""

    # Get the most recent cash flow statement by selecting the row with the latest Publish Date
    latest_cash_flow = filtered_df.loc[filtered_df["Publish Date"].idxmax()]

    # Drop the SimFinId column
    latest_cash_flow = latest_cash_flow.drop("SimFinId")

    return (
        f"## {freq} cash flow statement for {ticker} released on {str(latest_cash_flow['Publish Date'])[0:10]}: \n"
        + str(latest_cash_flow)
        + "\n\nThis includes metadata like reporting dates and currency, share details, and a breakdown of cash movements. Operating activities show cash generated from core business operations, including net income adjustments for non-cash items and working capital changes. Investing activities cover asset acquisitions/disposals and investments. Financing activities include debt transactions, equity issuances/repurchases, and dividend payments. The net change in cash represents the overall increase or decrease in the company's cash position during the reporting period."
    )


def get_simfin_income_statements(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[
        str,
        "reporting frequency of the company's financial history: annual / quarterly",
    ],
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
):
    data_path = os.path.join(
        DATA_DIR,
        "fundamental_data",
        "simfin_data_all",
        "income_statements",
        "companies",
        "us",
        f"us-income-{freq}.csv",
    )
    df = pd.read_csv(data_path, sep=";")

    # Convert date strings to datetime objects and remove any time components
    df["Report Date"] = pd.to_datetime(df["Report Date"], utc=True).dt.normalize()
    df["Publish Date"] = pd.to_datetime(df["Publish Date"], utc=True).dt.normalize()

    # Convert the current date to datetime and normalize
    curr_date_dt = pd.to_datetime(curr_date, utc=True).normalize()

    # Filter the DataFrame for the given ticker and for reports that were published on or before the current date
    filtered_df = df[(df["Ticker"] == ticker) & (df["Publish Date"] <= curr_date_dt)]

    # Check if there are any available reports; if not, return a notification
    if filtered_df.empty:
        print("No income statement available before the given current date.")
        return ""

    # Get the most recent income statement by selecting the row with the latest Publish Date
    latest_income = filtered_df.loc[filtered_df["Publish Date"].idxmax()]

    # Drop the SimFinId column
    latest_income = latest_income.drop("SimFinId")

    return (
        f"## {freq} income statement for {ticker} released on {str(latest_income['Publish Date'])[0:10]}: \n"
        + str(latest_income)
        + "\n\nThis includes metadata like reporting dates and currency, share details, and a comprehensive breakdown of the company's financial performance. Starting with Revenue, it shows Cost of Revenue and resulting Gross Profit. Operating Expenses are detailed, including SG&A, R&D, and Depreciation. The statement then shows Operating Income, followed by non-operating items and Interest Expense, leading to Pretax Income. After accounting for Income Tax and any Extraordinary items, it concludes with Net Income, representing the company's bottom-line profit or loss for the period."
    )


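The three SimFin readers above are identical apart from the statement folder and CSV name: load the bulk semicolon-delimited CSV, keep rows for the ticker with a Publish Date on or before curr_date, and format the latest row. A sketch, assuming the SimFin bulk data sits under DATA_DIR:

    text = get_simfin_balance_sheet("AAPL", "quarterly", "2024-05-01")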
def get_google_news(
    query: Annotated[str, "Query to search with"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "how many days to look back"],
) -> str:
    query = query.replace(" ", "+")

    start_date = datetime.strptime(curr_date, "%Y-%m-%d")
    before = start_date - relativedelta(days=look_back_days)
    before = before.strftime("%Y-%m-%d")

    news_results = getNewsData(query, before, curr_date)

    if len(news_results) == 0:
        return ""

    news_str = ""
    for news in news_results:
        news_str += (
            f"### {news['title']} (source: {news['source']}) \n\n{news['snippet']}\n\n"
        )

    return f"## {query} Google News, from {before} to {curr_date}:\n\n{news_str}"


def get_reddit_global_news(
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "how many days to look back"],
    max_limit_per_day: Annotated[int, "Maximum number of news per day"],
) -> str:
    """
    Retrieve the latest top reddit news.

    Args:
        start_date: the most recent date to include, yyyy-mm-dd
        look_back_days: number of days to look back from start_date
        max_limit_per_day: maximum number of posts per day
    Returns:
        str: A formatted string containing the latest reddit news posts, with titles and contents
    """

    start_date = datetime.strptime(start_date, "%Y-%m-%d")
    before = start_date - relativedelta(days=look_back_days)
    before = before.strftime("%Y-%m-%d")

    posts = []
    # iterate day by day from before to start_date
    curr_date = datetime.strptime(before, "%Y-%m-%d")

    total_iterations = (start_date - curr_date).days + 1
    pbar = tqdm(desc=f"Getting Global News on {start_date}", total=total_iterations)

    while curr_date <= start_date:
        curr_date_str = curr_date.strftime("%Y-%m-%d")
        fetch_result = fetch_top_from_category(
            "global_news",
            curr_date_str,
            max_limit_per_day,
            data_path=os.path.join(DATA_DIR, "reddit_data"),
        )
        posts.extend(fetch_result)
        curr_date += relativedelta(days=1)
        pbar.update(1)

    pbar.close()

    if len(posts) == 0:
        return ""

    news_str = ""
    for post in posts:
        if post["content"] == "":
            news_str += f"### {post['title']}\n\n"
        else:
            news_str += f"### {post['title']}\n\n{post['content']}\n\n"

    return f"## Global News Reddit, from {before} to {start_date.strftime('%Y-%m-%d')}:\n{news_str}"


def get_reddit_company_news(
    ticker: Annotated[str, "ticker symbol of the company"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "how many days to look back"],
    max_limit_per_day: Annotated[int, "Maximum number of news per day"],
) -> str:
    """
    Retrieve the latest top reddit news about a company.

    Args:
        ticker: ticker symbol of the company
        start_date: the most recent date to include, yyyy-mm-dd
        look_back_days: number of days to look back from start_date
        max_limit_per_day: maximum number of posts per day
    Returns:
        str: A formatted string containing the latest reddit news posts about the company, with titles and contents
    """

    start_date = datetime.strptime(start_date, "%Y-%m-%d")
    before = start_date - relativedelta(days=look_back_days)
    before = before.strftime("%Y-%m-%d")

    posts = []
    # iterate day by day from before to start_date
    curr_date = datetime.strptime(before, "%Y-%m-%d")

    total_iterations = (start_date - curr_date).days + 1
    pbar = tqdm(
        desc=f"Getting Company News for {ticker} on {start_date}",
        total=total_iterations,
    )

    while curr_date <= start_date:
        curr_date_str = curr_date.strftime("%Y-%m-%d")
        fetch_result = fetch_top_from_category(
            "company_news",
            curr_date_str,
            max_limit_per_day,
            ticker,
            data_path=os.path.join(DATA_DIR, "reddit_data"),
        )
        posts.extend(fetch_result)
        curr_date += relativedelta(days=1)
        pbar.update(1)

    pbar.close()

    if len(posts) == 0:
        return ""

    news_str = ""
    for post in posts:
        if post["content"] == "":
            news_str += f"### {post['title']}\n\n"
        else:
            news_str += f"### {post['title']}\n\n{post['content']}\n\n"

    return f"## {ticker} News Reddit, from {before} to {start_date.strftime('%Y-%m-%d')}:\n\n{news_str}"


def get_stock_stats_indicators_window(
    symbol: Annotated[str, "ticker symbol of the company"],
    indicator: Annotated[str, "technical indicator to get the analysis and report of"],
    curr_date: Annotated[
        str, "The current trading date you are trading on, YYYY-mm-dd"
    ],
    look_back_days: Annotated[int, "how many days to look back"],
    online: Annotated[bool, "to fetch data online or offline"],
) -> str:

    best_ind_params = {
        # Moving Averages
        "close_50_sma": (
            "50 SMA: A medium-term trend indicator. "
            "Usage: Identify trend direction and serve as dynamic support/resistance. "
            "Tips: It lags price; combine with faster indicators for timely signals."
        ),
        "close_200_sma": (
            "200 SMA: A long-term trend benchmark. "
            "Usage: Confirm overall market trend and identify golden/death cross setups. "
            "Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries."
        ),
        "close_10_ema": (
            "10 EMA: A responsive short-term average. "
            "Usage: Capture quick shifts in momentum and potential entry points. "
            "Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals."
        ),
        # MACD Related
        "macd": (
            "MACD: Computes momentum via differences of EMAs. "
            "Usage: Look for crossovers and divergence as signals of trend changes. "
            "Tips: Confirm with other indicators in low-volatility or sideways markets."
        ),
        "macds": (
            "MACD Signal: An EMA smoothing of the MACD line. "
            "Usage: Use crossovers with the MACD line to trigger trades. "
            "Tips: Should be part of a broader strategy to avoid false positives."
        ),
        "macdh": (
            "MACD Histogram: Shows the gap between the MACD line and its signal. "
            "Usage: Visualize momentum strength and spot divergence early. "
            "Tips: Can be volatile; complement with additional filters in fast-moving markets."
        ),
        # Momentum Indicators
        "rsi": (
            "RSI: Measures momentum to flag overbought/oversold conditions. "
            "Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. "
            "Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis."
        ),
        # Volatility Indicators
        "boll": (
            "Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. "
            "Usage: Acts as a dynamic benchmark for price movement. "
            "Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals."
        ),
        "boll_ub": (
            "Bollinger Upper Band: Typically 2 standard deviations above the middle line. "
            "Usage: Signals potential overbought conditions and breakout zones. "
            "Tips: Confirm signals with other tools; prices may ride the band in strong trends."
        ),
        "boll_lb": (
            "Bollinger Lower Band: Typically 2 standard deviations below the middle line. "
            "Usage: Indicates potential oversold conditions. "
            "Tips: Use additional analysis to avoid false reversal signals."
        ),
        "atr": (
            "ATR: Averages true range to measure volatility. "
            "Usage: Set stop-loss levels and adjust position sizes based on current market volatility. "
            "Tips: It's a reactive measure, so use it as part of a broader risk management strategy."
        ),
        # Volume-Based Indicators
        "vwma": (
            "VWMA: A moving average weighted by volume. "
            "Usage: Confirm trends by integrating price action with volume data. "
            "Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses."
        ),
        "mfi": (
            "MFI: The Money Flow Index is a momentum indicator that uses both price and volume to measure buying and selling pressure. "
            "Usage: Identify overbought (>80) or oversold (<20) conditions and confirm the strength of trends or reversals. "
            "Tips: Use alongside RSI or MACD to confirm signals; divergence between price and MFI can indicate potential reversals."
        ),
    }

    if indicator not in best_ind_params:
        raise ValueError(
            f"Indicator {indicator} is not supported. Please choose from: {list(best_ind_params.keys())}"
        )

    end_date = curr_date
    curr_date = datetime.strptime(curr_date, "%Y-%m-%d")
    before = curr_date - relativedelta(days=look_back_days)

    if not online:
        # read from cached YFin data
        data = pd.read_csv(
            os.path.join(
                DATA_DIR,
                f"market_data/price_data/{symbol}-YFin-data-2015-01-01-2025-03-25.csv",
            )
        )
        data["Date"] = pd.to_datetime(data["Date"], utc=True)
        dates_in_df = data["Date"].astype(str).str[:10]

        ind_string = ""
        while curr_date >= before:
            # only report trading dates present in the data
            if curr_date.strftime("%Y-%m-%d") in dates_in_df.values:
                indicator_value = get_stockstats_indicator(
                    symbol, indicator, curr_date.strftime("%Y-%m-%d"), online
                )

                ind_string += f"{curr_date.strftime('%Y-%m-%d')}: {indicator_value}\n"

            # always step back one day so non-trading days cannot stall the loop
            curr_date = curr_date - relativedelta(days=1)
    else:
        # online gathering
        ind_string = ""
        while curr_date >= before:
            indicator_value = get_stockstats_indicator(
                symbol, indicator, curr_date.strftime("%Y-%m-%d"), online
            )

            ind_string += f"{curr_date.strftime('%Y-%m-%d')}: {indicator_value}\n"

            curr_date = curr_date - relativedelta(days=1)

    result_str = (
        f"## {indicator} values from {before.strftime('%Y-%m-%d')} to {end_date}:\n\n"
        + ind_string
        + "\n\n"
        + best_ind_params.get(indicator, "No description available.")
    )

    return result_str


def get_stockstats_indicator(
    symbol: Annotated[str, "ticker symbol of the company"],
    indicator: Annotated[str, "technical indicator to get the analysis and report of"],
    curr_date: Annotated[
        str, "The current trading date you are trading on, YYYY-mm-dd"
    ],
    online: Annotated[bool, "to fetch data online or offline"],
) -> str:

    # round-trip through strptime/strftime to validate the date format early
    curr_date = datetime.strptime(curr_date, "%Y-%m-%d")
    curr_date = curr_date.strftime("%Y-%m-%d")

    try:
        indicator_value = StockstatsUtils.get_stock_stats(
            symbol,
            indicator,
            curr_date,
            os.path.join(DATA_DIR, "market_data", "price_data"),
            online=online,
        )
    except Exception as e:
        print(
            f"Error getting stockstats indicator data for indicator {indicator} on {curr_date}: {e}"
        )
        return ""

    return str(indicator_value)


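A sketch of the two indicator entry points (indicator names follow the stockstats column scheme described in best_ind_params above):

    window = get_stock_stats_indicators_window("AAPL", "rsi", "2024-05-01", look_back_days=14, online=True)
    single = get_stockstats_indicator("AAPL", "close_50_sma", "2024-05-01", online=True)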
def get_YFin_data_window(
    symbol: Annotated[str, "ticker symbol of the company"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "how many days to look back"],
) -> str:
    # calculate past days
    date_obj = datetime.strptime(curr_date, "%Y-%m-%d")
    before = date_obj - relativedelta(days=look_back_days)
    start_date = before.strftime("%Y-%m-%d")

    # read in data
    data = pd.read_csv(
        os.path.join(
            DATA_DIR,
            f"market_data/price_data/{symbol}-YFin-data-2015-01-01-2025-03-25.csv",
        )
    )

    # Extract just the date part for comparison
    data["DateOnly"] = data["Date"].str[:10]

    # Filter data between the start and end dates (inclusive)
    filtered_data = data[
        (data["DateOnly"] >= start_date) & (data["DateOnly"] <= curr_date)
    ]

    # Drop the temporary column we created
    filtered_data = filtered_data.drop("DateOnly", axis=1)

    # Set pandas display options to show the full DataFrame
    with pd.option_context(
        "display.max_rows", None, "display.max_columns", None, "display.width", None
    ):
        df_string = filtered_data.to_string()

    return (
        f"## Raw Market Data for {symbol} from {start_date} to {curr_date}:\n\n"
        + df_string
    )


def get_YFin_data_online(
    symbol: Annotated[str, "ticker symbol of the company"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
):

    # validate the date strings early; raises ValueError on a bad format
    datetime.strptime(start_date, "%Y-%m-%d")
    datetime.strptime(end_date, "%Y-%m-%d")

    # Create ticker object
    ticker = yf.Ticker(symbol.upper())

    # Fetch historical data for the specified date range
    data = ticker.history(start=start_date, end=end_date)

    # Check if data is empty
    if data.empty:
        return (
            f"No data found for symbol '{symbol}' between {start_date} and {end_date}"
        )

    # Remove timezone info from index for cleaner output
    if data.index.tz is not None:
        data.index = data.index.tz_localize(None)

    # Round numerical values to 2 decimal places for cleaner display
    numeric_columns = ["Open", "High", "Low", "Close", "Adj Close"]
    for col in numeric_columns:
        if col in data.columns:
            data[col] = data[col].round(2)

    # Convert DataFrame to CSV string
    csv_string = data.to_csv()

    # Add header information
    header = f"# Stock data for {symbol.upper()} from {start_date} to {end_date}\n"
    header += f"# Total records: {len(data)}\n"
    header += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"

    return header + csv_string


def get_YFin_data(
    symbol: Annotated[str, "ticker symbol of the company"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> pd.DataFrame:
    # read in data
    data = pd.read_csv(
        os.path.join(
            DATA_DIR,
            f"market_data/price_data/{symbol}-YFin-data-2015-01-01-2025-03-25.csv",
        )
    )

    if end_date > "2025-03-25":
        raise Exception(
            f"Get_YFin_Data: {end_date} is outside of the data range of 2015-01-01 to 2025-03-25"
        )

    # Extract just the date part for comparison
    data["DateOnly"] = data["Date"].str[:10]

    # Filter data between the start and end dates (inclusive)
    filtered_data = data[
        (data["DateOnly"] >= start_date) & (data["DateOnly"] <= end_date)
    ]

    # Drop the temporary column we created
    filtered_data = filtered_data.drop("DateOnly", axis=1)

    # remove the index from the dataframe
    filtered_data = filtered_data.reset_index(drop=True)

    return filtered_data


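A sketch of the three market-data paths: the offline readers expect the bundled {symbol}-YFin-data-2015-01-01-2025-03-25.csv files under DATA_DIR, while the online variant downloads fresh history from Yahoo Finance:

    df = get_YFin_data("AAPL", "2024-01-01", "2024-03-01")                # DataFrame from the local CSV
    text = get_YFin_data_window("AAPL", "2024-03-01", look_back_days=30)  # formatted string
    csv_text = get_YFin_data_online("AAPL", "2024-01-01", "2024-03-01")   # CSV string from yfinance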
def get_stock_news_openai(ticker, curr_date):
    client = OpenAI()

    response = client.responses.create(
        model="gpt-4.1-mini",
        input=[
            {
                "role": "system",
                "content": [
                    {
                        "type": "input_text",
                        "text": f"Can you search Social Media for {ticker} from 7 days before {curr_date} to {curr_date}? Make sure you only get the data posted during that period.",
                    }
                ],
            }
        ],
        text={"format": {"type": "text"}},
        reasoning={},
        tools=[
            {
                "type": "web_search_preview",
                "user_location": {"type": "approximate"},
                "search_context_size": "low",
            }
        ],
        temperature=1,
        max_output_tokens=4096,
        top_p=1,
        store=True,
    )

    return response.output[1].content[0].text


def get_global_news_openai(curr_date):
    client = OpenAI()

    response = client.responses.create(
        model="gpt-4.1-mini",
        input=[
            {
                "role": "system",
                "content": [
                    {
                        "type": "input_text",
                        "text": f"Can you search global or macroeconomics news from 7 days before {curr_date} to {curr_date} that would be informative for trading purposes? Make sure you only get the data posted during that period.",
                    }
                ],
            }
        ],
        text={"format": {"type": "text"}},
        reasoning={},
        tools=[
            {
                "type": "web_search_preview",
                "user_location": {"type": "approximate"},
                "search_context_size": "low",
            }
        ],
        temperature=1,
        max_output_tokens=4096,
        top_p=1,
        store=True,
    )

    return response.output[1].content[0].text


def get_fundamentals_openai(ticker, curr_date):
    client = OpenAI()

    response = client.responses.create(
        model="gpt-4.1-mini",
        input=[
            {
                "role": "system",
                "content": [
                    {
                        "type": "input_text",
                        "text": f"Can you search for fundamental discussions on {ticker} from the month before {curr_date} to the month of {curr_date}? Make sure you only get the data posted during that period. List the results as a table, with PE/PS/Cash flow/ etc.",
                    }
                ],
            }
        ],
        text={"format": {"type": "text"}},
        reasoning={},
        tools=[
            {
                "type": "web_search_preview",
                "user_location": {"type": "approximate"},
                "search_context_size": "low",
            }
        ],
        temperature=1,
        max_output_tokens=4096,
        top_p=1,
        store=True,
    )

    return response.output[1].content[0].text
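The three OpenAI helpers above differ only in their prompt text; each assumes the Responses API places the web-search tool call at response.output[0] and the assistant message at response.output[1], which is why they return response.output[1].content[0].text. If the model answers without searching, that indexing would need adjusting. A sketch (requires an OPENAI_API_KEY in the environment):

    summary = get_global_news_openai("2024-05-01")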
tradingagents/dataflows/reddit_utils.py (new file, 135 lines)
@@ -0,0 +1,135 @@
import requests
import time
import json
from datetime import datetime, timedelta
from contextlib import contextmanager
from typing import Annotated
import os
import re

ticker_to_company = {
    "AAPL": "Apple",
    "MSFT": "Microsoft",
    "GOOGL": "Google",
    "AMZN": "Amazon",
    "TSLA": "Tesla",
    "NVDA": "Nvidia",
    "TSM": "Taiwan Semiconductor Manufacturing Company OR TSMC",
    "JPM": "JPMorgan Chase OR JP Morgan",
    "JNJ": "Johnson & Johnson OR JNJ",
    "V": "Visa",
    "WMT": "Walmart",
    "META": "Meta OR Facebook",
    "AMD": "AMD",
    "INTC": "Intel",
    "QCOM": "Qualcomm",
    "BABA": "Alibaba",
    "ADBE": "Adobe",
    "NFLX": "Netflix",
    "CRM": "Salesforce",
    "PYPL": "PayPal",
    "PLTR": "Palantir",
    "MU": "Micron",
    "SQ": "Block OR Square",
    "ZM": "Zoom",
    "CSCO": "Cisco",
    "SHOP": "Shopify",
    "ORCL": "Oracle",
    "X": "Twitter OR X",
    "SPOT": "Spotify",
    "AVGO": "Broadcom",
    "ASML": "ASML",
    "TWLO": "Twilio",
    "SNAP": "Snap Inc.",
    "TEAM": "Atlassian",
    "SQSP": "Squarespace",
    "UBER": "Uber",
    "ROKU": "Roku",
    "PINS": "Pinterest",
}


def fetch_top_from_category(
    category: Annotated[
        str, "Category to fetch top post from. Collection of subreddits."
    ],
    date: Annotated[str, "Date to fetch top posts from."],
    max_limit: Annotated[int, "Maximum number of posts to fetch."],
    query: Annotated[str, "Optional query to search for in the subreddit."] = None,
    data_path: Annotated[
        str,
        "Path to the data folder. Default is 'reddit_data'.",
    ] = "reddit_data",
):
    base_path = data_path

    all_content = []

    if max_limit < len(os.listdir(os.path.join(base_path, category))):
        raise ValueError(
            "REDDIT FETCHING ERROR: max limit is less than the number of files in the category. Will not be able to fetch any posts"
        )

    limit_per_subreddit = max_limit // len(
        os.listdir(os.path.join(base_path, category))
    )

    for data_file in os.listdir(os.path.join(base_path, category)):
        # check if data_file is a .jsonl file
        if not data_file.endswith(".jsonl"):
            continue

        all_content_curr_subreddit = []

        with open(os.path.join(base_path, category, data_file), "rb") as f:
            for i, line in enumerate(f):
                # skip empty lines
                if not line.strip():
                    continue

                parsed_line = json.loads(line)

                # select only lines that are from the date
                post_date = datetime.utcfromtimestamp(
                    parsed_line["created_utc"]
                ).strftime("%Y-%m-%d")
                if post_date != date:
                    continue

                # for company news, check that the title or the content mentions the company's name (query)
                if "company" in category and query:
                    search_terms = []
                    if "OR" in ticker_to_company[query]:
                        search_terms = ticker_to_company[query].split(" OR ")
                    else:
                        search_terms = [ticker_to_company[query]]

                    search_terms.append(query)

                    found = False
                    for term in search_terms:
                        if re.search(
                            term, parsed_line["title"], re.IGNORECASE
                        ) or re.search(term, parsed_line["selftext"], re.IGNORECASE):
                            found = True
                            break

                    if not found:
                        continue

                post = {
                    "title": parsed_line["title"],
                    "content": parsed_line["selftext"],
                    "url": parsed_line["url"],
                    "upvotes": parsed_line["ups"],
                    "posted_date": post_date,
                }

                all_content_curr_subreddit.append(post)

        # sort posts within the subreddit by upvotes in descending order
        all_content_curr_subreddit.sort(key=lambda x: x["upvotes"], reverse=True)

        all_content.extend(all_content_curr_subreddit[:limit_per_subreddit])

    return all_content
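A usage sketch, assuming pre-downloaded dumps at <data_path>/<category>/<subreddit>.jsonl, with one JSON object per line carrying created_utc, title, selftext, url, and ups fields:

    posts = fetch_top_from_category(
        "company_news", "2024-05-01", max_limit=10, query="AAPL", data_path="reddit_data"
    )
    # top posts per subreddit for that day, sorted by upvotes

Note that the ticker_to_company terms are used as raw regex patterns, so an entry like "Snap Inc." (with a dot) matches slightly more than the literal string.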
tradingagents/dataflows/stockstats_utils.py (new file, 87 lines)
@@ -0,0 +1,87 @@
import pandas as pd
import yfinance as yf
from stockstats import wrap
from typing import Annotated
import os
from .config import get_config


class StockstatsUtils:
    @staticmethod
    def get_stock_stats(
        symbol: Annotated[str, "ticker symbol for the company"],
        indicator: Annotated[
            str, "quantitative indicators based off of the stock data for the company"
        ],
        curr_date: Annotated[
            str, "curr date for retrieving stock price data, YYYY-mm-dd"
        ],
        data_dir: Annotated[
            str,
            "directory where the stock data is stored.",
        ],
        online: Annotated[
            bool,
            "whether to use online tools to fetch data or offline tools. If True, will use online tools.",
        ] = False,
    ):
        df = None
        data = None

        if not online:
            try:
                data = pd.read_csv(
                    os.path.join(
                        data_dir,
                        f"{symbol}-YFin-data-2015-01-01-2025-03-25.csv",
                    )
                )
                df = wrap(data)
            except FileNotFoundError:
                raise Exception("Stockstats fail: Yahoo Finance data not fetched yet!")
        else:
            # Use today's date so the cached download window stays stable for the day
            today_date = pd.Timestamp.today()
            curr_date = pd.to_datetime(curr_date)

            end_date = today_date
            start_date = today_date - pd.DateOffset(years=15)
            start_date = start_date.strftime("%Y-%m-%d")
            end_date = end_date.strftime("%Y-%m-%d")

            # Get config and ensure cache directory exists
            config = get_config()
            os.makedirs(config["data_cache_dir"], exist_ok=True)

            data_file = os.path.join(
                config["data_cache_dir"],
                f"{symbol}-YFin-data-{start_date}-{end_date}.csv",
            )

            if os.path.exists(data_file):
                data = pd.read_csv(data_file)
                data["Date"] = pd.to_datetime(data["Date"])
            else:
                data = yf.download(
                    symbol,
                    start=start_date,
                    end=end_date,
                    multi_level_index=False,
                    progress=False,
                    auto_adjust=True,
                )
                data = data.reset_index()
                data.to_csv(data_file, index=False)

            df = wrap(data)
            df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")
            curr_date = curr_date.strftime("%Y-%m-%d")

        df[indicator]  # trigger stockstats to calculate the indicator
        matching_rows = df[df["Date"].str.startswith(curr_date)]

        if not matching_rows.empty:
            indicator_value = matching_rows[indicator].values[0]
            return indicator_value
        else:
            return "N/A: Not a trading day (weekend or holiday)"
tradingagents/dataflows/utils.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import os
import json
import pandas as pd
from datetime import date, timedelta, datetime
from typing import Annotated

SavePathType = Annotated[str, "File path to save data. If None, data is not saved."]


def save_output(data: pd.DataFrame, tag: str, save_path: SavePathType = None) -> None:
    if save_path:
        data.to_csv(save_path)
        print(f"{tag} saved to {save_path}")


def get_current_date():
    return date.today().strftime("%Y-%m-%d")


def decorate_all_methods(decorator):
    def class_decorator(cls):
        for attr_name, attr_value in cls.__dict__.items():
            if callable(attr_value):
                setattr(cls, attr_name, decorator(attr_value))
        return cls

    return class_decorator


def get_next_weekday(date):
    if not isinstance(date, datetime):
        date = datetime.strptime(date, "%Y-%m-%d")

    if date.weekday() >= 5:
        # Saturday (5) or Sunday (6): jump forward to the coming Monday
        days_to_add = 7 - date.weekday()
        next_weekday = date + timedelta(days=days_to_add)
        return next_weekday
    else:
        return date
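A small sketch of decorate_all_methods, which is how yfin_utils.py wires init_ticker onto every method of YFinanceUtils (the logged decorator here is hypothetical):

    def logged(func):
        def wrapper(*args, **kwargs):
            print(f"calling {func.__name__}")
            return func(*args, **kwargs)
        return wrapper

    @decorate_all_methods(logged)
    class Demo:
        def greet(self):
            return "hi"

    Demo().greet()  # prints "calling greet" before returning "hi"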
tradingagents/dataflows/yfin_utils.py (new file, 117 lines)
@@ -0,0 +1,117 @@
# gets data/stats

import yfinance as yf
from typing import Annotated, Callable, Any, Optional
from pandas import DataFrame
import pandas as pd
from functools import wraps

from .utils import save_output, SavePathType, decorate_all_methods


def init_ticker(func: Callable) -> Callable:
    """Decorator to initialize yf.Ticker and pass it to the function."""

    @wraps(func)
    def wrapper(symbol: Annotated[str, "ticker symbol"], *args, **kwargs) -> Any:
        ticker = yf.Ticker(symbol)
        return func(ticker, *args, **kwargs)

    return wrapper


@decorate_all_methods(init_ticker)
class YFinanceUtils:

    def get_stock_data(
        symbol: Annotated[str, "ticker symbol"],
        start_date: Annotated[
            str, "start date for retrieving stock price data, YYYY-mm-dd"
        ],
        end_date: Annotated[
            str, "end date for retrieving stock price data, YYYY-mm-dd"
        ],
        save_path: SavePathType = None,
    ) -> DataFrame:
        """retrieve stock price data for designated ticker symbol"""
        # init_ticker has already replaced the symbol string with a yf.Ticker instance
        ticker = symbol
        # add one day to the end_date so that the data range is inclusive
        end_date = pd.to_datetime(end_date) + pd.DateOffset(days=1)
        end_date = end_date.strftime("%Y-%m-%d")
        stock_data = ticker.history(start=start_date, end=end_date)
        # save_output(stock_data, f"Stock data for {ticker.ticker}", save_path)
        return stock_data

    def get_stock_info(
        symbol: Annotated[str, "ticker symbol"],
    ) -> dict:
        """Fetches and returns latest stock information."""
        ticker = symbol
        stock_info = ticker.info
        return stock_info

    def get_company_info(
        symbol: Annotated[str, "ticker symbol"],
        save_path: Optional[str] = None,
    ) -> DataFrame:
        """Fetches and returns company information as a DataFrame."""
        ticker = symbol
        info = ticker.info
        company_info = {
            "Company Name": info.get("shortName", "N/A"),
            "Industry": info.get("industry", "N/A"),
            "Sector": info.get("sector", "N/A"),
            "Country": info.get("country", "N/A"),
            "Website": info.get("website", "N/A"),
        }
        company_info_df = DataFrame([company_info])
        if save_path:
            company_info_df.to_csv(save_path)
            print(f"Company info for {ticker.ticker} saved to {save_path}")
        return company_info_df

    def get_stock_dividends(
        symbol: Annotated[str, "ticker symbol"],
        save_path: Optional[str] = None,
    ) -> DataFrame:
        """Fetches and returns the latest dividends data as a DataFrame."""
        ticker = symbol
        dividends = ticker.dividends
        if save_path:
            dividends.to_csv(save_path)
            print(f"Dividends for {ticker.ticker} saved to {save_path}")
        return dividends

    def get_income_stmt(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
        """Fetches and returns the latest income statement of the company as a DataFrame."""
        ticker = symbol
        income_stmt = ticker.financials
        return income_stmt

    def get_balance_sheet(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
        """Fetches and returns the latest balance sheet of the company as a DataFrame."""
        ticker = symbol
        balance_sheet = ticker.balance_sheet
        return balance_sheet

    def get_cash_flow(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
        """Fetches and returns the latest cash flow statement of the company as a DataFrame."""
        ticker = symbol
        cash_flow = ticker.cashflow
        return cash_flow

    def get_analyst_recommendations(symbol: Annotated[str, "ticker symbol"]) -> tuple:
        """Fetches the latest analyst recommendations and returns the most common recommendation and its count."""
        ticker = symbol
        recommendations = ticker.recommendations
        if recommendations.empty:
            return None, 0  # No recommendations available

        # Assuming 'period' column exists and needs to be excluded
        row_0 = recommendations.iloc[0, 1:]  # Exclude 'period' column if necessary

        # Find the maximum voting result
        max_votes = row_0.max()
        majority_voting_result = row_0[row_0 == max_votes].index.tolist()

        return majority_voting_result[0], max_votes
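Usage sketch: because init_ticker converts the first argument, callers pass a plain ticker string and the method body receives a ready yf.Ticker:

    prices = YFinanceUtils.get_stock_data("AAPL", "2024-01-01", "2024-02-01")
    rec, votes = YFinanceUtils.get_analyst_recommendations("AAPL")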