Merge branch 'main' into 38-news-problem

This commit is contained in:
2025-10-27 21:10:02 +01:00
59 changed files with 11204 additions and 1504 deletions

View File

@@ -5,6 +5,15 @@
# https://makersuite.google.com/app/apikey
GOOGLE_API_KEY=
# https://platform.openai.com/settings/organization/api-keys
OPENAI_API_KEY=
# https://admin.mistral.ai/organization/api-keys
MISTRAL_API_KEY=
# https://platform.deepseek.com/api_keys
DEEPSEEK_API_KEY=
###############################################################################
# Configurazioni per gli agenti di mercato
@@ -42,6 +51,9 @@ CRYPTOPANIC_API_KEY=
REDDIT_API_CLIENT_ID=
REDDIT_API_CLIENT_SECRET=
# https://www.npmjs.com/package/rettiwt-api
X_API_KEY=
###############################################################################
# Configurazioni per API di messaggistica

View File

@@ -1,6 +1,10 @@
# Utilizziamo Debian slim invece di Alpine per migliore compatibilità
FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
# Installiamo le dipendenze di sistema
RUN apt update && \
apt install -y curl && \
rm -rf /var/lib/apt/lists/*
# Installiamo uv
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -15,6 +19,11 @@ COPY uv.lock ./
RUN uv sync --frozen --no-dev
ENV PYTHONPATH="./src"
# Installiamo le dipendenze per X (rettiwt, nodejs e npm)
RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash -
RUN apt install -y nodejs && rm -rf /var/lib/apt/lists/*
RUN npm install -g rettiwt-api
# Copiamo i file del progetto
COPY LICENSE ./
COPY src/ ./src/

View File

@@ -19,6 +19,15 @@ models:
label: Gemini
# - name: gemini-2.0-pro # TODO Non funziona, ha un nome diverso
# label: Gemini Pro
gpt:
- name: gpt-4o
label: OpenAIChat
deepseek:
- name: deepseek-chat
label: DeepSeek
mistral:
- name: mistral-large-latest
label: Mistral
ollama:
- name: gpt-oss:latest
label: Ollama GPT
@@ -32,14 +41,13 @@ models:
api:
retry_attempts: 3
retry_delay_seconds: 2
currency: USD
# TODO Magari implementare un sistema per settare i providers
market_providers: [BinanceWrapper, YFinanceWrapper]
news_providers: [GoogleNewsWrapper, DuckDuckGoWrapper]
social_providers: [RedditWrapper]
market_providers: [YFinanceWrapper, BinanceWrapper, CoinBaseWrapper, CryptoCompareWrapper]
news_providers: [DuckDuckGoWrapper, GoogleNewsWrapper, NewsApiWrapper, CryptoPanicWrapper]
social_providers: [RedditWrapper, XWrapper, ChanWrapper]
agents:
strategy: Conservative
team_model: qwen3:1.7b
team_leader_model: qwen3:4b
predictor_model: qwen3:4b
team_leader_model: qwen3:8b
query_analyzer_model: qwen3:4b
report_generation_model: qwen3:8b

View File

@@ -8,9 +8,7 @@ try:
reasoning_agent = Agent(
model=Gemini(),
tools=[
ReasoningTools(),
],
tools=[ReasoningTools()],
instructions="Use tables to display data.",
markdown=True,
)

View File

@@ -0,0 +1,12 @@
from dotenv import load_dotenv
from app.api.tools import MarketAPIsTool
def main():
    """Demo: print an aggregated view of the last few BTC candles.

    Uses MarketAPIsTool to fetch `limit` aggregated historical prices for
    BTC and prints one low/high line per candle.
    """
    tool = MarketAPIsTool()
    candles = tool.get_historical_prices_aggregated("BTC", limit=5)
    for price in candles:
        print(f"== [{price.timestamp}] {price.low:.2f} - {price.high:.2f} ==")


if __name__ == "__main__":
    # Load API keys from .env before touching any provider.
    load_dotenv()
    main()

View File

@@ -0,0 +1,16 @@
from dotenv import load_dotenv
from app.api.tools import NewsAPIsTool
def main():
    """Demo: fetch the latest bitcoin news per provider and print summaries.

    NewsAPIsTool returns a mapping of provider name -> list of articles;
    each article is printed with its timestamp, title and description.
    """
    tool = NewsAPIsTool()
    by_provider = tool.get_latest_news_aggregated(query="bitcoin", limit=2)
    for provider, articles in by_provider.items():
        print("===================================")
        print(f"Provider: {provider}")
        for article in articles:
            print(f"== [{article.time}] {article.title} ==")
            print(f"   {article.description}")


if __name__ == "__main__":
    # Load API keys from .env before touching any provider.
    load_dotenv()
    main()

View File

@@ -0,0 +1,17 @@
from dotenv import load_dotenv
from app.api.tools import SocialAPIsTool
def main():
    """Demo: print top crypto posts per social provider with comment counts.

    SocialAPIsTool returns a mapping of provider name -> list of posts;
    each post is printed with time, title, description and number of comments.
    """
    tool = SocialAPIsTool()
    by_provider = tool.get_top_crypto_posts_aggregated(limit_per_wrapper=2)
    for provider, posts in by_provider.items():
        print("===================================")
        print(f"Provider: {provider}")
        for post in posts:
            print(f"== [{post.time}] - {post.title} ==")
            print(f"   {post.description}")
            print(f"   {len(post.comments)}")


if __name__ == "__main__":
    # Load API keys from .env before touching any provider.
    load_dotenv()
    main()

View File

@@ -1,353 +0,0 @@
#!/usr/bin/env python3
"""
Demo Completo per Market Data Providers
========================================
Questo script dimostra l'utilizzo di tutti i wrapper che implementano BaseWrapper:
- CoinBaseWrapper (richiede credenziali)
- CryptoCompareWrapper (richiede API key)
- BinanceWrapper (richiede credenziali)
- PublicBinanceAgent (accesso pubblico)
- YFinanceWrapper (accesso gratuito a dati azionari e crypto)
Lo script effettua chiamate GET a diversi provider e visualizza i dati
in modo strutturato con informazioni dettagliate su timestamp, stato
delle richieste e formattazione tabellare.
"""
import sys
import os
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Optional, Any
import traceback
# Aggiungi il path src al PYTHONPATH
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root / "src"))
from dotenv import load_dotenv
from app.api.markets import (
CoinBaseWrapper,
CryptoCompareWrapper,
BinanceWrapper,
YFinanceWrapper,
MarketWrapper
)
# Carica variabili d'ambiente
load_dotenv()
class DemoFormatter:
    """Static helpers that format the demo's console output.

    Provides full-width headers, per-request metadata lines and fixed-width
    tables for product and OHLCV price data. All methods only print; they
    return nothing.
    """

    @staticmethod
    def print_header(title: str, char: str = "=", width: int = 80):
        """Print *title* centred between two full-width rules."""
        rule = char * width
        print(f"\n{rule}")
        print(f"{title:^{width}}")
        print(rule)

    @staticmethod
    def print_subheader(title: str, char: str = "-", width: int = 60):
        """Print *title* left-aligned between two shorter rules."""
        rule = char * width
        print(f"\n{rule}")
        print(f" {title}")
        print(rule)

    @staticmethod
    def print_request_info(provider_name: str, method: str, timestamp: datetime,
                           status: str, error: Optional[str] = None):
        """Print one request's metadata; the error line appears only on failure."""
        print(f"🕒 Timestamp: {timestamp.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"🏷️ Provider: {provider_name}")
        print(f"🔧 Method: {method}")
        print(f"📊 Status: {status}")
        if error:
            print(f"❌ Error: {error}")
        print()

    @staticmethod
    def print_product_table(products: List[Any], title: str = "Products"):
        """Render at most the first ten products as a fixed-width table."""
        if not products:
            print(f"📋 {title}: Nessun prodotto trovato")
            return
        print(f"📋 {title} ({len(products)} items):")
        print(f"{'Symbol':<15} {'ID':<20} {'Price':<12} {'Quote':<10} {'Status':<10}")
        print("-" * 67)
        for item in products[:10]:
            sym = getattr(item, 'symbol', 'N/A')
            pid = getattr(item, 'id', 'N/A')
            px = getattr(item, 'price', 0.0)
            quote = getattr(item, 'quote_currency', 'N/A')
            state = getattr(item, 'status', 'N/A')
            # Shorten over-long identifiers so the column stays aligned.
            if len(pid) > 18:
                pid = pid[:15] + "..."
            px_str = f"${px:.2f}" if px > 0 else "N/A"
            print(f"{sym:<15} {pid:<20} {px_str:<12} {quote:<10} {state:<10}")
        hidden = len(products) - 10
        if hidden > 0:
            print(f"... e altri {hidden} prodotti")
        print()

    @staticmethod
    def print_prices_table(prices: List[Any], title: str = "Historical Prices"):
        """Render at most the first five OHLCV entries as a fixed-width table."""
        if not prices:
            print(f"💰 {title}: Nessun prezzo trovato")
            return
        print(f"💰 {title} ({len(prices)} entries):")
        print(f"{'Time':<12} {'Open':<12} {'High':<12} {'Low':<12} {'Close':<12} {'Volume':<15}")
        print("-" * 75)
        for row in prices[:5]:
            when = getattr(row, 'time', 'N/A')
            # 'time' is already a string here, so truncate rather than strftime.
            if len(when) > 10:
                when = when[:10]
            open_s = f"${getattr(row, 'open', 0):.2f}"
            high_s = f"${getattr(row, 'high', 0):.2f}"
            low_s = f"${getattr(row, 'low', 0):.2f}"
            close_s = f"${getattr(row, 'close', 0):.2f}"
            vol_s = f"{getattr(row, 'volume', 0):,.0f}"
            print(f"{when:<12} {open_s:<12} {high_s:<12} {low_s:<12} {close_s:<12} {vol_s:<15}")
        hidden = len(prices) - 5
        if hidden > 0:
            print(f"... e altri {hidden} prezzi")
        print()
class ProviderTester:
    """Exercises every supported method of a market wrapper and records outcomes."""

    def __init__(self):
        # Formatter used for all console output; fixed symbol set for the runs.
        self.formatter = DemoFormatter()
        self.test_symbols = ["BTC", "ETH", "ADA"]

    def test_provider(self, wrapper: MarketWrapper, provider_name: str) -> Dict[str, Any]:
        """Run get_product (per symbol), get_products and get_historical_prices.

        Returns a dict with the provider name, a per-test result map, and an
        overall status of "SUCCESS" or "PARTIAL" (if any call raised).
        """
        outcome: Dict[str, Any] = {
            "provider_name": provider_name,
            "tests": {},
            "overall_status": "SUCCESS",
        }
        self.formatter.print_subheader(f"🔍 Testing {provider_name}")

        # --- get_product: one call per test symbol -------------------------
        for symbol in self.test_symbols:
            started = datetime.now()
            try:
                product = wrapper.get_product(symbol)
                self.formatter.print_request_info(
                    provider_name, f"get_product({symbol})", started, "✅ SUCCESS"
                )
                if product:
                    print(f"📦 Product: {product.symbol} (ID: {product.id})")
                    print(f"   Price: ${product.price:.2f}, Quote: {product.currency}")
                    print(f"   Volume 24h: {product.volume_24h:,.2f}")
                else:
                    print(f"📦 Product: Nessun prodotto trovato per {symbol}")
                outcome["tests"][f"get_product_{symbol}"] = "SUCCESS"
            except Exception as exc:
                reason = str(exc)
                self.formatter.print_request_info(
                    provider_name, f"get_product({symbol})", started, "❌ ERROR", reason
                )
                outcome["tests"][f"get_product_{symbol}"] = f"ERROR: {reason}"
                outcome["overall_status"] = "PARTIAL"

        # --- get_products: single bulk call --------------------------------
        started = datetime.now()
        try:
            listing = wrapper.get_products(self.test_symbols)
            self.formatter.print_request_info(
                provider_name, f"get_products({self.test_symbols})", started, "✅ SUCCESS"
            )
            self.formatter.print_product_table(listing, f"{provider_name} Products")
            outcome["tests"]["get_products"] = "SUCCESS"
        except Exception as exc:
            reason = str(exc)
            self.formatter.print_request_info(
                provider_name, f"get_products({self.test_symbols})", started, "❌ ERROR", reason
            )
            outcome["tests"]["get_products"] = f"ERROR: {reason}"
            outcome["overall_status"] = "PARTIAL"

        # --- get_historical_prices: BTC only --------------------------------
        started = datetime.now()
        try:
            history = wrapper.get_historical_prices("BTC")
            self.formatter.print_request_info(
                provider_name, "get_historical_prices(BTC)", started, "✅ SUCCESS"
            )
            self.formatter.print_prices_table(history, f"{provider_name} BTC Historical Prices")
            outcome["tests"]["get_historical_prices"] = "SUCCESS"
        except Exception as exc:
            reason = str(exc)
            self.formatter.print_request_info(
                provider_name, "get_historical_prices(BTC)", started, "❌ ERROR", reason
            )
            outcome["tests"]["get_historical_prices"] = f"ERROR: {reason}"
            outcome["overall_status"] = "PARTIAL"

        return outcome
def check_environment_variables() -> Dict[str, bool]:
    """Report which provider credentials are present in the environment.

    Returns a mapping of variable name -> True if the variable is set to a
    non-empty value, False otherwise.
    """
    required = (
        "COINBASE_API_KEY",
        "COINBASE_API_SECRET",
        "CRYPTOCOMPARE_API_KEY",
        "BINANCE_API_KEY",
        "BINANCE_API_SECRET",
    )
    return {name: bool(os.getenv(name)) for name in required}
def initialize_providers() -> Dict[str, MarketWrapper]:
    """Build every wrapper whose credentials are available, logging each outcome.

    Returns a mapping of display name -> wrapper instance. Wrappers whose
    construction raises are logged and omitted.
    """
    wrappers: Dict[str, MarketWrapper] = {}
    env_vars = check_environment_variables()

    # CryptoCompare requires a single API key.
    if env_vars["CRYPTOCOMPARE_API_KEY"]:
        try:
            wrappers["CryptoCompare"] = CryptoCompareWrapper()
            print("✅ CryptoCompareWrapper inizializzato con successo")
        except Exception as exc:
            print(f"❌ Errore nell'inizializzazione di CryptoCompareWrapper: {exc}")
    else:
        print("⚠️ CryptoCompareWrapper saltato: CRYPTOCOMPARE_API_KEY non trovata")

    # Coinbase requires both key and secret.
    if env_vars["COINBASE_API_KEY"] and env_vars["COINBASE_API_SECRET"]:
        try:
            wrappers["CoinBase"] = CoinBaseWrapper()
            print("✅ CoinBaseWrapper inizializzato con successo")
        except Exception as exc:
            print(f"❌ Errore nell'inizializzazione di CoinBaseWrapper: {exc}")
    else:
        print("⚠️ CoinBaseWrapper saltato: credenziali Coinbase non complete")

    # Binance is always attempted, regardless of credentials.
    try:
        wrappers["Binance"] = BinanceWrapper()
        print("✅ BinanceWrapper inizializzato con successo")
    except Exception as exc:
        print(f"❌ Errore nell'inizializzazione di BinanceWrapper: {exc}")

    # YFinance needs no credentials at all.
    try:
        wrappers["YFinance"] = YFinanceWrapper()
        print("✅ YFinanceWrapper inizializzato con successo")
    except Exception as exc:
        print(f"❌ Errore nell'inizializzazione di YFinanceWrapper: {exc}")

    return wrappers
def print_summary(results: List[Dict[str, Any]]):
    """Print aggregate provider counts followed by a per-provider breakdown.

    Each entry in *results* is the dict produced by ProviderTester.test_provider
    (or the critical-error fallback built in main()).
    """
    formatter = DemoFormatter()
    formatter.print_header("📊 RIASSUNTO FINALE", "=", 80)

    total = len(results)
    ok = sum(1 for r in results if r["overall_status"] == "SUCCESS")
    partial = sum(1 for r in results if r["overall_status"] == "PARTIAL")
    print(f"🔢 Provider testati: {total}")
    print(f"✅ Provider completamente funzionanti: {ok}")
    print(f"⚠️ Provider parzialmente funzionanti: {partial}")
    print(f"❌ Provider non funzionanti: {total - ok - partial}")

    print("\n📋 Dettaglio per provider:")
    for entry in results:
        status = entry["overall_status"]
        # NOTE(review): these icons appear as empty strings in the source —
        # possibly lost emoji; preserved as-is to keep output identical.
        icon = "" if status == "SUCCESS" else "⚠️" if status == "PARTIAL" else ""
        print(f"\n{icon} {entry['provider_name']}:")
        for test_name, verdict in entry["tests"].items():
            mark = "" if verdict == "SUCCESS" else ""
            print(f"   {mark} {test_name}: {verdict}")
def main():
    """Demo entry point: check config, initialize providers, run tests, summarize."""
    formatter = DemoFormatter()

    # Banner and short description of what the demo does.
    formatter.print_header("🚀 DEMO COMPLETO MARKET DATA PROVIDERS", "=", 80)
    print(f"🕒 Avvio demo: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("📝 Questo demo testa tutti i wrapper BaseWrapper disponibili")
    print("🔍 Ogni test include timestamp, stato della richiesta e dati formattati")

    # Show which credentials are configured.
    formatter.print_subheader("🔐 Verifica Configurazione")
    env_vars = check_environment_variables()
    print("Variabili d'ambiente:")
    for var_name, is_present in env_vars.items():
        label = "✅ Presente" if is_present else "❌ Mancante"
        print(f"   {var_name}: {label}")

    # Build whatever providers the environment allows.
    formatter.print_subheader("🏗️ Inizializzazione Provider")
    providers = initialize_providers()
    if not providers:
        print("❌ Nessun provider disponibile. Verifica la configurazione.")
        return
    print(f"\n🎯 Provider disponibili per il test: {list(providers.keys())}")

    # Run the full test battery against each provider, collecting results.
    formatter.print_header("🧪 ESECUZIONE TEST PROVIDER", "=", 80)
    tester = ProviderTester()
    collected: List[Dict[str, Any]] = []
    for name, wrapper in providers.items():
        try:
            collected.append(tester.test_provider(wrapper, name))
        except Exception as exc:
            # Unexpected failure outside the per-call handlers: record and continue.
            print(f"❌ Errore critico nel test di {name}: {exc}")
            traceback.print_exc()
            collected.append({
                "provider_name": name,
                "tests": {},
                "overall_status": "CRITICAL_ERROR",
                "error": str(exc),
            })

    print_summary(collected)

    # Closing pointers for the reader.
    formatter.print_header(" INFORMAZIONI AGGIUNTIVE", "=", 80)
    print("📚 Documentazione:")
    print("   - BaseWrapper: src/app/markets/base.py")
    print("   - Test completi: tests/agents/test_market.py")
    print("   - Configurazione: .env")
    print("\n🔧 Per abilitare tutti i provider:")
    print("   1. Configura le credenziali nel file .env")
    print("   2. Segui la documentazione di ogni provider")
    print("   3. Riavvia il demo")
    print(f"\n🏁 Demo completato: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")


if __name__ == "__main__":
    main()

View File

@@ -1,18 +0,0 @@
#### FOR ALL FILES OUTSIDE src/ FOLDER ####
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../src')))
###########################################
from dotenv import load_dotenv
from app.api.news import NewsApiWrapper
def main():
    """Smoke-test NewsApiWrapper: fetch a few articles and confirm non-empty."""
    wrapper = NewsApiWrapper()
    articles = wrapper.get_latest_news(query="bitcoin", limit=5)
    assert len(articles) > 0
    print("ok")


if __name__ == "__main__":
    # Load API keys from .env before touching the provider.
    load_dotenv()
    main()

View File

@@ -1,20 +1,4 @@
#!/usr/bin/env python3
"""
Demo di Ollama (Python) mostra:
1. Elenco dei modelli disponibili
2. Generazione di testo semplice
3. Chat con streaming
4. Calcolo di embeddings
5. Esempio (opzionale) di function calling / tools
Uso:
python ollama_demo.py
Requisiti:
pip install ollama
Avviare il server Ollama (es. 'ollama serve' o l'app desktop) e avere i modelli già pullati.
"""
from typing import Any
import ollama
# Configurazione modelli
@@ -33,8 +17,8 @@ def list_models():
print(" (Nessun modello trovato)")
return
for m in models:
name = getattr(m, 'model', None) or (m.get('model') if isinstance(m, dict) else 'sconosciuto')
details = getattr(m, 'details', None)
name = getattr(m, 'model', None) or (m.get('model') if isinstance(m, dict) else 'sconosciuto') # type: ignore
details = getattr(m, 'details', None) # type: ignore
fmt = getattr(details, 'format', None) if details else 'unknown'
print(f"{name} {fmt}")
except Exception as e:
@@ -46,7 +30,7 @@ def list_models():
def generate_text(model: str, prompt: str, max_tokens: int = 200) -> str:
"""Genera testo dal modello indicato."""
print(f"\n[2] Generazione testo con '{model}'")
response = ollama.chat(
response = ollama.chat( # type: ignore
model=model,
messages=[{"role": "user", "content": prompt}]
)
@@ -57,10 +41,10 @@ def generate_text(model: str, prompt: str, max_tokens: int = 200) -> str:
# 3. Chat con streaming --------------------------------------------------------
def chat_streaming(model: str, messages: list) -> str:
def chat_streaming(model: str, messages: list[dict[str, str]]) -> str:
"""Esegue una chat mostrando progressivamente la risposta."""
print(f"\n[3] Chat (streaming) con '{model}'")
stream = ollama.chat(model=model, messages=messages, stream=True)
stream = ollama.chat(model=model, messages=messages, stream=True) # type: ignore
full = ""
for chunk in stream:
if 'message' in chunk and 'content' in chunk['message']:
@@ -91,7 +75,7 @@ def get_embedding(model: str, text: str):
def try_tools(model: str):
"""Esempio di function calling; se non supportato mostra messaggio informativo."""
print(f"\n[5] Function calling / tools con '{model}'")
tools = [
tools: list[dict[str, Any]] = [
{
"type": "function",
"function": {
@@ -109,7 +93,7 @@ def try_tools(model: str):
}
]
try:
response = ollama.chat(
response = ollama.chat( # type: ignore
model=model,
messages=[{"role": "user", "content": "Che tempo fa a Milano?"}],
tools=tools

View File

@@ -1,255 +1,160 @@
# 📊 Architettura e Flussi dell'App upo-appAI
# 📊 Architettura upo-appAI
## 🏗️ Diagramma Architettura Generale
## 🏗️ Architettura Generale
```
┌─────────────────────────────────────────────────────────────────┐
│ 🌐 GRADIO UI │
│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │
│ │ User Input │ │ Provider │ │ Style │
│ (Query) │ │ (Model) │ │ (Conservative/ │ │
│ │ │ │ │ │ Aggressive) │ │
│ └─────────────────┘ └─────────────────┘ └─────────────────┘ │
└─────────────────────────┬───────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│ 🔧 TOOL AGENT │
│ (Central Orchestrator) │
│ │
│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │
│ │ 1. Collect Data │ │ 2. Analyze │ │ 3. Predict & │ │
│ │ │ │ Sentiment │ │ Recommend │
└─────────────────┘ └─────────────────┘ └─────────────────┘ │
└─────────────────────────┬───────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│ 📊 AGENT ECOSYSTEM │
│ │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌───────────┐│
│ │ MARKET │ │ NEWS │ │ SOCIAL │ │ PREDICTOR ││
│ │ AGENT │ │ AGENT │ │ AGENT │ │ AGENT ││
│ │ │ │ │ │ │ │ ││
│ │ 📈 Coinbase │ │ 📰 News API │ │ 🐦 Social │ │ 🤖 LLM ││
│ │ 📊 CryptoCmp│ │ │ │ Media │ │ Analysis ││
│ │ 🟡 Binance │ │ │ │ │ │ ││
│ └─────────────┘ └─────────────┘ └─────────────┘ └───────────┘│
└─────────────────────────────────────────────────────────────────┘
INTERFACCE UTENTE
├── 💬 Gradio Web (Chat + Dropdown modelli/strategie)
└── 📱 Telegram Bot (Mini App)
CHAT MANAGER
├── Storico messaggi
├── Gestione PipelineInputs
└── Salva/Carica chat
AGNO WORKFLOW PIPELINE (4 Steps)
├── 1. Query Check → Verifica crypto
├── 2. Condition → Valida procedere
├── 3. Info Recovery → Team raccolta dati
└── 4. Report Generation → Report finale
AGNO AGENT ECOSYSTEM
├── 👔 TEAM LEADER (coordina Market, News, Social)
│ Tools: ReasoningTools, PlanMemoryTool, CryptoSymbolsTools
├── 📈 MARKET AGENT → MarketAPIsTool
├── 📰 NEWS AGENT → NewsAPIsTool
├── 🐦 SOCIAL AGENT → SocialAPIsTool
├── 🔍 QUERY CHECK AGENT → QueryOutputs (is_crypto: bool)
└── 📋 REPORT GENERATOR AGENT → Strategia applicata
```
## 🔄 Flusso di Esecuzione Dettagliato
## 🔄 Flusso Esecuzione
```
👤 USER REQUEST
│ "Analizza Bitcoin con strategia aggressiva"
┌─────────────────────────────────────────────────────────────┐
🔧 TOOL AGENT │
│ │
│ def interact(query, provider, style): │
│ │ │
│ ├── 📊 market_data = market_agent.analyze(query) │
│ ├── 📰 news_sentiment = news_agent.analyze(query) │
│ ├── 🐦 social_sentiment = social_agent.analyze(query) │
│ │ │
│ └── 🤖 prediction = predictor_agent.predict(...) │
└─────────────────────────────────────────────────────────────┘
📊 MARKET AGENT - Parallel Data Collection
┌─────────────────────────────────────────────────────────────┐
│ │
│ 🔍 Auto-detect Available Providers: │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ Coinbase │ │ CryptoComp │ │ Binance │ │
│ │ REST │ │ API │ │ Mock │ │
│ │ │ │ │ │ │ │
│ │ ✅ Active │ │ ✅ Active │ │ ✅ Active │ │
│ │ $63,500 BTC │ │ $63,450 BTC │ │ $63,600 BTC │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
│ │
│ 📈 Aggregated Result: │
│ { │
│ "aggregated_data": { │
│ "BTC_USD": { │
│ "price": 63516.67, │
│ "confidence": 0.94, │
│ "sources_count": 3 │
│ } │
│ }, │
│ "individual_sources": {...}, │
│ "market_signals": {...} │
│ } │
└─────────────────────────────────────────────────────────────┘
📰 NEWS AGENT + 🐦 SOCIAL AGENT
┌─────────────────────────────────────────────────────────────┐
│ │
│ 📰 News Sentiment: "Positive momentum, institutional │
│ adoption increasing..." │
│ │
│ 🐦 Social Sentiment: "Bullish sentiment on Reddit, │
│ Twitter mentions up 15%..." │
└─────────────────────────────────────────────────────────────┘
🤖 PREDICTOR AGENT
┌─────────────────────────────────────────────────────────────┐
│ │
│ Input: │
│ ├── 📊 Market Data (aggregated + confidence) │
│ ├── 📰🐦 Combined Sentiment │
│ ├── 🎯 Style: "aggressive" │
│ └── 🤖 Provider: "openai/anthropic/google..." │
│ │
│ 🧠 LLM Processing: │
│ "Based on high confidence market data (0.94) showing │
│ $63,516 BTC with positive sentiment across news and │
│ social channels, aggressive strategy recommendation..." │
└─────────────────────────────────────────────────────────────┘
📋 FINAL OUTPUT
┌─────────────────────────────────────────────────────────────┐
│ 📊 Market Data Summary │
│ 📰🐦 Sentiment Analysis │
│ 📈 Final Recommendation: │
│ "Strong BUY signal with 85% confidence..." │
└─────────────────────────────────────────────────────────────┘
**Input:** "Analizza Bitcoin con strategia aggressiva"
1. CHAT MANAGER riceve e prepara PipelineInputs
2. WORKFLOW PIPELINE esegue 4 step:
- Query Check: valida `is_crypto: true`
- Condition: se false, termina
- Info Recovery: Team raccoglie dati
- Report Generation: genera report
3. OUTPUT: Report con analisi + raccomandazioni
## 🏛️ Architettura API
**Tools (Agno Toolkit):**
- MarketAPIsTool: Binance, YFinance, CoinBase, CryptoCompare
- NewsAPIsTool: NewsAPI, GoogleNews, DuckDuckGo, CryptoPanic
- SocialAPIsTool: Reddit, X, 4chan
- CryptoSymbolsTools: `resources/cryptos.csv`
**WrapperHandler:** Failover automatico (3 tentativi/wrapper, 2s delay)
## 📊 Data Aggregation
**ProductInfo:**
- Volume: media tra sources
- Price: weighted average (price × volume)
- Confidence: spread + numero sources
**Historical Price:**
- Align per timestamp
- Media: high, low, open, close, volume
## 🎯 Configuration
**configs.yaml:**
```yaml
port: 8000
models: [Ollama, OpenAI, Anthropic, Google]
strategies: [Conservative, Aggressive]
agents: {team_model, team_leader_model, ...}
api: {retry_attempts: 3, retry_delay_seconds: 2}
```
## 🏛️ Architettura dei Provider (Market Agent)
**.env (API Keys):**
- Market: CDP_API_KEY, CRYPTOCOMPARE_API_KEY, ...
- News: NEWS_API_KEY, CRYPTOPANIC_API_KEY, ...
- Social: REDDIT_CLIENT_ID, X_API_KEY, ...
- LLM: OPENAI_API_KEY, ANTHROPIC_API_KEY, ...
- Bot: TELEGRAM_BOT_TOKEN
## 🗂️ Struttura Progetto
```
┌─────────────────────────────────────────────────────────────────┐
│ 📊 MARKET AGENT │
│ │
│ 🔍 Provider Detection Logic: │
┌─────────────────────────────────────────────────────────────┐│
│ def _setup_providers(): ││
│ ├── 🔑 Check CDP_API_KEY_NAME + CDP_API_PRIVATE_KEY ││
│ │ │ └── ✅ Setup Coinbase Advanced Trade ││
│ │ ├── 🔑 Check CRYPTOCOMPARE_API_KEY ││
│ │ └── ✅ Setup CryptoCompare ││
│ └── 🔑 Check BINANCE_API_KEY (future) ││
└── ✅ Setup Binance API ││
└─────────────────────────────────────────────────────────────┘│
📡 Data Flow: │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ Provider 1 │───▶│ │◀───│ Provider 2 │ │
│ │ Coinbase │ │ AGGREGATOR │ │ CryptoComp │ │
│ │ │ │ │ │ │ │
│ │ Real-time │ │ ┌─────────┐ │ │ Real-time │ │
│ │ Market Data │ │ │Confidence│ │ │ Market Data │ │
│ └─────────────┘ │ │Scoring │ │ └─────────────┘ │
│ │ │ │ │ │
│ ┌─────────────┐ │ │ Spread │ │ ┌─────────────┐ │
│ │ Provider 3 │───▶│ │Analysis │ │◀───│ Provider N │ │
│ │ Binance │ │ │ │ │ │ Future │ │
│ │ │ │ └─────────┘ │ │ │ │
│ │ Mock Data │ │ │ │ │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
└─────────────────────────────────────────────────────────────────┘
src/app/
├── __main__.py
├── configs.py
├── agents/
├── core.py
├── pipeline.py
│ ├── plan_memory_tool.py
│ └── prompts/
├── api/
├── wrapper_handler.py
├── core/ (markets, news, social)
├── markets/ (Binance, CoinBase, CryptoCompare, YFinance)
├── news/ (NewsAPI, GoogleNews, DuckDuckGo, CryptoPanic)
├── social/ (Reddit, X, 4chan)
└── tools/ (Agno Toolkits)
└── interface/ (chat.py, telegram_app.py)
tests/
demos/
resources/cryptos.csv
docs/
configs.yaml
.env
```
## 🔧 Signers Architecture
## 🔑 Componenti Chiave
```
┌─────────────────────────────────────────────────────────────────┐
│ 🔐 SIGNERS ECOSYSTEM │
│ │
│ 📁 src/app/signers/market_signers/ │
│ │ │
│ ├── 🏦 coinbase_rest_signer.py │
│ │ ├── 🔑 Uses: CDP_API_KEY_NAME + CDP_API_PRIVATE_KEY │
│ │ ├── 📡 RESTClient from coinbase.rest │
│ │ ├── 📊 get_asset_info() → Real Coinbase data │
│ │ └── 📈 get_multiple_assets() → Bulk data │
│ │ │
│ ├── 📊 cryptocompare_signer.py │
│ │ ├── 🔑 Uses: CRYPTOCOMPARE_API_KEY │
│ │ ├── 📡 Direct HTTP requests │
│ │ ├── 💰 get_crypto_prices() → Multi-currency │
│ │ └── 🏆 get_top_cryptocurrencies() → Market cap │
│ │ │
│ └── 🟡 binance_signer.py │
│ ├── 🔑 Uses: BINANCE_API_KEY (future) │
│ ├── 📡 Mock implementation │
│ ├── 🎭 Simulated market data │
│ └── 📈 Compatible interface │
└─────────────────────────────────────────────────────────────────┘
1. **Agno Framework**: Agent, Team, Workflow, Toolkit, RunEvent
2. **WrapperHandler**: Failover, Retry logic, Type safety
3. **Data Aggregation**: Multiple sources, Confidence score
4. **Multi-Interface**: Gradio + Telegram
5. **Configuration**: configs.yaml + .env
## 🚀 Deployment
**Docker:**
```bash
docker-compose up --build -d
```
## 🚀 Future Enhancement: Async Flow
```
📱 USER REQUEST
🔧 TOOL AGENT (async)
┌────────────────┼────────────────┐
│ │ │
▼ ▼ ▼
📊 Market 📰 News 🐦 Social
Agent (async) Agent (async) Agent (async)
│ │ │
┌────┼────┐ │ │
▼ ▼ ▼ │ │
Coinbase │ Binance │ │
CC │ │ │
▼▼▼ ▼ ▼
🔄 Parallel 📰 Sentiment 🐦 Sentiment
Aggregation Analysis Analysis
│ │ │
└────────────────┼────────────────┘
🤖 PREDICTOR AGENT
(LLM Analysis)
📋 FINAL RESULT
(JSON + Confidence)
**Local (UV):**
```bash
uv venv
uv pip install -e .
uv run src/app
```
## 📊 Data Flow Example
## 🎯 Workflow Asincrono
```
Input: "Analyze Bitcoin aggressive strategy"
├── 📊 Market Agent Output:
│ {
│ "aggregated_data": {
│ "BTC_USD": {"price": 63516.67, "confidence": 0.94}
│ },
│ "individual_sources": {
│ "coinbase": {"price": 63500, "volume": "1.2M"},
│ "cryptocompare": {"price": 63450, "volume": "N/A"},
│ "binance": {"price": 63600, "volume": "2.1M"}
│ },
│ "market_signals": {
│ "spread_analysis": "Low spread (0.24%) - healthy liquidity",
│ "price_divergence": "Max deviation: 0.24% - Normal range"
│ }
│ }
├── 📰 News Sentiment: "Positive institutional adoption news..."
├── 🐦 Social Sentiment: "Bullish Reddit sentiment, +15% mentions"
└── 🤖 Predictor Output:
"📈 Strong BUY recommendation based on:
- High confidence market data (94%)
- Positive news sentiment
- Bullish social indicators
- Low spread indicates healthy liquidity
```python
workflow = Workflow(steps=[
query_check, condition,
info_recovery, report_generation
])
Aggressive Strategy: Consider 15-20% portfolio allocation"
iterator = await workflow.arun(query, stream=True)
async for event in iterator:
if event.event == PipelineEvent.TOOL_USED:
log(f"Tool: {event.tool.tool_name}")
```
---
*Diagrammi creati: 2025-09-23*
*Sistema: upo-appAI Market Analysis Platform*
**Vantaggi:** Asincrono, Streaming, Condizionale, Retry
## 📈 Future Enhancements
- Parallel Tool Execution
- Caching (Redis)
- Database (PostgreSQL)
- Real-time WebSocket
- ML Models
- User Profiles
- Backtesting

View File

@@ -1,203 +0,0 @@
# 🚀 Diagramma Dettaglio: Implementazione Asincrona
## ⚡ Async Market Data Collection (Fase 3)
```
┌─────────────────────────────────────────────────────────────────┐
│ 🔧 TOOL AGENT │
│ │
│ async def interact(query, provider, style): │
│ │ │
│ ├── 📊 market_data = await market_agent.analyze_async() │
│ ├── 📰 news_data = await news_agent.analyze_async() │
│ ├── 🐦 social_data = await social_agent.analyze_async() │
│ │ │
│ └── 🤖 prediction = await predictor.predict_async(...) │
└─────────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│ 📊 MARKET AGENT - ASYNC IMPLEMENTATION │
│ │
│ async def analyze_async(self, query): │
│ symbols = extract_symbols(query) # ["BTC", "ETH"] │
│ │ │
│ └── 🔄 tasks = [ │
│ │ self._query_coinbase_async(symbols), │
│ │ self._query_cryptocompare_async(symbols), │
│ │ self._query_binance_async(symbols) │
│ │ ] │
│ │ │
│ └── 📊 results = await asyncio.gather(*tasks) │
│ │ │
│ ▼ │
│ 🧮 aggregate_results(results) │
└─────────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│ ⏱️ TIMING DIAGRAM │
│ │
│ Time: 0ms 500ms 1000ms 1500ms 2000ms │
│ │ │ │ │ │ │
│ 📡 Start all requests │
│ ├─────────────────────────────────────────┐ │
│ │ 🏦 Coinbase Request │ │
│ │ ✅ Response │ (1.2s) │
│ ├─────────────────────────────┐ │ │
│ │ 📊 CryptoCompare Request │ │ │
│ │ ✅ Response (0.8s) │ │
│ ├─────────────┐ │ │ │
│ │ 🟡 Binance │ │ │ │
│ │ ✅ Response (0.3s - mock) │ │ │
│ │ │ │ │ │
│ └─────────────┼───────────────┼───────────┘ │
│ │ │ │
│ Wait for all... │ │
│ │ │
│ 🧮 Aggregate (1.2s total) │
│ │
│ 📈 Performance Gain: │
│ Sequential: 1.2s + 0.8s + 0.3s = 2.3s │
│ Parallel: max(1.2s, 0.8s, 0.3s) = 1.2s │
│ Improvement: ~48% faster! 🚀 │
└─────────────────────────────────────────────────────────────────┘
```
## 🧮 Aggregation Algorithm Detail
```
┌─────────────────────────────────────────────────────────────────┐
│ 🔬 DATA AGGREGATION LOGIC │
│ │
│ def aggregate_market_data(results): │
│ │ │
│ ├── 📊 Input Data: │
│ │ ┌─────────────────────────────────────────────────┐ │
│ │ │ coinbase: {"BTC": 63500, "ETH": 4150} │ │
│ │ │ cryptocomp: {"BTC": 63450, "ETH": 4160} │ │
│ │ │ binance: {"BTC": 63600, "ETH": 4140} │ │
│ │ └─────────────────────────────────────────────────┘ │
│ │ │
│ ├── 🧮 Price Calculation: │
│ │ ┌─────────────────────────────────────────────────┐ │
│ │ │ BTC_prices = [63500, 63450, 63600] │ │
│ │ │ BTC_avg = 63516.67 │ │
│ │ │ BTC_std = 75.83 │ │
│ │ │ BTC_spread = (max-min)/avg = 0.24% │ │
│ │ └─────────────────────────────────────────────────┘ │
│ │ │
│ ├── 🎯 Confidence Scoring: │
│ │ ┌─────────────────────────────────────────────────┐ │
│ │ │ confidence = 1 - (std_dev / mean) │ │
│ │ │ if spread < 0.5%: confidence += 0.1 │ │
│ │ │ if sources >= 3: confidence += 0.05 │ │
│ │ │ BTC_confidence = 0.94 (excellent!) │ │
│ │ └─────────────────────────────────────────────────┘ │
│ │ │
│ └── 📈 Market Signals: │
│ ┌─────────────────────────────────────────────────┐ │
│ │ spread_analysis: │ │
│ │ "Low spread (0.24%) indicates healthy liq." │ │
│ │ volume_trend: │ │
│ │ "Combined volume: 4.1M USD" │ │
│ │ price_divergence: │ │
│ │ "Max deviation: 0.24% - Normal range" │ │
│ └─────────────────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────────┘
```
## 🔄 Error Handling & Resilience
```
┌─────────────────────────────────────────────────────────────────┐
│ 🛡️ RESILIENCE STRATEGY │
│ │
│ Scenario 1: One Provider Fails │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ 🏦 Coinbase: ✅ Success (BTC: $63500) │ │
│ │ 📊 CryptoComp: ❌ Timeout/Error │ │
│ │ 🟡 Binance: ✅ Success (BTC: $63600) │ │
│ │ │ │
│ │ Result: Continue with 2 sources │ │
│ │ Confidence: 0.89 (slightly reduced) │ │
│ │ Note: "CryptoCompare unavailable" │ │
│ └─────────────────────────────────────────────────────────┘ │
│ │
│ Scenario 2: Multiple Providers Fail │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ 🏦 Coinbase: ❌ API Limit │ │
│ │ 📊 CryptoComp: ✅ Success (BTC: $63450) │ │
│ │ 🟡 Binance: ❌ Network Error │ │
│ │ │ │
│ │ Result: Single source data │ │
│ │ Confidence: 0.60 (low - warn user) │ │
│ │ Note: "Limited data - consider waiting" │ │
│ └─────────────────────────────────────────────────────────┘ │
│ │
│ Scenario 3: All Providers Fail │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ 🏦 Coinbase: ❌ Maintenance │ │
│ │ 📊 CryptoComp: ❌ API Down │ │
│ │ 🟡 Binance: ❌ Rate Limit │ │
│ │ │ │
│ │ Result: Graceful degradation │ │
│ │ Message: "Market data temporarily unavailable" │ │
│ │ Fallback: Cached data (if available) │ │
│ └─────────────────────────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────────┘
```
## 📊 JSON Output Schema
```json
{
"aggregated_data": {
"BTC_USD": {
"price": 63516.67,
"confidence": 0.94,
"sources_count": 3,
"last_updated": "2025-09-23T17:30:00Z"
},
"ETH_USD": {
"price": 4150.33,
"confidence": 0.91,
"sources_count": 3,
"last_updated": "2025-09-23T17:30:00Z"
}
},
"individual_sources": {
"coinbase": {
"BTC": {"price": 63500, "volume": "1.2M", "status": "online"},
"ETH": {"price": 4150, "volume": "25.6M", "status": "online"}
},
"cryptocompare": {
"BTC": {"price": 63450, "volume": "N/A", "status": "active"},
"ETH": {"price": 4160, "volume": "N/A", "status": "active"}
},
"binance": {
"BTC": {"price": 63600, "volume": "2.1M", "status": "mock"},
"ETH": {"price": 4140, "volume": "18.3M", "status": "mock"}
}
},
"market_signals": {
"spread_analysis": "Low spread (0.24%) indicates healthy liquidity",
"volume_trend": "Combined BTC volume: 3.3M USD (+12% from avg)",
"price_divergence": "Max deviation: 0.24% - Normal range",
"data_quality": "High - 3 sources, low variance",
"recommendation": "Data suitable for trading decisions"
},
"metadata": {
"query_time_ms": 1247,
"sources_queried": ["coinbase", "cryptocompare", "binance"],
"sources_successful": ["coinbase", "cryptocompare", "binance"],
"sources_failed": [],
"aggregation_method": "weighted_average",
"confidence_threshold": 0.75
}
}
```
---
*Diagramma dettaglio asincrono: 2025-09-23*
*Focus: Performance, Resilienza, Qualità Dati*

View File

@@ -1,96 +0,0 @@
# 🚀 Piano di Implementazione - Market Data Enhancement
## 📋 Roadmap Implementazioni
### **Fase 1: Binance Mock Provider**
**Obiettivo**: Aggiungere terzo provider per test aggregazione
- ✅ Creare `binance_signer.py` con mock data
- ✅ Integrare nel MarketAgent
- ✅ Testare detection automatica provider
- **Deliverable**: 3 provider funzionanti (Coinbase, CryptoCompare, Binance)
### **Fase 2: Interrogazione Condizionale**
**Obiettivo**: Auto-detection credenziali e interrogazione intelligente
- ✅ Migliorare detection chiavi API nel MarketAgent
- ✅ Skip provider se credenziali mancanti (no errori)
- ✅ Logging informativo per provider disponibili/non disponibili
- ✅ Gestione graceful degradation
- **Deliverable**: Sistema resiliente che funziona con qualsiasi combinazione di provider
### **Fase 3: Interrogazione Asincrona + Aggregazione JSON**
**Obiettivo**: Performance boost e formato dati professionale
#### **3A. Implementazione Asincrona**
- ✅ Refactor MarketAgent per supporto `async/await`
- ✅ Chiamate parallele a tutti i provider disponibili
- ✅ Timeout management per provider lenti
- ✅ Error handling per provider che falliscono
#### **3B. Aggregazione Dati Intelligente**
- ✅ Calcolo `confidence` basato su concordanza prezzi
- ✅ Analisi `spread` tra provider
- ✅ Detection `price_divergence` per anomalie
- ✅ Volume trend analysis
- ✅ Formato JSON strutturato:
```json
{
"aggregated_data": {
"BTC_USD": {
"price": 43250.12,
"confidence": 0.95,
"sources_count": 4
}
},
"individual_sources": {
"coinbase": {"price": 43245.67, "volume": "1.2M"},
"binance": {"price": 43255.89, "volume": "2.1M"},
"cryptocompare": {"price": 43248.34, "volume": "0.8M"}
},
"market_signals": {
"spread_analysis": "Low spread (0.02%) indicates healthy liquidity",
"volume_trend": "Volume up 15% from 24h average",
"price_divergence": "Max deviation: 0.05% - Normal range"
}
}
```
**Deliverable**: Sistema asincrono con analisi avanzata dei dati di mercato
## 🎯 Benefici Attesi
### **Performance**
- ⚡ Tempo risposta: da ~4s sequenziali a ~1s paralleli
- 🔄 Resilienza: sistema funziona anche se 1-2 provider falliscono
- 📊 Qualità dati: validazione incrociata tra provider
### **Professionalità**
- 📈 Confidence scoring per decisioni informate
- 🔍 Market signals per trading insights
- 📋 Formato standardizzato per integrazioni future
### **Scalabilità**
- Facile aggiunta nuovi provider
- 🔧 Configurazione flessibile via environment
- 📝 Logging completo per debugging
## 🧪 Test Strategy
1. **Unit Tests**: Ogni provider singolarmente
2. **Integration Tests**: Aggregazione multi-provider
3. **Performance Tests**: Confronto sync vs async
4. **Resilience Tests**: Fallimento provider singoli
5. **E2E Tests**: Full workflow con UI Gradio
## 📅 Timeline Stimata
- **Fase 1**: ~1h (setup Binance mock)
- **Fase 2**: ~1h (detection condizionale)
- **Fase 3**: ~2-3h (async + aggregazione)
- **Testing**: ~1h (validation completa)
**Total**: ~5-6h di lavoro strutturato
---
*Documento creato: 2025-09-23*
*Versione: 1.0*

View File

@@ -1,73 +0,0 @@
# Guida alla Realizzazione del Progetto
Questa guida è una lista di controllo per l'implementazione del tuo progetto. È divisa in fasi logiche, ognuna con i compiti specifici da svolgere.
## Fase 1: Preparazione e Architettura di Base
### Impostazione dell'ambiente
* Scegliere il linguaggio di programmazione (es. **Python**).
* Utilizzare la libreria `agno` per la creazione di agenti e **LangChain/LlamaIndex** per la gestione dell'LLM e dell'orchestrazione.
### Definizione dell'Architettura degli agenti
* Definire la classe base per gli agenti, con metodi comuni come `execute()` e `reason()`.
* Delineare i ruoli e le interfacce di tutti gli agenti (`RicercatoreDati`, `AnalistaSentiment`, `MotorePredittivo`, `Orchestratore`), stabilendo come comunicheranno tra loro.
---
## Fase 2: Implementazione degli Agenti Core
### Agente `RicercatoreDati`
* Implementare la logica per connettersi a un'API di exchange (es. **Binance, Coindesk, CoinMarketCap**).
* Testare la capacità di recuperare dati in tempo reale per diverse criptovalute (prezzo, volume, capitalizzazione) e **assicurarsi che la gestione degli errori sia robusta**.
### Agente `AnalistaSentiment`
* **Agente `Social`:**
* Scegliere un metodo per lo scraping di forum e social media (es. **Reddit API, librerie per Twitter/X, BeautifulSoup per web scraping**).
* Implementare un modulo di analisi del testo per classificare il sentiment (positivo, negativo, neutro) utilizzando **modelli pre-addestrati (es. VADER) o fine-tuning di modelli più avanzati**.
* **Agente `News`:**
* Ottenere una chiave API per un servizio di notizie (es. **NewsAPI**).
* Implementare la logica per cercare articoli pertinenti a una criptovaluta specifica o al mercato in generale, e **filtrare le notizie in base a parole chiave rilevanti**.
### Agente `MotorePredittivo`
* Definire la logica per integrare i dati numerici del `RicercatoreDati` con il sentiment dell'`AnalistaSentiment`.
* Creare un **prompt avanzato** per l'LLM che lo guidi a generare previsioni e strategie. Dovrai usare tecniche come la **chain-of-thought** per rendere il ragionamento trasparente. Assicurarsi che il prompt includa vincoli specifici per lo stile di investimento (aggressivo/conservativo).
---
## Fase 3: Costruzione dell'Orchestratore e Test di Integrazione
### Implementazione dell'Agente Orchestratore
* **Gestione dell'Input Utente:** Creare un metodo che riceve la richiesta dell'utente (es. `analizza_cripto('Bitcoin', 'aggressivo')`). Analizzare il tipo di richiesta e le preferenze utente.
* **Recupero della Memoria Utente:** Integrare la logica per recuperare la cronologia delle richieste passate dal database e preparare i dati come contesto aggiuntivo per l'LLM.
* **Orchestrazione e Flusso di Lavoro:** Chiamare gli agenti (`RicercatoreDati`, `AnalistaSentiment`) e passare i risultati combinati all'**Agente `MotorePredittivo`** per generare previsioni e strategie.
* **Valutazione e Selezione Strategica:** Ricevere le previsioni dal `MotorePredittivo` e applicare le regole di valutazione basate sulle preferenze dell'utente per selezionare le strategie più appropriate.
* **Presentazione e Persistenza:** Costruire il report finale e salvare la sessione completa nel database.
---
## Fase 4: Gestione della Persistenza e dell'Interfaccia Utente
* **Database per la persistenza:** Scegli un database (es. **Firestore, MongoDB, PostgreSQL**) per salvare la cronologia delle richieste degli utenti. Implementa la logica per salvare e recuperare le sessioni di consulenza passate, associandole a un ID utente, e **struttura i dati per una ricerca efficiente**.
* **Interfaccia utente (UI):** Costruisci un'interfaccia utente semplice e intuitiva che permetta di inserire i parametri di richiesta. Aggiungi una sezione per visualizzare i risultati, inclusi i grafici e le note che spiegano il ragionamento dell'agente.
---
## Fase 5: Test del Sistema
* **Test unitari:** Esegui test su ogni agente singolarmente per assicurarti che funzioni correttamente (es. l'agente `RicercatoreDati` recupera i dati, l'agente `AnalistaSentiment` classifica correttamente un testo). **Crea dei mock per le API esterne per testare la logica interna senza dipendenze esterne**.
* **Test di integrazione:** Esegui scenari di test completi per l'intero sistema. Verifica che l'orchestrazione tra gli agenti avvenga senza intoppi e che i dati vengano passati correttamente tra di essi.
---
## Fase 6: Valutazione dei Risultati
* **Valutazione della qualità:** Verifica la qualità delle raccomandazioni generate. L'output è logico e ben argomentato?
* **Trasparenza del ragionamento:** Controlla che le note (`Ragionamenti`) siano chiare e forniscano un'effettiva trasparenza del processo decisionale dell'agente.
* **Confronto e validazione:** Confronta le raccomandazioni con dati storici e scenari ipotetici per valutarne la plausibilità.

View File

@@ -14,6 +14,7 @@ dependencies = [
"dotenv", # Gestire variabili d'ambiente (generalmente API keys od opzioni)
"gradio", # UI web semplice con user_input e output
"colorlog", # Log colorati in console
"html5lib", # Parsing HTML & Scraping
# Per costruire agenti (ovvero modelli che possono fare più cose tramite tool) https://github.com/agno-agi/agno
# altamente consigliata dato che ha anche tools integrati per fare scraping, calcoli e molto altro
@@ -23,6 +24,9 @@ dependencies = [
# Modelli supportati e installati (aggiungere qui sotto quelli che si vogliono usare)
"google-genai",
"ollama",
"openai",
"mistralai",
"deepseek",
# API di exchange di criptovalute
"coinbase-advanced-py",

9570
resources/cryptos.csv Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
from app.agents.predictor import PredictorInput, PredictorOutput
from app.agents.pipeline import Pipeline, PipelineInputs, PipelineEvent
from app.agents.pipeline import Pipeline, PipelineEvent
from app.agents.core import PipelineInputs, QueryOutputs
__all__ = ["PredictorInput", "PredictorOutput", "Pipeline", "PipelineInputs", "PipelineEvent"]
__all__ = ["Pipeline", "PipelineInputs", "PipelineEvent", "QueryOutputs"]

198
src/app/agents/core.py Normal file
View File

@@ -0,0 +1,198 @@
from pydantic import BaseModel
from agno.agent import Agent
from agno.team import Team
from agno.tools.reasoning import ReasoningTools
from app.agents.plan_memory_tool import PlanMemoryTool
from app.api.tools import *
from app.configs import AppConfig
from app.agents.prompts import *
class QueryInputs(BaseModel):
    """Input payload for the query-check step: the raw user query plus the chosen strategy."""
    # Natural-language query as typed by the user.
    user_query: str
    # Strategy text passed to the agent (label or description of the chosen strategy
    # — callers in this file pass both forms; confirm which one is canonical).
    strategy: str
class QueryOutputs(BaseModel):
    """Structured output of the query-check agent."""
    # The agent's textual answer; returned directly to the user when the
    # workflow stops early.
    response: str
    # True when the query is crypto-related; the workflow's condition step
    # stops the run when this is False.
    is_crypto: bool
class PipelineInputs:
    """
    Groups the inputs needed by the agent Pipeline.

    Wraps an AppConfig and keeps the currently selected models and strategy,
    exposing small handlers used by the UI dropdowns to change them.
    """

    def __init__(self, configs: AppConfig | None = None) -> None:
        """
        Inputs for the agent Pipeline.

        Sets default values when not specified.

        Args:
            configs: Application configuration; a fresh AppConfig is built when None.
        """
        self.configs = configs if configs else AppConfig()
        agents = self.configs.agents
        # Resolve the configured model names to concrete model entries.
        self.team_model = self.configs.get_model_by_name(agents.team_model)
        self.team_leader_model = self.configs.get_model_by_name(agents.team_leader_model)
        self.query_analyzer_model = self.configs.get_model_by_name(agents.query_analyzer_model)
        self.report_generation_model = self.configs.get_model_by_name(agents.report_generation_model)
        self.strategy = self.configs.get_strategy_by_name(agents.strategy)
        # Filled in later by the UI before running the pipeline.
        self.user_query = ""

    # ======================
    # Dropdown handlers
    # ======================
    def choose_query_checker(self, index: int):
        """
        Selects the LLM model to use for the query analyzer.
        """
        assert 0 <= index < len(self.configs.models.all_models), "Index out of range for models list."
        self.query_analyzer_model = self.configs.models.all_models[index]

    def choose_team_leader(self, index: int):
        """
        Selects the LLM model to use for the Team Leader.
        """
        assert 0 <= index < len(self.configs.models.all_models), "Index out of range for models list."
        self.team_leader_model = self.configs.models.all_models[index]

    def choose_team(self, index: int):
        """
        Selects the LLM model to use for the Team members.
        """
        assert 0 <= index < len(self.configs.models.all_models), "Index out of range for models list."
        self.team_model = self.configs.models.all_models[index]

    def choose_report_generator(self, index: int):
        """
        Selects the LLM model to use for the report generator.
        """
        assert 0 <= index < len(self.configs.models.all_models), "Index out of range for models list."
        self.report_generation_model = self.configs.models.all_models[index]

    def choose_strategy(self, index: int):
        """
        Selects the strategy to use for the Team.
        """
        # Bounds check added for consistency with the model choosers above.
        assert 0 <= index < len(self.configs.strategies), "Index out of range for strategies list."
        self.strategy = self.configs.strategies[index]

    # ======================
    # Helpers
    # ======================
    def list_models_names(self) -> list[str]:
        """
        Returns the list of available model names (labels).
        """
        return [model.label for model in self.configs.models.all_models]

    def list_strategies_names(self) -> list[str]:
        """
        Returns the list of available strategy names (labels).
        """
        return [strat.label for strat in self.configs.strategies]

    def get_query_inputs(self) -> QueryInputs:
        """
        Returns the inputs for the query-check agent.
        """
        return QueryInputs(
            user_query=self.user_query,
            strategy=self.strategy.label,
        )

    # ======================
    # Agent getters
    # ======================
    def get_agent_team(self) -> Team:
        """Builds the crypto-analysis Team: market, news and socials agents led by the team-leader model."""
        market_agent = self.team_model.get_agent(MARKET_INSTRUCTIONS, "Market Agent", tools=[MarketAPIsTool()])
        news_agent = self.team_model.get_agent(NEWS_INSTRUCTIONS, "News Agent", tools=[NewsAPIsTool()])
        social_agent = self.team_model.get_agent(SOCIAL_INSTRUCTIONS, "Socials Agent", tools=[SocialAPIsTool()])
        return Team(
            model=self.team_leader_model.get_model(TEAM_LEADER_INSTRUCTIONS),
            name="CryptoAnalysisTeam",
            tools=[ReasoningTools(), PlanMemoryTool(), CryptoSymbolsTools()],
            members=[market_agent, news_agent, social_agent],
        )

    def get_agent_query_checker(self) -> Agent:
        """Builds the agent that validates the user query; its output follows QueryOutputs."""
        return self.query_analyzer_model.get_agent(QUERY_CHECK_INSTRUCTIONS, "Query Check Agent", output_schema=QueryOutputs)

    def get_agent_report_generator(self) -> Agent:
        """Builds the agent that writes the final report."""
        return self.report_generation_model.get_agent(REPORT_GENERATION_INSTRUCTIONS, "Report Generator Agent")

    def __str__(self) -> str:
        """Human-readable summary of the current configuration (shown in the run message)."""
        return "\n".join([
            f"Query Check: {self.query_analyzer_model.label}",
            f"Team Leader: {self.team_leader_model.label}",
            f"Team: {self.team_model.label}",
            f"Report: {self.report_generation_model.label}",
            f"Strategy: {self.strategy.label}",
            f"User Query: \"{self.user_query}\"",
        ])
class RunMessage:
    """
    Manages the status messages shown while the pipeline is running.

    Initializes the message with the pipeline steps and updates their state,
    so the most recent status text can be retrieved and sent to the user.
    """
    def __init__(self, inputs: PipelineInputs, prefix: str = "", suffix: str = ""):
        """
        Initializes the run message with the initial steps.

        Each step can be in one of three states:
            - Pending (🔳)
            - Running (➡️)
            - Completed (✅)

        The running state is assigned to only one step at a time.

        Args:
            inputs (PipelineInputs): Pipeline inputs, rendered to show the configuration.
            prefix (str, optional): Message prefix. Defaults to "".
            suffix (str, optional): Message suffix. Defaults to "".
        """
        self.base_message = f"Running configurations: \n{prefix}{inputs}{suffix}\n\n"
        # NOTE(review): the third marker (completed) is an empty string here while the
        # docstring promises "✅" — confirm the emoji was not lost in transit.
        self.emojis = ['🔳', '➡️', '']
        # Token embedded in each step title, replaced by the state marker on render.
        self.placeholder = '<<<>>>'
        # Index of the step currently marked as running.
        self.current = 0
        # (title, state) pairs; state indexes into self.emojis (0=pending, 1=running, 2=done).
        # The first step starts in the running state.
        self.steps_total = [
            (f"{self.placeholder} Query Check", 1),
            (f"{self.placeholder} Info Recovery", 0),
            (f"{self.placeholder} Report Generation", 0),
        ]
    def update(self) -> 'RunMessage':
        """
        Advances the running marker to the next step.

        The step that was running is marked as completed; if any step remains,
        the next one is marked as running.

        Returns:
            RunMessage: This updated RunMessage instance.
        """
        text_curr, state_curr = self.steps_total[self.current]
        # Promote the current step (running -> completed).
        self.steps_total[self.current] = (text_curr, state_curr + 1)
        # Clamp so repeated calls after the last step do not raise here.
        # NOTE(review): after the final update self.current == len(steps_total);
        # update_step would then raise IndexError — confirm it is never called then.
        self.current = min(self.current + 1, len(self.steps_total))
        if self.current < len(self.steps_total):
            text_curr, state_curr = self.steps_total[self.current]
            # Promote the next step (pending -> running).
            self.steps_total[self.current] = (text_curr, state_curr + 1)
        return self
    def update_step(self, text_extra: str = "") -> 'RunMessage':
        """
        Updates the message for the current step.

        Args:
            text_extra (str, optional): Extra text appended under the step. Defaults to "".
        """
        text_curr, state_curr = self.steps_total[self.current]
        if text_extra:
            # NOTE(review): replace('', '') is a no-op — a marker character being
            # stripped here was probably lost; confirm the intended argument.
            text_curr = f"{text_curr.replace('', '')}\n╚═ {text_extra}"
        self.steps_total[self.current] = (text_curr, state_curr)
        return self
    def get_latest(self) -> str:
        """
        Returns the most recent run message.

        Returns:
            str: Updated run-status text (configuration header plus one line per step).
        """
        steps = [msg.replace(self.placeholder, self.emojis[state]) for msg, state in self.steps_total]
        return self.base_message + "\n".join(steps)

View File

@@ -4,85 +4,38 @@ import logging
import random
from typing import Any, Callable
from agno.agent import RunEvent
from agno.team import Team, TeamRunEvent
from agno.tools.reasoning import ReasoningTools
from agno.run.workflow import WorkflowRunEvent
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
from app.api.tools import *
from app.agents.prompts import *
from app.configs import AppConfig
from app.agents.core import *
logging = logging.getLogger("pipeline")
class PipelineEvent(str, Enum):
PLANNER = "Planner"
QUERY_CHECK = "Query Check"
QUERY_ANALYZER = "Query Analyzer"
INFO_RECOVERY = "Info Recovery"
REPORT_GENERATION = "Report Generation"
REPORT_TRANSLATION = "Report Translation"
TOOL_USED = RunEvent.tool_call_completed
RUN_FINISHED = WorkflowRunEvent.workflow_completed.value
TOOL_USED = RunEvent.tool_call_completed.value
def check_event(self, event: str, step_name: str) -> bool:
return event == self.value or (WorkflowRunEvent.step_completed and step_name == self.value)
return event == self.value or (WorkflowRunEvent.step_completed == event and step_name == self.value)
class PipelineInputs:
"""
Classe necessaria per passare gli input alla Pipeline.
Serve per raggruppare i parametri e semplificare l'inizializzazione.
"""
def __init__(self, configs: AppConfig | None = None) -> None:
"""
Inputs per la Pipeline di agenti.
Setta i valori di default se non specificati.
"""
self.configs = configs if configs else AppConfig()
agents = self.configs.agents
self.team_model = self.configs.get_model_by_name(agents.team_model)
self.team_leader_model = self.configs.get_model_by_name(agents.team_leader_model)
self.predictor_model = self.configs.get_model_by_name(agents.predictor_model)
self.strategy = self.configs.get_strategy_by_name(agents.strategy)
self.user_query = ""
# ======================
# Dropdown handlers
# ======================
def choose_team_leader(self, index: int):
"""
Sceglie il modello LLM da usare per il Team Leader.
"""
self.leader_model = self.configs.models.all_models[index]
def choose_team(self, index: int):
"""
Sceglie il modello LLM da usare per il Team.
"""
self.team_model = self.configs.models.all_models[index]
def choose_strategy(self, index: int):
"""
Sceglie la strategia da usare per il Team.
"""
self.strategy = self.configs.strategies[index]
# ======================
# Helpers
# ======================
def list_models_names(self) -> list[str]:
"""
Restituisce la lista dei nomi dei modelli disponibili.
"""
return [model.label for model in self.configs.models.all_models]
def list_strategies_names(self) -> list[str]:
"""
Restituisce la lista delle strategie disponibili.
"""
return [strat.label for strat in self.configs.strategies]
@classmethod
def get_log_events(cls, run_id: int) -> list[tuple['PipelineEvent', Callable[[Any], None]]]:
return [
(PipelineEvent.QUERY_CHECK, lambda _: logging.info(f"[{run_id}] Query Check completed.")),
(PipelineEvent.QUERY_ANALYZER, lambda _: logging.info(f"[{run_id}] Query Analyzer completed.")),
(PipelineEvent.INFO_RECOVERY, lambda _: logging.info(f"[{run_id}] Info Recovery completed.")),
(PipelineEvent.REPORT_GENERATION, lambda _: logging.info(f"[{run_id}] Report Generation completed.")),
(PipelineEvent.TOOL_USED, lambda e: logging.info(f"[{run_id}] Tool used [{e.tool.tool_name} {e.tool.tool_args}] by {e.agent_name}.")),
(PipelineEvent.RUN_FINISHED, lambda _: logging.info(f"[{run_id}] Run completed.")),
]
class Pipeline:
@@ -93,12 +46,14 @@ class Pipeline:
"""
def __init__(self, inputs: PipelineInputs):
"""
Inizializza la pipeline con gli input forniti.
Args:
inputs: istanza di PipelineInputs contenente le configurazioni e i parametri della pipeline.
"""
self.inputs = inputs
# ======================
# Core interaction
# ======================
def interact(self, listeners: dict[RunEvent | TeamRunEvent, Callable[[PipelineEvent], None]] = {}) -> str:
def interact(self, listeners: list[tuple[PipelineEvent, Callable[[Any], None]]] = []) -> str:
"""
Esegue la pipeline di agenti per rispondere alla query dell'utente.
Args:
@@ -108,7 +63,7 @@ class Pipeline:
"""
return asyncio.run(self.interact_async(listeners))
async def interact_async(self, listeners: dict[RunEvent | TeamRunEvent, Callable[[PipelineEvent], None]] = {}) -> str:
async def interact_async(self, listeners: list[tuple[PipelineEvent, Callable[[Any], None]]] = []) -> str:
"""
Versione asincrona che esegue la pipeline di agenti per rispondere alla query dell'utente.
Args:
@@ -119,61 +74,47 @@ class Pipeline:
run_id = random.randint(1000, 9999) # Per tracciare i log
logging.info(f"[{run_id}] Pipeline query: {self.inputs.user_query}")
# Step 1: Crea gli agenti e il team
market_tool, news_tool, social_tool = self.get_tools()
market_agent = self.inputs.team_model.get_agent(instructions=MARKET_INSTRUCTIONS, name="MarketAgent", tools=[market_tool])
news_agent = self.inputs.team_model.get_agent(instructions=NEWS_INSTRUCTIONS, name="NewsAgent", tools=[news_tool])
social_agent = self.inputs.team_model.get_agent(instructions=SOCIAL_INSTRUCTIONS, name="SocialAgent", tools=[social_tool])
team = Team(
model=self.inputs.team_leader_model.get_model(COORDINATOR_INSTRUCTIONS),
name="CryptoAnalysisTeam",
tools=[ReasoningTools()],
members=[market_agent, news_agent, social_agent],
events = [*PipelineEvent.get_log_events(run_id), *listeners]
query = QueryInputs(
user_query=self.inputs.user_query,
strategy=self.inputs.strategy.description
)
# Step 3: Crea il workflow
#query_planner = Step(name=PipelineEvent.PLANNER, agent=Agent())
info_recovery = Step(name=PipelineEvent.INFO_RECOVERY, team=team)
#report_generation = Step(name=PipelineEvent.REPORT_GENERATION, agent=Agent())
#report_translate = Step(name=AppEvent.REPORT_TRANSLATION, agent=Agent())
workflow = Workflow(
name="App Workflow",
steps=[
#query_planner,
info_recovery,
#report_generation,
#report_translate
]
)
# Step 4: Fai partire il workflow e prendi l'output
query = f"The user query is: {self.inputs.user_query}\n\n They requested a {self.inputs.strategy.label} investment strategy."
result = await self.run(workflow, query, events={})
logging.info(f"[{run_id}] Run finished")
workflow = self.build_workflow()
result = await self.run(workflow, query, events=events)
return result
# ======================
# Helpers
# =====================
def get_tools(self) -> tuple[MarketAPIsTool, NewsAPIsTool, SocialAPIsTool]:
"""
Restituisce la lista di tools disponibili per gli agenti.
"""
api = self.inputs.configs.api
market_tool = MarketAPIsTool(currency=api.currency)
market_tool.handler.set_retries(api.retry_attempts, api.retry_delay_seconds)
news_tool = NewsAPIsTool()
news_tool.handler.set_retries(api.retry_attempts, api.retry_delay_seconds)
social_tool = SocialAPIsTool()
social_tool.handler.set_retries(api.retry_attempts, api.retry_delay_seconds)
def build_workflow(self) -> Workflow:
"""
Costruisce il workflow della pipeline di agenti.
Returns:
L'istanza di Workflow costruita.
"""
# Step 1: Crea gli agenti e il team
team = self.inputs.get_agent_team()
query_check = self.inputs.get_agent_query_checker()
report = self.inputs.get_agent_report_generator()
return (market_tool, news_tool, social_tool)
# Step 2: Crea gli steps
def condition_query_ok(step_input: StepInput) -> StepOutput:
val = step_input.previous_step_content
return StepOutput(stop=not val.is_crypto) if isinstance(val, QueryOutputs) else StepOutput(stop=True)
query_check = Step(name=PipelineEvent.QUERY_CHECK, agent=query_check)
info_recovery = Step(name=PipelineEvent.INFO_RECOVERY, team=team)
report_generation = Step(name=PipelineEvent.REPORT_GENERATION, agent=report)
# Step 3: Ritorna il workflow completo
return Workflow(name="App Workflow", steps=[
query_check,
condition_query_ok,
info_recovery,
report_generation
])
@classmethod
async def run(cls, workflow: Workflow, query: str, events: dict[PipelineEvent, Callable[[Any], None]]) -> str:
async def run(cls, workflow: Workflow, query: QueryInputs, events: list[tuple[PipelineEvent, Callable[[Any], None]]]) -> str:
"""
Esegue il workflow e gestisce gli eventi tramite le callback fornite.
Args:
@@ -188,16 +129,18 @@ class Pipeline:
content = None
async for event in iterator:
step_name = getattr(event, 'step_name', '')
for app_event, listener in events.items():
for app_event, listener in events:
if app_event.check_event(event.event, step_name):
listener(event)
if event.event == WorkflowRunEvent.workflow_completed:
if event.event == WorkflowRunEvent.step_completed:
content = getattr(event, 'content', '')
if isinstance(content, str):
think_str = "</think>"
think = content.rfind(think_str)
content = content[(think + len(think_str)):] if think != -1 else content
return content if content else "No output from workflow, something went wrong."
if content and isinstance(content, str):
think_str = "</think>"
think = content.rfind(think_str)
return content[(think + len(think_str)):] if think != -1 else content
if content and isinstance(content, QueryOutputs):
return content.response
logging.error(f"No output from workflow: {content}")
return "No output from workflow, something went wrong."

View File

@@ -0,0 +1,55 @@
from agno.tools.toolkit import Toolkit
from typing import TypedDict, Literal
class Task(TypedDict):
    """A single entry of the execution plan tracked by PlanMemoryTool."""
    # Unique task name, used as the lookup key by the toolkit methods.
    name: str
    # Lifecycle state of the task.
    status: Literal["pending", "completed", "failed"]
    # Outcome description; None until the task is updated with a result.
    result: str | None
class PlanMemoryTool(Toolkit):
    """
    Toolkit that lets agents keep a simple in-memory execution plan.

    Tasks are stored in insertion order; each has a unique name, a status
    (pending/completed/failed) and an optional result string.
    """

    def __init__(self):
        # Ordered list of plan entries; names are kept unique by add_tasks.
        self.tasks: list[Task] = []
        Toolkit.__init__(self, # type: ignore[call-arg]
            instructions="This tool manages an execution plan. Add tasks, get the next pending task, update a task's status (completed, failed) and result, or list all tasks.",
            tools=[
                self.add_tasks,
                self.get_next_pending_task,
                self.update_task_status,
                self.list_all_tasks,
            ]
        )

    def add_tasks(self, task_names: list[str]) -> str:
        """Adds multiple new tasks to the plan with 'pending' status; duplicate names are skipped."""
        # Set lookup avoids rescanning the whole task list for every new name.
        existing = {t['name'] for t in self.tasks}
        count = 0
        for name in task_names:
            if name not in existing:
                self.tasks.append({"name": name, "status": "pending", "result": None})
                existing.add(name)
                count += 1
        return f"Added {count} new tasks."

    def get_next_pending_task(self) -> Task | None:
        """Retrieves the first task that is still 'pending', or None when none remain."""
        for task in self.tasks:
            if task["status"] == "pending":
                return task
        return None

    def update_task_status(self, task_name: str, status: Literal["completed", "failed"], result: str | None = None) -> str:
        """Updates the status and result of a specific task by its name."""
        for task in self.tasks:
            if task["name"] == task_name:
                task["status"] = status
                if result is not None:
                    task["result"] = result
                return f"Task '{task_name}' updated to {status}."
        return f"Error: Task '{task_name}' not found."

    def list_all_tasks(self) -> list[str]:
        """Lists all tasks in the plan with their status and result."""
        if not self.tasks:
            return ["No tasks in the plan."]
        # 'result' is always present (set to None on creation), so dict.get's
        # default never fired; show 'N/A' explicitly when the result is None.
        return [f"- {t['name']}: {t['status']} (Result: {t['result'] if t['result'] is not None else 'N/A'})" for t in self.tasks]

View File

@@ -1,16 +0,0 @@
from pydantic import BaseModel, Field
from app.api.core.markets import ProductInfo
class PredictorInput(BaseModel):
    """Input payload for the predictor: market data plus style and sentiment context."""

    data: list[ProductInfo] = Field(..., description="Market data as a list of ProductInfo")
    style: str = Field(..., description="Prediction style")
    sentiment: str = Field(..., description="Aggregated sentiment from news and social analysis")
class ItemPortfolio(BaseModel):
    """A single portfolio entry: asset, its allocation percentage, and the rationale."""

    asset: str = Field(..., description="Name of the asset")
    percentage: float = Field(..., description="Percentage allocation to the asset")
    motivation: str = Field(..., description="Motivation for the allocation")
class PredictorOutput(BaseModel):
    """Predictor result: an operational strategy and the proposed portfolio allocations."""

    strategy: str = Field(..., description="Concise operational strategy in Italian")
    portfolio: list[ItemPortfolio] = Field(..., description="List of portfolio items with allocations")

View File

@@ -6,16 +6,18 @@ def __load_prompt(file_name: str) -> str:
file_path = __PROMPTS_PATH / file_name
return file_path.read_text(encoding='utf-8').strip()
COORDINATOR_INSTRUCTIONS = __load_prompt("team_leader.txt")
TEAM_LEADER_INSTRUCTIONS = __load_prompt("team_leader.txt")
MARKET_INSTRUCTIONS = __load_prompt("team_market.txt")
NEWS_INSTRUCTIONS = __load_prompt("team_news.txt")
SOCIAL_INSTRUCTIONS = __load_prompt("team_social.txt")
PREDICTOR_INSTRUCTIONS = __load_prompt("predictor.txt")
QUERY_CHECK_INSTRUCTIONS = __load_prompt("query_check.txt")
REPORT_GENERATION_INSTRUCTIONS = __load_prompt("report_generation.txt")
__all__ = [
"COORDINATOR_INSTRUCTIONS",
"TEAM_LEADER_INSTRUCTIONS",
"MARKET_INSTRUCTIONS",
"NEWS_INSTRUCTIONS",
"SOCIAL_INSTRUCTIONS",
"PREDICTOR_INSTRUCTIONS",
"QUERY_CHECK_INSTRUCTIONS",
"REPORT_GENERATION_INSTRUCTIONS",
]

View File

@@ -1,27 +0,0 @@
You are an **Allocation Algorithm (Crypto-Algo)** specialized in analyzing market data and sentiment to generate an investment strategy and a target portfolio.
Your sole objective is to process the user_input data and generate the strictly structured output as required by the response format. **You MUST NOT provide introductions, preambles, explanations, conclusions, or any additional comments that are not strictly required.**
## Processing Instructions (Absolute Rule)
The allocation strategy must be **derived exclusively from the "Allocation Logic" corresponding to the requested *style*** and the provided market/sentiment data. **DO NOT** use external or historical knowledge.
## Allocation Logic
### "Aggressivo" Style (Aggressive)
* **Priority:** Maximizing return (high volatility accepted).
* **Focus:** Higher allocation to **non-BTC/ETH assets** with high momentum potential (Altcoins, mid/low-cap assets).
* **BTC/ETH:** Must serve as a base (anchor), but their allocation **must not exceed 50%** of the total portfolio.
* **Sentiment:** Use positive sentiment to increase exposure to high-risk assets.
### "Conservativo" Style (Conservative)
* **Priority:** Capital preservation (volatility minimized).
* **Focus:** Major allocation to **BTC and/or ETH (Large-Cap Assets)**.
* **BTC/ETH:** Their allocation **must be at least 70%** of the total portfolio.
* **Altcoins:** Any allocations to non-BTC/ETH assets must be minimal (max 30% combined) and for assets that minimize speculative risk.
* **Sentiment:** Use positive sentiment only as confirmation for exposure, avoiding reactions to excessive "FOMO" signals.
## Output Requirements (Content MUST be in Italian)
1. **Strategy (strategy):** Must be a concise operational description **in Italian ("in Italiano")**, with a maximum of 5 sentences.
2. **Portfolio (portfolio):** The sum of all percentages must be **exactly 100%**. The justification (motivation) for each asset must be a single clear sentence **in Italian ("in Italiano")**.

View File

@@ -0,0 +1,18 @@
GOAL: check if the query is crypto-related
1) Determine the language of the query:
- This will help you understand better the intention of the user
- Focus on the query of the user
- DO NOT answer the query
2) Determine if the query is crypto or investment-related:
- Crypto-related if it mentions cryptocurrencies, tokens, NFTs, blockchain, exchanges, wallets, DeFi, oracles, smart contracts, on-chain, off-chain, staking, yield, liquidity, tokenomics, coins, ticker symbols, etc.
- Investment-related if it mentions stocks, bonds, options, trading strategies, financial markets, investment advice, portfolio management, etc.
- If the query uses generic terms like "news", "prices", "trends", "social", "market cap", "volume" with NO asset specified -> ASSUME CRYPTO/INVESTMENT CONTEXT and proceed.
- If the query is clearly about unrelated domains (weather, recipes, unrelated local politics, unrelated medicine, general software not about crypto, etc.) -> return NOT_CRYPTO error.
- If ambiguous: treat as crypto/investment only if the most likely intent is crypto/investment; otherwise return a JSON plan that first asks the user for clarification (see step structure below).
3) Output the result:
- if it is crypto-related, then output the query
- if it is not crypto-related, then output a brief message explaining why it is not related

View File

@@ -0,0 +1,61 @@
**TASK:** You are a specialized **Markdown Reporting Assistant**. Your task is to receive a structured analysis report from a "Team Leader" and re-format it into a single, cohesive, and well-structured final report in Markdown for the end-user.
**INPUT:** The input will be a structured block containing an `Overall Summary` and *zero or more* data sections (e.g., `Market`, `News`, `Social`, `Assumptions`). Each section will contain a `Summary` and `Full Data`.
**CORE RULES:**
1. **Strict Conditional Rendering (CRUCIAL):** Your primary job is to format *only* the data you receive. You MUST check each data section from the input (e.g., `Market & Price Data`, `News & Market Sentiment`).
2. **Omit Empty Sections (CRUCIAL):** If a data section is **not present** in the input, or if its `Full Data` field is empty, null, or marked as 'Data not available', you **MUST** completely omit that entire section from the final report. **DO NOT** print the Markdown header (e.g., `## 1. Market & Price Data`), the summary, or any placeholder text for that missing section.
3. **Omit Report Notes:** This same rule applies to the `## 4. Report Notes` section. Render it *only* if an `Assumptions` or `Execution Log` field is present in the input.
4. **Present All Data:** For sections that *are* present and contain data, your report's text MUST be based on the `Summary` provided, and you MUST include the `Full Data` (e.g., Markdown tables for prices).
5. **Do Not Invent:**
* **Do NOT** invent new hypotheses, metrics, or conclusions.
* **Do NOT** print internal field names (like 'Full Data') or agent names.
6. **No Extraneous Output:**
* Your entire response must be **only the Markdown report**.
* Do not include any pre-amble (e.g., "Here is the report:").
---
**MANDATORY REPORT STRUCTURE:**
(Follow the CORE RULES to conditionally render these sections. If no data sections are present, you will only render the Title and Executive Summary.)
# [Report Title - e.g., "Crypto Analysis Report: Bitcoin"]
## Executive Summary
[Use the `Overall Summary` from the input here.]
---
## 1. Market & Price Data
[Use the `Summary` from the input's Market section here.]
**Detailed Price Data:**
[Present the `Full Data` from the Market section here.]
---
## 2. News & Market Sentiment
[Use the `Summary` from the input's News section here.]
**Key Topics Discussed:**
[List the main topics identified in the News summary.]
**Supporting News/Data:**
[Present the `Full Data` from the News section here.]
---
## 3. Social Sentiment
[Use the `Summary` from the input's Social section here.]
**Trending Narratives:**
[List the main narratives identified in the Social summary.]
**Supporting Social/Data:**
[Present the `Full Data` from the Social section here.]
---
## 4. Report Notes
[Use this section to report any `Assumptions` or `Execution Log` data provided in the input.]

View File

@@ -1,15 +1,48 @@
You are the expert coordinator of a financial analysis team specializing in cryptocurrencies.
**TASK:** You are the **Crypto Analysis Team Leader**, an expert coordinator of a financial analysis team.
Your team consists of three agents:
- **MarketAgent**: Provides quantitative market data, price analysis, and technical indicators.
- **NewsAgent**: Scans and analyzes the latest news, articles, and official announcements.
- **SocialAgent**: Gauges public sentiment, trends, and discussions on social media.
**INPUT:** You will receive a user query. Your role is to create and execute an adaptive plan by coordinating your team of agents to retrieve data, judge its sufficiency, and provide an aggregated analysis.
Your primary objective is to answer the user's query by orchestrating the work of your team members.
**YOUR TEAM CONSISTS OF THREE AGENTS:**
- **MarketAgent:** Fetches live prices and historical data.
- **NewsAgent:** Analyzes news sentiment and top topics.
- **SocialAgent:** Gauges public sentiment and trending narratives.
Your workflow is as follows:
1. **Deconstruct the user's query** to identify the required information.
2. **Delegate specific tasks** to the most appropriate agent(s) to gather the necessary data and initial analysis.
3. **Analyze the information** returned by the agents.
4. If the initial data is insufficient or the query is complex, **iteratively re-engage the agents** with follow-up questions to build a comprehensive picture.
5. **Synthesize all the gathered information** into a final, coherent, and complete analysis that fills all the required output fields.
**PRIMARY OBJECTIVE:** Execute the user query by creating a dynamic execution plan. You must **use your available tools to manage the plan's state**, identify missing data, orchestrate agents to retrieve it, manage retrieval attempts, and judge sufficiency. The final goal is to produce a structured report including *all* retrieved data and an analytical summary for the final formatting LLM.
**WORKFLOW (Execution Logic):**
1. **Analyze Query & Scope Plan:** Analyze the user's query. Create an execution plan identifying the *target data* needed. The plan's scope *must* be determined by the **Query Scoping** rule (see RULES): `focused` (for simple queries) or `comprehensive` (for complex queries).
2. **Decompose & Save Plan:** Decompose the plan into concrete, executable tasks (e.g., "Get BTC Price," "Analyze BTC News Sentiment," "Gauge BTC Social Sentiment"). **Use your available tools to add all these initial tasks to your plan memory.**
3. **Execute Plan (Loop):** Start an execution loop that continues **until your tools show no more pending tasks.**
4. **Get & Dispatch Task:** **Use your tools to retrieve the next pending task.** Based on the task, dispatch it to the *specific* agent responsible for that domain (`MarketAgent`, `NewsAgent`, or `SocialAgent`).
5. **Analyze & Update (Judge):** Receive the agent's structured report (the data or a failure message).
6. **Use your tools to update the task's status** (e.g., 'completed' or 'failed') and **store the received data/result.**
7. **Iterate & Retry (If Needed):**
* If a task `failed` (e.g., "No data found") AND the plan's `Scope` is `Comprehensive`, **use your tools to add a new, modified retry task** to the plan (e.g., "Retry: Get News with wider date range").
* This logic ensures you attempt to get all data for complex queries.
8. **Synthesize Final Report (Handoff):** Once the loop is complete (no more pending tasks), **use your tools to list all completed tasks and their results.** Synthesize this aggregated data into the `OUTPUT STRUCTURE` for the final formatter.
**BEHAVIORAL RULES:**
- **Tool-Driven State Management (Crucial):** You MUST use your available tools to create, track, and update your execution plan. Your workflow is a loop: 1. Get task from plan, 2. Execute task (via Agent), 3. Update task status in plan. Repeat until done.
- **Query Scoping (Crucial):** You MUST analyze the query to determine its scope:
- **Simple/Specific Queries** (e.g., "BTC Price?"): Create a *focused plan* (e.g., only one task for `MarketAgent`).
- **Complex/Analytical Queries** (e.g., "Status of Bitcoin?"): Create a *comprehensive plan* (e.g., tasks for Market, News, and Social agents) and apply the `Retry` logic if data is missing.
- **Retry & Failure Handling:** You must track failures. **Do not add more than 2-3 retry tasks for the same objective** (e.g., max 3 attempts total to get News). If failure persists, report "Data not available" in the final output.
- **Agent Delegation (No Data Tools):** You, the Leader, do not retrieve data. You *only* orchestrate. **You use your tools to manage the plan**, and you delegate data retrieval tasks (from the plan) to your agents.
- **Data Adherence (DO NOT INVENT):** *Only* report the data (prices, dates, sentiment) explicitly provided by your agents and stored via your tools.
**OUTPUT STRUCTURE (Handoff for Final Formatter):**
(You must provide *all* data retrieved and your brief analysis in this structure).
1. **Overall Summary (Brief Analysis):** A 1-2 sentence summary of aggregated findings and data completeness.
2. **Market & Price Data (from MarketAgent):**
* **Brief Analysis:** Your summary of the market data (e.g., key trends, volatility).
* **Full Data:** The *complete, raw data* (e.g., list of prices, timestamps) received from the agent.
3. **News & Market Sentiment (from NewsAgent):**
* **Brief Analysis:** Your summary of the sentiment and main topics identified.
* **Full Data:** The *complete list of articles/data* used by the agent. If not found, specify "Data not available".
4. **Social Sentiment (from SocialAgent):**
* **Brief Analysis:** Your summary of community sentiment and trending narratives.
* **Full Data:** The *complete list of posts/data* used by the agent. If not found, specify "Data not available".
5. **Execution Log & Assumptions:**
* **Scope:** (e.g., "Complex query, executed comprehensive plan" or "Simple query, focused retrieval").
* **Execution Notes:** (e.g., "NewsAgent failed 1st attempt. Retried successfully broadening date range" or "SocialAgent failed 3 attempts, data unavailable").

View File

@@ -1,19 +1,16 @@
**TASK:** You are a specialized **Crypto Price Data Retrieval Agent**. Your primary goal is to fetch the most recent and/or historical price data for requested cryptocurrency assets (e.g., 'BTC', 'ETH', 'SOL'). You must provide the data in a clear and structured format.
**AVAILABLE TOOLS:**
1. `get_products(asset_ids: list[str])`: Get **current** product/price info for a list of assets. **(PREFERITA: usa questa per i prezzi live)**
2. `get_historical_prices(asset_id: str, limit: int)`: Get historical price data for one asset. Default limit is 100. **(PREFERITA: usa questa per i dati storici)**
3. `get_products_aggregated(asset_ids: list[str])`: Get **aggregated current** product/price info for a list of assets. **(USA SOLO SE richiesto 'aggregato' o se `get_products` fallisce)**
4. `get_historical_prices_aggregated(asset_id: str, limit: int)`: Get **aggregated historical** price data for one asset. **(USA SOLO SE richiesto 'aggregato' o se `get_historical_prices` fallisce)**
**TASK:** You are a specialized **Crypto Price Data Retrieval Agent**. Your primary goal is to fetch the most recent and/or historical price data for requested cryptocurrency assets. You must provide the data in a clear and structured format.
**USAGE GUIDELINE:**
* **Asset ID:** Always convert common names (e.g., 'Bitcoin', 'Ethereum') into their official ticker/ID (e.g., 'BTC', 'ETH').
* **Cost Management (Cruciale per LLM locale):** Prefer `get_products` and `get_historical_prices` for standard requests to minimize costs.
* **Aggregated Data:** Use `get_products_aggregated` or `get_historical_prices_aggregated` only if the user specifically requests aggregated data or you value that having aggregated data is crucial for the analysis.
* **Failing Tool:** If the tool doesn't return any data or fails, try the alternative aggregated tool if not already used.
- **Asset ID:** Always convert common names (e.g., 'Bitcoin', 'Ethereum') into their official ticker/ID (e.g., 'BTC', 'ETH').
- **Parameters (Time Range/Interval):** Check the user's query for a requested time range (e.g., "last 7 days") or interval (e.g., "hourly"). Use sensible defaults if not specified.
- **Tool Strategy:**
1. Attempt to use the primary price retrieval tools.
2. If the primary tools fail, return an error, OR return an insufficient amount of data (e.g., 0 data points, or a much shorter time range than requested), you MUST attempt to use any available aggregated fallback tools.
- **Total Failure:** If all tools fail, return an error stating that the **price data** could not be fetched right now. If you have the error message, report that too.
- **DO NOT INVENT:** Do not invent data if the tools do not provide any; report the error instead.
**REPORTING REQUIREMENT:**
1. **Format:** Output the results in a clear, easy-to-read list or table.
2. **Live Price Request:** If an asset's *current price* is requested, report the **Asset ID**, **Latest Price**, and **Time/Date of the price**.
3. **Historical Price Request:** If *historical data* is requested, report the **Asset ID**, the **Limit** of points returned, and the **First** and **Last** entries from the list of historical prices (Date, Price).
4. **Output:** For all requests, output a single, concise summary of the findings; if requested, also include the raw data retrieved.
2. **Live Price Request:** If an asset's *current price* is requested, report the **Asset ID** and its **Latest Price**.
3. **Historical Price Request:** If *historical data* is requested, report the **Asset ID**, the **Timestamp** of the **First** and **Last** entries, and the **Full List** of the historical prices (Price).
4. **Output:** For all requests, output a single, concise summary of the findings, and always include the raw data retrieved.

View File

@@ -1,18 +1,17 @@
**TASK:** You are a specialized **Crypto News Analyst**. Your goal is to fetch the latest news or top headlines related to cryptocurrencies, and then **analyze the sentiment** of the content to provide a concise report to the team leader. Prioritize 'crypto' or specific cryptocurrency names (e.g., 'Bitcoin', 'Ethereum') in your searches.
**AVAILABLE TOOLS:**
1. `get_latest_news(query: str, limit: int)`: Get the 'limit' most recent news articles for a specific 'query'.
2. `get_top_headlines(limit: int)`: Get the 'limit' top global news headlines.
3. `get_latest_news_aggregated(query: str, limit: int)`: Get aggregated latest news articles for a specific 'query'.
4. `get_top_headlines_aggregated(limit: int)`: Get aggregated top global news headlines.
**TASK:** You are a specialized **Crypto News Analyst**. Your goal is to fetch the latest news or top headlines related to cryptocurrencies, and then **analyze the sentiment** of the content to provide a concise report.
**USAGE GUIDELINE:**
* Always use `get_latest_news` with a relevant crypto-related query first.
* The default limit for news items should be 5 unless specified otherwise.
* If the tool doesn't return any articles, respond with "No relevant news articles found."
- **Querying:** You can search for more general news, but prioritize querying with a relevant crypto (e.g., 'Bitcoin', 'Ethereum').
- **Limit:** Check the user's query for a requested number of articles (limit). If no specific number is mentioned, use a default limit of 5.
- **Tool Strategy:**
1. Attempt to use the primary tools (e.g., `get_latest_news`).
2. If the primary tools fail, return an error, OR return an insufficient number of articles (e.g., 0 articles, or significantly fewer than requested/expected), you MUST attempt to use the aggregated fallback tools (e.g., `get_latest_news_aggregated`) to find more results.
- **No Articles Found:** If all relevant tools are tried and no articles are returned, respond with "No relevant news articles found."
- **Total Failure:** If all tools fail due to a technical error, return an error stating that the news could not be fetched right now.
- **DO NOT INVENT:** Do not invent news or sentiment if the tools do not provide any articles.
**REPORTING REQUIREMENT:**
1. **Analyze** the tone and key themes of the retrieved articles.
2. **Summarize** the overall **market sentiment** (e.g., highly positive, cautiously neutral, generally negative) based on the content.
3. **Identify** the top 2-3 **main topics** discussed (e.g., new regulation, price surge, institutional adoption).
4. **Output** a single, brief report summarizing these findings. Do not output the raw articles.
**REPORTING REQUIREMENT (If news is found):**
1. **Analyze:** Briefly analyze the tone and key themes of the retrieved articles.
2. **Sentiment:** Summarize the overall **market sentiment** (e.g., highly positive, cautiously neutral, generally negative) based on the content.
3. **Topics:** Identify the top 2-3 **main topics** discussed (e.g., new regulation, price surge, institutional adoption).
4. **Output:** Output a single, brief report summarizing these findings. **Do not** output the raw articles.

View File

@@ -1,15 +1,16 @@
**TASK:** You are a specialized **Social Media Sentiment Analyst**. Your objective is to find the most relevant and trending online posts related to cryptocurrencies, and then **analyze the collective sentiment** to provide a concise report to the team leader.
**AVAILABLE TOOLS:**
1. `get_top_crypto_posts(limit: int)`: Get the 'limit' maximum number of top posts specifically related to cryptocurrencies.
**TASK:** You are a specialized **Social Media Sentiment Analyst**. Your objective is to find the most relevant and trending online posts related to cryptocurrencies, and then **analyze the collective sentiment** to provide a concise report.
**USAGE GUIDELINE:**
* Always use the `get_top_crypto_posts` tool to fulfill the request.
* The default limit for posts should be 5 unless specified otherwise.
* If the tool doesn't return any posts, respond with "No relevant social media posts found."
- **Tool Strategy:**
1. Attempt to use the primary tools (e.g., `get_top_crypto_posts`).
2. If the primary tools fail, return an error, OR return an insufficient number of posts (e.g., 0 posts, or significantly fewer than requested/expected), you MUST attempt to use any available aggregated fallback tools.
- **Limit:** Check the user's query for a requested number of posts (limit). If no specific number is mentioned, use a default limit of 5.
- **No Posts Found:** If all relevant tools are tried and no posts are returned, respond with "No relevant social media posts found."
- **Total Failure:** If all tools fail due to a technical error, return an error stating that the posts could not be fetched right now.
- **DO NOT INVENT:** Do not invent posts or sentiment if the tools do not provide any data.
**REPORTING REQUIREMENT:**
1. **Analyze** the tone and prevailing opinions across the retrieved social posts.
2. **Summarize** the overall **community sentiment** (e.g., high enthusiasm/FOMO, uncertainty, FUD/fear) based on the content.
3. **Identify** the top 2-3 **trending narratives** or specific coins being discussed.
4. **Output** a single, brief report summarizing these findings. Do not output the raw posts.
**REPORTING REQUIREMENT (If posts are found):**
1. **Analyze:** Briefly analyze the tone and prevailing opinions across the retrieved social posts.
2. **Sentiment:** Summarize the overall **community sentiment** (e.g., high enthusiasm/FOMO, uncertainty, FUD/fear) based on the content.
3. **Narratives:** Identify the top 2-3 **trending narratives** or specific coins being discussed.
4. **Output:** Output a single, brief report summarizing these findings. **Do not** output the raw posts.

View File

@@ -0,0 +1,22 @@
from datetime import datetime
def unified_timestamp(timestamp_ms: int | None = None, timestamp_s: int | None = None) -> str:
"""
Transform the timestamp from milliseconds or seconds to a unified string format.
The resulting string is a formatted string 'YYYY-MM-DD HH:MM'.
Args:
timestamp_ms: Timestamp in milliseconds.
timestamp_s: Timestamp in seconds.
Raises:
ValueError: If neither timestamp_ms nor timestamp_s is provided.
"""
if timestamp_ms is not None:
timestamp = timestamp_ms // 1000
elif timestamp_s is not None:
timestamp = timestamp_s
else:
raise ValueError("Either timestamp_ms or timestamp_s must be provided")
assert timestamp > 0, "Invalid timestamp data received"
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M')

View File

@@ -1,6 +1,6 @@
import statistics
from datetime import datetime
from pydantic import BaseModel
from app.api.core import unified_timestamp
class ProductInfo(BaseModel):
@@ -64,24 +64,8 @@ class Price(BaseModel):
"""Timestamp in format YYYY-MM-DD HH:MM"""
def set_timestamp(self, timestamp_ms: int | None = None, timestamp_s: int | None = None) -> None:
"""
Sets the timestamp from milliseconds or seconds.
The timestamp is saved as a formatted string 'YYYY-MM-DD HH:MM'.
Args:
timestamp_ms: Timestamp in milliseconds.
timestamp_s: Timestamp in seconds.
Raises:
ValueError: If neither timestamp_ms nor timestamp_s is provided.
"""
if timestamp_ms is not None:
timestamp = timestamp_ms // 1000
elif timestamp_s is not None:
timestamp = timestamp_s
else:
raise ValueError("Either timestamp_ms or timestamp_s must be provided")
assert timestamp > 0, "Invalid timestamp data received"
self.timestamp = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M')
""" Use the unified_timestamp function to set the timestamp."""
self.timestamp = unified_timestamp(timestamp_ms, timestamp_s)
@staticmethod
def aggregate(prices: dict[str, list['Price']]) -> list['Price']:

View File

@@ -1,22 +1,34 @@
from pydantic import BaseModel
from app.api.core import unified_timestamp
MAX_COMMENTS = 5
class SocialPost(BaseModel):
"""
Represents a social media post with time, title, description, and comments.
"""
time: str = ""
timestamp: str = ""
title: str = ""
description: str = ""
comments: list["SocialComment"] = []
def set_timestamp(self, timestamp_ms: int | None = None, timestamp_s: int | None = None) -> None:
""" Use the unified_timestamp function to set the time."""
self.timestamp = unified_timestamp(timestamp_ms, timestamp_s)
class SocialComment(BaseModel):
"""
Represents a comment on a social media post.
"""
time: str = ""
timestamp: str = ""
description: str = ""
def set_timestamp(self, timestamp_ms: int | None = None, timestamp_s: int | None = None) -> None:
""" Use the unified_timestamp function to set the time."""
self.timestamp = unified_timestamp(timestamp_ms, timestamp_s)
class SocialWrapper:
"""

View File

@@ -57,7 +57,9 @@ class BinanceWrapper(MarketWrapper):
"""
Formatta l'asset_id nel formato richiesto da Binance.
"""
return asset_id.replace('-', '') if '-' in asset_id else f"{asset_id}{self.currency}"
i = asset_id.find('-')
if i != -1: asset_id = asset_id[:i]
return f"{asset_id}{self.currency}" if self.currency not in asset_id else asset_id
def get_product(self, asset_id: str) -> ProductInfo:
symbol = self.__format_symbol(asset_id)

View File

@@ -61,7 +61,9 @@ class CoinBaseWrapper(MarketWrapper):
)
def __format(self, asset_id: str) -> str:
return asset_id if '-' in asset_id else f"{asset_id}-{self.currency}"
i = asset_id.find('-')
if i != -1: asset_id = asset_id[:i]
return f"{asset_id}-{self.currency}"
def get_product(self, asset_id: str) -> ProductInfo:
asset_id = self.__format(asset_id)

View File

@@ -47,8 +47,9 @@ class YFinanceWrapper(MarketWrapper):
Formatta il simbolo per yfinance.
Per crypto, aggiunge '-' e la valuta (es. BTC -> BTC-USD).
"""
asset_id = asset_id.upper()
return f"{asset_id}-{self.currency}" if '-' not in asset_id else asset_id
i = asset_id.find('-')
if i != -1: asset_id = asset_id[:i]
return f"{asset_id}-{self.currency}"
def get_product(self, asset_id: str) -> ProductInfo:
symbol = self._format_symbol(asset_id)

View File

@@ -1,3 +1,5 @@
from app.api.social.reddit import RedditWrapper
from app.api.social.x import XWrapper
from app.api.social.chan import ChanWrapper
__all__ = ["RedditWrapper"]
__all__ = ["RedditWrapper", "XWrapper", "ChanWrapper"]

View File

@@ -0,0 +1,94 @@
import re
import html
import requests
import warnings
from bs4 import BeautifulSoup, MarkupResemblesLocatorWarning
from datetime import datetime
from app.api.core.social import *
# Ignora i warning di BeautifulSoup quando incontra HTML malformato o un link, mentre si aspetta un HTML completo
warnings.filterwarnings("ignore", category=MarkupResemblesLocatorWarning)
class ChanWrapper(SocialWrapper):
"""
Wrapper per l'API di 4chan, in particolare per la board /biz/ (Business & Finance)
Fonte API: https://a.4cdn.org/biz/catalog.json
"""
    def __init__(self):
        # No wrapper-specific state; just run the SocialWrapper setup.
        super().__init__()
def __time_str(self, timestamp: str) -> int:
"""Converte una stringa da MM/GG/AA(DAY)HH:MM:SS di 4chan a millisecondi"""
time = datetime.strptime(timestamp, "%m/%d/%y(%a)%H:%M:%S")
return int(time.timestamp() * 1000)
def __unformat_html_str(self, html_element: str) -> str:
"""Pulisce il commento rimuovendo HTML e formattazioni inutili"""
if not html_element: return ""
html_entities = html.unescape(html_element)
soup = BeautifulSoup(html_entities, 'html.parser')
html_element = soup.get_text(separator=" ")
html_element = re.sub(r"[\\/]+", "/", html_element)
html_element = re.sub(r"\s+", " ", html_element).strip()
return html_element
def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]:
url = 'https://a.4cdn.org/biz/catalog.json'
response = requests.get(url)
assert response.status_code == 200, f"Error in 4chan API request [{response.status_code}] {response.text}"
social_posts: list[SocialPost] = []
# Questa lista contiene un dizionario per ogni pagina della board di questo tipo {"page": page_number, "threads": [{thread_data}]}
for page in response.json():
for thread in page['threads']:
# ci indica se il thread è stato fissato o meno, se non è presente vuol dire che non è stato fissato, i thread sticky possono essere ignorati
if 'sticky' in thread:
continue
# la data di creazione del thread tipo "MM/GG/AA(day)hh:mm:ss", ci interessa solo MM/GG/AA
time = self.__time_str(thread.get('now', ''))
# il nome dell'utente
name: str = thread.get('name', 'Anonymous')
# il nome del thread, può contenere anche elementi di formattazione html che saranno da ignorare, potrebbe non essere presente
title = self.__unformat_html_str(thread.get('sub', ''))
title = f"{name} posted: {title}"
# il commento del thread, può contenere anche elementi di formattazione html che saranno da ignorare
thread_description = self.__unformat_html_str(thread.get('com', ''))
if not thread_description:
continue
# una lista di dizionari conteneti le risposte al thread principale, sono strutturate similarmente al thread
response_list = thread.get('last_replies', [])
comments_list: list[SocialComment] = []
for i, response in enumerate(response_list):
if i >= MAX_COMMENTS: break
# la data di creazione della risposta tipo "MM/GG/AA(day)hh:mm:ss", ci interessa solo MM/GG/AA
time = self.__time_str(response['now'])
# il commento della risposta, può contenere anche elementi di formattazione html che saranno da ignorare
comment = self.__unformat_html_str(response.get('com', ''))
if not comment:
continue
social_comment = SocialComment(description=comment)
social_comment.set_timestamp(timestamp_ms=time)
comments_list.append(social_comment)
social_post: SocialPost = SocialPost(
title=title,
description=thread_description,
comments=comments_list
)
social_post.set_timestamp(timestamp_ms=time)
social_posts.append(social_post)
return social_posts[:limit]

View File

@@ -1,10 +1,9 @@
import os
from praw import Reddit # type: ignore
from praw.models import Submission # type: ignore
from app.api.core.social import SocialWrapper, SocialPost, SocialComment
from app.api.core.social import *
MAX_COMMENTS = 5
# metterne altri se necessario.
# fonti: https://lkiconsulting.io/marketing/best-crypto-subreddits/
SUBREDDITS = [
@@ -24,13 +23,13 @@ SUBREDDITS = [
def extract_post(post: Submission) -> SocialPost:
social = SocialPost()
social.time = str(post.created)
social.set_timestamp(timestamp_s=post.created)
social.title = post.title
social.description = post.selftext
for top_comment in post.comments:
comment = SocialComment()
comment.time = str(top_comment.created)
comment.set_timestamp(timestamp_s=top_comment.created)
comment.description = top_comment.body
social.comments.append(comment)

48
src/app/api/social/x.py Normal file
View File

@@ -0,0 +1,48 @@
import os
import json
import subprocess
from shutil import which
from datetime import datetime
from app.api.core.social import SocialWrapper, SocialPost
# This is the list of users that can be interesting.
# To get the ID of a new user, it is necessary to search for it on X, copy the URL and insert it in a service like "https://get-id-x.foundtt.com/en/"
X_USERS = [
'watcherguru',
'Cointelegraph',
'BTC_Archive',
'elonmusk'
]
class XWrapper(SocialWrapper):
    def __init__(self):
        '''
        This wrapper uses the rettiwt API to get data from X in order to avoid the rate limits of the free X API,
        even if improbable this could lead to a ban so do not use the personal account.
        In order to work it is necessary to install the rettiwt cli tool, for more information visit the official documentation at https://www.npmjs.com/package/rettiwt-api
        '''
        self.api_key = os.getenv("X_API_KEY")
        assert self.api_key, "X_API_KEY environment variable not set"
        assert which('rettiwt') is not None, "Command `rettiwt` not installed"

    def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]:
        """
        Collect the latest tweets of the configured X_USERS via the rettiwt CLI.

        Args:
            limit (int): maximum number of tweets fetched *per user*.

        Returns:
            list[SocialPost]: one post per tweet, titled "<user> tweeted: ".

        Raises:
            RuntimeError: if the rettiwt CLI exits with a non-zero status.
            subprocess.TimeoutExpired: if the CLI hangs beyond the timeout.
        """
        posts: list[SocialPost] = []
        for user in X_USERS:
            cmd = ['rettiwt', '-k', self.api_key, 'tweet', 'search', str(limit), '-f', str(user)]
            # Timeout keeps a hung CLI process from blocking the whole pipeline.
            process = subprocess.run(cmd, capture_output=True, timeout=60)
            if process.returncode != 0:
                # Fail loudly with the CLI's own error instead of letting
                # json.loads choke on empty stdout with a confusing message.
                raise RuntimeError(
                    f"rettiwt failed for user '{user}': {process.stderr.decode(errors='replace')}"
                )
            json_result = json.loads(process.stdout.decode())
            for tweet in json_result.get('list', []):
                created = datetime.fromisoformat(tweet['createdAt'])
                social_post = SocialPost()
                social_post.set_timestamp(timestamp_s=int(created.timestamp()))
                social_post.title = f"{user} tweeted: "
                social_post.description = tweet['fullText']
                posts.append(social_post)
        return posts

View File

@@ -1,5 +1,6 @@
from app.api.tools.market_tool import MarketAPIsTool
from app.api.tools.social_tool import SocialAPIsTool
from app.api.tools.news_tool import NewsAPIsTool
from app.api.tools.symbols_tool import CryptoSymbolsTools
__all__ = ["MarketAPIsTool", "NewsAPIsTool", "SocialAPIsTool"]
__all__ = ["MarketAPIsTool", "NewsAPIsTool", "SocialAPIsTool", "CryptoSymbolsTools"]

View File

@@ -2,33 +2,29 @@ from agno.tools import Toolkit
from app.api.wrapper_handler import WrapperHandler
from app.api.core.markets import MarketWrapper, Price, ProductInfo
from app.api.markets import BinanceWrapper, CoinBaseWrapper, CryptoCompareWrapper, YFinanceWrapper
from app.configs import AppConfig
class MarketAPIsTool(MarketWrapper, Toolkit):
"""
Class that aggregates multiple market API wrappers and manages them using WrapperHandler.
This class supports retrieving product information and historical prices.
This class can also aggregate data from multiple sources to provide a more comprehensive view of the market.
The following wrappers are included in this order:
- BinanceWrapper
- YFinanceWrapper
- CoinBaseWrapper
- CryptoCompareWrapper
Providers can be configured in configs.yaml under api.market_providers.
"""
def __init__(self, currency: str = "USD"):
def __init__(self):
"""
Initialize the MarketAPIsTool with multiple market API wrappers.
The following wrappers are included in this order:
- BinanceWrapper
- YFinanceWrapper
- CoinBaseWrapper
- CryptoCompareWrapper
Args:
currency (str): Valuta in cui restituire i prezzi. Default è "USD".
Initialize the MarketAPIsTool with market API wrappers configured in configs.yaml.
The order of wrappers is determined by the api.market_providers list in the configuration.
"""
kwargs = {"currency": currency or "USD"}
wrappers: list[type[MarketWrapper]] = [BinanceWrapper, YFinanceWrapper, CoinBaseWrapper, CryptoCompareWrapper]
self.handler = WrapperHandler.build_wrappers(wrappers, kwargs=kwargs)
config = AppConfig()
self.handler = WrapperHandler.build_wrappers(
constructors=[BinanceWrapper, YFinanceWrapper, CoinBaseWrapper, CryptoCompareWrapper],
filters=config.api.market_providers,
try_per_wrapper=config.api.retry_attempts,
retry_delay=config.api.retry_delay_seconds
)
Toolkit.__init__( # type: ignore
self,

View File

@@ -2,15 +2,13 @@ from agno.tools import Toolkit
from app.api.wrapper_handler import WrapperHandler
from app.api.core.news import NewsWrapper, Article
from app.api.news import NewsApiWrapper, GoogleNewsWrapper, CryptoPanicWrapper, DuckDuckGoWrapper
from app.configs import AppConfig
class NewsAPIsTool(NewsWrapper, Toolkit):
"""
Aggregates multiple news API wrappers and manages them using WrapperHandler.
This class supports retrieving top headlines and latest news articles by querying multiple sources:
- GoogleNewsWrapper
- DuckDuckGoWrapper
- NewsApiWrapper
- CryptoPanicWrapper
This class supports retrieving top headlines and latest news articles by querying multiple sources.
Providers can be configured in configs.yaml under api.news_providers.
By default, it returns results from the first successful wrapper.
Optionally, it can be configured to collect articles from all wrappers.
@@ -19,16 +17,17 @@ class NewsAPIsTool(NewsWrapper, Toolkit):
def __init__(self):
"""
Initialize the NewsAPIsTool with multiple news API wrappers.
The tool uses WrapperHandler to manage and invoke the different news API wrappers.
The following wrappers are included in this order:
- GoogleNewsWrapper.
- DuckDuckGoWrapper.
- NewsApiWrapper.
- CryptoPanicWrapper.
Initialize the NewsAPIsTool with news API wrappers configured in configs.yaml.
The order of wrappers is determined by the api.news_providers list in the configuration.
"""
wrappers: list[type[NewsWrapper]] = [GoogleNewsWrapper, DuckDuckGoWrapper, NewsApiWrapper, CryptoPanicWrapper]
self.handler = WrapperHandler.build_wrappers(wrappers)
config = AppConfig()
self.handler = WrapperHandler.build_wrappers(
constructors=[NewsApiWrapper, GoogleNewsWrapper, CryptoPanicWrapper, DuckDuckGoWrapper],
filters=config.api.news_providers,
try_per_wrapper=config.api.retry_attempts,
retry_delay=config.api.retry_delay_seconds
)
Toolkit.__init__( # type: ignore
self,

View File

@@ -1,14 +1,15 @@
from agno.tools import Toolkit
from app.api.wrapper_handler import WrapperHandler
from app.api.core.social import SocialPost, SocialWrapper
from app.api.social import RedditWrapper
from app.api.social import *
from app.configs import AppConfig
class SocialAPIsTool(SocialWrapper, Toolkit):
"""
Aggregates multiple social media API wrappers and manages them using WrapperHandler.
This class supports retrieving top crypto-related posts by querying multiple sources:
- RedditWrapper
This class supports retrieving top crypto-related posts by querying multiple sources.
Providers can be configured in configs.yaml under api.social_providers.
By default, it returns results from the first successful wrapper.
Optionally, it can be configured to collect posts from all wrappers.
@@ -17,14 +18,17 @@ class SocialAPIsTool(SocialWrapper, Toolkit):
def __init__(self):
"""
Initialize the SocialAPIsTool with multiple social media API wrappers.
The tool uses WrapperHandler to manage and invoke the different social media API wrappers.
The following wrappers are included in this order:
- RedditWrapper.
Initialize the SocialAPIsTool with social media API wrappers configured in configs.yaml.
The order of wrappers is determined by the api.social_providers list in the configuration.
"""
config = AppConfig()
wrappers: list[type[SocialWrapper]] = [RedditWrapper]
self.handler = WrapperHandler.build_wrappers(wrappers)
self.handler = WrapperHandler.build_wrappers(
constructors=[RedditWrapper, XWrapper, ChanWrapper],
filters=config.api.social_providers,
try_per_wrapper=config.api.retry_attempts,
retry_delay=config.api.retry_delay_seconds
)
Toolkit.__init__( # type: ignore
self,

View File

@@ -0,0 +1,103 @@
import os
import httpx
import asyncio
import logging
import pandas as pd
from io import StringIO
from agno.tools.toolkit import Toolkit
logging.basicConfig(level=logging.INFO)
logging = logging.getLogger("crypto_symbols")
BASE_URL = "https://finance.yahoo.com/markets/crypto/all/"
class CryptoSymbolsTools(Toolkit):
    """
    Tool to retrieve cryptocurrency symbols from Yahoo Finance.

    Symbols are cached in a CSV file so subsequent runs do not need to scrape again.
    """

    def __init__(self, cache_file: str = 'resources/cryptos.csv'):
        """
        Args:
            cache_file (str): path of the CSV cache file.
        """
        self.cache_file = cache_file
        # Load from cache if present, otherwise start with an empty table.
        self.final_table = pd.read_csv(self.cache_file) if os.path.exists(self.cache_file) else pd.DataFrame()  # type: ignore
        Toolkit.__init__(self,  # type: ignore
            name="Crypto Symbols Tool",
            instructions="Tool to get cryptocurrency symbols and search them by name.",
            tools=[
                self.get_all_symbols,
                self.get_symbols_by_name,
            ],
        )

    def get_all_symbols(self) -> list[str]:
        """
        Return all known cryptocurrency symbols.

        Returns:
            list[str]: every symbol in the cached table (empty if no data).
        """
        return self.final_table['Symbol'].tolist() if not self.final_table.empty else []

    def get_symbols_by_name(self, query: str) -> list[tuple[str, str]]:
        """
        Search symbols whose name contains the query (case-insensitive).

        Args:
            query (str): search string.

        Returns:
            list[tuple[str, str]]: (symbol, name) pairs matching the query.
        """
        # Bug fix: guard the empty table; indexing 'Name' on an empty DataFrame
        # raised KeyError, while get_all_symbols already handled this case.
        if self.final_table.empty:
            return []
        mask = self.final_table['Name'].str.lower().str.contains(query.lower())
        return self.final_table[mask][['Symbol', 'Name']].apply(tuple, axis=1).tolist()

    async def fetch_crypto_symbols(self, force_refresh: bool = False) -> None:
        """
        Fetch all cryptocurrency symbols from Yahoo Finance and cache them.

        Args:
            force_refresh (bool): if True, fetch even when data is already cached.
        """
        if not force_refresh and not self.final_table.empty:
            return
        num_currencies = 250  # Looks like the max per page, otherwise Yahoo returns 26
        offset = 0
        stop = not self.final_table.empty
        table = self.final_table.copy()
        while not stop:
            text = await self.___request(offset, num_currencies)
            tables = pd.read_html(text)  # type: ignore
            df = tables[0]
            # Align columns with previously collected pages before concatenating.
            df.columns = table.columns if not table.empty else df.columns
            table = pd.concat([table, df], ignore_index=True)
            total_rows = df.shape[0]
            offset += total_rows
            # A short page means we reached the end of the listing.
            if total_rows < num_currencies:
                stop = True
        table.dropna(axis=0, how='all', inplace=True)  # type: ignore
        table.dropna(axis=1, how='all', inplace=True)  # type: ignore
        table.to_csv(self.cache_file, index=False)
        self.final_table = table

    async def ___request(self, offset: int, num_currencies: int) -> StringIO:
        """
        Perform one paginated catalog request, retrying on HTTP 429 (rate limit).

        Raises:
            RuntimeError: on any non-200 status other than 429.
        """
        while True:
            async with httpx.AsyncClient() as client:
                resp = await client.get(f"{BASE_URL}?start={offset}&count={num_currencies}", headers={"User-Agent": "Mozilla/5.0"})
                if resp.status_code == 429:  # Too many requests
                    secs = int(resp.headers.get("Retry-After", 2))
                    logging.warning(f"Rate limit exceeded, waiting {secs}s before retrying...")
                    await asyncio.sleep(secs)
                    continue
                if resp.status_code != 200:
                    logging.error(f"Error fetching crypto symbols: [{resp.status_code}] {resp.text}")
                    # Bug fix: fail fast. Returning StringIO("") made pd.read_html
                    # crash downstream with an unrelated "No tables found" error.
                    raise RuntimeError(f"Error fetching crypto symbols: [{resp.status_code}]")
                return StringIO(resp.text)
# Manual entry point: rebuild the symbol cache from Yahoo Finance.
if __name__ == "__main__":
    crypto_symbols = CryptoSymbolsTools()
    asyncio.run(crypto_symbols.fetch_crypto_symbols(force_refresh=True))

View File

@@ -87,7 +87,7 @@ class WrapperHandler(Generic[WrapperType]):
Exception: If all wrappers fail after retries.
"""
logging.info(f"{inspect.getsource(func).strip()} {inspect.getclosurevars(func).nonlocals}")
logging.debug(f"{inspect.getsource(func).strip()} {inspect.getclosurevars(func).nonlocals}")
results: dict[str, OutputType] = {}
starting_index = self.index
@@ -97,12 +97,12 @@ class WrapperHandler(Generic[WrapperType]):
wrapper_name = wrapper.__class__.__name__
if not try_all:
logging.info(f"try_call {wrapper_name}")
logging.debug(f"try_call {wrapper_name}")
for try_count in range(1, self.retry_per_wrapper + 1):
try:
result = func(wrapper)
logging.info(f"{wrapper_name} succeeded")
logging.debug(f"{wrapper_name} succeeded")
results[wrapper_name] = result
break
@@ -131,13 +131,19 @@ class WrapperHandler(Generic[WrapperType]):
return f"{e} [\"{last_frame.filename}\", line {last_frame.lineno}]"
@staticmethod
def build_wrappers(constructors: list[type[WrapperClassType]], try_per_wrapper: int = 3, retry_delay: int = 2, kwargs: dict[str, Any] | None = None) -> 'WrapperHandler[WrapperClassType]':
def build_wrappers(
constructors: list[type[WrapperClassType]],
filters: list[str] | None = None,
try_per_wrapper: int = 3,
retry_delay: int = 2,
kwargs: dict[str, Any] | None = None) -> 'WrapperHandler[WrapperClassType]':
"""
Builds a WrapperHandler instance with the given wrapper constructors.
It attempts to initialize each wrapper and logs a warning if any cannot be initialized.
Only successfully initialized wrappers are included in the handler.
Args:
constructors (list[type[W]]): An iterable of wrapper classes to instantiate. e.g. [WrapperA, WrapperB]
filters (list[str] | None): Optional list of provider names to filter the constructors.
try_per_wrapper (int): Number of retries per wrapper before switching to the next.
retry_delay (int): Delay in seconds between retries.
kwargs (dict | None): Optional dictionary with keyword arguments common to all wrappers.
@@ -148,6 +154,10 @@ class WrapperHandler(Generic[WrapperType]):
"""
assert WrapperHandler.__check(constructors), f"All constructors must be classes. Received: {constructors}"
# Order of wrappers is now determined by the order in filters
if filters:
constructors = [c for name in filters for c in constructors if c.__name__ == name]
result: list[WrapperClassType] = []
for wrapper_class in constructors:
try:

View File

@@ -10,6 +10,10 @@ from agno.tools import Toolkit
from agno.models.base import Model
from agno.models.google import Gemini
from agno.models.ollama import Ollama
from agno.models.openai import OpenAIChat
from agno.models.mistral import MistralChat
from agno.models.deepseek import DeepSeek
# from agno.models.xai import xAI
log = logging.getLogger(__name__)
@@ -54,29 +58,94 @@ class AppModel(BaseModel):
output_schema=output_schema
)
class APIConfig(BaseModel):
retry_attempts: int = 3
retry_delay_seconds: int = 2
currency: str = "USD"
market_providers: list[str] = []
news_providers: list[str] = []
social_providers: list[str] = []
class Strategy(BaseModel):
name: str = "Conservative"
label: str = "Conservative"
description: str = "Focus on low-risk investments with steady returns."
class ModelsConfig(BaseModel):
gemini: list[AppModel] = [AppModel()]
gpt: list[AppModel] = [AppModel(name="gpt-4o", label="OpenAIChat")]
mistral: list[AppModel] = [AppModel(name="mistral-large-latest", label="Mistral")]
deepseek: list[AppModel] = [AppModel(name="deepseek-chat", label="DeepSeek")]
# xai: list[AppModel] = [AppModel(name="grok-3", label="xAI")]
ollama: list[AppModel] = []
@property
def all_models(self) -> list[AppModel]:
return self.gemini + self.ollama
return self.gemini + self.ollama + self.gpt + self.mistral + self.deepseek # + self.xai
def validate_models(self) -> None:
"""
Validate the configured models for each provider.
"""
self.__validate_online_models(self.gemini, clazz=Gemini, key="GOOGLE_API_KEY")
self.__validate_online_models(self.gpt, clazz=OpenAIChat, key="OPENAI_API_KEY")
self.__validate_online_models(self.mistral, clazz=MistralChat, key="MISTRAL_API_KEY")
self.__validate_online_models(self.deepseek, clazz=DeepSeek, key="DEEPSEEK_API_KEY")
# self.__validate_online_models(self.xai, clazz=xAI, key="XAI_API_KEY")
self.__validate_ollama_models()
def __validate_online_models(self, models: list[AppModel], clazz: type[Model], key: str | None = None) -> None:
"""
Validate models for online providers like Gemini.
Args:
models: list of AppModel instances to validate
clazz: class of the model (e.g. Gemini)
key: API key required for the provider (optional)
"""
if key and os.getenv(key) is None:
log.warning(f"No {key} set in environment variables for {clazz.__name__}.")
models.clear()
return
for model in models:
model.model = clazz
def __validate_ollama_models(self) -> None:
"""
Validate models for the Ollama provider.
"""
try:
models_list = ollama.list()
availables = {model['model'] for model in models_list['models']}
not_availables: list[str] = []
for model in self.ollama:
if model.name in availables:
model.model = Ollama
else:
not_availables.append(model.name)
if not_availables:
log.warning(f"Ollama models not available: {not_availables}")
self.ollama = [model for model in self.ollama if model.model]
except Exception as e:
log.warning(f"Ollama is not running or not reachable: {e}")
class AgentsConfigs(BaseModel):
strategy: str = "Conservative"
team_model: str = "gemini-2.0-flash"
team_leader_model: str = "gemini-2.0-flash"
predictor_model: str = "gemini-2.0-flash"
query_analyzer_model: str = "gemini-2.0-flash"
report_generation_model: str = "gemini-2.0-flash"
class AppConfig(BaseModel):
port: int = 8000
@@ -118,7 +187,7 @@ class AppConfig(BaseModel):
super().__init__(*args, **kwargs)
self.set_logging_level()
self.validate_models()
self.models.validate_models()
self._initialized = True
def get_model_by_name(self, name: str) -> AppModel:
@@ -186,53 +255,3 @@ class AppConfig(BaseModel):
logger = logging.getLogger(logger_name)
logger.handlers.clear()
logger.propagate = True
def validate_models(self) -> None:
"""
Validate the configured models for each provider.
"""
self.__validate_online_models("gemini", clazz=Gemini, key="GOOGLE_API_KEY")
self.__validate_ollama_models()
def __validate_online_models(self, provider: str, clazz: type[Model], key: str | None = None) -> None:
"""
Validate models for online providers like Gemini.
Args:
provider: name of the provider (e.g. "gemini")
clazz: class of the model (e.g. Gemini)
key: API key required for the provider (optional)
"""
if getattr(self.models, provider) is None:
log.warning(f"No models configured for provider '{provider}'.")
models: list[AppModel] = getattr(self.models, provider)
if key and os.getenv(key) is None:
log.warning(f"No {key} set in environment variables for {provider}.")
models.clear()
return
for model in models:
model.model = clazz
def __validate_ollama_models(self) -> None:
"""
Validate models for the Ollama provider.
"""
try:
models_list = ollama.list()
availables = {model['model'] for model in models_list['models']}
not_availables: list[str] = []
for model in self.models.ollama:
if model.name in availables:
model.model = Ollama
else:
not_availables.append(model.name)
if not_availables:
log.warning(f"Ollama models not available: {not_availables}")
self.models.ollama = [model for model in self.models.ollama if model.model]
except Exception as e:
log.warning(f"Ollama is not running or not reachable: {e}")

View File

@@ -1,4 +1,4 @@
from app.interface.chat import ChatManager
from app.interface.telegram_app import TelegramApp
from app.interface.telegram import TelegramApp
__all__ = ["ChatManager", "TelegramApp"]

View File

@@ -13,25 +13,9 @@ class ChatManager:
"""
def __init__(self):
self.history: list[dict[str, str]] = [] # [{"role": "user"/"assistant", "content": "..."}]
self.history: list[tuple[str, str]] = []
self.inputs = PipelineInputs()
def send_message(self, message: str) -> None:
"""
Aggiunge un messaggio utente, chiama la Pipeline e salva la risposta nello storico.
"""
# Aggiungi messaggio utente allo storico
self.history.append({"role": "user", "content": message})
def receive_message(self, response: str) -> str:
"""
Riceve un messaggio dalla pipeline e lo aggiunge allo storico.
"""
# Aggiungi risposta assistente allo storico
self.history.append({"role": "assistant", "content": response})
return response
def save_chat(self, filename: str = "chat.json") -> None:
"""
Salva la chat corrente in src/saves/<filename>.
@@ -55,7 +39,7 @@ class ChatManager:
"""
self.history = []
def get_history(self) -> list[dict[str, str]]:
def get_history(self) -> list[tuple[str, str]]:
"""
Restituisce lo storico completo della chat.
"""
@@ -65,33 +49,28 @@ class ChatManager:
########################################
# Funzioni Gradio
########################################
def gradio_respond(self, message: str, history: list[dict[str, str]]) -> tuple[list[dict[str, str]], list[dict[str, str]], str]:
self.send_message(message)
def gradio_respond(self, message: str, history: list[tuple[str, str]]) -> str:
self.inputs.user_query = message
pipeline = Pipeline(self.inputs)
response = pipeline.interact()
self.receive_message(response)
history.append({"role": "user", "content": message})
history.append({"role": "assistant", "content": response})
return history, history, ""
self.history.append((message, response))
return response
def gradio_save(self) -> str:
self.save_chat("chat.json")
return "💾 Chat salvata in chat.json"
def gradio_load(self) -> tuple[list[dict[str, str]], list[dict[str, str]]]:
def gradio_load(self) -> tuple[list[tuple[str, str]], list[tuple[str, str]]]:
self.load_chat("chat.json")
history: list[dict[str, str]] = []
for m in self.get_history():
history.append({"role": m["role"], "content": m["content"]})
history = self.get_history()
return history, history
def gradio_clear(self) -> tuple[list[dict[str, str]], list[dict[str, str]]]:
def gradio_clear(self) -> tuple[list[str], list[str]]:
self.reset_chat()
return [], []
def gradio_build_interface(self) -> gr.Blocks:
with gr.Blocks() as interface:
gr.Markdown("# 🤖 Agente di Analisi e Consulenza Crypto (Chat)")
@@ -100,6 +79,7 @@ class ChatManager:
with gr.Row():
provider = gr.Dropdown(
choices=self.inputs.list_models_names(),
value=self.inputs.team_leader_model.label,
type="index",
label="Modello da usare"
)
@@ -107,23 +87,22 @@ class ChatManager:
style = gr.Dropdown(
choices=self.inputs.list_strategies_names(),
value=self.inputs.strategy.label,
type="index",
label="Stile di investimento"
)
style.change(fn=self.inputs.choose_strategy, inputs=style, outputs=None)
chatbot = gr.Chatbot(label="Conversazione", height=500, type="messages")
msg = gr.Textbox(label="Scrivi la tua richiesta", placeholder="Es: Quali sono le crypto interessanti oggi?")
chat = gr.ChatInterface(
fn=self.gradio_respond
)
with gr.Row():
clear_btn = gr.Button("🗑️ Reset Chat")
save_btn = gr.Button("💾 Salva Chat")
load_btn = gr.Button("📂 Carica Chat")
# Eventi e interazioni
msg.submit(self.gradio_respond, inputs=[msg, chatbot], outputs=[chatbot, chatbot, msg])
clear_btn.click(self.gradio_clear, inputs=None, outputs=[chatbot, chatbot])
clear_btn.click(self.gradio_clear, inputs=None, outputs=[chat.chatbot, chat.chatbot_state])
save_btn.click(self.gradio_save, inputs=None, outputs=None)
load_btn.click(self.gradio_load, inputs=None, outputs=[chatbot, chatbot])
load_btn.click(self.gradio_load, inputs=None, outputs=[chat.chatbot, chat.chatbot_state])
return interface

View File

@@ -1,6 +1,8 @@
import asyncio
import io
import os
import json
from typing import Any
import httpx
import logging
import warnings
@@ -9,7 +11,7 @@ from markdown_pdf import MarkdownPdf, Section
from telegram import CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, Message, Update, User
from telegram.constants import ChatAction
from telegram.ext import Application, CallbackQueryHandler, CommandHandler, ContextTypes, ConversationHandler, MessageHandler, filters
from app.agents.pipeline import Pipeline, PipelineInputs
from app.agents.pipeline import Pipeline, PipelineEvent, PipelineInputs, RunMessage
# per per_message di ConversationHandler che rompe sempre qualunque input tu metta
warnings.filterwarnings("ignore")
@@ -21,23 +23,44 @@ logging = logging.getLogger("telegram")
# Un semplice schema delle interazioni:
# /start
# ║
# V
# v
# ╔══ CONFIGS <═════╗
# ║ ║ ╚══> SELECT_CONFIG
# ║ V
# ║ start_team (polling for updates)
# ║ v ^
# ║ MODELS ══════╝
# ║
# ╠══> start (polling for updates)
# ║ ║
# ║ V
# ║ v
# ╚═══> END
CONFIGS, SELECT_CONFIG = range(2)
CONFIGS, SELECT_MODEL, SELECT_CONFIG = range(3)
# Usato per separare la query arrivata da Telegram
QUERY_SEP = "|==|"
class ConfigsChat(Enum):
MODEL_CHECK = "Check Model"
MODEL_TEAM_LEADER = "Team Leader Model"
MODEL_TEAM = "Team Model"
MODEL_OUTPUT = "Output Model"
MODEL_REPORT = "Report Model"
CHANGE_MODELS = "Change Models"
STRATEGY = "Strategy"
CANCEL = "Cancel"
def get_inline_button(self, value_to_display:str="") -> InlineKeyboardButton:
display = self.value if not value_to_display else f"{self.value}: {value_to_display}"
return InlineKeyboardButton(display, callback_data=self.name)
def change_value(self, inputs: PipelineInputs, new_value:int) -> None:
functions_map = {
self.MODEL_CHECK.name: inputs.choose_query_checker,
self.MODEL_TEAM_LEADER.name: inputs.choose_team_leader,
self.MODEL_TEAM.name: inputs.choose_team,
self.MODEL_REPORT.name: inputs.choose_report_generator,
self.STRATEGY.name: inputs.choose_strategy,
}
functions_map[self.name](new_value)
class TelegramApp:
def __init__(self):
@@ -72,14 +95,21 @@ class TelegramApp:
entry_points=[CommandHandler('start', self.__start)],
states={
CONFIGS: [
CallbackQueryHandler(self.__model_team, pattern=ConfigsChat.MODEL_TEAM.name),
CallbackQueryHandler(self.__model_output, pattern=ConfigsChat.MODEL_OUTPUT.name),
CallbackQueryHandler(self.__models, pattern=ConfigsChat.CHANGE_MODELS.name),
CallbackQueryHandler(self.__strategy, pattern=ConfigsChat.STRATEGY.name),
CallbackQueryHandler(self.__cancel, pattern='^cancel$'),
MessageHandler(filters.TEXT, self.__start_team) # Any text message
CallbackQueryHandler(self.__cancel, pattern='^CANCEL$'),
MessageHandler(filters.TEXT, self.__start_llms) # Any text message
],
SELECT_MODEL: [
CallbackQueryHandler(self.__model_select, pattern=ConfigsChat.MODEL_CHECK.name),
CallbackQueryHandler(self.__model_select, pattern=ConfigsChat.MODEL_TEAM_LEADER.name),
CallbackQueryHandler(self.__model_select, pattern=ConfigsChat.MODEL_TEAM.name),
CallbackQueryHandler(self.__model_select, pattern=ConfigsChat.MODEL_REPORT.name),
CallbackQueryHandler(self.__go_to_start, pattern='^CANCEL$'),
],
SELECT_CONFIG: [
CallbackQueryHandler(self.__select_config, pattern=f"^__select_config{QUERY_SEP}.*$"),
CallbackQueryHandler(self.__go_to_start, pattern='^CANCEL$'),
]
},
fallbacks=[CommandHandler('start', self.__start)],
@@ -87,45 +117,28 @@ class TelegramApp:
self.app = app
def run(self) -> None:
"""
Start the Telegram bot polling. This will keep the bot running and listening for updates.\n
This function blocks until the bot is stopped.
"""
self.app.run_polling()
########################################
# Funzioni di utilità
########################################
async def start_message(self, user: User, query: CallbackQuery | Message) -> None:
confs = self.user_requests.setdefault(user, PipelineInputs())
str_model_team = f"{ConfigsChat.MODEL_TEAM.value}: {confs.team_model.label}"
str_model_output = f"{ConfigsChat.MODEL_OUTPUT.value}: {confs.team_leader_model.label}"
str_strategy = f"{ConfigsChat.STRATEGY.value}: {confs.strategy.label}"
msg, keyboard = (
"Please choose an option or write your query",
InlineKeyboardMarkup([
[InlineKeyboardButton(str_model_team, callback_data=ConfigsChat.MODEL_TEAM.name)],
[InlineKeyboardButton(str_model_output, callback_data=ConfigsChat.MODEL_OUTPUT.name)],
[InlineKeyboardButton(str_strategy, callback_data=ConfigsChat.STRATEGY.name)],
[InlineKeyboardButton("Cancel", callback_data='cancel')]
])
)
if isinstance(query, CallbackQuery):
await query.edit_message_text(msg, reply_markup=keyboard, parse_mode='MarkdownV2')
else:
await query.reply_text(msg, reply_markup=keyboard, parse_mode='MarkdownV2')
async def handle_callbackquery(self, update: Update) -> tuple[CallbackQuery, User]:
assert update.callback_query and update.callback_query.from_user, "Update callback_query or user is None"
assert update.callback_query, "Update callback_query is None"
assert update.effective_user, "Update effective_user is None"
query = update.callback_query
await query.answer() # Acknowledge the callback query
return query, query.from_user
return query, update.effective_user
async def handle_message(self, update: Update) -> tuple[Message, User]:
assert update.message and update.message.from_user, "Update message or user is None"
return update.message, update.message.from_user
def handle_message(self, update: Update) -> tuple[Message, User]:
assert update.message and update.effective_user, "Update message or user is None"
return update.message, update.effective_user
def build_callback_data(self, callback: str, config: ConfigsChat, labels: list[str]) -> list[tuple[str, str]]:
return [(label, QUERY_SEP.join((callback, config.value, str(i)))) for i, label in enumerate(labels)]
return [(label, QUERY_SEP.join((callback, config.name, str(i)))) for i, label in enumerate(labels)]
async def __error_handler(self, update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
try:
@@ -142,28 +155,69 @@ class TelegramApp:
logging.exception("Exception in the error handler")
#########################################
# Funzioni async per i comandi e messaggi
# Funzioni base di gestione stati
#########################################
async def __start(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
message, user = await self.handle_message(update)
logging.info(f"@{user.username} started the conversation.")
await self.start_message(user, message)
user = update.effective_user.username if update.effective_user else "Unknown"
logging.info(f"@{user} started the conversation.")
return await self.__go_to_start(update, context)
async def __go_to_start(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
user = update.effective_user
assert user, "Update effective_user is None"
msg = update.callback_query if update.callback_query else update.message
assert msg, "Update message and callback_query are both None"
confs = self.user_requests.setdefault(user, PipelineInputs()) # despite the name, it creates a default only if not present
args: dict[str, Any] = {
"text": "Please choose an option or write your query",
"parse_mode": 'MarkdownV2',
"reply_markup": InlineKeyboardMarkup([
[ConfigsChat.CHANGE_MODELS.get_inline_button()],
[ConfigsChat.STRATEGY.get_inline_button(confs.strategy.label)],
[ConfigsChat.CANCEL.get_inline_button()],
])
}
await (msg.edit_message_text(**args) if isinstance(msg, CallbackQuery) else msg.reply_text(**args))
return CONFIGS
async def __model_team(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
return await self._model_select(update, ConfigsChat.MODEL_TEAM)
async def __cancel(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
query, user = await self.handle_callbackquery(update)
logging.info(f"@{user.username} canceled the conversation.")
if user in self.user_requests:
del self.user_requests[user]
await query.edit_message_text("Conversation canceled. Use /start to begin again.")
return ConversationHandler.END
async def __model_output(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
return await self._model_select(update, ConfigsChat.MODEL_OUTPUT)
##########################################
# Configurazioni
##########################################
async def __models(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
query, user = await self.handle_callbackquery(update)
req = self.user_requests[user]
async def _model_select(self, update: Update, state: ConfigsChat, msg: str | None = None) -> int:
await query.edit_message_text("Select a model", reply_markup=InlineKeyboardMarkup([
[ConfigsChat.MODEL_CHECK.get_inline_button(req.query_analyzer_model.label)],
[ConfigsChat.MODEL_TEAM_LEADER.get_inline_button(req.team_leader_model.label)],
[ConfigsChat.MODEL_TEAM.get_inline_button(req.team_model.label)],
[ConfigsChat.MODEL_REPORT.get_inline_button(req.report_generation_model.label)],
[ConfigsChat.CANCEL.get_inline_button()]
]))
return SELECT_MODEL
async def __model_select(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
query, user = await self.handle_callbackquery(update)
if not query.data:
logging.error("Callback query data is None")
return CONFIGS
req = self.user_requests[user]
models = self.build_callback_data("__select_config", state, req.list_models_names())
models = self.build_callback_data("__select_config", ConfigsChat[query.data], req.list_models_names())
inline_btns = [[InlineKeyboardButton(name, callback_data=callback_data)] for name, callback_data in models]
await query.edit_message_text(msg or state.value, reply_markup=InlineKeyboardMarkup(inline_btns))
await query.edit_message_text("Select a model", reply_markup=InlineKeyboardMarkup(inline_btns))
return SELECT_CONFIG
async def __strategy(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
@@ -182,71 +236,62 @@ class TelegramApp:
req = self.user_requests[user]
_, state, index = str(query.data).split(QUERY_SEP)
if state == str(ConfigsChat.MODEL_TEAM):
req.choose_team(int(index))
if state == str(ConfigsChat.MODEL_OUTPUT):
req.choose_team_leader(int(index))
if state == str(ConfigsChat.STRATEGY):
req.choose_strategy(int(index))
ConfigsChat[state].change_value(req, int(index))
await self.start_message(user, query)
return CONFIGS
return await self.__go_to_start(update, context)
async def __start_team(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
message, user = await self.handle_message(update)
async def __start_llms(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
message, user = self.handle_message(update)
confs = self.user_requests[user]
confs.user_query = message.text or ""
logging.info(f"@{user.username} started the team with [{confs.team_model.label}, {confs.team_leader_model.label}, {confs.strategy.label}]")
await self.__run_team(update, confs)
logging.info(f"@{user.username} started the team with [{confs.query_analyzer_model.label}, {confs.team_model.label}, {confs.team_leader_model.label}, {confs.report_generation_model.label}, {confs.strategy.label}]")
await self.__run(update, confs)
logging.info(f"@{user.username} team finished.")
return ConversationHandler.END
async def __cancel(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
query, user = await self.handle_callbackquery(update)
logging.info(f"@{user.username} canceled the conversation.")
if user in self.user_requests:
del self.user_requests[user]
await query.edit_message_text("Conversation canceled. Use /start to begin again.")
return ConversationHandler.END
async def __run_team(self, update: Update, inputs: PipelineInputs) -> None:
##########################################
# RUN APP
##########################################
async def __run(self, update: Update, inputs: PipelineInputs) -> None:
if not update.message: return
bot = update.get_bot()
msg_id = update.message.message_id - 1
chat_id = update.message.chat_id
configs_str = [
'Running with configurations: ',
f'Team: {inputs.team_model.label}',
f'Output: {inputs.team_leader_model.label}',
f'Strategy: {inputs.strategy.label}',
f'Query: "{inputs.user_query}"'
]
full_message = f"""```\n{'\n'.join(configs_str)}\n```\n\n"""
first_message = full_message + "Generating report, please wait"
msg = await bot.edit_message_text(chat_id=chat_id, message_id=msg_id, text=first_message, parse_mode='MarkdownV2')
run_message = RunMessage(inputs, prefix="```\n", suffix="\n```")
msg = await bot.edit_message_text(chat_id=chat_id, message_id=msg_id, text=run_message.get_latest(), parse_mode='MarkdownV2')
if isinstance(msg, bool): return
# Remove user query and bot message
await bot.delete_message(chat_id=chat_id, message_id=update.message.id)
# TODO migliorare messaggi di attesa
def update_user(update_step: str = "") -> None:
if update_step: run_message.update_step(update_step)
else: run_message.update()
message = run_message.get_latest()
if msg.text != message:
asyncio.create_task(msg.edit_text(message, parse_mode='MarkdownV2'))
await bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
pipeline = Pipeline(inputs)
report_content = await pipeline.interact_async()
await msg.delete()
report_content = await pipeline.interact_async(listeners=[
(PipelineEvent.QUERY_CHECK, lambda _: update_user()),
(PipelineEvent.TOOL_USED, lambda e: update_user(e.tool.tool_name.replace('get_', '').replace("_", "\\_"))),
(PipelineEvent.INFO_RECOVERY, lambda _: update_user()),
(PipelineEvent.REPORT_GENERATION, lambda _: update_user()),
])
# attach report file to the message
pdf = MarkdownPdf(toc_level=2, optimize=True)
pdf.add_section(Section(report_content, toc=False))
# TODO vedere se ha senso dare il pdf o solo il messaggio
document = io.BytesIO()
pdf.save_bytes(document)
document.seek(0)
await bot.send_document(chat_id=chat_id, document=document, filename="report.pdf", parse_mode='MarkdownV2', caption=full_message)
await msg.reply_document(document=document, filename="report.pdf")

View File

@@ -0,0 +1,48 @@
import pytest
from app.agents.core import QueryOutputs
from app.agents.prompts import QUERY_CHECK_INSTRUCTIONS
from app.configs import AppConfig
class TestQueryCheckAgent:
    """Exercises the query-check agent's crypto vs. non-crypto classification.

    Each test sends one query to a small local model configured with the
    QUERY_CHECK_INSTRUCTIONS prompt and checks the ``is_crypto`` flag of the
    structured ``QueryOutputs`` response.
    """

    @pytest.fixture(autouse=True)
    def setup(self):
        # Build the agent once per test from the app configuration,
        # pinning a small local model to keep runs cheap.
        self.configs = AppConfig.load()
        self.model = self.configs.get_model_by_name("qwen3:1.7b")
        self.agent = self.model.get_agent(QUERY_CHECK_INSTRUCTIONS, output_schema=QueryOutputs)

    def _classify(self, query: str) -> QueryOutputs:
        """Run the agent on *query* and return its validated QueryOutputs payload."""
        response = self.agent.run(query)  # type: ignore
        assert response is not None
        assert response.content is not None
        content = response.content
        assert isinstance(content, QueryOutputs)
        return content

    def test_query_not_ok(self):
        # An unrelated question must not be classified as crypto.
        assert not self._classify("Is the sky blue?").is_crypto

    def test_query_not_ok2(self):
        # General-knowledge questions are also out of scope.
        assert not self._classify("What is the capital of France?").is_crypto

    def test_query_ok(self):
        # A bare cryptocurrency name is a crypto query.
        assert self._classify("Bitcoin").is_crypto

    def test_query_ok2(self):
        # Non-English (Italian) crypto questions must still be recognized.
        assert self._classify("Ha senso investire in Ethereum?").is_crypto

View File

@@ -0,0 +1,31 @@
import pytest
from app.agents.prompts import REPORT_GENERATION_INSTRUCTIONS
from app.configs import AppConfig
class TestReportGenerationAgent:
    """Checks that the report-generation agent turns raw market notes into a report."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # One agent per test, built from the app config on a small local model.
        self.configs = AppConfig.load()
        self.model = self.configs.get_model_by_name("qwen3:1.7b")
        self.agent = self.model.get_agent(REPORT_GENERATION_INSTRUCTIONS)

    def test_report_generation(self):
        # Synthetic market summary fed to the agent as its only input.
        sample_data = """
        The analysis reported from the Market Agent have highlighted the following key metrics for the cryptocurrency market:
        Bitcoin (BTC) has shown strong performance over the last 24 hours with a price of $30,000 and a Market Cap of $600 Billion
        Ethereum (ETH) is currently priced at $2,000 with a Market Cap of $250 Billion and a 24h Volume of $20 Billion.
        The overall market sentiment is bullish with a 5% increase in total market capitalization.
        No significant regulatory news has been reported and the social media sentiment remains unknown.
        """
        response = self.agent.run(sample_data)  # type: ignore
        assert response is not None
        assert response.content is not None
        report = response.content
        assert isinstance(report, str)
        print(report)
        # The generated report must mention both assets and contain a summary section.
        for expected in ("Bitcoin", "Ethereum", "Summary"):
            assert expected in report

37
tests/agents/test_team.py Normal file
View File

@@ -0,0 +1,37 @@
import asyncio
import pytest
from app.agents.core import PipelineInputs
from app.agents.prompts import *
from app.configs import AppConfig
# fix warning about no event loop
@pytest.fixture(scope="session", autouse=True)
def event_loop():
    """Provide a session-wide asyncio event loop and guarantee it is closed.

    Silences pytest's "no running event loop" warning by installing a loop for
    the whole test session. The close is wrapped in try/finally so the loop is
    released even if an exception is thrown into the generator during teardown.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        yield loop
    finally:
        loop.close()
@pytest.mark.slow
class TestTeamAgent:
    """End-to-end check of the agent team on a simple crypto investment query."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Force both team roles onto a small local model to keep the run cheap.
        self.configs = AppConfig.load()
        self.configs.agents.team_model = "qwen3:1.7b"
        self.configs.agents.team_leader_model = "qwen3:1.7b"
        self.inputs = PipelineInputs(self.configs)
        self.team = self.inputs.get_agent_team()

    def test_team_agent_response(self):
        self.inputs.user_query = "Is Bitcoin a good investment now?"
        query_inputs = self.inputs.get_query_inputs()
        response = self.team.run(query_inputs)  # type: ignore
        assert response is not None
        assert response.content is not None
        report = response.content
        print(report)
        # The team's answer must be text and must mention the queried asset.
        assert isinstance(report, str)
        assert "Bitcoin" in report

View File

@@ -0,0 +1,22 @@
import re
import pytest
from app.api.social.chan import ChanWrapper
@pytest.mark.social
@pytest.mark.api
class TestChanWrapper:
    """Live-API tests for the 4chan social wrapper."""

    def test_initialization(self):
        # The wrapper must construct without needing credentials.
        assert ChanWrapper() is not None

    def test_get_top_crypto_posts(self):
        wrapper = ChanWrapper()
        posts = wrapper.get_top_crypto_posts(limit=2)
        assert isinstance(posts, list)
        assert len(posts) == 2
        for post in posts:
            # Every post needs a title and an ISO-style YYYY-MM-DD timestamp prefix.
            assert post.title != ""
            assert post.timestamp != ""
            assert re.match(r'\d{4}-\d{2}-\d{2}', post.timestamp)
            assert isinstance(post.comments, list)

View File

@@ -1,4 +1,5 @@
import os
import re
import pytest
from app.api.social.reddit import MAX_COMMENTS, RedditWrapper
@@ -18,6 +19,8 @@ class TestRedditWrapper:
assert len(posts) == 2
for post in posts:
assert post.title != ""
assert re.match(r'\d{4}-\d{2}-\d{2}', post.timestamp)
assert isinstance(post.comments, list)
assert len(post.comments) <= MAX_COMMENTS
for comment in post.comments:

View File

@@ -0,0 +1,24 @@
import os
import re
import pytest
from shutil import which
from app.api.social.x import XWrapper
@pytest.mark.social
@pytest.mark.api
@pytest.mark.skipif(not os.getenv("X_API_KEY"), reason="X_API_KEY not set in environment variables")
@pytest.mark.skipif(which('rettiwt') is None, reason="rettiwt not installed")
class TestXWrapper:
    """Live-API tests for the X (Twitter) wrapper; skipped without key or rettiwt CLI."""

    def test_initialization(self):
        # Construction alone must succeed once the skip guards pass.
        assert XWrapper() is not None

    def test_get_top_crypto_posts(self):
        wrapper = XWrapper()
        posts = wrapper.get_top_crypto_posts(limit=2)
        assert isinstance(posts, list)
        assert len(posts) == 2
        for post in posts:
            # Each post carries a non-empty title, a YYYY-MM-DD timestamp and a comment list.
            assert post.title != ""
            assert re.match(r'\d{4}-\d{2}-\d{2}', post.timestamp)
            assert isinstance(post.comments, list)

View File

@@ -0,0 +1,27 @@
import pytest
from app.api.tools import CryptoSymbolsTools
@pytest.mark.tools
class TestCryptoSymbolsTools:
    """Unit tests for symbol listing and name-based symbol lookup."""

    def test_get_symbols(self):
        # The full symbol list must include the flagship BTC-USD pair.
        symbols = CryptoSymbolsTools().get_all_symbols()
        assert isinstance(symbols, list)
        assert "BTC-USD" in symbols

    def test_get_symbol_by_name(self):
        tool = CryptoSymbolsTools()
        # Exact well-known name.
        matches = tool.get_symbols_by_name("Bitcoin")
        assert isinstance(matches, list)
        assert ("BTC-USD", "Bitcoin USD") in matches
        # Partial/fuzzy name still resolves to its listed pair.
        matches = tool.get_symbols_by_name("Banana")
        assert isinstance(matches, list)
        assert ("BANANA28886-USD", "BananaCoin USD") in matches

    def test_get_symbol_by_invalid_name(self):
        # Unknown names yield an empty list rather than raising.
        matches = CryptoSymbolsTools().get_symbols_by_name("InvalidName")
        assert isinstance(matches, list)
        assert not matches

View File

@@ -7,14 +7,14 @@ from app.api.tools import MarketAPIsTool
@pytest.mark.api
class TestMarketAPIsTool:
def test_wrapper_initialization(self):
market_wrapper = MarketAPIsTool("EUR")
market_wrapper = MarketAPIsTool()
assert market_wrapper is not None
assert hasattr(market_wrapper, 'get_product')
assert hasattr(market_wrapper, 'get_products')
assert hasattr(market_wrapper, 'get_historical_prices')
def test_wrapper_capabilities(self):
market_wrapper = MarketAPIsTool("EUR")
market_wrapper = MarketAPIsTool()
capabilities: list[str] = []
if hasattr(market_wrapper, 'get_product'):
capabilities.append('single_product')
@@ -25,7 +25,7 @@ class TestMarketAPIsTool:
assert len(capabilities) > 0
def test_market_data_retrieval(self):
market_wrapper = MarketAPIsTool("EUR")
market_wrapper = MarketAPIsTool()
btc_product = market_wrapper.get_product("BTC")
assert btc_product is not None
assert hasattr(btc_product, 'symbol')
@@ -34,7 +34,7 @@ class TestMarketAPIsTool:
def test_error_handling(self):
try:
market_wrapper = MarketAPIsTool("EUR")
market_wrapper = MarketAPIsTool()
fake_product = market_wrapper.get_product("NONEXISTENT_CRYPTO_SYMBOL_12345")
assert fake_product is None or fake_product.price == 0
except Exception as _:

View File

@@ -17,9 +17,9 @@ class TestSocialAPIsTool:
assert len(result) > 0
for post in result:
assert post.title is not None
assert post.time is not None
assert post.timestamp is not None
def test_social_api_tool_get_top__all_results(self):
def test_social_api_tool_get_top_all_results(self):
tool = SocialAPIsTool()
result = tool.handler.try_call_all(lambda w: w.get_top_crypto_posts(limit=2))
assert isinstance(result, dict)
@@ -27,4 +27,4 @@ class TestSocialAPIsTool:
for _provider, posts in result.items():
for post in posts:
assert post.title is not None
assert post.time is not None
assert post.timestamp is not None

130
uv.lock generated
View File

@@ -389,6 +389,26 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/40/cd/ef820662e0d87f46b829bba7e2324c7978e0153692bbd2f08f7746049708/ddgs-9.6.0-py3-none-any.whl", hash = "sha256:24120f1b672fd3a28309db029e7038eb3054381730aea7a08d51bb909dd55520", size = 41558, upload-time = "2025-09-17T13:27:08.99Z" },
]
[[package]]
name = "deepseek"
version = "1.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "requests" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/04/7b/bede06edf1a25a6ab06553b15f6abf8e912848dfa5f68514720d3e388550/deepseek-1.0.0-py3-none-any.whl", hash = "sha256:ee4175bfcb7ac1154369dbd86a4d8bc1809f6fa20e3e7baa362544567197cb3f", size = 4542, upload-time = "2025-01-03T08:06:23.887Z" },
]
[[package]]
name = "distro"
version = "1.9.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" },
]
[[package]]
name = "dnspython"
version = "2.8.0"
@@ -418,6 +438,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b2/b7/545d2c10c1fc15e48653c91efde329a790f2eecfbbf2bd16003b5db2bab0/dotenv-0.9.9-py2.py3-none-any.whl", hash = "sha256:29cf74a087b31dafdb5a446b6d7e11cbce8ed2741540e2339c69fbef92c94ce9", size = 1892, upload-time = "2025-02-19T22:15:01.647Z" },
]
[[package]]
name = "eval-type-backport"
version = "0.2.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/30/ea/8b0ac4469d4c347c6a385ff09dc3c048c2d021696664e26c7ee6791631b5/eval_type_backport-0.2.2.tar.gz", hash = "sha256:f0576b4cf01ebb5bd358d02314d31846af5e07678387486e2c798af0e7d849c1", size = 9079, upload-time = "2024-12-21T20:09:46.005Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ce/31/55cd413eaccd39125368be33c46de24a1f639f2e12349b0361b4678f3915/eval_type_backport-0.2.2-py3-none-any.whl", hash = "sha256:cb6ad7c393517f476f96d456d0412ea80f0a8cf96f6892834cd9340149111b0a", size = 5830, upload-time = "2024-12-21T20:09:44.175Z" },
]
[[package]]
name = "fastapi"
version = "0.118.0"
@@ -690,6 +719,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496", size = 34357, upload-time = "2025-01-22T21:44:56.92Z" },
]
[[package]]
name = "html5lib"
version = "1.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "six" },
{ name = "webencodings" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ac/b6/b55c3f49042f1df3dcd422b7f224f939892ee94f22abcf503a9b7339eaf2/html5lib-1.1.tar.gz", hash = "sha256:b2e5b40261e20f354d198eae92afc10d750afb487ed5e50f9c4eaf07c184146f", size = 272215, upload-time = "2020-06-22T23:32:38.834Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6c/dd/a834df6482147d48e225a49515aabc28974ad5a4ca3215c18a882565b028/html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d", size = 112173, upload-time = "2020-06-22T23:32:36.781Z" },
]
[[package]]
name = "httpcore"
version = "1.0.9"
@@ -776,6 +818,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" },
]
[[package]]
name = "invoke"
version = "2.2.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/de/bd/b461d3424a24c80490313fd77feeb666ca4f6a28c7e72713e3d9095719b4/invoke-2.2.1.tar.gz", hash = "sha256:515bf49b4a48932b79b024590348da22f39c4942dff991ad1fb8b8baea1be707", size = 304762, upload-time = "2025-10-11T00:36:35.172Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/32/4b/b99e37f88336009971405cbb7630610322ed6fbfa31e1d7ab3fbf3049a2d/invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8", size = 160287, upload-time = "2025-10-11T00:36:33.703Z" },
]
[[package]]
name = "jinja2"
version = "3.1.6"
@@ -788,6 +839,31 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" },
]
[[package]]
name = "jiter"
version = "0.11.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a3/68/0357982493a7b20925aece061f7fb7a2678e3b232f8d73a6edb7e5304443/jiter-0.11.1.tar.gz", hash = "sha256:849dcfc76481c0ea0099391235b7ca97d7279e0fa4c86005457ac7c88e8b76dc", size = 168385, upload-time = "2025-10-17T11:31:15.186Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/15/8b/318e8af2c904a9d29af91f78c1e18f0592e189bbdb8a462902d31fe20682/jiter-0.11.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c92148eec91052538ce6823dfca9525f5cfc8b622d7f07e9891a280f61b8c96c", size = 305655, upload-time = "2025-10-17T11:29:18.859Z" },
{ url = "https://files.pythonhosted.org/packages/f7/29/6c7de6b5d6e511d9e736312c0c9bfcee8f9b6bef68182a08b1d78767e627/jiter-0.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ecd4da91b5415f183a6be8f7158d127bdd9e6a3174138293c0d48d6ea2f2009d", size = 315645, upload-time = "2025-10-17T11:29:20.889Z" },
{ url = "https://files.pythonhosted.org/packages/ac/5f/ef9e5675511ee0eb7f98dd8c90509e1f7743dbb7c350071acae87b0145f3/jiter-0.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7e3ac25c00b9275684d47aa42febaa90a9958e19fd1726c4ecf755fbe5e553b", size = 348003, upload-time = "2025-10-17T11:29:22.712Z" },
{ url = "https://files.pythonhosted.org/packages/56/1b/abe8c4021010b0a320d3c62682769b700fb66f92c6db02d1a1381b3db025/jiter-0.11.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57d7305c0a841858f866cd459cd9303f73883fb5e097257f3d4a3920722c69d4", size = 365122, upload-time = "2025-10-17T11:29:24.408Z" },
{ url = "https://files.pythonhosted.org/packages/2a/2d/4a18013939a4f24432f805fbd5a19893e64650b933edb057cd405275a538/jiter-0.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e86fa10e117dce22c547f31dd6d2a9a222707d54853d8de4e9a2279d2c97f239", size = 488360, upload-time = "2025-10-17T11:29:25.724Z" },
{ url = "https://files.pythonhosted.org/packages/f0/77/38124f5d02ac4131f0dfbcfd1a19a0fac305fa2c005bc4f9f0736914a1a4/jiter-0.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae5ef1d48aec7e01ee8420155d901bb1d192998fa811a65ebb82c043ee186711", size = 376884, upload-time = "2025-10-17T11:29:27.056Z" },
{ url = "https://files.pythonhosted.org/packages/7b/43/59fdc2f6267959b71dd23ce0bd8d4aeaf55566aa435a5d00f53d53c7eb24/jiter-0.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb68e7bf65c990531ad8715e57d50195daf7c8e6f1509e617b4e692af1108939", size = 358827, upload-time = "2025-10-17T11:29:28.698Z" },
{ url = "https://files.pythonhosted.org/packages/7d/d0/b3cc20ff5340775ea3bbaa0d665518eddecd4266ba7244c9cb480c0c82ec/jiter-0.11.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43b30c8154ded5845fa454ef954ee67bfccce629b2dea7d01f795b42bc2bda54", size = 385171, upload-time = "2025-10-17T11:29:30.078Z" },
{ url = "https://files.pythonhosted.org/packages/d2/bc/94dd1f3a61f4dc236f787a097360ec061ceeebebf4ea120b924d91391b10/jiter-0.11.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:586cafbd9dd1f3ce6a22b4a085eaa6be578e47ba9b18e198d4333e598a91db2d", size = 518359, upload-time = "2025-10-17T11:29:31.464Z" },
{ url = "https://files.pythonhosted.org/packages/7e/8c/12ee132bd67e25c75f542c227f5762491b9a316b0dad8e929c95076f773c/jiter-0.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:677cc2517d437a83bb30019fd4cf7cad74b465914c56ecac3440d597ac135250", size = 509205, upload-time = "2025-10-17T11:29:32.895Z" },
{ url = "https://files.pythonhosted.org/packages/39/d5/9de848928ce341d463c7e7273fce90ea6d0ea4343cd761f451860fa16b59/jiter-0.11.1-cp312-cp312-win32.whl", hash = "sha256:fa992af648fcee2b850a3286a35f62bbbaeddbb6dbda19a00d8fbc846a947b6e", size = 205448, upload-time = "2025-10-17T11:29:34.217Z" },
{ url = "https://files.pythonhosted.org/packages/ee/b0/8002d78637e05009f5e3fb5288f9d57d65715c33b5d6aa20fd57670feef5/jiter-0.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:88b5cae9fa51efeb3d4bd4e52bfd4c85ccc9cac44282e2a9640893a042ba4d87", size = 204285, upload-time = "2025-10-17T11:29:35.446Z" },
{ url = "https://files.pythonhosted.org/packages/9f/a2/bb24d5587e4dff17ff796716542f663deee337358006a80c8af43ddc11e5/jiter-0.11.1-cp312-cp312-win_arm64.whl", hash = "sha256:9a6cae1ab335551917f882f2c3c1efe7617b71b4c02381e4382a8fc80a02588c", size = 188712, upload-time = "2025-10-17T11:29:37.027Z" },
{ url = "https://files.pythonhosted.org/packages/a6/bc/950dd7f170c6394b6fdd73f989d9e729bd98907bcc4430ef080a72d06b77/jiter-0.11.1-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:0d4d6993edc83cf75e8c6828a8d6ce40a09ee87e38c7bfba6924f39e1337e21d", size = 302626, upload-time = "2025-10-17T11:31:09.645Z" },
{ url = "https://files.pythonhosted.org/packages/3a/65/43d7971ca82ee100b7b9b520573eeef7eabc0a45d490168ebb9a9b5bb8b2/jiter-0.11.1-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:f78d151c83a87a6cf5461d5ee55bc730dd9ae227377ac6f115b922989b95f838", size = 297034, upload-time = "2025-10-17T11:31:10.975Z" },
{ url = "https://files.pythonhosted.org/packages/19/4c/000e1e0c0c67e96557a279f8969487ea2732d6c7311698819f977abae837/jiter-0.11.1-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9022974781155cd5521d5cb10997a03ee5e31e8454c9d999dcdccd253f2353f", size = 337328, upload-time = "2025-10-17T11:31:12.399Z" },
{ url = "https://files.pythonhosted.org/packages/d9/71/71408b02c6133153336d29fa3ba53000f1e1a3f78bb2fc2d1a1865d2e743/jiter-0.11.1-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18c77aaa9117510d5bdc6a946baf21b1f0cfa58ef04d31c8d016f206f2118960", size = 343697, upload-time = "2025-10-17T11:31:13.773Z" },
]
[[package]]
name = "lxml"
version = "6.0.2"
@@ -867,6 +943,24 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
]
[[package]]
name = "mistralai"
version = "1.9.11"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "eval-type-backport" },
{ name = "httpx" },
{ name = "invoke" },
{ name = "pydantic" },
{ name = "python-dateutil" },
{ name = "pyyaml" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/5a/8d/d8b7af67a966b6f227024e1cb7287fc19901a434f87a5a391dcfe635d338/mistralai-1.9.11.tar.gz", hash = "sha256:3df9e403c31a756ec79e78df25ee73cea3eb15f86693773e16b16adaf59c9b8a", size = 208051, upload-time = "2025-10-02T15:53:40.473Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/fe/76/4ce12563aea5a76016f8643eff30ab731e6656c845e9e4d090ef10c7b925/mistralai-1.9.11-py3-none-any.whl", hash = "sha256:7a3dc2b8ef3fceaa3582220234261b5c4e3e03a972563b07afa150e44a25a6d3", size = 442796, upload-time = "2025-10-02T15:53:39.134Z" },
]
[[package]]
name = "multidict"
version = "6.6.4"
@@ -944,6 +1038,25 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b5/c1/edc9f41b425ca40b26b7c104c5f6841a4537bb2552bfa6ca66e81405bb95/ollama-0.6.0-py3-none-any.whl", hash = "sha256:534511b3ccea2dff419ae06c3b58d7f217c55be7897c8ce5868dfb6b219cf7a0", size = 14130, upload-time = "2025-09-24T22:46:01.19Z" },
]
[[package]]
name = "openai"
version = "2.6.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "distro" },
{ name = "httpx" },
{ name = "jiter" },
{ name = "pydantic" },
{ name = "sniffio" },
{ name = "tqdm" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c4/44/303deb97be7c1c9b53118b52825cbd1557aeeff510f3a52566b1fa66f6a2/openai-2.6.1.tar.gz", hash = "sha256:27ae704d190615fca0c0fc2b796a38f8b5879645a3a52c9c453b23f97141bb49", size = 593043, upload-time = "2025-10-24T13:29:52.79Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/15/0e/331df43df633e6105ff9cf45e0ce57762bd126a45ac16b25a43f6738d8a2/openai-2.6.1-py3-none-any.whl", hash = "sha256:904e4b5254a8416746a2f05649594fa41b19d799843cd134dac86167e094edef", size = 1005551, upload-time = "2025-10-24T13:29:50.973Z" },
]
[[package]]
name = "orjson"
version = "3.11.3"
@@ -1658,13 +1771,17 @@ dependencies = [
{ name = "coinbase-advanced-py" },
{ name = "colorlog" },
{ name = "ddgs" },
{ name = "deepseek" },
{ name = "dotenv" },
{ name = "gnews" },
{ name = "google-genai" },
{ name = "gradio" },
{ name = "html5lib" },
{ name = "markdown-pdf" },
{ name = "mistralai" },
{ name = "newsapi-python" },
{ name = "ollama" },
{ name = "openai" },
{ name = "praw" },
{ name = "pytest" },
{ name = "python-binance" },
@@ -1678,13 +1795,17 @@ requires-dist = [
{ name = "coinbase-advanced-py" },
{ name = "colorlog" },
{ name = "ddgs" },
{ name = "deepseek" },
{ name = "dotenv" },
{ name = "gnews" },
{ name = "google-genai" },
{ name = "gradio" },
{ name = "html5lib" },
{ name = "markdown-pdf" },
{ name = "mistralai" },
{ name = "newsapi-python" },
{ name = "ollama" },
{ name = "openai" },
{ name = "praw" },
{ name = "pytest" },
{ name = "python-binance" },
@@ -1714,6 +1835,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/85/cd/584a2ceb5532af99dd09e50919e3615ba99aa127e9850eafe5f31ddfdb9a/uvicorn-0.37.0-py3-none-any.whl", hash = "sha256:913b2b88672343739927ce381ff9e2ad62541f9f8289664fa1d1d3803fa2ce6c", size = 67976, upload-time = "2025-09-23T13:33:45.842Z" },
]
[[package]]
name = "webencodings"
version = "0.5.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721, upload-time = "2017-04-05T20:21:34.189Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774, upload-time = "2017-04-05T20:21:32.581Z" },
]
[[package]]
name = "websocket-client"
version = "1.8.0"