Merge branch 'main' into 14-socials-integration

This commit is contained in:
2025-10-20 15:33:18 +02:00
76 changed files with 2131 additions and 1360 deletions

View File

@@ -5,8 +5,6 @@
# https://makersuite.google.com/app/apikey # https://makersuite.google.com/app/apikey
GOOGLE_API_KEY= GOOGLE_API_KEY=
OPENAI_API_KEY=
DEEPSEEK_API_KEY=
############################################################################### ###############################################################################
# Configurazioni per gli agenti di mercato # Configurazioni per gli agenti di mercato
############################################################################### ###############################################################################
@@ -23,6 +21,7 @@ CRYPTOCOMPARE_API_KEY=
BINANCE_API_KEY= BINANCE_API_KEY=
BINANCE_API_SECRET= BINANCE_API_SECRET=
############################################################################### ###############################################################################
# Configurazioni per gli agenti di notizie # Configurazioni per gli agenti di notizie
############################################################################### ###############################################################################
@@ -33,6 +32,7 @@ NEWS_API_KEY=
# https://cryptopanic.com/developers/api/ # https://cryptopanic.com/developers/api/
CRYPTOPANIC_API_KEY= CRYPTOPANIC_API_KEY=
############################################################################### ###############################################################################
# Configurazioni per API di social media # Configurazioni per API di social media
############################################################################### ###############################################################################
@@ -41,7 +41,6 @@ CRYPTOPANIC_API_KEY=
REDDIT_API_CLIENT_ID= REDDIT_API_CLIENT_ID=
REDDIT_API_CLIENT_SECRET= REDDIT_API_CLIENT_SECRET=
# Per ottenere questa API è necessario seguire i seguenti passaggi: # Per ottenere questa API è necessario seguire i seguenti passaggi:
# - Installare l'estensione su chrome X Auth Helper # - Installare l'estensione su chrome X Auth Helper
# - Dargli il permesso di girare in incognito # - Dargli il permesso di girare in incognito
@@ -50,3 +49,11 @@ REDDIT_API_CLIENT_SECRET=
# - Chiudere chrome # - Chiudere chrome
# Dovrebbe funzionare per 5 anni o finchè non si si fa il log out, in ogni caso si può ricreare # Dovrebbe funzionare per 5 anni o finchè non si si fa il log out, in ogni caso si può ricreare
X_API_KEY= X_API_KEY=
###############################################################################
# Configurazioni per API di messaggistica
###############################################################################
# https://core.telegram.org/bots/features#creating-a-new-bot
TELEGRAM_BOT_TOKEN=

6
.gitignore vendored
View File

@@ -173,8 +173,8 @@ cython_debug/
# PyPI configuration file # PyPI configuration file
.pypirc .pypirc
# chroma db
./chroma_db/
# VS Code # VS Code
.vscode/ .vscode/
# Gradio
.gradio/

View File

@@ -1,25 +1,29 @@
# Vogliamo usare una versione di linux leggera con già uv installato # Utilizziamo Debian slim invece di Alpine per migliore compatibilità
# Infatti scegliamo l'immagine ufficiale di uv che ha già tutto configurato FROM debian:bookworm-slim
FROM ghcr.io/astral-sh/uv:python3.12-alpine
RUN apk add --update npm # Installiamo le dipendenze di sistema
RUN apt-get update && \
apt-get install -y curl npm && \
rm -rf /var/lib/apt/lists/*
RUN npm install -g rettiwt-api RUN npm install -g rettiwt-api
# Dopo aver definito la workdir mi trovo già in essa
WORKDIR /app
# Settiamo variabili d'ambiente per usare python del sistema invece che venv # Installiamo uv
ENV UV_PROJECT_ENVIRONMENT=/usr/local RUN curl -LsSf https://astral.sh/uv/install.sh | sh
ENV PATH="/root/.local/bin:$PATH"
# Configuriamo UV per usare copy mode ed evitare problemi di linking
ENV UV_LINK_MODE=copy ENV UV_LINK_MODE=copy
# Copiamo prima i file di configurazione delle dipendenze e installiamo le dipendenze # Creiamo l'ambiente virtuale con tutto già presente
COPY pyproject.toml ./ COPY pyproject.toml ./
COPY uv.lock ./ COPY uv.lock ./
RUN uv sync --frozen --no-cache RUN uv sync --frozen --no-dev
ENV PYTHONPATH="./src"
# Copiamo i file sorgente dopo aver installato le dipendenze per sfruttare la cache di Docker # Copiamo i file del progetto
COPY LICENSE . COPY LICENSE ./
COPY src ./src COPY src/ ./src/
COPY configs.yaml ./
# Comando di default all'avvio dell'applicazione # Comando di avvio dell'applicazione
CMD ["echo", "Benvenuto in UPO AppAI!"] CMD ["uv", "run", "src/app"]
CMD ["uv", "run", "src/app.py"]

View File

@@ -14,14 +14,14 @@ L'obiettivo è quello di creare un sistema di consulenza finanziaria basato su L
- [3. Docker](#3-docker) - [3. Docker](#3-docker)
- [4. UV (solo per sviluppo locale)](#4-uv-solo-per-sviluppo-locale) - [4. UV (solo per sviluppo locale)](#4-uv-solo-per-sviluppo-locale)
- [Applicazione](#applicazione) - [Applicazione](#applicazione)
- [Ultimo Aggiornamento](#ultimo-aggiornamento) - [Struttura del codice del Progetto](#struttura-del-codice-del-progetto)
- [Tests](#tests) - [Tests](#tests)
# **Installazione** # **Installazione**
L'installazione di questo progetto richiede 3 passaggi totali (+1 se si vuole sviluppare in locale) che devono essere eseguiti in sequenza. Se questi passaggi sono eseguiti correttamente, l'applicazione dovrebbe partire senza problemi. Altrimenti è molto probabile che si verifichino errori di vario tipo (moduli mancanti, chiavi API non trovate, ecc.). L'installazione di questo progetto richiede 3 passaggi totali (+1 se si vuole sviluppare in locale) che devono essere eseguiti in sequenza. Se questi passaggi sono eseguiti correttamente, l'applicazione dovrebbe partire senza problemi. Altrimenti è molto probabile che si verifichino errori di vario tipo (moduli mancanti, chiavi API non trovate, ecc.).
1. Configurare le variabili d'ambiente 1. Configurazioni dell'app e delle variabili d'ambiente
2. Installare Ollama e i modelli locali 2. Installare Ollama e i modelli locali
3. Far partire il progetto con Docker (consigliato) 3. Far partire il progetto con Docker (consigliato)
4. (Solo per sviluppo locale) Installare uv e creare l'ambiente virtuale 4. (Solo per sviluppo locale) Installare uv e creare l'ambiente virtuale
@@ -29,11 +29,15 @@ L'installazione di questo progetto richiede 3 passaggi totali (+1 se si vuole sv
> [!IMPORTANT]\ > [!IMPORTANT]\
> Prima di iniziare, assicurarsi di avere clonato il repository e di essere nella cartella principale del progetto. > Prima di iniziare, assicurarsi di avere clonato il repository e di essere nella cartella principale del progetto.
### **1. Variabili d'Ambiente** ### **1. Configurazioni**
Copia il file `.env.example` in `.env` e modificalo con le tue API keys: Ci sono due file di configurazione principali che l'app utilizza: `config.yaml` e `.env`.\
Il primo contiene le configurazioni generali dell'applicazione e può essere modificato a piacimento, mentre il secondo è utilizzato per le variabili d'ambiente.
Per il secondo, bisogna copiare il file `.env.example` in `.env` e successivamente modificalo con le tue API keys:
```sh ```sh
cp .env.example .env cp .env.example .env
nano .env # esempio di modifica del file
``` ```
Le API Keys devono essere inserite nelle variabili opportune dopo l'uguale e ***senza*** spazi. Esse si possono ottenere tramite i loro providers (alcune sono gratuite, altre a pagamento).\ Le API Keys devono essere inserite nelle variabili opportune dopo l'uguale e ***senza*** spazi. Esse si possono ottenere tramite i loro providers (alcune sono gratuite, altre a pagamento).\
@@ -48,21 +52,13 @@ Per l'installazione scaricare Ollama dal loro [sito ufficiale](https://ollama.co
Dopo l'installazione, si possono iniziare a scaricare i modelli desiderati tramite il comando `ollama pull <model>:<tag>`. Dopo l'installazione, si possono iniziare a scaricare i modelli desiderati tramite il comando `ollama pull <model>:<tag>`.
I modelli usati dall'applicazione sono visibili in [src/app/models.py](src/app/models.py). Di seguito metto lo stesso una lista di modelli, ma potrebbe non essere aggiornata: I modelli usati dall'applicazione sono quelli specificati nel file [config.yaml](config.yaml) alla voce `model`. Se in locale si hanno dei modelli diversi, è possibile modificare questa voce per usare quelli disponibili.
- `gpt-oss:latest` I modelli consigliati per questo progetto sono `qwen3:4b` e `qwen3:1.7b`.
- `qwen3:latest`
- `qwen3:4b`
- `qwen3:1.7b`
### **3. Docker** ### **3. Docker**
Se si vuole solamente avviare il progetto, si consiglia di utilizzare [Docker](https://www.docker.com), dato che sono stati creati i files [Dockerfile](Dockerfile) e [docker-compose.yaml](docker-compose.yaml) per creare il container con tutti i file necessari e già in esecuzione. Se si vuole solamente avviare il progetto, si consiglia di utilizzare [Docker](https://www.docker.com), dato che sono stati creati i files [Dockerfile](Dockerfile) e [docker-compose.yaml](docker-compose.yaml) per creare il container con tutti i file necessari e già in esecuzione.
```sh ```sh
# Configura le variabili d'ambiente
cp .env.example .env
nano .env # Modifica il file
# Avvia il container
docker compose up --build -d docker compose up --build -d
``` ```
@@ -80,27 +76,54 @@ powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | ie
curl -LsSf https://astral.sh/uv/install.sh | sh curl -LsSf https://astral.sh/uv/install.sh | sh
``` ```
UV installerà python e creerà automaticamente l'ambiente virtuale con le dipendenze corrette (nota che questo passaggio è opzionale, dato che uv, ogni volta che si esegue un comando, controlla se l'ambiente è attivo e se le dipendenze sono installate): Dopodiché bisogna creare un ambiente virtuale per lo sviluppo locale e impostare PYTHONPATH. Questo passaggio è necessario per far sì che Python riesca a trovare tutti i moduli del progetto e ad installare tutte le dipendenze. Fortunatamente uv semplifica molto questo processo:
```sh ```sh
uv sync --frozen --no-cache uv venv
uv pip install -e .
``` ```
A questo punto si può far partire il progetto tramite il comando: A questo punto si può già modificare il codice e, quando necessario, far partire il progetto tramite il comando:
```sh ```sh
uv run python src/app.py uv run src/app
``` ```
# **Applicazione** # **Applicazione**
***L'applicazione è attualmente in fase di sviluppo.*** > [!CAUTION]\
> ***L'applicazione è attualmente in fase di sviluppo.***
Usando la libreria ``gradio`` è stata creata un'interfaccia web semplice per interagire con l'agente principale. Gli agenti secondari si trovano nella cartella `src/app/agents` e sono: L'applicazione viene fatta partire tramite il file [src/app/\_\_main\_\_.py](src/app/__main__.py) che inizializza l'agente principale e gli agenti secondari.
- **Market Agent**: Agente unificato che supporta multiple fonti di dati con auto-retry e gestione degli errori.
- **News Agent**: Recupera le notizie finanziarie più recenti sul mercato delle criptovalute. In esso viene creato il server `gradio` per l'interfaccia web e viene anche inizializzato il bot di Telegram (se è stata inserita la chiave nel file `.env` ottenuta da [BotFather](https://core.telegram.org/bots/features#creating-a-new-bot)).
- **Social Agent**: Analizza i sentimenti sui social media riguardo alle criptovalute.
- **Predictor Agent**: Utilizza i dati raccolti dagli altri agenti per fare previsioni. L'interazione è guidata, sia tramite l'interfaccia web che tramite il bot di Telegram; l'utente può scegliere prima di tutto delle opzioni generali (come il modello e la strategia di investimento), dopodiché può inviare un messaggio di testo libero per chiedere consigli o informazioni specifiche. Per esempio: "Qual è l'andamento attuale di Bitcoin?" o "Consigliami quali sono le migliori criptovalute in cui investire questo mese".
L'applicazione, una volta ricevuta la richiesta, la passa al [Team](src/app/agents/team.py) di agenti che si occupano di raccogliere i dati necessari per rispondere in modo completo e ragionato.
Gli agenti coinvolti nel Team sono:
- **Leader**: Coordina gli altri agenti e fornisce la risposta finale all'utente.
- **Market Agent**: Recupera i dati di mercato attuali delle criptovalute da Binance e Yahoo Finance.
- **News Agent**: Recupera le ultime notizie sul mercato delle criptovalute da NewsAPI e GNews.
- **Social Agent**: Recupera i dati dai social media (Reddit) per analizzare il sentiment del mercato.
## Struttura del codice del Progetto
```
src
└── app
├── __main__.py
├── config.py <-- Configurazioni app
├── agents <-- Agenti, Team, prompts e simili
├── api <-- Tutte le API esterne
│ ├── core <-- Classi core per le API
│ ├── markets <-- Market data provider (Es. Binance)
│ ├── news <-- News data provider (Es. NewsAPI)
│ ├── social <-- Social data provider (Es. Reddit)
│ └── tools <-- Tools per agenti creati dalle API
└── interface <-- Interfacce utente
```
## Tests ## Tests

45
configs.yaml Normal file
View File

@@ -0,0 +1,45 @@
port: 8000
gradio_share: false
logging_level: INFO
strategies:
- name: Conservative
label: Conservative
description: Focus on stable and low-risk investments.
- name: Balanced
label: Balanced
description: A mix of growth and stability.
- name: Aggressive
label: Aggressive
description: High-risk, high-reward investments.
models:
gemini:
- name: gemini-2.0-flash
label: Gemini
# - name: gemini-2.0-pro # TODO Non funziona, ha un nome diverso
# label: Gemini Pro
ollama:
- name: gpt-oss:latest
label: Ollama GPT
- name: qwen3:8b
label: Qwen 3 (8B)
- name: qwen3:4b
label: Qwen 3 (4B)
- name: qwen3:1.7b
label: Qwen 3 (1.7B)
api:
retry_attempts: 3
retry_delay_seconds: 2
currency: USD
# TODO Magari implementare un sistema per settare i providers
market_providers: [BinanceWrapper, YFinanceWrapper]
news_providers: [GoogleNewsWrapper, DuckDuckGoWrapper]
social_providers: [RedditWrapper]
agents:
strategy: Conservative
team_model: qwen3:1.7b
team_leader_model: qwen3:4b
predictor_model: qwen3:4b

View File

@@ -14,7 +14,7 @@ try:
instructions="Use tables to display data.", instructions="Use tables to display data.",
markdown=True, markdown=True,
) )
result = reasoning_agent.run("Scrivi una poesia su un gatto. Sii breve.") result = reasoning_agent.run("Scrivi una poesia su un gatto. Sii breve.") # type: ignore
print(result.content) print(result.content)
except Exception as e: except Exception as e:
print(f"Si è verificato un errore: {e}") print(f"Si è verificato un errore: {e}")

69
demos/agno_workflow.py Normal file
View File

@@ -0,0 +1,69 @@
import asyncio
from agno.agent import Agent
from agno.models.ollama import Ollama
from agno.run.workflow import WorkflowRunEvent
from agno.workflow.step import Step
from agno.workflow.steps import Steps
from agno.workflow.types import StepOutput, StepInput
from agno.workflow.parallel import Parallel
from agno.workflow.workflow import Workflow
def my_sum(a: int, b: int) -> int:
return a + b
def my_mul(a: int, b: int) -> int:
return a * b
def build_agent(instructions: str) -> Agent:
return Agent(
instructions=instructions,
model=Ollama(id='qwen3:1.7b'),
tools=[my_sum]
)
def remove_think(text: str) -> str:
thinking = text.rfind("</think>")
if thinking != -1:
return text[thinking + len("</think>"):].strip()
return text.strip()
def combine_steps_output(inputs: StepInput) -> StepOutput:
parallel = inputs.get_step_content("parallel")
if not isinstance(parallel, dict): return StepOutput()
lang = remove_think(parallel.get("Lang", ""))
answer = remove_think(parallel.get("Predict", ""))
content = f"Language: {lang}\nPhrase: {answer}"
return StepOutput(content=content)
async def main():
query = "Quanto fa 50 + 150 * 50?"
s1 = Step(name="Translate", agent=build_agent(instructions="Transform in English the user query. DO NOT answer the question and output ONLY the translated question."))
s2 = Step(name="Predict", agent=build_agent(instructions="You will be given a question in English. You can use the tools at your disposal. Answer the question and output ONLY the answer."))
step_a = Step(name="Lang", agent=build_agent(instructions="Detect the language from the question and output ONLY the language code. Es: 'en' for English, 'it' for Italian, 'ja' for Japanese."))
step_b = Steps(name="Answer", steps=[s1, s2])
step_c = Step(name="Combine", executor=combine_steps_output)
step_f = Step(name="Final", agent=build_agent(instructions="Translate the phrase in the language code provided. Respond only with the translated answer."))
wf = Workflow(name="Pipeline Workflow", steps=[
Parallel(step_a, step_b, name="parallel"), # type: ignore
step_c,
step_f
])
result = ""
async for event in await wf.arun(query, stream=True, stream_intermediate_steps=True):
content = getattr(event, 'content', '')
step_name = getattr(event, 'step_name', '')
if event.event in [WorkflowRunEvent.step_completed]:
print(f"{str(event.event)} --- {step_name} --- {remove_think(content).replace('\n', '\\n')[:80]}")
if event.event in [WorkflowRunEvent.workflow_completed]:
result = remove_think(content)
print(f"\nFinal result: {result}")
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -27,12 +27,12 @@ project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root / "src")) sys.path.insert(0, str(project_root / "src"))
from dotenv import load_dotenv from dotenv import load_dotenv
from app.markets import ( from app.api.markets import (
CoinBaseWrapper, CoinBaseWrapper,
CryptoCompareWrapper, CryptoCompareWrapper,
BinanceWrapper, BinanceWrapper,
YFinanceWrapper, YFinanceWrapper,
BaseWrapper MarketWrapper
) )
# Carica variabili d'ambiente # Carica variabili d'ambiente
@@ -133,9 +133,9 @@ class ProviderTester:
self.formatter = DemoFormatter() self.formatter = DemoFormatter()
self.test_symbols = ["BTC", "ETH", "ADA"] self.test_symbols = ["BTC", "ETH", "ADA"]
def test_provider(self, wrapper: BaseWrapper, provider_name: str) -> Dict[str, Any]: def test_provider(self, wrapper: MarketWrapper, provider_name: str) -> Dict[str, Any]:
"""Testa un provider specifico con tutti i metodi disponibili.""" """Testa un provider specifico con tutti i metodi disponibili."""
results = { results: Dict[str, Any] = {
"provider_name": provider_name, "provider_name": provider_name,
"tests": {}, "tests": {},
"overall_status": "SUCCESS" "overall_status": "SUCCESS"
@@ -153,7 +153,7 @@ class ProviderTester:
) )
if product: if product:
print(f"📦 Product: {product.symbol} (ID: {product.id})") print(f"📦 Product: {product.symbol} (ID: {product.id})")
print(f" Price: ${product.price:.2f}, Quote: {product.quote_currency}") print(f" Price: ${product.price:.2f}, Quote: {product.currency}")
print(f" Volume 24h: {product.volume_24h:,.2f}") print(f" Volume 24h: {product.volume_24h:,.2f}")
else: else:
print(f"📦 Product: Nessun prodotto trovato per {symbol}") print(f"📦 Product: Nessun prodotto trovato per {symbol}")
@@ -217,9 +217,9 @@ def check_environment_variables() -> Dict[str, bool]:
} }
return env_vars return env_vars
def initialize_providers() -> Dict[str, BaseWrapper]: def initialize_providers() -> Dict[str, MarketWrapper]:
"""Inizializza tutti i provider disponibili.""" """Inizializza tutti i provider disponibili."""
providers = {} providers: Dict[str, MarketWrapper] = {}
env_vars = check_environment_variables() env_vars = check_environment_variables()
# CryptoCompareWrapper # CryptoCompareWrapper
@@ -316,7 +316,7 @@ def main():
formatter.print_header("🧪 ESECUZIONE TEST PROVIDER", "=", 80) formatter.print_header("🧪 ESECUZIONE TEST PROVIDER", "=", 80)
tester = ProviderTester() tester = ProviderTester()
all_results = [] all_results: List[Dict[str, Any]] = []
for provider_name, wrapper in providers.items(): for provider_name, wrapper in providers.items():
try: try:

View File

@@ -5,10 +5,12 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../src'
########################################### ###########################################
from dotenv import load_dotenv from dotenv import load_dotenv
from app.news import NewsApiWrapper from app.api.news import NewsApiWrapper
def main(): def main():
api = NewsApiWrapper() api = NewsApiWrapper()
articles = api.get_latest_news(query="bitcoin", limit=5)
assert len(articles) > 0
print("ok") print("ok")
if __name__ == "__main__": if __name__ == "__main__":

View File

@@ -0,0 +1,59 @@
import os
from dotenv import load_dotenv
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update
from telegram.ext import Application, CommandHandler, CallbackQueryHandler, MessageHandler, filters, ContextTypes
# Esempio di funzione per gestire il comando /start
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
if not update.message: return
await update.message.reply_text('Ciao! Inviami un messaggio e ti risponderò!')
# Esempio di funzione per fare echo del messaggio ricevuto
async def echo(update: Update, context: ContextTypes.DEFAULT_TYPE):
message = update.message
if not message: return
print(f"Ricevuto messaggio: {message.text} da chat id: {message.chat.id}")
await message.reply_text(text=f"Hai detto: {message.text}")
# Esempio di funzione per far partire una inline keyboard (comando /keyboard)
async def inline_keyboard(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
if not update.message: return
keyboard = [
[
InlineKeyboardButton("Option 1", callback_data='1'),
InlineKeyboardButton("Option 2", callback_data='2'),
]
]
reply_markup = InlineKeyboardMarkup(keyboard)
await update.message.reply_text('Please choose:', reply_markup=reply_markup)
async def button_handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
query = update.callback_query
if not query: return
await query.answer()
await query.edit_message_text(text=f"Selected option: {query.data}")
def main():
print("Bot in ascolto...")
load_dotenv()
token = os.getenv("TELEGRAM_BOT_TOKEN", '')
app = Application.builder().token(token).build()
app.add_handler(CommandHandler("start", start))
app.add_handler(CommandHandler("keyboard", inline_keyboard))
app.add_handler(MessageHandler(filters=filters.TEXT, callback=echo))
app.add_handler(CallbackQueryHandler(button_handler))
app.run_polling(allowed_updates=Update.ALL_TYPES)
if __name__ == "__main__":
main()

View File

@@ -13,6 +13,7 @@ dependencies = [
"pytest", # Test "pytest", # Test
"dotenv", # Gestire variabili d'ambiente (generalmente API keys od opzioni) "dotenv", # Gestire variabili d'ambiente (generalmente API keys od opzioni)
"gradio", # UI web semplice con user_input e output "gradio", # UI web semplice con user_input e output
"colorlog", # Log colorati in console
# Per costruire agenti (ovvero modelli che possono fare più cose tramite tool) https://github.com/agno-agi/agno # Per costruire agenti (ovvero modelli che possono fare più cose tramite tool) https://github.com/agno-agi/agno
# altamente consigliata dato che ha anche tools integrati per fare scraping, calcoli e molto altro # altamente consigliata dato che ha anche tools integrati per fare scraping, calcoli e molto altro
@@ -38,6 +39,10 @@ dependencies = [
# API di social media # API di social media
"praw", # Reddit "praw", # Reddit
# Per telegram bot
"python-telegram-bot", # Interfaccia Telegram Bot
"markdown-pdf", # Per convertire markdown in pdf
] ]
[tool.pytest.ini_options] [tool.pytest.ini_options]

View File

@@ -1,84 +0,0 @@
import gradio as gr
from agno.utils.log import log_info
from dotenv import load_dotenv
from app.chat_manager import ChatManager
########################################
# MAIN APP & GRADIO CHAT INTERFACE
########################################
if __name__ == "__main__":
# Carica variabili dambiente (.env)
load_dotenv()
# Inizializza ChatManager
chat = ChatManager()
########################################
# Funzioni Gradio
########################################
def respond(message, history):
response = chat.send_message(message)
history.append({"role": "user", "content": message})
history.append({"role": "assistant", "content": response})
return history, history, ""
def save_current_chat():
chat.save_chat("chat.json")
return "💾 Chat salvata in chat.json"
def load_previous_chat():
chat.load_chat("chat.json")
history = []
for m in chat.get_history():
history.append({"role": m["role"], "content": m["content"]})
return history, history
def reset_chat():
chat.reset_chat()
return [], []
########################################
# Interfaccia Gradio
########################################
with gr.Blocks() as demo:
gr.Markdown("# 🤖 Agente di Analisi e Consulenza Crypto (Chat)")
# Dropdown provider e stile
with gr.Row():
provider = gr.Dropdown(
choices=chat.list_providers(),
type="index",
label="Modello da usare"
)
# Per qualche motivo deep seek non viene mostrato ma se si fa print(provider.choices) sembra esserci
provider.change(fn=chat.choose_provider, inputs=provider, outputs=None)
style = gr.Dropdown(
choices=chat.list_styles(),
type="index",
label="Stile di investimento"
)
style.change(fn=chat.choose_style, inputs=style, outputs=None)
chatbot = gr.Chatbot(label="Conversazione", height=500, type="messages")
msg = gr.Textbox(label="Scrivi la tua richiesta", placeholder="Es: Quali sono le crypto interessanti oggi?")
with gr.Row():
clear_btn = gr.Button("🗑️ Reset Chat")
save_btn = gr.Button("💾 Salva Chat")
load_btn = gr.Button("📂 Carica Chat")
# Invio messaggio
msg.submit(respond, inputs=[msg, chatbot], outputs=[chatbot, chatbot, msg])
# Reset
clear_btn.click(reset_chat, inputs=None, outputs=[chatbot, chatbot])
# Salvataggio
save_btn.click(save_current_chat, inputs=None, outputs=None)
# Caricamento
load_btn.click(load_previous_chat, inputs=None, outputs=[chatbot, chatbot])
server, port = ("0.0.0.0", 8000)
server_log = "localhost" if server == "0.0.0.0" else server
log_info(f"Starting UPO AppAI Chat on http://{server_log}:{port}") # noqa
demo.launch(server_name=server, server_port=port, quiet=True)

32
src/app/__main__.py Normal file
View File

@@ -0,0 +1,32 @@
import asyncio
import logging
from dotenv import load_dotenv
from app.configs import AppConfig
from app.interface import *
if __name__ == "__main__":
# =====================
load_dotenv()
configs = AppConfig.load()
# =====================
chat = ChatManager()
gradio = chat.gradio_build_interface()
_app, local_url, share_url = gradio.launch(server_name="0.0.0.0", server_port=configs.port, quiet=True, prevent_thread_lock=True, share=configs.gradio_share)
logging.info(f"UPO AppAI Chat is running on {share_url or local_url}")
try:
telegram = TelegramApp()
telegram.add_miniapp_url(share_url)
telegram.run()
except AssertionError as e:
try:
logging.warning(f"Telegram bot could not be started: {e}")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_forever()
except KeyboardInterrupt:
logging.info("Shutting down due to KeyboardInterrupt")
finally:
gradio.close()

View File

@@ -0,0 +1,4 @@
from app.agents.predictor import PredictorInput, PredictorOutput
from app.agents.pipeline import Pipeline, PipelineInputs, PipelineEvent
__all__ = ["PredictorInput", "PredictorOutput", "Pipeline", "PipelineInputs", "PipelineEvent"]

203
src/app/agents/pipeline.py Normal file
View File

@@ -0,0 +1,203 @@
import asyncio
from enum import Enum
import logging
import random
from typing import Any, Callable
from agno.agent import RunEvent
from agno.team import Team, TeamRunEvent
from agno.tools.reasoning import ReasoningTools
from agno.run.workflow import WorkflowRunEvent
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
from app.api.tools import *
from app.agents.prompts import *
from app.configs import AppConfig
logging = logging.getLogger("pipeline")
class PipelineEvent(str, Enum):
PLANNER = "Planner"
INFO_RECOVERY = "Info Recovery"
REPORT_GENERATION = "Report Generation"
REPORT_TRANSLATION = "Report Translation"
TOOL_USED = RunEvent.tool_call_completed
def check_event(self, event: str, step_name: str) -> bool:
return event == self.value or (WorkflowRunEvent.step_completed and step_name == self.value)
class PipelineInputs:
"""
Classe necessaria per passare gli input alla Pipeline.
Serve per raggruppare i parametri e semplificare l'inizializzazione.
"""
def __init__(self, configs: AppConfig | None = None) -> None:
"""
Inputs per la Pipeline di agenti.
Setta i valori di default se non specificati.
"""
self.configs = configs if configs else AppConfig()
agents = self.configs.agents
self.team_model = self.configs.get_model_by_name(agents.team_model)
self.team_leader_model = self.configs.get_model_by_name(agents.team_leader_model)
self.predictor_model = self.configs.get_model_by_name(agents.predictor_model)
self.strategy = self.configs.get_strategy_by_name(agents.strategy)
self.user_query = ""
# ======================
# Dropdown handlers
# ======================
def choose_team_leader(self, index: int):
"""
Sceglie il modello LLM da usare per il Team Leader.
"""
self.leader_model = self.configs.models.all_models[index]
def choose_team(self, index: int):
"""
Sceglie il modello LLM da usare per il Team.
"""
self.team_model = self.configs.models.all_models[index]
def choose_strategy(self, index: int):
"""
Sceglie la strategia da usare per il Team.
"""
self.strategy = self.configs.strategies[index]
# ======================
# Helpers
# ======================
def list_models_names(self) -> list[str]:
"""
Restituisce la lista dei nomi dei modelli disponibili.
"""
return [model.label for model in self.configs.models.all_models]
def list_strategies_names(self) -> list[str]:
"""
Restituisce la lista delle strategie disponibili.
"""
return [strat.label for strat in self.configs.strategies]
class Pipeline:
"""
Coordina gli agenti di servizio (Market, News, Social) e il Predictor finale.
Il Team è orchestrato da qwen3:latest (Ollama), mentre il Predictor è dinamico
e scelto dall'utente tramite i dropdown dell'interfaccia grafica.
"""
def __init__(self, inputs: PipelineInputs):
self.inputs = inputs
# ======================
# Core interaction
# ======================
def interact(self, listeners: dict[RunEvent | TeamRunEvent, Callable[[PipelineEvent], None]] = {}) -> str:
"""
Esegue la pipeline di agenti per rispondere alla query dell'utente.
Args:
listeners: dizionario di callback per eventi specifici (opzionale)
Returns:
La risposta generata dalla pipeline.
"""
return asyncio.run(self.interact_async(listeners))
async def interact_async(self, listeners: dict[RunEvent | TeamRunEvent, Callable[[PipelineEvent], None]] | None = None) -> str:
    """
    Asynchronous version that runs the agent pipeline to answer the user's query.

    Args:
        listeners: optional map of event -> callback invoked for matching events.

    Returns:
        The response generated by the pipeline.
    """
    # Default is None instead of a shared mutable dict (mutable-default pitfall).
    listeners = listeners if listeners is not None else {}
    run_id = random.randint(1000, 9999)  # Correlation id for this run's log lines
    logging.info(f"[{run_id}] Pipeline query: {self.inputs.user_query}")
    # Step 1: Create the member agents and the coordinating team
    market_tool, news_tool, social_tool = self.get_tools()
    market_agent = self.inputs.team_model.get_agent(instructions=MARKET_INSTRUCTIONS, name="MarketAgent", tools=[market_tool])
    news_agent = self.inputs.team_model.get_agent(instructions=NEWS_INSTRUCTIONS, name="NewsAgent", tools=[news_tool])
    social_agent = self.inputs.team_model.get_agent(instructions=SOCIAL_INSTRUCTIONS, name="SocialAgent", tools=[social_tool])
    team = Team(
        model=self.inputs.team_leader_model.get_model(COORDINATOR_INSTRUCTIONS),
        name="CryptoAnalysisTeam",
        tools=[ReasoningTools()],
        members=[market_agent, news_agent, social_agent],
    )
    # Step 2: Create the workflow (planner/report steps are not wired in yet)
    #query_planner = Step(name=PipelineEvent.PLANNER, agent=Agent())
    info_recovery = Step(name=PipelineEvent.INFO_RECOVERY, team=team)
    #report_generation = Step(name=PipelineEvent.REPORT_GENERATION, agent=Agent())
    #report_translate = Step(name=AppEvent.REPORT_TRANSLATION, agent=Agent())
    workflow = Workflow(
        name="App Workflow",
        steps=[
            #query_planner,
            info_recovery,
            #report_generation,
            #report_translate
        ]
    )
    # Step 3: Run the workflow and return its output.
    # BUG FIX: listeners were previously dropped (events={} was passed), so caller
    # callbacks never fired; forward them to run().
    # NOTE(review): listener keys are typed RunEvent | TeamRunEvent while run()
    # expects PipelineEvent keys — confirm the intended key type.
    query = f"The user query is: {self.inputs.user_query}\n\n They requested a {self.inputs.strategy.label} investment strategy."
    result = await self.run(workflow, query, events=listeners)
    logging.info(f"[{run_id}] Run finished")
    return result
# ======================
# Helpers
# =====================
def get_tools(self) -> tuple[MarketAPIsTool, NewsAPIsTool, SocialAPIsTool]:
    """
    Builds the API tools available to the agents, each configured with the
    retry policy taken from the application settings.
    """
    api_cfg = self.inputs.configs.api

    def with_retries(tool):
        # Apply the shared retry policy to the tool's request handler.
        tool.handler.set_retries(api_cfg.retry_attempts, api_cfg.retry_delay_seconds)
        return tool

    return (
        with_retries(MarketAPIsTool(currency=api_cfg.currency)),
        with_retries(NewsAPIsTool()),
        with_retries(SocialAPIsTool()),
    )
@classmethod
async def run(cls, workflow: Workflow, query: str, events: dict[PipelineEvent, Callable[[Any], None]]) -> str:
    """
    Executes the workflow, dispatching streamed events to the given callbacks.

    Args:
        workflow: Workflow instance to execute
        query: user query forwarded to the workflow
        events: map of pipeline event -> callback (may be empty)

    Returns:
        The response produced by the workflow.
    """
    stream = await workflow.arun(query, stream=True, stream_intermediate_steps=True)
    final_output = None
    async for evt in stream:
        step = getattr(evt, 'step_name', '')
        # Notify every registered listener whose event matches this one.
        for pipeline_event, callback in events.items():
            if pipeline_event.check_event(evt.event, step):
                callback(evt)
        # Capture the content of the completion event as the final answer.
        if evt.event == WorkflowRunEvent.workflow_completed:
            final_output = getattr(evt, 'content', '')
    if isinstance(final_output, str):
        # Drop any chain-of-thought emitted before the closing </think> tag.
        marker = "</think>"
        cut = final_output.rfind(marker)
        if cut != -1:
            final_output = final_output[cut + len(marker):]
    return final_output if final_output else "No output from workflow, something went wrong."

View File

@@ -0,0 +1,16 @@
from pydantic import BaseModel, Field
from app.api.core.markets import ProductInfo
class PredictorInput(BaseModel):
    """Input payload for the predictor: market data, prediction style and aggregated sentiment."""
    data: list[ProductInfo] = Field(..., description="Market data as a list of ProductInfo")
    style: str = Field(..., description="Prediction style")
    sentiment: str = Field(..., description="Aggregated sentiment from news and social analysis")
class ItemPortfolio(BaseModel):
    """A single portfolio entry: asset name, percentage allocation and its rationale."""
    asset: str = Field(..., description="Name of the asset")
    percentage: float = Field(..., description="Percentage allocation to the asset")
    motivation: str = Field(..., description="Motivation for the allocation")
class PredictorOutput(BaseModel):
    """Structured predictor result: an operational strategy plus the target portfolio."""
    strategy: str = Field(..., description="Concise operational strategy in Italian")
    portfolio: list[ItemPortfolio] = Field(..., description="List of portfolio items with allocations")

View File

@@ -0,0 +1,21 @@
from pathlib import Path

# Directory containing the prompt text files (same package as this module).
__PROMPTS_PATH = Path(__file__).parent


def __load_prompt(file_name: str) -> str:
    """Reads a prompt file from this package, stripped of surrounding whitespace."""
    return (__PROMPTS_PATH / file_name).read_text(encoding='utf-8').strip()


# Instructions for the team coordinator and each specialized agent.
COORDINATOR_INSTRUCTIONS = __load_prompt("team_leader.txt")
MARKET_INSTRUCTIONS = __load_prompt("team_market.txt")
NEWS_INSTRUCTIONS = __load_prompt("team_news.txt")
SOCIAL_INSTRUCTIONS = __load_prompt("team_social.txt")
PREDICTOR_INSTRUCTIONS = __load_prompt("predictor.txt")

__all__ = [
    "COORDINATOR_INSTRUCTIONS",
    "MARKET_INSTRUCTIONS",
    "NEWS_INSTRUCTIONS",
    "SOCIAL_INSTRUCTIONS",
    "PREDICTOR_INSTRUCTIONS",
]

View File

@@ -1,27 +1,3 @@
from enum import Enum
from pydantic import BaseModel, Field
from app.markets.base import ProductInfo
class PredictorStyle(Enum):
CONSERVATIVE = "Conservativo"
AGGRESSIVE = "Aggressivo"
class PredictorInput(BaseModel):
data: list[ProductInfo] = Field(..., description="Market data as a list of ProductInfo")
style: PredictorStyle = Field(..., description="Prediction style")
sentiment: str = Field(..., description="Aggregated sentiment from news and social analysis")
class ItemPortfolio(BaseModel):
asset: str = Field(..., description="Name of the asset")
percentage: float = Field(..., description="Percentage allocation to the asset")
motivation: str = Field(..., description="Motivation for the allocation")
class PredictorOutput(BaseModel):
strategy: str = Field(..., description="Concise operational strategy in Italian")
portfolio: list[ItemPortfolio] = Field(..., description="List of portfolio items with allocations")
PREDICTOR_INSTRUCTIONS = """
You are an **Allocation Algorithm (Crypto-Algo)** specialized in analyzing market data and sentiment to generate an investment strategy and a target portfolio. You are an **Allocation Algorithm (Crypto-Algo)** specialized in analyzing market data and sentiment to generate an investment strategy and a target portfolio.
Your sole objective is to process the user_input data and generate the strictly structured output as required by the response format. **You MUST NOT provide introductions, preambles, explanations, conclusions, or any additional comments that are not strictly required.** Your sole objective is to process the user_input data and generate the strictly structured output as required by the response format. **You MUST NOT provide introductions, preambles, explanations, conclusions, or any additional comments that are not strictly required.**
@@ -49,4 +25,3 @@ The allocation strategy must be **derived exclusively from the "Allocation Logic
1. **Strategy (strategy):** Must be a concise operational description **in Italian ("in Italiano")**, with a maximum of 5 sentences. 1. **Strategy (strategy):** Must be a concise operational description **in Italian ("in Italiano")**, with a maximum of 5 sentences.
2. **Portfolio (portfolio):** The sum of all percentages must be **exactly 100%**. The justification (motivation) for each asset must be a single clear sentence **in Italian ("in Italiano")**. 2. **Portfolio (portfolio):** The sum of all percentages must be **exactly 100%**. The justification (motivation) for each asset must be a single clear sentence **in Italian ("in Italiano")**.
"""

View File

@@ -0,0 +1,15 @@
You are the expert coordinator of a financial analysis team specializing in cryptocurrencies.
Your team consists of three agents:
- **MarketAgent**: Provides quantitative market data, price analysis, and technical indicators.
- **NewsAgent**: Scans and analyzes the latest news, articles, and official announcements.
- **SocialAgent**: Gauges public sentiment, trends, and discussions on social media.
Your primary objective is to answer the user's query by orchestrating the work of your team members.
Your workflow is as follows:
1. **Deconstruct the user's query** to identify the required information.
2. **Delegate specific tasks** to the most appropriate agent(s) to gather the necessary data and initial analysis.
3. **Analyze the information** returned by the agents.
4. If the initial data is insufficient or the query is complex, **iteratively re-engage the agents** with follow-up questions to build a comprehensive picture.
5. **Synthesize all the gathered information** into a final, coherent, and complete analysis that fills all the required output fields.

View File

@@ -0,0 +1,19 @@
**TASK:** You are a specialized **Crypto Price Data Retrieval Agent**. Your primary goal is to fetch the most recent and/or historical price data for requested cryptocurrency assets (e.g., 'BTC', 'ETH', 'SOL'). You must provide the data in a clear and structured format.
**AVAILABLE TOOLS:**
1. `get_products(asset_ids: list[str])`: Get **current** product/price info for a list of assets. **(PREFERITA: usa questa per i prezzi live)**
2. `get_historical_prices(asset_id: str, limit: int)`: Get historical price data for one asset. Default limit is 100. **(PREFERITA: usa questa per i dati storici)**
3. `get_products_aggregated(asset_ids: list[str])`: Get **aggregated current** product/price info for a list of assets. **(USA SOLO SE richiesto 'aggregato' o se `get_products` fallisce)**
4. `get_historical_prices_aggregated(asset_id: str, limit: int)`: Get **aggregated historical** price data for one asset. **(USA SOLO SE richiesto 'aggregato' o se `get_historical_prices` fallisce)**
**USAGE GUIDELINE:**
* **Asset ID:** Always convert common names (e.g., 'Bitcoin', 'Ethereum') into their official ticker/ID (e.g., 'BTC', 'ETH').
* **Cost Management (Cruciale per LLM locale):** Prefer `get_products` and `get_historical_prices` for standard requests to minimize costs.
* **Aggregated Data:** Use `get_products_aggregated` or `get_historical_prices_aggregated` only if the user specifically requests aggregated data or you judge that aggregated data is crucial for the analysis.
* **Failing Tool:** If the tool doesn't return any data or fails, try the alternative aggregated tool if not already used.
**REPORTING REQUIREMENT:**
1. **Format:** Output the results in a clear, easy-to-read list or table.
2. **Live Price Request:** If an asset's *current price* is requested, report the **Asset ID**, **Latest Price**, and **Time/Date of the price**.
3. **Historical Price Request:** If *historical data* is requested, report the **Asset ID**, the **Limit** of points returned, and the **First** and **Last** entries from the list of historical prices (Date, Price).
4. **Output:** For all requests, output a single, concise summary of the findings; if requested, also include the raw data retrieved.

View File

@@ -0,0 +1,18 @@
**TASK:** You are a specialized **Crypto News Analyst**. Your goal is to fetch the latest news or top headlines related to cryptocurrencies, and then **analyze the sentiment** of the content to provide a concise report to the team leader. Prioritize 'crypto' or specific cryptocurrency names (e.g., 'Bitcoin', 'Ethereum') in your searches.
**AVAILABLE TOOLS:**
1. `get_latest_news(query: str, limit: int)`: Get the 'limit' most recent news articles for a specific 'query'.
2. `get_top_headlines(limit: int)`: Get the 'limit' top global news headlines.
3. `get_latest_news_aggregated(query: str, limit: int)`: Get aggregated latest news articles for a specific 'query'.
4. `get_top_headlines_aggregated(limit: int)`: Get aggregated top global news headlines.
**USAGE GUIDELINE:**
* Always use `get_latest_news` with a relevant crypto-related query first.
* The default limit for news items should be 5 unless specified otherwise.
* If the tool doesn't return any articles, respond with "No relevant news articles found."
**REPORTING REQUIREMENT:**
1. **Analyze** the tone and key themes of the retrieved articles.
2. **Summarize** the overall **market sentiment** (e.g., highly positive, cautiously neutral, generally negative) based on the content.
3. **Identify** the top 2-3 **main topics** discussed (e.g., new regulation, price surge, institutional adoption).
4. **Output** a single, brief report summarizing these findings. Do not output the raw articles.

View File

@@ -0,0 +1,15 @@
**TASK:** You are a specialized **Social Media Sentiment Analyst**. Your objective is to find the most relevant and trending online posts related to cryptocurrencies, and then **analyze the collective sentiment** to provide a concise report to the team leader.
**AVAILABLE TOOLS:**
1. `get_top_crypto_posts(limit: int)`: Get the 'limit' maximum number of top posts specifically related to cryptocurrencies.
**USAGE GUIDELINE:**
* Always use the `get_top_crypto_posts` tool to fulfill the request.
* The default limit for posts should be 5 unless specified otherwise.
* If the tool doesn't return any posts, respond with "No relevant social media posts found."
**REPORTING REQUIREMENT:**
1. **Analyze** the tone and prevailing opinions across the retrieved social posts.
2. **Summarize** the overall **community sentiment** (e.g., high enthusiasm/FOMO, uncertainty, FUD/fear) based on the content.
3. **Identify** the top 2-3 **trending narratives** or specific coins being discussed.
4. **Output** a single, brief report summarizing these findings. Do not output the raw posts.

View File

152
src/app/api/core/markets.py Normal file
View File

@@ -0,0 +1,152 @@
import statistics
from datetime import datetime
from pydantic import BaseModel
class ProductInfo(BaseModel):
    """
    Product information as obtained from market APIs.
    Implements conversion methods from raw API data.
    """
    id: str = ""
    symbol: str = ""
    price: float = 0.0
    volume_24h: float = 0.0
    currency: str = ""

    @staticmethod
    def aggregate(products: dict[str, list['ProductInfo']]) -> list['ProductInfo']:
        """
        Aggregates ProductInfo entries from multiple providers by symbol.

        The aggregated price is a 24h-volume-weighted average across providers;
        the aggregated volume is the mean of the per-provider 24h volumes.

        Args:
            products (dict[str, list[ProductInfo]]): Map provider -> list of ProductInfo

        Returns:
            list[ProductInfo]: List of ProductInfo aggregated by symbol
        """
        # Group every provider's entries by symbol.
        symbols_infos: dict[str, list[ProductInfo]] = {}
        for product_list in products.values():
            for product in product_list:
                symbols_infos.setdefault(product.symbol, []).append(product)
        # Build one aggregated entry per symbol.
        aggregated_products: list[ProductInfo] = []
        for symbol, product_list in symbols_infos.items():
            product = ProductInfo()
            product.id = f"{symbol}_AGGREGATED"
            product.symbol = symbol
            # BUG FIX: supply a default so next() does not raise StopIteration
            # when no provider reports a currency for this symbol.
            product.currency = next((p.currency for p in product_list if p.currency), "")
            volume_sum = sum(p.volume_24h for p in product_list)
            product.volume_24h = volume_sum / len(product_list) if product_list else 0.0
            weighted_prices = sum(p.price * p.volume_24h for p in product_list)
            product.price = (weighted_prices / volume_sum) if volume_sum > 0 else 0.0
            aggregated_products.append(product)
        return aggregated_products
class Price(BaseModel):
    """
    Represents price data for an asset as obtained from market APIs.
    Implements conversion methods from raw API data.
    """
    high: float = 0.0
    low: float = 0.0
    open: float = 0.0
    close: float = 0.0
    volume: float = 0.0
    # Stored as a formatted string 'YYYY-MM-DD HH:MM'
    timestamp: str = ""

    def set_timestamp(self, timestamp_ms: int | None = None, timestamp_s: int | None = None) -> None:
        """
        Stores the timestamp as a formatted string 'YYYY-MM-DD HH:MM'.

        Accepts the raw value either in milliseconds or in seconds.

        Args:
            timestamp_ms: Timestamp in milliseconds.
            timestamp_s: Timestamp in seconds.

        Raises:
            ValueError: If neither timestamp_ms nor timestamp_s is provided.
        """
        if timestamp_ms is None and timestamp_s is None:
            raise ValueError("Either timestamp_ms or timestamp_s must be provided")
        seconds = timestamp_ms // 1000 if timestamp_ms is not None else timestamp_s
        assert seconds > 0, "Invalid timestamp data received"
        self.timestamp = datetime.fromtimestamp(seconds).strftime('%Y-%m-%d %H:%M')

    @staticmethod
    def aggregate(prices: dict[str, list['Price']]) -> list['Price']:
        """
        Aggregates historical prices for the same symbol by averaging every
        OHLCV field across providers, bucketed by timestamp.

        Args:
            prices (dict[str, list[Price]]): Map provider -> list of Price.
                The map must contain only Price objects for the same symbol.

        Returns:
            list[Price]: List of Price objects aggregated by timestamp.
        """
        # Bucket all prices by their formatted timestamp string.
        buckets: dict[str, list[Price]] = {}
        for provider_prices in prices.values():
            for entry in provider_prices:
                buckets.setdefault(entry.timestamp, []).append(entry)
        # Average each OHLCV field within every timestamp bucket.
        aggregated: list[Price] = []
        for when, bucket in buckets.items():
            merged = Price()
            merged.timestamp = when
            merged.high = statistics.mean([p.high for p in bucket])
            merged.low = statistics.mean([p.low for p in bucket])
            merged.open = statistics.mean([p.open for p in bucket])
            merged.close = statistics.mean([p.close for p in bucket])
            merged.volume = statistics.mean([p.volume for p in bucket])
            aggregated.append(merged)
        return aggregated
class MarketWrapper:
    """
    Base class for market API wrappers.
    All market API wrappers should inherit from this class and implement the methods.
    Provides interface for retrieving product and price information from market APIs.
    """
    def get_product(self, asset_id: str) -> ProductInfo:
        """
        Get product information for a specific asset ID.

        Args:
            asset_id (str): The asset ID to retrieve information for.

        Returns:
            ProductInfo: An object containing product information.

        Raises:
            NotImplementedError: Always; subclasses must override this method.
        """
        raise NotImplementedError("This method should be overridden by subclasses")
    def get_products(self, asset_ids: list[str]) -> list[ProductInfo]:
        """
        Get product information for multiple asset IDs.

        Args:
            asset_ids (list[str]): The list of asset IDs to retrieve information for.

        Returns:
            list[ProductInfo]: A list of objects containing product information.

        Raises:
            NotImplementedError: Always; subclasses must override this method.
        """
        raise NotImplementedError("This method should be overridden by subclasses")
    def get_historical_prices(self, asset_id: str, limit: int = 100) -> list[Price]:
        """
        Get historical price data for a specific asset ID.

        Args:
            asset_id (str): The asset ID to retrieve price data for.
            limit (int): The maximum number of price data points to return.

        Returns:
            list[Price]: A list of Price objects.

        Raises:
            NotImplementedError: Always; subclasses must override this method.
        """
        raise NotImplementedError("This method should be overridden by subclasses")

View File

@@ -1,6 +1,10 @@
from pydantic import BaseModel from pydantic import BaseModel
class Article(BaseModel): class Article(BaseModel):
"""
Represents a news article with source, time, title, and description.
"""
source: str = "" source: str = ""
time: str = "" time: str = ""
title: str = "" title: str = ""
@@ -10,11 +14,12 @@ class NewsWrapper:
""" """
Base class for news API wrappers. Base class for news API wrappers.
All news API wrappers should inherit from this class and implement the methods. All news API wrappers should inherit from this class and implement the methods.
Provides interface for retrieving news articles from news APIs.
""" """
def get_top_headlines(self, limit: int = 100) -> list[Article]: def get_top_headlines(self, limit: int = 100) -> list[Article]:
""" """
Get top headlines, optionally limited by limit. Retrieve top headlines, optionally limited by the specified number.
Args: Args:
limit (int): The maximum number of articles to return. limit (int): The maximum number of articles to return.
Returns: Returns:
@@ -24,7 +29,7 @@ class NewsWrapper:
def get_latest_news(self, query: str, limit: int = 100) -> list[Article]: def get_latest_news(self, query: str, limit: int = 100) -> list[Article]:
""" """
Get latest news based on a query. Retrieve the latest news based on a search query.
Args: Args:
query (str): The search query. query (str): The search query.
limit (int): The maximum number of articles to return. limit (int): The maximum number of articles to return.

View File

@@ -2,12 +2,18 @@ from pydantic import BaseModel
class SocialPost(BaseModel): class SocialPost(BaseModel):
"""
Represents a social media post with time, title, description, and comments.
"""
time: str = "" time: str = ""
title: str = "" title: str = ""
description: str = "" description: str = ""
comments: list["SocialComment"] = [] comments: list["SocialComment"] = []
class SocialComment(BaseModel): class SocialComment(BaseModel):
"""
Represents a comment on a social media post.
"""
time: str = "" time: str = ""
description: str = "" description: str = ""
@@ -16,11 +22,12 @@ class SocialWrapper:
""" """
Base class for social media API wrappers. Base class for social media API wrappers.
All social media API wrappers should inherit from this class and implement the methods. All social media API wrappers should inherit from this class and implement the methods.
Provides interface for retrieving social media posts and comments from APIs.
""" """
def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]: def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]:
""" """
Get top cryptocurrency-related posts, optionally limited by total. Retrieve top cryptocurrency-related posts, optionally limited by the specified number.
Args: Args:
limit (int): The maximum number of posts to return. limit (int): The maximum number of posts to return.
Returns: Returns:

View File

@@ -0,0 +1,7 @@
# Re-exports the concrete market API wrappers so callers can import them
# directly from the `app.api.markets` package.
from app.api.markets.binance import BinanceWrapper
from app.api.markets.coinbase import CoinBaseWrapper
from app.api.markets.cryptocompare import CryptoCompareWrapper
from app.api.markets.yfinance import YFinanceWrapper

__all__ = ["BinanceWrapper", "CoinBaseWrapper", "CryptoCompareWrapper", "YFinanceWrapper"]

View File

@@ -0,0 +1,83 @@
import os
from typing import Any
from binance.client import Client # type: ignore
from app.api.core.markets import ProductInfo, MarketWrapper, Price
def extract_product(currency: str, ticker_data: dict[str, Any]) -> ProductInfo:
    """
    Converts a raw Binance ticker payload into a ProductInfo.

    Args:
        currency: Quote currency (e.g. "USDT") appended to Binance symbols.
        ticker_data: Raw ticker dict with 'symbol', 'price' and 'volume' keys.

    Returns:
        ProductInfo: The normalized product information.
    """
    product = ProductInfo()
    product.id = ticker_data.get('symbol', '')
    # BUG FIX: strip only the trailing quote-currency. replace() removed every
    # occurrence, so e.g. 'USDTUSDT' with currency 'USDT' collapsed to ''.
    product.symbol = ticker_data.get('symbol', '').removesuffix(currency)
    product.price = float(ticker_data.get('price', 0))
    product.volume_24h = float(ticker_data.get('volume', 0))
    product.currency = currency
    return product
def extract_price(kline_data: list[Any]) -> Price:
    """
    Converts a raw Binance kline (candlestick) row into a Price.

    The kline row is positional: [open_time_ms, open, high, low, close, volume, ...].
    """
    open_time, open_value, high, low, close, volume = kline_data[:6]
    price = Price()
    price.open = float(open_value)
    price.high = float(high)
    price.low = float(low)
    price.close = float(close)
    price.volume = float(volume)
    price.set_timestamp(timestamp_ms=open_time)
    return price
# Maps fiat currencies that Binance does not trade directly to the stablecoin
# used as their proxy. Add further unsupported fiat currencies here as needed.
FIAT_TO_STABLECOIN = {
    "USD": "USDT",
}
class BinanceWrapper(MarketWrapper):
    """
    Wrapper for Binance's authenticated APIs.

    Implements the MarketWrapper interface to provide unified access to
    Binance market data through the authenticated REST APIs.

    https://binance-docs.github.io/apidocs/spot/en/
    """
    def __init__(self, currency: str = "USD"):
        """
        Initializes the Binance wrapper with API credentials and the reference currency.

        Some fiat currencies are not directly supported by Binance (e.g. "USD");
        those are automatically mapped to a Tether stablecoin ("USDT") for
        compatibility with Binance trading pairs.

        Args:
            currency (str): Currency to quote prices in. If "USD" is provided,
                "USDT" is used instead. Defaults to "USD".
        """
        # Credentials are read from the environment (BINANCE_API_KEY / BINANCE_API_SECRET).
        api_key = os.getenv("BINANCE_API_KEY")
        api_secret = os.getenv("BINANCE_API_SECRET")
        self.currency = currency if currency not in FIAT_TO_STABLECOIN else FIAT_TO_STABLECOIN[currency]
        self.client = Client(api_key=api_key, api_secret=api_secret)
    def __format_symbol(self, asset_id: str) -> str:
        """
        Formats the asset_id into the pair symbol Binance expects
        (e.g. 'BTC' -> 'BTCUSDT'; 'BTC-USDT' -> 'BTCUSDT').
        """
        return asset_id.replace('-', '') if '-' in asset_id else f"{asset_id}{self.currency}"
    def get_product(self, asset_id: str) -> ProductInfo:
        # The plain ticker supplies the price; the 24h ticker supplies the volume.
        symbol = self.__format_symbol(asset_id)
        ticker: dict[str, Any] = self.client.get_symbol_ticker(symbol=symbol) # type: ignore
        ticker_24h: dict[str, Any] = self.client.get_ticker(symbol=symbol) # type: ignore
        ticker['volume'] = ticker_24h.get('volume', 0)
        return extract_product(self.currency, ticker)
    def get_products(self, asset_ids: list[str]) -> list[ProductInfo]:
        # Fetches each asset individually with get_product().
        return [ self.get_product(asset_id) for asset_id in asset_ids ]
    def get_historical_prices(self, asset_id: str, limit: int = 100) -> list[Price]:
        symbol = self.__format_symbol(asset_id)
        # Fetches hourly candles, up to `limit` entries (default 100 ≈ 4 days).
        klines: list[list[Any]] = self.client.get_historical_klines( # type: ignore
            symbol=symbol,
            interval=Client.KLINE_INTERVAL_1HOUR,
            limit=limit,
        )
        return [extract_price(kline) for kline in klines]

View File

@@ -1,12 +1,12 @@
import os import os
from enum import Enum from enum import Enum
from datetime import datetime, timedelta from datetime import datetime, timedelta
from coinbase.rest import RESTClient from coinbase.rest import RESTClient # type: ignore
from coinbase.rest.types.product_types import Candle, GetProductResponse, Product from coinbase.rest.types.product_types import Candle, GetProductResponse, Product # type: ignore
from .base import ProductInfo, BaseWrapper, Price from app.api.core.markets import ProductInfo, MarketWrapper, Price
def get_product(product_data: GetProductResponse | Product) -> ProductInfo: def extract_product(product_data: GetProductResponse | Product) -> ProductInfo:
product = ProductInfo() product = ProductInfo()
product.id = product_data.product_id or "" product.id = product_data.product_id or ""
product.symbol = product_data.base_currency_id or "" product.symbol = product_data.base_currency_id or ""
@@ -14,14 +14,16 @@ def get_product(product_data: GetProductResponse | Product) -> ProductInfo:
product.volume_24h = float(product_data.volume_24h) if product_data.volume_24h else 0.0 product.volume_24h = float(product_data.volume_24h) if product_data.volume_24h else 0.0
return product return product
def get_price(candle_data: Candle) -> Price: def extract_price(candle_data: Candle) -> Price:
timestamp = int(candle_data.start) if candle_data.start else 0
price = Price() price = Price()
price.high = float(candle_data.high) if candle_data.high else 0.0 price.high = float(candle_data.high) if candle_data.high else 0.0
price.low = float(candle_data.low) if candle_data.low else 0.0 price.low = float(candle_data.low) if candle_data.low else 0.0
price.open = float(candle_data.open) if candle_data.open else 0.0 price.open = float(candle_data.open) if candle_data.open else 0.0
price.close = float(candle_data.close) if candle_data.close else 0.0 price.close = float(candle_data.close) if candle_data.close else 0.0
price.volume = float(candle_data.volume) if candle_data.volume else 0.0 price.volume = float(candle_data.volume) if candle_data.volume else 0.0
price.timestamp_ms = int(candle_data.start) * 1000 if candle_data.start else 0 price.set_timestamp(timestamp_s=timestamp)
return price return price
@@ -37,7 +39,7 @@ class Granularity(Enum):
SIX_HOUR = 21600 SIX_HOUR = 21600
ONE_DAY = 86400 ONE_DAY = 86400
class CoinBaseWrapper(BaseWrapper): class CoinBaseWrapper(MarketWrapper):
""" """
Wrapper per le API di Coinbase Advanced Trade.\n Wrapper per le API di Coinbase Advanced Trade.\n
Implementa l'interfaccia BaseWrapper per fornire accesso unificato Implementa l'interfaccia BaseWrapper per fornire accesso unificato
@@ -63,24 +65,26 @@ class CoinBaseWrapper(BaseWrapper):
def get_product(self, asset_id: str) -> ProductInfo: def get_product(self, asset_id: str) -> ProductInfo:
asset_id = self.__format(asset_id) asset_id = self.__format(asset_id)
asset = self.client.get_product(asset_id) asset = self.client.get_product(asset_id) # type: ignore
return get_product(asset) return extract_product(asset)
def get_products(self, asset_ids: list[str]) -> list[ProductInfo]: def get_products(self, asset_ids: list[str]) -> list[ProductInfo]:
all_asset_ids = [self.__format(asset_id) for asset_id in asset_ids] all_asset_ids = [self.__format(asset_id) for asset_id in asset_ids]
assets = self.client.get_products(product_ids=all_asset_ids) assets = self.client.get_products(product_ids=all_asset_ids) # type: ignore
return [get_product(asset) for asset in assets.products] assert assets.products is not None, "No products data received from Coinbase"
return [extract_product(asset) for asset in assets.products]
def get_historical_prices(self, asset_id: str = "BTC", limit: int = 100) -> list[Price]: def get_historical_prices(self, asset_id: str, limit: int = 100) -> list[Price]:
asset_id = self.__format(asset_id) asset_id = self.__format(asset_id)
end_time = datetime.now() end_time = datetime.now()
start_time = end_time - timedelta(days=14) start_time = end_time - timedelta(days=14)
data = self.client.get_candles( data = self.client.get_candles( # type: ignore
product_id=asset_id, product_id=asset_id,
granularity=Granularity.ONE_HOUR.name, granularity=Granularity.ONE_HOUR.name,
start=str(int(start_time.timestamp())), start=str(int(start_time.timestamp())),
end=str(int(end_time.timestamp())), end=str(int(end_time.timestamp())),
limit=limit limit=limit
) )
return [get_price(candle) for candle in data.candles] assert data.candles is not None, "No candles data received from Coinbase"
return [extract_price(candle) for candle in data.candles]

View File

@@ -1,9 +1,10 @@
import os import os
from typing import Any
import requests import requests
from .base import ProductInfo, BaseWrapper, Price from app.api.core.markets import ProductInfo, MarketWrapper, Price
def get_product(asset_data: dict) -> ProductInfo: def extract_product(asset_data: dict[str, Any]) -> ProductInfo:
product = ProductInfo() product = ProductInfo()
product.id = asset_data.get('FROMSYMBOL', '') + '-' + asset_data.get('TOSYMBOL', '') product.id = asset_data.get('FROMSYMBOL', '') + '-' + asset_data.get('TOSYMBOL', '')
product.symbol = asset_data.get('FROMSYMBOL', '') product.symbol = asset_data.get('FROMSYMBOL', '')
@@ -12,21 +13,22 @@ def get_product(asset_data: dict) -> ProductInfo:
assert product.price > 0, "Invalid price data received from CryptoCompare" assert product.price > 0, "Invalid price data received from CryptoCompare"
return product return product
def get_price(price_data: dict) -> Price: def extract_price(price_data: dict[str, Any]) -> Price:
timestamp = price_data.get('time', 0)
price = Price() price = Price()
price.high = float(price_data.get('high', 0)) price.high = float(price_data.get('high', 0))
price.low = float(price_data.get('low', 0)) price.low = float(price_data.get('low', 0))
price.open = float(price_data.get('open', 0)) price.open = float(price_data.get('open', 0))
price.close = float(price_data.get('close', 0)) price.close = float(price_data.get('close', 0))
price.volume = float(price_data.get('volumeto', 0)) price.volume = float(price_data.get('volumeto', 0))
price.timestamp_ms = price_data.get('time', 0) * 1000 price.set_timestamp(timestamp_s=timestamp)
assert price.timestamp_ms > 0, "Invalid timestamp data received from CryptoCompare"
return price return price
BASE_URL = "https://min-api.cryptocompare.com" BASE_URL = "https://min-api.cryptocompare.com"
class CryptoCompareWrapper(BaseWrapper): class CryptoCompareWrapper(MarketWrapper):
""" """
Wrapper per le API pubbliche di CryptoCompare. Wrapper per le API pubbliche di CryptoCompare.
La documentazione delle API è disponibile qui: https://developers.coindesk.com/documentation/legacy/Price/SingleSymbolPriceEndpoint La documentazione delle API è disponibile qui: https://developers.coindesk.com/documentation/legacy/Price/SingleSymbolPriceEndpoint
@@ -39,7 +41,7 @@ class CryptoCompareWrapper(BaseWrapper):
self.api_key = api_key self.api_key = api_key
self.currency = currency self.currency = currency
def __request(self, endpoint: str, params: dict[str, str] | None = None) -> dict[str, str]: def __request(self, endpoint: str, params: dict[str, Any] | None = None) -> dict[str, Any]:
if params is None: if params is None:
params = {} params = {}
params['api_key'] = self.api_key params['api_key'] = self.api_key
@@ -53,18 +55,18 @@ class CryptoCompareWrapper(BaseWrapper):
"tsyms": self.currency "tsyms": self.currency
}) })
data = response.get('RAW', {}).get(asset_id, {}).get(self.currency, {}) data = response.get('RAW', {}).get(asset_id, {}).get(self.currency, {})
return get_product(data) return extract_product(data)
def get_products(self, asset_ids: list[str]) -> list[ProductInfo]: def get_products(self, asset_ids: list[str]) -> list[ProductInfo]:
response = self.__request("/data/pricemultifull", params = { response = self.__request("/data/pricemultifull", params = {
"fsyms": ",".join(asset_ids), "fsyms": ",".join(asset_ids),
"tsyms": self.currency "tsyms": self.currency
}) })
assets = [] assets: list[ProductInfo] = []
data = response.get('RAW', {}) data = response.get('RAW', {})
for asset_id in asset_ids: for asset_id in asset_ids:
asset_data = data.get(asset_id, {}).get(self.currency, {}) asset_data = data.get(asset_id, {}).get(self.currency, {})
assets.append(get_product(asset_data)) assets.append(extract_product(asset_data))
return assets return assets
def get_historical_prices(self, asset_id: str, limit: int = 100) -> list[Price]: def get_historical_prices(self, asset_id: str, limit: int = 100) -> list[Price]:
@@ -75,5 +77,5 @@ class CryptoCompareWrapper(BaseWrapper):
}) })
data = response.get('Data', {}).get('Data', []) data = response.get('Data', {}).get('Data', [])
prices = [get_price(price_data) for price_data in data] prices = [extract_price(price_data) for price_data in data]
return prices return prices

View File

@@ -1,9 +1,9 @@
import json import json
from agno.tools.yfinance import YFinanceTools from agno.tools.yfinance import YFinanceTools
from .base import BaseWrapper, ProductInfo, Price from app.api.core.markets import MarketWrapper, ProductInfo, Price
def create_product_info(stock_data: dict[str, str]) -> ProductInfo: def extract_product(stock_data: dict[str, str]) -> ProductInfo:
""" """
Converte i dati di YFinanceTools in ProductInfo. Converte i dati di YFinanceTools in ProductInfo.
""" """
@@ -12,24 +12,26 @@ def create_product_info(stock_data: dict[str, str]) -> ProductInfo:
product.symbol = product.id.split('-')[0] # Rimuovi il suffisso della valuta per le crypto product.symbol = product.id.split('-')[0] # Rimuovi il suffisso della valuta per le crypto
product.price = float(stock_data.get('Current Stock Price', f"0.0 USD").split(" ")[0]) # prende solo il numero product.price = float(stock_data.get('Current Stock Price', f"0.0 USD").split(" ")[0]) # prende solo il numero
product.volume_24h = 0.0 # YFinance non fornisce il volume 24h direttamente product.volume_24h = 0.0 # YFinance non fornisce il volume 24h direttamente
product.quote_currency = product.id.split('-')[1] # La valuta è la parte dopo il '-' product.currency = product.id.split('-')[1] # La valuta è la parte dopo il '-'
return product return product
def create_price_from_history(hist_data: dict[str, str]) -> Price: def extract_price(hist_data: dict[str, str]) -> Price:
""" """
Converte i dati storici di YFinanceTools in Price. Converte i dati storici di YFinanceTools in Price.
""" """
timestamp = int(hist_data.get('Timestamp', '0'))
price = Price() price = Price()
price.high = float(hist_data.get('High', 0.0)) price.high = float(hist_data.get('High', 0.0))
price.low = float(hist_data.get('Low', 0.0)) price.low = float(hist_data.get('Low', 0.0))
price.open = float(hist_data.get('Open', 0.0)) price.open = float(hist_data.get('Open', 0.0))
price.close = float(hist_data.get('Close', 0.0)) price.close = float(hist_data.get('Close', 0.0))
price.volume = float(hist_data.get('Volume', 0.0)) price.volume = float(hist_data.get('Volume', 0.0))
price.timestamp_ms = int(hist_data.get('Timestamp', '0')) price.set_timestamp(timestamp_ms=timestamp)
return price return price
class YFinanceWrapper(BaseWrapper): class YFinanceWrapper(MarketWrapper):
""" """
Wrapper per YFinanceTools che fornisce dati di mercato per azioni, ETF e criptovalute. Wrapper per YFinanceTools che fornisce dati di mercato per azioni, ETF e criptovalute.
Implementa l'interfaccia BaseWrapper per compatibilità con il sistema esistente. Implementa l'interfaccia BaseWrapper per compatibilità con il sistema esistente.
@@ -52,16 +54,16 @@ class YFinanceWrapper(BaseWrapper):
symbol = self._format_symbol(asset_id) symbol = self._format_symbol(asset_id)
stock_info = self.tool.get_company_info(symbol) stock_info = self.tool.get_company_info(symbol)
stock_info = json.loads(stock_info) stock_info = json.loads(stock_info)
return create_product_info(stock_info) return extract_product(stock_info)
def get_products(self, asset_ids: list[str]) -> list[ProductInfo]: def get_products(self, asset_ids: list[str]) -> list[ProductInfo]:
products = [] products: list[ProductInfo] = []
for asset_id in asset_ids: for asset_id in asset_ids:
product = self.get_product(asset_id) product = self.get_product(asset_id)
products.append(product) products.append(product)
return products return products
def get_historical_prices(self, asset_id: str = "BTC", limit: int = 100) -> list[Price]: def get_historical_prices(self, asset_id: str, limit: int = 100) -> list[Price]:
symbol = self._format_symbol(asset_id) symbol = self._format_symbol(asset_id)
days = limit // 24 + 1 # Arrotonda per eccesso days = limit // 24 + 1 # Arrotonda per eccesso
@@ -71,10 +73,10 @@ class YFinanceWrapper(BaseWrapper):
# Il formato dei dati è {timestamp: {Open: x, High: y, Low: z, Close: w, Volume: v}} # Il formato dei dati è {timestamp: {Open: x, High: y, Low: z, Close: w, Volume: v}}
timestamps = sorted(hist_data.keys())[-limit:] timestamps = sorted(hist_data.keys())[-limit:]
prices = [] prices: list[Price] = []
for timestamp in timestamps: for timestamp in timestamps:
temp = hist_data[timestamp] temp = hist_data[timestamp]
temp['Timestamp'] = timestamp temp['Timestamp'] = timestamp
price = create_price_from_history(temp) price = extract_price(temp)
prices.append(price) prices.append(price)
return prices return prices

View File

@@ -0,0 +1,7 @@
from app.api.news.newsapi import NewsApiWrapper
from app.api.news.googlenews import GoogleNewsWrapper
from app.api.news.cryptopanic_api import CryptoPanicWrapper
from app.api.news.duckduckgo import DuckDuckGoWrapper
__all__ = ["NewsApiWrapper", "GoogleNewsWrapper", "CryptoPanicWrapper", "DuckDuckGoWrapper"]

View File

@@ -1,7 +1,9 @@
import os import os
from typing import Any
import requests import requests
from enum import Enum from enum import Enum
from .base import NewsWrapper, Article from app.api.core.news import NewsWrapper, Article
class CryptoPanicFilter(Enum): class CryptoPanicFilter(Enum):
RISING = "rising" RISING = "rising"
@@ -18,8 +20,8 @@ class CryptoPanicKind(Enum):
MEDIA = "media" MEDIA = "media"
ALL = "all" ALL = "all"
def get_articles(response: dict) -> list[Article]: def extract_articles(response: dict[str, Any]) -> list[Article]:
articles = [] articles: list[Article] = []
if 'results' in response: if 'results' in response:
for item in response['results']: for item in response['results']:
article = Article() article = Article()
@@ -51,7 +53,7 @@ class CryptoPanicWrapper(NewsWrapper):
self.kind = CryptoPanicKind.NEWS self.kind = CryptoPanicKind.NEWS
def get_base_params(self) -> dict[str, str]: def get_base_params(self) -> dict[str, str]:
params = {} params: dict[str, str] = {}
params['public'] = 'true' # recommended for app and bots params['public'] = 'true' # recommended for app and bots
params['auth_token'] = self.api_key params['auth_token'] = self.api_key
params['kind'] = self.kind.value params['kind'] = self.kind.value
@@ -73,5 +75,5 @@ class CryptoPanicWrapper(NewsWrapper):
assert response.status_code == 200, f"Error fetching data: {response}" assert response.status_code == 200, f"Error fetching data: {response}"
json_response = response.json() json_response = response.json()
articles = get_articles(json_response) articles = extract_articles(json_response)
return articles[:limit] return articles[:limit]

View File

@@ -1,8 +1,10 @@
import json import json
from .base import Article, NewsWrapper from typing import Any
from agno.tools.duckduckgo import DuckDuckGoTools from agno.tools.duckduckgo import DuckDuckGoTools
from app.api.core.news import Article, NewsWrapper
def create_article(result: dict) -> Article:
def extract_article(result: dict[str, Any]) -> Article:
article = Article() article = Article()
article.source = result.get("source", "") article.source = result.get("source", "")
article.time = result.get("date", "") article.time = result.get("date", "")
@@ -23,10 +25,10 @@ class DuckDuckGoWrapper(NewsWrapper):
def get_top_headlines(self, limit: int = 100) -> list[Article]: def get_top_headlines(self, limit: int = 100) -> list[Article]:
results = self.tool.duckduckgo_news(self.query, max_results=limit) results = self.tool.duckduckgo_news(self.query, max_results=limit)
json_results = json.loads(results) json_results = json.loads(results)
return [create_article(result) for result in json_results] return [extract_article(result) for result in json_results]
def get_latest_news(self, query: str, limit: int = 100) -> list[Article]: def get_latest_news(self, query: str, limit: int = 100) -> list[Article]:
results = self.tool.duckduckgo_news(query or self.query, max_results=limit) results = self.tool.duckduckgo_news(query or self.query, max_results=limit)
json_results = json.loads(results) json_results = json.loads(results)
return [create_article(result) for result in json_results] return [extract_article(result) for result in json_results]

View File

@@ -1,7 +1,9 @@
from gnews import GNews from typing import Any
from .base import Article, NewsWrapper from gnews import GNews # type: ignore
from app.api.core.news import Article, NewsWrapper
def result_to_article(result: dict) -> Article:
def extract_article(result: dict[str, Any]) -> Article:
article = Article() article = Article()
article.source = result.get("source", "") article.source = result.get("source", "")
article.time = result.get("publishedAt", "") article.time = result.get("publishedAt", "")
@@ -17,20 +19,20 @@ class GoogleNewsWrapper(NewsWrapper):
def get_top_headlines(self, limit: int = 100) -> list[Article]: def get_top_headlines(self, limit: int = 100) -> list[Article]:
gnews = GNews(language='en', max_results=limit, period='7d') gnews = GNews(language='en', max_results=limit, period='7d')
results = gnews.get_top_news() results: list[dict[str, Any]] = gnews.get_top_news() # type: ignore
articles = [] articles: list[Article] = []
for result in results: for result in results:
article = result_to_article(result) article = extract_article(result)
articles.append(article) articles.append(article)
return articles return articles
def get_latest_news(self, query: str, limit: int = 100) -> list[Article]: def get_latest_news(self, query: str, limit: int = 100) -> list[Article]:
gnews = GNews(language='en', max_results=limit, period='7d') gnews = GNews(language='en', max_results=limit, period='7d')
results = gnews.get_news(query) results: list[dict[str, Any]] = gnews.get_news(query) # type: ignore
articles = [] articles: list[Article] = []
for result in results: for result in results:
article = result_to_article(result) article = extract_article(result)
articles.append(article) articles.append(article)
return articles return articles

View File

@@ -1,8 +1,10 @@
import os import os
import newsapi from typing import Any
from .base import Article, NewsWrapper import newsapi # type: ignore
from app.api.core.news import Article, NewsWrapper
def result_to_article(result: dict) -> Article:
def extract_article(result: dict[str, Any]) -> Article:
article = Article() article = Article()
article.source = result.get("source", {}).get("name", "") article.source = result.get("source", {}).get("name", "")
article.time = result.get("publishedAt", "") article.time = result.get("publishedAt", "")
@@ -23,7 +25,7 @@ class NewsApiWrapper(NewsWrapper):
self.client = newsapi.NewsApiClient(api_key=api_key) self.client = newsapi.NewsApiClient(api_key=api_key)
self.category = "business" # Cryptocurrency is under business self.category = "business" # Cryptocurrency is under business
self.language = "en" # TODO Only English articles for now? self.language = "en"
self.max_page_size = 100 self.max_page_size = 100
def __calc_pages(self, limit: int, page_size: int) -> tuple[int, int]: def __calc_pages(self, limit: int, page_size: int) -> tuple[int, int]:
@@ -33,21 +35,20 @@ class NewsApiWrapper(NewsWrapper):
def get_top_headlines(self, limit: int = 100) -> list[Article]: def get_top_headlines(self, limit: int = 100) -> list[Article]:
pages, page_size = self.__calc_pages(limit, self.max_page_size) pages, page_size = self.__calc_pages(limit, self.max_page_size)
articles = [] articles: list[Article] = []
for page in range(1, pages + 1): for page in range(1, pages + 1):
headlines = self.client.get_top_headlines(q="", category=self.category, language=self.language, page_size=page_size, page=page) headlines: dict[str, Any] = self.client.get_top_headlines(q="", category=self.category, language=self.language, page_size=page_size, page=page) # type: ignore
results = [result_to_article(article) for article in headlines.get("articles", [])] results = [extract_article(article) for article in headlines.get("articles", [])] # type: ignore
articles.extend(results) articles.extend(results)
return articles return articles
def get_latest_news(self, query: str, limit: int = 100) -> list[Article]: def get_latest_news(self, query: str, limit: int = 100) -> list[Article]:
pages, page_size = self.__calc_pages(limit, self.max_page_size) pages, page_size = self.__calc_pages(limit, self.max_page_size)
articles = [] articles: list[Article] = []
for page in range(1, pages + 1): for page in range(1, pages + 1):
everything = self.client.get_everything(q=query, language=self.language, sort_by="publishedAt", page_size=page_size, page=page) everything: dict[str, Any] = self.client.get_everything(q=query, language=self.language, sort_by="publishedAt", page_size=page_size, page=page) # type: ignore
results = [result_to_article(article) for article in everything.get("articles", [])] results = [extract_article(article) for article in everything.get("articles", [])] # type: ignore
articles.extend(results) articles.extend(results)
return articles return articles

View File

@@ -0,0 +1,5 @@
from app.api.social.reddit import RedditWrapper
from app.api.social.x import XWrapper
from app.api.social.chan import ChanWrapper
__all__ = ["RedditWrapper", "XWrapper", "ChanWrapper"]

View File

@@ -1,7 +1,8 @@
import os import os
from praw import Reddit from praw import Reddit # type: ignore
from praw.models import Submission, MoreComments from praw.models import Submission # type: ignore
from .base import SocialWrapper, SocialPost, SocialComment from app.api.core.social import SocialWrapper, SocialPost, SocialComment
MAX_COMMENTS = 5 MAX_COMMENTS = 5
# metterne altri se necessario. # metterne altri se necessario.
@@ -21,22 +22,20 @@ SUBREDDITS = [
] ]
def create_social_post(post: Submission) -> SocialPost: def extract_post(post: Submission) -> SocialPost:
social = SocialPost() social = SocialPost()
social.time = str(post.created) social.time = str(post.created)
social.title = post.title social.title = post.title
social.description = post.selftext social.description = post.selftext
for i, top_comment in enumerate(post.comments): for top_comment in post.comments:
if i >= MAX_COMMENTS:
break
if isinstance(top_comment, MoreComments): #skip MoreComments objects
continue
comment = SocialComment() comment = SocialComment()
comment.time = str(top_comment.created) comment.time = str(top_comment.created)
comment.description = top_comment.body comment.description = top_comment.body
social.comments.append(comment) social.comments.append(comment)
if len(social.comments) >= MAX_COMMENTS:
break
return social return social
class RedditWrapper(SocialWrapper): class RedditWrapper(SocialWrapper):
@@ -60,9 +59,10 @@ class RedditWrapper(SocialWrapper):
client_id=client_id, client_id=client_id,
client_secret=client_secret, client_secret=client_secret,
user_agent="upo-appAI", user_agent="upo-appAI",
check_for_async=False,
) )
self.subreddits = self.tool.subreddit("+".join(SUBREDDITS)) self.subreddits = self.tool.subreddit("+".join(SUBREDDITS))
def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]: def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]:
top_posts = self.subreddits.top(limit=limit, time_filter="week") top_posts = self.subreddits.top(limit=limit, time_filter="week")
return [create_social_post(post) for post in top_posts] return [extract_post(post) for post in top_posts]

View File

@@ -7,9 +7,10 @@ https://www.npmjs.com/package/rettiwt-api
import os import os
import json import json
from .base import SocialWrapper, SocialPost
from shutil import which
import subprocess import subprocess
from shutil import which
from app.api.core.social import SocialWrapper, SocialPost
class XWrapper(SocialWrapper): class XWrapper(SocialWrapper):
def __init__(self): def __init__(self):
''' '''

View File

@@ -0,0 +1,5 @@
from app.api.tools.market_tool import MarketAPIsTool
from app.api.tools.social_tool import SocialAPIsTool
from app.api.tools.news_tool import NewsAPIsTool
__all__ = ["MarketAPIsTool", "NewsAPIsTool", "SocialAPIsTool"]

View File

@@ -0,0 +1,80 @@
from agno.tools import Toolkit
from app.api.wrapper_handler import WrapperHandler
from app.api.core.markets import MarketWrapper, Price, ProductInfo
from app.api.markets import BinanceWrapper, CoinBaseWrapper, CryptoCompareWrapper, YFinanceWrapper
class MarketAPIsTool(MarketWrapper, Toolkit):
"""
Class that aggregates multiple market API wrappers and manages them using WrapperHandler.
This class supports retrieving product information and historical prices.
This class can also aggregate data from multiple sources to provide a more comprehensive view of the market.
The following wrappers are included in this order:
- BinanceWrapper
- YFinanceWrapper
- CoinBaseWrapper
- CryptoCompareWrapper
"""
def __init__(self, currency: str = "USD"):
"""
Initialize the MarketAPIsTool with multiple market API wrappers.
The following wrappers are included in this order:
- BinanceWrapper
- YFinanceWrapper
- CoinBaseWrapper
- CryptoCompareWrapper
Args:
currency (str): Valuta in cui restituire i prezzi. Default è "USD".
"""
kwargs = {"currency": currency or "USD"}
wrappers: list[type[MarketWrapper]] = [BinanceWrapper, YFinanceWrapper, CoinBaseWrapper, CryptoCompareWrapper]
self.handler = WrapperHandler.build_wrappers(wrappers, kwargs=kwargs)
Toolkit.__init__( # type: ignore
self,
name="Market APIs Toolkit",
tools=[
self.get_product,
self.get_products,
self.get_historical_prices,
self.get_products_aggregated,
self.get_historical_prices_aggregated,
],
)
def get_product(self, asset_id: str) -> ProductInfo:
return self.handler.try_call(lambda w: w.get_product(asset_id))
def get_products(self, asset_ids: list[str]) -> list[ProductInfo]:
return self.handler.try_call(lambda w: w.get_products(asset_ids))
def get_historical_prices(self, asset_id: str, limit: int = 100) -> list[Price]:
return self.handler.try_call(lambda w: w.get_historical_prices(asset_id, limit))
def get_products_aggregated(self, asset_ids: list[str]) -> list[ProductInfo]:
"""
Restituisce i dati aggregati per una lista di asset_id.\n
Attenzione che si usano tutte le fonti, quindi potrebbe usare molte chiamate API (che potrebbero essere a pagamento).
Args:
asset_ids (list[str]): Lista di asset_id da cercare.
Returns:
list[ProductInfo]: Lista di ProductInfo aggregati.
Raises:
Exception: If all wrappers fail to provide results.
"""
all_products = self.handler.try_call_all(lambda w: w.get_products(asset_ids))
return ProductInfo.aggregate(all_products)
def get_historical_prices_aggregated(self, asset_id: str = "BTC", limit: int = 100) -> list[Price]:
"""
Restituisce i dati storici aggregati per un asset_id. Usa i dati di tutte le fonti disponibili e li aggrega.\n
Attenzione che si usano tutte le fonti, quindi potrebbe usare molte chiamate API (che potrebbero essere a pagamento).
Args:
asset_id (str): Asset ID da cercare.
limit (int): Numero massimo di dati storici da restituire.
Returns:
list[Price]: Lista di Price aggregati.
Raises:
Exception: If all wrappers fail to provide results.
"""
all_prices = self.handler.try_call_all(lambda w: w.get_historical_prices(asset_id, limit))
return Price.aggregate(all_prices)

View File

@@ -0,0 +1,72 @@
from agno.tools import Toolkit
from app.api.wrapper_handler import WrapperHandler
from app.api.core.news import NewsWrapper, Article
from app.api.news import NewsApiWrapper, GoogleNewsWrapper, CryptoPanicWrapper, DuckDuckGoWrapper
class NewsAPIsTool(NewsWrapper, Toolkit):
"""
Aggregates multiple news API wrappers and manages them using WrapperHandler.
This class supports retrieving top headlines and latest news articles by querying multiple sources:
- GoogleNewsWrapper
- DuckDuckGoWrapper
- NewsApiWrapper
- CryptoPanicWrapper
By default, it returns results from the first successful wrapper.
Optionally, it can be configured to collect articles from all wrappers.
If no wrapper succeeds, an exception is raised.
"""
def __init__(self):
"""
Initialize the NewsAPIsTool with multiple news API wrappers.
The tool uses WrapperHandler to manage and invoke the different news API wrappers.
The following wrappers are included in this order:
- GoogleNewsWrapper.
- DuckDuckGoWrapper.
- NewsApiWrapper.
- CryptoPanicWrapper.
"""
wrappers: list[type[NewsWrapper]] = [GoogleNewsWrapper, DuckDuckGoWrapper, NewsApiWrapper, CryptoPanicWrapper]
self.handler = WrapperHandler.build_wrappers(wrappers)
Toolkit.__init__( # type: ignore
self,
name="News APIs Toolkit",
tools=[
self.get_top_headlines,
self.get_latest_news,
self.get_top_headlines_aggregated,
self.get_latest_news_aggregated,
],
)
def get_top_headlines(self, limit: int = 100) -> list[Article]:
return self.handler.try_call(lambda w: w.get_top_headlines(limit))
def get_latest_news(self, query: str, limit: int = 100) -> list[Article]:
return self.handler.try_call(lambda w: w.get_latest_news(query, limit))
def get_top_headlines_aggregated(self, limit: int = 100) -> dict[str, list[Article]]:
"""
Calls get_top_headlines on all wrappers/providers and returns a dictionary mapping their names to their articles.
Args:
limit (int): Maximum number of articles to retrieve from each provider.
Returns:
dict[str, list[Article]]: A dictionary mapping providers names to their list of Articles
Raises:
Exception: If all wrappers fail to provide results.
"""
return self.handler.try_call_all(lambda w: w.get_top_headlines(limit))
def get_latest_news_aggregated(self, query: str, limit: int = 100) -> dict[str, list[Article]]:
"""
Calls get_latest_news on all wrappers/providers and returns a dictionary mapping their names to their articles.
Args:
query (str): The search query to find relevant news articles.
limit (int): Maximum number of articles to retrieve from each provider.
Returns:
dict[str, list[Article]]: A dictionary mapping providers names to their list of Articles
Raises:
Exception: If all wrappers fail to provide results.
"""
return self.handler.try_call_all(lambda w: w.get_latest_news(query, limit))

View File

@@ -0,0 +1,51 @@
from agno.tools import Toolkit
from app.api.wrapper_handler import WrapperHandler
from app.api.core.social import SocialPost, SocialWrapper
from app.api.social import *
class SocialAPIsTool(SocialWrapper, Toolkit):
"""
Aggregates multiple social media API wrappers and manages them using WrapperHandler.
This class supports retrieving top crypto-related posts by querying multiple sources:
- RedditWrapper
By default, it returns results from the first successful wrapper.
Optionally, it can be configured to collect posts from all wrappers.
If no wrapper succeeds, an exception is raised.
"""
def __init__(self):
"""
Initialize the SocialAPIsTool with multiple social media API wrappers.
The tool uses WrapperHandler to manage and invoke the different social media API wrappers.
The following wrappers are included in this order:
- RedditWrapper.
"""
wrappers: list[type[SocialWrapper]] = [RedditWrapper, XWrapper, ChanWrapper]
self.handler = WrapperHandler.build_wrappers(wrappers)
Toolkit.__init__( # type: ignore
self,
name="Socials Toolkit",
tools=[
self.get_top_crypto_posts,
self.get_top_crypto_posts_aggregated,
],
)
def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]:
return self.handler.try_call(lambda w: w.get_top_crypto_posts(limit))
def get_top_crypto_posts_aggregated(self, limit_per_wrapper: int = 5) -> dict[str, list[SocialPost]]:
"""
Calls get_top_crypto_posts on all wrappers/providers and returns a dictionary mapping their names to their posts.
Args:
limit_per_wrapper (int): Maximum number of posts to retrieve from each provider.
Returns:
dict[str, list[SocialPost]]: A dictionary where keys are wrapper names and values are lists of SocialPost objects.
Raises:
Exception: If all wrappers fail to provide results.
"""
return self.handler.try_call_all(lambda w: w.get_top_crypto_posts(limit_per_wrapper))

View File

@@ -1,13 +1,16 @@
import inspect import inspect
import logging
import time import time
import traceback import traceback
from typing import TypeVar, Callable, Generic, Iterable, Type from typing import Any, Callable, Generic, TypeVar
from agno.utils.log import log_warning, log_info
W = TypeVar("W") logging = logging.getLogger("wrapper_handler")
T = TypeVar("T") WrapperType = TypeVar("WrapperType")
WrapperClassType = TypeVar("WrapperClassType")
OutputType = TypeVar("OutputType")
class WrapperHandler(Generic[W]):
class WrapperHandler(Generic[WrapperType]):
""" """
A handler for managing multiple wrappers with retry logic. A handler for managing multiple wrappers with retry logic.
It attempts to call a function on the current wrapper, and if it fails, It attempts to call a function on the current wrapper, and if it fails,
@@ -17,7 +20,7 @@ class WrapperHandler(Generic[W]):
Note: use `build_wrappers` to create an instance of this class for better error handling. Note: use `build_wrappers` to create an instance of this class for better error handling.
""" """
def __init__(self, wrappers: list[W], try_per_wrapper: int = 3, retry_delay: int = 2): def __init__(self, wrappers: list[WrapperType], try_per_wrapper: int = 3, retry_delay: int = 2):
""" """
Initializes the WrapperHandler with a list of wrappers and retry settings.\n Initializes the WrapperHandler with a list of wrappers and retry settings.\n
Use `build_wrappers` to create an instance of this class for better error handling. Use `build_wrappers` to create an instance of this class for better error handling.
@@ -32,9 +35,18 @@ class WrapperHandler(Generic[W]):
self.retry_per_wrapper = try_per_wrapper self.retry_per_wrapper = try_per_wrapper
self.retry_delay = retry_delay self.retry_delay = retry_delay
self.index = 0 self.index = 0
self.retry_count = 0
def try_call(self, func: Callable[[W], T]) -> T: def set_retries(self, try_per_wrapper: int, retry_delay: int) -> None:
"""
Sets the retry parameters for the handler.
Args:
try_per_wrapper (int): Number of retries per wrapper before switching to the next.
retry_delay (int): Delay in seconds between retries.
"""
self.retry_per_wrapper = try_per_wrapper
self.retry_delay = retry_delay
def try_call(self, func: Callable[[WrapperType], OutputType]) -> OutputType:
""" """
Attempts to call the provided function on the current wrapper. Attempts to call the provided function on the current wrapper.
If it fails, it retries a specified number of times before switching to the next wrapper. If it fails, it retries a specified number of times before switching to the next wrapper.
@@ -46,35 +58,9 @@ class WrapperHandler(Generic[W]):
Raises: Raises:
Exception: If all wrappers fail after retries. Exception: If all wrappers fail after retries.
""" """
log_info(f"{inspect.getsource(func).strip()} {inspect.getclosurevars(func).nonlocals}") return self.__try_call(func, try_all=False).popitem()[1]
iterations = 0 def try_call_all(self, func: Callable[[WrapperType], OutputType]) -> dict[str, OutputType]:
while iterations < len(self.wrappers):
wrapper = self.wrappers[self.index]
wrapper_name = wrapper.__class__.__name__
try:
log_info(f"try_call {wrapper_name}")
result = func(wrapper)
log_info(f"{wrapper_name} succeeded")
self.retry_count = 0
return result
except Exception as e:
self.retry_count += 1
error = WrapperHandler.__concise_error(e)
log_warning(f"{wrapper_name} failed {self.retry_count}/{self.retry_per_wrapper}: {error}")
if self.retry_count >= self.retry_per_wrapper:
self.index = (self.index + 1) % len(self.wrappers)
self.retry_count = 0
iterations += 1
else:
time.sleep(self.retry_delay)
raise Exception(f"All wrappers failed, latest error: {error}")
def try_call_all(self, func: Callable[[W], T]) -> dict[str, T]:
""" """
Calls the provided function on all wrappers, collecting results. Calls the provided function on all wrappers, collecting results.
If a wrapper fails, it logs a warning and continues with the next. If a wrapper fails, it logs a warning and continues with the next.
@@ -86,24 +72,57 @@ class WrapperHandler(Generic[W]):
Raises: Raises:
Exception: If all wrappers fail. Exception: If all wrappers fail.
""" """
log_info(f"{inspect.getsource(func).strip()} {inspect.getclosurevars(func).nonlocals}") return self.__try_call(func, try_all=True)
results = {} def __try_call(self, func: Callable[[WrapperType], OutputType], try_all: bool) -> dict[str, OutputType]:
for wrapper in self.wrappers: """
Internal method to handle the logic of trying to call a function on wrappers.
It can either stop at the first success or try all wrappers.
Args:
func (Callable[[W], T]): A function that takes a wrapper and returns a result.
try_all (bool): If True, tries all wrappers and collects results; if False, stops at the first success.
Returns:
dict[str, T]: A dictionary mapping wrapper class names to results.
Raises:
Exception: If all wrappers fail after retries.
"""
logging.info(f"{inspect.getsource(func).strip()} {inspect.getclosurevars(func).nonlocals}")
results: dict[str, OutputType] = {}
starting_index = self.index
for i in range(starting_index, len(self.wrappers) + starting_index):
self.index = i % len(self.wrappers)
wrapper = self.wrappers[self.index]
wrapper_name = wrapper.__class__.__name__ wrapper_name = wrapper.__class__.__name__
if not try_all:
logging.info(f"try_call {wrapper_name}")
for try_count in range(1, self.retry_per_wrapper + 1):
try: try:
result = func(wrapper) result = func(wrapper)
log_info(f"{wrapper_name} succeeded") logging.info(f"{wrapper_name} succeeded")
results[wrapper.__class__] = result results[wrapper_name] = result
break
except Exception as e: except Exception as e:
error = WrapperHandler.__concise_error(e) error = WrapperHandler.__concise_error(e)
log_warning(f"{wrapper_name} failed: {error}") logging.warning(f"{wrapper_name} failed {try_count}/{self.retry_per_wrapper}: {error}")
time.sleep(self.retry_delay)
if not try_all and results:
return results
if not results: if not results:
error = locals().get("error", "Unknown error")
raise Exception(f"All wrappers failed, latest error: {error}") raise Exception(f"All wrappers failed, latest error: {error}")
self.index = starting_index
return results return results
@staticmethod @staticmethod
def __check(wrappers: list[W]) -> bool: def __check(wrappers: list[Any]) -> bool:
return all(w.__class__ is type for w in wrappers) return all(w.__class__ is type for w in wrappers)
@staticmethod @staticmethod
@@ -112,13 +131,13 @@ class WrapperHandler(Generic[W]):
return f"{e} [\"{last_frame.filename}\", line {last_frame.lineno}]" return f"{e} [\"{last_frame.filename}\", line {last_frame.lineno}]"
@staticmethod @staticmethod
def build_wrappers(constructors: Iterable[Type[W]], try_per_wrapper: int = 3, retry_delay: int = 2, kwargs: dict | None = None) -> 'WrapperHandler[W]': def build_wrappers(constructors: list[type[WrapperClassType]], try_per_wrapper: int = 3, retry_delay: int = 2, kwargs: dict[str, Any] | None = None) -> 'WrapperHandler[WrapperClassType]':
""" """
Builds a WrapperHandler instance with the given wrapper constructors. Builds a WrapperHandler instance with the given wrapper constructors.
It attempts to initialize each wrapper and logs a warning if any cannot be initialized. It attempts to initialize each wrapper and logs a warning if any cannot be initialized.
Only successfully initialized wrappers are included in the handler. Only successfully initialized wrappers are included in the handler.
Args: Args:
constructors (Iterable[Type[W]]): An iterable of wrapper classes to instantiate. e.g. [WrapperA, WrapperB] constructors (list[type[W]]): An iterable of wrapper classes to instantiate. e.g. [WrapperA, WrapperB]
try_per_wrapper (int): Number of retries per wrapper before switching to the next. try_per_wrapper (int): Number of retries per wrapper before switching to the next.
retry_delay (int): Delay in seconds between retries. retry_delay (int): Delay in seconds between retries.
kwargs (dict | None): Optional dictionary with keyword arguments common to all wrappers. kwargs (dict | None): Optional dictionary with keyword arguments common to all wrappers.
@@ -129,12 +148,12 @@ class WrapperHandler(Generic[W]):
""" """
assert WrapperHandler.__check(constructors), f"All constructors must be classes. Received: {constructors}" assert WrapperHandler.__check(constructors), f"All constructors must be classes. Received: {constructors}"
result = [] result: list[WrapperClassType] = []
for wrapper_class in constructors: for wrapper_class in constructors:
try: try:
wrapper = wrapper_class(**(kwargs or {})) wrapper = wrapper_class(**(kwargs or {}))
result.append(wrapper) result.append(wrapper)
except Exception as e: except Exception as e:
log_warning(f"{wrapper_class} cannot be initialized: {e}") logging.warning(f"'{wrapper_class.__name__}' cannot be initialized: {e}")
return WrapperHandler(result, try_per_wrapper, retry_delay) return WrapperHandler(result, try_per_wrapper, retry_delay)

View File

@@ -1,78 +0,0 @@
import os
import json
from typing import List, Dict
from app.pipeline import Pipeline
# Directory where chats are persisted: <package>/../saves, created at import time.
SAVE_DIR = os.path.join(os.path.dirname(__file__), "..", "saves")
os.makedirs(SAVE_DIR, exist_ok=True)
class ChatManager:
    """
    Drives a conversation with the Pipeline.

    Responsibilities:
    - keep the message history
    - invoke the Pipeline to generate replies
    - save chats to disk and load them back
    """

    def __init__(self):
        self.pipeline = Pipeline()
        # Each entry: {"role": "user"/"assistant", "content": "..."}
        self.history: List[Dict[str, str]] = []

    def send_message(self, message: str) -> str:
        """Record the user message, ask the Pipeline for a reply, record and return it."""
        self.history.append({"role": "user", "content": message})
        reply = self.pipeline.interact(message)
        self.history.append({"role": "assistant", "content": reply})
        return reply

    def save_chat(self, filename: str = "chat.json") -> None:
        """Persist the current chat as JSON under src/saves/<filename>."""
        target = os.path.join(SAVE_DIR, filename)
        with open(target, "w", encoding="utf-8") as handle:
            json.dump(self.history, handle, ensure_ascii=False, indent=2)

    def load_chat(self, filename: str = "chat.json") -> None:
        """Load a saved chat from src/saves/<filename>; a missing file empties the history."""
        target = os.path.join(SAVE_DIR, filename)
        if os.path.exists(target):
            with open(target, "r", encoding="utf-8") as handle:
                self.history = json.load(handle)
        else:
            self.history = []

    def reset_chat(self) -> None:
        """Drop the whole chat history."""
        self.history = []

    def get_history(self) -> List[Dict[str, str]]:
        """Return the complete chat history."""
        return self.history

    # Pass-through of provider and style, so Gradio can drive them.
    def choose_provider(self, index: int):
        self.pipeline.choose_provider(index)

    def choose_style(self, index: int):
        self.pipeline.choose_style(index)

    def list_providers(self) -> List[str]:
        return self.pipeline.list_providers()

    def list_styles(self) -> List[str]:
        return self.pipeline.list_styles()

238
src/app/configs.py Normal file
View File

@@ -0,0 +1,238 @@
import os
import threading
import ollama
import yaml
import logging.config
from typing import Any, ClassVar
from pydantic import BaseModel
from agno.agent import Agent
from agno.tools import Toolkit
from agno.models.base import Model
from agno.models.google import Gemini
from agno.models.ollama import Ollama
log = logging.getLogger(__name__)
class AppModel(BaseModel):
    """One configured LLM entry, plus factories for model and agent instances."""
    name: str = "gemini-2.0-flash"  # model id passed to the provider
    label: str = "Gemini"           # human-readable label shown in UIs
    # Concrete model class (e.g. Gemini, Ollama); assigned during validation,
    # left as None while the provider has not been validated/available.
    model: type[Model] | None = None

    def get_model(self, instructions: str) -> Model:
        """
        Build an instance of the configured model class.

        Args:
            instructions: instructions passed to the model (system prompt).

        Returns:
            A Model instance of the configured class.

        Raises:
            ValueError: if no model class has been assigned to this entry.
        """
        if self.model is None:
            raise ValueError(f"Model class for '{self.name}' is not set.")
        return self.model(id=self.name, instructions=[instructions])

    def get_agent(self, instructions: str, name: str = "", output_schema: type[BaseModel] | None = None, tools: list[Toolkit] | None = None) -> Agent:
        """
        Build an Agent backed by this model with the given instructions.

        Args:
            instructions: instructions passed to the model (system prompt).
            name: optional agent name.
            output_schema: optional output schema (Pydantic BaseModel).
            tools: optional list of Toolkits made available to the agent.

        Returns:
            An Agent instance.
        """
        return Agent(
            model=self.get_model(instructions),
            name=name,
            retries=2,
            tools=tools,
            delay_between_retries=5, # seconds
            output_schema=output_schema
        )
class APIConfig(BaseModel):
    """Retry and currency settings for outbound API calls."""
    # NOTE(review): presumably consumed by the market/news wrappers — confirm at call sites.
    retry_attempts: int = 3        # how many times a failing call is retried
    retry_delay_seconds: int = 2   # pause between retries, in seconds
    currency: str = "USD"          # quote currency requested from market APIs
class Strategy(BaseModel):
    """An investment-strategy option offered to the user."""
    name: str = "Conservative"   # identifier used for lookups (see AppConfig.get_strategy_by_name)
    label: str = "Conservative"  # display label shown in UIs
    description: str = "Focus on low-risk investments with steady returns."
class ModelsConfig(BaseModel):
    """Configured models grouped by provider."""
    gemini: list[AppModel] = [AppModel()]  # models served through Google's Gemini API
    ollama: list[AppModel] = []            # models served by a local Ollama instance

    @property
    def all_models(self) -> list[AppModel]:
        """All configured models, Gemini entries first, then Ollama."""
        return self.gemini + self.ollama
class AgentsConfigs(BaseModel):
    """Default strategy and model names used by the agent team."""
    # NOTE(review): these names look like keys resolved via AppConfig.get_model_by_name /
    # get_strategy_by_name — confirm at the consumers.
    strategy: str = "Conservative"
    team_model: str = "gemini-2.0-flash"
    team_leader_model: str = "gemini-2.0-flash"
    predictor_model: str = "gemini-2.0-flash"
class AppConfig(BaseModel):
    """
    Singleton holding the whole application configuration.

    Loaded from YAML via `load()`. The first construction wins: later
    constructions return the same instance (see __new__/__init__), and the
    first one also configures logging and validates the model providers.
    """
    port: int = 8000            # HTTP port for the app
    gradio_share: bool = False  # whether Gradio creates a public share link
    logging_level: str = "INFO" # root logging level name
    api: APIConfig = APIConfig()
    strategies: list[Strategy] = [Strategy()]
    models: ModelsConfig = ModelsConfig()
    agents: AgentsConfigs = AgentsConfigs()

    # Protects the singleton check in __new__ against concurrent first calls.
    __lock: ClassVar[threading.Lock] = threading.Lock()

    @classmethod
    def load(cls, file_path: str = "configs.yaml") -> 'AppConfig':
        """
        Load the application configuration from a YAML file.
        Be sure to call load_dotenv() before if you use environment variables.

        Args:
            file_path: path to the YAML configuration file.

        Returns:
            An instance of AppConfig with the loaded settings.
        """
        with open(file_path, 'r') as f:
            data = yaml.safe_load(f)
        configs = cls(**data)
        log.info(f"Loaded configuration from {file_path}")
        return configs

    def __new__(cls, *args: Any, **kwargs: Any) -> 'AppConfig':
        # Lock-guarded singleton: only the first call creates the instance.
        with cls.__lock:
            if not hasattr(cls, 'instance'):
                cls.instance = super(AppConfig, cls).__new__(cls)
            return cls.instance

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Run pydantic initialization and its side effects only once.
        if hasattr(self, '_initialized'):
            return
        super().__init__(*args, **kwargs)
        self.set_logging_level()
        self.validate_models()
        self._initialized = True

    def get_model_by_name(self, name: str) -> AppModel:
        """
        Retrieve a model configuration by its name.

        Args:
            name: the name of the model to retrieve.

        Returns:
            The AppModel instance if found.

        Raises:
            ValueError: if no model with the specified name is found.
        """
        for model in self.models.all_models:
            if model.name == name:
                return model
        raise ValueError(f"Model with name '{name}' not found.")

    def get_strategy_by_name(self, name: str) -> Strategy:
        """
        Retrieve a strategy configuration by its name.

        Args:
            name: the name of the strategy to retrieve.

        Returns:
            The Strategy instance if found.

        Raises:
            ValueError: if no strategy with the specified name is found.
        """
        for strat in self.strategies:
            if strat.name == name:
                return strat
        raise ValueError(f"Strategy with name '{name}' not found.")

    def set_logging_level(self) -> None:
        """
        Configure the logging system (colored console handler) using the
        level from this configuration.
        """
        logging.config.dictConfig({
            'version': 1,
            'disable_existing_loggers': False, # Keep existing loggers (e.g. third-party loggers)
            'formatters': {
                'colored': {
                    '()': 'colorlog.ColoredFormatter',
                    'format': '%(log_color)s%(levelname)s%(reset)s [%(asctime)s] (%(name)s) - %(message)s'
                },
            },
            'handlers': {
                'console': {
                    'class': 'logging.StreamHandler',
                    'formatter': 'colored',
                    'level': self.logging_level,
                },
            },
            'root': { # Configure the root logger
                'handlers': ['console'],
                'level': self.logging_level,
            },
            'loggers': {
                'httpx': {'level': 'WARNING'}, # Too much spam for INFO
            }
        })
        # Strip the agno loggers' own handlers so their records propagate
        # to the root handler configured above.
        agno_logger_names = ["agno", "agno-team", "agno-workflow"]
        for logger_name in agno_logger_names:
            logger = logging.getLogger(logger_name)
            logger.handlers.clear()
            logger.propagate = True

    def validate_models(self) -> None:
        """
        Validate the configured models for each provider.
        """
        self.__validate_online_models("gemini", clazz=Gemini, key="GOOGLE_API_KEY")
        self.__validate_ollama_models()

    def __validate_online_models(self, provider: str, clazz: type[Model], key: str | None = None) -> None:
        """
        Validate models for online providers like Gemini.

        Args:
            provider: name of the provider (e.g. "gemini")
            clazz: class of the model (e.g. Gemini)
            key: environment variable holding the provider API key (optional)
        """
        if getattr(self.models, provider) is None:
            log.warning(f"No models configured for provider '{provider}'.")
            # Fix: previously fell through and crashed on None below.
            return
        models: list[AppModel] = getattr(self.models, provider)
        if key and os.getenv(key) is None:
            # Without the API key none of these models is usable: drop them all.
            log.warning(f"No {key} set in environment variables for {provider}.")
            models.clear()
            return
        for model in models:
            model.model = clazz

    def __validate_ollama_models(self) -> None:
        """
        Validate models for the Ollama provider, keeping only those the
        local Ollama instance actually serves.
        """
        try:
            models_list = ollama.list()
            availables = {model['model'] for model in models_list['models']}
            not_availables: list[str] = []
            for model in self.models.ollama:
                if model.name in availables:
                    model.model = Ollama
                else:
                    not_availables.append(model.name)
            if not_availables:
                log.warning(f"Ollama models not available: {not_availables}")
            self.models.ollama = [model for model in self.models.ollama if model.model]
        except Exception as e:
            # Ollama being down is not fatal: the provider simply stays unvalidated.
            log.warning(f"Ollama is not running or not reachable: {e}")

View File

@@ -0,0 +1,4 @@
from app.interface.chat import ChatManager
from app.interface.telegram_app import TelegramApp
__all__ = ["ChatManager", "TelegramApp"]

129
src/app/interface/chat.py Normal file
View File

@@ -0,0 +1,129 @@
import os
import json
import gradio as gr
from app.agents.pipeline import Pipeline, PipelineInputs
class ChatManager:
    """
    Manages the conversation with the Pipeline:
    - keeps the message history
    - invokes the Pipeline to generate replies
    - saves and reloads chats
    """
    def __init__(self):
        # Each entry: {"role": "user"/"assistant", "content": "..."}
        self.history: list[dict[str, str]] = []
        # Per-session pipeline configuration (model/strategy choices + user query).
        self.inputs = PipelineInputs()

    def send_message(self, message: str) -> None:
        """Append a user message to the history (the reply is recorded separately)."""
        self.history.append({"role": "user", "content": message})

    def receive_message(self, response: str) -> str:
        """Append a pipeline reply to the history and return it unchanged."""
        self.history.append({"role": "assistant", "content": response})
        return response

    def save_chat(self, filename: str = "chat.json") -> None:
        """Save the current chat as JSON at <filename> (relative to the working directory)."""
        with open(filename, "w", encoding="utf-8") as f:
            json.dump(self.history, f, ensure_ascii=False, indent=2)

    def load_chat(self, filename: str = "chat.json") -> None:
        """Load a saved chat from <filename>; a missing file resets the history."""
        if not os.path.exists(filename):
            self.history = []
            return
        with open(filename, "r", encoding="utf-8") as f:
            self.history = json.load(f)

    def reset_chat(self) -> None:
        """Clear the whole chat history."""
        self.history = []

    def get_history(self) -> list[dict[str, str]]:
        """Return the complete chat history."""
        return self.history

    ########################################
    # Gradio callbacks
    ########################################
    def gradio_respond(self, message: str, history: list[dict[str, str]]) -> tuple[list[dict[str, str]], list[dict[str, str]], str]:
        """Handle one chat turn: run the Pipeline and update both histories."""
        self.send_message(message)
        self.inputs.user_query = message
        # A fresh Pipeline per turn; the configuration lives in self.inputs.
        pipeline = Pipeline(self.inputs)
        response = pipeline.interact()
        self.receive_message(response)
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": response})
        # The trailing "" clears the input textbox.
        return history, history, ""

    def gradio_save(self) -> str:
        """Persist the chat to chat.json and return a status message."""
        self.save_chat("chat.json")
        return "💾 Chat salvata in chat.json"

    def gradio_load(self) -> tuple[list[dict[str, str]], list[dict[str, str]]]:
        """Reload chat.json and rebuild the chatbot message list."""
        self.load_chat("chat.json")
        history: list[dict[str, str]] = []
        for m in self.get_history():
            history.append({"role": m["role"], "content": m["content"]})
        return history, history

    def gradio_clear(self) -> tuple[list[dict[str, str]], list[dict[str, str]]]:
        """Reset the chat and empty the chatbot widget."""
        self.reset_chat()
        return [], []

    def gradio_build_interface(self) -> gr.Blocks:
        """Build the Gradio Blocks UI wired to this manager's callbacks."""
        with gr.Blocks() as interface:
            gr.Markdown("# 🤖 Agente di Analisi e Consulenza Crypto (Chat)")
            # Model and strategy dropdowns
            with gr.Row():
                provider = gr.Dropdown(
                    choices=self.inputs.list_models_names(),
                    type="index",
                    label="Modello da usare"
                )
                provider.change(fn=self.inputs.choose_team_leader, inputs=provider, outputs=None)
                style = gr.Dropdown(
                    choices=self.inputs.list_strategies_names(),
                    type="index",
                    label="Stile di investimento"
                )
                style.change(fn=self.inputs.choose_strategy, inputs=style, outputs=None)
            chatbot = gr.Chatbot(label="Conversazione", height=500, type="messages")
            msg = gr.Textbox(label="Scrivi la tua richiesta", placeholder="Es: Quali sono le crypto interessanti oggi?")
            with gr.Row():
                clear_btn = gr.Button("🗑️ Reset Chat")
                save_btn = gr.Button("💾 Salva Chat")
                load_btn = gr.Button("📂 Carica Chat")
            # Event wiring
            msg.submit(self.gradio_respond, inputs=[msg, chatbot], outputs=[chatbot, chatbot, msg])
            clear_btn.click(self.gradio_clear, inputs=None, outputs=[chatbot, chatbot])
            save_btn.click(self.gradio_save, inputs=None, outputs=None)
            load_btn.click(self.gradio_load, inputs=None, outputs=[chatbot, chatbot])
        return interface

View File

@@ -0,0 +1,252 @@
import io
import os
import json
import httpx
import logging
import warnings
from enum import Enum
from markdown_pdf import MarkdownPdf, Section
from telegram import CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, Message, Update, User
from telegram.constants import ChatAction
from telegram.ext import Application, CallbackQueryHandler, CommandHandler, ContextTypes, ConversationHandler, MessageHandler, filters
from app.agents.pipeline import Pipeline, PipelineInputs
# per per_message di ConversationHandler che rompe sempre qualunque input tu metta
warnings.filterwarnings("ignore")
logging = logging.getLogger("telegram")
# Lo stato cambia in base al valore di ritorno delle funzioni async
# END state è già definito in telegram.ext.ConversationHandler
# Un semplice schema delle interazioni:
# /start
# ║
# V
# ╔══ CONFIGS <═════╗
# ║ ║ ╚══> SELECT_CONFIG
# ║ V
# ║ start_team (polling for updates)
# ║ ║
# ║ V
# ╚═══> END
CONFIGS, SELECT_CONFIG = range(2)
# Usato per separare la query arrivata da Telegram
QUERY_SEP = "|==|"
class ConfigsChat(Enum):
    """Configuration entries offered in the chat menu; values are the display labels."""
    MODEL_TEAM = "Team Model"
    MODEL_OUTPUT = "Output Model"
    STRATEGY = "Strategy"
class TelegramApp:
    """
    Telegram front-end for the analysis Pipeline.

    Keeps one PipelineInputs per Telegram user so each chat can pick its own
    models and strategy before launching the team.
    """
    def __init__(self):
        token = os.getenv("TELEGRAM_BOT_TOKEN")
        assert token, "TELEGRAM_BOT_TOKEN environment variable not set"
        # Per-user pipeline configuration, keyed by the Telegram User object.
        self.user_requests: dict[User, PipelineInputs] = {}
        self.token = token
        self.create_bot()

    def add_miniapp_url(self, url: str) -> None:
        """Point the chat menu button to a web mini-app at `url` (best effort)."""
        try:
            endpoint = f"https://api.telegram.org/bot{self.token}/setChatMenuButton"
            payload = {"menu_button": json.dumps({
                "type": "web_app",
                "text": "MiniApp",
                "web_app": { "url": url }
            })}
            httpx.post(endpoint, data=payload)
        except httpx.HTTPError as e:
            logging.warning(f"Failed to update mini app URL: {e}")

    def create_bot(self) -> None:
        """
        Initialize the Telegram bot and set up the conversation handler.
        """
        app = Application.builder().token(self.token).build()
        app.add_error_handler(self.__error_handler)
        app.add_handler(ConversationHandler(
            per_message=False, # NOTE(review): PTB warns about this at startup whatever you pass; tracking per update, not per message
            entry_points=[CommandHandler('start', self.__start)],
            states={
                CONFIGS: [
                    CallbackQueryHandler(self.__model_team, pattern=ConfigsChat.MODEL_TEAM.name),
                    CallbackQueryHandler(self.__model_output, pattern=ConfigsChat.MODEL_OUTPUT.name),
                    CallbackQueryHandler(self.__strategy, pattern=ConfigsChat.STRATEGY.name),
                    CallbackQueryHandler(self.__cancel, pattern='^cancel$'),
                    MessageHandler(filters.TEXT, self.__start_team) # Any text message
                ],
                SELECT_CONFIG: [
                    CallbackQueryHandler(self.__select_config, pattern=f"^__select_config{QUERY_SEP}.*$"),
                ]
            },
            fallbacks=[CommandHandler('start', self.__start)],
        ))
        self.app = app

    def run(self) -> None:
        """Start long polling (blocks until the bot is stopped)."""
        self.app.run_polling()

    ########################################
    # Utility helpers
    ########################################
    async def start_message(self, user: User, query: CallbackQuery | Message) -> None:
        """Show (or refresh) the configuration menu for `user`."""
        confs = self.user_requests.setdefault(user, PipelineInputs())
        str_model_team = f"{ConfigsChat.MODEL_TEAM.value}: {confs.team_model.label}"
        str_model_output = f"{ConfigsChat.MODEL_OUTPUT.value}: {confs.team_leader_model.label}"
        str_strategy = f"{ConfigsChat.STRATEGY.value}: {confs.strategy.label}"
        msg, keyboard = (
            "Please choose an option or write your query",
            InlineKeyboardMarkup([
                [InlineKeyboardButton(str_model_team, callback_data=ConfigsChat.MODEL_TEAM.name)],
                [InlineKeyboardButton(str_model_output, callback_data=ConfigsChat.MODEL_OUTPUT.name)],
                [InlineKeyboardButton(str_strategy, callback_data=ConfigsChat.STRATEGY.name)],
                [InlineKeyboardButton("Cancel", callback_data='cancel')]
            ])
        )
        # Edit in place when arriving from a button press, otherwise reply.
        if isinstance(query, CallbackQuery):
            await query.edit_message_text(msg, reply_markup=keyboard, parse_mode='MarkdownV2')
        else:
            await query.reply_text(msg, reply_markup=keyboard, parse_mode='MarkdownV2')

    async def handle_callbackquery(self, update: Update) -> tuple[CallbackQuery, User]:
        """Validate and unpack a callback-query update."""
        assert update.callback_query and update.callback_query.from_user, "Update callback_query or user is None"
        query = update.callback_query
        await query.answer() # Acknowledge the callback query
        return query, query.from_user

    async def handle_message(self, update: Update) -> tuple[Message, User]:
        """Validate and unpack a plain-message update."""
        assert update.message and update.message.from_user, "Update message or user is None"
        return update.message, update.message.from_user

    def build_callback_data(self, callback: str, config: ConfigsChat, labels: list[str]) -> list[tuple[str, str]]:
        """Build (label, callback_data) pairs encoding callback name, config value and option index."""
        return [(label, QUERY_SEP.join((callback, config.value, str(i)))) for i, label in enumerate(labels)]

    async def __error_handler(self, update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
        """Log unhandled handler exceptions and try to notify the user."""
        try:
            logging.error(f"Unhandled exception in Telegram handler: {context.error}")
            # Try to notify the user in chat if possible
            if isinstance(update, Update) and update.effective_chat:
                chat_id = update.effective_chat.id
                msg = "An error occurred while processing your request."
                await context.bot.send_message(chat_id=chat_id, text=msg)
        except Exception:
            # Ensure we never raise from the error handler itself
            logging.exception("Exception in the error handler")

    #########################################
    # Async handlers for commands and messages
    #########################################
    async def __start(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
        message, user = await self.handle_message(update)
        logging.info(f"@{user.username} started the conversation.")
        await self.start_message(user, message)
        return CONFIGS

    async def __model_team(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
        return await self._model_select(update, ConfigsChat.MODEL_TEAM)

    async def __model_output(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
        return await self._model_select(update, ConfigsChat.MODEL_OUTPUT)

    async def _model_select(self, update: Update, state: ConfigsChat, msg: str | None = None) -> int:
        """Show the list of available models for the given config entry."""
        query, user = await self.handle_callbackquery(update)
        req = self.user_requests[user]
        models = self.build_callback_data("__select_config", state, req.list_models_names())
        inline_btns = [[InlineKeyboardButton(name, callback_data=callback_data)] for name, callback_data in models]
        await query.edit_message_text(msg or state.value, reply_markup=InlineKeyboardMarkup(inline_btns))
        return SELECT_CONFIG

    async def __strategy(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
        """Show the list of available strategies."""
        query, user = await self.handle_callbackquery(update)
        req = self.user_requests[user]
        strategies = self.build_callback_data("__select_config", ConfigsChat.STRATEGY, req.list_strategies_names())
        inline_btns = [[InlineKeyboardButton(name, callback_data=callback_data)] for name, callback_data in strategies]
        await query.edit_message_text("Select a strategy", reply_markup=InlineKeyboardMarkup(inline_btns))
        return SELECT_CONFIG

    async def __select_config(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
        """Apply the option the user picked and return to the config menu."""
        query, user = await self.handle_callbackquery(update)
        logging.debug(f"@{user.username} --> {query.data}")
        req = self.user_requests[user]
        _, state, index = str(query.data).split(QUERY_SEP)
        # Fix: build_callback_data encodes ConfigsChat.value ("Team Model", ...),
        # while str(ConfigsChat.MODEL_TEAM) is "ConfigsChat.MODEL_TEAM" — the old
        # comparisons could never match, so selections were silently ignored.
        if state == ConfigsChat.MODEL_TEAM.value:
            req.choose_team(int(index))
        if state == ConfigsChat.MODEL_OUTPUT.value:
            req.choose_team_leader(int(index))
        if state == ConfigsChat.STRATEGY.value:
            req.choose_strategy(int(index))
        await self.start_message(user, query)
        return CONFIGS

    async def __start_team(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
        """Treat any plain text as the user query and launch the pipeline."""
        message, user = await self.handle_message(update)
        confs = self.user_requests[user]
        confs.user_query = message.text or ""
        logging.info(f"@{user.username} started the team with [{confs.team_model.label}, {confs.team_leader_model.label}, {confs.strategy.label}]")
        await self.__run_team(update, confs)
        logging.info(f"@{user.username} team finished.")
        return ConversationHandler.END

    async def __cancel(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
        """Drop the user's pending configuration and end the conversation."""
        query, user = await self.handle_callbackquery(update)
        logging.info(f"@{user.username} canceled the conversation.")
        if user in self.user_requests:
            del self.user_requests[user]
        await query.edit_message_text("Conversation canceled. Use /start to begin again.")
        return ConversationHandler.END

    async def __run_team(self, update: Update, inputs: PipelineInputs) -> None:
        """Run the pipeline and deliver the report to the chat as a PDF."""
        if not update.message: return
        bot = update.get_bot()
        # NOTE(review): assumes the config-menu message is the one right before
        # the user's query — confirm this holds under concurrent updates.
        msg_id = update.message.message_id - 1
        chat_id = update.message.chat_id
        configs_str = [
            'Running with configurations: ',
            f'Team: {inputs.team_model.label}',
            f'Output: {inputs.team_leader_model.label}',
            f'Strategy: {inputs.strategy.label}',
            f'Query: "{inputs.user_query}"'
        ]
        # Joined outside the f-string: backslashes inside f-string expressions
        # are a syntax error before Python 3.12.
        joined_configs = '\n'.join(configs_str)
        full_message = f"""```\n{joined_configs}\n```\n\n"""
        first_message = full_message + "Generating report, please wait"
        msg = await bot.edit_message_text(chat_id=chat_id, message_id=msg_id, text=first_message, parse_mode='MarkdownV2')
        if isinstance(msg, bool): return
        # Remove user query and bot message
        await bot.delete_message(chat_id=chat_id, message_id=update.message.id)
        # TODO: improve the waiting messages
        await bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
        pipeline = Pipeline(inputs)
        report_content = await pipeline.interact_async()
        await msg.delete()
        # Attach the report as a PDF rendered from the markdown output.
        pdf = MarkdownPdf(toc_level=2, optimize=True)
        pdf.add_section(Section(report_content, toc=False))
        # TODO: decide whether the PDF is needed or the message alone suffices
        document = io.BytesIO()
        pdf.save_bytes(document)
        document.seek(0)
        await bot.send_document(chat_id=chat_id, document=document, filename="report.pdf", parse_mode='MarkdownV2', caption=full_message)

View File

@@ -1,106 +0,0 @@
from agno.tools import Toolkit
from app.utils.wrapper_handler import WrapperHandler
from app.utils.market_aggregation import aggregate_product_info, aggregate_history_prices
from .base import BaseWrapper, ProductInfo, Price
from .coinbase import CoinBaseWrapper
from .binance import BinanceWrapper
from .cryptocompare import CryptoCompareWrapper
from .yfinance import YFinanceWrapper
__all__ = [ "MarketAPIsTool", "BinanceWrapper", "CoinBaseWrapper", "CryptoCompareWrapper", "YFinanceWrapper", "MARKET_INSTRUCTIONS" ]
class MarketAPIsTool(BaseWrapper, Toolkit):
    """
    Class that aggregates multiple market API wrappers and manages them using WrapperHandler.
    This class supports retrieving product information and historical prices.
    This class can also aggregate data from multiple sources to provide a more comprehensive view of the market.

    The following wrappers are included in this order:
    - BinanceWrapper
    - YFinanceWrapper
    - CoinBaseWrapper
    - CryptoCompareWrapper
    """
    def __init__(self, currency: str = "USD"):
        """
        Initialize the MarketAPIsTool with multiple market API wrappers.

        The following wrappers are included in this order:
        - BinanceWrapper
        - YFinanceWrapper
        - CoinBaseWrapper
        - CryptoCompareWrapper

        Args:
            currency (str): Currency in which prices are returned. Defaults to "USD".
        """
        kwargs = {"currency": currency or "USD"}
        wrappers = [ BinanceWrapper, YFinanceWrapper, CoinBaseWrapper, CryptoCompareWrapper ]
        # Only the wrappers that initialize successfully end up in the handler.
        self.wrappers: WrapperHandler[BaseWrapper] = WrapperHandler.build_wrappers(wrappers, kwargs=kwargs)
        Toolkit.__init__(
            self,
            name="Market APIs Toolkit",
            tools=[
                self.get_product,
                self.get_products,
                self.get_historical_prices,
                self.get_products_aggregated,
                self.get_historical_prices_aggregated,
            ],
        )

    def get_product(self, asset_id: str) -> ProductInfo:
        # Delegates to the wrapper chain; the handler falls through on failures.
        return self.wrappers.try_call(lambda w: w.get_product(asset_id))

    def get_products(self, asset_ids: list[str]) -> list[ProductInfo]:
        return self.wrappers.try_call(lambda w: w.get_products(asset_ids))

    def get_historical_prices(self, asset_id: str = "BTC", limit: int = 100) -> list[Price]:
        return self.wrappers.try_call(lambda w: w.get_historical_prices(asset_id, limit))

    def get_products_aggregated(self, asset_ids: list[str]) -> list[ProductInfo]:
        """
        Return aggregated data for a list of asset ids.

        Warning: every source is queried, so this may spend many API calls
        (some of which may be paid).

        Args:
            asset_ids (list[str]): List of asset ids to look up.

        Returns:
            list[ProductInfo]: Aggregated ProductInfo entries.
        """
        all_products = self.wrappers.try_call_all(lambda w: w.get_products(asset_ids))
        return aggregate_product_info(all_products)

    def get_historical_prices_aggregated(self, asset_id: str = "BTC", limit: int = 100) -> list[Price]:
        """
        Return aggregated historical data for an asset id, combining every available source.

        Warning: every source is queried, so this may spend many API calls
        (some of which may be paid).

        Args:
            asset_id (str): Asset id to look up.
            limit (int): Maximum number of historical data points to return.

        Returns:
            list[Price]: Aggregated Price entries.
        """
        all_prices = self.wrappers.try_call_all(lambda w: w.get_historical_prices(asset_id, limit))
        return aggregate_history_prices(all_prices)
MARKET_INSTRUCTIONS = """
**TASK:** You are a specialized **Crypto Price Data Retrieval Agent**. Your primary goal is to fetch the most recent and/or historical price data for requested cryptocurrency assets (e.g., 'BTC', 'ETH', 'SOL'). You must provide the data in a clear and structured format.
**AVAILABLE TOOLS:**
1. `get_products(asset_ids: list[str])`: Get **current** product/price info for a list of assets. **(PREFERITA: usa questa per i prezzi live)**
2. `get_historical_prices(asset_id: str, limit: int)`: Get historical price data for one asset. Default limit is 100. **(PREFERITA: usa questa per i dati storici)**
3. `get_products_aggregated(asset_ids: list[str])`: Get **aggregated current** product/price info for a list of assets. **(USA SOLO SE richiesto 'aggregato' o se `get_products` fallisce)**
4. `get_historical_prices_aggregated(asset_id: str, limit: int)`: Get **aggregated historical** price data for one asset. **(USA SOLO SE richiesto 'aggregato' o se `get_historical_prices` fallisce)**
**USAGE GUIDELINE:**
* **Asset ID:** Always convert common names (e.g., 'Bitcoin', 'Ethereum') into their official ticker/ID (e.g., 'BTC', 'ETH').
* **Cost Management (Cruciale per LLM locale):**
* **Priorità Bassa per Aggregazione:** **Non** usare i metodi `*aggregated` a meno che l'utente non lo richieda esplicitamente o se i metodi non-aggregati falliscono.
* **Limitazione Storica:** Il limite predefinito per i dati storici deve essere **20** punti dati, a meno che l'utente non specifichi un limite diverso.
* **Fallimento Tool:** Se lo strumento non restituisce dati per un asset specifico, rispondi per quell'asset con: "Dati di prezzo non trovati per [Asset ID]."
**REPORTING REQUIREMENT:**
1. **Format:** Output the results in a clear, easy-to-read list or table.
2. **Live Price Request:** If an asset's *current price* is requested, report the **Asset ID**, **Latest Price**, and **Time/Date of the price**.
3. **Historical Price Request:** If *historical data* is requested, report the **Asset ID**, the **Limit** of points returned, and the **First** and **Last** entries from the list of historical prices (Date, Price). Non stampare l'intera lista di dati storici.
4. **Output:** For all requests, fornire un **unico e conciso riepilogo** dei dati reperiti.
"""

View File

@@ -1,61 +0,0 @@
from pydantic import BaseModel
class BaseWrapper:
    """
    Abstract interface for market API wrappers.

    Concrete wrappers (Binance, Coinbase, ...) subclass this and implement
    every method; the base implementations only raise NotImplementedError.
    """

    def get_product(self, asset_id: str) -> 'ProductInfo':
        """
        Fetch product information for a single asset.

        Args:
            asset_id (str): The asset ID to retrieve information for.

        Returns:
            ProductInfo: An object containing product information.

        Raises:
            NotImplementedError: always, on the base class.
        """
        raise NotImplementedError("This method should be overridden by subclasses")

    def get_products(self, asset_ids: list[str]) -> list['ProductInfo']:
        """
        Fetch product information for several assets at once.

        Args:
            asset_ids (list[str]): The list of asset IDs to retrieve information for.

        Returns:
            list[ProductInfo]: A list of objects containing product information.

        Raises:
            NotImplementedError: always, on the base class.
        """
        raise NotImplementedError("This method should be overridden by subclasses")

    def get_historical_prices(self, asset_id: str = "BTC", limit: int = 100) -> list['Price']:
        """
        Fetch historical price data for a single asset.

        Args:
            asset_id (str): The asset ID to retrieve price data for.
            limit (int): The maximum number of price data points to return.

        Returns:
            list[Price]: A list of Price objects.

        Raises:
            NotImplementedError: always, on the base class.
        """
        raise NotImplementedError("This method should be overridden by subclasses")
class ProductInfo(BaseModel):
    """
    Product information as obtained from the market APIs,
    normalized to a common shape across wrappers.
    """
    id: str = ""              # provider-specific product/pair identifier
    symbol: str = ""          # asset ticker (quote currency stripped by the wrappers)
    price: float = 0.0        # latest price, expressed in quote_currency
    volume_24h: float = 0.0   # traded volume over the last 24 hours
    quote_currency: str = ""  # currency the price is quoted in
class Price(BaseModel):
    """
    One price data point (candle) for an asset, as obtained from the
    market APIs and normalized across wrappers.
    """
    high: float = 0.0    # highest price in the interval
    low: float = 0.0     # lowest price in the interval
    open: float = 0.0    # price at the start of the interval
    close: float = 0.0   # price at the end of the interval
    volume: float = 0.0  # traded volume in the interval
    timestamp_ms: int = 0 # Timestamp in milliseconds

View File

@@ -1,76 +0,0 @@
import os
from datetime import datetime
from binance.client import Client
from .base import ProductInfo, BaseWrapper, Price
def get_product(currency: str, ticker_data: dict[str, str]) -> ProductInfo:
    """
    Map a raw Binance ticker payload onto a ProductInfo.

    Args:
        currency: quote currency used by the wrapper (e.g. "USDT").
        ticker_data: raw ticker dict from the Binance API; expected keys are
            'symbol', 'price' and 'volume' (the latter two default to 0).

    Returns:
        A populated ProductInfo instance.
    """
    product = ProductInfo()
    # Fix: default to "" so a missing 'symbol' key cannot put None into a str field.
    product.id = ticker_data.get('symbol', '')
    product.symbol = ticker_data.get('symbol', '').replace(currency, '')
    product.price = float(ticker_data.get('price', 0))
    product.volume_24h = float(ticker_data.get('volume', 0))
    product.quote_currency = currency
    return product
def get_price(kline_data: list) -> Price:
    """
    Convert one raw Binance kline row into a Price instance.

    Kline layout (first six fields): [open_time_ms, open, high, low, close, volume].
    """
    timestamp, open_value, high_value, low_value, close_value, volume_value = kline_data[:6]
    price = Price()
    price.open = float(open_value)
    price.high = float(high_value)
    price.low = float(low_value)
    price.close = float(close_value)
    price.volume = float(volume_value)
    price.timestamp_ms = timestamp
    return price
class BinanceWrapper(BaseWrapper):
    """
    Wrapper for Binance's authenticated APIs.

    Implements the BaseWrapper interface to give unified access to Binance
    market data through the authenticated REST APIs.

    https://binance-docs.github.io/apidocs/spot/en/
    """
    def __init__(self, currency: str = "USDT"):
        # NOTE(review): the credentials come from the environment and may be
        # None — confirm public endpoints suffice in that case.
        api_key = os.getenv("BINANCE_API_KEY")
        api_secret = os.getenv("BINANCE_API_SECRET")
        self.currency = currency
        self.client = Client(api_key=api_key, api_secret=api_secret)

    def __format_symbol(self, asset_id: str) -> str:
        """
        Format the asset_id in the form Binance expects
        ("BTC-USDT" -> "BTCUSDT", "BTC" -> "BTC" + quote currency).
        """
        return asset_id.replace('-', '') if '-' in asset_id else f"{asset_id}{self.currency}"

    def get_product(self, asset_id: str) -> ProductInfo:
        """Fetch current price and 24h volume for one asset."""
        symbol = self.__format_symbol(asset_id)
        ticker = self.client.get_symbol_ticker(symbol=symbol)
        ticker_24h = self.client.get_ticker(symbol=symbol)
        ticker['volume'] = ticker_24h.get('volume', 0) # Add the 24h volume to the ticker data
        return get_product(self.currency, ticker)

    def get_products(self, asset_ids: list[str]) -> list[ProductInfo]:
        """Fetch current prices and 24h volumes for several assets in two batch calls."""
        symbols = [self.__format_symbol(asset_id) for asset_id in asset_ids]
        # JSON-array string of symbols, as the Binance endpoints expect.
        # NOTE(review): the nested double quotes in this f-string expression require Python >= 3.12.
        symbols_str = f"[\"{'","'.join(symbols)}\"]"
        tickers = self.client.get_symbol_ticker(symbols=symbols_str)
        tickers_24h = self.client.get_ticker(symbols=symbols_str) # a bit brute-force, but fine here
        # Assumes both endpoints return the symbols in the same order.
        for t, t24 in zip(tickers, tickers_24h):
            t['volume'] = t24.get('volume', 0)
        return [get_product(self.currency, ticker) for ticker in tickers]

    def get_historical_prices(self, asset_id: str = "BTC", limit: int = 100) -> list[Price]:
        """Fetch the most recent `limit` hourly candles for one asset."""
        symbol = self.__format_symbol(asset_id)
        # Hourly candles, most recent `limit` entries (no start date is given).
        klines = self.client.get_historical_klines(
            symbol=symbol,
            interval=Client.KLINE_INTERVAL_1HOUR,
            limit=limit,
        )
        return [get_price(kline) for kline in klines]

View File

@@ -1,130 +0,0 @@
import os
import requests
from enum import Enum
from agno.agent import Agent
from agno.models.base import Model
from agno.models.google import Gemini
from agno.models.ollama import Ollama
from agno.models.openai import OpenAIChat
from agno.models.deepseek import DeepSeek
from agno.utils.log import log_warning
from agno.tools import Toolkit
from pydantic import BaseModel
class AppModels(Enum):
    """
    Enum of the supported LLM models.
    Add new models here when needed.
    Ollama models must be downloaded and installed locally, following the
    instructions at https://ollama.com/docs/guide/install-models
    """
    GEMINI = "gemini-2.0-flash"  # online API
    GEMINI_PRO = "gemini-2.0-pro"  # online API, more expensive but better
    GPT_4 = "gpt-4"
    DEEPSEEK = "deepseek-chat"
    OLLAMA_GPT = "gpt-oss:latest"  # + good - slow (13b)
    OLLAMA_QWEN = "qwen3:latest"  # + good + fast (8b)
    OLLAMA_QWEN_4B = "qwen3:4b"  # + fast + decent (4b)
    OLLAMA_QWEN_1B = "qwen3:1.7b"  # + very fast + decent (1.7b)

    @staticmethod
    def availables_local() -> list['AppModels']:
        """
        Check which local LLM providers are available.
        Returns the list of available providers (empty when Ollama is
        unreachable or none of the known models are installed).
        """
        ollama_host = os.getenv("OLLAMA_HOST", "http://localhost:11434")
        try:
            # Explicit timeout: without one, an unreachable host can block
            # this call indefinitely.
            result = requests.get(f"{ollama_host}/api/tags", timeout=5)
        except requests.RequestException as exc:
            # When Ollama is not running at all, requests raises (e.g.
            # ConnectionError) instead of returning an error status; treat
            # that as "no local models" rather than crashing the caller.
            log_warning(f"Ollama is not running or not reachable {exc}")
            return []
        if result.status_code != 200:
            log_warning(f"Ollama is not running or not reachable {result}")
            return []
        availables = []
        body = result.text
        # Substring check against the raw tag listing is intentionally simple:
        # the model name appearing anywhere means the model is installed.
        for model in [model for model in AppModels if model.name.startswith("OLLAMA")]:
            if model.value in body:
                availables.append(model)
        return availables

    @staticmethod
    def availables_online() -> list['AppModels']:
        """
        Check which online LLM providers have their API keys set as
        environment variables and return the list of available providers.
        """
        availables = []
        if not os.getenv("GOOGLE_API_KEY"):
            log_warning("No GOOGLE_API_KEY set in environment variables.")
        else:
            availables.append(AppModels.GEMINI)
            availables.append(AppModels.GEMINI_PRO)
        if not os.getenv("OPENAI_API_KEY"):
            log_warning("No OPENAI_API_KEY set in environment variables.")
        else:
            availables.append(AppModels.GPT_4)
        if not os.getenv("DEEPSEEK_API_KEY"):
            log_warning("No DEEPSEEK_API_KEY set in environment variables.")
        else:
            availables.append(AppModels.DEEPSEEK)
        return availables

    @staticmethod
    def availables() -> list['AppModels']:
        """
        Combine the online providers (API key present) with the local ones
        (Ollama models installed) and return the availability list.
        Preference order:
        1. Gemini (Google)
        2. OpenAI
        3. DeepSeek
        4. Ollama (local)
        """
        availables = [
            *AppModels.availables_online(),
            *AppModels.availables_local()
        ]
        assert availables, "No valid model API keys set in environment variables."
        return availables

    def get_model(self, instructions: str) -> Model:
        """
        Return an instance of the selected model.
        Args:
            instructions: instructions passed to the model (system prompt).
        Returns:
            A Model instance for this enum member.
        Raises:
            ValueError if the model is not supported.
        """
        name = self.value
        # Dispatch on the enum member's name prefix.
        if self in {model for model in AppModels if model.name.startswith("GEMINI")}:
            return Gemini(name, instructions=[instructions])
        elif self in {model for model in AppModels if model.name.startswith("OLLAMA")}:
            return Ollama(name, instructions=[instructions])
        elif self in {model for model in AppModels if model.name.startswith("GPT")}:
            return OpenAIChat(name, instructions=[instructions])
        elif self in {model for model in AppModels if model.name.startswith("DEEPSEEK")}:
            return DeepSeek(name, instructions=[instructions])
        raise ValueError(f"Modello non supportato: {self}")

    def get_agent(self, instructions: str, name: str = "", output: BaseModel | None = None, tools: list[Toolkit] | None = None) -> Agent:
        """
        Build an agent with the given model and instructions.
        Args:
            instructions: instructions passed to the model (system prompt)
            name: agent name (optional)
            output: optional output schema (Pydantic BaseModel)
            tools: optional list of toolkits made available to the agent
        Returns:
            An Agent instance.
        """
        return Agent(
            model=self.get_model(instructions),
            name=name,
            retries=2,
            # None default avoids the shared-mutable-default pitfall of 'tools=[]'.
            tools=tools if tools is not None else [],
            delay_between_retries=5,  # seconds
            output_schema=output  # pass the output schema here when one is used
            # TODO other agent parameters could go here, though they can also be assigned after creation
        )

View File

@@ -1,94 +0,0 @@
from agno.tools import Toolkit
from app.utils.wrapper_handler import WrapperHandler
from .base import NewsWrapper, Article
from .news_api import NewsApiWrapper
from .googlenews import GoogleNewsWrapper
from .cryptopanic_api import CryptoPanicWrapper
from .duckduckgo import DuckDuckGoWrapper
__all__ = ["NewsAPIsTool", "NEWS_INSTRUCTIONS", "NewsApiWrapper", "GoogleNewsWrapper", "CryptoPanicWrapper", "DuckDuckGoWrapper"]
class NewsAPIsTool(NewsWrapper, Toolkit):
    """
    Aggregator over multiple news API wrappers, managed by a WrapperHandler.

    Sources queried (in order of preference):
    - GoogleNewsWrapper
    - DuckDuckGoWrapper
    - NewsApiWrapper
    - CryptoPanicWrapper

    The base methods return the result of the first wrapper that succeeds;
    the *_aggregated variants collect results from every wrapper. If no
    wrapper succeeds, an exception is raised.
    """

    def __init__(self):
        """
        Register the news API wrappers in preference order and expose the
        retrieval methods as Agno tools.
        """
        providers = [GoogleNewsWrapper, DuckDuckGoWrapper, NewsApiWrapper, CryptoPanicWrapper]
        self.wrapper_handler: WrapperHandler[NewsWrapper] = WrapperHandler.build_wrappers(providers)
        Toolkit.__init__(
            self,
            name="News APIs Toolkit",
            tools=[self.get_top_headlines, self.get_latest_news],
        )

    def get_top_headlines(self, limit: int = 100) -> list[Article]:
        """Return up to 'limit' top headlines from the first provider that succeeds."""
        return self.wrapper_handler.try_call(lambda w: w.get_top_headlines(limit))

    def get_latest_news(self, query: str, limit: int = 100) -> list[Article]:
        """Return up to 'limit' latest articles for 'query' from the first provider that succeeds."""
        return self.wrapper_handler.try_call(lambda w: w.get_latest_news(query, limit))

    def get_top_headlines_aggregated(self, limit: int = 100) -> dict[str, list[Article]]:
        """
        Call get_top_headlines on every provider and map each provider name
        to its articles.

        Args:
            limit (int): Maximum number of articles to retrieve from each provider.

        Returns:
            dict[str, list[Article]]: provider name -> list of Articles
        """
        return self.wrapper_handler.try_call_all(lambda w: w.get_top_headlines(limit))

    def get_latest_news_aggregated(self, query: str, limit: int = 100) -> dict[str, list[Article]]:
        """
        Call get_latest_news on every provider and map each provider name
        to its articles.

        Args:
            query (str): The search query to find relevant news articles.
            limit (int): Maximum number of articles to retrieve from each provider.

        Returns:
            dict[str, list[Article]]: provider name -> list of Articles
        """
        return self.wrapper_handler.try_call_all(lambda w: w.get_latest_news(query, limit))
# System prompt for the news-analysis agent (passed to get_agent by the
# pipeline). This is a runtime string consumed by the LLM: its wording
# shapes model behaviour, so edit with care.
NEWS_INSTRUCTIONS = """
**TASK:** You are a specialized **Crypto News Analyst**. Your goal is to fetch the latest news or top headlines related to cryptocurrencies, and then **analyze the sentiment** of the content to provide a concise report to the team leader. Prioritize 'crypto' or specific cryptocurrency names (e.g., 'Bitcoin', 'Ethereum') in your searches.
**AVAILABLE TOOLS:**
1. `get_latest_news(query: str, limit: int)`: Get the 'limit' most recent news articles for a specific 'query'.
2. `get_top_headlines(limit: int)`: Get the 'limit' top global news headlines.
3. `get_latest_news_aggregated(query: str, limit: int)`: Get aggregated latest news articles for a specific 'query'.
4. `get_top_headlines_aggregated(limit: int)`: Get aggregated top global news headlines.
**USAGE GUIDELINE:**
* Always use `get_latest_news` with a relevant crypto-related query first.
* The default limit for news items should be 5 unless specified otherwise.
* If the tool doesn't return any articles, respond with "No relevant news articles found."
**REPORTING REQUIREMENT:**
1. **Analyze** the tone and key themes of the retrieved articles.
2. **Summarize** the overall **market sentiment** (e.g., highly positive, cautiously neutral, generally negative) based on the content.
3. **Identify** the top 2-3 **main topics** discussed (e.g., new regulation, price surge, institutional adoption).
4. **Output** a single, brief report summarizing these findings. Do not output the raw articles.
"""

View File

@@ -1,148 +0,0 @@
from agno.run.agent import RunOutput
from agno.team import Team
from app.news import NewsAPIsTool, NEWS_INSTRUCTIONS
from app.social import SocialAPIsTool, SOCIAL_INSTRUCTIONS
from app.markets import MarketAPIsTool, MARKET_INSTRUCTIONS
from app.models import AppModels
from app.predictor import PredictorStyle, PredictorInput, PredictorOutput, PREDICTOR_INSTRUCTIONS
class Pipeline:
    """
    Coordinates the service agents (Market, News, Social) and the final Predictor.
    The Team is orchestrated by qwen3:latest (Ollama), while the Predictor is
    dynamic and selected by the user through the GUI dropdowns.
    """
    def __init__(self):
        # Service agents, one per data domain, each with its own toolkit.
        self.market_agent = AppModels.OLLAMA_QWEN.get_agent(
            instructions=MARKET_INSTRUCTIONS,
            name="MarketAgent",
            tools=[MarketAPIsTool()]
        )
        self.news_agent = AppModels.OLLAMA_QWEN.get_agent(
            instructions=NEWS_INSTRUCTIONS,
            name="NewsAgent",
            tools=[NewsAPIsTool()]
        )
        self.social_agent = AppModels.OLLAMA_QWEN.get_agent(
            instructions=SOCIAL_INSTRUCTIONS,
            name="SocialAgent",
            tools=[SocialAPIsTool()]
        )
        # === Team orchestration model ===
        team_model = AppModels.OLLAMA_QWEN.get_model(
            # TODO: improve the team instructions
            "Agisci come coordinatore: smista le richieste tra MarketAgent, NewsAgent e SocialAgent."
        )
        # === Team ===
        self.team = Team(
            name="CryptoAnalysisTeam",
            members=[self.market_agent, self.news_agent, self.social_agent],
            model=team_model
        )
        # === Predictor ===
        self.available_models = AppModels.availables()
        self.all_styles = list(PredictorStyle)
        # Default choices
        self.chosen_model = self.available_models[0] if self.available_models else None
        self.style = self.all_styles[0] if self.all_styles else None
        # Always define the attribute: _init_predictor() returns early when no
        # model is chosen, and interact() must be able to test self.predictor
        # without raising AttributeError.
        self.predictor = None
        self._init_predictor()  # Initialize the predictor with the default model
    # ======================
    # Dropdown handlers
    # ======================
    def choose_provider(self, index: int):
        """
        Select the LLM model used by the Predictor.
        """
        self.chosen_model = self.available_models[index]
        self._init_predictor()
    def choose_style(self, index: int):
        """
        Select the style (conservative/aggressive) used by the Predictor.
        """
        self.style = self.all_styles[index]
    # ======================
    # Helpers
    # ======================
    def _init_predictor(self):
        """
        Initialize (or reinitialize) the Predictor for the currently chosen model.
        """
        if not self.chosen_model:
            return
        self.predictor = self.chosen_model.get_agent(
            PREDICTOR_INSTRUCTIONS,
            output=PredictorOutput,  # type: ignore
        )
    def list_providers(self) -> list[str]:
        """
        Return the names of the available models.
        """
        return [model.name for model in self.available_models]
    def list_styles(self) -> list[str]:
        """
        Return the available prediction styles.
        """
        return [style.value for style in self.all_styles]
    # ======================
    # Core interaction
    # ======================
    def interact(self, query: str) -> str:
        """
        1. Collect outputs from the Team members
        2. Aggregate the structured outputs
        3. Invoke the Predictor
        4. Return the final strategy
        """
        if not self.predictor or not self.style:
            return "⚠️ Devi prima selezionare un modello e una strategia validi dagli appositi menu."
        # Step 1: collect outputs from the Team members
        team_outputs = self.team.run(query)
        # Step 2: aggregate the structured outputs
        all_products = []
        sentiments = []
        for agent_output in team_outputs.member_responses:
            if isinstance(agent_output, RunOutput):
                # metadata may be None when an agent produced no structured
                # data: guard before the membership tests to avoid a TypeError.
                metadata = agent_output.metadata or {}
                if "products" in metadata:
                    all_products.extend(metadata["products"])
                if "sentiment_news" in metadata:
                    sentiments.append(metadata["sentiment_news"])
                if "sentiment_social" in metadata:
                    sentiments.append(metadata["sentiment_social"])
        aggregated_sentiment = "\n".join(sentiments)
        # Step 3: invoke the Predictor
        predictor_input = PredictorInput(
            data=all_products,
            style=self.style,
            sentiment=aggregated_sentiment
        )
        result = self.predictor.run(predictor_input)
        prediction: PredictorOutput = result.content
        # Step 4: return the final strategy
        portfolio_lines = "\n".join(
            [f"{item.asset} ({item.percentage}%): {item.motivation}" for item in prediction.portfolio]
        )
        return (
            f"📊 Strategia ({self.style.value}): {prediction.strategy}\n\n"
            f"💼 Portafoglio consigliato:\n{portfolio_lines}"
        )

View File

@@ -1,63 +0,0 @@
from agno.tools import Toolkit
from app.utils.wrapper_handler import WrapperHandler
from .base import SocialPost, SocialWrapper
from .reddit import RedditWrapper
from .x import XWrapper
from .chan import ChanWrapper
__all__ = ["SocialAPIsTool", "SOCIAL_INSTRUCTIONS", "RedditWrapper", "XWrapper", "ChanWrapper"]
class SocialAPIsTool(SocialWrapper, Toolkit):
    """
    Aggregates multiple social media API wrappers and manages them using WrapperHandler.
    This class supports retrieving top crypto-related posts by querying multiple sources:
    - RedditWrapper
    - XWrapper
    - ChanWrapper
    By default, it returns results from the first successful wrapper.
    Optionally, it can be configured to collect posts from all wrappers.
    If no wrapper succeeds, an exception is raised.
    """
    def __init__(self):
        """
        Initialize the SocialAPIsTool with multiple social media API wrappers.
        The tool uses WrapperHandler to manage and invoke the different social media API wrappers.
        The following wrappers are included in this order:
        - RedditWrapper.
        - XWrapper.
        - ChanWrapper.
        """
        wrappers = [RedditWrapper, XWrapper, ChanWrapper]
        self.wrapper_handler: WrapperHandler[SocialWrapper] = WrapperHandler.build_wrappers(wrappers)
        Toolkit.__init__(
            self,
            name="Socials Toolkit",
            tools=[
                self.get_top_crypto_posts,
            ],
        )
    # TODO Decide whether it makes sense to return posts from ALL wrappers or only the first that works;
    # the change is trivial: use try_call_all instead of try_call
    def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]:
        """Return up to 'limit' top crypto-related posts from the first wrapper that succeeds."""
        return self.wrapper_handler.try_call(lambda w: w.get_top_crypto_posts(limit))
# System prompt for the social-sentiment agent (passed to get_agent by the
# pipeline). This is a runtime string consumed by the LLM: its wording
# shapes model behaviour, so edit with care.
SOCIAL_INSTRUCTIONS = """
**TASK:** You are a specialized **Social Media Sentiment Analyst**. Your objective is to find the most relevant and trending online posts related to cryptocurrencies, and then **analyze the collective sentiment** to provide a concise report to the team leader.
**AVAILABLE TOOLS:**
1. `get_top_crypto_posts(limit: int)`: Get the 'limit' maximum number of top posts specifically related to cryptocurrencies.
**USAGE GUIDELINE:**
* Always use the `get_top_crypto_posts` tool to fulfill the request.
* The default limit for posts should be 5 unless specified otherwise.
* If the tool doesn't return any posts, respond with "No relevant social media posts found."
**REPORTING REQUIREMENT:**
1. **Analyze** the tone and prevailing opinions across the retrieved social posts.
2. **Summarize** the overall **community sentiment** (e.g., high enthusiasm/FOMO, uncertainty, FUD/fear) based on the content.
3. **Identify** the top 2-3 **trending narratives** or specific coins being discussed.
4. **Output** a single, brief report summarizing these findings. Do not output the raw posts.
"""

View File

@@ -1,91 +0,0 @@
import statistics
from app.markets.base import ProductInfo, Price
def aggregate_history_prices(prices: dict[str, list[Price]]) -> list[Price]:
    """
    Aggregate historical prices across providers by averaging per hour.

    Args:
        prices (dict[str, list[Price]]): map provider -> list of Price

    Returns:
        list[Price]: one averaged Price per hourly bucket
    """
    # Bucket every Price by its hour-aligned timestamp.
    buckets: dict[int, list[Price]] = {}
    for price_list in prices.values():
        for price in price_list:
            # Round down to the hour (inputs should already be aligned;
            # kept as a safety net).
            bucket_key = price.timestamp_ms - (price.timestamp_ms % 3600000)
            buckets.setdefault(bucket_key, []).append(price)
    # Average each OHLCV field within every hourly bucket.
    aggregated: list[Price] = []
    for bucket_key, bucket in buckets.items():
        merged = Price()
        merged.timestamp_ms = bucket_key
        merged.high = statistics.mean(p.high for p in bucket)
        merged.low = statistics.mean(p.low for p in bucket)
        merged.open = statistics.mean(p.open for p in bucket)
        merged.close = statistics.mean(p.close for p in bucket)
        merged.volume = statistics.mean(p.volume for p in bucket)
        aggregated.append(merged)
    return aggregated
def aggregate_product_info(products: dict[str, list[ProductInfo]]) -> list[ProductInfo]:
    """
    Aggregate ProductInfo entries by symbol across providers.

    The aggregated price is volume-weighted across providers; the 24h volume
    is the average of the per-provider volumes.

    Args:
        products (dict[str, list[ProductInfo]]): map provider -> list of ProductInfo

    Returns:
        list[ProductInfo]: one aggregated ProductInfo per symbol
    """
    # Build map symbol -> list of ProductInfo
    symbols_infos: dict[str, list[ProductInfo]] = {}
    for product_list in products.values():
        for product in product_list:
            symbols_infos.setdefault(product.symbol, []).append(product)
    # Aggregate each symbol's entries.
    aggregated_products = []
    for symbol, product_list in symbols_infos.items():
        product = ProductInfo()
        product.id = f"{symbol}_AGGREGATED"
        product.symbol = symbol
        # Fall back to "" instead of raising StopIteration when no entry
        # carries a quote currency.
        product.quote_currency = next((p.quote_currency for p in product_list if p.quote_currency), "")
        volume_sum = sum(p.volume_24h for p in product_list)
        # Average 24h volume across providers.
        product.volume_24h = volume_sum / len(product_list) if product_list else 0.0
        # Volume-weighted average price; 0.0 when no volume information exists.
        weighted_prices = sum(p.price * p.volume_24h for p in product_list)
        product.price = (weighted_prices / volume_sum) if volume_sum > 0 else 0.0
        aggregated_products.append(product)
    return aggregated_products
def _calculate_confidence(products: list[ProductInfo], sources: list[str]) -> float:
    """Compute a confidence score in [0, 1] for a set of aggregated products."""
    if not products:
        return 0.0
    score = 1.0
    # Penalise when fewer than two data points back the aggregate.
    if len(products) < 2:
        score *= 0.7
    # Penalise when provider prices disagree by more than 5% of their mean.
    positive_prices = [p.price for p in products if p.price > 0]
    if len(positive_prices) > 1:
        spread = (max(positive_prices) - min(positive_prices)) / statistics.mean(positive_prices)
        if spread > 0.05:  # >5% variation
            score *= 0.8
    # Penalise proportionally to the share of unknown sources.
    unknown_count = sources.count("unknown")
    if unknown_count > 0:
        score *= (1 - unknown_count / len(sources))
    return max(0.0, min(1.0, score))

View File

@@ -1,50 +0,0 @@
import pytest
from app.predictor import PREDICTOR_INSTRUCTIONS, PredictorInput, PredictorOutput, PredictorStyle
from app.markets.base import ProductInfo
from app.models import AppModels
def unified_checks(model: AppModels, input):
    """Run the predictor agent on 'input' and validate its structured output."""
    agent = model.get_agent(PREDICTOR_INSTRUCTIONS, output=PredictorOutput)  # type: ignore[arg-type]
    content = agent.run(input).content
    assert isinstance(content, PredictorOutput)
    # The strategy must be a meaningful, non-placeholder string.
    assert isinstance(content.strategy, str)
    assert content.strategy not in (None, "", "null")
    # The portfolio must be a non-empty list of fully-populated entries.
    assert isinstance(content.portfolio, list)
    assert len(content.portfolio) > 0
    for item in content.portfolio:
        assert isinstance(item.asset, str)
        assert item.asset not in (None, "", "null")
        assert isinstance(item.percentage, (int, float))
        assert 0.0 <= item.percentage <= 100.0
        assert isinstance(item.motivation, str)
        assert item.motivation not in (None, "", "null")
    # The percentages must add up to exactly 100.
    total_percentage = sum(item.percentage for item in content.portfolio)
    assert abs(total_percentage - 100) < 0.01  # small tolerance for rounding errors
class TestPredictor:
    """End-to-end checks that several LLM backends return well-formed predictor output."""
    @pytest.fixture(scope="class")
    def inputs(self):
        """Build a fixed PredictorInput with three sample assets and a positive sentiment."""
        data = []
        for symbol, price in [("BTC", 60000.00), ("ETH", 3500.00), ("SOL", 150.00)]:
            product_info = ProductInfo()
            product_info.symbol = symbol
            product_info.price = price
            data.append(product_info)
        return PredictorInput(data=data, style=PredictorStyle.AGGRESSIVE, sentiment="positivo")
    def test_gemini_model_output(self, inputs):
        """Gemini (online API) must produce a valid PredictorOutput."""
        unified_checks(AppModels.GEMINI, inputs)
    @pytest.mark.slow
    def test_ollama_qwen_model_output(self, inputs):
        """Local qwen3 model must produce a valid PredictorOutput (slow)."""
        unified_checks(AppModels.OLLAMA_QWEN, inputs)
    @pytest.mark.slow
    def test_ollama_gpt_oss_model_output(self, inputs):
        """Local gpt-oss model must produce a valid PredictorOutput (slow)."""
        unified_checks(AppModels.OLLAMA_GPT, inputs)

View File

@@ -1,5 +1,18 @@
import pytest import pytest
from app.markets.binance import BinanceWrapper import asyncio
from app.api.markets.binance import BinanceWrapper
# fix warning about no event loop
@pytest.fixture(scope="session", autouse=True)
def event_loop():
"""
Ensure there is an event loop for the duration of the tests.
"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
yield loop
loop.close()
@pytest.mark.market @pytest.mark.market
@pytest.mark.api @pytest.mark.api
@@ -45,9 +58,24 @@ class TestBinance:
assert isinstance(history, list) assert isinstance(history, list)
assert len(history) == 5 assert len(history) == 5
for entry in history: for entry in history:
assert hasattr(entry, 'timestamp_ms') assert hasattr(entry, 'timestamp')
assert hasattr(entry, 'close') assert hasattr(entry, 'close')
assert hasattr(entry, 'high') assert hasattr(entry, 'high')
assert entry.close > 0 assert entry.close > 0
assert entry.high > 0 assert entry.high > 0
assert entry.timestamp_ms > 0 assert entry.timestamp != ''
def test_binance_fiat_conversion(self):
market = BinanceWrapper(currency="USD")
assert market.currency == "USDT"
product = market.get_product("BTC")
assert product is not None
assert product.symbol == "BTC"
assert product.price > 0
market = BinanceWrapper(currency="EUR")
assert market.currency == "EUR"
product = market.get_product("BTC")
assert product is not None
assert product.symbol == "BTC"
assert product.price > 0

View File

@@ -1,6 +1,6 @@
import os import os
import pytest import pytest
from app.markets import CoinBaseWrapper from app.api.markets import CoinBaseWrapper
@pytest.mark.market @pytest.mark.market
@pytest.mark.api @pytest.mark.api
@@ -47,9 +47,9 @@ class TestCoinBase:
assert isinstance(history, list) assert isinstance(history, list)
assert len(history) == 5 assert len(history) == 5
for entry in history: for entry in history:
assert hasattr(entry, 'timestamp_ms') assert hasattr(entry, 'timestamp')
assert hasattr(entry, 'close') assert hasattr(entry, 'close')
assert hasattr(entry, 'high') assert hasattr(entry, 'high')
assert entry.close > 0 assert entry.close > 0
assert entry.high > 0 assert entry.high > 0
assert entry.timestamp_ms > 0 assert entry.timestamp != ''

View File

@@ -1,6 +1,6 @@
import os import os
import pytest import pytest
from app.markets import CryptoCompareWrapper from app.api.markets import CryptoCompareWrapper
@pytest.mark.market @pytest.mark.market
@pytest.mark.api @pytest.mark.api
@@ -49,9 +49,9 @@ class TestCryptoCompare:
assert isinstance(history, list) assert isinstance(history, list)
assert len(history) == 5 assert len(history) == 5
for entry in history: for entry in history:
assert hasattr(entry, 'timestamp_ms') assert hasattr(entry, 'timestamp')
assert hasattr(entry, 'close') assert hasattr(entry, 'close')
assert hasattr(entry, 'high') assert hasattr(entry, 'high')
assert entry.close > 0 assert entry.close > 0
assert entry.high > 0 assert entry.high > 0
assert entry.timestamp_ms > 0 assert entry.timestamp != ''

View File

@@ -1,6 +1,6 @@
import os import os
import pytest import pytest
from app.news import CryptoPanicWrapper from app.api.news import CryptoPanicWrapper
@pytest.mark.limited @pytest.mark.limited

View File

@@ -1,5 +1,5 @@
import pytest import pytest
from app.news import DuckDuckGoWrapper from app.api.news import DuckDuckGoWrapper
@pytest.mark.news @pytest.mark.news

View File

@@ -1,5 +1,5 @@
import pytest import pytest
from app.news import GoogleNewsWrapper from app.api.news import GoogleNewsWrapper
@pytest.mark.news @pytest.mark.news

View File

@@ -1,6 +1,6 @@
import os import os
import pytest import pytest
from app.news import NewsApiWrapper from app.api.news import NewsApiWrapper
@pytest.mark.news @pytest.mark.news

View File

@@ -1,7 +1,6 @@
import os import os
import pytest import pytest
from praw import Reddit from app.api.social.reddit import MAX_COMMENTS, RedditWrapper
from app.social.reddit import MAX_COMMENTS, RedditWrapper
@pytest.mark.social @pytest.mark.social
@pytest.mark.api @pytest.mark.api
@@ -10,7 +9,7 @@ class TestRedditWrapper:
def test_initialization(self): def test_initialization(self):
wrapper = RedditWrapper() wrapper = RedditWrapper()
assert wrapper is not None assert wrapper is not None
assert isinstance(wrapper.tool, Reddit) assert wrapper.tool is not None
def test_get_top_crypto_posts(self): def test_get_top_crypto_posts(self):
wrapper = RedditWrapper() wrapper = RedditWrapper()

View File

@@ -1,5 +1,5 @@
import pytest import pytest
from app.markets import YFinanceWrapper from app.api.markets import YFinanceWrapper
@pytest.mark.market @pytest.mark.market
@pytest.mark.api @pytest.mark.api
@@ -48,9 +48,9 @@ class TestYFinance:
assert isinstance(history, list) assert isinstance(history, list)
assert len(history) == 5 assert len(history) == 5
for entry in history: for entry in history:
assert hasattr(entry, 'timestamp_ms') assert hasattr(entry, 'timestamp')
assert hasattr(entry, 'close') assert hasattr(entry, 'close')
assert hasattr(entry, 'high') assert hasattr(entry, 'high')
assert entry.close > 0 assert entry.close > 0
assert entry.high > 0 assert entry.high > 0
assert entry.timestamp_ms > 0 assert entry.timestamp != ''

View File

@@ -33,7 +33,7 @@ def pytest_configure(config:pytest.Config):
line = f"{marker[0]}: {marker[1]}" line = f"{marker[0]}: {marker[1]}"
config.addinivalue_line("markers", line) config.addinivalue_line("markers", line)
def pytest_collection_modifyitems(config, items): def pytest_collection_modifyitems(config: pytest.Config, items: list[pytest.Item]) -> None:
"""Modifica automaticamente degli item di test rimovendoli""" """Modifica automaticamente degli item di test rimovendoli"""
# Rimuovo i test "limited" e "slow" se non richiesti esplicitamente # Rimuovo i test "limited" e "slow" se non richiesti esplicitamente
mark_to_remove = ['limited', 'slow'] mark_to_remove = ['limited', 'slow']

View File

@@ -1,5 +1,5 @@
import pytest import pytest
from app.markets import MarketAPIsTool from app.api.tools import MarketAPIsTool
@pytest.mark.tools @pytest.mark.tools
@@ -7,15 +7,15 @@ from app.markets import MarketAPIsTool
@pytest.mark.api @pytest.mark.api
class TestMarketAPIsTool: class TestMarketAPIsTool:
def test_wrapper_initialization(self): def test_wrapper_initialization(self):
market_wrapper = MarketAPIsTool("USD") market_wrapper = MarketAPIsTool("EUR")
assert market_wrapper is not None assert market_wrapper is not None
assert hasattr(market_wrapper, 'get_product') assert hasattr(market_wrapper, 'get_product')
assert hasattr(market_wrapper, 'get_products') assert hasattr(market_wrapper, 'get_products')
assert hasattr(market_wrapper, 'get_historical_prices') assert hasattr(market_wrapper, 'get_historical_prices')
def test_wrapper_capabilities(self): def test_wrapper_capabilities(self):
market_wrapper = MarketAPIsTool("USD") market_wrapper = MarketAPIsTool("EUR")
capabilities = [] capabilities: list[str] = []
if hasattr(market_wrapper, 'get_product'): if hasattr(market_wrapper, 'get_product'):
capabilities.append('single_product') capabilities.append('single_product')
if hasattr(market_wrapper, 'get_products'): if hasattr(market_wrapper, 'get_products'):
@@ -25,7 +25,7 @@ class TestMarketAPIsTool:
assert len(capabilities) > 0 assert len(capabilities) > 0
def test_market_data_retrieval(self): def test_market_data_retrieval(self):
market_wrapper = MarketAPIsTool("USD") market_wrapper = MarketAPIsTool("EUR")
btc_product = market_wrapper.get_product("BTC") btc_product = market_wrapper.get_product("BTC")
assert btc_product is not None assert btc_product is not None
assert hasattr(btc_product, 'symbol') assert hasattr(btc_product, 'symbol')
@@ -34,8 +34,8 @@ class TestMarketAPIsTool:
def test_error_handling(self): def test_error_handling(self):
try: try:
market_wrapper = MarketAPIsTool("USD") market_wrapper = MarketAPIsTool("EUR")
fake_product = market_wrapper.get_product("NONEXISTENT_CRYPTO_SYMBOL_12345") fake_product = market_wrapper.get_product("NONEXISTENT_CRYPTO_SYMBOL_12345")
assert fake_product is None or fake_product.price == 0 assert fake_product is None or fake_product.price == 0
except Exception as e: except Exception as _:
pass pass

View File

@@ -1,5 +1,5 @@
import pytest import pytest
from app.news import NewsAPIsTool from app.api.tools import NewsAPIsTool
@pytest.mark.tools @pytest.mark.tools
@@ -12,7 +12,7 @@ class TestNewsAPITool:
def test_news_api_tool_get_top(self): def test_news_api_tool_get_top(self):
tool = NewsAPIsTool() tool = NewsAPIsTool()
result = tool.wrapper_handler.try_call(lambda w: w.get_top_headlines(limit=2)) result = tool.handler.try_call(lambda w: w.get_top_headlines(limit=2))
assert isinstance(result, list) assert isinstance(result, list)
assert len(result) > 0 assert len(result) > 0
for article in result: for article in result:
@@ -21,7 +21,7 @@ class TestNewsAPITool:
def test_news_api_tool_get_latest(self): def test_news_api_tool_get_latest(self):
tool = NewsAPIsTool() tool = NewsAPIsTool()
result = tool.wrapper_handler.try_call(lambda w: w.get_latest_news(query="crypto", limit=2)) result = tool.handler.try_call(lambda w: w.get_latest_news(query="crypto", limit=2))
assert isinstance(result, list) assert isinstance(result, list)
assert len(result) > 0 assert len(result) > 0
for article in result: for article in result:
@@ -30,20 +30,20 @@ class TestNewsAPITool:
def test_news_api_tool_get_top__all_results(self): def test_news_api_tool_get_top__all_results(self):
tool = NewsAPIsTool() tool = NewsAPIsTool()
result = tool.wrapper_handler.try_call_all(lambda w: w.get_top_headlines(limit=2)) result = tool.handler.try_call_all(lambda w: w.get_top_headlines(limit=2))
assert isinstance(result, dict) assert isinstance(result, dict)
assert len(result.keys()) > 0 assert len(result.keys()) > 0
for provider, articles in result.items(): for _provider, articles in result.items():
for article in articles: for article in articles:
assert article.title is not None assert article.title is not None
assert article.source is not None assert article.source is not None
def test_news_api_tool_get_latest__all_results(self): def test_news_api_tool_get_latest__all_results(self):
tool = NewsAPIsTool() tool = NewsAPIsTool()
result = tool.wrapper_handler.try_call_all(lambda w: w.get_latest_news(query="crypto", limit=2)) result = tool.handler.try_call_all(lambda w: w.get_latest_news(query="crypto", limit=2))
assert isinstance(result, dict) assert isinstance(result, dict)
assert len(result.keys()) > 0 assert len(result.keys()) > 0
for provider, articles in result.items(): for _provider, articles in result.items():
for article in articles: for article in articles:
assert article.title is not None assert article.title is not None
assert article.source is not None assert article.source is not None

View File

@@ -1,5 +1,5 @@
import pytest import pytest
from app.social import SocialAPIsTool from app.api.tools import SocialAPIsTool
@pytest.mark.tools @pytest.mark.tools
@@ -12,7 +12,7 @@ class TestSocialAPIsTool:
def test_social_api_tool_get_top(self): def test_social_api_tool_get_top(self):
tool = SocialAPIsTool() tool = SocialAPIsTool()
result = tool.wrapper_handler.try_call(lambda w: w.get_top_crypto_posts(limit=2)) result = tool.handler.try_call(lambda w: w.get_top_crypto_posts(limit=2))
assert isinstance(result, list) assert isinstance(result, list)
assert len(result) > 0 assert len(result) > 0
for post in result: for post in result:
@@ -21,10 +21,10 @@ class TestSocialAPIsTool:
def test_social_api_tool_get_top__all_results(self): def test_social_api_tool_get_top__all_results(self):
tool = SocialAPIsTool() tool = SocialAPIsTool()
result = tool.wrapper_handler.try_call_all(lambda w: w.get_top_crypto_posts(limit=2)) result = tool.handler.try_call_all(lambda w: w.get_top_crypto_posts(limit=2))
assert isinstance(result, dict) assert isinstance(result, dict)
assert len(result.keys()) > 0 assert len(result.keys()) > 0
for provider, posts in result.items(): for _provider, posts in result.items():
for post in posts: for post in posts:
assert post.title is not None assert post.title is not None
assert post.time is not None assert post.time is not None

View File

@@ -1,6 +1,6 @@
import pytest import pytest
from app.markets.base import ProductInfo, Price from datetime import datetime
from app.utils.market_aggregation import aggregate_history_prices, aggregate_product_info from app.api.core.markets import ProductInfo, Price
@pytest.mark.aggregator @pytest.mark.aggregator
@@ -13,12 +13,12 @@ class TestMarketDataAggregator:
prod.symbol=symbol prod.symbol=symbol
prod.price=price prod.price=price
prod.volume_24h=volume prod.volume_24h=volume
prod.quote_currency=currency prod.currency=currency
return prod return prod
def __price(self, timestamp_ms: int, high: float, low: float, open: float, close: float, volume: float) -> Price: def __price(self, timestamp_s: int, high: float, low: float, open: float, close: float, volume: float) -> Price:
price = Price() price = Price()
price.timestamp_ms = timestamp_ms price.set_timestamp(timestamp_s=timestamp_s)
price.high = high price.high = high
price.low = low price.low = low
price.open = open price.open = open
@@ -33,7 +33,7 @@ class TestMarketDataAggregator:
"Provider3": [self.__product("BTC", 49900.0, 900.0, "USD")], "Provider3": [self.__product("BTC", 49900.0, 900.0, "USD")],
} }
aggregated = aggregate_product_info(products) aggregated = ProductInfo.aggregate(products)
assert len(aggregated) == 1 assert len(aggregated) == 1
info = aggregated[0] info = aggregated[0]
@@ -41,9 +41,9 @@ class TestMarketDataAggregator:
assert info.symbol == "BTC" assert info.symbol == "BTC"
avg_weighted_price = (50000.0 * 1000.0 + 50100.0 * 1100.0 + 49900.0 * 900.0) / (1000.0 + 1100.0 + 900.0) avg_weighted_price = (50000.0 * 1000.0 + 50100.0 * 1100.0 + 49900.0 * 900.0) / (1000.0 + 1100.0 + 900.0)
assert info.price == pytest.approx(avg_weighted_price, rel=1e-3) assert info.price == pytest.approx(avg_weighted_price, rel=1e-3) # type: ignore
assert info.volume_24h == pytest.approx(1000.0, rel=1e-3) assert info.volume_24h == pytest.approx(1000.0, rel=1e-3) # type: ignore
assert info.quote_currency == "USD" assert info.currency == "USD"
def test_aggregate_product_info_multiple_symbols(self): def test_aggregate_product_info_multiple_symbols(self):
products = { products = {
@@ -57,7 +57,7 @@ class TestMarketDataAggregator:
], ],
} }
aggregated = aggregate_product_info(products) aggregated = ProductInfo.aggregate(products)
assert len(aggregated) == 2 assert len(aggregated) == 2
btc_info = next((p for p in aggregated if p.symbol == "BTC"), None) btc_info = next((p for p in aggregated if p.symbol == "BTC"), None)
@@ -65,56 +65,65 @@ class TestMarketDataAggregator:
assert btc_info is not None assert btc_info is not None
avg_weighted_price_btc = (50000.0 * 1000.0 + 50100.0 * 1100.0) / (1000.0 + 1100.0) avg_weighted_price_btc = (50000.0 * 1000.0 + 50100.0 * 1100.0) / (1000.0 + 1100.0)
assert btc_info.price == pytest.approx(avg_weighted_price_btc, rel=1e-3) assert btc_info.price == pytest.approx(avg_weighted_price_btc, rel=1e-3) # type: ignore
assert btc_info.volume_24h == pytest.approx(1050.0, rel=1e-3) assert btc_info.volume_24h == pytest.approx(1050.0, rel=1e-3) # type: ignore
assert btc_info.quote_currency == "USD" assert btc_info.currency == "USD"
assert eth_info is not None assert eth_info is not None
avg_weighted_price_eth = (4000.0 * 2000.0 + 4050.0 * 2100.0) / (2000.0 + 2100.0) avg_weighted_price_eth = (4000.0 * 2000.0 + 4050.0 * 2100.0) / (2000.0 + 2100.0)
assert eth_info.price == pytest.approx(avg_weighted_price_eth, rel=1e-3) assert eth_info.price == pytest.approx(avg_weighted_price_eth, rel=1e-3) # type: ignore
assert eth_info.volume_24h == pytest.approx(2050.0, rel=1e-3) assert eth_info.volume_24h == pytest.approx(2050.0, rel=1e-3) # type: ignore
assert eth_info.quote_currency == "USD" assert eth_info.currency == "USD"
def test_aggregate_product_info_with_no_data(self): def test_aggregate_product_info_with_no_data(self):
products = { products: dict[str, list[ProductInfo]] = {
"Provider1": [], "Provider1": [],
"Provider2": [], "Provider2": [],
} }
aggregated = aggregate_product_info(products) aggregated = ProductInfo.aggregate(products)
assert len(aggregated) == 0 assert len(aggregated) == 0
def test_aggregate_product_info_with_partial_data(self): def test_aggregate_product_info_with_partial_data(self):
products = { products: dict[str, list[ProductInfo]] = {
"Provider1": [self.__product("BTC", 50000.0, 1000.0, "USD")], "Provider1": [self.__product("BTC", 50000.0, 1000.0, "USD")],
"Provider2": [], "Provider2": [],
} }
aggregated = aggregate_product_info(products) aggregated = ProductInfo.aggregate(products)
assert len(aggregated) == 1 assert len(aggregated) == 1
info = aggregated[0] info = aggregated[0]
assert info.symbol == "BTC" assert info.symbol == "BTC"
assert info.price == pytest.approx(50000.0, rel=1e-3) assert info.price == pytest.approx(50000.0, rel=1e-3) # type: ignore
assert info.volume_24h == pytest.approx(1000.0, rel=1e-3) assert info.volume_24h == pytest.approx(1000.0, rel=1e-3) # type: ignore
assert info.quote_currency == "USD" assert info.currency == "USD"
def test_aggregate_history_prices(self): def test_aggregate_history_prices(self):
"""Test aggregazione di prezzi storici usando aggregate_history_prices""" """Test aggregazione di prezzi storici usando aggregate_history_prices"""
timestamp_now = datetime.now()
timestamp_1h_ago = int(timestamp_now.replace(hour=timestamp_now.hour - 1).timestamp())
timestamp_2h_ago = int(timestamp_now.replace(hour=timestamp_now.hour - 2).timestamp())
prices = { prices = {
"Provider1": [ "Provider1": [
self.__price(1685577600000, 50000.0, 49500.0, 49600.0, 49900.0, 150.0), self.__price(timestamp_1h_ago, 50000.0, 49500.0, 49600.0, 49900.0, 150.0),
self.__price(1685581200000, 50200.0, 49800.0, 50000.0, 50100.0, 200.0), self.__price(timestamp_2h_ago, 50200.0, 49800.0, 50000.0, 50100.0, 200.0),
], ],
"Provider2": [ "Provider2": [
self.__price(1685577600000, 50100.0, 49600.0, 49700.0, 50000.0, 180.0), self.__price(timestamp_1h_ago, 50100.0, 49600.0, 49700.0, 50000.0, 180.0),
self.__price(1685581200000, 50300.0, 49900.0, 50100.0, 50200.0, 220.0), self.__price(timestamp_2h_ago, 50300.0, 49900.0, 50100.0, 50200.0, 220.0),
], ],
} }
aggregated = aggregate_history_prices(prices) price = Price()
price.set_timestamp(timestamp_s=timestamp_1h_ago)
timestamp_1h_ago = price.timestamp
price.set_timestamp(timestamp_s=timestamp_2h_ago)
timestamp_2h_ago = price.timestamp
aggregated = Price.aggregate(prices)
assert len(aggregated) == 2 assert len(aggregated) == 2
assert aggregated[0].timestamp_ms == 1685577600000 assert aggregated[0].timestamp == timestamp_1h_ago
assert aggregated[0].high == pytest.approx(50050.0, rel=1e-3) assert aggregated[0].high == pytest.approx(50050.0, rel=1e-3) # type: ignore
assert aggregated[0].low == pytest.approx(49550.0, rel=1e-3) assert aggregated[0].low == pytest.approx(49550.0, rel=1e-3) # type: ignore
assert aggregated[1].timestamp_ms == 1685581200000 assert aggregated[1].timestamp == timestamp_2h_ago
assert aggregated[1].high == pytest.approx(50250.0, rel=1e-3) assert aggregated[1].high == pytest.approx(50250.0, rel=1e-3) # type: ignore
assert aggregated[1].low == pytest.approx(49850.0, rel=1e-3) assert aggregated[1].low == pytest.approx(49850.0, rel=1e-3) # type: ignore

View File

@@ -1,5 +1,5 @@
import pytest import pytest
from app.utils.wrapper_handler import WrapperHandler from app.api.wrapper_handler import WrapperHandler
class MockWrapper: class MockWrapper:
def do_something(self) -> str: def do_something(self) -> str:
@@ -37,7 +37,7 @@ class TestWrapperHandler:
def test_init_failing_with_instances(self): def test_init_failing_with_instances(self):
with pytest.raises(AssertionError) as exc_info: with pytest.raises(AssertionError) as exc_info:
WrapperHandler.build_wrappers([MockWrapper(), MockWrapper2()]) WrapperHandler.build_wrappers([MockWrapper(), MockWrapper2()]) # type: ignore
assert exc_info.type == AssertionError assert exc_info.type == AssertionError
def test_init_not_failing(self): def test_init_not_failing(self):
@@ -49,104 +49,98 @@ class TestWrapperHandler:
assert len(handler.wrappers) == 2 assert len(handler.wrappers) == 2
def test_all_wrappers_fail(self): def test_all_wrappers_fail(self):
wrappers = [FailingWrapper, FailingWrapper] wrappers: list[type[MockWrapper]] = [FailingWrapper, FailingWrapper]
handler: WrapperHandler[MockWrapper] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=2, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=2, retry_delay=0)
with pytest.raises(Exception) as exc_info: with pytest.raises(Exception) as exc_info:
handler.try_call(lambda w: w.do_something()) handler.try_call(lambda w: w.do_something())
assert "All wrappers failed" in str(exc_info.value) assert "All wrappers failed" in str(exc_info.value)
def test_success_on_first_try(self): def test_success_on_first_try(self):
wrappers = [MockWrapper, FailingWrapper] wrappers: list[type[MockWrapper]] = [MockWrapper, FailingWrapper]
handler: WrapperHandler[MockWrapper] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=2, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=2, retry_delay=0)
result = handler.try_call(lambda w: w.do_something()) result = handler.try_call(lambda w: w.do_something())
assert result == "Success" assert result == "Success"
assert handler.index == 0 # Should still be on the first wrapper assert handler.index == 0 # Should still be on the first wrapper
assert handler.retry_count == 0
def test_eventual_success(self): def test_eventual_success(self):
wrappers = [FailingWrapper, MockWrapper] wrappers: list[type[MockWrapper]] = [FailingWrapper, MockWrapper]
handler: WrapperHandler[MockWrapper] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=2, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=2, retry_delay=0)
result = handler.try_call(lambda w: w.do_something()) result = handler.try_call(lambda w: w.do_something())
assert result == "Success" assert result == "Success"
assert handler.index == 1 # Should have switched to the second wrapper assert handler.index == 1 # Should have switched to the second wrapper
assert handler.retry_count == 0
def test_partial_failures(self): def test_partial_failures(self):
wrappers = [FailingWrapper, MockWrapper, FailingWrapper] wrappers: list[type[MockWrapper]] = [FailingWrapper, MockWrapper, FailingWrapper]
handler: WrapperHandler[MockWrapper] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0)
result = handler.try_call(lambda w: w.do_something()) result = handler.try_call(lambda w: w.do_something())
assert result == "Success" assert result == "Success"
assert handler.index == 1 # Should have switched to the second wrapper assert handler.index == 1 # Should have switched to the second wrapper
assert handler.retry_count == 0
# Next call should still succeed on the second wrapper # Next call should still succeed on the second wrapper
result = handler.try_call(lambda w: w.do_something()) result = handler.try_call(lambda w: w.do_something())
assert result == "Success" assert result == "Success"
assert handler.index == 1 # Should still be on the second wrapper assert handler.index == 1 # Should still be on the second wrapper
assert handler.retry_count == 0
handler.index = 2 # Manually switch to the third wrapper handler.index = 2 # Manually switch to the third wrapper
result = handler.try_call(lambda w: w.do_something()) result = handler.try_call(lambda w: w.do_something())
assert result == "Success" assert result == "Success"
assert handler.index == 1 # Should return to the second wrapper after failure assert handler.index == 1 # Should return to the second wrapper after failure
assert handler.retry_count == 0
def test_try_call_all_success(self): def test_try_call_all_success(self):
wrappers = [MockWrapper, MockWrapper2] wrappers: list[type[MockWrapper]] = [MockWrapper, MockWrapper2]
handler: WrapperHandler[MockWrapper] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0)
results = handler.try_call_all(lambda w: w.do_something()) results = handler.try_call_all(lambda w: w.do_something())
assert results == {MockWrapper: "Success", MockWrapper2: "Success 2"} assert results == {MockWrapper.__name__: "Success", MockWrapper2.__name__: "Success 2"}
def test_try_call_all_partial_failures(self): def test_try_call_all_partial_failures(self):
# Only the second wrapper should succeed # Only the second wrapper should succeed
wrappers = [FailingWrapper, MockWrapper, FailingWrapper] wrappers: list[type[MockWrapper]] = [FailingWrapper, MockWrapper, FailingWrapper]
handler: WrapperHandler[MockWrapper] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0)
results = handler.try_call_all(lambda w: w.do_something()) results = handler.try_call_all(lambda w: w.do_something())
assert results == {MockWrapper: "Success"} assert results == {MockWrapper.__name__: "Success"}
# Only the second and fourth wrappers should succeed # Only the second and fourth wrappers should succeed
wrappers = [FailingWrapper, MockWrapper, FailingWrapper, MockWrapper2] wrappers: list[type[MockWrapper]] = [FailingWrapper, MockWrapper, FailingWrapper, MockWrapper2]
handler: WrapperHandler[MockWrapper] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0)
results = handler.try_call_all(lambda w: w.do_something()) results = handler.try_call_all(lambda w: w.do_something())
assert results == {MockWrapper: "Success", MockWrapper2: "Success 2"} assert results == {MockWrapper.__name__: "Success", MockWrapper2.__name__: "Success 2"}
def test_try_call_all_all_fail(self): def test_try_call_all_all_fail(self):
# Test when all wrappers fail # Test when all wrappers fail
handler_all_fail: WrapperHandler[MockWrapper] = WrapperHandler.build_wrappers([FailingWrapper, FailingWrapper], try_per_wrapper=1, retry_delay=0) handler_all_fail = WrapperHandler.build_wrappers([FailingWrapper, FailingWrapper], try_per_wrapper=1, retry_delay=0)
with pytest.raises(Exception) as exc_info: with pytest.raises(Exception) as exc_info:
handler_all_fail.try_call_all(lambda w: w.do_something()) handler_all_fail.try_call_all(lambda w: w.do_something())
assert "All wrappers failed" in str(exc_info.value) assert "All wrappers failed" in str(exc_info.value)
def test_wrappers_with_parameters(self): def test_wrappers_with_parameters(self):
wrappers = [FailingWrapperWithParameters, MockWrapperWithParameters] wrappers: list[type[MockWrapperWithParameters]] = [FailingWrapperWithParameters, MockWrapperWithParameters]
handler: WrapperHandler[MockWrapperWithParameters] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=2, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=2, retry_delay=0)
result = handler.try_call(lambda w: w.do_something("test", 42)) result = handler.try_call(lambda w: w.do_something("test", 42))
assert result == "Success test and 42" assert result == "Success test and 42"
assert handler.index == 1 # Should have switched to the second wrapper assert handler.index == 1 # Should have switched to the second wrapper
assert handler.retry_count == 0
def test_wrappers_with_parameters_all_fail(self): def test_wrappers_with_parameters_all_fail(self):
wrappers = [FailingWrapperWithParameters, FailingWrapperWithParameters] wrappers: list[type[MockWrapperWithParameters]] = [FailingWrapperWithParameters, FailingWrapperWithParameters]
handler: WrapperHandler[MockWrapperWithParameters] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0)
with pytest.raises(Exception) as exc_info: with pytest.raises(Exception) as exc_info:
handler.try_call(lambda w: w.do_something("test", 42)) handler.try_call(lambda w: w.do_something("test", 42))
assert "All wrappers failed" in str(exc_info.value) assert "All wrappers failed" in str(exc_info.value)
def test_try_call_all_with_parameters(self): def test_try_call_all_with_parameters(self):
wrappers = [FailingWrapperWithParameters, MockWrapperWithParameters] wrappers: list[type[MockWrapperWithParameters]] = [FailingWrapperWithParameters, MockWrapperWithParameters]
handler: WrapperHandler[MockWrapperWithParameters] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0)
results = handler.try_call_all(lambda w: w.do_something("param", 99)) results = handler.try_call_all(lambda w: w.do_something("param", 99))
assert results == {MockWrapperWithParameters: "Success param and 99"} assert results == {MockWrapperWithParameters.__name__: "Success param and 99"}
def test_try_call_all_with_parameters_all_fail(self): def test_try_call_all_with_parameters_all_fail(self):
wrappers = [FailingWrapperWithParameters, FailingWrapperWithParameters] wrappers: list[type[MockWrapperWithParameters]] = [FailingWrapperWithParameters, FailingWrapperWithParameters]
handler: WrapperHandler[MockWrapperWithParameters] = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0) handler = WrapperHandler.build_wrappers(wrappers, try_per_wrapper=1, retry_delay=0)
with pytest.raises(Exception) as exc_info: with pytest.raises(Exception) as exc_info:
handler.try_call_all(lambda w: w.do_something("param", 99)) handler.try_call_all(lambda w: w.do_something("param", 99))
assert "All wrappers failed" in str(exc_info.value) assert "All wrappers failed" in str(exc_info.value)

64
uv.lock generated
View File

@@ -285,6 +285,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
] ]
[[package]]
name = "colorlog"
version = "6.9.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d3/7a/359f4d5df2353f26172b3cc39ea32daa39af8de522205f512f458923e677/colorlog-6.9.0.tar.gz", hash = "sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2", size = 16624, upload-time = "2024-10-29T18:34:51.011Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e3/51/9b208e85196941db2f0654ad0357ca6388ab3ed67efdbfc799f35d1f83aa/colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff", size = 11424, upload-time = "2024-10-29T18:34:49.815Z" },
]
[[package]] [[package]]
name = "cryptography" name = "cryptography"
version = "46.0.2" version = "46.0.2"
@@ -844,14 +856,27 @@ wheels = [
[[package]] [[package]]
name = "markdown-it-py" name = "markdown-it-py"
version = "4.0.0" version = "3.0.0"
source = { registry = "https://pypi.org/simple" } source = { registry = "https://pypi.org/simple" }
dependencies = [ dependencies = [
{ name = "mdurl" }, { name = "mdurl" },
] ]
sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" }
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" },
]
[[package]]
name = "markdown-pdf"
version = "1.10"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "markdown-it-py" },
{ name = "pymupdf" },
]
sdist = { url = "https://files.pythonhosted.org/packages/5e/e6/969311a194074afa9672324244adbf64a7e8663f2ba0003395b7140f5c4a/markdown_pdf-1.10.tar.gz", hash = "sha256:bcf23d816baa56aec3a60f940681652c4e46ee048c6335835cddf86d1ff20a8e", size = 17783, upload-time = "2025-09-24T19:01:38.758Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1f/78/c593979cf1525be786d63b285a7a67afae397fc132382158432490ebd1ed/markdown_pdf-1.10-py3-none-any.whl", hash = "sha256:1863e78454e5aa9bcb34c125f385d4ff045c727660c5172877e82e69d06fae6d", size = 17994, upload-time = "2025-09-24T19:01:37.155Z" },
] ]
[[package]] [[package]]
@@ -1285,6 +1310,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" },
] ]
[[package]]
name = "pymupdf"
version = "1.26.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/90/35/031556dfc0d332d8e9ed9b61ca105138606d3f8971b9eb02e20118629334/pymupdf-1.26.4.tar.gz", hash = "sha256:be13a066d42bfaed343a488168656637c4d9843ddc63b768dc827c9dfc6b9989", size = 83077563, upload-time = "2025-08-25T14:20:29.499Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/27/ae/3be722886cc7be2093585cd94f466db1199133ab005645a7a567b249560f/pymupdf-1.26.4-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:cb95562a0a63ce906fd788bdad5239063b63068cf4a991684f43acb09052cb99", size = 23061974, upload-time = "2025-08-25T14:16:58.811Z" },
{ url = "https://files.pythonhosted.org/packages/fc/b0/9a451d837e1fe18ecdbfbc34a6499f153c8a008763229cc634725383a93f/pymupdf-1.26.4-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:67e9e6b45832c33726651c2a031e9a20108fd9e759140b9e843f934de813a7ff", size = 22410112, upload-time = "2025-08-25T14:17:24.511Z" },
{ url = "https://files.pythonhosted.org/packages/d8/13/0916e8e02cb5453161fb9d9167c747d0a20d58633e30728645374153f815/pymupdf-1.26.4-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:2604f687dd02b6a1b98c81bd8becfc0024899a2d2085adfe3f9e91607721fd22", size = 23454948, upload-time = "2025-08-25T21:20:07.71Z" },
{ url = "https://files.pythonhosted.org/packages/4e/c6/d3cfafc75d383603884edeabe4821a549345df954a88d79e6764e2c87601/pymupdf-1.26.4-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:973a6dda61ebd34040e4df3753bf004b669017663fbbfdaa294d44eceba98de0", size = 24060686, upload-time = "2025-08-25T14:17:56.536Z" },
{ url = "https://files.pythonhosted.org/packages/72/08/035e9d22c801e801bba50c6745bc90ba8696a042fe2c68793e28bf0c3b07/pymupdf-1.26.4-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:299a49797df5b558e695647fa791329ba3911cbbb31ed65f24a6266c118ef1a7", size = 24265046, upload-time = "2025-08-25T14:18:21.238Z" },
{ url = "https://files.pythonhosted.org/packages/28/8c/c201e4846ec0fb6ae5d52aa3a5d66f9355f0c69fb94230265714df0de65e/pymupdf-1.26.4-cp39-abi3-win32.whl", hash = "sha256:51b38379aad8c71bd7a8dd24d93fbe7580c2a5d9d7e1f9cd29ebbba315aa1bd1", size = 17127332, upload-time = "2025-08-25T14:18:39.132Z" },
{ url = "https://files.pythonhosted.org/packages/d1/c4/87d27b108c2f6d773aa5183c5ae367b2a99296ea4bc16eb79f453c679e30/pymupdf-1.26.4-cp39-abi3-win_amd64.whl", hash = "sha256:0b6345a93a9afd28de2567e433055e873205c52e6b920b129ca50e836a3aeec6", size = 18743491, upload-time = "2025-08-25T14:19:01.104Z" },
]
[[package]] [[package]]
name = "pytest" name = "pytest"
version = "8.4.2" version = "8.4.2"
@@ -1348,6 +1388,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" },
] ]
[[package]]
name = "python-telegram-bot"
version = "22.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "httpx" },
]
sdist = { url = "https://files.pythonhosted.org/packages/0b/6b/400f88e5c29a270c1c519a3ca8ad0babc650ec63dbfbd1b73babf625ed54/python_telegram_bot-22.5.tar.gz", hash = "sha256:82d4efd891d04132f308f0369f5b5929e0b96957901f58bcef43911c5f6f92f8", size = 1488269, upload-time = "2025-09-27T13:50:27.879Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/bc/c3/340c7520095a8c79455fcf699cbb207225e5b36490d2b9ee557c16a7b21b/python_telegram_bot-22.5-py3-none-any.whl", hash = "sha256:4b7cd365344a7dce54312cc4520d7fa898b44d1a0e5f8c74b5bd9b540d035d16", size = 730976, upload-time = "2025-09-27T13:50:25.93Z" },
]
[[package]] [[package]]
name = "pytz" name = "pytz"
version = "2025.2" version = "2025.2"
@@ -1663,18 +1715,21 @@ source = { virtual = "." }
dependencies = [ dependencies = [
{ name = "agno" }, { name = "agno" },
{ name = "coinbase-advanced-py" }, { name = "coinbase-advanced-py" },
{ name = "colorlog" },
{ name = "ddgs" }, { name = "ddgs" },
{ name = "deepseek" }, { name = "deepseek" },
{ name = "dotenv" }, { name = "dotenv" },
{ name = "gnews" }, { name = "gnews" },
{ name = "google-genai" }, { name = "google-genai" },
{ name = "gradio" }, { name = "gradio" },
{ name = "markdown-pdf" },
{ name = "newsapi-python" }, { name = "newsapi-python" },
{ name = "ollama" }, { name = "ollama" },
{ name = "openai" }, { name = "openai" },
{ name = "praw" }, { name = "praw" },
{ name = "pytest" }, { name = "pytest" },
{ name = "python-binance" }, { name = "python-binance" },
{ name = "python-telegram-bot" },
{ name = "yfinance" }, { name = "yfinance" },
] ]
@@ -1682,18 +1737,21 @@ dependencies = [
requires-dist = [ requires-dist = [
{ name = "agno" }, { name = "agno" },
{ name = "coinbase-advanced-py" }, { name = "coinbase-advanced-py" },
{ name = "colorlog" },
{ name = "ddgs" }, { name = "ddgs" },
{ name = "deepseek" }, { name = "deepseek" },
{ name = "dotenv" }, { name = "dotenv" },
{ name = "gnews" }, { name = "gnews" },
{ name = "google-genai" }, { name = "google-genai" },
{ name = "gradio" }, { name = "gradio" },
{ name = "markdown-pdf" },
{ name = "newsapi-python" }, { name = "newsapi-python" },
{ name = "ollama" }, { name = "ollama" },
{ name = "openai" }, { name = "openai" },
{ name = "praw" }, { name = "praw" },
{ name = "pytest" }, { name = "pytest" },
{ name = "python-binance" }, { name = "python-binance" },
{ name = "python-telegram-bot" },
{ name = "yfinance" }, { name = "yfinance" },
] ]