Add Telegram bot support #23

Merged
Berack96 merged 23 commits from 6-telegram-interface into main 2025-10-13 10:49:46 +02:00
4 changed files with 114 additions and 114 deletions
Showing only changes of commit e7c32cc227 - Show all commits

View File

@@ -1,95 +1,24 @@
import gradio as gr
# IMPORTANTE: Carichiamo le variabili d'ambiente PRIMA di qualsiasi altra cosa
from dotenv import load_dotenv
load_dotenv()
# IMPORTARE LIBRERIE DA QUI IN POI
from app.utils import ChatManager, BotFunctions
from agno.utils.log import log_info #type: ignore
from app.utils import ChatManager
from app.agents import Pipeline
from app.utils.telegram_app import BotFunctions
# Disabilita TUTTI i log di livello inferiore a WARNING
# La maggior parte arrivano da httpx
import logging
logging.getLogger().setLevel(logging.WARNING)
def gradio_app(pipeline: Pipeline, server: str = "0.0.0.0", port: int = 8000) -> str:
    """Build and launch the Gradio chat UI bound to *pipeline*.

    Args:
        pipeline: Agent pipeline used to answer user messages.
        server: Address the Gradio server binds to.
        port: TCP port the Gradio server listens on.

    Returns:
        The public share URL of the running app.
    """
    manager = ChatManager()

    # ------------------------------------------------------------------
    # UI callbacks
    # ------------------------------------------------------------------
    def on_message(message: str, history: list[dict[str, str]]) -> tuple[list[dict[str, str]], list[dict[str, str]], str]:
        # Record the exchange in the chat manager, then extend the UI history.
        manager.send_message(message)
        answer = pipeline.interact(message)
        manager.receive_message(answer)
        history += [
            {"role": "user", "content": message},
            {"role": "assistant", "content": answer},
        ]
        # Third value clears the input textbox.
        return history, history, ""

    def on_save() -> str:
        # Persist the conversation to a fixed local file.
        manager.save_chat("chat.json")
        return "💾 Chat salvata in chat.json"

    def on_load() -> tuple[list[dict[str, str]], list[dict[str, str]]]:
        # Restore the conversation previously saved by on_save().
        manager.load_chat("chat.json")
        restored = [{"role": m["role"], "content": m["content"]} for m in manager.get_history()]
        return restored, restored

    def on_reset() -> tuple[list[dict[str, str]], list[dict[str, str]]]:
        manager.reset_chat()
        return [], []

    # ------------------------------------------------------------------
    # Layout
    # ------------------------------------------------------------------
    with gr.Blocks() as demo:
        gr.Markdown("# 🤖 Agente di Analisi e Consulenza Crypto (Chat)")

        # Model / investment-style selectors; type="index" makes each
        # dropdown pass the selected position to the pipeline setters.
        with gr.Row():
            provider = gr.Dropdown(
                choices=pipeline.list_providers(),
                type="index",
                label="Modello da usare"
            )
            provider.change(fn=pipeline.choose_predictor, inputs=provider, outputs=None)
            style = gr.Dropdown(
                choices=pipeline.list_styles(),
                type="index",
                label="Stile di investimento"
            )
            style.change(fn=pipeline.choose_style, inputs=style, outputs=None)

        chatbot = gr.Chatbot(label="Conversazione", height=500, type="messages")
        msg = gr.Textbox(label="Scrivi la tua richiesta", placeholder="Es: Quali sono le crypto interessanti oggi?")

        with gr.Row():
            clear_btn = gr.Button("🗑️ Reset Chat")
            save_btn = gr.Button("💾 Salva Chat")
            load_btn = gr.Button("📂 Carica Chat")

        # Wire widget events to the callbacks above.
        msg.submit(on_message, inputs=[msg, chatbot], outputs=[chatbot, chatbot, msg])
        clear_btn.click(on_reset, inputs=None, outputs=[chatbot, chatbot])
        save_btn.click(on_save, inputs=None, outputs=None)
        load_btn.click(on_load, inputs=None, outputs=[chatbot, chatbot])

    # prevent_thread_lock keeps the caller's thread free after launch.
    _app, local, share = demo.launch(server_name=server, server_port=port, quiet=True, prevent_thread_lock=True)
    log_info(f"UPO AppAI Chat is running on {local} and {share}")
    return share
if __name__ == "__main__":
    # NOTE(review): this guard contained both the pre- and post-refactor
    # startup code interleaved (diff artifact: duplicate load_dotenv, two
    # conflicting create_bot calls). Keep only the ChatManager-based flow,
    # matching the new BotFunctions.create_bot(miniapp_url) signature.
    server, port, share = ("0.0.0.0", 8000, False)  # TODO Temp configs, maybe read from env/yaml/ini file later
    chat = ChatManager()
    gradio = chat.gradio_build_interface()
    # prevent_thread_lock returns immediately so the Telegram bot can run
    # in this same process; share_url is None when share=False.
    _app, local_url, share_url = gradio.launch(server_name=server, server_port=port, quiet=True, prevent_thread_lock=True, share=share)
    log_info(f"UPO AppAI Chat is running on {local_url} and {share_url}")
    telegram = BotFunctions.create_bot(share_url)
    # Blocks here, polling Telegram for updates until interrupted.
    telegram.run_polling()

View File

@@ -12,11 +12,12 @@ class Pipeline:
e scelto dall'utente tramite i dropdown dell'interfaccia grafica.
"""
def __init__(self):
self.available_models = AppModels.availables()
self.all_styles = list(PredictorStyle)
# Variabili statiche
available_models = AppModels.availables()
all_styles = list(PredictorStyle)
self.style = self.all_styles[0]
def __init__(self):
self.style = Pipeline.all_styles[0]
self.team = create_team_with(AppModels.OLLAMA_QWEN_1B)
self.choose_predictor(0) # Modello di default
@@ -27,7 +28,7 @@ class Pipeline:
"""
Sceglie il modello LLM da usare per il Predictor.
"""
model = self.available_models[index]
model = Pipeline.available_models[index]
self.predictor = model.get_agent(
PREDICTOR_INSTRUCTIONS,
output_schema=PredictorOutput,
@@ -37,7 +38,7 @@ class Pipeline:
"""
Sceglie lo stile (conservativo/aggressivo) da usare per il Predictor.
"""
self.style = self.all_styles[index]
self.style = Pipeline.all_styles[index]
# ======================
# Helpers
@@ -46,13 +47,13 @@ class Pipeline:
"""
Restituisce la lista dei nomi dei modelli disponibili.
"""
return [model.name for model in self.available_models]
return [model.name for model in Pipeline.available_models]
def list_styles(self) -> list[str]:
    """Return the display values of the available prediction styles."""
    # NOTE(review): diff artifact carried both the old (self.all_styles)
    # and new (Pipeline.all_styles) return lines; keep the class-attribute
    # form, consistent with the refactor moving the catalogues to the class.
    return [style.value for style in Pipeline.all_styles]
# ======================
# Core interaction

View File

@@ -1,5 +1,8 @@
import json
import os
import json
import gradio as gr
from app.agents.pipeline import Pipeline
class ChatManager:
"""
@@ -11,6 +14,7 @@ class ChatManager:
def __init__(self):
self.history: list[dict[str, str]] = [] # [{"role": "user"/"assistant", "content": "..."}]
self.pipeline = Pipeline()
def send_message(self, message: str) -> None:
"""
@@ -56,3 +60,66 @@ class ChatManager:
Restituisce lo storico completo della chat.
"""
return self.history
########################################
# Funzioni Gradio
########################################
def gradio_respond(self, message: str, history: list[dict[str, str]]) -> tuple[list[dict[str, str]], list[dict[str, str]], str]:
    """Gradio callback: answer *message* and append the exchange to *history*.

    Returns the updated history twice (chat state + chatbot component) plus
    an empty string so the input textbox is cleared.
    """
    self.send_message(message)
    reply = self.pipeline.interact(message)
    self.receive_message(reply)
    history += [
        {"role": "user", "content": message},
        {"role": "assistant", "content": reply},
    ]
    return history, history, ""
def gradio_save(self) -> str:
    """Gradio callback: persist the current chat to the fixed local file."""
    target = "chat.json"
    self.save_chat(target)
    return "💾 Chat salvata in chat.json"
def gradio_load(self) -> tuple[list[dict[str, str]], list[dict[str, str]]]:
    """Gradio callback: restore the chat saved in chat.json.

    Returns the restored history twice (state + chatbot component).
    """
    self.load_chat("chat.json")
    restored = [{"role": m["role"], "content": m["content"]} for m in self.get_history()]
    return restored, restored
def gradio_clear(self) -> tuple[list[dict[str, str]], list[dict[str, str]]]:
    """Gradio callback: wipe the conversation and empty both UI outputs."""
    self.reset_chat()
    return [], []
def gradio_build_interface(self) -> gr.Blocks:
    """Assemble and return the Gradio Blocks UI bound to this chat manager.

    The caller is responsible for launching the returned interface.
    """
    with gr.Blocks() as interface:
        gr.Markdown("# 🤖 Agente di Analisi e Consulenza Crypto (Chat)")

        # Model / investment-style selectors; type="index" makes each
        # dropdown report the selected position, which is what the
        # pipeline's choose_* methods expect.
        with gr.Row():
            provider = gr.Dropdown(
                choices=self.pipeline.list_providers(),
                type="index",
                label="Modello da usare"
            )
            provider.change(fn=self.pipeline.choose_predictor, inputs=provider, outputs=None)
            style = gr.Dropdown(
                choices=self.pipeline.list_styles(),
                type="index",
                label="Stile di investimento"
            )
            style.change(fn=self.pipeline.choose_style, inputs=style, outputs=None)

        chatbot = gr.Chatbot(label="Conversazione", height=500, type="messages")
        msg = gr.Textbox(label="Scrivi la tua richiesta", placeholder="Es: Quali sono le crypto interessanti oggi?")

        with gr.Row():
            clear_btn = gr.Button("🗑️ Reset Chat")
            save_btn = gr.Button("💾 Salva Chat")
            load_btn = gr.Button("📂 Carica Chat")

        # Hook the gradio_* callbacks up to the widgets.
        msg.submit(self.gradio_respond, inputs=[msg, chatbot], outputs=[chatbot, chatbot, msg])
        clear_btn.click(self.gradio_clear, inputs=None, outputs=[chatbot, chatbot])
        save_btn.click(self.gradio_save, inputs=None, outputs=None)
        load_btn.click(self.gradio_load, inputs=None, outputs=[chatbot, chatbot])
    return interface

View File

@@ -1,15 +1,22 @@
import io
import os
import json
import httpx
import warnings
from enum import Enum
from typing import Any
from agno.utils.log import log_info # type: ignore
from markdown_pdf import MarkdownPdf, Section
from telegram import CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, Message, Update, User
from telegram.constants import ChatAction
from telegram.ext import Application, CallbackQueryHandler, CommandHandler, ContextTypes, ConversationHandler, ExtBot, JobQueue, MessageHandler, filters
from app.agents import AppModels, PredictorStyle
from app.agents.pipeline import Pipeline
# per per_message di ConversationHandler che rompe sempre qualunque input tu metta
warnings.filterwarnings("ignore")
# Lo stato cambia in base al valore di ritorno delle funzioni async
# END state è già definito in telegram.ext.ConversationHandler
# Un semplice schema delle interazioni:
@@ -32,9 +39,9 @@ class ConfigsChat(Enum):
class ConfigsRun:
    """Per-user run configuration selected through the Telegram bot."""

    def __init__(self):
        # NOTE(review): diff artifact kept both the old assignments
        # (BotFunctions.pipeline.*) and the new ones (Pipeline.*); keep the
        # class-attribute form matching the refactored Pipeline.
        self.model_team = Pipeline.available_models[0]
        self.model_output = Pipeline.available_models[0]
        self.strategy = Pipeline.all_styles[0]
        # The free-text query the user wants the team to answer.
        self.user_query = ""
@@ -43,11 +50,10 @@ class BotFunctions:
# In theory this is already thread-safe if run with CPython
users_req: dict[User, ConfigsRun]
pipeline: Pipeline
# che incubo di typing
@staticmethod
def create_bot(pipeline: Pipeline, miniapp_url: str | None = None) -> Application[ExtBot[None], ContextTypes.DEFAULT_TYPE, dict[str, Any], dict[str, Any], dict[str, Any], JobQueue[ContextTypes.DEFAULT_TYPE]]:
def create_bot(miniapp_url: str | None = None) -> Application[ExtBot[None], ContextTypes.DEFAULT_TYPE, dict[str, Any], dict[str, Any], dict[str, Any], JobQueue[ContextTypes.DEFAULT_TYPE]]:
"""
Create a Telegram bot application instance.
Assumes the TELEGRAM_BOT_TOKEN environment variable is set.
@@ -57,7 +63,6 @@ class BotFunctions:
AssertionError: If the TELEGRAM_BOT_TOKEN environment variable is not set.
"""
BotFunctions.users_req = {}
BotFunctions.pipeline = pipeline
token = os.getenv("TELEGRAM_BOT_TOKEN", '')
assert token, "TELEGRAM_BOT_TOKEN environment variable not set"
@@ -118,7 +123,7 @@ class BotFunctions:
async def handle_configs(update: Update, state: ConfigsChat, msg: str | None = None) -> int:
query, _ = await BotFunctions.handle_callbackquery(update)
models = [(m.name, f"__select_config:{state}:{m.name}") for m in BotFunctions.pipeline.available_models]
models = [(m.name, f"__select_config:{state}:{m.name}") for m in Pipeline.available_models]
inline_btns = [[InlineKeyboardButton(name, callback_data=callback_data)] for name, callback_data in models]
await query.edit_message_text(msg or state.value, reply_markup=InlineKeyboardMarkup(inline_btns))
@@ -142,7 +147,7 @@ class BotFunctions:
endpoint = f"https://api.telegram.org/bot{token}/setChatMenuButton"
payload = {"menu_button": json.dumps({
"type": "web_app",
"text": "Apri Mini App", # Il testo che appare sul pulsante
"text": "MiniApp",
"web_app": {
"url": url
}
@@ -173,7 +178,7 @@ class BotFunctions:
async def __strategy(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
query, _ = await BotFunctions.handle_callbackquery(update)
strategies = [(s.name, f"__select_config:{ConfigsChat.STRATEGY}:{s.name}") for s in BotFunctions.pipeline.all_styles]
strategies = [(s.name, f"__select_config:{ConfigsChat.STRATEGY}:{s.name}") for s in Pipeline.all_styles]
inline_btns = [[InlineKeyboardButton(name, callback_data=callback_data)] for name, callback_data in strategies]
await query.edit_message_text("Select a strategy", reply_markup=InlineKeyboardMarkup(inline_btns))
@@ -227,37 +232,35 @@ class BotFunctions:
msg_id = update.message.message_id - 1
chat_id = update.message.chat_id
configs = [
configs_str = [
'Running with configurations: ',
f'Team: {confs.model_team.name}',
f'Output: {confs.model_output.name}',
f'Strategy: {confs.strategy.name}',
f'Query: "{confs.user_query}"'
]
full_message = f"""```\n{'\n'.join(configs)}\n```\n\n"""
full_message = f"""```\n{'\n'.join(configs_str)}\n```\n\n"""
msg = await bot.edit_message_text(chat_id=chat_id, message_id=msg_id, text=full_message, parse_mode='MarkdownV2')
if isinstance(msg, bool): return
# Remove user query and bot message
await bot.delete_message(chat_id=chat_id, message_id=update.message.id)
# TODO fare il run effettivo del team
# Simulate a long-running task
n_simulations = 3
import asyncio
# Start TEAM
# TODO migliorare messaggi di attesa
pipeline = Pipeline()
pipeline.choose_predictor(Pipeline.available_models.index(confs.model_team))
pipeline.choose_style(Pipeline.all_styles.index(confs.strategy))
await bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
for i in range(n_simulations):
await msg.edit_text(f"{full_message}Working {i+1}/{n_simulations}", parse_mode='MarkdownV2')
await asyncio.sleep(2)
report_content = pipeline.interact(confs.user_query)
await msg.delete()
# attach report file to the message
import io
from markdown_pdf import MarkdownPdf, Section
report_content = f"# Report\n\nThis is a sample report generated by the team."
pdf = MarkdownPdf(toc_level=2, optimize=True)
pdf.add_section(Section(report_content, toc=False))
# TODO vedere se ha senso dare il pdf o solo il messaggio
document = io.BytesIO()
pdf.save_bytes(document)
document.seek(0)