Better logging

This commit is contained in:
2025-10-09 12:43:27 +02:00
parent e7c32cc227
commit 2642b0a221
7 changed files with 111 additions and 57 deletions

View File

@@ -13,6 +13,7 @@ dependencies = [
"pytest", # Test
"dotenv", # Gestire variabili d'ambiente (generalmente API keys od opzioni)
"gradio", # UI web semplice con user_input e output
"colorlog", # Log colorati in console
# Per costruire agenti (ovvero modelli che possono fare più cose tramite tool) https://github.com/agno-agi/agno
# altamente consigliata dato che ha anche tools integrati per fare scraping, calcoli e molto altro

View File

@@ -3,10 +3,37 @@ from dotenv import load_dotenv
load_dotenv()
# Modifico il comportamento del logging (dato che ci sono molte librerie che lo usano)
import logging.config
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False, # Mantiene i logger esistenti (es. di terze parti)
'formatters': {
'colored': {
'()': 'colorlog.ColoredFormatter',
'format': '%(log_color)s%(levelname)s%(reset)s [%(asctime)s] (%(name)s) - %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'colored',
'level': 'INFO'
},
},
'root': { # Configura il logger root
'handlers': ['console'],
'level': 'INFO',
},
'loggers': {
'httpx': {'level': 'WARNING'}, # Troppo spam per INFO
}
})
# IMPORTARE LIBRERIE DA QUI IN POI
from app.utils import ChatManager, BotFunctions
from agno.utils.log import log_info #type: ignore
@@ -17,7 +44,7 @@ if __name__ == "__main__":
chat = ChatManager()
gradio = chat.gradio_build_interface()
_app, local_url, share_url = gradio.launch(server_name=server, server_port=port, quiet=True, prevent_thread_lock=True, share=share)
log_info(f"UPO AppAI Chat is running on {local_url} and {share_url}")
logging.info(f"UPO AppAI Chat is running on {local_url} and {share_url}")
telegram = BotFunctions.create_bot(share_url)
telegram.run_polling()

View File

@@ -1,14 +1,16 @@
import os
import ollama
import logging
from enum import Enum
from agno.agent import Agent
from agno.models.base import Model
from agno.models.google import Gemini
from agno.models.ollama import Ollama
from agno.tools import Toolkit
from agno.utils.log import log_warning #type: ignore
from pydantic import BaseModel
logging = logging.getLogger(__name__)
class AppModels(Enum):
"""
@@ -36,7 +38,7 @@ class AppModels(Enum):
app_models = [model for model in AppModels if model.name.startswith("OLLAMA")]
return [model for model in app_models if model.value in availables]
except Exception as e:
log_warning(f"Ollama is not running or not reachable: {e}")
logging.warning(f"Ollama is not running or not reachable: {e}")
return []
@staticmethod
@@ -46,7 +48,7 @@ class AppModels(Enum):
come variabili d'ambiente e ritorna una lista di provider disponibili.
"""
if not os.getenv("GOOGLE_API_KEY"):
log_warning("No GOOGLE_API_KEY set in environment variables.")
logging.warning("No GOOGLE_API_KEY set in environment variables.")
return []
availables = [AppModels.GEMINI, AppModels.GEMINI_PRO]
return availables

View File

@@ -1,9 +1,12 @@
import logging
from agno.run.agent import RunOutput
from app.agents.models import AppModels
from app.agents.team import create_team_with
from app.agents.predictor import PREDICTOR_INSTRUCTIONS, PredictorInput, PredictorOutput, PredictorStyle
from app.base.markets import ProductInfo
logging = logging.getLogger(__name__)
class Pipeline:
"""
@@ -65,42 +68,51 @@ class Pipeline:
3. Invoca Predictor
4. Restituisce la strategia finale
"""
# Step 1: raccolta output dai membri del Team
logging.info(f"Pipeline received query: {query}")
team_outputs = self.team.run(query) # type: ignore
# Step 2: aggregazione output strutturati
all_products: list[ProductInfo] = []
sentiments: list[str] = []
# Step 2: recupero output
if not isinstance(team_outputs.content, str):
logging.error(f"Team output is not a string: {team_outputs.content}")
raise ValueError("Team output is not a string")
logging.info(f"Team finished")
return team_outputs.content
for agent_output in team_outputs.member_responses:
if isinstance(agent_output, RunOutput) and agent_output.metadata is not None:
keys = agent_output.metadata.keys()
if "products" in keys:
all_products.extend(agent_output.metadata["products"])
if "sentiment_news" in keys:
sentiments.append(agent_output.metadata["sentiment_news"])
if "sentiment_social" in keys:
sentiments.append(agent_output.metadata["sentiment_social"])
# # Step 2: aggregazione output strutturati
# all_products: list[ProductInfo] = []
# sentiments: list[str] = []
aggregated_sentiment = "\n".join(sentiments)
# for agent_output in team_outputs.member_responses:
# if isinstance(agent_output, RunOutput) and agent_output.metadata is not None:
# keys = agent_output.metadata.keys()
# if "products" in keys:
# all_products.extend(agent_output.metadata["products"])
# if "sentiment_news" in keys:
# sentiments.append(agent_output.metadata["sentiment_news"])
# if "sentiment_social" in keys:
# sentiments.append(agent_output.metadata["sentiment_social"])
# Step 3: invocazione Predictor
predictor_input = PredictorInput(
data=all_products,
style=self.style,
sentiment=aggregated_sentiment
)
# aggregated_sentiment = "\n".join(sentiments)
result = self.predictor.run(predictor_input) # type: ignore
if not isinstance(result.content, PredictorOutput):
return "❌ Errore: il modello non ha restituito un output valido."
prediction: PredictorOutput = result.content
# # Step 3: invocazione Predictor
# predictor_input = PredictorInput(
# data=all_products,
# style=self.style,
# sentiment=aggregated_sentiment
# )
# result = self.predictor.run(predictor_input) # type: ignore
# if not isinstance(result.content, PredictorOutput):
# return "❌ Errore: il modello non ha restituito un output valido."
# prediction: PredictorOutput = result.content
# Step 4: restituzione strategia finale
portfolio_lines = "\n".join(
[f"{item.asset} ({item.percentage}%): {item.motivation}" for item in prediction.portfolio]
)
return (
f"📊 Strategia ({self.style.value}): {prediction.strategy}\n\n"
f"💼 Portafoglio consigliato:\n{portfolio_lines}"
)
# portfolio_lines = "\n".join(
# [f"{item.asset} ({item.percentage}%): {item.motivation}" for item in prediction.portfolio]
# )
# return (
# f"📊 Strategia ({self.style.value}): {prediction.strategy}\n\n"
# f"💼 Portafoglio consigliato:\n{portfolio_lines}"
# )

View File

@@ -2,10 +2,10 @@ import io
import os
import json
import httpx
import logging
import warnings
from enum import Enum
from typing import Any
from agno.utils.log import log_info # type: ignore
from markdown_pdf import MarkdownPdf, Section
from telegram import CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, Message, Update, User
from telegram.constants import ChatAction
@@ -15,6 +15,7 @@ from app.agents.pipeline import Pipeline
# per per_message di ConversationHandler che rompe sempre qualunque input tu metta
warnings.filterwarnings("ignore")
logging = logging.getLogger(__name__)
# Lo stato cambia in base al valore di ritorno delle funzioni async
@@ -70,7 +71,7 @@ class BotFunctions:
if miniapp_url: BotFunctions.update_miniapp_url(miniapp_url, token)
app = Application.builder().token(token).build()
conv_handler = ConversationHandler(
app.add_handler(ConversationHandler(
per_message=False, # capire a cosa serve perché dà un warning quando parte il server
entry_points=[CommandHandler('start', BotFunctions.__start)],
states={
@@ -86,11 +87,7 @@ class BotFunctions:
]
},
fallbacks=[CommandHandler('start', BotFunctions.__start)],
)
app.add_handler(conv_handler)
log_info("Telegram bot application created successfully.")
))
return app
########################################
@@ -154,7 +151,7 @@ class BotFunctions:
})}
httpx.post(endpoint, data=payload)
except httpx.HTTPError as e:
log_info(f"Failed to update mini app URL: {e}")
logging.info(f"Failed to update mini app URL: {e}")
#########################################
# Funzioni async per i comandi e messaggi
@@ -162,7 +159,7 @@ class BotFunctions:
@staticmethod
async def __start(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
message, user = await BotFunctions.handle_message(update)
log_info(f"@{user.username} started the conversation.")
logging.info(f"@{user.username} started the conversation.")
await BotFunctions.start_message(user, message)
return CONFIGS
@@ -187,7 +184,7 @@ class BotFunctions:
@staticmethod
async def __select_config(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
query, user = await BotFunctions.handle_callbackquery(update)
log_info(f"@{user.username} --> {query.data}")
logging.info(f"@{user.username} --> {query.data}")
req = BotFunctions.users_req[user]
@@ -209,16 +206,16 @@ class BotFunctions:
confs = BotFunctions.users_req[user]
confs.user_query = message.text or ""
log_info(f"@{user.username} started the team with [{confs.model_team}, {confs.model_output}, {confs.strategy}]")
logging.info(f"@{user.username} started the team with [{confs.model_team}, {confs.model_output}, {confs.strategy}]")
await BotFunctions.__run_team(update, confs)
log_info(f"@{user.username} team finished.")
logging.info(f"@{user.username} team finished.")
return ConversationHandler.END
@staticmethod
async def __cancel(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
query, user = await BotFunctions.handle_callbackquery(update)
log_info(f"@{user.username} canceled the conversation.")
logging.info(f"@{user.username} canceled the conversation.")
if user in BotFunctions.users_req:
del BotFunctions.users_req[user]
await query.edit_message_text("Conversation canceled. Use /start to begin again.")
@@ -246,12 +243,12 @@ class BotFunctions:
# Remove user query and bot message
await bot.delete_message(chat_id=chat_id, message_id=update.message.id)
# Start TEAM
# TODO migliorare messaggi di attesa
# TODO settare correttamente i modelli
pipeline = Pipeline()
pipeline.choose_predictor(Pipeline.available_models.index(confs.model_team))
#pipeline.choose_predictor(Pipeline.available_models.index(confs.model_team))
pipeline.choose_style(Pipeline.all_styles.index(confs.strategy))
# TODO migliorare messaggi di attesa
await bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
report_content = pipeline.interact(confs.user_query)
await msg.delete()

View File

@@ -1,9 +1,10 @@
import inspect
import logging
import time
import traceback
from typing import Any, Callable, Generic, TypeVar
from agno.utils.log import log_info, log_warning #type: ignore
logging = logging.getLogger(__name__)
WrapperType = TypeVar("WrapperType")
WrapperClassType = TypeVar("WrapperClassType")
OutputType = TypeVar("OutputType")
@@ -76,7 +77,7 @@ class WrapperHandler(Generic[WrapperType]):
Exception: If all wrappers fail after retries.
"""
log_info(f"{inspect.getsource(func).strip()} {inspect.getclosurevars(func).nonlocals}")
logging.info(f"{inspect.getsource(func).strip()} {inspect.getclosurevars(func).nonlocals}")
results: dict[str, OutputType] = {}
starting_index = self.index
@@ -86,18 +87,18 @@ class WrapperHandler(Generic[WrapperType]):
wrapper_name = wrapper.__class__.__name__
if not try_all:
log_info(f"try_call {wrapper_name}")
logging.info(f"try_call {wrapper_name}")
for try_count in range(1, self.retry_per_wrapper + 1):
try:
result = func(wrapper)
log_info(f"{wrapper_name} succeeded")
logging.info(f"{wrapper_name} succeeded")
results[wrapper_name] = result
break
except Exception as e:
error = WrapperHandler.__concise_error(e)
log_warning(f"{wrapper_name} failed {try_count}/{self.retry_per_wrapper}: {error}")
logging.warning(f"{wrapper_name} failed {try_count}/{self.retry_per_wrapper}: {error}")
time.sleep(self.retry_delay)
if not try_all and results:
@@ -143,6 +144,6 @@ class WrapperHandler(Generic[WrapperType]):
wrapper = wrapper_class(**(kwargs or {}))
result.append(wrapper)
except Exception as e:
log_warning(f"{wrapper_class} cannot be initialized: {e}")
logging.warning(f"'{wrapper_class.__name__}' cannot be initialized: {e}")
return WrapperHandler(result, try_per_wrapper, retry_delay)

14
uv.lock generated
View File

@@ -285,6 +285,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
]
[[package]]
name = "colorlog"
version = "6.9.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d3/7a/359f4d5df2353f26172b3cc39ea32daa39af8de522205f512f458923e677/colorlog-6.9.0.tar.gz", hash = "sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2", size = 16624, upload-time = "2024-10-29T18:34:51.011Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e3/51/9b208e85196941db2f0654ad0357ca6388ab3ed67efdbfc799f35d1f83aa/colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff", size = 11424, upload-time = "2024-10-29T18:34:49.815Z" },
]
[[package]]
name = "cryptography"
version = "46.0.2"
@@ -1644,6 +1656,7 @@ source = { virtual = "." }
dependencies = [
{ name = "agno" },
{ name = "coinbase-advanced-py" },
{ name = "colorlog" },
{ name = "ddgs" },
{ name = "dotenv" },
{ name = "gnews" },
@@ -1663,6 +1676,7 @@ dependencies = [
requires-dist = [
{ name = "agno" },
{ name = "coinbase-advanced-py" },
{ name = "colorlog" },
{ name = "ddgs" },
{ name = "dotenv" },
{ name = "gnews" },