Configurazioni dell'app #27

Merged
Berack96 merged 16 commits from configs into main 2025-10-12 18:05:43 +02:00
8 changed files with 298 additions and 142 deletions
Showing only changes of commit 86277fdbdc - Show all commits

41
configs.yaml Normal file
View File

@@ -0,0 +1,41 @@
port: 8000
gradio_share: false
logging_level: INFO
strategies:
- name: Conservative
label: Conservative
description: Focus on stable and low-risk investments.
- name: Balanced
label: Balanced
description: A mix of growth and stability.
- name: Aggressive
label: Aggressive
description: High-risk, high-reward investments.
models:
gemini:
- name: gemini-2.0-flash
label: Gemini
- name: gemini-2.0-pro
label: Gemini Pro
ollama:
- name: gpt-oss:latest
label: Ollama GPT
- name: qwen3:8b
label: Qwen 3 (8B)
- name: qwen3:4b
label: Qwen 3 (4B)
- name: qwen3:1.7b
label: Qwen 3 (1.7B)
api:
retry_attempts: 3
retry_delay_seconds: 2
currency: EUR
agents:
strategy: Conservative
team_model: qwen3:1.7b
team_leader_model: qwen3:4b
predictor_model: qwen3:4b

View File

@@ -1,6 +1,7 @@
import gradio as gr import gradio as gr
from dotenv import load_dotenv from dotenv import load_dotenv
from agno.utils.log import log_info #type: ignore from agno.utils.log import log_info #type: ignore
from app.configs import AppConfig
from app.utils import ChatManager from app.utils import ChatManager
from app.agents import Pipeline from app.agents import Pipeline
@@ -8,7 +9,10 @@ from app.agents import Pipeline
if __name__ == "__main__": if __name__ == "__main__":
# Inizializzazioni # Inizializzazioni
load_dotenv() load_dotenv()
pipeline = Pipeline()
configs = AppConfig.load()
pipeline = Pipeline(configs)
chat = ChatManager() chat = ChatManager()
######################################## ########################################
@@ -57,7 +61,7 @@ if __name__ == "__main__":
type="index", type="index",
label="Stile di investimento" label="Stile di investimento"
) )
style.change(fn=pipeline.choose_style, inputs=style, outputs=None) style.change(fn=pipeline.choose_strategy, inputs=style, outputs=None)
chatbot = gr.Chatbot(label="Conversazione", height=500, type="messages") chatbot = gr.Chatbot(label="Conversazione", height=500, type="messages")
msg = gr.Textbox(label="Scrivi la tua richiesta", placeholder="Es: Quali sono le crypto interessanti oggi?") msg = gr.Textbox(label="Scrivi la tua richiesta", placeholder="Es: Quali sono le crypto interessanti oggi?")

View File

@@ -1,6 +1,5 @@
from app.agents.models import AppModels from app.agents.predictor import PredictorInput, PredictorOutput
from app.agents.predictor import PredictorInput, PredictorOutput, PredictorStyle
from app.agents.team import create_team_with from app.agents.team import create_team_with
from app.agents.pipeline import Pipeline from app.agents.pipeline import Pipeline
__all__ = ["AppModels", "PredictorInput", "PredictorOutput", "PredictorStyle", "create_team_with", "Pipeline"] __all__ = ["PredictorInput", "PredictorOutput", "create_team_with", "Pipeline"]

View File

@@ -1,107 +0,0 @@
import os
import ollama
from enum import Enum
from agno.agent import Agent
from agno.models.base import Model
from agno.models.google import Gemini
from agno.models.ollama import Ollama
from agno.tools import Toolkit
from agno.utils.log import log_warning #type: ignore
from pydantic import BaseModel
class AppModels(Enum):
"""
Enum per i modelli supportati.
Aggiungere nuovi modelli qui se necessario.
Per quanto riguarda Ollama, i modelli dovranno essere scaricati e installati
localmente seguendo le istruzioni di https://ollama.com/docs/guide/install-models
"""
GEMINI = "gemini-2.0-flash" # API online
GEMINI_PRO = "gemini-2.0-pro" # API online, più costoso ma migliore
OLLAMA_GPT = "gpt-oss:latest" # + good - slow (13b)
OLLAMA_QWEN = "qwen3:latest" # + good + fast (8b)
OLLAMA_QWEN_4B = "qwen3:4b" # + fast + decent (4b)
OLLAMA_QWEN_1B = "qwen3:1.7b" # + very fast + decent (1.7b)
@staticmethod
def availables_local() -> list['AppModels']:
"""
Controlla quali provider di modelli LLM locali sono disponibili.
Ritorna una lista di provider disponibili.
"""
try:
models_list = ollama.list()
availables = [model['model'] for model in models_list['models']]
app_models = [model for model in AppModels if model.name.startswith("OLLAMA")]
return [model for model in app_models if model.value in availables]
except Exception as e:
log_warning(f"Ollama is not running or not reachable: {e}")
return []
@staticmethod
def availables_online() -> list['AppModels']:
"""
Controlla quali provider di modelli LLM online hanno le loro API keys disponibili
come variabili d'ambiente e ritorna una lista di provider disponibili.
"""
if not os.getenv("GOOGLE_API_KEY"):
log_warning("No GOOGLE_API_KEY set in environment variables.")
return []
availables = [AppModels.GEMINI, AppModels.GEMINI_PRO]
return availables
@staticmethod
def availables() -> list['AppModels']:
"""
Controlla quali provider di modelli LLM locali sono disponibili e quali
provider di modelli LLM online hanno le loro API keys disponibili come variabili
d'ambiente e ritorna una lista di provider disponibili.
L'ordine di preferenza è:
1. Gemini (Google)
2. Ollama (locale)
"""
availables = [
*AppModels.availables_online(),
*AppModels.availables_local()
]
assert availables, "No valid model API keys set in environment variables."
return availables
def get_model(self, instructions:str) -> Model:
"""
Restituisce un'istanza del modello specificato.
Args:
instructions: istruzioni da passare al modello (system prompt).
Returns:
Un'istanza di BaseModel o una sua sottoclasse.
Raise:
ValueError se il modello non è supportato.
"""
name = self.value
if self in {model for model in AppModels if model.name.startswith("GEMINI")}:
return Gemini(name, instructions=[instructions])
elif self in {model for model in AppModels if model.name.startswith("OLLAMA")}:
return Ollama(name, instructions=[instructions])
raise ValueError(f"Modello non supportato: {self}")
def get_agent(self, instructions: str, name: str = "", output_schema: type[BaseModel] | None = None, tools: list[Toolkit] | None = None) -> Agent:
"""
Costruisce un agente con il modello e le istruzioni specificate.
Args:
instructions: istruzioni da passare al modello (system prompt)
name: nome dell'agente (opzionale)
output: schema di output opzionale (Pydantic BaseModel)
tools: lista opzionale di strumenti (tools) da fornire all'agente
Returns:
Un'istanza di Agent.
"""
return Agent(
model=self.get_model(instructions),
name=name,
retries=2,
tools=tools,
delay_between_retries=5, # seconds
output_schema=output_schema
)

View File

@@ -1,9 +1,9 @@
from agno.run.agent import RunOutput from agno.run.agent import RunOutput
from app.agents.models import AppModels
from app.agents.team import create_team_with from app.agents.team import create_team_with
from app.agents.predictor import PredictorInput, PredictorOutput, PredictorStyle from app.agents.predictor import PredictorInput, PredictorOutput
from app.agents.prompts import * from app.agents.prompts import *
from app.api.base.markets import ProductInfo from app.api.base.markets import ProductInfo
from app.configs import AppConfig
class Pipeline: class Pipeline:
@@ -13,13 +13,12 @@ class Pipeline:
e scelto dall'utente tramite i dropdown dell'interfaccia grafica. e scelto dall'utente tramite i dropdown dell'interfaccia grafica.
""" """
def __init__(self): def __init__(self, configs: AppConfig):
self.available_models = AppModels.availables() self.configs = configs
self.all_styles = list(PredictorStyle)
self.style = self.all_styles[0] # Stato iniziale
self.team = create_team_with(AppModels.OLLAMA_QWEN_1B) self.choose_strategy(0)
self.choose_predictor(0) # Modello di default self.choose_predictor(0)
# ====================== # ======================
# Dropdown handlers # Dropdown handlers
@@ -28,17 +27,17 @@ class Pipeline:
""" """
Sceglie il modello LLM da usare per il Predictor. Sceglie il modello LLM da usare per il Predictor.
""" """
model = self.available_models[index] model = self.configs.models.all_models[index]
self.predictor = model.get_agent( self.predictor = model.get_agent(
PREDICTOR_INSTRUCTIONS, PREDICTOR_INSTRUCTIONS,
output_schema=PredictorOutput, output_schema=PredictorOutput,
) )
def choose_style(self, index: int): def choose_strategy(self, index: int):
""" """
Sceglie lo stile (conservativo/aggressivo) da usare per il Predictor. Sceglie la strategia da usare per il Predictor.
""" """
self.style = self.all_styles[index] self.strat = self.configs.strategies[index].description
# ====================== # ======================
# Helpers # Helpers
@@ -47,13 +46,13 @@ class Pipeline:
""" """
Restituisce la lista dei nomi dei modelli disponibili. Restituisce la lista dei nomi dei modelli disponibili.
""" """
return [model.name for model in self.available_models] return [model.label for model in self.configs.models.all_models]
def list_styles(self) -> list[str]: def list_styles(self) -> list[str]:
""" """
Restituisce la lista degli stili di previsione disponibili. Restituisce la lista degli stili di previsione disponibili.
""" """
return [style.value for style in self.all_styles] return [strat.label for strat in self.configs.strategies]
# ====================== # ======================
# Core interaction # Core interaction
@@ -66,7 +65,11 @@ class Pipeline:
4. Restituisce la strategia finale 4. Restituisce la strategia finale
""" """
# Step 1: raccolta output dai membri del Team # Step 1: raccolta output dai membri del Team
team_outputs = self.team.run(query) # type: ignore team_model = self.configs.get_model_by_name(self.configs.agents.team_model)
leader_model = self.configs.get_model_by_name(self.configs.agents.team_leader_model)
team = create_team_with(team_model, leader_model)
team_outputs = team.run(query) # type: ignore
# Step 2: aggregazione output strutturati # Step 2: aggregazione output strutturati
all_products: list[ProductInfo] = [] all_products: list[ProductInfo] = []
@@ -87,7 +90,7 @@ class Pipeline:
# Step 3: invocazione Predictor # Step 3: invocazione Predictor
predictor_input = PredictorInput( predictor_input = PredictorInput(
data=all_products, data=all_products,
style=self.style, style=self.strat,
sentiment=aggregated_sentiment sentiment=aggregated_sentiment
) )
@@ -101,6 +104,6 @@ class Pipeline:
[f"{item.asset} ({item.percentage}%): {item.motivation}" for item in prediction.portfolio] [f"{item.asset} ({item.percentage}%): {item.motivation}" for item in prediction.portfolio]
) )
return ( return (
f"📊 Strategia ({self.style.value}): {prediction.strategy}\n\n" f"📊 Strategia ({self.strat}): {prediction.strategy}\n\n"
f"💼 Portafoglio consigliato:\n{portfolio_lines}" f"💼 Portafoglio consigliato:\n{portfolio_lines}"
) )

View File

@@ -1,15 +1,9 @@
from enum import Enum
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from app.api.base.markets import ProductInfo from app.api.base.markets import ProductInfo
class PredictorStyle(Enum):
CONSERVATIVE = "Conservativo"
AGGRESSIVE = "Aggressivo"
class PredictorInput(BaseModel): class PredictorInput(BaseModel):
data: list[ProductInfo] = Field(..., description="Market data as a list of ProductInfo") data: list[ProductInfo] = Field(..., description="Market data as a list of ProductInfo")
style: PredictorStyle = Field(..., description="Prediction style") style: str = Field(..., description="Prediction style")
sentiment: str = Field(..., description="Aggregated sentiment from news and social analysis") sentiment: str = Field(..., description="Aggregated sentiment from news and social analysis")
class ItemPortfolio(BaseModel): class ItemPortfolio(BaseModel):

View File

@@ -1,29 +1,29 @@
from agno.team import Team from agno.team import Team
from app.agents import AppModels
from app.api.markets import MarketAPIsTool from app.api.markets import MarketAPIsTool
from app.api.news import NewsAPIsTool from app.api.news import NewsAPIsTool
from app.api.social import SocialAPIsTool from app.api.social import SocialAPIsTool
from app.agents.prompts import * from app.agents.prompts import *
from app.configs import AppModel
def create_team_with(models: AppModels, coordinator: AppModels | None = None) -> Team: def create_team_with(model: AppModel, coordinator: AppModel | None = None) -> Team:
market_agent = models.get_agent( market_agent = model.get_agent(
instructions=MARKET_INSTRUCTIONS, instructions=MARKET_INSTRUCTIONS,
name="MarketAgent", name="MarketAgent",
tools=[MarketAPIsTool()] tools=[MarketAPIsTool()]
) )
news_agent = models.get_agent( news_agent = model.get_agent(
instructions=NEWS_INSTRUCTIONS, instructions=NEWS_INSTRUCTIONS,
name="NewsAgent", name="NewsAgent",
tools=[NewsAPIsTool()] tools=[NewsAPIsTool()]
) )
social_agent = models.get_agent( social_agent = model.get_agent(
instructions=SOCIAL_INSTRUCTIONS, instructions=SOCIAL_INSTRUCTIONS,
name="SocialAgent", name="SocialAgent",
tools=[SocialAPIsTool()] tools=[SocialAPIsTool()]
) )
coordinator = coordinator or models coordinator = coordinator or model
return Team( return Team(
model=coordinator.get_model(COORDINATOR_INSTRUCTIONS), model=coordinator.get_model(COORDINATOR_INSTRUCTIONS),
name="CryptoAnalysisTeam", name="CryptoAnalysisTeam",

222
src/app/configs.py Normal file
View File

@@ -0,0 +1,222 @@
import os
copilot-pull-request-reviewer[bot] commented 2025-10-12 17:56:25 +02:00 (Migrated from github.com)
Review

This singleton implementation is not thread-safe and can cause issues in concurrent environments. Consider using a proper singleton pattern with locks or removing the singleton behavior if it's not strictly necessary.

This singleton implementation is not thread-safe and can cause issues in concurrent environments. Consider using a proper singleton pattern with locks or removing the singleton behavior if it's not strictly necessary.
copilot-pull-request-reviewer[bot] commented 2025-10-12 17:56:25 +02:00 (Migrated from github.com)
Review

Import statements should be placed at the top of the file, not within method bodies. Move this import to the top-level imports section.

Import statements should be placed at the top of the file, not within method bodies. Move this import to the top-level imports section.
copilot-pull-request-reviewer[bot] commented 2025-10-12 17:56:25 +02:00 (Migrated from github.com)
Review

Comment should be in English to maintain consistency with code comments throughout the project.

            'disable_existing_loggers': False, # Keeps existing loggers (e.g., third-party loggers)
Comment should be in English to maintain consistency with code comments throughout the project. ```suggestion 'disable_existing_loggers': False, # Keeps existing loggers (e.g., third-party loggers) ```
copilot-pull-request-reviewer[bot] commented 2025-10-12 17:56:25 +02:00 (Migrated from github.com)
Review

Comment should be in English to maintain consistency with code comments throughout the project.

                'httpx': {'level': 'WARNING'}, # Too much spam for INFO
Comment should be in English to maintain consistency with code comments throughout the project. ```suggestion 'httpx': {'level': 'WARNING'}, # Too much spam for INFO ```
import ollama
import yaml
import logging.config
from pydantic import BaseModel
from agno.agent import Agent
from agno.tools import Toolkit
from agno.models.base import Model
from agno.models.google import Gemini
from agno.models.ollama import Ollama
log = logging.getLogger(__name__)
class AppModel(BaseModel):
    """Configuration entry for a single LLM, with factory helpers.

    `model` holds the concrete agno Model class to instantiate; it is
    attached later by AppConfig.validate_models() once the provider has
    been verified as available.
    """
    name: str = "gemini-2.0-flash"
    label: str = "Gemini"
    model: type[Model] | None = None

    def get_model(self, instructions: str) -> Model:
        """Instantiate the configured model class.

        Args:
            instructions: system prompt to pass to the model.
        Returns:
            A Model instance built from the attached model class.
        Raises:
            ValueError: if no model class has been assigned to this entry.
        """
        model_cls = self.model
        if model_cls is None:
            raise ValueError(f"Model class for '{self.name}' is not set.")
        return model_cls(id=self.name, instructions=[instructions])

    def get_agent(self, instructions: str, name: str = "", output_schema: type[BaseModel] | None = None, tools: list[Toolkit] | None = None) -> Agent:
        """Build an Agent wrapping this model.

        Args:
            instructions: system prompt for the underlying model.
            name: optional agent name.
            output_schema: optional Pydantic schema for structured output.
            tools: optional list of Toolkits to expose to the agent.
        Returns:
            A configured Agent instance.
        """
        underlying = self.get_model(instructions)
        return Agent(
            model=underlying,
            name=name,
            retries=2,
            tools=tools,
            delay_between_retries=5,  # seconds
            output_schema=output_schema,
        )
class APIConfig(BaseModel):
    """Retry and currency settings for external API calls.

    NOTE(review): the consumers of these fields are not visible in this
    file — confirm usage against the app.api package.
    """
    retry_attempts: int = 3       # number of retries for a failed API call
    retry_delay_seconds: int = 2  # delay between retries, in seconds
    currency: str = "USD"         # fiat currency code used for quotes
class Strategy(BaseModel):
    """An investment strategy option offered to the user."""
    name: str = "Conservative"   # internal identifier (used by get_strategy_by_name)
    label: str = "Conservative"  # display label shown in the UI dropdown
    # Free-text description; the Pipeline feeds it to the Predictor as its style.
    description: str = "Focus on low-risk investments with steady returns."
class ModelsConfig(BaseModel):
    """Configured LLM models, grouped by provider."""
    gemini: list[AppModel] = [AppModel()]
    ollama: list[AppModel] = []

    @property
    def all_models(self) -> list[AppModel]:
        """Every configured model: Gemini entries first, then Ollama."""
        return [*self.gemini, *self.ollama]
class AgentsConfigs(BaseModel):
    """Default strategy/model assignment for each agent role.

    Model names must match the `name` of an entry in ModelsConfig
    (resolved via AppConfig.get_model_by_name).
    """
    strategy: str = "Conservative"               # default Strategy.name
    team_model: str = "gemini-2.0-flash"         # model for the team member agents
    team_leader_model: str = "gemini-2.0-flash"  # model for the team coordinator
    predictor_model: str = "gemini-2.0-flash"    # presumably the Predictor's model — not referenced in this chunk
class AppConfig(BaseModel):
    """
    Top-level application configuration, loaded from a YAML file.

    Use AppConfig.load() to obtain an instance: it parses the YAML,
    configures logging, and resolves which configured models are usable.
    """
    port: int = 8000
    gradio_share: bool = False
    logging_level: str = "INFO"
    api: APIConfig = APIConfig()
    strategies: list[Strategy] = [Strategy()]
    models: ModelsConfig = ModelsConfig()
    agents: AgentsConfigs = AgentsConfigs()

    @classmethod
    def load(cls, file_path: str = "configs.yaml") -> 'AppConfig':
        """
        Load the application configuration from a YAML file.
        Be sure to call load_dotenv() before if you use environment variables.

        Args:
            file_path: path to the YAML configuration file.
        Returns:
            An instance of AppConfig with the loaded settings.
        Raises:
            OSError: if the file cannot be opened.
            pydantic.ValidationError: if the YAML does not match the schema.
        """
        with open(file_path, 'r') as f:
            data = yaml.safe_load(f)
        # An empty YAML file parses to None; fall back to all defaults
        # instead of crashing on cls(**None).
        configs = cls(**(data or {}))
        configs.set_logging_level()
        configs.validate_models()
        log.info(f"Loaded configuration from {file_path}")
        return configs

    def get_model_by_name(self, name: str) -> AppModel:
        """
        Retrieve a model configuration by its name.

        Args:
            name: the name of the model to retrieve.
        Returns:
            The AppModel instance if found.
        Raises:
            ValueError if no model with the specified name is found.
        """
        for model in self.models.all_models:
            if model.name == name:
                return model
        raise ValueError(f"Model with name '{name}' not found.")

    def get_strategy_by_name(self, name: str) -> Strategy:
        """
        Retrieve a strategy configuration by its name.

        Args:
            name: the name of the strategy to retrieve.
        Returns:
            The Strategy instance if found.
        Raises:
            ValueError if no strategy with the specified name is found.
        """
        for strat in self.strategies:
            if strat.name == name:
                return strat
        raise ValueError(f"Strategy with name '{name}' not found.")

    def set_logging_level(self) -> None:
        """
        Configure application-wide logging based on `logging_level`:
        a colored console handler on the root logger, with third-party
        loggers routed through it.
        """
        logging.config.dictConfig({
            'version': 1,
            'disable_existing_loggers': False,  # keeps existing loggers (e.g. third-party loggers)
            'formatters': {
                'colored': {
                    '()': 'colorlog.ColoredFormatter',
                    'format': '%(log_color)s%(levelname)s%(reset)s [%(asctime)s] (%(name)s) - %(message)s'
                },
            },
            'handlers': {
                'console': {
                    'class': 'logging.StreamHandler',
                    'formatter': 'colored',
                    'level': self.logging_level,
                },
            },
            'root': {  # configure the root logger
                'handlers': ['console'],
                'level': self.logging_level,
            },
            'loggers': {
                'httpx': {'level': 'WARNING'},  # too much spam at INFO
            }
        })
        # Reroute agno's loggers through the root handler.
        # NOTE(review): imported here rather than at module top, presumably so
        # agno's loggers exist before we reset them — confirm whether a
        # top-level import would be safe.
        import agno.utils.log  # type: ignore
        agno_logger_names = ["agno", "agno-team", "agno-workflow"]
        for logger_name in agno_logger_names:
            logger = logging.getLogger(logger_name)
            logger.handlers.clear()
            logger.propagate = True

    def validate_models(self) -> None:
        """
        Validate the configured models for each provider, attaching the
        concrete agno model class to each usable entry.
        """
        self.__validate_online_models("gemini", clazz=Gemini, key="GOOGLE_API_KEY")
        self.__validate_ollama_models()

    def __validate_online_models(self, provider: str, clazz: type[Model], key: str | None = None) -> None:
        """
        Validate models for online providers like Gemini.

        Args:
            provider: attribute name on self.models (e.g. "gemini")
            clazz: class of the model (e.g. Gemini)
            key: env variable holding the provider's API key (optional)
        """
        models: list[AppModel] | None = getattr(self.models, provider, None)
        if not models:
            # Bug fix: the original only warned on None and then fell through,
            # crashing on models.clear() / iteration below.
            log.warning(f"No models configured for provider '{provider}'.")
            return
        if key and os.getenv(key) is None:
            log.warning(f"No {key} set in environment variables for {provider}.")
            models.clear()
            return
        for model in models:
            model.model = clazz

    def __validate_ollama_models(self) -> None:
        """
        Validate models for the Ollama provider: keep only the configured
        entries whose model is actually installed in the local daemon.
        If Ollama is unreachable, only a warning is logged.
        """
        try:
            models_list = ollama.list()
            availables = {model['model'] for model in models_list['models']}
            not_availables: list[str] = []
            for model in self.models.ollama:
                if model.name in availables:
                    model.model = Ollama
                else:
                    not_availables.append(model.name)
            if not_availables:
                log.warning(f"Ollama models not available: {not_availables}")
            self.models.ollama = [model for model in self.models.ollama if model.model]
        except Exception as e:
            log.warning(f"Ollama is not running or not reachable: {e}")