Fix configs validation (#66)

* fix configs validation of models

* added validation for defaults configs

* better error handling in main if any error occurs during setup
This commit was merged in pull request #66.
This commit is contained in:
Giacomo Bertolazzi
2025-10-30 15:47:07 +01:00
committed by GitHub
parent b480888806
commit df14ae5bc6
2 changed files with 63 additions and 32 deletions

View File

@@ -1,32 +1,38 @@
import asyncio
import logging
import sys
from dotenv import load_dotenv
from app.configs import AppConfig
from app.interface import *
if __name__ == "__main__":
    # NOTE(review): this span was a diff rendering that interleaved the pre-
    # and post-commit lines; below is the post-commit (PR #66) structure.
    try:
        # =====================
        load_dotenv()
        configs = AppConfig.load()
        # =====================
        chat = ChatManager()
        gradio = chat.gradio_build_interface()
        # prevent_thread_lock=True keeps launch() non-blocking so the Telegram
        # bot (or the fallback event loop) can run in this same thread.
        _app, local_url, share_url = gradio.launch(server_name="0.0.0.0", server_port=configs.port, quiet=True, prevent_thread_lock=True, share=configs.gradio_share)
        logging.info(f"UPO AppAI Chat is running on {share_url or local_url}")
        try:
            telegram = TelegramApp()
            telegram.add_miniapp_url(share_url)
            telegram.run()
        except AssertionError as e:
            # The Telegram bot is optional: if its setup assertions fail,
            # keep the Gradio interface alive on a bare event loop instead.
            try:
                logging.warning(f"Telegram bot could not be started: {e}")
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                loop.run_forever()
            except KeyboardInterrupt:
                logging.info("Shutting down due to KeyboardInterrupt")
            finally:
                gradio.close()
    except Exception as e:
        # Any failure during setup (e.g. config validation in AppConfig.load,
        # port binding) is fatal: log it and exit with a non-zero status.
        logging.error(f"Application failed to start: {e}")
        sys.exit(1)

View File

@@ -78,36 +78,38 @@ class Strategy(BaseModel):
class ModelsConfig(BaseModel):
    """Per-provider model lists; an empty list disables that provider."""

    # Gemini is the only provider enabled by default; the others must be
    # configured explicitly (and their API keys supplied) to be used.
    gemini: list[AppModel] = [AppModel()]
    gpt: list[AppModel] = []
    mistral: list[AppModel] = []
    deepseek: list[AppModel] = []
    ollama: list[AppModel] = []

    @property
    def all_models(self) -> list[AppModel]:
        """Return every configured model across all providers."""
        return self.gemini + self.ollama + self.gpt + self.mistral + self.deepseek
def validate_models(self) -> None:
"""
Validate the configured models for each provider.
Validate the configured models for each supported provider.
"""
self.__validate_online_models(self.gemini, clazz=Gemini, key="GOOGLE_API_KEY")
self.__validate_online_models(self.gpt, clazz=OpenAIChat, key="OPENAI_API_KEY")
self.__validate_online_models(self.mistral, clazz=MistralChat, key="MISTRAL_API_KEY")
self.__validate_online_models(self.deepseek, clazz=DeepSeek, key="DEEPSEEK_API_KEY")
# self.__validate_online_models(self.xai, clazz=xAI, key="XAI_API_KEY")
self.__validate_ollama_models()
    def __validate_online_models(self, models: list[AppModel], clazz: type[Model], key: str | None = None) -> None:
        """
        Validate models for online providers that require an API key.

        If the models list is empty, no validation is performed and the method returns immediately.
        If the API key is not set, the models list will be cleared — in place, so the
        corresponding provider attribute on this config is emptied as well.

        Args:
            models: list of AppModel instances to validate
            clazz: class of the model (e.g. Gemini)
            key: API key required for the provider (optional)
        """
        if not models:
            return
        if key and os.getenv(key) is None:
            log.warning(f"No {key} set in environment variables for {clazz.__name__}.")
            models.clear()
@@ -131,7 +133,7 @@ class ModelsConfig(BaseModel):
else:
not_availables.append(model.name)
if not_availables:
log.warning(f"Ollama models not available: {not_availables}")
log.warning(f"Ollama models not available, but defined in configs: {not_availables}")
self.ollama = [model for model in self.ollama if model.model]
@@ -147,6 +149,28 @@ class AgentsConfigs(BaseModel):
query_analyzer_model: str = "gemini-2.0-flash"
report_generation_model: str = "gemini-2.0-flash"
def validate_defaults(self, configs: 'AppConfig') -> None:
"""
Validate that the default models and strategy exist in the provided configurations.
If any default is not found, a ValueError is raised.
Args:
configs: the AppConfig instance containing models and strategies.
Raises:
ValueError if any default model or strategy is not found.
"""
try:
configs.get_strategy_by_name(self.strategy)
except ValueError as e:
log.error(f"Default strategy '{self.strategy}' not found in configurations.")
raise e
for model_name in [self.team_model, self.team_leader_model, self.query_analyzer_model, self.report_generation_model]:
try:
configs.get_model_by_name(model_name)
except ValueError as e:
log.error(f"Default agent model '{model_name}' not found in configurations.")
raise e
class AppConfig(BaseModel):
    # Port the web UI is served on (passed to gradio.launch(server_port=...)).
    port: int = 8000
    # Whether to request a public Gradio share link (gradio.launch(share=...)).
    gradio_share: bool = False
@@ -188,6 +212,7 @@ class AppConfig(BaseModel):
super().__init__(*args, **kwargs)
self.set_logging_level()
self.models.validate_models()
self.agents.validate_defaults(self)
self._initialized = True
def get_model_by_name(self, name: str) -> AppModel: