Refactor model handling and agent construction; remove deprecated example script

This commit is contained in:
2025-09-16 17:42:22 +02:00
parent 0f6a7dabb6
commit fc725e48e2
5 changed files with 178 additions and 164 deletions

View File

@@ -1,8 +1,63 @@
from enum import Enum
from typing import Optional

import agno
import dotenv
import gradio
import requests
from agno.agent import Agent
from agno.models.base import BaseModel
from agno.models.google import Gemini
from agno.models.ollama import Ollama
from agno.tools.reasoning import ReasoningTools
from dotenv import load_dotenv
class Model(Enum):
    """
    Enum of the supported models.

    Add new models here as needed. The Ollama models must first be downloaded
    and installed locally, following the instructions at
    https://ollama.com/docs/guide/install-models
    """

    GEMINI = "gemini-2.0-flash"     # online API
    OLLAMA = "llama3.1"             # small and fast (7b) but not that good
    OLLAMA_GPT = "gpt-oss"          # fairly big (13b) but very good (almost like the Gemini API)
    OLLAMA_GEMMA = "gemma3:4b"      # no tool support
    OLLAMA_DEEP = "deepseek-r1:8b"  # no tool support
    OLLAMA_QWEN = "qwen3:8b"        # good
def get_model(model: Model, instructions: Optional[str] = None) -> BaseModel:
    """
    Return an instance of the requested model backend.

    Args:
        model: The supported ``Model`` member to instantiate.
        instructions: Optional system instructions forwarded to the model.

    Returns:
        A configured ``BaseModel`` (Gemini for the online API, Ollama for
        locally installed models).

    Raises:
        ValueError: If ``model`` is not a supported ``Model`` member.
    """
    # Gemini is the only online-API backend; every other supported member is
    # a locally installed Ollama model. Keep the Ollama set explicit so a new
    # enum member added without updating this function fails loudly.
    ollama_models = {
        Model.OLLAMA,
        Model.OLLAMA_GPT,
        Model.OLLAMA_GEMMA,
        Model.OLLAMA_DEEP,
        Model.OLLAMA_QWEN,
    }
    if model is Model.GEMINI:
        return Gemini(model.value, instructions=instructions)
    if model in ollama_models:
        return Ollama(model.value, instructions=instructions)
    raise ValueError(f"Modello non supportato: {model}")
def build_agent(model: Model, instructions: str) -> Agent:
    """
    Build an agent backed by the given model and configured with the given
    instructions.
    """
    # The same instructions are supplied both to the model backend and to
    # the agent itself, mirroring how the rest of this module wires them up.
    backend = get_model(model, instructions=instructions)
    return Agent(
        model=backend,
        instructions=instructions,
        tools=[ReasoningTools()],
        markdown=True,
    )
if __name__ == "__main__":
    print("Hello World!")
    # Must be called before anything else: it loads the environment
    # variables (the API keys, in our case) from the .env file.
    load_dotenv()
    prompt = "Scrivi una poesia su un gatto."
    instructions = "Rispondi in italiano e molto brevemente. Usa tabelle per visualizzare i dati."
    gemini_result = build_agent(Model.GEMINI, instructions=instructions).run(prompt)
    print(f"Risposta Gemini:\n{gemini_result.content}\n==============================")
    ollama_result = build_agent(Model.OLLAMA_GPT, instructions=instructions).run(prompt)
    print(f"\nRisposta Ollama GPT:\n{ollama_result.content}\n==============================")

View File

@@ -1,28 +0,0 @@
from agno.agent import Agent
from agno.models.google import Gemini
from agno.tools.reasoning import ReasoningTools
from dotenv import load_dotenv
import ollama
from ollama_demo import generate_text
def run_gemini_poem():
    """Ask a Gemini-backed reasoning agent for a short cat poem and print it."""
    load_dotenv()
    agent = Agent(
        model=Gemini(),
        instructions="Use tables to display data.",
        tools=[ReasoningTools()],
        markdown=True,
    )
    print(agent.run("Scrivi una poesia su un gatto. Sii breve.").content)
def run_ollama_codegemma_poem():
    """Generate and print a short cat poem via the local gpt-oss Ollama model."""
    print(
        generate_text(
            model="gpt-oss:latest",
            prompt="Scrivi una poesia su un gatto. Sii breve.",
        )
    )
if __name__ == "__main__":
    # Run each demo under its label, in order.
    for label, demo in (
        ("Risposta Gemini:", run_gemini_poem),
        ("\nRisposta Ollama GPT-OSS:", run_ollama_codegemma_poem),
    ):
        print(label)
        demo()