Update chat interface #70

Merged
trojanhorse47 merged 9 commits from 47-update-chat-interface into main 2025-10-31 14:24:39 +01:00
3 changed files with 109 additions and 24 deletions
Showing only changes of commit 0799a4ab08 - Show all commits

View File

@@ -45,28 +45,28 @@ class PipelineInputs:
""" """
Sceglie il modello LLM da usare per l'analizzatore di query. Sceglie il modello LLM da usare per l'analizzatore di query.
""" """
assert index >= 0 and index < len(self.configs.models.all_models), "Index out of range for models list." assert 0 <= index < len(self.configs.models.all_models), "Index out of range for models list."
self.query_analyzer_model = self.configs.models.all_models[index] self.query_analyzer_model = self.configs.models.all_models[index]
def choose_team_leader(self, index: int): def choose_team_leader(self, index: int):
""" """
Sceglie il modello LLM da usare per il Team Leader. Sceglie il modello LLM da usare per il Team Leader.
""" """
assert index >= 0 and index < len(self.configs.models.all_models), "Index out of range for models list." assert 0 <= index < len(self.configs.models.all_models), "Index out of range for models list."
self.team_leader_model = self.configs.models.all_models[index] self.team_leader_model = self.configs.models.all_models[index]
def choose_team(self, index: int): def choose_team(self, index: int):
""" """
Sceglie il modello LLM da usare per il Team. Sceglie il modello LLM da usare per il Team.
""" """
assert index >= 0 and index < len(self.configs.models.all_models), "Index out of range for models list." assert 0 <= index < len(self.configs.models.all_models), "Index out of range for models list."
self.team_model = self.configs.models.all_models[index] self.team_model = self.configs.models.all_models[index]
def choose_report_generator(self, index: int): def choose_report_generator(self, index: int):
""" """
Sceglie il modello LLM da usare per il generatore di report. Sceglie il modello LLM da usare per il generatore di report.
""" """
assert index >= 0 and index < len(self.configs.models.all_models), "Index out of range for models list." assert 0 <= index < len(self.configs.models.all_models), "Index out of range for models list."
self.report_generation_model = self.configs.models.all_models[index] self.report_generation_model = self.configs.models.all_models[index]
def choose_strategy(self, index: int): def choose_strategy(self, index: int):
@@ -111,6 +111,7 @@ class PipelineInputs:
name="CryptoAnalysisTeam", name="CryptoAnalysisTeam",
tools=[ReasoningTools(), PlanMemoryTool(), CryptoSymbolsTools()], tools=[ReasoningTools(), PlanMemoryTool(), CryptoSymbolsTools()],
members=[market_agent, news_agent, social_agent], members=[market_agent, news_agent, social_agent],
stream_intermediate_steps=True
) )
def get_agent_query_checker(self) -> Agent: def get_agent_query_checker(self) -> Agent:

View File

@@ -84,6 +84,25 @@ class Pipeline:
result = await self.run(workflow, query, events=events) result = await self.run(workflow, query, events=events)
return result return result
async def interact_stream(self, listeners: list[tuple[PipelineEvent, Callable[[Any], None]]] = []):
"""
Versione asincrona in streaming che ESEGUE (yield) la pipeline,
restituendo gli aggiornamenti di stato e il risultato finale.
"""
run_id = random.randint(1000, 9999) # Per tracciare i log
logging.info(f"[{run_id}] Pipeline query: {self.inputs.user_query}")
copilot-pull-request-reviewer[bot] commented 2025-10-30 20:22:23 +01:00 (Migrated from github.com)
Review

[nitpick] The comment describes the function as 'ESEGUE (yield)' which is inconsistent with the style used in similar comments. The word 'ESEGUE' (executes) appears to be emphasized but doesn't align well with the yield concept. Consider clarifying that it 'yields' or 'streams' intermediate results and the final response.

        Versione asincrona in streaming che restituisce (yield) gli aggiornamenti di stato intermedi
        e il risultato finale della pipeline.
[nitpick] The comment describes the function as 'ESEGUE (yield)' which is inconsistent with the style used in similar comments. The word 'ESEGUE' (executes) appears to be emphasized but doesn't align well with the yield concept. Consider clarifying that it 'yields' or 'streams' intermediate results and the final response. ```suggestion Versione asincrona in streaming che restituisce (yield) gli aggiornamenti di stato intermedi e il risultato finale della pipeline. ```
events = [*PipelineEvent.get_log_events(run_id), *listeners]
query = QueryInputs(
user_query=self.inputs.user_query,
strategy=self.inputs.strategy.description
)
workflow = self.build_workflow()
# Delega al classmethod 'run_stream' per lo streaming
async for item in self.run_stream(workflow, query, events=events):
yield item
def build_workflow(self) -> Workflow: def build_workflow(self) -> Workflow:
""" """
@@ -114,33 +133,88 @@ class Pipeline:
]) ])
@classmethod @classmethod
async def run(cls, workflow: Workflow, query: QueryInputs, events: list[tuple[PipelineEvent, Callable[[Any], None]]]) -> str: async def run(cls, workflow: Workflow, query: QueryInputs,
events: list[tuple[PipelineEvent, Callable[[Any], None]]]) -> str:
""" """
Esegue il workflow e gestisce gli eventi tramite le callback fornite. Esegue il workflow e gestisce gli eventi, restituendo solo il risultato finale.
Args: Consuma il generatore 'run_stream'.
workflow: istanza di Workflow da eseguire """
query: query dell'utente da passare al workflow final_result = "Errore durante l'esecuzione del workflow."
events: dizionario di callback per eventi specifici (opzionale) # Consuma il generatore e salva solo l'ultimo item
copilot-pull-request-reviewer[bot] commented 2025-10-30 20:22:22 +01:00 (Migrated from github.com)
Review

The three-dot '...' at the end of the string is inconsistent with other yield statements. Line 196 uses three dots ('...') while lines 181, 183, 185, and 194 use '…' (the ellipsis character). Consider using the ellipsis character for consistency.

                        yield f"Sto usando uno strumento sconosciuto…"
The three-dot '...' at the end of the string is inconsistent with other yield statements. Line 196 uses three dots ('...') while lines 181, 183, 185, and 194 use '…' (the ellipsis character). Consider using the ellipsis character for consistency. ```suggestion yield f"Sto usando uno strumento sconosciuto…" ```
copilot-pull-request-reviewer[bot] commented 2025-10-30 20:22:23 +01:00 (Migrated from github.com)
Review

[nitpick] The default error message 'Errore durante l'esecuzione del workflow.' may not be reached in practice since the stream should always yield at least one item (the error message from line 213). This initialization could be misleading. Consider using a more descriptive default or documenting why this fallback exists.

        # Fallback: if the workflow yields no results, return a descriptive error.
        final_result = "[Pipeline Error] Nessun risultato prodotto dal workflow. (Fallback: run_stream non ha generato output)"
[nitpick] The default error message 'Errore durante l'esecuzione del workflow.' may not be reached in practice since the stream should always yield at least one item (the error message from line 213). This initialization could be misleading. Consider using a more descriptive default or documenting why this fallback exists. ```suggestion # Fallback: if the workflow yields no results, return a descriptive error. final_result = "[Pipeline Error] Nessun risultato prodotto dal workflow. (Fallback: run_stream non ha generato output)" ```
Returns: async for item in cls.run_stream(workflow, query, events):
La risposta generata dal workflow. final_result = item
return final_result
@classmethod
async def run_stream(cls, workflow: Workflow, query: QueryInputs,
events: list[tuple[PipelineEvent, Callable[[Any], None]]]):
"""
Esegue il workflow e restituisce gli eventi di stato e il risultato finale.
""" """
iterator = await workflow.arun(query, stream=True, stream_intermediate_steps=True) iterator = await workflow.arun(query, stream=True, stream_intermediate_steps=True)
content = None content = None
current_active_step = None
async for event in iterator: async for event in iterator:
step_name = getattr(event, 'step_name', '') step_name = getattr(event, 'step_name', '')
# 1. Chiama i listeners (per i log)
for app_event, listener in events: for app_event, listener in events:
if app_event.check_event(event.event, step_name): if app_event.check_event(event.event, step_name):
listener(event) listener(event)
if event.event == WorkflowRunEvent.step_completed:
# 2. Restituisce gli aggiornamenti di stato per Gradio
if event.event == WorkflowRunEvent.step_started.value:
current_active_step = step_name
if step_name == PipelineEvent.QUERY_CHECK.value:
yield "🔍 Sto controllando la tua richiesta..."
elif step_name == PipelineEvent.INFO_RECOVERY.value:
yield "📊 Sto recuperando i dati (mercato, news, social)..."
elif step_name == PipelineEvent.REPORT_GENERATION.value:
yield "✍️ Sto scrivendo il report finale..."
# Gestisce i tool usati da agenti singoli (come Query Check)
elif event.event == WorkflowRunEvent.step_output.value:
agent_event = event.content
if hasattr(agent_event, 'event') and agent_event.event == RunEvent.tool_call_completed.value:
tool_name = getattr(agent_event.tool, 'tool_name', 'uno strumento')
yield f"🛠️ Sto usando lo strumento: {tool_name}..."
# Gestisce i tool usati da agenti interni al team (come CustomEvent)
elif event.event == WorkflowRunEvent.custom_event.value:
custom_content = getattr(event, 'content', None)
if custom_content and hasattr(custom_content, 'event'):
agent_event = custom_content
if agent_event.event == RunEvent.tool_call_completed.value:
if step_name == PipelineEvent.INFO_RECOVERY.value:
tool_name = getattr(agent_event.tool, 'tool_name', 'uno strumento')
yield f"🛠️ (Team) Sto usando lo strumento: {tool_name}..."
# Gestisce gli eventi di tool promossi dal Team
elif event.event == PipelineEvent.TOOL_USED.value:
# Ci assicuriamo che l'evento provenga dallo step corretto
if current_active_step == PipelineEvent.INFO_RECOVERY.value:
tool_object = getattr(event, 'tool', None)
if tool_object:
tool_name = getattr(tool_object, 'tool_name', 'uno strumento')
yield f"🛠️ (Team) Sto usando lo strumento: {tool_name}..."
else:
yield f"🛠️ (Team) Sto usando uno strumento sconosciuto..."
# 3. Salva il contenuto finale quando uno step è completato
elif event.event == WorkflowRunEvent.step_completed.value:
current_active_step = None
content = getattr(event, 'content', '') content = getattr(event, 'content', '')
# 4. Restituisce la risposta finale
if content and isinstance(content, str): if content and isinstance(content, str):
think_str = "</think>" think_str = "</think>"
think = content.rfind(think_str) think = content.rfind(think_str)
return content[(think + len(think_str)):] if think != -1 else content final_answer = content[(think + len(think_str)):] if think != -1 else content
if content and isinstance(content, QueryOutputs): yield final_answer
return content.response elif content and isinstance(content, QueryOutputs):
yield content.response
else:
logging.error(f"No output from workflow: {content}") logging.error(f"No output from workflow: {content}")
return "No output from workflow, something went wrong." yield "Nessun output dal workflow, qualcosa è andato storto."

View File

@@ -49,13 +49,23 @@ class ChatManager:
######################################## ########################################
copilot-pull-request-reviewer[bot] commented 2025-10-30 20:22:23 +01:00 (Migrated from github.com)
Review

All chunks (including intermediate status messages) are yielded to Gradio, but only the final response is saved to history. This could be confusing because intermediate status updates like '🔍 Sto controllando la tua richiesta...' will be displayed but not the actual final answer if the stream doesn't clearly differentiate between status updates and the final response. Consider adding logic to distinguish between status messages and the final answer, or document this behavior clearly.

All chunks (including intermediate status messages) are yielded to Gradio, but only the final response is saved to history. This could be confusing because intermediate status updates like '🔍 Sto controllando la tua richiesta...' will be displayed but not the actual final answer if the stream doesn't clearly differentiate between status updates and the final response. Consider adding logic to distinguish between status messages and the final answer, or document this behavior clearly.
# Funzioni Gradio # Funzioni Gradio
######################################## ########################################
def gradio_respond(self, message: str, history: list[tuple[str, str]]) -> str: async def gradio_respond(self, message: str, history: list[tuple[str, str]]):
"""
Versione asincrona in streaming.
Produce (yield) aggiornamenti di stato e la risposta finale.
"""
self.inputs.user_query = message self.inputs.user_query = message
pipeline = Pipeline(self.inputs) pipeline = Pipeline(self.inputs)
response = pipeline.interact()
response = None
# Itera sul nuovo generatore asincrono
async for chunk in pipeline.interact_stream():
response = chunk # Salva l'ultimo chunk (che sarà la risposta finale)
yield response # Restituisce l'aggiornamento (o la risposta finale) a Gradio
# Dopo che il generatore è completo, salva l'ultima risposta nello storico
if response:
self.history.append((message, response)) self.history.append((message, response))
return response
def gradio_save(self) -> str: def gradio_save(self) -> str:
self.save_chat("chat.json") self.save_chat("chat.json")