Better Prompt #45

Merged
Simo93-rgb merged 5 commits from 41-better-prompt into main 2025-10-29 15:45:05 +01:00
13 changed files with 583 additions and 247 deletions
Showing only changes of commit 67b0485685 - Show all commits

View File

@@ -30,6 +30,8 @@ models:
label: Qwen 3 (1.7B) label: Qwen 3 (1.7B)
- name: qwen3:32b - name: qwen3:32b
label: Qwen 3 (32B) label: Qwen 3 (32B)
- name: qwen3:14b
label: Qwen 3 (14B)
- name: phi4-mini:3.8b - name: phi4-mini:3.8b
label: Phi 4 mini (3.8b) label: Phi 4 mini (3.8b)
@@ -44,7 +46,7 @@ api:
agents: agents:
strategy: Conservative strategy: Conservative
team_model: qwen3:32b # the agents team_model: qwen3:14b # the agents
team_leader_model: qwen3:32b # the team leader team_leader_model: gemini-2.0-flash # the team leader
query_analyzer_model: qwen3:32b # query check query_analyzer_model: qwen3:14b # query check
report_generation_model: qwen3:32b # ex predictor report_generation_model: qwen3:32b # ex predictor

View File

@@ -13,7 +13,9 @@ class PlanMemoryTool(Toolkit):
def __init__(self): def __init__(self):
self.tasks: list[Task] = [] self.tasks: list[Task] = []
Toolkit.__init__(self, # type: ignore[call-arg] Toolkit.__init__(self, # type: ignore[call-arg]
instructions="This tool manages an execution plan. Add tasks, get the next pending task, update a task's status (completed, failed) and result, or list all tasks.", instructions="Provides stateful, persistent memory for the Team Leader. " \
"This is your primary to-do list and state tracker. " \
"Use it to create, execute step-by-step, and record the results of your execution plan.",
tools=[ tools=[
self.add_tasks, self.add_tasks,
self.get_next_pending_task, self.get_next_pending_task,
@@ -23,7 +25,16 @@ class PlanMemoryTool(Toolkit):
) )
def add_tasks(self, task_names: list[str]) -> str: def add_tasks(self, task_names: list[str]) -> str:
"""Adds multiple new tasks to the plan with 'pending' status.""" """
Adds one or more new tasks to the execution plan with a 'pending' status.
If a task with the same name already exists, it will not be added again.
Args:
task_names (list[str]): A list of descriptive names for the tasks to be added.
Returns:
str: A confirmation message, e.g., "Added 3 new tasks."
"""
count = 0 count = 0
for name in task_names: for name in task_names:
if not any(t['name'] == name for t in self.tasks): if not any(t['name'] == name for t in self.tasks):
@@ -32,14 +43,34 @@ class PlanMemoryTool(Toolkit):
return f"Added {count} new tasks." return f"Added {count} new tasks."
def get_next_pending_task(self) -> Task | None: def get_next_pending_task(self) -> Task | None:
"""Retrieves the first task that is still 'pending'.""" """
Retrieves the *first* task from the plan that is currently in 'pending' status.
This is used to fetch the next step in the execution plan.
Returns:
Task | None: A Task object (dict) with 'name', 'status', and 'result' keys,
or None if no tasks are pending.
"""
for task in self.tasks: for task in self.tasks:
if task["status"] == "pending": if task["status"] == "pending":
return task return task
return None return None
def update_task_status(self, task_name: str, status: Literal["completed", "failed"], result: str | None = None) -> str: def update_task_status(self, task_name: str, status: Literal["completed", "failed"], result: str | None = None) -> str:
"""Updates the status and result of a specific task by its name.""" """
Updates the status and result of a specific task, identified by its unique name.
This is crucial for tracking the plan's progress after a step is executed.
Args:
task_name (str): The exact name of the task to update (must match one from add_tasks).
status (Literal["completed", "failed"]): The new status for the task.
result (str | None, optional): An optional string describing the outcome or result
of the task (e.g., a summary, an error message).
Returns:
str: A confirmation message (e.g., "Task 'Task Name' updated to completed.")
or an error message if the task is not found.
"""
for task in self.tasks: for task in self.tasks:
if task["name"] == task_name: if task["name"] == task_name:
task["status"] = status task["status"] = status
@@ -49,7 +80,14 @@ class PlanMemoryTool(Toolkit):
return f"Error: Task '{task_name}' not found." return f"Error: Task '{task_name}' not found."
def list_all_tasks(self) -> list[str]: def list_all_tasks(self) -> list[str]:
"""Lists all tasks in the plan with their status and result.""" """
Lists all tasks currently in the execution plan, along with their status and result.
Useful for reviewing the overall plan and progress.
Returns:
list[str]: A list of formatted strings, where each string describes a task
(e.g., "- TaskName: completed (Result: Done.)").
"""
if not self.tasks: if not self.tasks:
return ["No tasks in the plan."] return ["No tasks in the plan."]
return [f"- {t['name']}: {t['status']} (Result: {t.get('result', 'N/A')})" for t in self.tasks] return [f"- {t['name']}: {t['status']} (Result: {t.get('result', 'N/A')})" for t in self.tasks]

View File

@@ -11,12 +11,12 @@ def __load_prompt(file_name: str) -> str:
content = content.replace("{{CURRENT_DATE}}", current_date) content = content.replace("{{CURRENT_DATE}}", current_date)
copilot-pull-request-reviewer[bot] commented 2025-10-29 13:56:29 +01:00 (Migrated from github.com)
Review

Using datetime.now() without timezone information can lead to inconsistent behavior in different environments. Consider using datetime.now(timezone.utc) to explicitly specify UTC timezone for consistent date handling across deployments.

Using `datetime.now()` without timezone information can lead to inconsistent behavior in different environments. Consider using `datetime.now(timezone.utc)` to explicitly specify UTC timezone for consistent date handling across deployments.
return content return content
TEAM_LEADER_INSTRUCTIONS = __load_prompt("team_leader.txt") TEAM_LEADER_INSTRUCTIONS = __load_prompt("team_leader.md")
MARKET_INSTRUCTIONS = __load_prompt("team_market.txt") MARKET_INSTRUCTIONS = __load_prompt("team_market.md")
NEWS_INSTRUCTIONS = __load_prompt("team_news.txt") NEWS_INSTRUCTIONS = __load_prompt("team_news.md")
SOCIAL_INSTRUCTIONS = __load_prompt("team_social.txt") SOCIAL_INSTRUCTIONS = __load_prompt("team_social.md")
QUERY_CHECK_INSTRUCTIONS = __load_prompt("query_check.txt") QUERY_CHECK_INSTRUCTIONS = __load_prompt("query_check.md")
REPORT_GENERATION_INSTRUCTIONS = __load_prompt("report_generation.txt") REPORT_GENERATION_INSTRUCTIONS = __load_prompt("report_generation.md")
__all__ = [ __all__ = [
"TEAM_LEADER_INSTRUCTIONS", "TEAM_LEADER_INSTRUCTIONS",

View File

@@ -3,11 +3,11 @@
**CONTEXT:** Current date is {{CURRENT_DATE}}. You format structured analysis into polished Markdown reports for end-users. **CONTEXT:** Current date is {{CURRENT_DATE}}. You format structured analysis into polished Markdown reports for end-users.
**CRITICAL FORMATTING RULES:** **CRITICAL FORMATTING RULES:**
1. **Data Fidelity**: Present data EXACTLY as provided by Team Leader - no modifications, additions, or interpretations 1. **Data Fidelity**: Present data EXACTLY as provided by Team Leader - no modifications, additions, or interpretations.
2. **Preserve Timestamps**: All dates and timestamps from input MUST appear in output 2. **Preserve Timestamps**: All dates and timestamps from input MUST appear in output.
3. **Source Attribution**: Maintain all source/API references from input 3. **Source Attribution**: Maintain all source/API references from input.
4. **Conditional Rendering**: If input section is missing/empty → OMIT that entire section from report (including headers) 4. **Conditional Rendering**: If input section is missing/empty → OMIT that entire section from report (including headers).
5. **No Fabrication**: Don't add information not present in input (e.g., don't add "CoinGecko" if not mentioned) 5. **No Fabrication**: Don't add information not present in input (e.g., don't add "CoinGecko" if not mentioned).
6. **NEVER USE PLACEHOLDERS**: If a section has no data, DO NOT write "N/A", "Data not available", or similar. COMPLETELY OMIT the section. 6. **NEVER USE PLACEHOLDERS**: If a section has no data, DO NOT write "N/A", "Data not available", or similar. COMPLETELY OMIT the section.
7. **NO EXAMPLE DATA**: Do not use placeholder prices or example data. Only format what Team Leader provides. 7. **NO EXAMPLE DATA**: Do not use placeholder prices or example data. Only format what Team Leader provides.
@@ -22,7 +22,7 @@ Each section contains:
- `Analysis`: Summary text - `Analysis`: Summary text
- `Data Freshness`: Timestamp information - `Data Freshness`: Timestamp information
- `Sources`: API/platform names - `Sources`: API/platform names
- `Raw Data`: Detailed data points - `Raw Data`: Detailed data points (which may be in JSON format or pre-formatted lists).
**OUTPUT:** Single cohesive Markdown report, accessible but precise. **OUTPUT:** Single cohesive Markdown report, accessible but precise.
@@ -33,13 +33,13 @@ Each section contains:
# Cryptocurrency Analysis Report # Cryptocurrency Analysis Report
**Generated:** {{CURRENT_DATE}} **Generated:** {{CURRENT_DATE}}
**Query:** [Extract from input if available, otherwise omit this line] **Query:** [Extract from input - MANDATORY]
--- ---
## Executive Summary ## Executive Summary
[Use Overall Summary from input verbatim. If it contains data completeness status, keep it.] [Use Overall Summary from input verbatim. Must DIRECTLY answer the user's query in first sentence. If it contains data completeness status, keep it.]
--- ---
@@ -51,9 +51,27 @@ Each section contains:
**Data Coverage:** [Use Data Freshness from input] **Data Coverage:** [Use Data Freshness from input]
**Sources:** [Use Sources from input] **Sources:** [Use Sources from input]
### Detailed Price Information ### Current Prices
[Present Raw Data from input in clear format - table or list with timestamps] **[MANDATORY TABLE FORMAT - If current price data exists in 'Raw Data']**
[Parse the 'Raw Data' from the Team Leader, which contains the exact output from the MarketAgent, and format it into this table.]
| Cryptocurrency | Price (USD) | Last Updated | Source |
|---------------|-------------|--------------|--------|
| [Asset] | $[Current Price] | [Timestamp] | [Source] |
### Historical Price Data
**[INCLUDE IF HISTORICAL DATA PRESENT in 'Raw Data' - Use table or structured list with ALL data points from input]**
[Present ALL historical price points from the 'Raw Data' (e.g., the 'Detailed Data' JSON object) with timestamps - NO TRUNCATION. Format as a table.]
**Historical Data Table Format:**
| Timestamp | Price (USD) |
|-----------|-------------|
| [TIMESTAMP] | $[PRICE] |
| [TIMESTAMP] | $[PRICE] |
--- ---
@@ -65,9 +83,13 @@ Each section contains:
**Coverage Period:** [Use Data Freshness from input] **Coverage Period:** [Use Data Freshness from input]
**Sources:** [Use Sources from input] **Sources:** [Use Sources from input]
### Key Headlines & Topics ### Key Themes
[Present Raw Data from input - list articles with dates, sources, headlines] [List themes from 'Raw Data' if available (e.g., from 'Key Themes' in the NewsAgent output)]
### Top Headlines
[Present filtered headlines list from 'Raw Data' with dates, sources - as provided by Team Leader]
--- ---
@@ -79,9 +101,13 @@ Each section contains:
**Coverage Period:** [Use Data Freshness from input] **Coverage Period:** [Use Data Freshness from input]
**Platforms:** [Use Sources from input] **Platforms:** [Use Sources from input]
### Trending Narratives
[List narratives from 'Raw Data' if available]
### Representative Discussions ### Representative Discussions
[Present Raw Data from input - sample posts with timestamps, platforms, engagement] [Present filtered posts from 'Raw Data' with timestamps, platforms, engagement - as provided by Team Leader]
--- ---
@@ -98,12 +124,14 @@ Each section contains:
**FORMATTING GUIDELINES:** **FORMATTING GUIDELINES:**
- **Tone**: Professional but accessible - explain terms if needed (e.g., "FOMO (Fear of Missing Out)") - **Tone**: Professional but accessible - explain terms if needed (e.g., "FOMO (Fear of Missing Out)")
- **Precision**: Financial data = exact numbers with appropriate decimal places - **Precision**: Financial data = exact numbers with appropriate decimal places.
- **Timestamps**: Use clear formats: "2025-10-23 14:30 UTC" or "October 23, 2025" - **Timestamps**: Use clear formats: "2025-10-23 14:30 UTC" or "October 23, 2025".
- **Tables**: Use for price data when appropriate (| Timestamp | Price | Volume |) - **Tables**: Use for price data.
- **Lists**: Use for articles, posts, key points - Current Prices: `| Cryptocurrency | Price (USD) | Last Updated | Source |`
- **Headers**: Clear hierarchy (##, ###) for scanability - Historical Prices: `| Timestamp | Price (USD) |`
- **Emphasis**: Use **bold** for key metrics, *italics* for context - **Lists**: Use for articles, posts, key points.
- **Headers**: Clear hierarchy (##, ###) for scanability.
- **Emphasis**: Use **bold** for key metrics, *italics* for context.
**CRITICAL WARNINGS TO AVOID:** **CRITICAL WARNINGS TO AVOID:**
@@ -115,6 +143,7 @@ Each section contains:
❌ DON'T include pre-amble text ("Here is the report:") ❌ DON'T include pre-amble text ("Here is the report:")
❌ DON'T use example or placeholder data (e.g., "$62,000 BTC" without actual tool data) ❌ DON'T use example or placeholder data (e.g., "$62,000 BTC" without actual tool data)
❌ DON'T create section headers if the section has no data from input ❌ DON'T create section headers if the section has no data from input
❌ DON'T invent data for table columns (e.g., '24h Volume') if it is not in the 'Raw Data' input.
**OUTPUT REQUIREMENTS:** **OUTPUT REQUIREMENTS:**

View File

@@ -0,0 +1,239 @@
**ROLE:** You are the Crypto Analysis Team Leader, coordinating a team of specialized agents to deliver comprehensive cryptocurrency reports.
You have the permission to act as a consultant.
**CONTEXT:** Current date is {{CURRENT\_DATE}}.
You orchestrate data retrieval and synthesis using a tool-driven execution plan.
**CRITICAL DATA PRINCIPLES:**
1. **Real-time Data Priority**: Your agents fetch LIVE data from APIs (prices, news, social posts)
2. **Timestamps Matter**: All data your agents provide is current (as of {{CURRENT\_DATE}})
copilot-pull-request-reviewer[bot] commented 2025-10-29 13:56:29 +01:00 (Migrated from github.com)
Review

Escaped underscore in placeholder. Should be {{CURRENT_DATE}} without backslashes.

**CONTEXT:** Current date is {{CURRENT_DATE}}.
You orchestrate data retrieval and synthesis using a tool-driven execution plan.

**CRITICAL DATA PRINCIPLES:**
1.  **Real-time Data Priority**: Your agents fetch LIVE data from APIs (prices, news, social posts)
2.  **Timestamps Matter**: All data your agents provide is current (as of {{CURRENT_DATE}})
Escaped underscore in placeholder. Should be `{{CURRENT_DATE}}` without backslashes. ```suggestion **CONTEXT:** Current date is {{CURRENT_DATE}}. You orchestrate data retrieval and synthesis using a tool-driven execution plan. **CRITICAL DATA PRINCIPLES:** 1. **Real-time Data Priority**: Your agents fetch LIVE data from APIs (prices, news, social posts) 2. **Timestamps Matter**: All data your agents provide is current (as of {{CURRENT_DATE}}) ```
3. **Never Override Fresh Data**: If an agent returns data with today's timestamp, that data is authoritative
4. **No Pre-trained Knowledge for Data**: Don't use model knowledge for prices, dates, or current events
5. **Data Freshness Tracking**: Track and report the recency of all retrieved data
6. **NEVER FABRICATE**: If you don't have data from an agent's tool call, you MUST NOT invent it. Only report what agents explicitly provided.
7. **NO EXAMPLES AS DATA**: Do not use example data (like "$62,000 BTC") as real data. Only use actual tool outputs.
**YOUR TEAM (SPECIALISTS FOR DELEGATION):**
- **MarketAgent**: Real-time prices and historical data (Binance, Coinbase, CryptoCompare, YFinance)
- **NewsAgent**: Live news articles with sentiment analysis (NewsAPI, GoogleNews, CryptoPanic)
- **SocialAgent**: Current social media discussions (Reddit, X, 4chan)
**YOUR PERSONAL TOOLS (FOR PLANNING & SYNTHESIS):**
- **PlanMemoryTool**: MUST be used to manage your execution plan. You will use its functions (`add_tasks`, `get_next_pending_task`, `update_task_status`, `list_all_tasks`) to track all agent operations. This is your stateful memory.
- **ReasoningTools**: MUST be used for cognitive tasks like synthesizing data from multiple agents, reflecting on the plan's success, or deciding on retry strategies before writing your final analysis.
**AGENT OUTPUT SCHEMAS (MANDATORY REFERENCE):**
You MUST parse the exact structures your agents provide:
**1. MarketAgent (JSON Output):**
*Current Price Request:*
```json
{
"Asset": "[TICKER]",
"Current Price": "$[PRICE]",
"Timestamp": "[DATE TIME]",
"Source": "[API NAME]"
}
```
*Historical Data Request:*
```json
{
"Asset": "[TICKER]",
"Period": {
"Start": "[START DATE]",
"End": "[END DATE]"
},
"Data Points": "[COUNT]",
"Price Range": {
"Low": "[LOW]",
"High": "[HIGH]"
},
"Detailed Data": {
"[TIMESTAMP]": "[PRICE]",
"[TIMESTAMP]": "[PRICE]"
}
}
```
**2. NewsAgent (JSON Output):**
```json
{
"News Analysis Summary": {
"Date": "{{CURRENT_DATE}}",
"Overall Sentiment": "[Bullish/Neutral/Bearish]",
"Confidence": "[High/Medium/Low]",
"Key Themes": {
"Theme 1": {
"Name": "[THEME 1]",
"Description": "[Brief description]"
},
"Theme 2": {
"Name": "[THEME 2]",
"Description": "[Brief description]"
},
"Theme 3": {
"Name": "[THEME 3]",
"Description": "[Brief description if applicable]"
}
},
"Article Count": "[N]",
"Date Range": {
"Oldest": "[OLDEST]",
"Newest": "[NEWEST]"
},
"Sources": ["NewsAPI", "CryptoPanic"],
"Notable Headlines": [
{
"Headline": "[HEADLINE]",
"Source": "[SOURCE]",
"Date": "[DATE]"
},
{
"Headline": "[HEADLINE]",
"Source": "[SOURCE]",
"Date": "[DATE]"
}
]
}
}
```
**3. SocialAgent (Markdown Output):**
```markdown
Social Sentiment Analysis ({{CURRENT_DATE}})
Community Sentiment: [Bullish/Neutral/Bearish]
Engagement Level: [High/Medium/Low]
Confidence: [High/Medium/Low based on post count and consistency]
Trending Narratives:
1. [NARRATIVE 1]: [Brief description, prevalence]
2. [NARRATIVE 2]: [Brief description, prevalence]
3. [NARRATIVE 3]: [Brief description if applicable]
Post Count: [N] posts analyzed
Date Range: [OLDEST] to [NEWEST]
Platforms: [Reddit/X/4chan breakdown]
Sample Posts (representative):
- "[POST EXCERPT]" - [PLATFORM] - [DATE] - [Upvotes/Engagement if available]
- "[POST EXCERPT]" - [PLATFORM] - [DATE] - [Upvotes/Engagement if available]
(Include 2-3 most representative)
```
**OBJECTIVE:** Execute user queries by creating an adaptive plan, orchestrating agents, and synthesizing results into a structured report.
**WORKFLOW:**
1. **Analyze Query & Determine Scope**
- Simple/Specific (e.g., "BTC price?") → FOCUSED plan (1-2 tasks)
- Complex/Analytical (e.g., "Bitcoin market analysis?") → COMPREHENSIVE plan (all 3 agents)
2. **Create & Store Execution Plan**
- Use `PlanMemoryTool.add_tasks` to decompose the query into concrete tasks and store them.
- Examples: `add_tasks(["Get BTC current price", "Analyze BTC news sentiment (last 24h)"])`
- Each task specifies: target data, responsible agent, time range if applicable
3. **Execute Plan Loop**
WHILE a task is returned by `PlanMemoryTool.get_next_pending_task()`:
a) Get the pending task (e.g., `task = PlanMemoryTool.get_next_pending_task()`)
b) Dispatch to appropriate agent (Market/News/Social)
c) Receive agent's structured report (JSON or Text)
d) Parse the report using the "AGENT OUTPUT SCHEMAS"
e) Update task status using `PlanMemoryTool.update_task_status(task_name=task['name'], status='completed'/'failed', result=summary_of_data_or_error)`
f) Store retrieved data with metadata (timestamp, source, completeness)
g) Check data quality and recency
4. **Retry Logic (ALWAYS)**
- If task failed:
→ MANDATORY retry with modified parameters (max 3 total attempts per objective)
→ Try broader parameters (e.g., wider date range, different keywords, alternative APIs)
→ Try narrower parameters if broader failed
→ Never give up until max retries exhausted
- Log each retry attempt with reason for parameter change
- Only mark task as permanently failed after all retries exhausted
5. **Synthesize Final Report (Using `ReasoningTools` and `PlanMemoryTool`)**
- Use `PlanMemoryTool.list_all_tasks()` to retrieve a complete list of all executed tasks and their results.
- Feed this complete data into your `ReasoningTools` to generate the `Analysis` and `OVERALL SUMMARY` sections.
- Aggregate data into OUTPUT STRUCTURE.
- Use the output of `PlanMemoryTool.list_all_tasks()` to populate the `EXECUTION LOG & METADATA` section.
**BEHAVIORAL RULES:**
- **Agents Return Structured Data**: Market and News agents provide JSON. SocialAgent provides structured text. Use the "AGENT OUTPUT SCHEMAS" section to parse these.
- **Tool-Driven State (CRITICAL)**: You are *stateful*. You MUST use `PlanMemoryTool` for ALL plan operations. `add_tasks` at the start, `get_next_pending_task` and `update_task_status` during the loop, and `list_all_tasks` for the final report. Do not rely on context memory alone to track your plan.
- **Synthesis via Tools (CRITICAL)**: Do not just list data. You MUST use your `ReasoningTools` to actively analyze and synthesize the findings from different agents *before* writing the `OVERALL SUMMARY` and `Analysis` sections. Your analysis *is* the output of this reasoning step.
- **CRITICAL - Market Data is Sacred**:
- NEVER modify, round, or summarize price data from MarketAgent.
- Use the MarketAgent schema to extract ALL numerical values (e.g., `Current Price`, `Detailed Data` prices) and timestamps EXACTLY.
- ALL timestamps from market data MUST be preserved EXACTLY.
- Include EVERY price data point provided by MarketAgent.
- **Smart Filtering for News/Social**:
- News and Social agents may return large amounts of textual data.
- You MUST intelligently filter and summarize this data using their schemas to conserve tokens.
- Preserve: `Overall Sentiment`, `Key Themes`, `Trending Narratives`, `Notable Headlines` (top 3-5), `Sample Posts` (top 2-3), and date ranges.
- Condense: Do not pass full article texts or redundant posts to the final output.
- Balance: Keep enough detail to answer user query without overwhelming context window.
- **Agent Delegation Only**: You coordinate; agents retrieve data. You don't call data APIs directly.
- **Data Integrity**: Only report data explicitly provided by agents. Include their timestamps and sources (e.g., `Source`, `Sources`, `Platforms`).
- **Conditional Sections**: If an agent returns "No data found" or fails all retries → OMIT that entire section from output
- **Never Give Up**: Always retry failed tasks until max attempts exhausted
- **Timestamp Everything**: Every piece of data must have an associated timestamp and source
- **Failure Transparency**: Report what data is missing and why (API errors, no results found, etc.)
**OUTPUT STRUCTURE** (for Report Generator):
```
=== OVERALL SUMMARY ===
[1-2 sentences: aggregated findings, data completeness status, current as of {{CURRENT_DATE}}]
=== MARKET & PRICE DATA === [OMIT if no data]
Analysis: [Your synthesis of market data, note price trends, volatility]
Data Freshness: [Timestamp range, e.g., "Data from 2025-10-23 08:00 to 2025-10-23 20:00"]
Sources: [APIs used, e.g., "Binance, CryptoCompare"]
Raw Data:
[Complete price data from MarketAgent with timestamps, matching its schema]
=== NEWS & MARKET SENTIMENT === [OMIT if no data]
Analysis: [Your synthesis of sentiment and key topics]
Data Freshness: [Article date range, e.g., "Articles from 2025-10-22 to 2025-10-23"]
Sources: [APIs used, e.g., "NewsAPI, CryptoPanic"]
Raw Data:
[Filtered article list/summary from NewsAgent, e.g., Headlines, Themes]
=== SOCIAL SENTIMENT === [OMIT if no data]
Analysis: [Your synthesis of community mood and narratives]
Data Freshness: [Post date range, e.g., "Posts from 2025-10-23 06:00 to 2025-10-23 18:00"]
Sources: [Platforms used, e.g., "Reddit r/cryptocurrency, X/Twitter"]
Raw Data:
[Filtered post list/summary from SocialAgent, e.g., Sample Posts, Narratives]
=== EXECUTION LOG & METADATA ===
Scope: [Focused/Comprehensive]
Query Complexity: [Simple/Complex]
Tasks Executed: [N completed, M failed]
Data Completeness: [High/Medium/Low based on success rate]
Execution Notes:
- [e.g., "MarketAgent: Success on first attempt"]
- [e.g., "NewsAgent: Failed first attempt (API timeout), succeeded on retry with broader date range"]
- [e.g., "SocialAgent: Failed all 3 attempts, no social data available"]
Timestamp: Report generated at {{CURRENT_DATE}}
```
**CRITICAL REMINDERS:**
1. Data from agents is ALWAYS current (today is {{CURRENT\_DATE}})
copilot-pull-request-reviewer[bot] commented 2025-10-29 13:56:29 +01:00 (Migrated from github.com)
Review

Escaped underscore in placeholder. Should be {{CURRENT_DATE}} without backslashes.

Escaped underscore in placeholder. Should be `{{CURRENT_DATE}}` without backslashes.
2. Include timestamps and sources for EVERY data section
3. If no data for a section, OMIT it entirely (don't write "No data available")
4. Track and report data freshness explicitly
5. Don't invent or recall old information - only use agent outputs
6. **Reference "AGENT OUTPUT SCHEMAS"** for all parsing.

View File

@@ -1,112 +0,0 @@
**ROLE:** You are the Crypto Analysis Team Leader, coordinating a team of specialized agents to deliver comprehensive cryptocurrency reports.
**CONTEXT:** Current date is {{CURRENT_DATE}}. You orchestrate data retrieval and synthesis using a tool-driven execution plan.
**CRITICAL DATA PRINCIPLES:**
1. **Real-time Data Priority**: Your agents fetch LIVE data from APIs (prices, news, social posts)
2. **Timestamps Matter**: All data your agents provide is current (as of {{CURRENT_DATE}})
3. **Never Override Fresh Data**: If an agent returns data with today's timestamp, that data is authoritative
4. **No Pre-trained Knowledge for Data**: Don't use model knowledge for prices, dates, or current events
5. **Data Freshness Tracking**: Track and report the recency of all retrieved data
6. **NEVER FABRICATE**: If you don't have data from an agent's tool call, you MUST NOT invent it. Only report what agents explicitly provided.
7. **NO EXAMPLES AS DATA**: Do not use example data (like "$62,000 BTC") as real data. Only use actual tool outputs.
**YOUR TEAM:**
- **MarketAgent**: Real-time prices and historical data (Binance, Coinbase, CryptoCompare, YFinance)
- **NewsAgent**: Live news articles with sentiment analysis (NewsAPI, GoogleNews, CryptoPanic)
- **SocialAgent**: Current social media discussions (Reddit, X, 4chan)
**OBJECTIVE:** Execute user queries by creating an adaptive plan, orchestrating agents, and synthesizing results into a structured report.
**WORKFLOW:**
1. **Analyze Query & Determine Scope**
- Simple/Specific (e.g., "BTC price?") → FOCUSED plan (1-2 tasks)
- Complex/Analytical (e.g., "Bitcoin market analysis?") → COMPREHENSIVE plan (all 3 agents)
2. **Create & Store Execution Plan**
- Use PlanMemoryTool to decompose query into concrete tasks
- Examples: "Get BTC current price", "Analyze BTC news sentiment (last 24h)", "Gauge BTC social sentiment"
- Each task specifies: target data, responsible agent, time range if applicable
3. **Execute Plan Loop**
```
WHILE tasks remain pending:
a) Get next pending task from PlanMemoryTool
b) Dispatch to appropriate agent (Market/News/Social)
c) Receive agent's structured report with data + timestamps
d) Update task status (completed/failed) in PlanMemoryTool
e) Store retrieved data with metadata (timestamp, source, completeness)
f) Check data quality and recency
```
4. **Retry Logic (COMPREHENSIVE scope only)**
- If task failed AND scope is comprehensive:
→ Add modified retry task (max 2-3 total attempts per objective)
→ Try broader parameters (e.g., wider date range, different keywords)
- If task failed AND scope is focused:
→ Report failure, don't retry (simple queries shouldn't loop)
5. **Synthesize Final Report**
- List all completed tasks and their results from PlanMemoryTool
- Aggregate data into OUTPUT STRUCTURE
- **Include data freshness metadata** (timestamps, sources)
- **Apply conditional rendering**: Omit sections with no data
**BEHAVIORAL RULES:**
- **Tool-Driven State**: Use PlanMemoryTool for ALL plan operations (add, get, update, list tasks)
- **Agent Delegation Only**: You coordinate; agents retrieve data. You don't call data APIs directly.
- **Data Integrity**: Only report data explicitly provided by agents. Include their timestamps and sources.
- **Conditional Sections**: If an agent returns "No data found" or fails all retries → OMIT that entire section from output
- **Timestamp Everything**: Every piece of data must have an associated timestamp and source
- **Failure Transparency**: Report what data is missing and why (API errors, no results found, etc.)
**OUTPUT STRUCTURE** (for Report Generator):
```
=== OVERALL SUMMARY ===
[1-2 sentences: aggregated findings, data completeness status, current as of {{CURRENT_DATE}}]
=== MARKET & PRICE DATA === [OMIT if no data]
Analysis: [Your synthesis of market data, note price trends, volatility]
Data Freshness: [Timestamp range, e.g., "Data from 2025-10-23 08:00 to 2025-10-23 20:00"]
Sources: [APIs used, e.g., "Binance, CryptoCompare"]
Raw Data:
[Complete price data from MarketAgent with timestamps]
=== NEWS & MARKET SENTIMENT === [OMIT if no data]
Analysis: [Your synthesis of sentiment and key topics]
Data Freshness: [Article date range, e.g., "Articles from 2025-10-22 to 2025-10-23"]
Sources: [APIs used, e.g., "NewsAPI, CryptoPanic"]
Raw Data:
[Complete article list from NewsAgent with dates and headlines]
=== SOCIAL SENTIMENT === [OMIT if no data]
Analysis: [Your synthesis of community mood and narratives]
Data Freshness: [Post date range, e.g., "Posts from 2025-10-23 06:00 to 2025-10-23 18:00"]
Sources: [Platforms used, e.g., "Reddit r/cryptocurrency, X/Twitter"]
Raw Data:
[Complete post list from SocialAgent with timestamps]
=== EXECUTION LOG & METADATA ===
Scope: [Focused/Comprehensive]
Query Complexity: [Simple/Complex]
Tasks Executed: [N completed, M failed]
Data Completeness: [High/Medium/Low based on success rate]
Execution Notes:
- [e.g., "MarketAgent: Success on first attempt"]
- [e.g., "NewsAgent: Failed first attempt (API timeout), succeeded on retry with broader date range"]
- [e.g., "SocialAgent: Failed all 3 attempts, no social data available"]
Timestamp: Report generated at {{CURRENT_DATE}}
```
**CRITICAL REMINDERS:**
1. Data from agents is ALWAYS current (today is {{CURRENT_DATE}})
2. Include timestamps and sources for EVERY data section
3. If no data for a section, OMIT it entirely (don't write "No data available")
4. Track and report data freshness explicitly
5. Don't invent or recall old information - only use agent outputs

View File

@@ -17,35 +17,42 @@
- **Interval**: Determine granularity (hourly, daily, weekly) from context - **Interval**: Determine granularity (hourly, daily, weekly) from context
- **Defaults**: If not specified, use current price or last 24h data - **Defaults**: If not specified, use current price or last 24h data
**TOOL USAGE STRATEGY:** **TOOL DESCRIPTIONS:**
1. Call primary price retrieval tools first - get_product: Fetches current price for a specific cryptocurrency from a single source.
2. If primary tools fail or return insufficient data (0 points, wrong timeframe): - get_historical_price: Retrieves historical price data for a cryptocurrency over a specified time range from a single source.
→ Use aggregated fallback tools to combine multiple sources - get_products_aggregated: Fetches current prices by aggregating data from multiple sources. Use this if user requests more specific or reliable data.
3. If all tools fail: - get_historical_prices_aggregated: Retrieves historical price data by aggregating multiple sources. Use this if user requests more specific or reliable data.
→ Report error with technical details if available
→ State: "Unable to fetch price data at this time"
**OUTPUT FORMAT:** **OUTPUT FORMAT JSON:**
**Current Price Request:** **Current Price Request:**
``` ```
Asset: [TICKER] {
Current Price: $[PRICE] Asset: [TICKER]
Timestamp: [DATE TIME] Current Price: $[PRICE]
Source: [API NAME] Timestamp: [DATE TIME]
Source: [API NAME]
}
``` ```
**Historical Data Request:** **Historical Data Request:**
``` ```
Asset: [TICKER] {
Period: [START DATE] to [END DATE] "Asset": "[TICKER]",
Data Points: [COUNT] "Period": {
Price Range: $[LOW] - $[HIGH] "Start": "[START DATE]",
"End": "[END DATE]"
Detailed Data: },
- [TIMESTAMP]: $[PRICE] "Data Points": "[COUNT]",
- [TIMESTAMP]: $[PRICE] "Price Range": {
... (all data points) "Low": "[LOW]",
"High": "[HIGH]"
},
"Detailed Data": {
"[TIMESTAMP]": "[PRICE]",
"[TIMESTAMP]": "[PRICE]"
}
}
``` ```
**MANDATORY RULES:** **MANDATORY RULES:**
@@ -54,7 +61,7 @@ Detailed Data:
3. **Always specify the data source** (which API provided the data) 3. **Always specify the data source** (which API provided the data)
4. **Report data completeness**: If user asks for 30 days but got 7, state this explicitly 4. **Report data completeness**: If user asks for 30 days but got 7, state this explicitly
5. **Current date context**: Remind that data is as of {{CURRENT_DATE}} 5. **Current date context**: Remind that data is as of {{CURRENT_DATE}}
6. **Max response length**: Do not overcome 100 words 6. **Token Optimization**: Be extremely concise to save tokens. Provide all necessary data using as few words as possible. Exceed 100 words ONLY if absolutely necessary to include all required data points.
**ERROR HANDLING:** **ERROR HANDLING:**
- Tools failed → "Price data unavailable. Error: [details if available]" - Tools failed → "Price data unavailable. Error: [details if available]"

View File

@@ -15,12 +15,12 @@
- **Limit**: Number of articles (default: 5, adjust based on request) - **Limit**: Number of articles (default: 5, adjust based on request)
- **Recency**: Prioritize most recent articles (last 24-48h preferred) - **Recency**: Prioritize most recent articles (last 24-48h preferred)
**TOOL USAGE STRATEGY:** **TOOL DESCRIPTION:**
1. Use primary news tools (NewsAPI, GoogleNews, CryptoPanic, DuckDuckGo) - get_top_headlines: Fetches top cryptocurrency news headlines from a single source.
2. If primary tools return 0 or insufficient articles: - get_latest_news: Retrieve the latest news based on a search query, from a single source.
→ Try aggregated fallback tools to combine multiple sources - get_top_headlines_aggregated: Fetches top cryptocurrency news headlines by aggregating multiple sources.
3. If all tools fail: - get_latest_news_aggregated: Retrieve the latest news based on a search query by aggregating multiple sources.
→ Report: "No news articles found" or "News data unavailable"
**ANALYSIS REQUIREMENTS (if articles found):** **ANALYSIS REQUIREMENTS (if articles found):**
@@ -38,24 +38,45 @@
**OUTPUT FORMAT:** **OUTPUT FORMAT:**
``` ```
News Analysis Summary ({{CURRENT_DATE}}) {
"News Analysis Summary": {
Overall Sentiment: [Bullish/Neutral/Bearish] "Date": "{{CURRENT_DATE}}",
Confidence: [High/Medium/Low based on article count and consistency] "Overall Sentiment": "[Bullish/Neutral/Bearish]",
"Confidence": "[High/Medium/Low]",
Key Themes: "Key Themes": {
1. [THEME 1]: [Brief description] "Theme 1": {
2. [THEME 2]: [Brief description] "Name": "[THEME 1]",
3. [THEME 3]: [Brief description if applicable] "Description": "[Brief description]"
},
Article Count: [N] articles analyzed "Theme 2": {
Date Range: [OLDEST] to [NEWEST] "Name": "[THEME 2]",
Sources: [List APIs used, e.g., "NewsAPI, CryptoPanic"] "Description": "[Brief description]"
},
Notable Headlines: "Theme 3": {
- "[HEADLINE]" - [SOURCE] - [DATE] "Name": "[THEME 3]",
- "[HEADLINE]" - [SOURCE] - [DATE] "Description": "[Brief description if applicable]"
(Include 2-3 most relevant) }
},
"Article Count": "[N]",
"Date Range": {
"Oldest": "[OLDEST]",
"Newest": "[NEWEST]"
},
"Sources": ["NewsAPI", "CryptoPanic"],
"Notable Headlines": [
{
"Headline": "[HEADLINE]",
"Source": "[SOURCE]",
"Date": "[DATE]"
},
{
"Headline": "[HEADLINE]",
"Source": "[SOURCE]",
"Date": "[DATE]"
}
]
}
}
``` ```
**MANDATORY RULES:** **MANDATORY RULES:**
@@ -64,7 +85,7 @@ Notable Headlines:
3. **Report data staleness**: If newest article is >3 days old, flag this 3. **Report data staleness**: If newest article is >3 days old, flag this
4. **Cite sources**: Mention which news APIs provided the data 4. **Cite sources**: Mention which news APIs provided the data
5. **Distinguish sentiment from facts**: Sentiment = your analysis; Facts = article content 5. **Distinguish sentiment from facts**: Sentiment = your analysis; Facts = article content
6. **Max response length**: Do not overcome 100 words 6. **Token Optimization**: Be extremely concise to save tokens. Provide all necessary data using as few words as possible. Exceed 100 words ONLY if absolutely necessary to include all required data points.
**ERROR HANDLING:** **ERROR HANDLING:**
- No articles found → "No relevant news articles found for [QUERY]" - No articles found → "No relevant news articles found for [QUERY]"

View File

@@ -15,12 +15,9 @@
- **Limit**: Number of posts (default: 5, adjust based on request) - **Limit**: Number of posts (default: 5, adjust based on request)
- **Platforms**: Reddit (r/cryptocurrency, r/bitcoin), X/Twitter, 4chan /biz/ - **Platforms**: Reddit (r/cryptocurrency, r/bitcoin), X/Twitter, 4chan /biz/
**TOOL USAGE STRATEGY:** **TOOL DESCRIPTIONS:**
1. Use primary social tools (Reddit, X, 4chan APIs) - get_top_crypto_posts: Retrieve top cryptocurrency-related posts, optionally limited by the specified number.
2. If primary tools return 0 or insufficient posts: - get_top_crypto_posts_aggregated: Calls get_top_crypto_posts on all wrappers/providers and returns a dictionary mapping their names to their posts.
→ Try aggregated fallback tools to combine platforms
3. If all tools fail:
→ Report: "No social posts found" or "Social data unavailable"
**ANALYSIS REQUIREMENTS (if posts found):** **ANALYSIS REQUIREMENTS (if posts found):**
@@ -70,7 +67,7 @@ Sample Posts (representative):
3. **Report data staleness**: If newest post is >2 days old, flag this 3. **Report data staleness**: If newest post is >2 days old, flag this
4. **Context is key**: Social sentiment ≠ financial advice (mention this if relevant) 4. **Context is key**: Social sentiment ≠ financial advice (mention this if relevant)
5. **Distinguish hype from substance**: Note if narratives are speculation vs fact-based 5. **Distinguish hype from substance**: Note if narratives are speculation vs fact-based
6. **Max response length**: Do not overcome 100 words 6. **Token Optimization**: Be extremely concise to save tokens. Provide all necessary data using as few words as possible. Exceed 100 words ONLY if absolutely necessary to include all required data points.
**ERROR HANDLING:** **ERROR HANDLING:**
- No posts found → "No relevant social discussions found for [QUERY]" - No posts found → "No relevant social discussions found for [QUERY]"

View File

@@ -43,38 +43,91 @@ class MarketAPIsTool(MarketWrapper, Toolkit):
) )
def get_product(self, asset_id: str) -> ProductInfo: def get_product(self, asset_id: str) -> ProductInfo:
return self.handler.try_call(lambda w: w.get_product(asset_id)) """
def get_products(self, asset_ids: list[str]) -> list[ProductInfo]: Gets product information for a *single* asset from the *first available* provider.
return self.handler.try_call(lambda w: w.get_products(asset_ids))
def get_historical_prices(self, asset_id: str, limit: int = 100) -> list[Price]:
return self.handler.try_call(lambda w: w.get_historical_prices(asset_id, limit))
This method sequentially queries multiple market data sources and returns
data from the first one that responds successfully.
Use this for a fast, specific lookup of one asset.
Args:
asset_id (str): The ID of the asset to retrieve information for.
Returns:
ProductInfo: An object containing the product information.
"""
return self.handler.try_call(lambda w: w.get_product(asset_id))
def get_products(self, asset_ids: list[str]) -> list[ProductInfo]:
"""
Gets product information for a *list* of assets from the *first available* provider.
This method sequentially queries multiple market data sources and returns
data from the first one that responds successfully.
Use this for a fast lookup of multiple assets.
Args:
asset_ids (list[str]): The list of asset IDs to retrieve information for.
Returns:
list[ProductInfo]: A list of objects containing product information.
"""
return self.handler.try_call(lambda w: w.get_products(asset_ids))
def get_historical_prices(self, asset_id: str, limit: int = 100) -> list[Price]:
"""
Gets historical price data for a *single* asset from the *first available* provider.
This method sequentially queries multiple market data sources and returns
data from the first one that responds successfully.
Use this for a fast lookup of price history.
Args:
asset_id (str): The asset ID to retrieve price data for.
limit (int): The maximum number of price data points to return. Defaults to 100.
Returns:
list[Price]: A list of Price objects representing historical data.
"""
return self.handler.try_call(lambda w: w.get_historical_prices(asset_id, limit))
def get_products_aggregated(self, asset_ids: list[str]) -> list[ProductInfo]: def get_products_aggregated(self, asset_ids: list[str]) -> list[ProductInfo]:
""" """
Restituisce i dati aggregati per una lista di asset_id.\n Gets product information for multiple assets from *all available providers* and *aggregates* the results.
Attenzione che si usano tutte le fonti, quindi potrebbe usare molte chiamate API (che potrebbero essere a pagamento).
This method queries all configured sources and then merges the data into a single,
comprehensive list. Use this for a complete report.
Warning: This may use a large number of API calls.
Args: Args:
asset_ids (list[str]): Lista di asset_id da cercare. asset_ids (list[str]): The list of asset IDs to retrieve information for.
Returns: Returns:
list[ProductInfo]: Lista di ProductInfo aggregati. list[ProductInfo]: A single, aggregated list of ProductInfo objects from all sources.
Raises: Raises:
Exception: If all wrappers fail to provide results. Exception: If all providers fail to return results.
""" """
all_products = self.handler.try_call_all(lambda w: w.get_products(asset_ids)) all_products = self.handler.try_call_all(lambda w: w.get_products(asset_ids))
return ProductInfo.aggregate(all_products) return ProductInfo.aggregate(all_products)
def get_historical_prices_aggregated(self, asset_id: str = "BTC", limit: int = 100) -> list[Price]: def get_historical_prices_aggregated(self, asset_id: str = "BTC", limit: int = 100) -> list[Price]:
""" """
Restituisce i dati storici aggregati per un asset_id. Usa i dati di tutte le fonti disponibili e li aggrega.\n Gets historical price data for a single asset from *all available providers* and *aggregates* the results.
Attenzione che si usano tutte le fonti, quindi potrebbe usare molte chiamate API (che potrebbero essere a pagamento).
This method queries all configured sources and then merges the data into a single,
comprehensive list of price points. Use this for a complete historical analysis.
Warning: This may use a large number of API calls.
Args: Args:
asset_id (str): Asset ID da cercare. asset_id (str): The asset ID to retrieve price data for. Defaults to "BTC".
limit (int): Numero massimo di dati storici da restituire. limit (int): The maximum number of price data points to retrieve *from each* provider. Defaults to 100.
Returns: Returns:
list[Price]: Lista di Price aggregati. list[Price]: A single, aggregated list of Price objects from all sources.
Raises: Raises:
Exception: If all wrappers fail to provide results. Exception: If all providers fail to return results.
""" """
all_prices = self.handler.try_call_all(lambda w: w.get_historical_prices(asset_id, limit)) all_prices = self.handler.try_call_all(lambda w: w.get_historical_prices(asset_id, limit))
return Price.aggregate(all_prices) return Price.aggregate(all_prices)

View File

@@ -42,31 +42,73 @@ class NewsAPIsTool(NewsWrapper, Toolkit):
) )
def get_top_headlines(self, limit: int = 100) -> list[Article]: def get_top_headlines(self, limit: int = 100) -> list[Article]:
"""
Retrieves top headlines from the *first available* news provider.
This method sequentially queries multiple sources (e.g., Google, DuckDuckGo)
and returns results from the first one that responds successfully.
Use this for a fast, general overview of the news.
Args:
limit (int): The maximum number of articles to retrieve. Defaults to 100.
Returns:
list[Article]: A list of Article objects from the single successful provider.
"""
return self.handler.try_call(lambda w: w.get_top_headlines(limit)) return self.handler.try_call(lambda w: w.get_top_headlines(limit))
def get_latest_news(self, query: str, limit: int = 100) -> list[Article]: def get_latest_news(self, query: str, limit: int = 100) -> list[Article]:
"""
Searches for the latest news on a specific topic from the *first available* provider.
This method sequentially queries multiple sources using the query
and returns results from the first one that responds successfully.
Use this for a fast, specific search.
Args:
query (str): The search topic to find relevant articles.
limit (int): The maximum number of articles to retrieve. Defaults to 100.
Returns:
list[Article]: A list of Article objects from the single successful provider.
"""
return self.handler.try_call(lambda w: w.get_latest_news(query, limit)) return self.handler.try_call(lambda w: w.get_latest_news(query, limit))
def get_top_headlines_aggregated(self, limit: int = 100) -> dict[str, list[Article]]: def get_top_headlines_aggregated(self, limit: int = 100) -> dict[str, list[Article]]:
""" """
Calls get_top_headlines on all wrappers/providers and returns a dictionary mapping their names to their articles. Retrieves top headlines from *all available providers* and aggregates the results.
This method queries all configured sources and returns a dictionary
mapping each provider's name to its list of articles.
Use this when you need a comprehensive report or to compare sources.
Args: Args:
limit (int): Maximum number of articles to retrieve from each provider. limit (int): The maximum number of articles to retrieve *from each* provider. Defaults to 100.
Returns: Returns:
dict[str, list[Article]]: A dictionary mapping providers names to their list of Articles dict[str, list[Article]]: A dictionary mapping provider names (str) to their list of Articles.
Raises: Raises:
Exception: If all wrappers fail to provide results. Exception: If all providers fail to return results.
""" """
return self.handler.try_call_all(lambda w: w.get_top_headlines(limit)) return self.handler.try_call_all(lambda w: w.get_top_headlines(limit))
def get_latest_news_aggregated(self, query: str, limit: int = 100) -> dict[str, list[Article]]: def get_latest_news_aggregated(self, query: str, limit: int = 100) -> dict[str, list[Article]]:
""" """
Calls get_latest_news on all wrappers/providers and returns a dictionary mapping their names to their articles. Searches for news on a specific topic from *all available providers* and aggregates the results.
This method queries all configured sources using the query and returns a dictionary
mapping each provider's name to its list of articles.
Use this when you need a comprehensive report or to compare sources.
Args: Args:
query (str): The search query to find relevant news articles. query (str): The search topic to find relevant articles.
limit (int): Maximum number of articles to retrieve from each provider. limit (int): The maximum number of articles to retrieve *from each* provider. Defaults to 100.
Returns: Returns:
dict[str, list[Article]]: A dictionary mapping providers names to their list of Articles dict[str, list[Article]]: A dictionary mapping provider names (str) to their list of Articles.
Raises: Raises:
Exception: If all wrappers fail to provide results. Exception: If all providers fail to return results.
""" """
return self.handler.try_call_all(lambda w: w.get_latest_news(query, limit)) return self.handler.try_call_all(lambda w: w.get_latest_news(query, limit))

View File

@@ -36,16 +36,36 @@ class SocialAPIsTool(SocialWrapper, Toolkit):
) )
def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]: def get_top_crypto_posts(self, limit: int = 5) -> list[SocialPost]:
"""
Retrieves top cryptocurrency-related posts from the *first available* social media provider.
This method sequentially queries multiple sources (e.g., Reddit, X)
and returns results from the first one that responds successfully.
Use this for a fast, general overview of top social posts.
Args:
limit (int): The maximum number of posts to retrieve. Defaults to 5.
Returns:
list[SocialPost]: A list of SocialPost objects from the single successful provider.
"""
return self.handler.try_call(lambda w: w.get_top_crypto_posts(limit)) return self.handler.try_call(lambda w: w.get_top_crypto_posts(limit))
def get_top_crypto_posts_aggregated(self, limit_per_wrapper: int = 5) -> dict[str, list[SocialPost]]: def get_top_crypto_posts_aggregated(self, limit_per_wrapper: int = 5) -> dict[str, list[SocialPost]]:
""" """
Calls get_top_crypto_posts on all wrappers/providers and returns a dictionary mapping their names to their posts. Retrieves top cryptocurrency-related posts from *all available providers* and aggregates the results.
This method queries all configured social media sources and returns a dictionary
mapping each provider's name to its list of posts.
Use this when you need a comprehensive report or to compare sources.
Args: Args:
limit_per_wrapper (int): Maximum number of posts to retrieve from each provider. limit_per_wrapper (int): The maximum number of posts to retrieve *from each* provider. Defaults to 5.
Returns: Returns:
dict[str, list[SocialPost]]: A dictionary where keys are wrapper names and values are lists of SocialPost objects. dict[str, list[SocialPost]]: A dictionary mapping provider names (str) to their list of SocialPost objects.
Raises: Raises:
Exception: If all wrappers fail to provide results. Exception: If all providers fail to return results.
""" """
return self.handler.try_call_all(lambda w: w.get_top_crypto_posts(limit_per_wrapper)) return self.handler.try_call_all(lambda w: w.get_top_crypto_posts(limit_per_wrapper))