diff --git a/.github/workflows/test-mcp-examples.yaml b/.github/workflows/test-mcp-examples.yaml
index 969adf4c6..c4b2d9cfb 100644
--- a/.github/workflows/test-mcp-examples.yaml
+++ b/.github/workflows/test-mcp-examples.yaml
@@ -1,22 +1,18 @@
-name: Test MCP Examples
+name: Test Pipeline Examples
 
 on:
-  push:
-    branches:
-      - main
-    paths:
-      - 'modules/ai-agents/examples/**/*.yaml'
-      - 'modules/ai-agents/examples/test-mcp-examples.sh'
   pull_request:
     branches:
       - main
     paths:
      - 'modules/ai-agents/examples/**/*.yaml'
-      - 'modules/ai-agents/examples/test-mcp-examples.sh'
+      - 'modules/ai-agents/examples/**/*.sh'
+      - 'modules/develop/examples/**/*.yaml'
+      - 'modules/develop/examples/**/*.sh'
 
 jobs:
   test-all-examples:
-    name: Test All MCP Examples
+    name: Test All Pipeline Examples
     runs-on: ubuntu-latest
 
     steps:
@@ -39,15 +35,33 @@ jobs:
           sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
           sudo chmod +x /usr/local/bin/yq
 
-      - name: Make test script executable
-        run: chmod +x modules/ai-agents/examples/test-mcp-examples.sh
+      - name: Make test scripts executable
+        run: |
+          find modules/ai-agents/examples -name "*.sh" -exec chmod +x {} \;
+          find modules/develop/examples -name "*.sh" -exec chmod +x {} \; 2>/dev/null || true
+
+      - name: Run MCP tools tests
+        run: |
+          cd modules/ai-agents/examples/mcp-tools
+          ./test-mcp-tools.sh
+
+      - name: Run ai-agents pipeline tests
+        run: |
+          cd modules/ai-agents/examples/pipelines
+          ./test-pipelines.sh
 
-      - name: Run MCP examples tests
+      - name: Run cookbook tests
         run: |
-          cd modules/ai-agents/examples
-          ./test-mcp-examples.sh
+          for dir in modules/develop/examples/cookbooks/*/; do
+            if compgen -G "${dir}test-*.sh" > /dev/null; then
+              echo "Testing ${dir}..."
+              cd "${dir}"
+              ./test-*.sh
+              cd - > /dev/null
+            fi
+          done
 
       - name: Test Summary
         if: always()
         run: |
-          echo "::notice title=MCP Examples Testing::All Cloud MCP examples have been validated"
+          echo "::notice title=Pipeline Examples Testing::All pipeline examples have been validated"
diff --git a/docs-data/personas.yaml b/docs-data/personas.yaml
index 46d4e7912..f3368e85d 100644
--- a/docs-data/personas.yaml
+++ b/docs-data/personas.yaml
@@ -2,34 +2,77 @@
 #
 # These personas represent the target audience for Redpanda Cloud documentation.
 # Use these when assigning :personas: attributes to documentation pages.
+#
+# This persona set covers two domains:
+# 1. Streaming/Data Platform: Real-time data streaming, connectors, pipelines
+# 2. 
Agentic Data Platform (ADP): AI agent development, governance, enterprise AI adoption schema_version: "1.0" repository: cloud-docs personas: - - id: app_developer - name: Application Developer - description: Builds applications that produce and consume data from Redpanda Cloud - experience_level: intermediate + # ============================================================================ + # TIER 1: Executive & Governance + # ============================================================================ + + - id: executive + name: Executive Stakeholder + description: CIO/CAIO/Head of AI Strategy driving enterprise AI adoption and governance + experience_level: executive goals: - - Connect applications to Redpanda Cloud clusters - - Produce and consume messages reliably - - Implement proper error handling and retries - - Optimize client performance + - Drive enterprise-wide AI adoption strategy + - Ensure ROI on AI investments + - Establish governance framework for agent deployments + - Manage cost and resource allocation + - Ensure compliance with organizational policies pain_points: - - Authentication and connection configuration - - Understanding Kafka client options - - Debugging connectivity issues - - Choosing the right client library + - Lack of visibility into agent usage and costs + - Difficulty enforcing governance at scale + - Unclear ROI metrics for AI initiatives + - Risk of shadow AI deployments + - Integration with existing enterprise systems content_preferences: - - Working code examples in multiple languages - - Connection configuration templates - - Client library comparisons - - Performance tuning guides + - High-level governance frameworks + - ROI and cost analysis + - Compliance and audit capabilities + - Executive dashboards and reporting + - Strategic planning guides typical_content_types: - - how-to - - tutorial + - overview + - concepts + - best-practices + + - id: security_leader + name: Security & Risk Leader + description: CISO/Compliance Officer protecting systems and enforcing data protection policies + experience_level: advanced + goals: + - Enforce agent policy and access controls + - Maintain audit trails for compliance + - Protect sensitive data and credentials + - Manage risk across agent deployments + - Ensure regulatory compliance + pain_points: + - Agent access to sensitive systems + - Lack of visibility into agent actions + - Difficult to audit agent behavior + - Credential management and rotation + - Compliance with data protection regulations + content_preferences: + - Security architecture patterns + - Policy enforcement mechanisms + - Audit trail documentation + - Compliance certification guides + - Incident response procedures + typical_content_types: + - concepts - reference + - best-practices + - troubleshooting + + # ============================================================================ + # TIER 2: Platform Operations + # ============================================================================ - id: platform_admin name: Platform Administrator @@ -55,47 +98,97 @@ personas: - reference - best-practices - - id: data_engineer - name: Data Engineer - description: Builds data pipelines using managed connectors and Redpanda Connect - experience_level: intermediate + - id: ai_platform_engineer + name: AI/ML Platform Engineer + description: Operates agent infrastructure, runtimes, and connectivity with governance controls + experience_level: advanced goals: - - Set up managed connectors to move data between systems - - Transform and route data 
reliably - - Monitor connector and pipeline health - - Handle errors and retries + - Deploy and operate agent runtime infrastructure + - Configure governance controls and policies + - Monitor agent performance and resource usage + - Onboard and manage MCP servers + - Ensure agent observability and debugging pain_points: - - Connector configuration complexity - - Debugging failed connectors - - Schema management and evolution - - Performance tuning + - Complex agent runtime configuration + - Difficult to troubleshoot agent failures + - Managing agent resource allocation + - Integrating governance with existing tools + - Scaling agent infrastructure content_preferences: - - Connector setup guides - - Transformation examples - - Error handling patterns - - Monitoring and troubleshooting + - Infrastructure setup guides + - Governance configuration patterns + - Observability and monitoring setup + - Performance tuning documentation + - Troubleshooting workflows typical_content_types: - how-to - - cookbook + - reference - troubleshooting + - best-practices - - id: ai_agent_developer - name: AI Agent Developer - description: Builds AI agents and integrations using MCP tools and LLM frameworks + # ============================================================================ + # TIER 3: Builders & Developers + # ============================================================================ + + - id: app_developer + name: Application Developer + description: Builds applications that produce and consume data from Redpanda Cloud experience_level: intermediate goals: + - Connect applications to Redpanda Cloud clusters + - Produce and consume messages reliably + - Implement proper error handling and retries + - Optimize client performance + pain_points: + - Authentication and connection configuration + - Understanding Kafka client options + - Debugging connectivity issues + - Choosing the right client library + content_preferences: + - Working code examples in multiple languages + - Connection configuration templates + - Client library comparisons + - Performance tuning guides + typical_content_types: + - how-to + - tutorial + - reference + + - id: agent_developer + name: Agent Developer + description: Builds AI agents, agentic workflows, and MCP tools that integrate with Redpanda Cloud and ADP + experience_level: intermediate + goals: + # MCP and streaming integration - Create MCP tools that AI assistants can discover and use - Deploy MCP servers to Redpanda Cloud - Integrate with AI/LLM applications - Debug agent-tool interactions + # Agentic workflows and governed deployment + - Build agents and workflows that solve business problems + - Use ADP catalog, templates, and curated datasets + - Design reasoning patterns and tool interactions + - Deploy agents into governed runtime pain_points: + # MCP and integration challenges - MCP configuration syntax - Testing tools before deployment - Limited AI-specific examples + # ADP and governance challenges + - Hard to discover existing templates, MCP servers, datasets + - Unclear access policies + - Brittle multi-step integrations + - Inconsistent testing/debugging environments content_preferences: + # Code examples and patterns - Working code examples with AI context - Testing and debugging workflows - Integration patterns + # Catalog and governance + - Rich catalog of agent templates and tools + - Governance introspection (what agent can/can't do) + - Replay-based debugging + - Streamlined deployment workflows typical_content_types: - tutorial - how-to @@ 
-127,6 +220,81 @@ personas: - reference - best-practices + # ============================================================================ + # TIER 4: Data & Knowledge Management + # ============================================================================ + + - id: data_engineer + name: Data Engineer + description: Builds data pipelines with managed connectors AND creates curated datasets for agent consumption + experience_level: intermediate + goals: + # Data movement and pipelines + - Set up managed connectors to move data between systems + - Transform and route data reliably + - Monitor connector and pipeline health + - Handle errors and retries + # Agent-ready datasets and RAG + - Create agent-ready datasets with federated SQL + - Ensure data quality and freshness for agents + - Expose data safely through governed views + - Provide clean RAG context via MCP servers + pain_points: + # Connector and pipeline challenges + - Connector configuration complexity + - Debugging failed connectors + - Schema management and evolution + - Performance tuning + # Data curation for agents + - Siloed data across sources + - Fragile RAG sources + - Schema drift + - Difficulty providing agent-ready datasets quickly + content_preferences: + # Connector and transformation + - Connector setup guides + - Transformation examples + - Error handling patterns + - Monitoring and troubleshooting + # Federated data and RAG + - Federated SQL query examples + - Governed view patterns + - RAG context design + - Data lineage visualization + typical_content_types: + - how-to + - cookbook + - troubleshooting + - reference + + - id: knowledge_manager + name: Knowledge & Operations Manager + description: Maintains organizational documentation and knowledge bases for agent consumption + experience_level: intermediate + goals: + - Ingest and maintain organizational knowledge bases + - Ensure content freshness and accuracy + - Optimize vector search for agent queries + - Manage knowledge base access and permissions + pain_points: + - Stale or outdated documentation + - Difficult to index and search content + - Managing content from multiple sources + - Ensuring agent retrieval accuracy + content_preferences: + - KB ingestion workflows + - Vector search optimization guides + - Content freshness strategies + - Access control patterns + typical_content_types: + - how-to + - best-practices + - troubleshooting + + # ============================================================================ + # TIER 5: Evaluation & End Users + # ============================================================================ + - id: evaluator name: Technical Evaluator description: Assessing Redpanda Cloud for their organization @@ -150,4 +318,27 @@ personas: - overview - concepts - tutorial - - get-started + + - id: business_user + name: Business End User + description: Uses agent-powered automations to complete business tasks + experience_level: beginner + goals: + - Complete tasks efficiently using agents + - Understand what agents can and cannot do + - Trust agent recommendations and actions + - Report issues when agents fail + pain_points: + - Unclear agent capabilities + - Unexpected agent behavior + - Lack of transparency in agent actions + - Difficulty getting help when agents fail + content_preferences: + - Simple, task-oriented guides + - Agent capability overviews + - Troubleshooting for common issues + - Trust and transparency documentation + typical_content_types: + - overview + - how-to + - troubleshooting diff --git 
a/local-antora-playbook.yml b/local-antora-playbook.yml index 3e5b6c1d8..d8d478c82 100644 --- a/local-antora-playbook.yml +++ b/local-antora-playbook.yml @@ -10,9 +10,6 @@ urls: latest_version_segment: 'current' output: clean: true -runtime: - log: - failure_level: error content: sources: - url: . diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc index 990e75f9e..d25d4f013 100644 --- a/modules/ROOT/nav.adoc +++ b/modules/ROOT/nav.adoc @@ -25,24 +25,43 @@ * xref:ai-agents:index.adoc[Agentic AI] ** xref:ai-agents:mcp/index.adoc[MCP] -*** xref:ai-agents:mcp/overview.adoc[MCP Overview] +*** xref:ai-agents:mcp/overview.adoc[Overview] *** xref:ai-agents:mcp/remote/index.adoc[Remote MCP] **** xref:ai-agents:mcp/remote/overview.adoc[Overview] -**** xref:ai-agents:mcp/remote/quickstart.adoc[Quickstart] **** xref:ai-agents:mcp/remote/concepts.adoc[Concepts] +**** xref:ai-agents:mcp/remote/quickstart.adoc[Quickstart] **** xref:ai-agents:mcp/remote/create-tool.adoc[Create a Tool] **** xref:ai-agents:mcp/remote/best-practices.adoc[Best Practices] **** xref:ai-agents:mcp/remote/tool-patterns.adoc[Tool Patterns] -**** xref:ai-agents:mcp/remote/troubleshooting.adoc[Troubleshooting] -**** xref:ai-agents:mcp/remote/admin-guide.adoc[Admin Guide] -***** xref:ai-agents:mcp/remote/manage-servers.adoc[Manage Servers] -***** xref:ai-agents:mcp/remote/scale-resources.adoc[Scale Resources] -***** xref:ai-agents:mcp/remote/monitor-activity.adoc[Monitor Activity] +**** xref:ai-agents:mcp/remote/troubleshooting.adoc[Troubleshoot] +**** xref:ai-agents:mcp/remote/manage-servers.adoc[Manage Servers] +**** xref:ai-agents:mcp/remote/monitor-mcp-servers.adoc[Monitor MCP Servers] +**** xref:ai-agents:mcp/remote/scale-resources.adoc[Scale Resources] *** xref:ai-agents:mcp/local/index.adoc[Redpanda Cloud Management MCP Server] **** xref:ai-agents:mcp/local/overview.adoc[Overview] **** xref:ai-agents:mcp/local/quickstart.adoc[Quickstart] **** xref:ai-agents:mcp/local/configuration.adoc[Configure] +** xref:ai-agents:agents/index.adoc[Agents] +*** xref:ai-agents:agents/get-started-index.adoc[Get Started] +**** xref:ai-agents:agents/overview.adoc[Overview] +**** xref:ai-agents:agents/concepts.adoc[Concepts] +**** xref:ai-agents:agents/quickstart.adoc[Quickstart] +**** xref:ai-agents:agents/tutorials/customer-support-agent.adoc[Multi-Tool Orchestration] +**** xref:ai-agents:agents/tutorials/transaction-dispute-resolution.adoc[Multi-Agent Systems] +*** xref:ai-agents:agents/build-index.adoc[Build Agents] +**** xref:ai-agents:agents/create-agent.adoc[Create an Agent] +**** xref:ai-agents:agents/prompt-best-practices.adoc[System Prompt Best Practices] +**** xref:ai-agents:agents/architecture-patterns.adoc[Architecture Patterns] +**** xref:ai-agents:agents/troubleshooting.adoc[Troubleshoot] +*** xref:ai-agents:agents/monitor-agents.adoc[Monitor Agents] +*** xref:ai-agents:agents/integration-index.adoc[Agent Integrations] +**** xref:ai-agents:agents/integration-overview.adoc[Integration Patterns] +**** xref:ai-agents:agents/pipeline-integration-patterns.adoc[Pipeline to Agent] +**** xref:ai-agents:agents/a2a-concepts.adoc[A2A Protocol] + +** xref:ai-agents:observability/concepts.adoc[Transcripts] + * xref:develop:connect/about.adoc[Redpanda Connect] ** xref:develop:connect/connect-quickstart.adoc[Quickstart] ** xref:develop:connect/configuration/about.adoc[] @@ -79,6 +98,7 @@ **** xref:develop:connect/components/inputs/gcp_spanner_cdc.adoc[] **** xref:develop:connect/components/inputs/generate.adoc[] **** 
xref:develop:connect/components/inputs/http_client.adoc[] +**** xref:develop:connect/components/inputs/http_server.adoc[] **** xref:develop:connect/components/inputs/inproc.adoc[] **** xref:develop:connect/components/inputs/kafka.adoc[] **** xref:develop:connect/components/inputs/kafka_franz.adoc[] @@ -182,6 +202,7 @@ **** xref:develop:connect/components/outputs/timeplus.adoc[] *** xref:develop:connect/components/processors/about.adoc[] +**** xref:develop:connect/components/processors/a2a_message.adoc[] **** xref:develop:connect/components/processors/archive.adoc[] **** xref:develop:connect/components/processors/avro.adoc[] **** xref:develop:connect/components/processors/aws_bedrock_chat.adoc[] @@ -337,6 +358,7 @@ *** xref:develop:connect/cookbooks/joining_streams.adoc[] *** xref:develop:connect/cookbooks/redpanda_migrator.adoc[] *** xref:develop:connect/cookbooks/rag.adoc[] +*** xref:develop:connect/cookbooks/jira.adoc[] * xref:develop:index.adoc[Develop] ** xref:develop:kafka-clients.adoc[] diff --git a/modules/ai-agents/examples/agents/account-agent-prompt.txt b/modules/ai-agents/examples/agents/account-agent-prompt.txt new file mode 100644 index 000000000..292fa59b6 --- /dev/null +++ b/modules/ai-agents/examples/agents/account-agent-prompt.txt @@ -0,0 +1,62 @@ +You are the account agent for ACME Bank's dispute resolution system. You specialize in retrieving customer account information and transaction data. + +## Your Responsibilities + +- Look up customer account details with PII masking +- Retrieve specific transaction information +- Provide transaction pattern analysis +- Return only data available from your tools + +## Available Tools + +1. **get_customer_account**: Returns account data with masked PII + - Input: customer_id + - Returns: Name, masked email, card last 4, account type, location + +2. **get_transaction_details**: Returns detailed transaction information + - Input: transaction_id + - Returns: Amount, merchant, date, location, card used + +3. **get_transaction_history**: Returns spending pattern analysis + - Input: customer_id + - Returns: Aggregated spending patterns, categories, locations + +## PII Protection Rules + +Always return masked data: +- Email: First letter + **** + @domain (e.g., "s****@example.com") +- Phone: ***-***-XXXX (last 4 digits only) +- Card: Last 4 digits only +- Never return: Full card numbers, SSNs, full account numbers + +## Response Format + +Structure responses clearly: + +"I found the following account information: +- Customer: [Name] +- Account Type: [Type] +- Card ending in: [Last 4] +- Primary Location: [City, State, Country] + +Transaction details: +- Amount: $[Amount] +- Merchant: [Merchant Name] +- Date: [Date] +- Location: [Transaction Location]" + +## Error Handling + +If data not found: +- "I couldn't find an account for customer ID [ID]" +- "No transaction found with ID [ID]" +- Never guess or make up information + +## What You Don't Do + +- Don't calculate fraud scores (that's fraud-agent's job) +- Don't verify merchants (that's merchant-agent's job) +- Don't make recommendations about disputes +- Don't log audit events (that's compliance-agent's job) + +Your job is data retrieval only. Provide accurate, masked data and let the root agent make decisions. 
diff --git a/modules/ai-agents/examples/agents/compliance-agent-prompt.txt b/modules/ai-agents/examples/agents/compliance-agent-prompt.txt
new file mode 100644
index 000000000..8704ecd35
--- /dev/null
+++ b/modules/ai-agents/examples/agents/compliance-agent-prompt.txt
@@ -0,0 +1,120 @@
+You are the compliance agent for ACME Bank's dispute resolution system. You specialize in regulatory requirements and audit logging.
+
+## Your Responsibilities
+
+- Log all dispute investigation actions for audit trail
+- Check regulatory requirements for dispute types
+- Verify compliance with banking regulations
+- Provide timeline and documentation requirements
+
+## Available Tools
+
+1. **log_audit_event**: Log investigation actions
+   - Input: Transaction ID, customer ID, decision, evidence, outcome
+   - Returns: Audit record confirmation
+
+2. **check_regulatory_requirements**: Look up compliance rules
+   - Input: dispute_type (fraud, billing_error, service_not_received)
+   - Returns: Regulations, timelines, documentation requirements
+
+## Regulatory Frameworks
+
+You work with these regulations:
+
+1. **Regulation E (Electronic Fund Transfer Act)**
+   - Applies to: Fraud disputes, unauthorized transactions
+   - Customer liability: $50 if reported within 2 business days, up to $500 if reported after 2 business days but within 60 days
+   - Bank must provide provisional credit within 10 business days
+   - Investigation deadline: 90 days
+
+2. **Fair Credit Billing Act**
+   - Applies to: Billing errors, disputes
+   - Customer must dispute within 60 days of statement
+   - Bank must acknowledge within 30 days
+   - Resolution deadline: 90 days
+
+3. **Card Network Rules (Visa/Mastercard)**
+   - Chargeback rights and timelines
+   - Merchant response requirements
+   - Evidence requirements
+
+## Documentation Requirements
+
+For each dispute type, log:
+
+**Fraud Disputes:**
+- Customer dispute affidavit
+- Transaction details
+- Fraud indicators identified
+- Decision and reasoning
+- Customer notification
+
+**Billing Errors:**
+- Billing statement
+- Customer dispute letter
+- Merchant communication attempts
+- Resolution details
+
+**Service Not Received:**
+- Proof of non-delivery
+- Merchant communication attempts
+- Order/booking confirmation
+- Resolution outcome
+
+## Timeline Tracking
+
+Monitor key deadlines:
+
+- Acknowledge dispute: 24 hours to 30 days (varies by type)
+- Provisional credit: 10 business days (fraud)
+- Final decision: 90 days (most disputes)
+- Chargeback filing: 120 days (service issues)
+
+## Response Format
+
+For regulatory checks:
+
+"Compliance Requirements:
+
+Dispute Type: [Type]
+Applicable Regulations:
+- [Regulation 1]
+- [Regulation 2]
+
+Customer Rights:
+- Liability Limit: $[Amount]
+- Notification Deadline: [Days] days
+
+Bank Obligations:
+- Provisional Credit: [Required/Not Required]
+- Investigation Deadline: [Days] days
+- Customer Notification: [Required/Not Required]
+
+Documentation Required:
+- [Document 1]
+- [Document 2]
+- [Document 3]
+
+Timeline:
+- Acknowledge: [Timeframe]
+- Decision: [Timeframe]"
+
+For audit logging:
+
+"Audit Event Logged:
+
+Audit ID: [UUID]
+Timestamp: [ISO 8601]
+Investigation Details: [Summary]
+Decision: [Decision]
+Evidence: [Evidence Sources]
+Status: Recorded"
+
+## What You Don't Do
+
+- Don't retrieve transaction or account data
+- Don't calculate fraud scores
+- Don't verify merchants
+- Don't make dispute recommendations
+
+Your job is compliance and audit only. Ensure all investigations are properly documented and regulatory requirements are met.
diff --git a/modules/ai-agents/examples/agents/dispute-root-agent-prompt.txt b/modules/ai-agents/examples/agents/dispute-root-agent-prompt.txt new file mode 100644 index 000000000..a22888ddd --- /dev/null +++ b/modules/ai-agents/examples/agents/dispute-root-agent-prompt.txt @@ -0,0 +1,130 @@ +You are the root agent for a transaction dispute resolution system at ACME Bank. Your role is to orchestrate sub-agents and make final recommendations to customers about disputed transactions. + +## Your Responsibilities + +- Route customer queries to appropriate sub-agents +- Aggregate results from multiple sub-agents +- Make evidence-based recommendations +- Communicate clearly with customers +- Escalate complex cases to human agents + +## Available Sub-Agents + +You have access to four specialized sub-agents via A2A protocol: + +1. **account-agent**: Retrieves customer account data and transaction history +2. **fraud-agent**: Analyzes fraud risk and calculates risk scores +3. **merchant-agent**: Verifies merchant legitimacy and reputation +4. **compliance-agent**: Logs audit events and checks regulatory requirements + +## Decision Framework + +When investigating a dispute, follow this process: + +1. Start with account-agent to get customer and transaction details +2. Route to fraud-agent if fraud is suspected +3. Route to merchant-agent to verify merchant legitimacy +4. Route to compliance-agent to log the investigation and check requirements +5. Aggregate all evidence and make recommendation + +## Risk-Based Recommendations + +Based on aggregated evidence, take these actions: + +- **Fraud score 80-100 + high merchant risk**: Block the transaction immediately, block the card, issue new card +- **Fraud score 60-79**: Hold for specialist review, temporary card block +- **Fraud score 40-59**: Ask customer to verify with merchant first before taking action +- **Fraud score 0-39**: Likely legitimate transaction, help customer recall the purchase + +## Escalation Criteria + +Escalate to human agent when: + +- Fraud score is medium (40-70) and evidence is conflicting +- Customer disputes the recommendation strongly +- Regulatory requirements exceed available tools +- Subscription or recurring billing issues require merchant intervention + +## Compliance Constraints + +Never: + +- Expose full credit card numbers or SSNs (use masked versions only) +- Make guarantees about dispute outcomes (use "likely" or "recommend") +- Process disputes without logging to compliance-agent +- Reveal internal fraud detection logic or merchant scoring details to customers +- Make decisions without sub-agent evidence +- Ask customers for screenshots or additional proof (you have the transaction records) + +## Customer Communication Style + +**Clear, bank-appropriate language:** +- Use "I've reviewed your account" not "I called the account-agent" +- Use "this charge doesn't match your typical spending" not "fraud score is 95/100" +- Use "I'm blocking this card" not "I recommend you freeze it" +- Don't reveal merchant reputation scores or fraud report counts + +**Proactive protection:** +For likely fraud (score 80+): +- Block the card immediately: "I'm blocking your card ending in [XXXX] right now to prevent additional fraudulent charges" +- Issue replacement: "We'll send you a replacement card with a new number" +- Process the claim: "You'll see the credit for this charge within 10 business days" + +For uncertain cases (score 40-79): +- Temporary block: "I'm placing a temporary hold on this card while we investigate" +- 
Escalate: "A specialist will contact you within 24 hours"
+
+**Concise responses:**
+Keep responses to 3-4 short paragraphs maximum. Customers want action, not detailed analysis.
+
+## Example Investigation Flow
+
+Customer: "I see a $1,847.99 charge from 'LUXURY WATCHES INT' in Singapore on transaction TXN-89012. This is fraud. My customer ID is CUST-1001."
+
+**Your response to customer:**
+
+"I've reviewed your account and this transaction. This charge doesn't match your typical spending pattern, and you haven't made international purchases in the past 90 days.
+
+Here's what I'm doing:
+- Blocking your card ending in 4532 right now to prevent any additional unauthorized charges
+- Approving your dispute for the full $1,847.99 - you'll see the credit within 10 business days
+- Sending you a replacement card with a new number within 5-7 business days
+
+Your dispute has been logged and meets the requirements under Regulation E for unauthorized electronic fund transfers.
+
+Is there anything else I can help you with today?"
+
+**What you actually did behind the scenes:**
+1. Called account-agent → confirmed US-based customer, no international history
+2. Called fraud-agent → received score 95/100 (critical risk)
+3. Called merchant-agent → confirmed high fraud indicators
+4. Called compliance-agent → logged under Regulation E
+5. Made decision: transaction is fraudulent, block card immediately
+
+(Don't share the scores or technical details with the customer)
+
+**Note:** When talking to customers, use natural banking language like "approving your dispute." But for programmatic JSON responses, "recommendation" describes the TRANSACTION status, not the dispute claim status.
+
+## Programmatic Invocations
+
+When invoked from a pipeline or automated system (you'll receive transaction data without conversational context), respond with ONLY valid JSON. No explanatory text, no markdown formatting, no commentary before or after - just the JSON object.
+
+Required JSON format:
+{
+  "recommendation": "block_and_investigate" | "hold_for_review" | "approve",
+  "fraud_score": <number from 0-100>,
+  "confidence": "high" | "medium" | "low",
+  "reasoning": "<brief explanation of the evidence behind the decision>"
+}
+
+**Recommendation field definitions:**
+- **"block_and_investigate"**: Transaction is fraudulent. Block the card immediately and investigate.
+- **"hold_for_review"**: Unclear if fraudulent. Place temporary hold and escalate to human specialist.
+- **"approve"**: Transaction is legitimate. Customer likely forgot about it or needs clarification.
+
+**Mapping from conversational actions:**
+- If you would block the card → use "block_and_investigate"
+- If you would escalate to specialist → use "hold_for_review"
+- If transaction seems legitimate → use "approve"
+
+The pipeline will parse this JSON to make automated decisions. Any non-JSON response will cause processing failures.
diff --git a/modules/ai-agents/examples/agents/fraud-agent-prompt.txt b/modules/ai-agents/examples/agents/fraud-agent-prompt.txt
new file mode 100644
index 000000000..b2c8a26de
--- /dev/null
+++ b/modules/ai-agents/examples/agents/fraud-agent-prompt.txt
@@ -0,0 +1,85 @@
+You are the fraud detection agent for ACME Bank's dispute resolution system. You specialize in analyzing transactions for fraud indicators and calculating risk scores.
+
+## Your Responsibilities
+
+- Calculate fraud risk scores (0-100 scale)
+- Identify specific fraud indicators
+- Provide risk assessment reasoning
+- Return confidence levels with assessments
+
+## Available Tools
+
+1. 
**calculate_fraud_score**: Multi-factor fraud scoring + - Input: transaction_id, customer_id + - Returns: Fraud score (0-100), risk level, breakdown by factor, recommendation + +2. **get_risk_indicators**: Detailed fraud signal detection + - Input: transaction_id + - Returns: Array of risk indicators with severity levels + +## Risk Scoring Factors + +Consider these factors: + +1. **Location Risk** (0-30 points) + - International vs. customer's country + - City mismatch from customer's primary location + - High-risk countries + +2. **Merchant Risk** (0-25 points) + - Merchant reputation score + - Fraud report history + - Business verification status + +3. **Amount Risk** (0-25 points) + - Deviation from customer's average + - Unusually large for merchant category + - Round numbers (potential testing) + +4. **Velocity Risk** (0-10 points) + - Multiple transactions in short timeframe + - Rapid succession of purchases + - Geographic impossibility + +5. **Category Risk** (0-10 points) + - Outside customer's typical categories + - High-risk MCC codes + - Mismatch with spending patterns +## Risk Levels + +- **Critical (80-100)**: Almost certainly fraud, immediate action needed +- **High (60-79)**: Strong fraud indicators, hold for review +- **Medium (40-59)**: Some concerning factors, customer verification recommended +- **Low (20-39)**: Minor flags, likely legitimate +- **Minimal (0-19)**: No significant fraud indicators + +## Response Format + +Structure your analysis: + +"Fraud Risk Analysis: + +Fraud Score: [Score]/100 - [Risk Level] + +Risk Breakdown: +- Location Risk: [Score] - [Explanation] +- Merchant Risk: [Score] - [Explanation] +- Amount Risk: [Score] - [Explanation] +- Velocity Risk: [Score] - [Explanation] +- Category Risk: [Score] - [Explanation] + +Key Indicators: +- [Indicator 1] +- [Indicator 2] +- [Indicator 3] + +Recommendation: [block_and_investigate | hold_for_review | monitor_closely | approve]" + +## What You Don't Do + +- Don't retrieve account or transaction data (use what's provided) +- Don't verify merchants (that's merchant-agent's job) +- Don't make final dispute decisions (provide recommendation only) +- Don't log audit events + +Your job is fraud analysis only. Provide objective risk assessment based on available data. diff --git a/modules/ai-agents/examples/agents/merchant-agent-prompt.txt b/modules/ai-agents/examples/agents/merchant-agent-prompt.txt new file mode 100644 index 000000000..bb6ee31da --- /dev/null +++ b/modules/ai-agents/examples/agents/merchant-agent-prompt.txt @@ -0,0 +1,87 @@ +You are the merchant verification agent for ACME Bank's dispute resolution system. You specialize in verifying merchant legitimacy and reputation. + +## Your Responsibilities + +- Verify merchant reputation and legitimacy +- Look up merchant category codes (MCC) +- Identify known fraud patterns for merchant categories +- Provide merchant-specific insights + +## Available Tools + +1. **verify_merchant**: Merchant reputation lookup + - Input: merchant_name + - Returns: Reputation score, fraud reports, business verification, red flags + +2. 
**get_merchant_category**: MCC code analysis + - Input: mcc (4-digit code) + - Returns: Category details, typical transaction ranges, fraud risk profile + +## Reputation Scoring + +Interpret reputation scores: + +- **90-100**: Excellent, trusted merchant +- **70-89**: Good, established business +- **50-69**: Moderate, some concerns +- **30-49**: Poor, significant red flags +- **0-29**: High risk, strong fraud indicators + +## Red Flags to Report + +Watch for: +- High volume of fraud reports +- Recently established businesses in high-risk categories +- Unverified business registration +- Pattern of chargebacks +- Operates in high-risk jurisdictions +- Billing descriptor mismatches + +## Common Merchant Issues + +Be aware of legitimate merchant problems: + +- **Subscription services**: Known for duplicate billing, difficult cancellation +- **International hotels**: Currency conversion confusion, incidental charges +- **Online marketplaces**: Third-party sellers, billing descriptor confusion +- **Travel booking**: Pre-authorization holds, cancellation fee disputes + +## Response Format + +Structure your verification: + +"Merchant Verification Results: + +Merchant: [Name] +Reputation Score: [Score]/100 - [Level] +Verification Status: [Verified | Unverified | Unknown] + +Business Details: +- Country: [Country] +- Years in Operation: [Years] +- Registration: [Verified/Unverified] + +Fraud Reports: +- Total Reports: [Count] +- Recent (30 days): [Count] +- Confirmed Fraud Cases: [Count] + +Category Analysis (MCC [Code]): +- Category: [Category Name] +- Risk Profile: [High/Medium/Low] +- Typical Transaction Range: $[Min]-$[Max] + +Red Flags: +- [Flag 1] +- [Flag 2] + +Recommendation: [trusted_merchant | verify_subscription_details | manual_review_required | block_merchant]" + +## What You Don't Do + +- Don't calculate fraud scores (that's fraud-agent's job) +- Don't retrieve transaction data (that's account-agent's job) +- Don't make final dispute decisions +- Don't log audit events + +Your job is merchant verification only. Provide objective assessment of merchant legitimacy. 
diff --git a/modules/ai-agents/examples/memory_cache.yaml b/modules/ai-agents/examples/mcp-tools/caches/memory_cache.yaml similarity index 100% rename from modules/ai-agents/examples/memory_cache.yaml rename to modules/ai-agents/examples/mcp-tools/caches/memory_cache.yaml diff --git a/modules/ai-agents/examples/redpanda_cache.yaml b/modules/ai-agents/examples/mcp-tools/caches/redpanda_cache.yaml similarity index 100% rename from modules/ai-agents/examples/redpanda_cache.yaml rename to modules/ai-agents/examples/mcp-tools/caches/redpanda_cache.yaml diff --git a/modules/ai-agents/examples/mcp-tools/caches/session_cache.yaml b/modules/ai-agents/examples/mcp-tools/caches/session_cache.yaml new file mode 100644 index 000000000..851773db9 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/caches/session_cache.yaml @@ -0,0 +1,13 @@ +# In-memory cache for session data +# Example of cache tool +# tag::complete[] +label: session-cache + +memory: + default_ttl: 300s + +meta: + mcp: + enabled: true + description: "In-memory cache for session data" +# end::complete[] diff --git a/modules/ai-agents/examples/mcp-tools/inputs/consume_redpanda.yaml b/modules/ai-agents/examples/mcp-tools/inputs/consume_redpanda.yaml new file mode 100644 index 000000000..841dd5aaa --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/inputs/consume_redpanda.yaml @@ -0,0 +1,22 @@ +# Consume events from Redpanda topics +# Use for event-driven AI agents, audit logs, or data change streams +label: consume-events + +# tag::component[] +redpanda: + seed_brokers: [ "${REDPANDA_BROKERS}" ] + topics: [ "user-events" ] + consumer_group: "mcp-event-processor" + start_from_oldest: true + tls: + enabled: true + sasl: + - mechanism: "${REDPANDA_SASL_MECHANISM}" + username: "${REDPANDA_SASL_USERNAME}" + password: "${REDPANDA_SASL_PASSWORD}" +# end::component[] + +meta: + mcp: + enabled: true + description: "Consume events from user-events topic" diff --git a/modules/ai-agents/examples/mcp-tools/inputs/event_driven_workflow.yaml b/modules/ai-agents/examples/mcp-tools/inputs/event_driven_workflow.yaml new file mode 100644 index 000000000..f549f88a0 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/inputs/event_driven_workflow.yaml @@ -0,0 +1,39 @@ +# Event-driven workflow orchestration +# Use for multi-step processes, saga patterns, microservice coordination +label: order-workflow + +# tag::component[] +redpanda: + seed_brokers: [ "${REDPANDA_BROKERS}" ] + topics: [ "order-events" ] + consumer_group: "workflow-orchestrator" + tls: + enabled: true + sasl: + - mechanism: "${REDPANDA_SASL_MECHANISM}" + username: "${REDPANDA_SASL_USERNAME}" + password: "${REDPANDA_SASL_PASSWORD}" + processors: + - switch: + - check: this.event_type == "order_created" + processors: + - http: + url: "${secrets.INVENTORY_API}/reserve" + verb: POST + headers: + Content-Type: application/json + body: '{"order_id": "${! this.order_id }", "items": ${! json("items") }}' + - check: this.event_type == "payment_confirmed" + processors: + - http: + url: "${secrets.FULFILLMENT_API}/ship" + verb: POST + headers: + Content-Type: application/json + body: '{"order_id": "${! 
this.order_id }"}' +# end::component[] + +meta: + mcp: + enabled: true + description: "Process order events and orchestrate fulfillment workflow" diff --git a/modules/ai-agents/examples/generate_input.yaml b/modules/ai-agents/examples/mcp-tools/inputs/generate_input.yaml similarity index 100% rename from modules/ai-agents/examples/generate_input.yaml rename to modules/ai-agents/examples/mcp-tools/inputs/generate_input.yaml diff --git a/modules/ai-agents/examples/mcp-tools/inputs/read_events.yaml b/modules/ai-agents/examples/mcp-tools/inputs/read_events.yaml new file mode 100644 index 000000000..8627214f5 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/inputs/read_events.yaml @@ -0,0 +1,21 @@ +# Read events from Redpanda +# Example of input tool +# tag::complete[] +label: read-events + +redpanda: # <1> + seed_brokers: ["${REDPANDA_BROKERS}"] + topics: ["events"] + consumer_group: "mcp-reader" + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${secrets.MCP_USERNAME}" + password: "${secrets.MCP_PASSWORD}" + +meta: + mcp: + enabled: true + description: "Read events from Redpanda" +# end::complete[] diff --git a/modules/ai-agents/examples/mcp-tools/inputs/stream_processing.yaml b/modules/ai-agents/examples/mcp-tools/inputs/stream_processing.yaml new file mode 100644 index 000000000..0afbd9c14 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/inputs/stream_processing.yaml @@ -0,0 +1,28 @@ +# Process streaming data with aggregations +# Use for real-time analytics, windowed aggregations, computing metrics +label: process-sensor-data + +# tag::component[] +redpanda: + seed_brokers: [ "${REDPANDA_BROKERS}" ] + topics: [ "sensor-readings" ] + consumer_group: "analytics-processor" + tls: + enabled: true + sasl: + - mechanism: "${REDPANDA_SASL_MECHANISM}" + username: "${REDPANDA_SASL_USERNAME}" + password: "${REDPANDA_SASL_PASSWORD}" + processors: + - mapping: | + root.sensor_id = this.sensor_id + root.avg_temperature = this.readings.map_each(r -> r.temperature).mean() + root.max_temperature = this.readings.map_each(r -> r.temperature).max() + root.reading_count = this.readings.length() + root.window_end = now() +# end::component[] + +meta: + mcp: + enabled: true + description: "Process sensor readings and compute aggregations" diff --git a/modules/ai-agents/examples/mcp-tools/outputs/publish_event.yaml b/modules/ai-agents/examples/mcp-tools/outputs/publish_event.yaml new file mode 100644 index 000000000..748aea014 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/outputs/publish_event.yaml @@ -0,0 +1,20 @@ +# Publish event to Redpanda +# Example of output tool +# tag::complete[] +label: publish-event + +redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: "processed-events" + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${secrets.MCP_USERNAME}" + password: "${secrets.MCP_PASSWORD}" + +meta: + mcp: + enabled: true + description: "Publish event to Redpanda" +# end::complete[] diff --git a/modules/ai-agents/examples/mcp-tools/outputs/publish_with_timestamp.yaml b/modules/ai-agents/examples/mcp-tools/outputs/publish_with_timestamp.yaml new file mode 100644 index 000000000..93fcd82f5 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/outputs/publish_with_timestamp.yaml @@ -0,0 +1,25 @@ +# Publish event with timestamp +# Example of output tool with processors +# tag::complete[] +label: publish-with-timestamp + +processors: + - mutation: | + root = this + root.published_at = now() + +redpanda: + seed_brokers: 
["${REDPANDA_BROKERS}"] + topic: "processed-events" + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${secrets.MCP_USERNAME}" + password: "${secrets.MCP_PASSWORD}" + +meta: + mcp: + enabled: true + description: "Add timestamp and publish to Redpanda" +# end::complete[] diff --git a/modules/ai-agents/examples/redpanda_output.yaml b/modules/ai-agents/examples/mcp-tools/outputs/redpanda_output.yaml similarity index 100% rename from modules/ai-agents/examples/redpanda_output.yaml rename to modules/ai-agents/examples/mcp-tools/outputs/redpanda_output.yaml diff --git a/modules/ai-agents/examples/redpanda_output_with_processors.yaml b/modules/ai-agents/examples/mcp-tools/outputs/redpanda_output_with_processors.yaml similarity index 97% rename from modules/ai-agents/examples/redpanda_output_with_processors.yaml rename to modules/ai-agents/examples/mcp-tools/outputs/redpanda_output_with_processors.yaml index eea4b323f..30e4a387b 100644 --- a/modules/ai-agents/examples/redpanda_output_with_processors.yaml +++ b/modules/ai-agents/examples/mcp-tools/outputs/redpanda_output_with_processors.yaml @@ -3,7 +3,7 @@ label: summarize_and_publish processors: - openai_chat_completion: api_key: "${secrets.OPENAI_API_KEY}" - model: "gpt-4" + model: "gpt-5.2" prompt: ${! json("question") } - mapping: | root.question = this.question diff --git a/modules/ai-agents/examples/mcp-tools/processors/calculate_fraud_score.yaml b/modules/ai-agents/examples/mcp-tools/processors/calculate_fraud_score.yaml new file mode 100644 index 000000000..280ddef6f --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/calculate_fraud_score.yaml @@ -0,0 +1,108 @@ +label: calculate_fraud_score +mapping: | + root = match { + this.transaction_id == "TXN-89012" && this.customer_id == "CUST-1001" => { + "transaction_id": "TXN-89012", + "customer_id": "CUST-1001", + "fraud_score": 95, + "risk_level": "critical", + "score_breakdown": { + "location_risk": 35, + "merchant_risk": 30, + "amount_risk": 25, + "velocity_risk": 0, + "category_risk": 20 + }, + "factors_detected": [ + "unusual_location", + "questionable_merchant", + "unusual_amount", + "unusual_category" + ], + "reasoning": "International transaction from Singapore with no customer history of international purchases. High-value jewelry purchase (14.5x customer average). Merchant has significant fraud indicators.", + "recommendation": "block_and_investigate" + }, + this.transaction_id == "TXN-89013" && this.customer_id == "CUST-1001" => { + "transaction_id": "TXN-89013", + "customer_id": "CUST-1001", + "fraud_score": 8, + "risk_level": "minimal", + "score_breakdown": { + "location_risk": 0, + "merchant_risk": 0, + "amount_risk": 0, + "velocity_risk": 0, + "category_risk": 0 + }, + "factors_detected": [], + "reasoning": "Local transaction from trusted merchant in customer's typical spending category and amount range.", + "recommendation": "approve" + }, + this.transaction_id == "TXN-89014" && this.customer_id == "CUST-1002" => { + "transaction_id": "TXN-89014", + "customer_id": "CUST-1002", + "fraud_score": 52, + "risk_level": "medium", + "score_breakdown": { + "location_risk": 0, + "merchant_risk": 15, + "amount_risk": 0, + "velocity_risk": 8, + "category_risk": 0 + }, + "factors_detected": [ + "questionable_merchant", + "high_velocity" + ], + "reasoning": "Recurring subscription service with known billing issues. Multiple charges detected from same merchant. 
Moderate merchant reputation score.", + "recommendation": "monitor_closely" + }, + this.transaction_id == "TXN-89015" && this.customer_id == "CUST-1003" => { + "transaction_id": "TXN-89015", + "customer_id": "CUST-1003", + "fraud_score": 12, + "risk_level": "minimal", + "score_breakdown": { + "location_risk": 0, + "merchant_risk": 0, + "amount_risk": 5, + "velocity_risk": 0, + "category_risk": 0 + }, + "factors_detected": [ + "slightly_elevated_amount" + ], + "reasoning": "International hotel charge consistent with customer's frequent travel patterns. Amount within expected range for lodging category.", + "recommendation": "approve" + }, + _ => { + "transaction_id": this.transaction_id, + "customer_id": this.customer_id, + "fraud_score": 50, + "risk_level": "medium", + "score_breakdown": { + "location_risk": 0, + "merchant_risk": 0, + "amount_risk": 0, + "velocity_risk": 0, + "category_risk": 0 + }, + "factors_detected": [], + "reasoning": "Insufficient data to calculate accurate fraud score for this transaction/customer combination.", + "recommendation": "monitor_closely" + } + } + +meta: + mcp: + enabled: true + description: "Calculate fraud risk score based on transaction patterns and risk indicators. Use TXN-89012 through TXN-89015 with corresponding customer IDs for testing." + properties: + - name: transaction_id + type: string + description: "Transaction identifier to analyze (format TXN-XXXXX)" + required: true + - name: customer_id + type: string + description: "Customer identifier for historical analysis (format CUST-XXXX)" + required: true diff --git a/modules/ai-agents/examples/mcp-tools/processors/check_regulatory_requirements.yaml b/modules/ai-agents/examples/mcp-tools/processors/check_regulatory_requirements.yaml new file mode 100644 index 000000000..f8df06efd --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/check_regulatory_requirements.yaml @@ -0,0 +1,116 @@ +label: check_regulatory_requirements +mapping: | + root = match { + this.dispute_type == "fraud" => { + "dispute_type": "fraud", + "regulations_applicable": [ + "Regulation E (Electronic Fund Transfer Act)", + "Fair Credit Billing Act", + "Card Network Rules (Visa/Mastercard)" + ], + "customer_rights": { + "liability_limit": 50.00, + "zero_liability_if_reported_promptly": true, + "notification_deadline_days": 60 + }, + "bank_obligations": { + "provisional_credit_required": true, + "provisional_credit_deadline_days": 10, + "investigation_deadline_days": 90, + "customer_notification_required": true + }, + "documentation_required": [ + "Customer dispute affidavit", + "Transaction details", + "Customer communication log", + "Investigation findings" + ], + "timeline": { + "acknowledge_dispute_hours": 24, + "provisional_credit_days": 10, + "final_decision_days": 90 + } + }, + this.dispute_type == "billing_error" => { + "dispute_type": "billing_error", + "regulations_applicable": [ + "Fair Credit Billing Act", + "Regulation Z (Truth in Lending)" + ], + "customer_rights": { + "dispute_window_days": 60, + "interest_suspension": true + }, + "bank_obligations": { + "acknowledge_dispute_days": 30, + "investigation_deadline_days": 90, + "correction_required_if_error_found": true + }, + "documentation_required": [ + "Billing statement", + "Customer dispute letter", + "Merchant communication (if any)", + "Investigation results" + ], + "timeline": { + "acknowledge_dispute_days": 30, + "resolution_days": 90 + } + }, + this.dispute_type == "service_not_received" => { + "dispute_type": "service_not_received", + 
"regulations_applicable": [ + "Fair Credit Billing Act", + "Card Network Chargeback Rules" + ], + "customer_rights": { + "chargeback_eligibility": true, + "dispute_window_days": 120 + }, + "bank_obligations": { + "verify_merchant_response": true, + "chargeback_processing_days": 45 + }, + "documentation_required": [ + "Proof of non-delivery or service failure", + "Merchant communication attempts", + "Order/booking confirmation", + "Merchant response (if obtained)" + ], + "timeline": { + "merchant_response_wait_days": 15, + "chargeback_filing_days": 120 + } + }, + _ => { + "dispute_type": "general", + "regulations_applicable": [ + "Fair Credit Billing Act" + ], + "customer_rights": { + "dispute_right": true, + "dispute_window_days": 60 + }, + "bank_obligations": { + "investigation_required": true, + "customer_notification_required": true + }, + "documentation_required": [ + "Customer dispute statement", + "Transaction evidence" + ], + "timeline": { + "standard_review_days": 30 + } + } + } + +meta: + mcp: + enabled: true + description: "Check regulatory requirements for dispute resolution based on dispute type." + properties: + - name: dispute_type + type: string + description: "Type of dispute (fraud, billing_error, service_not_received)" + required: true diff --git a/modules/ai-agents/examples/customer_enrichment.yaml b/modules/ai-agents/examples/mcp-tools/processors/customer_enrichment.yaml similarity index 100% rename from modules/ai-agents/examples/customer_enrichment.yaml rename to modules/ai-agents/examples/mcp-tools/processors/customer_enrichment.yaml diff --git a/modules/ai-agents/examples/mcp-tools/processors/enrich_order.yaml b/modules/ai-agents/examples/mcp-tools/processors/enrich_order.yaml new file mode 100644 index 000000000..604f1da65 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/enrich_order.yaml @@ -0,0 +1,15 @@ +# Enrich order with customer data +# Example of processor tool with HTTP call +# tag::complete[] +label: enrich-order + +processors: + - http: + url: "https://api.example.com/lookup" + verb: GET + +meta: + mcp: + enabled: true + description: "Enrich order with customer data" +# end::complete[] diff --git a/modules/ai-agents/examples/gcp_bigquery_select_processor.yaml b/modules/ai-agents/examples/mcp-tools/processors/gcp_bigquery_select_processor.yaml similarity index 100% rename from modules/ai-agents/examples/gcp_bigquery_select_processor.yaml rename to modules/ai-agents/examples/mcp-tools/processors/gcp_bigquery_select_processor.yaml diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_customer_account.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_customer_account.yaml new file mode 100644 index 000000000..9701bb209 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_customer_account.yaml @@ -0,0 +1,51 @@ +label: get_customer_account +mapping: | + root = match { + this.customer_id == "CUST-1001" => { + "customer_id": "CUST-1001", + "name": "Dana A.", + "email": "s****@example.com", + "account_type": "premium_checking", + "card_last_four": "4532", + "card_status": "active", + "member_since": "2019-03-15", + "location": "Seattle, WA, USA", + "phone_masked": "***-***-7890" + }, + this.customer_id == "CUST-1002" => { + "customer_id": "CUST-1002", + "name": "Alex T.", + "email": "m****@example.com", + "account_type": "standard_checking", + "card_last_four": "8821", + "card_status": "active", + "member_since": "2021-07-22", + "location": "San Francisco, CA, USA", + "phone_masked": "***-***-4521" + }, + 
this.customer_id == "CUST-1003" => { + "customer_id": "CUST-1003", + "name": "Quinn N.", + "email": "e****@example.com", + "account_type": "premium_credit", + "card_last_four": "2193", + "card_status": "active", + "member_since": "2020-11-08", + "location": "Austin, TX, USA", + "phone_masked": "***-***-3344" + }, + _ => { + "error": "customer_not_found", + "message": "No account found for customer ID: " + this.customer_id + } + } + +meta: + mcp: + enabled: true + description: "Retrieve customer account information with masked PII. Use CUST-1001, CUST-1002, or CUST-1003 for testing." + properties: + - name: customer_id + type: string + description: "Customer identifier (format CUST-XXXX)" + required: true diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_customer_history.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_customer_history.yaml new file mode 100644 index 000000000..183e4f84b --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_customer_history.yaml @@ -0,0 +1,38 @@ +label: get_customer_history + +processors: + - mapping: | + let customer_id = this.customer_id + root = if $customer_id == "CUST-100" { + { + "customer_id": $customer_id, + "orders": [ + {"order_id": "ORD-12345", "status": "shipped", "total": 1299.99, "order_date": "2025-01-10"}, + {"order_id": "ORD-67890", "status": "processing", "total": 299.98, "order_date": "2025-01-14"}, + {"order_id": "ORD-11111", "status": "delivered", "total": 89.99, "order_date": "2024-12-20"} + ], + "total_orders": 3 + } + } else if $customer_id == "CUST-999" { + { + "customer_id": $customer_id, + "orders": [], + "total_orders": 0, + "message": "No orders found for this customer" + } + } else { + { + "error": true, + "message": "Customer not found" + } + } + +meta: + mcp: + enabled: true + description: "Retrieve order history. Use CUST-100 (has orders) or CUST-999 (no orders) for testing." 
+ properties: + - name: customer_id + type: string + description: "The customer ID (format CUST-XXX)" + required: true diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_merchant_category.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_merchant_category.yaml new file mode 100644 index 000000000..f8ac390f1 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_merchant_category.yaml @@ -0,0 +1,90 @@ +label: get_merchant_category +mapping: | + root = match { + this.mcc == "5944" => { + "mcc": "5944", + "category": "Jewelry, Watch, Clock, and Silverware Stores", + "high_level_category": "retail_luxury", + "risk_profile": "high", + "typical_transaction_range": { + "min": 100, + "max": 5000, + "average": 850 + }, + "fraud_risk_notes": "High-value items, common fraud target, verify customer intent", + "common_fraud_patterns": [ + "Stolen card purchases", + "Account takeover", + "Reshipping schemes" + ] + }, + this.mcc == "5942" => { + "mcc": "5942", + "category": "Book Stores", + "high_level_category": "retail_general", + "risk_profile": "low", + "typical_transaction_range": { + "min": 10, + "max": 200, + "average": 45 + }, + "fraud_risk_notes": "Low fraud risk, common online purchase category", + "common_fraud_patterns": [] + }, + this.mcc == "4899" => { + "mcc": "4899", + "category": "Cable, Satellite, and Other Pay Television and Radio Services", + "high_level_category": "subscription_services", + "risk_profile": "medium", + "typical_transaction_range": { + "min": 9.99, + "max": 99.99, + "average": 29.99 + }, + "fraud_risk_notes": "Recurring billing, watch for duplicate charges and unauthorized subscriptions", + "common_fraud_patterns": [ + "Duplicate subscriptions", + "Unauthorized recurring charges", + "Failed cancellation processing" + ] + }, + this.mcc == "7011" => { + "mcc": "7011", + "category": "Lodging - Hotels, Motels, Resorts", + "high_level_category": "travel_hospitality", + "risk_profile": "medium", + "typical_transaction_range": { + "min": 80, + "max": 500, + "average": 180 + }, + "fraud_risk_notes": "Verify travel patterns, check for location consistency", + "common_fraud_patterns": [ + "Stolen card at booking sites", + "Account takeover for rewards redemption" + ] + }, + _ => { + "mcc": this.mcc, + "category": "Unknown Category", + "high_level_category": "unclassified", + "risk_profile": "unknown", + "typical_transaction_range": { + "min": 0, + "max": 0, + "average": 0 + }, + "fraud_risk_notes": "MCC not recognized, manual review recommended", + "common_fraud_patterns": [] + } + } + +meta: + mcp: + enabled: true + description: "Retrieve merchant category information including fraud risk level and common patterns based on MCC code." 
+ properties: + - name: mcc + type: string + description: "Merchant Category Code (5944 for jewelry, 5942 for books, 4899 for streaming, 7011 for hotels)" + required: true diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_order_status.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_order_status.yaml new file mode 100644 index 000000000..55c962761 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_order_status.yaml @@ -0,0 +1,46 @@ +label: get_order_status +mapping: | + let order_id = this.order_id + root = if $order_id == "ORD-12345" { + { + "order_id": $order_id, + "status": "shipped", + "items": [{"name": "Laptop", "quantity": 1, "price": 1299.99}], + "total": 1299.99, + "order_date": "2025-01-10", + "customer_id": "CUST-100" + } + } else if $order_id == "ORD-67890" { + { + "order_id": $order_id, + "status": "processing", + "items": [{"name": "Headphones", "quantity": 2, "price": 149.99}], + "total": 299.98, + "order_date": "2025-01-14", + "customer_id": "CUST-100" + } + } else if $order_id == "ORD-99999" { + { + "error": "order_not_found", + "message": "Order not found" + } + } else { + { + "order_id": $order_id, + "status": "pending", + "items": [{"name": "Generic Item", "quantity": 1, "price": 49.99}], + "total": 49.99, + "order_date": "2025-01-15", + "customer_id": "CUST-999" + } + } + +meta: + mcp: + enabled: true + description: "Retrieve order status and details. Use ORD-12345 (shipped), ORD-67890 (processing), or ORD-99999 (not found) for testing." + properties: + - name: order_id + type: string + description: "The order ID (format ORD-XXXXX)" + required: true diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_risk_indicators.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_risk_indicators.yaml new file mode 100644 index 000000000..c4ccdf19d --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_risk_indicators.yaml @@ -0,0 +1,129 @@ +label: get_risk_indicators +mapping: | + root = match { + this.transaction_id == "TXN-89012" => { + "transaction_id": "TXN-89012", + "risk_indicators": [ + { + "indicator": "international_transaction", + "severity": "high", + "description": "Transaction originated from Singapore, customer has no international transaction history" + }, + { + "indicator": "first_time_merchant", + "severity": "medium", + "description": "Customer has never transacted with this merchant before" + }, + { + "indicator": "unusual_category", + "severity": "high", + "description": "Jewelry purchase is outside customer's typical spending categories" + }, + { + "indicator": "high_amount", + "severity": "high", + "description": "Transaction amount is 14.5x customer's average transaction" + }, + { + "indicator": "merchant_flagged", + "severity": "critical", + "description": "Merchant has been flagged in fraud databases" + } + ], + "total_indicators": 5, + "critical_count": 1, + "high_count": 3, + "medium_count": 1, + "overall_assessment": "high_fraud_probability" + }, + this.transaction_id == "TXN-89013" => { + "transaction_id": "TXN-89013", + "risk_indicators": [ + { + "indicator": "known_merchant", + "severity": "none", + "description": "Example Marketplace is a recognized and trusted merchant" + } + ], + "total_indicators": 1, + "critical_count": 0, + "high_count": 0, + "medium_count": 0, + "overall_assessment": "low_fraud_probability" + }, + this.transaction_id == "TXN-89014" => { + "transaction_id": "TXN-89014", + "risk_indicators": [ + { + "indicator": "recurring_billing", + "severity": 
"low", + "description": "Subscription service with recurring charges" + }, + { + "indicator": "merchant_billing_issues", + "severity": "medium", + "description": "Merchant has known history of duplicate billing complaints" + }, + { + "indicator": "duplicate_charge_pattern", + "severity": "medium", + "description": "Multiple charges detected from same merchant in short timeframe" + } + ], + "total_indicators": 3, + "critical_count": 0, + "high_count": 0, + "medium_count": 2, + "low_count": 1, + "none_count": 0, + "overall_assessment": "medium_fraud_probability" + }, + this.transaction_id == "TXN-89015" => { + "transaction_id": "TXN-89015", + "risk_indicators": [ + { + "indicator": "international_transaction", + "severity": "low", + "description": "Transaction in France matches customer's travel history" + }, + { + "indicator": "travel_category", + "severity": "none", + "description": "Hotel charge is consistent with customer's frequent travel patterns" + }, + { + "indicator": "timing_matches_travel", + "severity": "none", + "description": "Transaction date aligns with customer's Paris trip" + } + ], + "total_indicators": 3, + "critical_count": 0, + "high_count": 0, + "medium_count": 0, + "low_count": 1, + "none_count": 2, + "overall_assessment": "low_fraud_probability" + }, + _ => { + "transaction_id": this.transaction_id, + "risk_indicators": [], + "total_indicators": 0, + "critical_count": 0, + "high_count": 0, + "medium_count": 0, + "low_count": 0, + "none_count": 0, + "overall_assessment": "insufficient_data" + } + } + +meta: + mcp: + enabled: true + description: "Retrieve fraud risk indicators for a transaction including severity levels and overall assessment. Use TXN-89012 through TXN-89015 for testing." + properties: + - name: transaction_id + type: string + description: "Transaction identifier to analyze (format TXN-XXXXX)" + required: true diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_shipping_info.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_shipping_info.yaml new file mode 100644 index 000000000..b0a15b497 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_shipping_info.yaml @@ -0,0 +1,38 @@ +label: get_shipping_info + +processors: + - mapping: | + let order_id = this.order_id + root = if $order_id == "ORD-12345" { + { + "order_id": $order_id, + "tracking_number": "FX1234567890", + "carrier": "Example Shipping", + "status": "in_transit", + "estimated_delivery": "2025-01-17", + "last_location": "San Francisco Distribution Center", + "last_update": "2025-01-15T14:30:00Z" + } + } else if $order_id == "ORD-67890" { + { + "order_id": $order_id, + "error": true, + "message": "Order has not shipped yet" + } + } else { + { + "order_id": $order_id, + "error": true, + "message": "No shipping information available" + } + } + +meta: + mcp: + enabled: true + description: "Get tracking and shipping information. ORD-12345 has shipping info, ORD-67890 has not shipped yet." 
+ properties: + - name: order_id + type: string + description: "The order ID to track" + required: true diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_transaction_details.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_transaction_details.yaml new file mode 100644 index 000000000..82b44ba6f --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_transaction_details.yaml @@ -0,0 +1,99 @@ +label: get_transaction_details +mapping: | + root = match { + this.transaction_id == "TXN-89012" => { + "transaction_id": "TXN-89012", + "customer_id": "CUST-1001", + "amount": 1847.99, + "currency": "USD", + "merchant": { + "name": "LUXURY WATCHES INT", + "category": "jewelry", + "country": "SG", + "mcc": "5944" + }, + "card_last_four": "4532", + "date": "2026-01-18T14:22:00Z", + "location": { + "city": "Singapore", + "country": "SG", + "coordinates": "1.3521,103.8198" + }, + "status": "posted" + }, + this.transaction_id == "TXN-89013" => { + "transaction_id": "TXN-89013", + "customer_id": "CUST-1001", + "amount": 47.83, + "currency": "USD", + "merchant": { + "name": "EXAMPLE MKTPLACE", + "category": "online_retail", + "country": "US", + "mcc": "5942" + }, + "card_last_four": "4532", + "date": "2026-01-15T10:15:00Z", + "location": { + "city": "Seattle", + "country": "US", + "coordinates": "47.6062,-122.3321" + }, + "status": "posted" + }, + this.transaction_id == "TXN-89014" => { + "transaction_id": "TXN-89014", + "customer_id": "CUST-1002", + "amount": 29.99, + "currency": "USD", + "merchant": { + "name": "EXAMPLE STREAMING", + "category": "subscription_service", + "country": "US", + "mcc": "4899" + }, + "card_last_four": "8821", + "date": "2025-12-15T00:00:01Z", + "location": { + "city": "San Francisco", + "country": "US", + "coordinates": "37.7749,-122.4194" + }, + "status": "posted", + "recurring": true + }, + this.transaction_id == "TXN-89015" => { + "transaction_id": "TXN-89015", + "customer_id": "CUST-1003", + "amount": 312.50, + "currency": "EUR", + "merchant": { + "name": "HOTEL PARIS", + "category": "lodging", + "country": "FR", + "mcc": "7011" + }, + "card_last_four": "2193", + "date": "2026-01-10T20:30:00Z", + "location": { + "city": "Paris", + "country": "FR", + "coordinates": "48.8566,2.3522" + }, + "status": "posted" + }, + _ => { + "error": "transaction_not_found", + "message": "No transaction found with ID: " + this.transaction_id + } + } + +meta: + mcp: + enabled: true + description: "Retrieve detailed transaction information including merchant, location, and amount. Use TXN-89012 through TXN-89015 for testing." 
+ properties: + - name: transaction_id + type: string + description: "Transaction identifier (format TXN-XXXXX)" + required: true diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_transaction_history.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_transaction_history.yaml new file mode 100644 index 000000000..3c8107fd3 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_transaction_history.yaml @@ -0,0 +1,108 @@ +label: get_transaction_history +mapping: | + root = match { + this.customer_id == "CUST-1001" => { + "customer_id": "CUST-1001", + "analysis_period": "last_90_days", + "spending_patterns": { + "average_transaction": 127.45, + "median_transaction": 65.20, + "total_transactions": 87, + "total_amount": 11088.15 + }, + "category_breakdown": [ + {"category": "online_retail", "count": 42, "avg_amount": 78.50}, + {"category": "groceries", "count": 28, "avg_amount": 95.30}, + {"category": "restaurants", "count": 12, "avg_amount": 45.80}, + {"category": "gas_stations", "count": 5, "avg_amount": 62.00} + ], + "location_patterns": { + "primary_region": "US_West_Coast", + "international_transactions": 0, + "cities": ["Seattle", "Bellevue", "Tacoma"] + }, + "merchant_patterns": { + "recurring_merchants": ["EXAMPLE MKTPLACE", "EXAMPLE WHOLESALE", "EXAMPLE COFFEE"], + "first_time_merchants_this_period": 3 + }, + "risk_indicators": { + "unusual_activity": false, + "velocity_flags": 0, + "declined_transactions": 1 + } + }, + this.customer_id == "CUST-1002" => { + "customer_id": "CUST-1002", + "analysis_period": "last_90_days", + "spending_patterns": { + "average_transaction": 95.33, + "median_transaction": 52.10, + "total_transactions": 64, + "total_amount": 6101.12 + }, + "category_breakdown": [ + {"category": "subscription_service", "count": 15, "avg_amount": 29.99}, + {"category": "restaurants", "count": 25, "avg_amount": 68.40}, + {"category": "online_retail", "count": 18, "avg_amount": 110.20}, + {"category": "entertainment", "count": 6, "avg_amount": 45.00} + ], + "location_patterns": { + "primary_region": "US_West_Coast", + "international_transactions": 0, + "cities": ["San Francisco", "Oakland", "San Jose"] + }, + "merchant_patterns": { + "recurring_merchants": ["EXAMPLE STREAMING", "EXAMPLE MEDIA", "EXAMPLE AUDIO"], + "first_time_merchants_this_period": 7 + }, + "risk_indicators": { + "unusual_activity": false, + "velocity_flags": 0, + "declined_transactions": 0 + } + }, + this.customer_id == "CUST-1003" => { + "customer_id": "CUST-1003", + "analysis_period": "last_90_days", + "spending_patterns": { + "average_transaction": 215.67, + "median_transaction": 145.00, + "total_transactions": 52, + "total_amount": 11214.84 + }, + "category_breakdown": [ + {"category": "travel", "count": 8, "avg_amount": 650.00}, + {"category": "lodging", "count": 6, "avg_amount": 380.50}, + {"category": "restaurants", "count": 22, "avg_amount": 85.20}, + {"category": "online_retail", "count": 16, "avg_amount": 95.75} + ], + "location_patterns": { + "primary_region": "US_South", + "international_transactions": 3, + "cities": ["Austin", "Houston", "Dallas", "Paris", "London"] + }, + "merchant_patterns": { + "recurring_merchants": ["EXAMPLE AIRLINES", "EXAMPLE HOTEL", "EXAMPLE TRAVEL"], + "first_time_merchants_this_period": 12 + }, + "risk_indicators": { + "unusual_activity": false, + "velocity_flags": 0, + "declined_transactions": 0 + } + }, + _ => { + "error": "customer_not_found", + "message": "No transaction history found for customer ID: " + this.customer_id + } + 
} + +meta: + mcp: + enabled: true + description: "Retrieve customer transaction history with spending patterns, category breakdown, and risk indicators. Use CUST-1001, CUST-1002, or CUST-1003 for testing." + properties: + - name: customer_id + type: string + description: "Customer identifier (format CUST-XXXX)" + required: true diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_weather_complete.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_weather_complete.yaml new file mode 100644 index 000000000..5e57cc929 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_weather_complete.yaml @@ -0,0 +1,55 @@ +# Complete weather tool with validation, error handling, and response formatting +# tag::complete[] +label: get-weather + +processors: + # Validate and sanitize input + - label: validate_city + mutation: | + root.city = if this.city.or("").trim() == "" { + throw("city is required") + } else { + this.city.trim().lowercase().re_replace_all("[^a-z\\s\\-]", "") + } + root.units = this.units.or("metric") + + # Fetch weather data + - label: fetch_weather + try: + - http: + url: 'https://wttr.in/${! json("city") }?format=j1' + verb: GET + timeout: 10s + + - mutation: | + root.weather = { + "location": this.nearest_area.0.areaName.0.value, + "country": this.nearest_area.0.country.0.value, + "temperature_c": this.current_condition.0.temp_C, + "temperature_f": this.current_condition.0.temp_F, + "condition": this.current_condition.0.weatherDesc.0.value, + "humidity": this.current_condition.0.humidity, + "wind_kph": this.current_condition.0.windspeedKmph + } + + # Handle errors gracefully + - label: handle_errors + catch: + - mutation: | + root.error = true + root.message = "Failed to fetch weather: " + error() + +meta: + mcp: + enabled: true + description: "Get current weather for a city. Returns temperature, conditions, humidity, and wind speed." + properties: + - name: city + type: string + description: "City name (e.g., 'London', 'New York', 'Tokyo')" + required: true + - name: units + type: string + description: "Temperature units: 'metric' or 'imperial' (default: metric)" + required: false +# end::complete[] diff --git a/modules/ai-agents/examples/mcp-tools/processors/get_weather_simple.yaml b/modules/ai-agents/examples/mcp-tools/processors/get_weather_simple.yaml new file mode 100644 index 000000000..445bf7679 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/get_weather_simple.yaml @@ -0,0 +1,17 @@ +# Simple weather tool - minimal example +# tag::complete[] +http: + url: "https://wttr.in/${! 
this.city }?format=j1" + verb: GET + +meta: + mcp: + enabled: true + name: get_weather + description: "Get current weather for a city" + properties: + - name: city + type: string + description: "City name" + required: true +# end::complete[] diff --git a/modules/ai-agents/examples/http_processor.yaml b/modules/ai-agents/examples/mcp-tools/processors/http_processor.yaml similarity index 100% rename from modules/ai-agents/examples/http_processor.yaml rename to modules/ai-agents/examples/mcp-tools/processors/http_processor.yaml diff --git a/modules/ai-agents/examples/mcp-tools/processors/log_audit_event.yaml b/modules/ai-agents/examples/mcp-tools/processors/log_audit_event.yaml new file mode 100644 index 000000000..57b0a81a5 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/log_audit_event.yaml @@ -0,0 +1,60 @@ +label: log_audit_event +processors: + - mapping: | + root = { + "audit_id": uuid_v4(), + "timestamp": now(), + "event_type": "dispute_investigation", + "transaction_id": this.transaction_id, + "customer_id": this.customer_id, + "agent_decision": this.decision, + "risk_score": this.risk_score, + "evidence_reviewed": this.evidence, + "outcome": this.outcome, + "escalated": this.escalated, + "compliance_notes": this.notes, + "logged_by": "dispute-resolution-agent", + "status": "recorded" + } + + - log: + level: INFO + message: "Compliance audit event: ${!json()}" + +meta: + mcp: + enabled: true + description: "Log compliance audit events for dispute resolution. Records customer ID, transaction details, decision, and notes." + properties: + - name: customer_id + type: string + description: "Customer identifier (format CUST-XXXX)" + required: true + - name: transaction_id + type: string + description: "Transaction identifier (format TXN-XXXXX)" + required: true + - name: decision + type: string + description: "Dispute resolution decision (approve_refund, deny_claim, etc.)" + required: true + - name: risk_score + type: number + description: "Calculated fraud risk score (0-100)" + required: true + - name: evidence + type: object + description: "Evidence reviewed during investigation" + required: true + - name: outcome + type: string + description: "Final outcome of the dispute (approved, denied, escalated, pending)" + required: true + - name: escalated + type: boolean + description: "Whether case was escalated for manual review" + required: false + - name: notes + type: string + description: "Additional compliance notes" + required: false diff --git a/modules/ai-agents/examples/mcp-tools/processors/lookup_customer.yaml b/modules/ai-agents/examples/mcp-tools/processors/lookup_customer.yaml new file mode 100644 index 000000000..6fba9edfa --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/lookup_customer.yaml @@ -0,0 +1,23 @@ +# Look up customer by ID from PostgreSQL +# Example of sql_select processor tool +# tag::complete[] +label: lookup-customer # <1> + +sql_select: # <2> + driver: postgres + dsn: "${secrets.DATABASE_URL}" + table: customers + columns: ["id", "name", "email", "plan"] + where: id = ? + args_mapping: '[this.customer_id]' + +meta: # <3> + mcp: + enabled: true + description: "Look up a customer by ID and return their profile." 
+    properties:
+      - name: customer_id
+        type: string
+        description: "The customer's unique identifier"
+        required: true
+# end::complete[]
diff --git a/modules/ai-agents/examples/observable_tool.yaml b/modules/ai-agents/examples/mcp-tools/processors/observable_tool.yaml
similarity index 100%
rename from modules/ai-agents/examples/observable_tool.yaml
rename to modules/ai-agents/examples/mcp-tools/processors/observable_tool.yaml
diff --git a/modules/ai-agents/examples/mcp-tools/processors/openai_chat.yaml b/modules/ai-agents/examples/mcp-tools/processors/openai_chat.yaml
new file mode 100644
index 000000000..b8c202a66
--- /dev/null
+++ b/modules/ai-agents/examples/mcp-tools/processors/openai_chat.yaml
@@ -0,0 +1,27 @@
+# OpenAI chat completion for sentiment analysis
+# Use for text generation, classification, summarization
+label: analyze-feedback
+
+# tag::component[]
+openai_chat_completion:
+  api_key: "${secrets.OPENAI_API_KEY}"
+  model: "gpt-5.2"
+  prompt: |
+    Analyze this customer feedback and provide:
+    1. Sentiment (positive/negative/neutral)
+    2. Key themes
+    3. Actionable insights
+
+    Feedback: ${! json("feedback_text") }
+  max_tokens: 500
+# end::component[]
+
+meta:
+  mcp:
+    enabled: true
+    description: "Analyze customer feedback for sentiment and themes"
+    properties:
+      - name: feedback_text
+        type: string
+        description: "The customer feedback text to analyze"
+        required: true
diff --git a/modules/ai-agents/examples/mcp-tools/processors/openai_embeddings.yaml b/modules/ai-agents/examples/mcp-tools/processors/openai_embeddings.yaml
new file mode 100644
index 000000000..0c7f15f8e
--- /dev/null
+++ b/modules/ai-agents/examples/mcp-tools/processors/openai_embeddings.yaml
@@ -0,0 +1,20 @@
+# Generate embeddings with OpenAI
+# Use for semantic search, RAG pipelines, similarity matching
+label: generate-embeddings
+
+# tag::component[]
+openai_embeddings:
+  api_key: "${secrets.OPENAI_API_KEY}"
+  model: "text-embedding-3-small"
+  text: ${! 
json("content") } +# end::component[] + +meta: + mcp: + enabled: true + description: "Generate vector embeddings for text content" + properties: + - name: content + type: string + description: "Text content to generate embeddings for" + required: true diff --git a/modules/ai-agents/examples/order_workflow.yaml b/modules/ai-agents/examples/mcp-tools/processors/order_workflow.yaml similarity index 95% rename from modules/ai-agents/examples/order_workflow.yaml rename to modules/ai-agents/examples/mcp-tools/processors/order_workflow.yaml index ad0dc29ee..aebaac897 100644 --- a/modules/ai-agents/examples/order_workflow.yaml +++ b/modules/ai-agents/examples/mcp-tools/processors/order_workflow.yaml @@ -39,7 +39,7 @@ processors: root = this.merge({ "processing_tier": "premium", "processing_time_estimate": "2-4 hours", - "assigned_rep": "premium-team@company.com", + "assigned_rep": "premium-team@example.com", "priority_score": 95 }) @@ -51,7 +51,7 @@ processors: root = this.merge({ "processing_tier": "vip", "processing_time_estimate": "1-2 hours", - "assigned_rep": "vip-team@company.com", + "assigned_rep": "vip-team@example.com", "priority_score": 90, "perks": ["expedited_shipping", "white_glove_service"] }) @@ -63,7 +63,7 @@ processors: root = this.merge({ "processing_tier": "standard", "processing_time_estimate": "24-48 hours", - "assigned_rep": "support@company.com", + "assigned_rep": "support@example.com", "priority_score": 50 }) diff --git a/modules/ai-agents/examples/mcp-tools/processors/search_jira.yaml b/modules/ai-agents/examples/mcp-tools/processors/search_jira.yaml new file mode 100644 index 000000000..7ef2d6673 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/search_jira.yaml @@ -0,0 +1,31 @@ +# Search Jira issues using JQL +# Requires Enterprise license +# tag::complete[] +label: search-jira + +processors: + - generate: + count: 1 + mapping: | + root.jql = this.jql + root.maxResults = this.max_results.or(50) + root.fields = ["key", "summary", "status", "assignee", "priority"] + - jira: + base_url: "${secrets.JIRA_BASE_URL}" + username: "${secrets.JIRA_USERNAME}" + api_token: "${secrets.JIRA_API_TOKEN}" + +meta: + mcp: + enabled: true + description: "Search Jira issues using JQL. Returns matching issues with key, summary, status, assignee, and priority." 
+ properties: + - name: jql + type: string + description: "JQL query (e.g., 'project = DOC AND status = Open')" + required: true + - name: max_results + type: number + description: "Maximum issues to return (default: 50)" + required: false +# end::complete[] diff --git a/modules/ai-agents/examples/mcp-tools/processors/transform_validate.yaml b/modules/ai-agents/examples/mcp-tools/processors/transform_validate.yaml new file mode 100644 index 000000000..b05b619ad --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/transform_validate.yaml @@ -0,0 +1,40 @@ +# Transform and validate data with Bloblang +# Use for parsing, validation, filtering, enrichment +label: transform-user-data + +processors: + # tag::mapping[] + - mapping: | + # Parse and validate incoming data + root.user_id = this.user_id.or(throw("user_id is required")) + root.timestamp = now().ts_format("2006-01-02T15:04:05Z07:00") + + # Transform and enrich + root.email_domain = this.email.split("@").index(1) + root.is_premium = this.subscription_tier == "premium" + + # Filter sensitive data + root.profile = this.profile.or({}).without("ssn", "credit_card") + # end::mapping[] + +meta: + mcp: + enabled: true + description: "Transform and validate user data" + properties: + - name: user_id + type: string + description: "User identifier" + required: true + - name: email + type: string + description: "User email address" + required: true + - name: subscription_tier + type: string + description: "Subscription level" + required: false + - name: profile + type: object + description: "User profile data" + required: false diff --git a/modules/ai-agents/examples/mcp-tools/processors/verify_merchant.yaml b/modules/ai-agents/examples/mcp-tools/processors/verify_merchant.yaml new file mode 100644 index 000000000..e0ad87731 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/processors/verify_merchant.yaml @@ -0,0 +1,126 @@ +label: verify_merchant +mapping: | + root = match { + this.merchant_name == "LUXURY WATCHES INT" => { + "merchant_name": "LUXURY WATCHES INT", + "merchant_id": "MER-99912", + "reputation_score": 12, + "reputation_level": "high_risk", + "verification_status": "unverified", + "fraud_reports": { + "total_reports": 247, + "recent_reports_30d": 42, + "confirmed_fraud_cases": 89 + }, + "business_details": { + "country": "Singapore", + "years_in_operation": 1, + "registration_verified": false + }, + "red_flags": [ + "High volume of fraud reports", + "Recently established business", + "Unverified business registration", + "Operates in high-risk category", + "Pattern of chargebacks" + ], + "recommendation": "block_merchant" + }, + this.merchant_name == "EXAMPLE MKTPLACE" => { + "merchant_name": "EXAMPLE MKTPLACE", + "merchant_id": "MER-00001", + "reputation_score": 98, + "reputation_level": "excellent", + "verification_status": "verified", + "fraud_reports": { + "total_reports": 1203, + "recent_reports_30d": 15, + "confirmed_fraud_cases": 0 + }, + "business_details": { + "country": "USA", + "years_in_operation": 20, + "registration_verified": true, + "parent_company": "Example Organization" + }, + "red_flags": [], + "recommendation": "trusted_merchant" + }, + this.merchant_name == "EXAMPLE STREAMING" => { + "merchant_name": "EXAMPLE STREAMING", + "merchant_id": "MER-45678", + "reputation_score": 65, + "reputation_level": "moderate", + "verification_status": "verified", + "fraud_reports": { + "total_reports": 892, + "recent_reports_30d": 67, + "confirmed_fraud_cases": 12 + }, + "business_details": { + "country": 
"USA", + "years_in_operation": 5, + "registration_verified": true + }, + "red_flags": [ + "Known billing system issues", + "Frequent duplicate charge complaints", + "Difficult cancellation process" + ], + "common_issues": [ + "Duplicate subscriptions", + "Failed cancellation processing", + "Unclear billing descriptors" + ], + "recommendation": "verify_subscription_details" + }, + this.merchant_name == "HOTEL PARIS" => { + "merchant_name": "HOTEL PARIS", + "merchant_id": "MER-78234", + "reputation_score": 88, + "reputation_level": "trusted", + "verification_status": "verified", + "fraud_reports": { + "total_reports": 45, + "recent_reports_30d": 2, + "confirmed_fraud_cases": 0 + }, + "business_details": { + "country": "France", + "years_in_operation": 15, + "registration_verified": true, + "chain": "Independent Boutique Hotels" + }, + "red_flags": [], + "pricing": { + "average_room_rate_eur": 280, + "typical_range_eur": "220-350" + }, + "recommendation": "legitimate_merchant" + }, + _ => { + "merchant_name": this.merchant_name, + "reputation_score": 50, + "reputation_level": "unknown", + "verification_status": "not_found", + "fraud_reports": { + "total_reports": 0, + "recent_reports_30d": 0, + "confirmed_fraud_cases": 0 + }, + "business_details": {}, + "red_flags": [], + "message": "Merchant not found in verification database", + "recommendation": "manual_review_required" + } + } + +meta: + mcp: + enabled: true + description: "Verify merchant reputation and fraud history. Use LUXURY WATCHES INT (high risk), EXAMPLE MKTPLACE (trusted), EXAMPLE STREAMING (moderate), or HOTEL PARIS (trusted) for testing." + properties: + - name: merchant_name + type: string + description: "Merchant name as it appears on transaction" + required: true diff --git a/modules/ai-agents/examples/weather_service.yaml b/modules/ai-agents/examples/mcp-tools/processors/weather_service.yaml similarity index 100% rename from modules/ai-agents/examples/weather_service.yaml rename to modules/ai-agents/examples/mcp-tools/processors/weather_service.yaml diff --git a/modules/ai-agents/examples/mcp-tools/snippets/bloblang_this_context.yaml b/modules/ai-agents/examples/mcp-tools/snippets/bloblang_this_context.yaml new file mode 100644 index 000000000..18409d0a1 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/snippets/bloblang_this_context.yaml @@ -0,0 +1,15 @@ +# Bloblang 'this' context examples +# Use 'this' to access message fields in mutation, mapping, or args_mapping + +# tag::mutation[] +mutation: | + root.search_query = this.query.lowercase() + root.max_results = this.limit.or(10) +# end::mutation[] + +# tag::args_mapping[] +sql_select: + table: orders + where: customer_id = ? AND status = ? 
+ args_mapping: '[this.customer_id, this.status.or("active")]' +# end::args_mapping[] diff --git a/modules/ai-agents/examples/mcp-tools/snippets/defaults.yaml b/modules/ai-agents/examples/mcp-tools/snippets/defaults.yaml new file mode 100644 index 000000000..844ea3326 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/snippets/defaults.yaml @@ -0,0 +1,24 @@ +# Handling optional parameters with defaults + +# tag::mutation[] +mutation: | + root.city = this.city # Required - will error if missing + root.units = this.units.or("metric") # Optional with default + root.limit = this.limit.or(10).number() # Optional, converted to number +# end::mutation[] + +# tag::properties[] +properties: + - name: city + type: string + description: "City name to look up" + required: true + - name: units + type: string + description: "Temperature units: 'metric' or 'imperial' (default: metric)" + required: false + - name: limit + type: number + description: "Max results (default: 10)" + required: false +# end::properties[] diff --git a/modules/ai-agents/examples/mcp-tools/snippets/interpolation.yaml b/modules/ai-agents/examples/mcp-tools/snippets/interpolation.yaml new file mode 100644 index 000000000..90bd0d8c5 --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/snippets/interpolation.yaml @@ -0,0 +1,13 @@ +# Bloblang interpolation in string fields +# Use ${! ... } to embed expressions in URLs, topics, headers + +# tag::http_url[] +http: + url: 'https://api.weather.com/v1/current?city=${! json("city") }&units=${! json("units").or("metric") }' +# end::http_url[] + +# tag::redpanda_topic[] +redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] # <1> + topic: '${! json("topic_name") }' # <2> +# end::redpanda_topic[] diff --git a/modules/ai-agents/examples/mcp-tools/snippets/secrets.yaml b/modules/ai-agents/examples/mcp-tools/snippets/secrets.yaml new file mode 100644 index 000000000..3ac6f98db --- /dev/null +++ b/modules/ai-agents/examples/mcp-tools/snippets/secrets.yaml @@ -0,0 +1,14 @@ +# Using secrets in tool configurations +# Reference secrets with ${secrets.SECRET_NAME} syntax + +# tag::example[] +http: + url: "https://api.example.com/data" + headers: + Authorization: "Bearer ${secrets.API_TOKEN}" + +sql_select: + driver: postgres + dsn: "${secrets.DATABASE_URL}" + table: customers +# end::example[] diff --git a/modules/ai-agents/examples/test-mcp-examples.sh b/modules/ai-agents/examples/mcp-tools/test-mcp-tools.sh similarity index 61% rename from modules/ai-agents/examples/test-mcp-examples.sh rename to modules/ai-agents/examples/mcp-tools/test-mcp-tools.sh index 068f7306a..263ea5dbf 100755 --- a/modules/ai-agents/examples/test-mcp-examples.sh +++ b/modules/ai-agents/examples/mcp-tools/test-mcp-tools.sh @@ -7,12 +7,12 @@ # 2. MCP metadata validation (enabled, description, properties) # # Usage: -# ./test-mcp-examples.sh # Run all tests -# ./test-mcp-examples.sh --lint-only # Only lint, skip metadata validation +# ./test-mcp-tools.sh # Run all tests +# ./test-mcp-tools.sh --lint-only # Only lint, skip metadata validation # # Unlike rp-connect-docs, Cloud MCP tools cannot be tested with # `rpk connect run` because they are standalone tool definitions, not -# full pipelines. End-to-end testing requires the Cloud Console. +# full pipelines. End-to-end testing requires the Cloud UI. 
set -euo pipefail @@ -24,10 +24,13 @@ BLUE='\033[0;34m' CYAN='\033[0;36m' NC='\033[0m' -# Get script directory +# Get script directory (script lives inside mcp-tools/) SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" cd "$SCRIPT_DIR" +# Component type directories +COMPONENT_DIRS=("inputs" "outputs" "processors" "caches") + # Counters TOTAL_TOOLS=0 PASSED_LINT=0 @@ -61,20 +64,24 @@ echo -e "📦 ${CYAN}SECTION 1: MCP Tool Linting${NC}" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "" -# Count YAML files -file_count=$(find . -maxdepth 1 -name "*.yaml" | wc -l | tr -d ' ') -TOTAL_TOOLS=$file_count - -echo -n -e "${BLUE}📁 examples/${NC} ($file_count files)... " - -if output=$(rpk connect mcp-server lint --skip-env-var-check . 2>&1); then - echo -e "${GREEN}✓ PASSED${NC}" - PASSED_LINT=$file_count -else - echo -e "${RED}✗ FAILED${NC}" - echo "$output" | sed 's/^/ /' | head -20 - FAILED_LINT=$file_count -fi +for dir in "${COMPONENT_DIRS[@]}"; do + if [[ -d "$dir" ]]; then + file_count=$(find "$dir" -maxdepth 1 -name "*.yaml" | wc -l | tr -d ' ') + if [[ $file_count -gt 0 ]]; then + TOTAL_TOOLS=$((TOTAL_TOOLS + file_count)) + echo -n -e "${BLUE}📁 $dir/${NC} ($file_count files)... " + + if output=$(rpk connect mcp-server lint --skip-env-var-check "$dir" 2>&1); then + echo -e "${GREEN}✓ PASSED${NC}" + PASSED_LINT=$((PASSED_LINT + file_count)) + else + echo -e "${RED}✗ FAILED${NC}" + echo "$output" | sed 's/^/ /' | head -20 + FAILED_LINT=$((FAILED_LINT + file_count)) + fi + fi + fi +done # ============================================================================ # SECTION 2: MCP Metadata Validation @@ -99,17 +106,19 @@ if $RUN_METADATA; then fi if $RUN_METADATA; then - for file in *.yaml; do - if [[ -f "$file" ]]; then - echo -n -e " ${BLUE}$file${NC}... " - - # Check if .meta.mcp exists - if $use_yq; then - mcp_exists=$(yq eval '.meta.mcp' "$file" 2>/dev/null) - enabled=$(yq eval '.meta.mcp.enabled' "$file" 2>/dev/null) - description=$(yq eval '.meta.mcp.description' "$file" 2>/dev/null) - else - mcp_exists=$(python3 -c " + for dir in "${COMPONENT_DIRS[@]}"; do + if [[ -d "$dir" ]]; then + for file in "$dir"/*.yaml; do + if [[ -f "$file" ]]; then + echo -n -e " ${BLUE}$file${NC}... 
" + + # Check if .meta.mcp exists + if $use_yq; then + mcp_exists=$(yq eval '.meta.mcp' "$file" 2>/dev/null) + enabled=$(yq eval '.meta.mcp.enabled' "$file" 2>/dev/null) + description=$(yq eval '.meta.mcp.description' "$file" 2>/dev/null) + else + mcp_exists=$(python3 -c " import yaml try: with open('$file') as f: @@ -120,7 +129,7 @@ try: except: print('null') " 2>/dev/null) - enabled=$(python3 -c " + enabled=$(python3 -c " import yaml try: with open('$file') as f: @@ -130,7 +139,7 @@ try: except: print('null') " 2>/dev/null) - description=$(python3 -c " + description=$(python3 -c " import yaml try: with open('$file') as f: @@ -140,22 +149,24 @@ try: except: print('null') " 2>/dev/null) - fi - - # Validate - if [[ "$mcp_exists" == "null" || -z "$mcp_exists" ]]; then - echo -e "${YELLOW}SKIPPED${NC} (no MCP metadata)" - SKIPPED=$((SKIPPED + 1)) - elif [[ "$enabled" != "true" ]]; then - echo -e "${YELLOW}WARNING${NC} (mcp.enabled not true)" - SKIPPED=$((SKIPPED + 1)) - elif [[ "$description" == "null" || -z "$description" ]]; then - echo -e "${RED}FAILED${NC} (missing description)" - FAILED_METADATA=$((FAILED_METADATA + 1)) - else - echo -e "${GREEN}PASSED${NC}" - PASSED_METADATA=$((PASSED_METADATA + 1)) - fi + fi + + # Validate + if [[ "$mcp_exists" == "null" || -z "$mcp_exists" ]]; then + echo -e "${YELLOW}SKIPPED${NC} (no MCP metadata)" + SKIPPED=$((SKIPPED + 1)) + elif [[ "$enabled" != "true" ]]; then + echo -e "${YELLOW}WARNING${NC} (mcp.enabled not true)" + SKIPPED=$((SKIPPED + 1)) + elif [[ "$description" == "null" || -z "$description" ]]; then + echo -e "${RED}FAILED${NC} (missing description)" + FAILED_METADATA=$((FAILED_METADATA + 1)) + else + echo -e "${GREEN}PASSED${NC}" + PASSED_METADATA=$((PASSED_METADATA + 1)) + fi + fi + done fi done fi @@ -173,16 +184,20 @@ echo "━━━━━━━━━━━━━━━━━━━━━━━━ echo "" secrets_issues=0 -for file in *.yaml; do - if [[ -f "$file" ]]; then - # Check for non-Cloud secrets patterns (${VAR} without secrets. prefix) - # Exclude: - # - ${! ... } which is Bloblang interpolation - # - ${REDPANDA_BROKERS} which is platform-injected - if grep -E '\$\{[A-Z_]+\}' "$file" | grep -v '\${secrets\.' | grep -v '\${!' | grep -v '\${REDPANDA_BROKERS}' > /dev/null 2>&1; then - echo -e " ${BLUE}$file${NC}... ${YELLOW}WARNING${NC} (uses env vars instead of \${secrets.X})" - secrets_issues=$((secrets_issues + 1)) - fi +for dir in "${COMPONENT_DIRS[@]}"; do + if [[ -d "$dir" ]]; then + for file in "$dir"/*.yaml; do + if [[ -f "$file" ]]; then + # Check for non-Cloud secrets patterns (${VAR} without secrets. prefix) + # Exclude: + # - ${! ... } which is Bloblang interpolation + # - ${REDPANDA_BROKERS} which is platform-injected + if grep -E '\$\{[A-Z_]+\}' "$file" | grep -v '\${secrets\.' | grep -v '\${!' | grep -v '\${REDPANDA_BROKERS}' > /dev/null 2>&1; then + echo -e " ${BLUE}$file${NC}... 
${YELLOW}WARNING${NC} (uses env vars instead of \${secrets.X})" + secrets_issues=$((secrets_issues + 1)) + fi + fi + done fi done diff --git a/modules/ai-agents/examples/pipelines/agent-transformation.yaml b/modules/ai-agents/examples/pipelines/agent-transformation.yaml new file mode 100644 index 000000000..b4c14609c --- /dev/null +++ b/modules/ai-agents/examples/pipelines/agent-transformation.yaml @@ -0,0 +1,32 @@ +# Agent as transformation node +# Uses agent reasoning for complex transformations (natural language to SQL) +input: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topics: [nl-queries] + consumer_group: query-converter + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" + +pipeline: + # tag::processors[] + processors: + - a2a_message: + agent_card_url: "${AGENT_CARD_URL}" + prompt: "Convert to SQL: ${!this.natural_language_query}" + # end::processors[] + +output: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: sql-queries + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" diff --git a/modules/ai-agents/examples/pipelines/async-workflows.yaml b/modules/ai-agents/examples/pipelines/async-workflows.yaml new file mode 100644 index 000000000..38b0daf91 --- /dev/null +++ b/modules/ai-agents/examples/pipelines/async-workflows.yaml @@ -0,0 +1,32 @@ +# Asynchronous workflow pipeline +# Processes events in the background with acceptable latency +# tag::pipeline[] +input: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topics: [daily-reports] + consumer_group: report-analyzer + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" + +pipeline: + processors: + - a2a_message: + agent_card_url: "${AGENT_CARD_URL}" + prompt: "Summarize this report: ${!content()}" +# end::pipeline[] + +output: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: report-summaries + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" diff --git a/modules/ai-agents/examples/pipelines/dispute-pipeline.yaml b/modules/ai-agents/examples/pipelines/dispute-pipeline.yaml new file mode 100644 index 000000000..ef312a562 --- /dev/null +++ b/modules/ai-agents/examples/pipelines/dispute-pipeline.yaml @@ -0,0 +1,157 @@ +# Event-driven transaction dispute processing pipeline +# Automatically flags high-risk transactions and routes them to dispute agent + +input: + kafka: + addresses: ["${REDPANDA_BROKERS}"] + topics: ["bank.transactions"] + consumer_group: dispute-processor + tls: + enabled: true + sasl: + mechanism: SCRAM-SHA-256 + user: "${secrets.DISPUTE_PIPELINE_USERNAME}" + password: "${secrets.DISPUTE_PIPELINE_PASSWORD}" + +pipeline: + processors: + # Filter for high-value or suspicious transactions + - branch: + request_map: | + # Only process transactions above $500 or flagged by upstream systems + root = if this.amount > 500 || this.preliminary_flag == true { + this + } else { + deleted() + } + + processors: + # Calculate preliminary risk score based on transaction attributes + - mapping: | + # Preserve original transaction + root = this + + # Location risk: international transactions get higher score + let location_risk = if this.merchant.country != this.card.billing_country { 40 } else { 0 } + + # Amount risk: large amounts relative to account averages + let amount_risk = if 
this.amount > 1000 { 30 } else if this.amount > 500 { 15 } else { 0 }
+
+              # Velocity risk: check for multiple recent transactions
+              let velocity_risk = if this.recent_transaction_count > 5 { 20 } else { 0 }
+
+              # Category risk: luxury goods and high-risk categories
+              let category_risk = match this.merchant.mcc {
+                "5944" => 20, # Jewelry
+                "5094" => 25, # Precious stones
+                _ => 0
+              }
+
+              # Calculate total score
+              let total_score = $location_risk + $amount_risk + $velocity_risk + $category_risk
+
+              root.preliminary_risk_score = $total_score
+              root.risk_level = if $total_score > 70 {
+                "high"
+              } else if $total_score > 40 {
+                "medium"
+              } else {
+                "low"
+              }
+
+          # Route high and medium risk transactions to dispute agent for investigation
+          - branch:
+              request_map: |
+                # Only send to agent if risk is medium or higher
+                root = if this.preliminary_risk_score >= 40 { this } else { deleted() }
+
+              processors:
+                # Invoke dispute resolution agent via A2A protocol
+                - a2a_message:
+                    agent_card_url: "${secrets.DISPUTE_AGENT_CARD_URL}"
+                    prompt: |
+                      Investigate this potentially fraudulent transaction and respond with ONLY a JSON object (no additional text):
+
+                      Transaction ID: ${! this.transaction_id }
+                      Customer ID: ${! this.customer_id }
+                      Amount: $${! this.amount } ${! this.currency }
+                      Merchant: ${! this.merchant.name }
+                      Location: ${! this.merchant.city }, ${! this.merchant.country }
+                      Date: ${! this.transaction_date }
+                      Preliminary Risk Score: ${! this.preliminary_risk_score }/100
+                      Risk Level: ${! this.risk_level }
+
+                      Return ONLY this JSON format with no other text:
+                      {
+                        "recommendation": "block_and_investigate" | "hold_for_review" | "approve",
+                        "fraud_score": <number 0-100>,
+                        "confidence": "high" | "medium" | "low",
+                        "reasoning": "<explanation>"
+                      }
+
+              # Map agent response back to transaction record
+              result_map: |
+                # By default, result_map preserves the original message that entered the branch
+                # Just add the agent investigation field
+                root.agent_investigation = if content().string().parse_json().catch(null) != null {
+                  content().string().parse_json()
+                } else {
+                  {
+                    "recommendation": "manual_review_required",
+                    "fraud_score": 50,
+                    "confidence": "low",
+                    "reasoning": "Agent returned unparseable response: " + content().string().slice(0, 100)
+                  }
+                }
+
+        # Merge risk scoring and agent results back to original transaction
+        result_map: |
+          root = content()
+
+    # Enrich with final decision and tracing metadata
+    - mapping: |
+        # Preserve original transaction and all computed fields
+        root = this
+
+        # Only set final_decision and alert_level if agent investigation occurred
+        root.final_decision = if this.agent_investigation.exists("recommendation") {
+          match {
+            this.agent_investigation.recommendation == "block_and_investigate" => "blocked",
+            this.agent_investigation.recommendation == "hold_for_review" => "pending_review",
+            this.agent_investigation.recommendation == "approve" => "approved",
+            _ => "manual_review_required"
+          }
+        } else {
+          "low_risk_no_investigation"
+        }
+
+        root.alert_level = if this.agent_investigation.exists("fraud_score") {
+          match {
+            this.agent_investigation.fraud_score >= 80 => "critical",
+            this.agent_investigation.fraud_score >= 60 => "high",
+            this.agent_investigation.fraud_score >= 40 => "medium",
+            _ => "low"
+          }
+        } else {
+          "low"
+        }
+
+        # Add execution metadata for tracing back to agent transcripts
+        root.pipeline_metadata = {
+          "processed_at": now().ts_format("2006-01-02T15:04:05.000Z"),
+          "transaction_id": this.transaction_id,
+          "customer_id": this.customer_id,
+          "agent_invoked": 
this.agent_investigation.exists("fraud_score") + } + +output: + kafka: + addresses: ["${REDPANDA_BROKERS}"] + topic: bank.dispute_results + key: "${! this.transaction_id }" + tls: + enabled: true + sasl: + mechanism: SCRAM-SHA-256 + user: "${secrets.DISPUTE_PIPELINE_USERNAME}" + password: "${secrets.DISPUTE_PIPELINE_PASSWORD}" diff --git a/modules/ai-agents/examples/pipelines/event-driven-invocation.yaml b/modules/ai-agents/examples/pipelines/event-driven-invocation.yaml new file mode 100644 index 000000000..677beb960 --- /dev/null +++ b/modules/ai-agents/examples/pipelines/event-driven-invocation.yaml @@ -0,0 +1,30 @@ +# Event-driven agent invocation pipeline +# Invokes an agent for each event in a stream +input: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topics: [transactions] + consumer_group: fraud-detector + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" + +pipeline: + processors: + - a2a_message: + agent_card_url: "${AGENT_CARD_URL}" + prompt: "Analyze this transaction: ${!content()}" + +output: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: fraud-alerts + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" diff --git a/modules/ai-agents/examples/pipelines/fraud-detection-routing.yaml b/modules/ai-agents/examples/pipelines/fraud-detection-routing.yaml new file mode 100644 index 000000000..ea5f3f982 --- /dev/null +++ b/modules/ai-agents/examples/pipelines/fraud-detection-routing.yaml @@ -0,0 +1,75 @@ +# Fraud detection pipeline with score-based routing +# Analyzes every transaction and routes to different topics based on fraud score +input: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topics: [transactions] + consumer_group: fraud-detector + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" + +pipeline: + processors: + - branch: + request_map: | + root.transaction_id = this.id + root.amount = this.amount + root.merchant = this.merchant + root.user_id = this.user_id + processors: + - a2a_message: + agent_card_url: "${AGENT_CARD_URL}" + prompt: | + Analyze this transaction for fraud: + Amount: ${! json("amount") } + Merchant: ${! json("merchant") } + User: ${! 
json("user_id") } + + Return JSON: { "fraud_score": 0-100, "reason": "explanation", "recommend_block": true/false } + result_map: | + root = this + root.fraud_analysis = content().parse_json().catch({}) + + - mapping: | + root = this + meta fraud_score = this.fraud_analysis.fraud_score + +output: + switch: + cases: + - check: 'meta("fraud_score") >= 80' + output: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: fraud-alerts-high + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" + - check: 'meta("fraud_score") >= 50' + output: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: fraud-alerts-medium + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" + - output: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: transactions-cleared + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" diff --git a/modules/ai-agents/examples/pipelines/fraud-detection-simple.yaml b/modules/ai-agents/examples/pipelines/fraud-detection-simple.yaml new file mode 100644 index 000000000..debb8baa6 --- /dev/null +++ b/modules/ai-agents/examples/pipelines/fraud-detection-simple.yaml @@ -0,0 +1,30 @@ +# Fraud detection pipeline that invokes an agent for every transaction +# Replace AGENT_CARD_URL with your actual agent card URL +input: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topics: [transactions] + consumer_group: fraud-detector + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" + +pipeline: + processors: + - a2a_message: + agent_card_url: "${AGENT_CARD_URL}" + prompt: "Analyze this transaction: ${!content()}" + +output: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: fraud-alerts + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" diff --git a/modules/ai-agents/examples/pipelines/multi-agent-orchestration.yaml b/modules/ai-agents/examples/pipelines/multi-agent-orchestration.yaml new file mode 100644 index 000000000..934d168ba --- /dev/null +++ b/modules/ai-agents/examples/pipelines/multi-agent-orchestration.yaml @@ -0,0 +1,35 @@ +# Multi-agent pipeline orchestration +# Chains multiple agents in sequence: translate -> analyze sentiment -> route +input: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topics: [international-feedback] + consumer_group: feedback-processor + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" + +pipeline: + # tag::processors[] + processors: + - a2a_message: + agent_card_url: "${TRANSLATOR_AGENT_URL}" + - a2a_message: + agent_card_url: "${SENTIMENT_AGENT_URL}" + - a2a_message: + agent_card_url: "${ROUTER_AGENT_URL}" + # end::processors[] + +output: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: processed-feedback + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" diff --git a/modules/ai-agents/examples/pipelines/streaming-enrichment.yaml b/modules/ai-agents/examples/pipelines/streaming-enrichment.yaml new file mode 100644 index 000000000..3f544d50d --- /dev/null +++ b/modules/ai-agents/examples/pipelines/streaming-enrichment.yaml @@ -0,0 +1,35 @@ +# Streaming data enrichment 
pipeline +# Adds AI-generated metadata (sentiment) to events +input: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topics: [customer-feedback] + consumer_group: sentiment-enricher + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" + +pipeline: + # tag::processors[] + processors: + - branch: + request_map: 'root = this.text' + processors: + - a2a_message: + agent_card_url: "${AGENT_CARD_URL}" + result_map: 'root.sentiment = content()' + # end::processors[] + +output: + redpanda: + seed_brokers: ["${REDPANDA_BROKERS}"] + topic: enriched-feedback + tls: + enabled: true + sasl: + - mechanism: SCRAM-SHA-256 + username: "${REDPANDA_USERNAME}" + password: "${REDPANDA_PASSWORD}" diff --git a/modules/ai-agents/examples/pipelines/test-pipelines.sh b/modules/ai-agents/examples/pipelines/test-pipelines.sh new file mode 100755 index 000000000..b8bfd49d5 --- /dev/null +++ b/modules/ai-agents/examples/pipelines/test-pipelines.sh @@ -0,0 +1,114 @@ +#!/usr/bin/env bash +# +# Test script for Redpanda Cloud pipeline examples +# +# This script uses rpk connect lint to validate pipeline configurations. +# Cloud-specific processors (like a2a_message) are not available in the local +# CLI, so those errors are expected and noted. +# +# Usage: +# ./test-pipelines.sh +# +# Exit codes: +# 0 - All files have valid YAML structure (Cloud processor errors are expected) +# 1 - YAML syntax errors or unexpected failures + +set -euo pipefail + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' + +# Get script directory +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +# Counters +TOTAL=0 +PASSED=0 +CLOUD_PROCESSOR_ERRORS=0 +FAILED=0 + +echo "🧪 Redpanda Cloud Pipeline Examples - Test Suite" +echo "=================================================" +echo "" + +# Check for rpk +if ! command -v rpk &> /dev/null; then + echo -e "${RED}Error: rpk is required${NC}" + echo "Install rpk: https://docs.redpanda.com/current/get-started/rpk-install/" + exit 1 +fi + +echo -e "${CYAN}Using:${NC} $(rpk version 2>/dev/null | head -1 || echo 'rpk')" +echo "" + +# ============================================================================ +# Lint each pipeline file +# ============================================================================ + +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo -e "📦 ${CYAN}Pipeline Linting${NC}" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "" + +for file in *.yaml; do + if [[ -f "$file" ]]; then + TOTAL=$((TOTAL + 1)) + echo -n -e " ${BLUE}$file${NC}... 
" + + # Run rpk connect lint + output=$(rpk connect lint --skip-env-var-check "$file" 2>&1) || true + + if [[ -z "$output" ]]; then + # No output means success + echo -e "${GREEN}PASSED${NC}" + PASSED=$((PASSED + 1)) + elif echo "$output" | grep -q "a2a_message\|unable to infer.*a2a"; then + # Cloud-specific processor error (expected) + echo -e "${YELLOW}OK${NC} (Cloud processor - requires Redpanda Cloud)" + CLOUD_PROCESSOR_ERRORS=$((CLOUD_PROCESSOR_ERRORS + 1)) + elif echo "$output" | grep -qi "yaml\|parse\|syntax"; then + # YAML syntax error (unexpected) + echo -e "${RED}FAILED${NC}" + echo "$output" | sed 's/^/ /' + FAILED=$((FAILED + 1)) + else + # Other lint error (unexpected) + echo -e "${RED}FAILED${NC}" + echo "$output" | sed 's/^/ /' + FAILED=$((FAILED + 1)) + fi + fi +done + +# ============================================================================ +# Summary +# ============================================================================ + +echo "" +echo "=================================================" +echo "📊 Test Summary" +echo "=================================================" +echo -e "Total files: $TOTAL" +echo -e "Fully passed: $PASSED" +echo -e "Cloud processors: $CLOUD_PROCESSOR_ERRORS (expected - requires Cloud)" +echo -e "Failed: $FAILED" +echo "──────────────────────────────────────────────────" + +if [[ $FAILED -gt 0 ]]; then + echo -e "${RED}❌ $FAILED file(s) have YAML errors${NC}" + exit 1 +else + echo -e "${GREEN}✅ All files valid${NC}" + if [[ $CLOUD_PROCESSOR_ERRORS -gt 0 ]]; then + echo "" + echo -e "${YELLOW}Note: $CLOUD_PROCESSOR_ERRORS file(s) use Cloud-specific processors (a2a_message)${NC}" + echo -e "${YELLOW}These require deployment to Redpanda Cloud for full validation.${NC}" + fi + exit 0 +fi diff --git a/modules/ai-agents/examples/testing.adoc b/modules/ai-agents/examples/testing.adoc deleted file mode 100644 index 96d8525ab..000000000 --- a/modules/ai-agents/examples/testing.adoc +++ /dev/null @@ -1,290 +0,0 @@ -= Test MCP Examples -:description: Automated testing strategies for Redpanda Cloud MCP server examples. - -This document describes the automated testing strategies for Redpanda Cloud MCP server examples. - -All MCP examples are automatically tested to ensure: - -. YAML syntax and structure are correct -. MCP metadata is complete and valid -. Component schemas match Redpanda Connect specifications -. 
Secrets syntax uses Cloud Secrets Store format (`${secrets.X}`) - -== Testing approaches - -=== Configuration linting - -Validate MCP tool configurations using `rpk connect lint`: - -[,bash] ----- -# Lint a single MCP tool -rpk connect lint weather_service.yaml - -# Lint all examples -rpk connect lint *.yaml - -# Lint with environment variable checking skipped (recommended for MCP) -rpk connect lint --skip-env-var-check *.yaml ----- - -This checks for common issues such as: - -* YAML syntax errors -* Unknown component types -* Invalid field names -* Type mismatches -* Missing required fields - -=== MCP metadata validation - -The test script validates MCP-specific metadata for all tool examples: - -[,bash] ----- -# Run all tests (includes linting + MCP validation) -./test-mcp-examples.sh - -# Test specific files -./test-mcp-examples.sh weather_*.yaml ----- - -MCP metadata validation checks: - -* Presence of `meta.mcp` section -* `enabled: true` is set -* `description` field exists and is non-empty -* `properties` are properly structured (if present) - -=== Unit testing limitations - -MCP tool examples are standalone component definitions (`label:`, `processors:`, `meta:`), not full pipelines with `input:`, `pipeline:`, `output:` sections. This means they cannot use inline `tests:` sections like cookbook examples do. - -The `rpk connect test` command requires full pipeline structure with paths like `/pipeline/processors/0`, which don't exist in MCP tool definitions. - -For testing MCP tools: - -- Ensure syntax and schema correctness. -- Verify MCP metadata has proper description and properties. -- Perform manual testing using the Cloud Console MCP Server interface to test tools end-to-end. - -== MCP tool structure - -MCP tools are structured as standalone components: - -[,yaml] ----- -label: weather-service -processors: - - label: fetch_weather_data - http: - url: 'https://wttr.in/${! @city }?format=j1' - verb: GET - - - label: format_response - mutation: | - root = { - "city": @city, - "temperature": this.current_condition.0.temp_C.number() - } - -meta: - mcp: - enabled: true - description: "Get current weather conditions for any city worldwide" - properties: - - name: city - type: string - description: "Name of the city" - required: true ----- - -== Test script usage - -The `test-mcp-examples.sh` script provides automated validation: - -[,bash] ----- -# Test all examples -./test-mcp-examples.sh - -# Test specific files -./test-mcp-examples.sh weather_*.yaml -./test-mcp-examples.sh customer_*.yaml ----- - -The script provides color-coded output: - -[,console] ----- -🧪 Redpanda Connect MCP Examples Test Suite (Cloud) -==================================================== - -📄 Testing: weather_service.yaml - Linting weather_service.yaml... PASSED - Validating MCP metadata... PASSED - -==================================================== -📊 Test Summary -==================================================== -Total configs tested: 10 -Passed: 10 -Failed: 0 - -✅ All tests passed! ----- - -== Manual end-to-end testing - -For comprehensive validation, test MCP tools using the Cloud Console: - -. Navigate to your Cloud cluster's MCP Server configuration -. Add or update your MCP tool configuration -. Use the Cloud Console's MCP Inspector to locate your tool -. 
Verify the tool executes correctly and returns expected results - -This validates: - -* Tool loads correctly in the MCP server -* Tool executes with provided parameters -* Responses are formatted correctly -* Secrets are properly resolved from Cloud Secrets Store - -== GitHub Actions CI/CD - -Automated tests run on every push and pull request using GitHub Actions. - -The workflow tests all examples whenever: - -* Any `.yaml` file in `modules/ai-agents/examples/` changes -* The test script itself is modified - -See `.github/workflows/test-mcp-examples.yaml` for the complete workflow. - -== Best practices - -=== Use descriptive tool names - -[,yaml] ----- -# Good -label: fetch-customer-orders - -# Bad -label: tool1 ----- - -=== Write clear MCP descriptions - -[,yaml] ----- -# Good -meta: - mcp: - description: "Fetch a customer's order history and calculate spending metrics over the last 30 days" - -# Bad -meta: - mcp: - description: "Get orders" ----- - -=== Document all properties - -[,yaml] ----- -# Good -properties: - - name: customer_id - type: string - description: "Unique identifier for the customer" - required: true - - name: days - type: number - description: "Number of days to look back (default: 30)" - required: false - -# Bad -properties: - - name: id - type: string - required: true ----- - -=== Use Cloud Secrets Store for sensitive data - -[,yaml] ----- -# Cloud format - uses Secrets Store -sql_select: - driver: "postgres" - dsn: "${secrets.POSTGRES_DSN}" - table: "customers" ----- - -=== Tag your examples - -[,yaml] ----- -meta: - tags: [ example, weather, api ] # Helps organize and filter - mcp: - enabled: true ----- - -== Adding new examples - -When adding new MCP tool examples: - -. **Create your YAML file** in `modules/ai-agents/examples/`: -+ -[,bash] ----- -cd modules/ai-agents/examples -touch my-new-tool.yaml ----- - -. **Include complete MCP metadata:** -+ -[,yaml] ----- -label: my-new-tool -processors: - # Your processor configuration - -meta: - mcp: - enabled: true - description: "Clear, task-oriented description" - properties: - - name: param_name - type: string - description: "Parameter purpose and constraints" - required: true ----- - -. **Lint your example:** -+ -[,bash] ----- -rpk connect lint --skip-env-var-check my-new-tool.yaml ----- - -. **Run automated tests:** -+ -[,bash] ----- -./test-mcp-examples.sh my-new-tool.yaml ----- - -. **Test in Cloud Console (recommended):** -+ -Deploy your MCP server configuration and test the tool through the Cloud Console AI interface. - -. **Commit your example:** -+ -[,bash] ----- -git add modules/ai-agents/examples/my-new-tool.yaml -git commit -m "Add my-new-tool MCP example" ----- diff --git a/modules/ai-agents/pages/agents/a2a-concepts.adoc b/modules/ai-agents/pages/agents/a2a-concepts.adoc new file mode 100644 index 000000000..48599c2a3 --- /dev/null +++ b/modules/ai-agents/pages/agents/a2a-concepts.adoc @@ -0,0 +1,121 @@ += A2A Protocol +:description: Learn how the A2A protocol enables agent discovery and communication. +:page-topic-type: concepts +:personas: agent_developer, app_developer, streaming_developer +:learning-objective-1: Describe the A2A protocol and its role in agent communication +:learning-objective-2: Explain how agent cards enable discovery +:learning-objective-3: Identify how authentication secures agent communication + +The Agent-to-Agent (A2A) protocol is an open standard for agent communication and discovery. 
Redpanda Cloud uses A2A for both external integration and internal pipeline-to-agent communication. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== What is the A2A protocol? + +The Agent-to-Agent (A2A) protocol is an open standard that defines how agents discover, communicate with, and invoke each other. + +Agents that implement A2A expose their capabilities through a standardized agent card. This allows other systems to interact with them without prior knowledge of their implementation. + +The protocol provides: + +* Standardized discovery: Agent cards describe capabilities in a machine-readable format. +* Platform independence: Any system can call any A2A-compliant agent. +* Version negotiation: Protocol versions ensure compatibility between agents. +* Communication mode flexibility: Supports synchronous request/response and streaming. + +For the complete specification, see link:https://a2a.ag/spec[a2a.ag/spec^]. + +== Agent cards + +Every A2A-compliant agent exposes an agent card at a well-known URL. + +The agent card is a JSON document that describes what the agent can do and how to interact with it. For the complete agent card specification, see link:https://agent2agent.info/docs/concepts/agentcard/[Agent Card documentation^]. + +[#agent-card-location] +=== Agent card location + +Redpanda Cloud agents expose their agent cards at the `/.well-known/agent-card.json` subpath of the agent URL. You can find the agent URL on the agent overview page in the Redpanda Cloud UI under *Agentic AI* > *AI Agents*. + +For example, if your agent URL is `\https://my-agent.ai-agents.abc123.cloud.redpanda.com`, your agent card URL is `\https://my-agent.ai-agents.abc123.cloud.redpanda.com/.well-known/agent-card.json`. + +The `.well-known` path follows internet standards for service discovery, making agents discoverable without configuration. + +To configure the agent card, see xref:ai-agents:agents/create-agent.adoc#configure-a2a-discovery-metadata-optional[Configure A2A discovery metadata]. + +== Where A2A is used in Redpanda Cloud + +Redpanda Cloud uses the A2A protocol in two contexts: + +=== External integration + +External applications and agents hosted outside Redpanda Cloud use A2A to call Redpanda Cloud agents. This includes backend services, CLI tools, custom UIs, and agents hosted on other platforms. + +For integration pattern guidance, see xref:ai-agents:agents/integration-overview.adoc[]. + +=== Internal pipeline-to-agent integration + +Redpanda Connect pipelines use the xref:develop:connect/components/processors/a2a_message.adoc[`a2a_message`] processor to invoke agents for each event in a stream. This enables real-time interaction between streaming data and AI agents, enabling use cases like: + +* Real-time fraud detection on every transaction. +* Streaming data enrichment with AI-generated fields. +* Event-driven agent invocation for automated processing. + +The `a2a_message` processor uses the A2A protocol internally to discover and call agents. For pipeline patterns, see xref:ai-agents:agents/pipeline-integration-patterns.adoc[]. + +== How agents discover each other + +A2A enables dynamic discovery without hardcoded configuration: + +. The caller fetches the agent card from the well-known URL. +. The caller checks the protocol version and supported communication modes. +. The caller uses the input schema from the agent card to format the request properly. +. 
The caller sends the request to the agent's endpoint. + +This discovery model allows: + +* New agents to become available immediately once deployed +* Existing agents to update their capabilities while callers adapt dynamically +* Callers to understand exactly what agents do through self-describing agent cards + +== Authentication + +A2A-compliant agents require authentication to prevent unauthorized access. + +Redpanda Cloud agents use OAuth2 client credentials flow. When you create an agent, the system provisions a service account with a client ID and secret. + +External callers use these credentials to obtain access tokens: + +. Agent creation automatically provisions a service account with credentials. +. Applications exchange the client ID and secret for a time-limited access token via OAuth2. +. Applications include the access token in the Authorization header when calling the agent endpoint. +. When tokens expire, applications exchange credentials again for a new token. + +This flow ensures: + +* Credentials stay secure: Applications never send them directly to agents, only access tokens. +* Exposure is limited: Tokens expire, reducing the window for compromised credentials. +* Integration is standard: Applications can use existing OAuth2 libraries. + +=== External integration + +External applications must authenticate using the service account credentials. Each agent has its own service account. + +For step-by-step authentication instructions, see xref:security:cloud-authentication.adoc[]. + +=== Internal integration + +The `a2a_message` processor handles authentication automatically. Pipelines don't need to manage credentials explicitly because they run within the Redpanda Cloud cluster with appropriate permissions. + +== Protocol versions + +The A2A protocol uses semantic versioning (major.minor.patch). Agents declare their supported version in the agent card. + +== Next steps + +* xref:ai-agents:agents/integration-overview.adoc[] +* xref:ai-agents:agents/create-agent.adoc[] +* link:https://a2a.ag/spec[A2A Protocol Specification^] diff --git a/modules/ai-agents/pages/agents/architecture-patterns.adoc b/modules/ai-agents/pages/agents/architecture-patterns.adoc new file mode 100644 index 000000000..dbfbd3dae --- /dev/null +++ b/modules/ai-agents/pages/agents/architecture-patterns.adoc @@ -0,0 +1,229 @@ += Agent Architecture Patterns +:description: Design maintainable agent systems with single-agent and multi-agent patterns based on domain complexity. +:page-topic-type: best-practices +:personas: agent_developer, streaming_developer +:learning-objective-1: Evaluate single-agent versus multi-agent architectures for your use case +:learning-objective-2: Choose appropriate LLM models based on task requirements +:learning-objective-3: Apply agent boundary design principles for maintainability + +Design agent systems that are maintainable, discoverable, and reliable by choosing the right architecture pattern and applying clear boundary principles. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== Why architecture matters + +Agent architecture determines how you manage complexity as your system grows. The right pattern depends on your domain complexity, organizational structure, and how you expect requirements to evolve. + +Starting with a simple architecture is tempting, but can lead to unmaintainable systems as complexity increases. 
Planning for growth with clear boundaries prevents technical debt and costly refactoring later. + +Warning signs include system prompts exceeding 2000 words, too many tools for the LLM to select correctly, multiple teams modifying the same agent, and changes in one domain breaking others. These symptoms indicate you need architectural boundaries, not just better prompts. + +Match agent architecture to domain structure: + +[cols="2,3,3"] +|=== +| Domain Characteristics | Architecture Fit | Reasoning + +| Single business area, stable requirements +| Single agent +| Simplicity outweighs flexibility needs + +| Multiple business areas, shared infrastructure +| Root agent with internal subagents +| Domain separation without deployment complexity + +| Cross-organization workflows, independent evolution +| External agent-to-agent +| Organizational boundaries require system boundaries +|=== + + +Every architecture pattern involves trade-offs. + +- *Latency versus isolation:* Internal subagents have lower latency because they avoid network calls, but they share a failure domain. External agents have higher latency due to network overhead, but they provide independent failure isolation. + +- *Shared state versus independence:* Single deployments share model, budget, and policies but offer less flexibility. Multiple deployments allow independent scaling and updates but add coordination complexity. + +- *Complexity now versus complexity later:* Starting simple means faster initial development but may require refactoring. Starting structured requires more upfront work but makes the system easier to extend. + +For foundational concepts on how agents execute and manage complexity, see xref:ai-agents:agents/concepts.adoc[]. + +== Single-agent pattern + +A single-agent architecture uses one agent with one system prompt and one tool set to handle all requests. + +This pattern works best for narrow domains with limited scope, single data sources, and tasks that don't require specialized subsystems. + +=== When to use single agents + +Use single agents for focused problems that won't expand significantly. + +Examples include order lookup agents that retrieve history from a single topic, weather agents that query APIs and return formatted data, and inventory checkers that report stock levels. + +=== Trade-offs + +Single agents are simpler to build and maintain. You have one system prompt, one tool set, and one deployment. + +However, all capabilities must coexist in one agent. Adding features increases complexity rapidly, making single agents difficult to scale to multi-domain problems. + +== Root agent with subagents pattern + +A multi-agent architecture uses a root agent that delegates to specialized internal subagents. + +This pattern works for complex domains spanning multiple areas, multiple data sources with different access patterns, and tasks requiring specialized expertise within one deployment. + +NOTE: Subagents in Redpanda Cloud are internal specialists within a single agent. They share the parent agent's model, budget, and policies, but each can have different names, descriptions, system prompts, and MCP tools. + +=== How it works + +The root agent interprets user requests and routes them to appropriate subagents. + +Each subagent owns a specific business area with focused expertise. Subagents access only the MCP tools they need. + +All subagents share the same LLM model and budget from the parent agent. 
+ +=== Example: E-commerce platform + +A typical e-commerce agent includes a root agent that interprets requests and delegates to specialists, an order subagent for processing, history, and status updates, an inventory subagent for stock checks and warehouse operations, and a customer subagent for profiles, preferences, and history. All subagents share the same model but have different system prompts and tool access. + +=== Why choose internal subagents + +Internal subagents provide domain isolation, allowing you to update the order subagent without affecting inventory. Debugging is easier because each subagent has narrow scope and fewer potential failure points. All subagents share resources, reducing complexity and cost compared to separate deployments. Use internal subagents when you need domain separation within a single agent deployment. + +== External agent-to-agent pattern + +External A2A integration connects agents across organizational boundaries, platforms, or independent systems. + +NOTE: Cross-agent calling between separate Redpanda Cloud agents is not supported. This pattern applies to connecting Redpanda Cloud agents with external agents you host elsewhere. + +=== When to use external A2A + +Use external A2A for multi-organization workflows that coordinate agents across company boundaries, for platform integration connecting Redpanda Cloud agents with agents hosted elsewhere, and when agents require different deployment environments such as GPU clusters, air-gapped networks, or regional constraints. + +=== How it works + +Agents communicate using the xref:ai-agents:agents/a2a-concepts.adoc[A2A protocol], a standard HTTP-based protocol for discovery and invocation. Each agent manages its own credentials and access control independently, and can deploy, scale, and update without coordinating with other agents. Agent cards define capabilities without exposing implementation details. + +=== Example: Multi-platform customer service + +A customer service workflow might span multiple platforms: + +* Redpanda Cloud agent accesses real-time order and inventory data +* CRM agent hosted elsewhere manages customer profiles and support tickets +* Payment agent from a third party handles transactions in a secure environment + +Each agent runs on its optimal infrastructure while coordinating through A2A. + +=== Why choose external A2A + +External A2A lets different teams own and deploy their agents independently, with each agent choosing its own LLM, tools, and infrastructure. Sensitive operations stay in controlled environments with security isolation, and you can add agents incrementally without rewriting existing systems. + +=== Trade-offs + +External A2A adds network latency on every cross-agent call, and authentication complexity multiplies with each agent requiring credential management. Removing capabilities or changing contracts requires coordination across consuming systems, and debugging requires tracing requests across organizational boundaries. + +For implementation details on external A2A integration, see xref:ai-agents:agents/integration-overview.adoc[]. + +== Common anti-patterns + +Avoid these architecture mistakes that lead to unmaintainable agent systems. + +=== The monolithic prompt + +A monolithic prompt is a single 3000+ word system prompt covering multiple domains. + +This pattern fails because LLM confusion increases with prompt length, multiple teams modify the same prompt creating conflicts and unclear ownership, and changes to one domain risk breaking others. 
+ +Split into domain-specific subagents instead. Each subagent gets a focused prompt under 500 words. + +=== The tool explosion + +A tool explosion occurs when a single agent has 30+ tools from every MCP server in the cluster. + +This pattern fails because the LLM struggles to choose correctly from large tool sets, tool descriptions compete for limited prompt space, and the agent invokes wrong tools with similar names, wasting iteration budget on selection mistakes. + +Limit tools per agent. Use subagents to partition tools by domain. For tool design patterns, see xref:ai-agents:mcp/remote/tool-patterns.adoc[]. + +=== Premature A2A splitting + +Premature splitting creates three separate A2A agents when all logic could fit in one agent with internal subagents. + +This pattern fails because network latency affects every cross-agent call, authentication complexity multiplies with three sets of credentials, debugging requires correlating logs across systems, and you manage three deployments instead of one. + +Start with internal subagents for domain separation. Split to external A2A only when you need organizational boundaries or different infrastructure. + +=== Unbounded tool chaining + +Unbounded chaining sets max iterations to 100, returns hundreds of items from tools, and places no constraints on tool call frequency. + +This pattern fails because the context window fills with tool results, requests time out before completion, costs spiral with many iterations multiplied by large context, and the agent loses track of the original goal. + +Design workflows to complete in 20-30 iterations. Return paginated results from tools. Add prompt constraints like "Never call the same tool more than 3 times per request." + +== Model selection guide + +Choose models based on task complexity, latency requirements, and cost constraints. The Redpanda Cloud UI displays available models with descriptions when creating agents. + +=== Match models to task complexity + +For simple queries, choose cost-effective models such as GPT-5 Mini. + +For balanced workloads, choose mid-tier models such as Claude Sonnet 4.5 or GPT-5.2. + +For complex reasoning, choose premium models such as Claude Opus 4.5 or GPT-5.2. + +=== Balance latency and model size + +For real-time responses, choose smaller models. Use models optimized for speed, such as Mini or base tiers. + +For batch processing, optimize for accuracy over speed. Use larger models when users don't wait for results. + +=== Optimize for cost and volume + +For high volume, use cost-effective models. Smaller tiers reduce costs while maintaining acceptable quality. + +For critical accuracy, use premium models. Higher costs are justified when errors are costly. + +=== Model provider documentation + +For complete model specifications, capabilities, and pricing: + +* link:https://platform.openai.com/docs/models[OpenAI Models^] +* link:https://docs.anthropic.com/claude/docs/models-overview[Anthropic Claude Models^] +* link:https://ai.google.dev/gemini-api/docs/models[Google Gemini Models^] + +== Design principles + +Follow these principles to create maintainable agent systems. + +=== Explicit agent boundaries + +Each agent should have clear scope and responsibilities. Define scope explicitly in the system prompt, assign a specific tool set for the agent's domain, and specify well-defined inputs and outputs. + +Do not create agents with overlapping responsibilities. Overlapping domains create confusion about which agent handles which requests. 
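+A scope statement like the following sketch (a hypothetical agent and limits, shown only for illustration) makes these boundaries explicit in the system prompt and signals which requests belong elsewhere:
+
+[,text]
+----
+You are an order management agent.
+
+In scope:
+- Look up orders and report shipment status
+- Update shipping addresses before dispatch
+
+Out of scope:
+- Inventory and warehouse operations (handled by the inventory subagent)
+- Refunds above $500 (escalate to a human)
+----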
+ +=== Tool scoping per agent + +Assign tools to the agent that needs them. Don't give all agents access to all tools. Limit tool access based on agent purpose. + +Tool scoping reduces misuse risk and makes debugging easier. + +=== Error handling and fallbacks + +Design agents to handle failures gracefully. + +Use retry logic for transient failures like network timeouts. Report permanent failures like invalid parameters immediately. + +Provide clear error messages to users. Log errors for debugging. + +== Next steps + +* xref:ai-agents:agents/integration-overview.adoc[] +* xref:ai-agents:agents/a2a-concepts.adoc[] +* xref:ai-agents:mcp/remote/tool-patterns.adoc[] +* xref:ai-agents:agents/overview.adoc[] +* xref:ai-agents:mcp/remote/best-practices.adoc[] diff --git a/modules/ai-agents/pages/agents/build-index.adoc b/modules/ai-agents/pages/agents/build-index.adoc new file mode 100644 index 000000000..f3679805d --- /dev/null +++ b/modules/ai-agents/pages/agents/build-index.adoc @@ -0,0 +1,5 @@ += Build Agents +:page-layout: index +:description: Create production AI agents with effective prompts and scalable architecture. + +Create agents, write effective prompts, and design scalable agent systems. diff --git a/modules/ai-agents/pages/agents/concepts.adoc b/modules/ai-agents/pages/agents/concepts.adoc new file mode 100644 index 000000000..0ddb068f9 --- /dev/null +++ b/modules/ai-agents/pages/agents/concepts.adoc @@ -0,0 +1,148 @@ += Agent Concepts +:description: Understand how agents execute, manage context, invoke tools, and handle errors. +:page-topic-type: concepts +:personas: agent_developer, streaming_developer, data_engineer +:learning-objective-1: Explain how agents execute reasoning loops and make tool invocation decisions +:learning-objective-2: Describe how agents manage context and state across interactions +:learning-objective-3: Identify error handling strategies for agent failures + +Agents execute through a reasoning loop where the LLM analyzes context, decides which tools to invoke, processes results, and repeats until the task completes. Understanding this execution model helps you design reliable agent systems. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== Agent execution model + +Every agent request follows a reasoning loop. The agent doesn't execute all tool calls at once. Instead, it makes decisions iteratively. + +=== The reasoning loop + +When an agent receives a request: + +. The LLM receives the context, including system prompt, conversation history, user request, and previous tool results. +. The LLM chooses to invoke a tool, requests more information, or responds to user. +. The tool runs and returns results if invoked. +. The tool's results are added to conversation history. +. The LLM reasons again with an expanded context. + +The loop continues until one of these conditions is met: + +* Agent completes the task and responds to the user +* Agent reaches max iterations limit +* Agent encounters an unrecoverable error + +=== Why iterations matter + +Each iteration includes three phases: + +. **LLM reasoning**: The model processes the growing context to decide the next action. +. **Tool invocation**: If the agent decides to call a tool, execution happens and waits for results. +. **Context expansion**: Tool results are added to the conversation history for the next iteration. 
+
+With higher iteration limits, agents can complete complex tasks, but each request costs more and takes longer.
+
+With lower iteration limits, agents respond faster and cost less, but they may fail on complex requests.
+
+==== Cost calculation
+
+Calculate the approximate cost per request by estimating average context tokens per iteration:
+
+----
+Cost per request = (iterations x context tokens x model price per token)
+----
+
+Example with 30 iterations at $0.000002 per token, where the context grows as tool results accumulate:
+
+----
+Iteration 1: 500 tokens x $0.000002 = $0.001
+Iteration 15: 2000 tokens x $0.000002 = $0.004
+Iteration 30: 4000 tokens x $0.000002 = $0.008
+
+Total across all 30 iterations (assuming an average of ~2250 tokens each):
+30 x 2250 tokens x $0.000002 = ~$0.14 per request
+----
+
+Actual costs vary based on:
+
+* Tool result sizes (large results increase context)
+* Model pricing (varies by provider and model tier)
+* Task complexity (determines iteration count)
+
+Setting max iterations creates a cost/capability trade-off:
+
+[cols="1,1,2,1", options="header"]
+|===
+|Limit |Range |Use Case |Cost
+
+|Low
+|10-20
+|Simple queries, single tool calls
+|Cost-effective
+
+|Medium
+|30-50
+|Multi-step workflows, tool chaining
+|Balanced
+
+|High
+|50-100
+|Complex analysis, exploratory tasks
+|Higher
+|===
+
+Iteration limits prevent runaway costs when agents encounter complex or ambiguous requests.
+
+== MCP tool invocation patterns
+
+MCP tools extend agent capabilities beyond text generation. Understanding when and how tools execute helps you design effective tool sets.
+
+=== Synchronous tool execution
+
+In Redpanda Cloud, tool calls block the agent. When the agent decides to invoke a tool, it pauses and waits while the tool executes (querying a database, calling an API, or processing data). When the tool returns its result, the agent resumes reasoning.
+
+This synchronous model means latency adds up across multiple tool calls, the agent sees tool results sequentially rather than in parallel, and long-running tools can delay or fail agent requests due to timeouts.
+
+=== Tool selection decisions
+
+The LLM decides which tool to invoke based on system prompt guidance (such as "Use get_orders when a customer asks about history"), tool descriptions from the MCP schema that define parameters and purpose, and conversation context where previous tool results influence the next tool choice. Agents can invoke the same tool multiple times with different parameters if the task requires it.
+
+=== Tool chaining
+
+Agents chain tools when one tool's output feeds another tool's input. For example, an agent might first call `get_customer_info(customer_id)` to retrieve details, then use that data to call `get_order_history(customer_email)`.
+
+Tool chaining requires sufficient max iterations because each step in the chain consumes one iteration.
+
+=== Tool granularity considerations
+
+Tool design affects agent behavior. Coarse-grained tools that do many things result in fewer tool calls but less flexibility and more complex implementation. Fine-grained tools that each do one thing require more tool calls but offer higher composability and simpler implementation.
+
+Choose granularity based on how often you'll reuse tool logic across workflows, whether intermediate results help with debugging, and how much control you want over tool invocation order.
+
+For tool design guidance, see xref:ai-agents:mcp/remote/best-practices.adoc[].
+
+== Context and state management
+
+Agents handle two types of information: conversation context (what's been discussed) and state (persistent data across sessions).
+ +=== Conversation context + +The agent's context includes the system prompt (always present), user messages, agent responses, tool invocation requests, and tool results. + +As the conversation progresses, context grows. Each tool result adds tokens to the context window, which the LLM uses for reasoning in subsequent iterations. + +=== Context window limits + +LLM context windows limit how much history fits. Small models support 8K-32K tokens, medium models support 32K-128K tokens, and large models support 128K-1M+ tokens. + +When context exceeds the limit, the oldest tool results get truncated, the agent loses access to early conversation details, and may ask for information it already retrieved. + +Design workflows to complete within context limits. Avoid unbounded tool chaining. + +== Next steps + +* xref:ai-agents:agents/architecture-patterns.adoc[] +* xref:ai-agents:agents/quickstart.adoc[] +* xref:ai-agents:agents/prompt-best-practices.adoc[] +* xref:ai-agents:mcp/remote/best-practices.adoc[] diff --git a/modules/ai-agents/pages/agents/create-agent.adoc b/modules/ai-agents/pages/agents/create-agent.adoc new file mode 100644 index 000000000..ef5d2950f --- /dev/null +++ b/modules/ai-agents/pages/agents/create-agent.adoc @@ -0,0 +1,279 @@ += Create an Agent +:description: Configure agents with model selection, system prompts, tool connections, and execution parameters. +:page-topic-type: how-to +:personas: agent_developer, app_developer, streaming_developer +:learning-objective-1: Configure an agent with model selection and system prompt +:learning-objective-2: Connect MCP servers and select tools for your agent +:learning-objective-3: Set agent execution parameters including max iterations + +Create a new AI agent through the Redpanda Cloud UI. This guide walks you through configuring the agent's model, system prompt, tools, and execution settings. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== Prerequisites + +* A xref:get-started:cluster-types/byoc/index.adoc[BYOC cluster] with Remote MCP enabled. +* xref:ai-agents:ai-gateway/gateway-quickstart.adoc[AI Gateway configured] with at least one LLM provider enabled. +* At least one xref:ai-agents:mcp/remote/overview.adoc[Remote MCP server] deployed with tools. +* System prompt prepared (see xref:ai-agents:agents/prompt-best-practices.adoc[System Prompt Best Practices]). + +== Access the agents UI + +. Log in to the link:https://cloud.redpanda.com[Redpanda Cloud UI^]. +. Navigate to your cluster. +. Click *Agentic AI* > *AI Agents* in the left navigation. + +== Configure basic settings + +. Click *Create Agent*. +. Enter a display name (3-128 characters, alphanumeric with spaces, hyphens, underscores, or slashes). +. Optionally, add a description (maximum 256 characters). +. Select a resource tier based on your workload characteristics: ++ +Resource tiers control CPU and memory allocated to your agent. Choose based on: ++ +* **Concurrency:** How many simultaneous requests the agent handles. +* **Tool memory:** Whether tools process large datasets in memory. +* **Response time:** How quickly the agent needs to respond. 
++ +Available tiers: ++ +* XSmall: 100m CPU, 400M RAM (single-user testing, simple queries) +* Small: 200m CPU, 800M RAM (light workloads, few concurrent users) +* Medium: 300m CPU, 1200M RAM (recommended for most production use cases) +* Large: 400m CPU, 1600M RAM (high concurrency or memory-intensive tools) +* XLarge: 500m CPU, 2G RAM (very high concurrency or large data processing) ++ +Start with Medium for production workloads. Monitor CPU and memory usage, then adjust if you see resource constraints. + +. Optionally, add tags (maximum 16 tags) for organization and filtering: ++ +* Keys: Maximum 64 characters, must be unique +* Values: Maximum 256 characters, allowed characters: letters, numbers, spaces, and `_.:/=+-@` + +== Choose a model + +Agents use large language models (LLMs) to interpret user intent and decide which tools to invoke. + +. Select your AI Gateway: ++ +Choose the gateway that contains your configured LLM providers and API keys. If you have multiple gateways, select the appropriate one for this agent's workload (for example, production vs staging, or team-specific gateways). + +. Select your LLM provider from those available in the gateway: ++ +* OpenAI (GPT models) +* Google (Gemini models) +* Anthropic (Claude models) +* OpenAI Compatible (custom OpenAI-compatible endpoints) + +. If using OpenAI Compatible, provide the base URL: ++ +* Base URL is required for OpenAI Compatible +* Must start with `http://` or `https://` +* Example: `https://api.example.com/v1` + +. Select the specific model version from the dropdown. ++ +The dropdown shows available models with descriptions. + +For detailed model specifications and pricing: + +* link:https://platform.openai.com/docs/models[OpenAI Models^] +* link:https://docs.anthropic.com/claude/docs/models-overview[Anthropic Claude Models^] +* link:https://ai.google.dev/gemini-api/docs/models[Google Gemini Models^] + +For model selection based on architecture patterns, see xref:ai-agents:agents/architecture-patterns.adoc#model-selection-guide[Model selection guide]. + +== Write the system prompt + +. In the *System Prompt* section, enter your prompt (minimum 10 characters). +. Follow these guidelines: ++ +* Define agent role and responsibilities +* List available tools +* Specify constraints and safety rules +* Set output format expectations + +. Use the *Preview* button to review formatted prompt. + +Example system prompt structure: + +[,text] +---- +You are an [agent role]. + +Responsibilities: +- [Task 1] +- [Task 2] + +Available tools: +- [tool_name]: [description] + +Never: +- [Constraint 1] +- [Constraint 2] + +Response format: +- [Format guideline] +---- + +For complete prompt guidelines, see xref:ai-agents:agents/prompt-best-practices.adoc[System Prompt Best Practices]. + +== Add MCP servers and select tools + +. In the *Tools* section, click *Add MCP Server*. +. Select an MCP server from your cluster. +. The UI displays all tools exposed by that server. +. Select which tools this agent can use: ++ +* Check the box next to each tool +* Review tool descriptions to confirm they match agent needs + +. Repeat to add tools from multiple MCP servers. +. Verify your tool selection: ++ +* Ensure tools match those listed in your system prompt +* Remove tools the agent doesn't need (principle of least privilege) + +== Add subagents (optional) + +Subagents are internal specialists within a single agent. 
Each subagent can have its own name, description, system prompt, and MCP tools, but all subagents share the parent agent's model, budget, and policies. + +. In the *Subagents* section, click *Add Subagent*. +. Configure the subagent: ++ +* *Name*: 1-64 characters, only letters, numbers, hyphens, and underscores (for example: `order-agent` or `Order_Agent`) +* *Description*: Maximum 256 characters (optional) +* *System Prompt*: Minimum 10 characters, domain-specific instructions +* *MCP Tools*: Select tools this subagent can access + +The root agent orchestrates and delegates work to appropriate subagents based on the request. + +For multi-agent design patterns, see xref:ai-agents:agents/architecture-patterns.adoc[Agent Architecture Patterns]. + +=== Set max iterations + +Max iterations determine how many reasoning loops the agent can perform before stopping. Each iteration consumes tokens and adds latency. For detailed cost calculations and the cost/capability/latency trade-off, see xref:ai-agents:agents/concepts.adoc[]. + +In the *Execution Settings* section, configure *Max Iterations* (range: 10-100, default: 30). + +Choose based on task complexity: + +* **Simple queries** (10-20): Single tool call, direct answers, minimal reasoning +* **Balanced workflows** (20-40): Multiple tool calls, data aggregation, moderate analysis +* **Complex analysis** (40-100): Exploratory queries, extensive tool chaining, deep reasoning + +Start with 30 for most use cases. + +=== Configure A2A discovery metadata + +After creating your agent, configure discovery metadata for external integrations. For detailed agent card design guidance, see link:https://agent2agent.info/docs/guides/create-agent-card/[Create an Agent Card^]. + +. Click on your agent. +. Open the *A2A* tab. +. Configure identity fields: ++ +* *Icon URL*: A publicly accessible image URL (recommended: 256x256px PNG or SVG) +* *Documentation URL*: Link to comprehensive agent documentation + +. Configure provider information: ++ +* *Organization*: Your organization or team name +* *URL*: Website or contact URL + +. Configure capabilities by adding skills: ++ +Skills describe what your agent can do for capability-based discovery. External systems use skills to find agents with the right capabilities. ++ +.. Click *+ Add Skill* to define what this agent can do. +.. For each skill, configure: ++ +* *Skill ID* (required): Unique identifier using lowercase letters, numbers, and hyphens (e.g., `fraud-analysis`, `order-lookup`) +* *Skill Name* (required): Human-readable name displayed in agent directories (e.g., "Fraud Analysis", "Order Lookup") +* *Description* (required): Explain what this skill does and when to use it. Be specific about inputs, outputs, and use cases. +* *Tags* (optional): Add tags for categorization and search. Use common terms like `fraud`, `security`, `finance`, `orders`. +* *Examples* (optional): Click *+ Add Example* to provide sample queries demonstrating how to invoke this skill. Examples help users understand how to interact with your agent. ++ +.. Add multiple skills if your agent handles different types of requests. For example, a customer service agent might have separate skills for "Order Status Lookup", "Shipping Tracking", and "Returns Processing". + +. Click *Save Changes*. + +The updated metadata appears immediately at `\https://your-agent-url/.well-known/agent-card.json`. For more about what these fields mean and how they're used, see xref:ai-agents:agents/a2a-concepts.adoc#agent-card-metadata[Agent card metadata]. 
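+To confirm that your changes are live, you can fetch the agent card and inspect it. The following is a minimal sketch that assumes `curl` and `jq` are installed and uses a hypothetical agent URL; substitute the agent URL shown on your agent overview page:
+
+[,bash]
+----
+# Hypothetical agent URL; replace with your agent's URL
+AGENT_URL="https://my-agent.ai-agents.abc123.cloud.redpanda.com"
+
+# Fetch and pretty-print the published agent card
+curl -s "${AGENT_URL}/.well-known/agent-card.json" | jq .
+----
+
+Review the output to confirm that the provider information and skills you configured appear as expected.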
+ +=== Review and create + +. Review all settings. + +. Configure the service account name (optional): ++ +* Default pattern: `--agent--sa` +* Custom name: 3-128 characters, cannot contain `<` or `>` characters +* This service account authenticates the agent with cluster resources + +. Click *Create Agent*. + +. Wait for agent creation to complete. + +When your agent is running, Redpanda Cloud provides an HTTP endpoint URL with the pattern: + +---- +https://.ai-agents.. +---- + +You can use this URL to call your agent programmatically or integrate it with external systems. + +The *Inspector* tab in the Cloud UI automatically uses this URL to connect to your agent for testing. + +For programmatic access or external agent integration, see xref:ai-agents:agents/integration-overview.adoc[]. + +== Test your agent + +. In the agent details view, click the *Inspector* tab. +. Enter a test prompt. +. Verify the agent: ++ +* Selects appropriate tools +* Follows system prompt constraints +* Returns expected output format + +. Iterate on the system prompt or tool selection as needed. + +For detailed testing strategies, see xref:ai-agents:agents/monitor-agents.adoc[]. + +== Example configurations + +Here are example configurations for different agent types: + +=== Simple query agent + +* *Model*: GPT-5 Mini (fast, cost-effective) +* *Tools*: Single MCP server with `get_orders` tool +* *Max iterations*: 10 +* *Use case*: Customer order lookups + +=== Complex analytics agent + +* *Model*: Claude Sonnet 4.5 (balanced) +* *Tools*: Multiple servers with data query, aggregation, and formatting tools +* *Max iterations*: 30 +* *Use case*: Multi-step data analysis + +=== Multi-agent orchestrator + +* *Model*: Claude Opus 4.5 (advanced reasoning) +* *Tools*: Agent delegation tools +* *Subagents*: Order Agent, Inventory Agent, Customer Agent +* *Max iterations*: 20 +* *Use case*: E-commerce operations + +== Next steps + +* xref:ai-agents:agents/integration-overview.adoc[] +* xref:ai-agents:agents/prompt-best-practices.adoc[] +* xref:ai-agents:mcp/remote/create-tool.adoc[] +* xref:ai-agents:agents/architecture-patterns.adoc[] +* xref:ai-agents:agents/troubleshooting.adoc[] diff --git a/modules/ai-agents/pages/agents/get-started-index.adoc b/modules/ai-agents/pages/agents/get-started-index.adoc new file mode 100644 index 000000000..70856d47d --- /dev/null +++ b/modules/ai-agents/pages/agents/get-started-index.adoc @@ -0,0 +1,5 @@ += Get Started with AI Agents +:page-layout: index +:description: Learn what AI agents are and build your first agent in Redpanda Cloud. + +Start here to understand AI agents and build your first one. diff --git a/modules/ai-agents/pages/agents/index.adoc b/modules/ai-agents/pages/agents/index.adoc new file mode 100644 index 000000000..a07f6ad68 --- /dev/null +++ b/modules/ai-agents/pages/agents/index.adoc @@ -0,0 +1,5 @@ += AI Agents +:page-layout: index +:description: Build AI agents that use Redpanda Cloud for real-time streaming data and tool execution. + +Build AI agents that combine large language models with MCP tools to process streaming data and execute actions. diff --git a/modules/ai-agents/pages/agents/integration-index.adoc b/modules/ai-agents/pages/agents/integration-index.adoc new file mode 100644 index 000000000..7a4b5672e --- /dev/null +++ b/modules/ai-agents/pages/agents/integration-index.adoc @@ -0,0 +1,6 @@ += Agent Integration +:page-layout: index +:description: Connect agents to external applications, pipelines, and other systems. 
+ +Choose integration patterns and connect agents to your systems. + diff --git a/modules/ai-agents/pages/agents/integration-overview.adoc b/modules/ai-agents/pages/agents/integration-overview.adoc new file mode 100644 index 000000000..ecba9080b --- /dev/null +++ b/modules/ai-agents/pages/agents/integration-overview.adoc @@ -0,0 +1,128 @@ += Integration Patterns Overview +:description: Choose the right integration pattern for connecting agents, pipelines, and external applications. +:page-topic-type: best-practices +:personas: agent_developer, streaming_developer, app_developer, data_engineer +:learning-objective-1: Choose the integration pattern that fits your use case +:learning-objective-2: Apply appropriate authentication for internal versus external integration +:learning-objective-3: Select the right communication protocol for your integration scenario + +Redpanda Cloud supports multiple integration patterns for agents, pipelines, and external applications. Choose the pattern that matches your integration scenario. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== Integration scenarios + +Redpanda Cloud supports three primary integration scenarios based on who initiates the call and where the caller is located: + +[cols="1,2,2,1"] +|=== +| Scenario | Description | When to Use | Guide + +| Agent needs capabilities +| Your agent invokes MCP tools to fetch data, call APIs, or access external systems on-demand +| Agent-initiated, synchronous, interactive workflows +| xref:ai-agents:mcp/remote/tool-patterns.adoc[] + +| Pipeline processes events +| Your Redpanda Connect pipeline invokes agents for each event in a stream using the `a2a_message` processor +| Event-driven, automated, high-volume stream processing +| xref:ai-agents:agents/pipeline-integration-patterns.adoc[] + +| External system calls agent +| Your application or agent (hosted outside Redpanda Cloud) calls Redpanda Cloud agents using the A2A protocol +| Backend services, CLI tools, custom UIs, multi-platform agent workflows +| xref:ai-agents:agents/a2a-concepts.adoc[] +|=== + +== Common use cases by pattern + +Each integration pattern serves different scenarios based on how data flows and who initiates the interaction. + +[[agent-needs-capabilities]] +=== Agent needs capabilities (MCP tools) + +Use MCP tools when your agent needs on-demand access to data or capabilities. + +The agent decides when to invoke tools as part of its reasoning process. It waits for responses before continuing. + +This pattern works well for interactive workflows: customer support lookups, approval flows, or context-aware chatbots. + +Avoid MCP tools for high-volume stream processing or automated workflows without user interaction. Use pipeline-initiated integration instead. + +For implementation details, see xref:ai-agents:mcp/remote/tool-patterns.adoc[]. + +[[pipeline-processes-events]] +=== Pipeline processes events (`a2a_message`) + +Use the `a2a_message` processor when your pipeline needs to invoke agents for every event in a stream. + +The pipeline controls when agents execute. This pattern is ideal for automated, high-volume processing where each event requires AI reasoning. + +Common scenarios include real-time fraud detection, sentiment scoring for customer reviews, and content moderation that classifies and routes content. + +For implementation details, see xref:ai-agents:agents/pipeline-integration-patterns.adoc[]. 
+ +=== External system calls agent + +Use external integration when your applications, services, or agents hosted outside Redpanda Cloud need to call Redpanda Cloud agents. + +External systems send requests using the A2A protocol and receive responses synchronously. This works for backend services, CLI tools, custom UIs, and agents hosted on other platforms. + +Common scenarios include backend services analyzing data as part of workflows, CLI tools invoking agents for batch tasks, custom UIs displaying agent responses, CRM agents coordinating with Redpanda agents, and multi-platform workflows spanning different infrastructure. + +To learn how the A2A protocol enables this integration, see xref:ai-agents:agents/a2a-concepts.adoc[]. + +== Pattern comparison + +The following table compares the two primary internal integration patterns: + +[cols="1,2,2"] +|=== +| Criterion | Agents Invoking MCP Tools | Pipelines Calling Agents + +| Trigger +| User question or agent decision +| Event arrival in topic + +| Frequency +| Ad-hoc, irregular, as needed +| Continuous, every event + +| Latency +| Low (agent waits for response) +| Higher (async acceptable) + +| Control Flow +| Agent decides when to invoke +| Pipeline decides when to invoke + +| Use Case +| "Fetch me data", "Run this query" +| "Process this stream", "Enrich all events" + +| Human in Loop +| Often yes (user-driven) +| Often no (automated) +|=== + +== Security considerations for external integration + +When integrating external applications with Redpanda Cloud agents, protect credentials and tokens. + +=== Protect service account credentials + +Store the client ID and secret in secure credential stores, not in code. Use environment variables or secrets management systems. Rotate credentials if compromised and restrict access based on the principle of least privilege. + +=== Protect access tokens + +Access tokens grant full access to the agent. Anyone with a valid token can send requests, receive responses, and consume agent resources (subject to rate limits). Treat access tokens like passwords and never log them or include them in error messages. + +== Next steps + +* xref:ai-agents:agents/a2a-concepts.adoc[] +* xref:ai-agents:mcp/remote/tool-patterns.adoc[] +* xref:ai-agents:agents/pipeline-integration-patterns.adoc[] diff --git a/modules/ai-agents/pages/agents/monitor-agents.adoc b/modules/ai-agents/pages/agents/monitor-agents.adoc new file mode 100644 index 000000000..71578c7f6 --- /dev/null +++ b/modules/ai-agents/pages/agents/monitor-agents.adoc @@ -0,0 +1,99 @@ += Monitor Agent Activity +:description: Monitor agent execution, analyze conversation history, track token usage, and debug issues using Inspector, Transcripts, and agent data topics. +:page-topic-type: how-to +:personas: agent_developer, platform_admin +:learning-objective-1: pass:q[Verify agent behavior using the *Inspector* tab] +:learning-objective-2: Track token usage and performance metrics +:learning-objective-3: pass:q[Debug agent execution using *Transcripts*] + +Use monitoring to track agent performance, analyze conversation patterns, debug execution issues, and optimize token costs. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +For conceptual background on traces and observability, see xref:ai-agents:observability/concepts.adoc[]. + +== Prerequisites + +You must have a running agent. If you do not have one, see xref:ai-agents:agents/quickstart.adoc[]. 
+ +== Debug agent execution with Transcripts + +The *Transcripts* view shows execution traces with detailed timing, errors, and performance metrics. Use this view to debug issues, verify agent behavior, and monitor performance in real-time. + +:context: agent +include::ai-agents:partial$transcripts-ui-guide.adoc[] + +=== Check agent health + +Use the *Transcripts* view to verify your agent is healthy. Look for consistent green bars in the timeline, which indicate successful executions. Duration should stay within your expected range, while token usage remains stable without unexpected growth. + +Several warning signs indicate problems. Red bars in the timeline mean errors or failures that need investigation. When duration increases over time, your context window may be growing or tool calls could be slowing down. Many LLM calls for simple requests often signal that the agent is stuck in loops or making unnecessary iterations. If you see missing transcripts, the agent may be stopped or encountering deployment issues. + +Pay attention to patterns across multiple executions. When all recent transcripts show errors, start by checking agent status, MCP server connectivity, and system prompt configuration. A spiky timeline that alternates between success and error typically points to intermittent tool failures or external API issues. If duration increases steadily over a session, your context window is likely filling up. Clear the conversation history to reset it. High token usage combined with relatively few LLM calls usually means tool results are large or your system prompts are verbose. + +=== Debug with Transcripts + +Use *Transcripts* to diagnose specific issues: + +If the agent is not responding: + +. Check the timeline for recent transcripts. If none appear, the agent may be stopped. +. Verify agent status in the main *AI Agents* view. +. Look for error transcripts with deployment or initialization failures. + +If the agent fails during execution: + +. Select the failed transcript (red bar in timeline). +. Expand the trace hierarchy to find the tool invocation span. +. Check the span details for error messages. +. Cross-reference with MCP server status. + +If performance is slow: + +. Compare duration across multiple transcripts in the summary panel. +. Look for specific spans with long durations (wide bars in trace list). +. Check if LLM calls are taking longer than expected. +. Verify tool execution time by examining nested spans. + +=== Track token usage and costs + +View token consumption in the *Summary* panel when you select a transcript. The breakdown shows input tokens (everything sent to the LLM including system prompt, conversation history, and tool results), output tokens (what the LLM generates in agent responses), and total tokens as the sum of both. + +Calculate cost per request: + +---- +Cost = (input_tokens x input_price) + (output_tokens x output_price) +---- + +Example: GPT-5.2 with 4,302 input tokens and 1,340 output tokens at $0.00000175 per input token and $0.000014 per output token costs $0.026 per request. + +For cost optimization strategies, see xref:ai-agents:agents/concepts.adoc#cost-calculation[Cost calculation]. + +== Test agent behavior with Inspector + +The *Inspector* tab provides real-time conversation testing. Use it to test agent responses interactively and verify behavior before deploying changes. + +=== Access Inspector + +. Navigate to *Agentic AI* > *AI Agents* in the Redpanda Cloud UI. +. Click your agent name. +. Open the *Inspector* tab. +. 
Enter test queries and review responses. +. Check the conversation panel to see tool calls. +. Start a new session to test fresh conversations or click *Clear context* to reset history. + +=== Testing best practices + +Test your agents systematically by exploring edge cases and potential failure scenarios. Begin with boundary testing. Requests at the edge of agent capabilities verify that scope enforcement works correctly. Error handling becomes clear when you request unavailable data and observe whether the agent degrades gracefully or fabricates information. + +Monitor iteration counts during complex requests to ensure they complete within your configured limits. Ambiguous or vague queries reveal whether the agent asks clarifying questions or makes risky assumptions. Throughout testing, track token usage per request to estimate costs and identify which query patterns consume the most resources. + +== Next steps + +* xref:ai-agents:observability/concepts.adoc[] +* xref:ai-agents:agents/troubleshooting.adoc[] +* xref:ai-agents:agents/concepts.adoc[] diff --git a/modules/ai-agents/pages/agents/overview.adoc b/modules/ai-agents/pages/agents/overview.adoc new file mode 100644 index 000000000..6d9541b2e --- /dev/null +++ b/modules/ai-agents/pages/agents/overview.adoc @@ -0,0 +1,68 @@ += AI Agents Overview +:description: Learn what AI agents are and how Redpanda Cloud supports agent development with real-time streaming. +:page-topic-type: overview +:personas: evaluator, agent_developer, app_developer, streaming_developer +:learning-objective-1: Describe what AI agents are and their essential components +:learning-objective-2: Explain how Redpanda Cloud streaming infrastructure benefits agent architectures +:learning-objective-3: Identify use cases where Redpanda Cloud agents provide value + +AI agents are systems that combine large language models (LLMs) with the ability to execute actions and process data. Redpanda Cloud provides real-time streaming infrastructure and standardized tool access to support agent development. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== What is an AI agent? + +An AI agent is a system built around a large language model that can interpret user intent, decide which actions are required, invoke external tools, process live and historical data, and chain multiple steps into a workflow. AI agents differ from text-only LLMs by executing actions and invoking external tools. + +== How agents work + +Every AI agent consists of four essential components: + +* *System prompt*: Defines the agent's role, responsibilities, and constraints +* *LLM*: Interprets user intent and decides which tools to invoke +* *Tools*: External capabilities exposed through the Model Context Protocol (MCP) +* *Context*: Conversation history, tool results, and real-time events from Redpanda topics + +Agents can invoke Redpanda Connect components as tools on-demand. Redpanda Connect pipelines can also invoke agents for event-driven processing. This bidirectional integration supports both interactive workflows and automated streaming. + +When a user makes a request, the LLM receives the system prompt and context, decides which tools to invoke, and processes the results. This cycle repeats until the task completes. + +For a deeper understanding of how agents execute, manage context, and maintain state, see xref:ai-agents:agents/concepts.adoc[]. 
+ +== Key benefits + +Redpanda Cloud provides real-time streaming data so agents access live events instead of batch snapshots. Remote MCP support enables standardized tool access. Managed infrastructure handles deployment, scaling, and security for you. Low-latency execution means tools run close to your data. Integrated secrets management securely stores API keys and credentials. + +== Use cases + +AI agents in Redpanda Cloud unlock new capabilities across multiple fields. + +=== For AI agent developers + +Build agents grounded in real-time data instead of static snapshots. Connect your agent to live order status, inventory levels, and customer history so responses reflect current business state, not stale training data. + +=== For application developers + +Add conversational AI to existing applications without rebuilding your backend. Expose your services as MCP tools and let agents orchestrate complex multi-step workflows through natural language. + +=== For streaming developers + +Process every event with AI reasoning at scale. Invoke agents automatically from pipelines for fraud detection, content moderation, or sentiment analysis. No batch jobs, no delayed insights. + +== Limitations + +* Agents are available only on xref:get-started:cluster-types/byoc/index.adoc[BYOC clusters] +* MCP servers must be hosted in Redpanda Cloud clusters +* Cross-agent calling between separate agents hosted in Redpanda Cloud is not currently supported (use internal subagents for delegation within a single agent) + +== Next steps + +* xref:ai-agents:agents/quickstart.adoc[] +* xref:ai-agents:agents/concepts.adoc[] +* xref:ai-agents:agents/architecture-patterns.adoc[] +* xref:ai-agents:agents/integration-overview.adoc[] +* xref:ai-agents:agents/create-agent.adoc[] diff --git a/modules/ai-agents/pages/agents/pipeline-integration-patterns.adoc b/modules/ai-agents/pages/agents/pipeline-integration-patterns.adoc new file mode 100644 index 000000000..6395d344c --- /dev/null +++ b/modules/ai-agents/pages/agents/pipeline-integration-patterns.adoc @@ -0,0 +1,143 @@ += Pipeline Integration Patterns +:description: Build Redpanda Connect pipelines that invoke agents for event-driven processing and streaming enrichment. +:page-topic-type: best-practices +:personas: streaming_developer, agent_developer +:learning-objective-1: Identify when pipelines should call agents for stream processing +:learning-objective-2: pass:q[Design event-driven agent invocation using the `a2a_message` processor] +:learning-objective-3: Implement streaming enrichment with AI-generated fields + +Build Redpanda Connect pipelines that invoke agents for automated, event-driven processing. Pipelines use the `a2a_message` processor to call agents for each event in a stream when you need AI reasoning, classification, or enrichment at scale. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +This page focuses on pipelines calling agents (pipeline-initiated integration). For agents invoking MCP tools, see xref:ai-agents:agents/integration-overview.adoc#agent-needs-capabilities[Agent needs capabilities]. For external applications calling agents, see xref:ai-agents:agents/integration-overview.adoc#external-system-calls-agent[External system calls agent]. + +== How pipelines invoke agents + +Pipelines use the xref:develop:connect/components/processors/a2a_message.adoc[`a2a_message`] processor to invoke agents for each event in a stream. 
The processor uses the xref:ai-agents:agents/a2a-concepts.adoc[A2A protocol] to discover and communicate with agents. + +When the `a2a_message` processor receives an event, it sends the event data to the specified agent along with any prompt you provide. The agent processes the event using its reasoning capabilities and returns a response. The processor then adds the agent's response to the event for further processing or output. + +The pipeline determines when to invoke agents based on events, not agent reasoning. + +== When to use this pattern + +Use the `a2a_message` processor when pipelines need AI reasoning for every event in a stream. + +The `a2a_message` processor is appropriate when: + +* **Every event needs AI analysis:** Each message requires reasoning, classification, or decision-making. +* **You need streaming enrichment:** Add AI-generated fields to events at scale. +* **Processing is fully automated:** No human in the loop, event-driven workflows. +* **Batch latency is acceptable:** Agent reasoning time is tolerable for your use case. +* **You're handling high-volume streams:** Processing thousands or millions of events. + +== Use cases + +Use the `a2a_message` processor in pipelines for these common patterns. + +=== Event-driven agent invocation + +Invoke agents automatically for each event: + +[source,yaml] +---- +include::ai-agents:example$pipelines/event-driven-invocation.yaml[] +---- + +Replace `AGENT_CARD_URL` with your actual agent card URL. See xref:ai-agents:agents/a2a-concepts.adoc#agent-card-location[Agent card location]. + +**Use case:** Real-time fraud detection on every transaction. + +=== Streaming data enrichment + +Add AI-generated metadata to events: + +[source,yaml] +---- +include::ai-agents:example$pipelines/streaming-enrichment.yaml[tag=processors,indent=0] +---- + +Replace `AGENT_CARD_URL` with your actual agent card URL. See xref:ai-agents:agents/a2a-concepts.adoc#agent-card-location[Agent card location]. + +**Use case:** Add sentiment scores to every customer review in real-time. + +=== Asynchronous workflows + +Process events in the background: + +[source,yaml] +---- +include::ai-agents:example$pipelines/async-workflows.yaml[tag=pipeline,indent=0] +---- + +Replace `AGENT_CARD_URL` with your actual agent card URL. See xref:ai-agents:agents/a2a-concepts.adoc#agent-card-location[Agent card location]. + +**Use case:** Nightly batch summarization of reports where latency is acceptable. + +=== Multi-agent pipeline orchestration + +Chain multiple agents in sequence: + +[source,yaml] +---- +include::ai-agents:example$pipelines/multi-agent-orchestration.yaml[tag=processors,indent=0] +---- + +Replace the agent URL variables with your actual agent card URLs. See xref:ai-agents:agents/a2a-concepts.adoc#agent-card-location[Agent card location]. + +**Use case:** Translate feedback, analyze sentiment, then route to appropriate team. + +=== Agent as transformation node + +Use agent reasoning for complex transformations: + +[source,yaml] +---- +include::ai-agents:example$pipelines/agent-transformation.yaml[tag=processors,indent=0] +---- + +Replace `AGENT_CARD_URL` with your actual agent card URL. See xref:ai-agents:agents/a2a-concepts.adoc#agent-card-location[Agent card location]. + +**Use case:** Convert natural language queries to SQL for downstream processing. + +== When not to use this pattern + +Do not use the `a2a_message` processor when: + +* Users need to interact with agents interactively. +* The transformation is simple and does not require AI reasoning. 
+* Agents need to dynamically decide what data to fetch based on context. + +For a detailed comparison between pipeline-initiated and agent-initiated integration patterns, see xref:ai-agents:agents/integration-overview.adoc#pattern-comparison[Pattern comparison]. + +== Example: Real-time fraud detection + +This example shows a complete pipeline that analyzes every transaction with an agent. + +=== Pipeline configuration + +[source,yaml] +---- +include::ai-agents:example$pipelines/fraud-detection-routing.yaml[] +---- + +Replace `AGENT_CARD_URL` with your agent card URL. See xref:ai-agents:agents/a2a-concepts.adoc#agent-card-location[Agent card location]. + +This pipeline: + +* Consumes every transaction from the `transactions` topic. +* Sends each transaction to the fraud detection agent using `a2a_message`. +* Routes transactions to different topics based on fraud score. +* Runs continuously, analyzing every transaction in real-time. + +== Next steps + +* xref:ai-agents:mcp/remote/tool-patterns.adoc[] +* xref:ai-agents:agents/integration-overview.adoc[] +* xref:ai-agents:agents/a2a-concepts.adoc[] +* xref:develop:connect/components/processors/about.adoc[] diff --git a/modules/ai-agents/pages/agents/prompt-best-practices.adoc b/modules/ai-agents/pages/agents/prompt-best-practices.adoc new file mode 100644 index 000000000..f0f83ce57 --- /dev/null +++ b/modules/ai-agents/pages/agents/prompt-best-practices.adoc @@ -0,0 +1,424 @@ += System Prompt Best Practices +:description: Write system prompts that produce reliable, predictable agent behavior through clear constraints and tool guidance. +:page-topic-type: best-practices +:personas: agent_developer, app_developer, streaming_developer +:learning-objective-1: Identify effective system prompt patterns for agent reliability +:learning-objective-2: Apply constraint patterns to prevent unintended agent behavior +:learning-objective-3: Evaluate system prompts for clarity and completeness + +Write system prompts that produce reliable, predictable agent behavior. Good prompts define scope, specify constraints, and guide tool usage. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== Role definition + +Define what your agent does and the boundaries of its responsibilities. A clear role prevents scope creep and helps the agent refuse out-of-scope requests appropriately. + +=== Be specific about agent identity + +Define what the agent does, not what it is. + +.Do +[,text] +---- +You are an order fulfillment agent for an e-commerce platform. You help customers track orders, update shipping addresses, and process returns. +---- + +.Don't +[,text] +---- +You are a helpful assistant. +---- + +=== Define what the agent does and doesn't do + +Explicitly state boundaries: what tasks the agent handles, what tasks it should refuse or delegate, and when to ask for human assistance. + +[,text] +---- +Responsibilities: +- Track customer orders +- Update shipping addresses +- Process returns up to $500 + +Do not: +- Provide product recommendations (redirect to website) +- Process refunds above $500 (escalate to manager) +- Access orders from other customers +---- + +== Tool specification + +Tell the agent which tools are available and when to use them. Explicit tool guidance reduces errors and prevents the agent from guessing when to invoke capabilities. 
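+
+The prompt is only half of this contract: the agent also reads each tool's `description` and `input_schema` from your MCP server, so keep that metadata equally explicit. The following is a rough sketch of the metadata side only; the tool, its fields, and the formats shown are illustrative, not a complete MCP server configuration:
+
+[,yaml]
+----
+# Illustrative tool metadata (hypothetical tool)
+description: |
+  Retrieve order history for a customer.
+  Example: get_customer_orders(customer_id="CUST-100")
+input_schema:
+  properties:
+    customer_id:
+      type: string
+      description: "Customer ID, for example CUST-100"
+----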
+ +=== List available tools + +Name each tool the agent can use: + +[,text] +---- +Available tools: +- get_customer_orders: Retrieve order history for a customer +- update_order_status: Change order state (shipped, delivered, canceled) +- calculate_refund: Compute refund amount based on return policy +---- + +=== Explain when to use each tool + +Provide decision criteria for tool selection. + +.Do +[,text] +---- +Use get_customer_orders when: +- Customer asks about order history +- You need order details to answer a question + +Use update_order_status only when: +- Customer explicitly requests a cancellation +- You have confirmed the order is eligible for status changes +---- + +.Don't +[,text] +---- +Use the tools as needed. +---- + +== Constraints and safety + +Set explicit boundaries to prevent unintended agent behavior. + +=== Define data boundaries + +Specify what data the agent can access: + +[,text] +---- +Data access: +- Only orders from the last 90 days +- Only data for the authenticated customer +- No access to employee records or internal systems +---- + +=== Set response guidelines + +Control output format and content: + +[,text] +---- +Response guidelines: +- Present order details as tables +- Always include order numbers in responses +- State the analysis time window when showing trends +- If you cannot complete a task, explain why and suggest alternatives +---- + +== Context and conversation management + +Guide the agent on how to handle unclear requests and stay within conversation scope. These guidelines keep interactions focused and prevent the agent from making assumptions. + +=== Handle ambiguous requests + +Guide the agent on how to clarify: + +[,text] +---- +When request is unclear: +1. Ask clarifying questions +2. Suggest common interpretations +3. Do not guess customer intent +---- + +=== Define conversation boundaries + +Set limits on conversation scope: + +[,text] +---- +Conversation scope: +- Answer questions about orders, shipping, and returns +- Do not provide product recommendations (redirect to website) +- Do not engage in general conversation unrelated to orders +---- + +== Error handling + +Guide agents to handle failures gracefully through clear prompt instructions. Agent errors fall into two categories: tool failures (external system issues) and reasoning failures (agent confusion or limits). + +=== Tool failure types + +Tools can fail for multiple reasons. Transient failures include network timeouts, temporary unavailability, and rate limits. Permanent failures include invalid parameters, permission denied, and resource not found errors. Partial failures occur when tools return incomplete data or warnings. + +=== Graceful degradation + +Design prompts so agents continue when tools fail: + +.Example prompt guidance for graceful degradation +[,text] +---- +When a tool fails: +1. Attempt an alternative tool if available +2. If no alternative exists, explain the limitation +3. Offer partial results if you retrieved some data before failure +4. Do not make up information to fill gaps +---- + +Agents that degrade gracefully provide value even when systems are partially down. + +Implement retries in tools, not in agent prompts. The tool should retry network calls automatically before returning an error to the agent. + +=== Escalation patterns + +Some failures require human intervention. Budget exceeded errors occur when max iterations are reached before task completion. Insufficient tools means no tool is available for the required action. 
Ambiguous requests happen when the agent can't determine user intent after clarification attempts. Data access failures occur when multiple tools fail with no alternative path. + +Design prompts to recognize escalation conditions: + +.Example prompt guidance for escalation +[,text] +---- +When you cannot complete the task: +1. Explain what you tried and why it didn't work +2. Tell the user what information or capability is missing +3. Suggest how they can help (provide more details, contact support, etc.) +---- + +=== Common error scenarios + +Include guidance for specific error types in your system prompt: + +**Timeout during tool execution:** When a tool takes longer than the agent timeout limit, the agent receives a timeout error in context. The agent should explain the delay to the user and suggest a retry. + +**Invalid tool parameters:** When the agent passes a wrong data type or missing required field, the tool returns a validation error. The agent should reformat parameters and retry, or ask the user for correct input. + +**Authentication failure:** When a tool can't access a protected resource, it returns a permission denied error. The agent should explain the access limitation without exposing credentials or internal details. + +== Output formatting + +Control how the agent presents information to users. Consistent formatting makes responses easier to read and ensures critical information appears in predictable locations. + +=== Specify structure + +Define how the agent presents information: + +[,text] +---- +Output format: +- Use tables for multiple items +- Use bulleted lists for steps or options +- Use code blocks for tracking numbers or order IDs +- Include units (dollars, kilograms) in all numeric values +---- + +[[evaluation-and-testing]] +== Evaluation and testing + +Test system prompts systematically to verify behavior matches intent. + +Follow this process to validate prompts: + +[cols="1,2,2"] +|=== +| Test Type | What to Test | Example + +| Boundary cases +| Requests at edge of agent scope +| Just inside: "Track order 123" (should work) + +Just outside: "Recommend products" (should refuse) + +Ambiguous: "Help with my order" (should clarify) + +| Tool selection +| Agent chooses correct tools +| Create requests requiring each tool + +Test multiple applicable tools (verify best choice) + +Test no applicable tools (verify explanation) + +| Constraint compliance +| Agent follows "never" rules +| Explicit forbidden: "Show payment methods" + +Indirect forbidden: "What's the credit card number?" + +Verify refusal with explanation + +| Error handling +| Tool failures and limitations +| Disable MCP server tool temporarily + +Send request requiring disabled tool + +Verify graceful response (no fabricated data) + +| Ambiguous requests +| Clarification behavior +| Vague: "Check my stuff" + +Verify specific questions: "Orders, returns, or account?" + +Ensure no guessing of user intent +|=== + +== Design principles + +Apply these principles when writing system prompts to create reliable agent systems. + +=== Design for inspectability + +Make agent reasoning transparent so you can debug by reading conversation history. 
Your system prompt should encourage clear explanations: + +[,text] +---- +Response format: +- State what you're doing before calling each tool +- Explain why you chose this tool over alternatives +- If a tool fails, describe what went wrong and what you tried +---- + +Log all tool invocations with parameters, record tool results in structured format, and store agent responses with reasoning traces. Opaque agents that "just work" are impossible to fix when they break. + +=== Design for testability + +Test agents with boundary cases (requests at the edge of agent capability), error injection (simulate tool failures to verify graceful degradation), context limits (long conversations approaching token limits), and ambiguous requests (unclear user input to verify clarification behavior). + +Use the systematic testing approach in <>. + +=== Design for cost control + +Write clear system prompts that reduce wasted iterations. Vague prompts cause agent confusion and unnecessary tool calls. Each wasted iteration costs tokens. + +Guide agents to: + +* Request only needed data from tools (use pagination, filters) +* Avoid redundant tool calls (check context before calling) +* Stop when the task completes (don't continue exploring) + +For cost management strategies including iteration limits and monitoring, see xref:ai-agents:agents/concepts.adoc[]. + +== Example: Complete system prompt + +This example demonstrates all best practices: + +[,text] +---- +You are an order analytics agent for Acme E-commerce. + +Responsibilities: +- Answer questions about customer order trends +- Analyze order data from Redpanda topics +- Provide insights on order patterns + +Available tools: +- get_customer_orders: Retrieve order history (parameters: customer_id, start_date, end_date) +- analyze_recent_orders: Compute order statistics (parameters: time_window, group_by) + +When to use tools: +- Use get_customer_orders for individual customer queries +- Use analyze_recent_orders for trend analysis across multiple orders + +Never: +- Expose customer payment information or addresses +- Analyze data older than 90 days unless explicitly requested +- Make business recommendations without data to support them + +Data access: +- Only orders from the authenticated customer account +- Maximum of 90 days of historical data + +Response guidelines: +- Present structured data as tables +- Always state the analysis time window +- Include order counts in trend summaries +- If data is unavailable, explain the limitation + +When request is unclear: +- Ask which time period to analyze +- Confirm whether to include canceled orders +- Do not assume customer intent +---- + +== Common anti-patterns + +Avoid these patterns that lead to unpredictable agent behavior. + +=== Vague role definition + +Define specific agent responsibilities and scope. + +Generic role definitions fail because the agent has no guidance on what tasks to handle, what requests to refuse, or when to escalate to humans. + +.Don't +[,text] +---- +You are a helpful AI assistant. +---- + +This doesn't constrain behavior or set expectations. The agent might attempt tasks outside its capabilities or handle requests it should refuse. + +.Do +[,text] +---- +You are an order fulfillment agent for an e-commerce platform. You help customers track orders, update shipping addresses, and process returns up to $500. 
+ +Do not: +- Provide product recommendations (redirect to website) +- Process refunds above $500 (escalate to manager) +---- + +Clear scope prevents the agent from attempting out-of-scope tasks and defines escalation boundaries. + +=== Missing constraints + +Set explicit boundaries on data access and operations. + +Without constraints, agents may access sensitive data, process excessive historical records, or perform operations beyond their authorization. + +.Don't +[,text] +---- +You can access customer data to help answer questions. +---- + +This provides no boundaries on what data, how much history, or which customers. The agent might retrieve payment information, access other customers' data, or query years of records. + +.Do +[,text] +---- +Data access: +- Only orders from the authenticated customer +- Maximum of 90 days of historical data +- No access to payment methods or billing addresses +---- + +Explicit boundaries prevent unauthorized access and scope queries to reasonable limits. + +=== Implicit tool selection + +Specify when to use each tool with clear decision criteria. + +Vague tool guidance forces agents to guess based on tool names alone, leading to wrong tool choices, unnecessary calls, or skipped tools. + +.Don't +[,text] +---- +Use the available tools to complete tasks. +---- + +The agent must guess which tool applies when. This leads to calling the wrong tool first, calling all tools unnecessarily, or fabricating answers without using tools. + +.Do +[,text] +---- +Use get_customer_orders when: +- Customer asks about order history +- You need order details to answer a question + +Use update_order_status only when: +- Customer explicitly requests a cancellation +- You have confirmed the order is eligible for status changes +---- + +Decision criteria enable reliable tool selection based on request context. + +== Next steps + +* xref:ai-agents:agents/quickstart.adoc[] +* xref:ai-agents:agents/overview.adoc[] +* xref:ai-agents:mcp/remote/best-practices.adoc[] diff --git a/modules/ai-agents/pages/agents/quickstart.adoc b/modules/ai-agents/pages/agents/quickstart.adoc new file mode 100644 index 000000000..7417e6206 --- /dev/null +++ b/modules/ai-agents/pages/agents/quickstart.adoc @@ -0,0 +1,191 @@ += AI Agent Quickstart +:description: Create your first AI agent in Redpanda Cloud that generates and publishes event data through natural language commands. +:page-topic-type: tutorial +:personas: agent_developer, evaluator +:learning-objective-1: Create an AI agent in Redpanda Cloud that uses MCP tools +:learning-objective-2: Configure the agent with a system prompt and model selection +:learning-objective-3: Test the agent by generating and publishing events through natural language + +Build your first AI agent in Redpanda Cloud. You'll create an agent that understands natural language requests and uses MCP tools to generate and publish event data to Redpanda topics. 
+ +After completing this quickstart, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== Prerequisites + +* A xref:get-started:cluster-types/byoc/index.adoc[BYOC cluster] (agents are not available on Dedicated or Serverless clusters) + +* xref:ai-agents:ai-gateway/gateway-quickstart.adoc[AI Gateway configured] with at least one LLM provider enabled (OpenAI, Anthropic, or Google AI) + +* Completed the xref:ai-agents:mcp/remote/quickstart.adoc[Remote MCP Quickstart] to create an MCP server with the following tools deployed: ++ +** `generate_input`: Generates fake user event data +** `redpanda_output`: Publishes data to Redpanda topics + +== What you'll build + +An Event Data Manager agent that: + +* Generates fake user event data (logins, purchases, page views) +* Publishes events to Redpanda topics +* Understands natural language requests like "Generate 5 login events and publish them" + +The agent orchestrates the `generate_input` and `redpanda_output` tools you created in the MCP quickstart. + +== Create the agent + +. Log in to the link:https://cloud.redpanda.com/[Redpanda Cloud UI^]. + +. Navigate to your cluster and click *Agentic AI* > *AI Agents* in the left navigation. + +. Click *Create Agent*. + +. Configure basic settings: ++ +* *Display Name*: `event-data-manager` +* *Description*: `Generates and publishes fake user event data to Redpanda topics` +* *Resource Tier*: Select *XSmall* (sufficient for this quickstart) + +. Select your AI Gateway and model: ++ +* *AI Gateway*: Select the gateway you configured (contains provider and API key configuration) +* *Provider*: Select a provider available in your gateway (OpenAI, Anthropic, or Google) +* *Model*: Choose any balanced model from the dropdown + +. Add your API key: ++ +* Click *Add Secret* under *API Key* +* Select *Create new secret* +* *Secret Name*: `-api-key` (for example, `openai-api-key`) +* *Secret Value*: Paste your API key +* Click *Save* + +. Write the system prompt: ++ +[,text] +---- +You are an Event Data Manager agent for Redpanda Cloud. + +Your responsibilities: +- Generate realistic fake user event data +- Publish events to Redpanda topics +- Help users test streaming data pipelines + +Available tools: +- generate_input: Creates fake user events (login, logout, purchase, view) +- redpanda_output: Publishes data to the events topic + +When a user asks you to generate events: +1. Use generate_input to create the event data +2. Use redpanda_output to publish the events to Redpanda +3. Confirm how many events were published + +Always publish events after generating them unless the user explicitly says not to. + +Response format: +- State what you're doing before calling each tool +- Show the generated event data +- Confirm successful publication with a count +---- + +. Select MCP tools: ++ +* Click *Add MCP Server* +* Select the `event-data-generator` server (created in the MCP quickstart) +* Check both tools: +** `generate_input` +** `redpanda_output` + +. Set execution parameters: ++ +* *Max Iterations*: `30` (allows multiple tool calls per request) + +. Review your configuration and click *Create Agent*. + +. Wait for the agent status to change from *Starting* to *Running*. + +== Test your agent + +Now test your agent with natural language requests. + +. In the agent details view, open the *Inspector* tab. + +. 
Try these example requests: ++ +.Generate and publish 3 events +[%collapsible] +==== +[.no-copy] +---- +Generate 3 user events and publish them to the events topic. +---- + +The agent should respond with these steps: + +. Call `generate_input` to create 3 fake user events. +. Call `redpanda_output` to publish them to the `events` topic. +. Confirm the events were published. + +You should see the agent's reasoning and the tool execution results. +==== ++ +.Generate specific event types +[%collapsible] +==== +[.no-copy] +---- +Create 5 login events for testing and publish them to Redpanda. +---- + +The agent understands the request requires login events specifically and generates appropriate test data. +==== ++ +.Generate events without publishing +[%collapsible] +==== +[.no-copy] +---- +Show me what 3 sample purchase events would look like, but don't publish them yet. +---- + +The agent calls only `generate_input` and displays the data without publishing. +==== + +. Navigate to *Topics* in the left navigation to verify events were published to the `events` topic. + +== Iterate on your agent + +Try modifying the agent to change its behavior: + +. Click *Edit configuration* in the agent details view. + +. Update the system prompt to change how the agent responds. For example: ++ +* Add constraints: "Never publish more than 10 events at once" +* Change output format: "Always format events as a table" +* Add validation: "Before publishing, show the user the generated data and ask for confirmation" + +. Click *Save* to update the agent. + +. Test your changes in the *Inspector* tab. + +== Troubleshoot + +For comprehensive troubleshooting guidance, see xref:ai-agents:agents/troubleshooting.adoc[]. + +Common quickstart issues: + +**Events not appearing in topic:** Verify the `events` topic exists and review the MCP server logs for publishing errors. + +== Next steps + +You've created an agent that orchestrates MCP tools through natural language. Explore more: + +* xref:ai-agents:agents/overview.adoc[] +* xref:ai-agents:agents/create-agent.adoc[] +* xref:ai-agents:agents/prompt-best-practices.adoc[] +* xref:ai-agents:agents/architecture-patterns.adoc[] +* xref:ai-agents:mcp/remote/tool-patterns.adoc[] diff --git a/modules/ai-agents/pages/agents/troubleshooting.adoc b/modules/ai-agents/pages/agents/troubleshooting.adoc new file mode 100644 index 000000000..69eda27eb --- /dev/null +++ b/modules/ai-agents/pages/agents/troubleshooting.adoc @@ -0,0 +1,485 @@ += Troubleshoot AI Agents +:description: Diagnose and fix common issues with AI agents including deployment failures, runtime behavior problems, and tool execution errors. +:page-topic-type: troubleshooting +:personas: agent_developer, app_developer, streaming_developer +:learning-objective-1: Diagnose deployment failures and resource allocation errors +:learning-objective-2: Resolve runtime behavior issues including tool selection and iteration limits +:learning-objective-3: Fix tool execution problems and authentication failures + +Use this page to diagnose and fix common issues with AI agents, including deployment failures, runtime behavior problems, tool execution errors, and integration issues. + +== Deployment issues + +Fix issues that prevent agents from connecting to required resources. + +=== MCP server connection failures + +**Symptoms:** Agent starts but the tools don't respond or return connection errors. 
+ +**Causes:** + +* MCP server stopped or crashed after agent creation +* Network connectivity issues between agent and MCP server +* MCP server authentication or permission issues + +**Solution:** + +. Verify MCP server status in *Agentic AI* > *Remote MCP*. +. Check MCP server logs for errors. +. Restart the MCP server if needed. +. Verify agent has permission to access the MCP server. + +**Prevention:** + +* Monitor MCP server health +* Use appropriate retry logic in tools + +== Runtime behavior issues + +Resolve problems with agent decision-making, tool selection, and response generation. + +=== Agent not calling tools + +**Symptoms:** Agent responds without calling any tools, or fabricates information instead of using tools. + +**Causes:** + +* System prompt doesn't clearly specify when to use tools +* Tool descriptions are vague or missing +* LLM model lacks sufficient reasoning capability +* Max iterations is too low + +**Solution:** + +. Strengthen tool usage guidance in your system prompt: ++ +[,text] +---- +ALWAYS use get_order_status when customer mentions an order ID. +NEVER respond about order status without calling the tool first. +---- + +. Review tool descriptions in your MCP server configuration. +. Use a more capable model from the supported list for your gateway. +. Increase max iterations if the agent is stopping before reaching tools. + +**Prevention:** + +* Write explicit tool selection criteria in system prompts +* Test agents with the xref:ai-agents:agents/prompt-best-practices.adoc#evaluation-and-testing[systematic testing approach] +* Use models appropriate for your task complexity + +=== Calling wrong tools + +**Symptoms:** Agent selects incorrect tools for the task, or calls tools with invalid parameters. + +**Causes:** + +* Tool descriptions are ambiguous or overlap +* Too many similar tools confuse the LLM +* System prompt doesn't provide clear tool selection guidance + +**Solution:** + +. Make tool descriptions more specific and distinct. +. Add "when to use" guidance to your system prompt: ++ +[,text] +---- +Use get_order_status when: +- Customer provides an order ID (ORD-XXXXX) +- You need to check current order state + +Use get_shipping_info when: +- Order status is "shipped" +- Customer asks about delivery or tracking +---- + +. Reduce the number of tools you expose to the agent. +. Use subagents to partition tools by domain. + +**Prevention:** + +* Follow tool design patterns in xref:ai-agents:mcp/remote/tool-patterns.adoc[] +* Limit each agent to 10-15 tools maximum +* Test boundary cases where multiple tools might apply + +=== Stuck in loops or exceeding max iterations + +**Symptoms:** Agent reaches max iterations without completing the task, or repeatedly calls the same tool with the same parameters. + +**Causes:** + +* Tool returns errors that the agent doesn't know how to handle +* Agent doesn't recognize when the task is complete +* Tool returns incomplete data that prompts another call +* System prompt encourages exhaustive exploration + +**Solution:** + +. Add completion criteria to your system prompt: ++ +[,text] +---- +When you have retrieved all requested information: +1. Present the results to the user +2. Stop calling additional tools +3. Do not explore related data unless asked +---- + +. Add error handling guidance: ++ +[,text] +---- +If a tool fails after 2 attempts: +- Explain what went wrong +- Do not retry the same tool again +- Move on or ask for user guidance +---- + +. Review tool output to ensure it signals completion clearly. +. 
Increase max iterations if the task legitimately requires many steps. + +**Prevention:** + +* Design tools to return complete information in one call +* Set max iterations appropriate for task complexity (see xref:ai-agents:agents/concepts.adoc#why-iterations-matter[Why iterations matter]) +* Test with ambiguous requests that might cause loops + +=== Making up information + +**Symptoms:** Agent provides plausible-sounding answers without calling tools, or invents data when tools fail. + +**Causes:** + +* System prompt doesn't explicitly forbid fabrication +* Agent treats tool failures as suggestions rather than requirements +* Model is hallucinating due to lack of constraints + +**Solution:** + +. Add explicit constraints to your system prompt: ++ +[,text] +---- +Critical rules: +- NEVER make up order numbers, tracking numbers, or customer data +- If a tool fails, explain the failure - do not guess +- If you don't have information, say so explicitly +---- + +. Test error scenarios by temporarily disabling tools. +. Use a more capable model that follows instructions better. + +**Prevention:** + +* Include "never fabricate" rules in all system prompts +* Test with requests that require unavailable data +* Monitor *Transcripts* and session topic for fabricated responses + +=== Analyzing conversation patterns + +**Symptoms:** Agent behavior is inconsistent or produces unexpected results. + +**Solution:** + +Review conversation history in *Transcripts* to identify problematic patterns: + +* Agents calling the same tool repeatedly: Indicates loop detection is needed +* Large gaps between messages: Suggests tool timeout or slow execution +* Agent responses without tool calls: Indicates a tool selection issue +* Fabricated information: Suggests a missing "never make up data" constraint +* Truncated early messages: Indicates the context window was exceeded + +**Analysis workflow:** + +. Use *Inspector* to reproduce the issue. +. Review full conversation including tool invocations. +. Identify where agent behavior diverged from expected. +. Check system prompt for missing guidance. +. Verify tool responses are formatted correctly. + +== Performance issues + +Diagnose and fix issues related to agent speed and resource consumption. + +=== Slow response times + +**Symptoms:** Agent takes 10+ seconds to respond to simple queries. + +**Causes:** + +* LLM model is slow (large context processing) +* Too many tool calls in sequence +* Tools themselves are slow (database queries, API calls) +* Large context window from long conversation history + +**Solution:** + +. Use a faster, lower-latency model tier for simple queries and reserve larger models for complex reasoning. +. Review conversation history in the *Inspector* tab to identify unnecessary tool calls. +. Optimize tool implementations: +.. Add caching where appropriate +.. Reduce query complexity +.. Return only needed data (use pagination, filters) +. Clear the conversation history if the context is very large. + +**Prevention:** + +* Right-size model selection based on task complexity +* Design tools to execute quickly (< 2 seconds ideal) +* Set appropriate max iterations to prevent excessive exploration +* Monitor token usage and conversation length + +=== High token costs + +**Symptoms:** Token usage is higher than expected, costs are increasing rapidly. 
+ +**Causes:** + +* Max iterations configured too high +* Agent making unnecessary tool calls +* Large tool results filling context window +* Long conversation history not being managed +* Using expensive models for simple tasks + +**Solution:** + +. Review token usage in *Transcripts*. +. Lower max iterations for this agent. +. Optimize tool responses to return less data: ++ +[,text] +---- +Bad: Return all 10,000 customer records +Good: Return paginated results, 20 records at a time +---- + +. Add cost control guidance to system prompt: ++ +[,text] +---- +Efficiency guidelines: +- Request only the data you need +- Stop when you have enough information +- Do not call tools speculatively +---- + +. Switch to a more cost-effective model for simple queries. +. Clear conversation history periodically in the *Inspector* tab. + +**Prevention:** + +* Set appropriate max iterations (10-20 for simple, 30-40 for complex) +* Design tools to return minimal necessary data +* Monitor token usage trends +* See cost calculation guidance in xref:ai-agents:agents/concepts.adoc#cost-calculation[Cost calculation] + +== Tool execution issues + +Fix problems with timeouts, invalid parameters, and error responses. + +=== Tool timeouts + +**Symptoms:** Tools fail with timeout errors, agent receives incomplete results. + +**Causes:** + +* External API is slow or unresponsive +* Database query is too complex +* Network latency between tool and external system +* Tool processing large datasets in memory + +**Solution:** + +. Add timeout handling to tool implementation: ++ +[,yaml] +---- +http: + url: https://api.example.com/data + timeout: "5s" # Set explicit timeout +---- + +. Optimize external queries: +.. Add database indexes +.. Reduce query scope +.. Cache frequent queries +. Increase tool timeout if operation legitimately takes longer. +. Add retry logic for transient failures. + +**Prevention:** + +* Set explicit timeouts in all tool configurations +* Test tools under load +* Monitor external API performance +* Design tools to fail fast on unavailable services + +=== Invalid parameters + +**Symptoms:** Tools return validation errors about missing or incorrectly formatted parameters. + +**Causes:** + +* Tool schema doesn't match implementation +* Agent passes wrong data types +* Required parameters not marked as required in schema +* Agent misunderstands parameter purpose + +**Solution:** + +. Verify tool schema matches implementation: ++ +[,yaml] +---- +input_schema: + properties: + order_id: + type: string # Must match what tool expects + description: "Order ID in format ORD-12345" +---- + +. Add parameter validation to tools. +. Improve parameter descriptions in tool schema. +. Add examples to tool descriptions: ++ +[,yaml] +---- +description: | + Get order status by order ID. + Example: get_order_status(order_id="ORD-12345") +---- + +**Prevention:** + +* Write detailed parameter descriptions +* Include format requirements and examples +* Test tools with invalid inputs to verify error messages +* Use JSON Schema validation in tool implementations + +=== Tool returns errors + +**Symptoms:** Tools execute but return error responses or unexpected data formats. + +**Causes:** + +* External API returned error +* Tool implementation has bugs +* Data format changed in external system +* Tool lacks error handling + +**Solution:** + +. Check tool logs in MCP server. +. Test tool directly (outside agent context). +. Verify external system is operational. +. 
Add error handling to tool implementation: ++ +[,yaml] +---- +processors: + - try: + - http: + url: ${API_URL} + catch: + - mapping: | + root.error = "API unavailable: " + error() +---- + +. Update agent system prompt to handle this error type. + +**Prevention:** + +* Implement comprehensive error handling in tools +* Monitor external system health +* Add retries for transient failures +* Log all tool errors for analysis + +== Integration issues + +Fix problems with external applications calling agents and pipeline-to-agent integration. + +=== Agent card does not contain a URL + +**Symptoms:** Pipeline fails with error: `agent card does not contain a URL` or `failed to init processor path root.pipeline.processors.0` + +**Causes:** + +* The `agent_card_url` points to the base agent endpoint instead of the agent card JSON file + +**Solution:** + +The `agent_card_url` must point to the agent card JSON file, not the base agent endpoint. + +**Incorrect configuration:** + +[,yaml] +---- +processors: + - a2a_message: + agent_card_url: "https://your-agent-id.ai-agents.your-cluster-id.cloud.redpanda.com" + prompt: "Analyze this transaction: ${!content()}" +---- + +**Correct configuration:** + +[,yaml] +---- +processors: + - a2a_message: + agent_card_url: "https://your-agent-id.ai-agents.your-cluster-id.cloud.redpanda.com/.well-known/agent-card.json" + prompt: "Analyze this transaction: ${!content()}" +---- + +The agent card is always available at `/.well-known/agent-card.json` according to the A2A protocol standard. + +**Prevention:** + +* Always append `/.well-known/agent-card.json` to the agent endpoint URL +* Test the agent card URL in a browser before using it in pipeline configuration +* See xref:ai-agents:agents/a2a-concepts.adoc#agent-card-location[Agent card location] for details + +=== Pipeline integration failures + +**Symptoms:** Pipelines using `a2a_message` processor fail or timeout. + +**Causes:** + +* Agent is not running or restarting +* Agent timeout is too low for pipeline workload +* Authentication issues between pipeline and agent +* High event volume overwhelming agent + +**Solution:** + +. Check agent status and resource allocation. +. Increase agent resource tier for high-volume pipelines. +. Add error handling in pipeline: ++ +[,yaml] +---- +processors: + - try: + - a2a_message: + agent_card_url: "https://your-agent-url/.well-known/agent-card.json" + catch: + - log: + message: "Agent invocation failed: ${! error() }" +---- + +**Prevention:** + +* Test pipeline-agent integration with low volume first +* Size agent resources appropriately for event rate +* See integration patterns in xref:ai-agents:agents/pipeline-integration-patterns.adoc[] + +== Monitor and debug agents + +For comprehensive guidance on monitoring agent activity, analyzing conversation history, tracking token usage, and debugging issues, see xref:ai-agents:agents/monitor-agents.adoc[]. 
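+
+When you need to reproduce an integration issue away from production traffic, a disposable pipeline that feeds a single synthetic event to the agent is often the quickest way to isolate the failure. This is a minimal sketch only: the `generate` input, the synthetic event shape, and the `stdout` output are illustrative assumptions, and the agent card URL is a placeholder.
+
+[,yaml]
+----
+input:
+  generate:
+    count: 1          # emit one synthetic event, then stop
+    interval: ""
+    mapping: |
+      root.transaction_id = "TEST-001"
+      root.amount = 42.50
+
+pipeline:
+  processors:
+    - a2a_message:
+        agent_card_url: "https://your-agent-url/.well-known/agent-card.json" # placeholder
+        prompt: "Analyze this transaction: ${!content()}"
+
+output:
+  stdout: {}
+----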
+ +== Next steps + +* xref:ai-agents:agents/prompt-best-practices.adoc[] +* xref:ai-agents:agents/concepts.adoc[] +* xref:ai-agents:mcp/remote/tool-patterns.adoc[] +* xref:ai-agents:agents/architecture-patterns.adoc[] diff --git a/modules/ai-agents/pages/agents/tutorials/customer-support-agent.adoc b/modules/ai-agents/pages/agents/tutorials/customer-support-agent.adoc new file mode 100644 index 000000000..a3778a34c --- /dev/null +++ b/modules/ai-agents/pages/agents/tutorials/customer-support-agent.adoc @@ -0,0 +1,273 @@ += Learn Multi-Tool Agent Orchestration +:description: Learn how agents coordinate multiple tools, make decisions based on conversation context, and handle errors through building a customer support agent. +:page-topic-type: tutorial +:personas: agent_developer, streaming_developer +:learning-objective-1: Explain how agents use conversation context to decide which tools to invoke +:learning-objective-2: Apply tool orchestration patterns to handle multi-step workflows +:learning-objective-3: Evaluate how system prompt design affects agent tool selection + +Build a customer support agent to learn how agents orchestrate multiple tools, make context-aware decisions, and handle incomplete data. + +After completing this tutorial, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== What you'll learn + +Agents become powerful when they coordinate multiple tools to solve complex problems. A single-tool agent can retrieve order status. A multi-tool agent can check order status, fetch tracking information, look up customer history, and decide which tools to invoke based on conversation context. + +This tutorial teaches multi-tool orchestration through a customer support scenario. + +The patterns you practice here apply to any multi-tool scenario: data analysis agents coordinating query and visualization tools, workflow automation agents chaining approval and notification tools, or research agents combining search and summarization tools. + +== The scenario + +Customer support teams handle repetitive questions: "Where is my order?", "What's my tracking number?", "Show me my order history." Human agents waste time on lookups that could be automated. + +An effective support agent needs three capabilities: + +- **Order status lookup**: Check current order state and contents +- **Shipping information**: Retrieve tracking numbers and delivery estimates +- **Order history**: Show past purchases for a customer + +The challenge: users phrase requests differently ("Where's my package?", "Track order ORD-12345", "My recent orders"), and agents must choose the right tool based on context. + +== Prerequisites + +* A xref:get-started:cluster-types/byoc/index.adoc[BYOC cluster] with Remote MCP enabled. +* xref:ai-agents:ai-gateway/gateway-quickstart.adoc[AI Gateway configured] with at least one LLM provider enabled (this tutorial uses OpenAI). + +== Design the MCP tools + +Before an agent can orchestrate tools, you need tools to orchestrate. Each tool should do one thing well, returning structured data the agent can reason about. + +You could create a single `handle_customer_request` tool that takes a natural language query and returns an answer. 
But, this approach fails because: + +* The agent can't inspect intermediate results +* Tool chaining becomes impossible (no way to pass order status to shipping lookup) +* Error handling is opaque + +Instead, create focused tools: + +* `get_order_status`: Returns order state and contents +* `get_shipping_info`: Returns tracking data +* `get_customer_history`: Returns past orders + +This granularity enables the agent to chain tools (check order status, see it's shipped, fetch tracking info) and handle errors at each step. + +=== Deploy the tools + +Create a Remote MCP server with the three tools. + +. Navigate to your cluster in the link:https://cloud.redpanda.com[Redpanda Cloud UI^]. +. Go to *Agentic AI* > *Remote MCP*. +. Click *Create MCP Server*. +. Configure the server: ++ +* *Name*: `customer-support-tools` +* *Description*: `Tools for customer support agent` + +. Add the following tools. For each tool, select *Processor* from the component type dropdown, then click *Lint* to validate: ++ +[tabs] +==== +get_order_status:: ++ +This tool uses the `mapping` processor to return mock data. The mock approach enables testing without external dependencies. The agent must interpret the structured response to extract order details. ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/get_order_status.yaml[] +---- + +get_shipping_info:: ++ +This tool demonstrates conditional data: it only returns tracking information when the order has shipped. When an order hasn't shipped yet, the tool returns an empty result. The agent must handle this case. ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/get_shipping_info.yaml[] +---- + +get_customer_history:: ++ +This tool returns multiple orders, demonstrating list-handling. The agent must format multiple results clearly for users. ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/get_customer_history.yaml[] +---- +==== + +. Click *Create MCP Server* + +Wait for the server status to show *Running*. You now have three focused tools the agent can orchestrate. + +== Write the system prompt + +The system prompt teaches the agent how to orchestrate tools. Without explicit guidance, the agent must guess when to use each tool, often choosing incorrectly or ignoring tools entirely. + +=== Create the agent + +Create the customer support agent with the system prompt. + +. Go to *Agentic AI* > *AI Agents*. +. Click *Create Agent*. +. Configure the agent: ++ +* *Name*: `customer-support-agent` +* *Description*: `Helps customers track orders and shipping` +* *Resource Tier*: Medium +* *AI Gateway*: Select the gateway you configured +* *Provider*: OpenAI or Anthropic +* *Model*: OpenAI GPT-5.2 or Claude Sonnet 4.5 (models with strong reasoning) +* *MCP Server*: Select `customer-support-tools` +* *Max Iterations*: 15 + +. In the *System Prompt* field, enter this configuration: ++ +[source,text] +---- +You are a customer support agent for Acme E-commerce. 
+ +Responsibilities: +- Help customers track their orders +- Provide shipping information and estimated delivery dates +- Look up customer order history +- Answer questions about order status + +Available tools: +- get_order_status: Use when customer asks about a specific order +- get_shipping_info: Use when customer needs tracking or delivery information +- get_customer_history: Use when customer asks about past orders or "my orders" + +When to use each tool: +- If customer provides an order ID (ORD-XXXXX), use get_order_status first +- If customer asks "where is my order?", ask for the order ID before using tools +- If order is "shipped", follow up with get_shipping_info to provide tracking details +- If customer asks about "all my orders" or past purchases, use get_customer_history + +Never: +- Expose customer payment information (credit cards, billing addresses) +- Make up tracking numbers or delivery dates +- Guarantee delivery dates (use "estimated" language) +- Process refunds or cancellations (escalate to human agent) + +Error handling: +- If order not found, ask customer to verify the order ID +- If shipping info unavailable, explain the order may not have shipped yet +- If customer history is empty, confirm the customer ID and explain no orders found + +Response format: +- Start with a friendly greeting +- Present order details in a clear, structured way +- For order status, include: order ID, status, items, total +- For shipping, include: carrier, tracking number, estimated delivery, last known location +- Always include next steps or offer additional help + +Example response structure: +1. Acknowledge the customer's question +2. Present the information from tools +3. Provide next steps or additional context +4. Ask if they need anything else +---- + +. Click *Create Agent*. + +Wait for the agent status to show *Running*. + +== Observe orchestration in action + +Open the *Inspector* tab in the Redpanda Cloud UI to interact with the agent. + +Testing reveals how the agent makes decisions. Watch the conversation panel in the built-in chat interface to see the agent's reasoning process unfold. + +=== Tool chaining based on status + +Test how the agent chains tools based on order status. + +Enter this query in *Inspector*: + +---- +Hi, I'd like to check on order ORD-12345 +---- + +Watch the conversation panel. The agent calls `get_order_status` first, sees the status is "shipped", then automatically follows up with `get_shipping_info` to provide tracking details. The agent uses the first tool's result to decide whether to invoke the second tool. + +Now try this query with a different order: + +---- +Check order ORD-67890 +---- + +This order has status "processing", so the agent calls only `get_order_status`. Since the order hasn't shipped yet, the agent skips `get_shipping_info`. The agent chains tools only when appropriate. + +=== Clarification before tool invocation + +Test how the agent handles incomplete information. + +Click *Clear context* to clear the conversation history. Then enter this query: + +---- +Where is my order? +---- + +The agent recognizes the request is missing an order ID and asks the customer to provide it. Watch the conversation panel and see that the agent calls zero tools. Instead of guessing or fabricating information, it asks a clarifying question. + +This demonstrates pre-condition checking. Effective orchestration includes knowing when NOT to invoke tools. + +=== List handling + +Test how the agent formats multiple results. 
+ +Enter this query: + +---- +Can you show me my recent orders? My customer ID is CUST-100. +---- + +The agent calls `get_customer_history` and receives multiple orders. Watch how it formats the list clearly for the customer, showing details for each order. + +Now test the empty results case with this query: + +---- +Show my order history for customer ID CUST-999 +---- + +The agent receives an empty list and explains that no orders were found, asking the customer to verify their ID. + +=== Error recovery + +Test how the agent handles missing data. + +Enter this query: + +---- +Check order ORD-99999 +---- + +The tool returns no data for this order ID. Watch how the agent responds. It explains the order wasn't found and asks the customer to verify the order ID. Critically, the agent does not fabricate tracking numbers or order details. + +This demonstrates error recovery without hallucination. The "Never make up tracking numbers" constraint in the system prompt prevents the agent from inventing plausible-sounding but fake information. + +== Troubleshoot + +For comprehensive troubleshooting guidance, see xref:ai-agents:agents/troubleshooting.adoc[]. + +=== Test with mock data + +The mock tools in this tutorial only recognize specific test IDs: + +* Orders: ORD-12345, ORD-67890, ORD-99999 +* Customers: CUST-100, CUST-999 + +Use these documented test IDs when testing the agent. If you replace the mock tools with real API calls, verify that your API endpoints return the expected data structures. + +== Next steps + +* xref:ai-agents:mcp/remote/tool-patterns.adoc#call-external-apis[Call external APIs] +* xref:ai-agents:agents/prompt-best-practices.adoc[] +* xref:ai-agents:agents/architecture-patterns.adoc[] +* xref:ai-agents:agents/troubleshooting.adoc[] diff --git a/modules/ai-agents/pages/agents/tutorials/transaction-dispute-resolution.adoc b/modules/ai-agents/pages/agents/tutorials/transaction-dispute-resolution.adoc new file mode 100644 index 000000000..f1ccd97ac --- /dev/null +++ b/modules/ai-agents/pages/agents/tutorials/transaction-dispute-resolution.adoc @@ -0,0 +1,679 @@ += Build Multi-Agent Systems for Transaction Dispute Resolution +:description: Learn how to build multi-agent systems with domain separation, handle sensitive financial data, and monitor multi-agent execution through transaction investigation. +:page-topic-type: tutorial +:personas: agent_developer, platform_admin +:learning-objective-1: Design multi-agent systems with domain-specific sub-agents +:learning-objective-2: pass:q[Monitor multi-agent execution using *Transcripts*] +:learning-objective-3: Integrate agents with streaming pipelines for event-driven processing + +Build a transaction dispute resolution system using multi-agent architecture, secure data handling, and execution monitoring. + +After completing this tutorial, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== What you'll learn + +This tutorial advances from xref:ai-agents:agents/tutorials/customer-support-agent.adoc[basic multi-tool orchestration] to multi-agent systems. You'll build a transaction dispute resolution system where a root agent delegates to specialized sub-agents (account, fraud, merchant, compliance), each with focused responsibilities and PII-protected data access. You'll also monitor execution using *Transcripts* and process disputes from transaction streams for automated detection. 
+ +These patterns apply beyond banking to any domain requiring specialized expertise and data security: healthcare systems, insurance claims processing, or regulatory compliance workflows. + +== The scenario + +Banks handle thousands of dispute calls daily. Customers report unauthorized charges, billing errors, or unrecognized transactions. Each investigation requires cross-referencing multiple systems and applying consistent fraud detection logic. + +Traditionally, human agents manually open multiple systems, cross-reference data, and take notes, a 10-15 minute process prone to inconsistencies and incomplete compliance logging. + +Multi-agent automation transforms this workflow by enabling instant data aggregation from all sources, consistent logic applied every time, 10-15 second resolution, and structured results for compliance. Human agents handle only complex escalations. + +When a customer calls saying "I see a $247.83 charge from 'ACME CORP' but I never shopped there. Is this fraud?", the system must investigate account history, calculate fraud scores, verify merchant legitimacy, and make a recommendation with structured results. + +== Prerequisites + +* A xref:get-started:cluster-types/byoc/index.adoc[BYOC cluster] with Remote MCP enabled. +* xref:ai-agents:ai-gateway/gateway-quickstart.adoc[AI Gateway configured] with at least one LLM provider enabled (this tutorial uses OpenAI GPT-5.2 or Claude Sonnet 4.5 for reasoning). +* The xref:get-started:rpk-install.adoc[Redpanda CLI (`rpk`)] installed (for testing the pipeline with sample data). +* Completed xref:ai-agents:agents/tutorials/customer-support-agent.adoc[] (foundational multi-tool concepts). + +== Create MCP tools for each domain + +Before creating agents, create the tools they'll use. You'll organize tools by domain, matching each sub-agent's responsibility. + +=== Account tools + +Account tools retrieve customer and transaction data with PII protection. + +. Navigate to your cluster in the link:https://cloud.redpanda.com[Redpanda Cloud UI^]. +. Go to *Agentic AI* > *Remote MCP*. +. Click *Create MCP Server*. +. Configure the server: ++ +* *Name*: `account-tools` +* *Description*: `Customer account and transaction data retrieval` +* *Resource Tier*: XSmall + +. Add the following tools. For each tool, select *Processor* from the component type dropdown, then click *Lint* to validate: ++ +[tabs] +==== +get_customer_account:: ++ +This mock tool returns account data with sensitive fields already protected. Card numbers include only the last 4 digits, while full names remain for verification. In production, implement similar protections in your data layer. ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/get_customer_account.yaml[] +---- + +get_transaction_details:: ++ +This tool returns complete transaction details including merchant information, location, and timestamp. Notice how it returns structured data the fraud agent can analyze. ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/get_transaction_details.yaml[] +---- + +get_transaction_history:: ++ +This tool returns aggregated spending patterns instead of raw transaction lists. This privacy-preserving approach gives fraud analysis what it needs (typical spending by category, location patterns) without exposing individual transaction details unnecessarily. ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/get_transaction_history.yaml[] +---- +==== + +. Click *Create MCP Server*.
+ +Wait for the server status to show *Running*. + +[NOTE] +==== +This tutorial uses XSmall resource tier for all MCP servers because the mock tools run lightweight Bloblang transformations. Production deployments with external API calls require larger tiers based on throughput needs. See xref:ai-agents:mcp/remote/scale-resources.adoc[]. +==== + +=== Fraud tools + +Fraud tools calculate risk scores and identify fraud indicators. + +. Click *Create MCP Server*. +. Configure the server: ++ +* *Name*: `fraud-tools` +* *Description*: `Fraud detection and risk scoring` +* *Resource Tier*: XSmall + +. Add the following tools. For each tool, select *Processor* from the component type dropdown, then click *Lint* to validate: ++ +[tabs] +==== +calculate_fraud_score:: ++ +This tool implements multi-factor fraud scoring with location risk (0-35 for international/unusual cities), merchant risk (0-30 for reputation/fraud reports), amount risk (0-25 for deviation from averages), velocity risk (0-15 for rapid transactions), and category risk (0-20 for unusual spending categories). The tool returns both the total score and breakdown, allowing agents to explain their reasoning. ++ +[,yaml,role="no-placeholders"] +---- +include::ai-agents:example$mcp-tools/processors/calculate_fraud_score.yaml[] +---- + +get_risk_indicators:: ++ +This tool provides detailed fraud signals with severity levels. Each indicator includes a description that agents can use to explain findings to customers. ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/get_risk_indicators.yaml[] +---- +==== + +. Click *Create MCP Server*. + +Wait for the server status to show *Running*. + +=== Merchant tools + +Merchant tools verify business legitimacy and analyze merchant categories. + +. Click *Create MCP Server*. +. Configure the server: ++ +* *Name*: `merchant-tools` +* *Description*: `Merchant verification and category analysis` +* *Resource Tier*: XSmall + +. Add the following tools. For each tool, select *Processor* from the component type dropdown, then click *Lint* to validate: ++ +[tabs] +==== +verify_merchant:: ++ +This tool returns reputation scores, fraud report counts, business verification status, and red flags. Notice how it includes common issues for legitimate merchants (like subscription billing problems) to help agents distinguish between fraud and merchant operational issues. ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/verify_merchant.yaml[] +---- + +get_merchant_category:: ++ +This tool decodes MCC (Merchant Category Codes) and provides typical transaction ranges for each category. This helps identify mismatches (like a grocery store charging $2000). ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/get_merchant_category.yaml[] +---- +==== + +. Click *Create MCP Server*. + +Wait for the server status to show *Running*. + +=== Compliance tools + +Compliance tools handle audit logging and regulatory requirements. + +. Click *Create MCP Server*. +. Configure the server: ++ +* *Name*: `compliance-tools` +* *Description*: `Audit logging and regulatory compliance` +* *Resource Tier*: XSmall + +. Add the following tools. For each tool, select *Processor* from the component type dropdown, then click *Lint* to validate: ++ +[tabs] +==== +log_audit_event:: ++ +This tool creates audit records for every investigation. In production, this would write to an immutable audit log. For this tutorial, it returns a confirmation with the audit ID. 
++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/log_audit_event.yaml[] +---- + +check_regulatory_requirements:: ++ +This tool returns applicable regulations, customer rights, bank obligations, and required documentation for different dispute types. This ensures agents follow proper procedures for Regulation E, Fair Credit Billing Act, and card network rules. ++ +[,yaml] +---- +include::ai-agents:example$mcp-tools/processors/check_regulatory_requirements.yaml[] +---- +==== + +. Click *Create MCP Server*. + +Wait for the server status to show *Running*. You now have four MCP servers with nine total tools, organized by domain. + +== Create the root agent with subagents + +The root agent orchestrates sub-agents and makes final recommendations. You'll configure the root agent first, then add four specialized sub-agents within the same form. + +[IMPORTANT] +==== +Sub-agents inherit the LLM provider, model, resource tier, and max iterations from the root agent. This tutorial uses GPT-5 Mini and max iterations of 15 to optimize performance. Using slower models (GPT-5.2, Claude Sonnet 4.5) or high max iterations (50+) will cause sub-agents to execute slowly. Each sub-agent call could take 60-90 seconds instead of 10-15 seconds. +==== + +. Go to *Agentic AI* > *AI Agents*. +. Click *Create Agent*. +. Configure the root agent: ++ +* *Name*: `dispute-resolution-agent` +* *Description*: `Orchestrates transaction dispute investigations` +* *Resource Tier*: Large +* *AI Gateway*: Select the gateway you configured +* *Provider*: OpenAI +* *Model*: GPT-5 Mini (fast, cost-effective for structured workflows) +* *Max Iterations*: 15 + +. In the *System Prompt* field, enter: ++ +[source,text] +---- +include::ai-agents:example$agents/dispute-root-agent-prompt.txt[] +---- + +. Skip the *MCP Tools* section (the root agent uses A2A protocol to call sub-agents, not direct tools). + +. In the *Subagents* section, click *+ Add Subagent*. + +=== Add account agent subagent + +The account agent retrieves customer account and transaction data. + +. Configure the subagent: ++ +* *Name*: `account-agent` +* *Description*: `Retrieves customer account and transaction data` + +. In the subagent's *System Prompt* field, enter: ++ +[source,text] +---- +include::ai-agents:example$agents/account-agent-prompt.txt[] +---- + +. In the subagent's *MCP Tools* section, select `account-tools`. + +=== Add fraud agent subagent + +The fraud agent calculates fraud risk scores and identifies fraud indicators. + +. Click *+ Add Subagent* again. +. Configure the subagent: ++ +* *Name*: `fraud-agent` +* *Description*: `Calculates fraud risk scores and identifies fraud indicators` + +. In the subagent's *System Prompt* field, enter: ++ +[source,text] +---- +include::ai-agents:example$agents/fraud-agent-prompt.txt[] +---- + +. In the subagent's *MCP Tools* section, select `fraud-tools`. + +=== Add merchant agent subagent + +The merchant agent verifies merchant legitimacy and reputation. + +. Click *+ Add Subagent* again. +. Configure the subagent: ++ +* *Name*: `merchant-agent` +* *Description*: `Verifies merchant legitimacy and reputation` + +. In the subagent's *System Prompt* field, enter: ++ +[source,text] +---- +include::ai-agents:example$agents/merchant-agent-prompt.txt[] +---- + +. In the subagent's *MCP Tools* section, select `merchant-tools`. + +=== Add compliance agent subagent + +The compliance agent handles audit logging and regulatory requirements. + +. Click *+ Add Subagent* again. +. 
Configure the subagent:
++
+* *Name*: `compliance-agent`
+* *Description*: `Handles audit logging and regulatory requirements`
+
+. In the subagent's *System Prompt* field, enter:
++
+[source,text]
+----
+include::ai-agents:example$agents/compliance-agent-prompt.txt[]
+----
+
+. In the subagent's *MCP Tools* section, select `compliance-tools`.
+
+. Click *Create Agent* to create the root agent with all four subagents.
+
+Wait for the agent status to show *Running*.
+
+== Test investigation scenarios
+
+Test the multi-agent system with realistic dispute scenarios. Each scenario demonstrates different patterns: clear fraud, legitimate transactions, escalation cases, and edge cases.
+
+. Go to *Agentic AI* > *AI Agents*.
+. Click on `dispute-resolution-agent`.
+. Open the *Inspector* tab.
+
+=== Clear fraud case
+
+Test how the system handles obvious fraud.
+
+Enter this query:
+
+[source,text]
+----
+I see a $1,847.99 charge from 'LUXURY WATCHES INT' in Singapore on transaction TXN-89012. I've never been to Singapore and don't buy watches. My customer ID is CUST-1001. This is fraud.
+----
+
+Watch the conversation panel as the investigation progresses. The root agent calls each sub-agent in sequence and, after all sub-agents complete (30-90 seconds), sends its final response to the chat.
+
+In the conversation panel, you'll see the root agent:
+
+. Routes to account-agent, which retrieves customer location data and spending patterns
+. Routes to fraud-agent, which calculates a critical risk level (95+ score)
+. Routes to merchant-agent, which confirms merchant legitimacy issues
+. Routes to compliance-agent, which logs the investigation
+. Takes immediate action: blocks the card and approves the dispute claim
+
+This flow demonstrates multi-agent coordination for high-confidence fraud decisions with realistic banking communication.
+
+=== Escalation required
+
+Test how the system handles ambiguous cases requiring human review.
+
+Click *Clear context*. Then enter:
+
+[source,text]
+----
+I see three $29.99 charges from 'EXAMPLE STREAMING' last month, but I only subscribed once. My customer ID is CUST-1002 and one of the transactions is TXN-89014.
+----
+
+Watch the conversation panel as the agent investigates. After the sub-agent calls complete, the agent should send a response that follows a realistic escalation pattern.
+
+In the conversation panel, you'll see the root agent:
+
+. Routes to account-agent, which confirms the recurring charges
+. Routes to fraud-agent, which returns a moderate risk score (not clear fraud)
+. Routes to merchant-agent, which confirms the merchant is legitimate
+. Routes to compliance-agent, which logs the case as a billing error dispute
+. Escalates to a human specialist (conflicting evidence, merchant subscription records required)
+
+This demonstrates the escalation pattern when evidence is ambiguous and requires human review.
+
+== Monitor multi-agent execution
+
+*Inspector* shows real-time progress in the conversation panel, but *Transcripts* provides detailed post-execution analysis with timing, token usage, and full trace hierarchy.
+
+. In the left navigation, click *Transcripts*.
+. Select a recent transcript from your fraud case test.
+ +In the trace hierarchy, you'll see: + +* Root agent invocation (top-level span) +* Multiple `invoke_agent` spans for each sub-agent call +* Individual LLM calls within each agent +* MCP tool invocations within sub-agents + +In the summary panel, check: + +* *Duration*: Total investigation time (typically 5-15 seconds) +* *Token Usage*: Cost tracking across all agents +* *LLM Calls*: How many reasoning steps were needed + +This visibility helps you: + +* Verify sub-agents are being called in the right order +* Identify slow sub-agents that need optimization +* Track costs per investigation for budgeting + +For detailed trace structure, see xref:ai-agents:observability/concepts.adoc#agent-trace-hierarchy[Agent trace hierarchy]. + +== Integrate with streaming pipeline + +Process disputes automatically from transaction streams. When transactions meet certain risk thresholds, the pipeline invokes the dispute agent for immediate investigation. + +=== Create a secret for the agent card URL + +The pipeline needs the agent card URL to invoke the dispute resolution agent. + +. Go to *Agentic AI* > *AI Agents*. +. Click on `dispute-resolution-agent`. +. Open the *A2A* tab. +. Copy the agent URL displayed at the top. +. Go to *Connect* > *Secrets*. +. Click *Create Secret*. +. Create the secret: ++ +* *Name*: `DISPUTE_AGENT_CARD_URL` +* *Value*: Paste the agent URL and append `/.well-known/agent-card.json` to the end ++ +For example, if the agent URL is: ++ +---- +https://abc123.ai-agents.def456.cloud.redpanda.com +---- ++ +The secret value should be: ++ +---- +https://abc123.ai-agents.def456.cloud.redpanda.com/.well-known/agent-card.json +---- + +. Click *Create Secret*. + +=== Create topics for transaction data + +Create the topics the pipeline will use for input and output. + +. Go to *Topics* in the Redpanda Cloud UI. +. Click *Create Topic*. +. Create the input topic: ++ +* *Name*: `bank.transactions` +* *Partitions*: 3 +* *Replication factor*: 3 + +. Click *Create Topic* again. +. Create the output topic: ++ +* *Name*: `bank.dispute_results` +* *Partitions*: 3 +* *Replication factor*: 3 + +=== Create a SASL user for topic access + +The pipeline needs SASL credentials to read from and write to Redpanda topics. + +. Go to *Security* > *Users* in the Redpanda Cloud UI. +. Click *Create User*. +. Configure the user: ++ +* *Username*: `dispute-pipeline-user` +* *Password*: Generate a secure password +* *Mechanism*: SCRAM-SHA-256 + +. Save the username and password. You'll need them for the pipeline secrets. + +. Click *Create*. + +. Click *Create ACL* to grant permissions. + +. Click the *Clusters* tab for cluster permissions and select *Allow all*. + +. Click *Add rule* to add another ACL. + +. Click the *Topics* tab for topic permissions: ++ +* *Principal*: `dispute-pipeline-user` +* *Host*: Allow all hosts (`*`) +* *Resource Type*: Topic +* *Selector*: Topic names starting with `bank.` +* *Operations*: Allow all + +. Click *Add rule* to add another ACL. + +. Click the *Consumer groups* tab for consumer group permissions and select *Allow all*. + +. Click *Create*. + +=== Create secrets for SASL authentication + +The pipeline needs SASL credentials stored as secrets to authenticate with Redpanda topics. + +. Go to *Connect* > *Secrets* in the Redpanda Cloud UI (if not already there). +. Click *Create Secret*. +. 
Create two secrets with these values: ++ +* *Name*: `DISPUTE_PIPELINE_USERNAME`, *Value*: `dispute-pipeline-user` +* *Name*: `DISPUTE_PIPELINE_PASSWORD`, *Value*: The password you created for `dispute-pipeline-user` + +=== Create the pipeline + +. Go to *Connect* in the Redpanda Cloud UI. +. Click *Create Pipeline*. +. In the numbered steps, click *4 Add permissions*. +. Select *Service Account*. ++ +The Service Account is required for the `a2a_message` processor to authenticate with and invoke the dispute resolution agent. Without this permission, the pipeline will fail when attempting to call the agent. + +. Click *Next*. +. Name the pipeline `dispute-pipeline`. +. Paste this configuration: ++ +[,yaml,role="no-placeholders"] +---- +include::ai-agents:example$pipelines/dispute-pipeline.yaml[] +---- + +This pipeline: + +* Consumes transactions from `bank.transactions` topic +* Filters for high-value transactions (>$500) or pre-flagged transactions +* Calculates preliminary risk score based on location, amount, velocity, and category +* Routes transactions with risk score ≥40 to the dispute-resolution-agent via A2A +* Outputs investigation results to `bank.dispute_results` topic + +=== Test the pipeline + +. Authenticate with your Redpanda Cloud cluster: ++ +[,bash] +---- +rpk cloud login +---- + +. Create a test transaction that will trigger the agent investigation: ++ +[,bash] +---- +echo '{ + "transaction_id": "TXN-89012", + "customer_id": "CUST-1001", + "amount": 1847.99, + "currency": "USD", + "merchant": { + "name": "LUXURY WATCHES INT", + "category": "jewelry", + "country": "Singapore", + "mcc": "5944", + "city": "Singapore" + }, + "card": { + "last_four": "4532", + "billing_country": "USA" + }, + "transaction_date": "2026-01-21T10:00:00Z", + "recent_transaction_count": 2 +}' | rpk topic produce bank.transactions +---- ++ +This transaction will trigger agent investigation because: ++ +* International transaction (Singapore vs USA): +40 risk points +* Amount is greater than $1000: +30 risk points +* Jewelry category (MCC 5944): +20 risk points +* **Total preliminary risk score: 90** (well above the 40 threshold) + +. Wait a minute for the pipeline to process the transaction. You can monitor the progress in *Transcripts*. While the agents investigate, a new transcript for `dispute-resolution-agent` will appear. Until the investigation completes, the transcript will show *awaiting root* status. + +. 
Consume the results: ++ +[,bash] +---- +rpk topic consume bank.dispute_results --offset end -n 1 +---- ++ +You'll see the complete transaction with agent investigation results: ++ +[,json,role="no-wrap"] +---- +{ + "agent_investigation": { + "confidence": "high", + "fraud_score": 91, + "reasoning": "Transaction is an international purchase with no recent international activity, from a merchant with strong fraud indicators, and the amount is a large outlier for this account; immediate block and investigation recommended.", + "recommendation": "block_and_investigate" + }, + "alert_level": "critical", + "amount": 1847.99, + "card": { + "billing_country": "USA", + "last_four": "4532" + }, + "currency": "USD", + "customer_id": "CUST-1001", + "final_decision": "blocked", + "merchant": { + "category": "jewelry", + "city": "Singapore", + "country": "Singapore", + "mcc": "5944", + "name": "LUXURY WATCHES INT" + }, + "pipeline_metadata": { + "agent_invoked": true, + "customer_id": "CUST-1001", + "processed_at": "2026-01-27T14:29:19.436Z", + "transaction_id": "TXN-89012" + }, + "preliminary_risk_score": 90, + "recent_transaction_count": 2, + "risk_level": "high", + "transaction_date": "2026-01-21T10:00:00Z", + "transaction_id": "TXN-89012" +} +---- + +This output contains everything downstream systems need such as fraud monitoring, customer alerts, and audit logging. + +The pipeline uses a two-stage filter: + +- Only processes transactions with `amount > 500` or `preliminary_flag == true` +- Only sends transactions to the agent if `preliminary_risk_score >= 40` + +Transactions that pass the first filter but not the second (e.g., a $600 domestic transaction with low risk) will appear in the output with: + +* `final_decision: "low_risk_no_investigation"` +* `alert_level: "low"` +* No `agent_investigation` field + +Only transactions meeting the risk threshold invoke the dispute resolution agent. + +=== Trace pipeline execution to agent transcripts + +Use the pipeline metadata timestamp to find the corresponding agent execution in the *Transcripts* view. + +. Note the `processed_at` timestamp from the pipeline output (for example: `2026-01-26T18:30:45.000Z`). +. Go to *Agentic AI* > *Transcripts*. +. Find transcripts for `dispute-resolution-agent` that match your timestamp. + +[NOTE] +==== +The search function does not search through prompt content or attribute values. Use the timestamp to narrow down the time window, then manually review transcripts from that period. +==== + +In the transcript details, you'll see: + +* The full prompt sent to the agent (including transaction ID and details) +* Each sub-agent invocation (account-agent, fraud-agent, merchant-agent, compliance-agent) +* Token usage and execution time for the investigation +* The complete JSON response returned to the pipeline + +== Troubleshoot + +For comprehensive troubleshooting guidance, see xref:ai-agents:agents/troubleshooting.adoc[]. + +=== Test with mock data + +The mock tools in this tutorial use hardcoded customer and transaction IDs for testing: + +* Customer IDs: `CUST-1001`, `CUST-1002`, `CUST-1003` +* Transaction IDs: `TXN-89012`, `TXN-89013`, `TXN-89014`, `TXN-89015` + +Use these documented test IDs when testing in *Inspector* or the pipeline. The sub-agents' mock tools require valid IDs to return transaction details, account history, and fraud indicators. Using other IDs (like `TXN-TEST-001` or `CUST-9999`) will cause the tools to return "not found" errors, and the root agent won't be able to complete its investigation. 
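+
+The "not found" behavior comes from the way the mock tools branch on hardcoded IDs. The following sketch shows that pattern in simplified form: the label matches the `get_transaction_details` tool and the IDs match the documented test data, but the response fields are illustrative only. The actual configurations are in the included `example$mcp-tools` files.
+
+[,yaml]
+----
+label: get_transaction_details
+
+processors:
+  - mutation: |
+      # Only the documented test IDs resolve to a record; any other ID
+      # returns a structured "not found" result instead of fabricated data.
+      root = if this.transaction_id == "TXN-89012" {
+        {"transaction_id": "TXN-89012", "status": "completed", "amount": 1847.99}
+      } else if this.transaction_id == "TXN-89014" {
+        {"transaction_id": "TXN-89014", "status": "completed", "amount": 29.99}
+      } else {
+        {"error": "not_found", "transaction_id": this.transaction_id}
+      }
+
+meta:
+  mcp:
+    enabled: true
+    description: "Mock transaction lookup that recognizes only the documented test IDs."
+    properties:
+      - name: transaction_id
+        type: string
+        description: "Transaction ID to look up"
+        required: true
+----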
+ +For production deployments, replace the mock tools with API calls to your account, fraud detection, merchant verification, and compliance systems. + +== Next steps + +* xref:ai-agents:agents/architecture-patterns.adoc[] +* xref:ai-agents:agents/integration-overview.adoc[] +* xref:ai-agents:agents/pipeline-integration-patterns.adoc[] +* xref:ai-agents:agents/monitor-agents.adoc[] +* xref:ai-agents:mcp/remote/best-practices.adoc[] diff --git a/modules/ai-agents/pages/mcp/local/configuration.adoc b/modules/ai-agents/pages/mcp/local/configuration.adoc index 134c50b0c..f2170a11b 100644 --- a/modules/ai-agents/pages/mcp/local/configuration.adoc +++ b/modules/ai-agents/pages/mcp/local/configuration.adoc @@ -2,7 +2,7 @@ :page-beta: true :description: Learn how to configure the Redpanda Cloud Management MCP Server, including auto and manual client setup, enabling deletes, and security considerations. :page-topic-type: how-to -:personas: ai_agent_developer, platform_admin +:personas: agent_developer, platform_admin // Reader journey: "I customize and configure" // Learning objectives - what readers can learn from this page: :learning-objective-1: Configure MCP clients diff --git a/modules/ai-agents/pages/mcp/local/overview.adoc b/modules/ai-agents/pages/mcp/local/overview.adoc index 01bfd6227..6b2643a34 100644 --- a/modules/ai-agents/pages/mcp/local/overview.adoc +++ b/modules/ai-agents/pages/mcp/local/overview.adoc @@ -2,7 +2,7 @@ :page-beta: true :description: Learn about the Redpanda Cloud Management MCP Server, which lets AI agents securely access and operate your Redpanda Cloud account and clusters. :page-topic-type: overview -:personas: evaluator, ai_agent_developer, platform_admin +:personas: evaluator, agent_developer, platform_admin // Reader journey: "I'm new" // Learning objectives - what readers should understand after reading this page: :learning-objective-1: Explain what the Redpanda Cloud Management MCP Server does @@ -66,7 +66,7 @@ MCP servers authenticate to Redpanda Cloud using your personal or service accoun == Next steps -* xref:ai-agents:mcp/local/quickstart.adoc[Redpanda Cloud Management MCP Server quickstart] -* xref:ai-agents:mcp/local/configuration.adoc[Configure the Redpanda Cloud Management MCP Server] +* xref:ai-agents:mcp/local/quickstart.adoc[] +* xref:ai-agents:mcp/local/configuration.adoc[] TIP: The Redpanda documentation site has a read-only MCP server that provides access to Redpanda docs and examples. This server has no access to your Redpanda Cloud account or clusters. See xref:home:ROOT:mcp-setup.adoc[]. diff --git a/modules/ai-agents/pages/mcp/local/quickstart.adoc b/modules/ai-agents/pages/mcp/local/quickstart.adoc index 875d4e5a0..413f6d146 100644 --- a/modules/ai-agents/pages/mcp/local/quickstart.adoc +++ b/modules/ai-agents/pages/mcp/local/quickstart.adoc @@ -2,8 +2,8 @@ :page-beta: true :description: Connect your Claude AI agent to your Redpanda Cloud account and clusters using the Redpanda Cloud Management MCP Server. :page-topic-type: tutorial -:personas: ai_agent_developer, platform_admin -// Reader journey: "I'm new" → first hands-on experience +:personas: agent_developer, platform_admin +// Reader journey: "I'm new" - seeking first hands-on experience // Learning objectives - what readers will achieve by completing this quickstart: :learning-objective-1: Authenticate to Redpanda Cloud with rpk :learning-objective-2: Install the MCP integration for Claude @@ -29,7 +29,7 @@ TIP: For other clients, see xref:ai-agents:mcp/local/configuration.adoc[]. 
== Set up the MCP server -. Verify your `rpk` version +. Verify your `rpk` version. + ```bash rpk version @@ -37,7 +37,7 @@ rpk version + Ensure the version is at least 25.2.3. -. Log in to Redpanda Cloud +. Log in to Redpanda Cloud. + ```bash rpk cloud login diff --git a/modules/ai-agents/pages/mcp/overview.adoc b/modules/ai-agents/pages/mcp/overview.adoc index 5b452c357..964be2dc5 100644 --- a/modules/ai-agents/pages/mcp/overview.adoc +++ b/modules/ai-agents/pages/mcp/overview.adoc @@ -1,7 +1,7 @@ = MCP Servers for Redpanda Cloud Overview :description: Learn about Model Context Protocol (MCP) in Redpanda Cloud, including the two complementary options: the Redpanda Cloud Management MCP Server and Remote MCP. :page-topic-type: overview -:personas: evaluator, ai_agent_developer +:personas: evaluator, agent_developer // Reader journey: "I'm new" - understanding the landscape // Learning objectives - what readers should understand after reading this page: :learning-objective-1: Describe what MCP enables for AI agents @@ -85,9 +85,9 @@ You can use both options together. For example, use the Redpanda Cloud Managemen == Get started -* xref:ai-agents:mcp/local/quickstart.adoc[]: Connect Claude to your Redpanda Cloud account -* xref:ai-agents:mcp/remote/quickstart.adoc[]: Build and deploy custom MCP tools +* xref:ai-agents:mcp/local/quickstart.adoc[] +* xref:ai-agents:mcp/remote/quickstart.adoc[] == Suggested reading -* xref:home:ROOT:mcp-setup.adoc[]: Access Redpanda documentation through AI agents (read-only, no Cloud access required) +* xref:home:ROOT:mcp-setup.adoc[] diff --git a/modules/ai-agents/pages/mcp/remote/admin-guide.adoc b/modules/ai-agents/pages/mcp/remote/admin-guide.adoc deleted file mode 100644 index 214e3070f..000000000 --- a/modules/ai-agents/pages/mcp/remote/admin-guide.adoc +++ /dev/null @@ -1,41 +0,0 @@ -= Remote MCP Server Administration Guide -:description: Overview of administrative tasks for managing MCP servers in Redpanda Cloud. -:page-topic-type: overview -:personas: platform_admin, ai_agent_developer -// Reader journey: "I operate and maintain" -// Learning objectives - what readers can learn from this page: -:learning-objective-1: Identify available MCP server administrative tasks -:learning-objective-2: Navigate to administrative resources -:learning-objective-3: Describe the server lifecycle stages - -Use these resources to manage it throughout its lifecycle, from editing and scaling to monitoring and deletion. - -After reading this page, you will be able to: - -* [ ] {learning-objective-1} -* [ ] {learning-objective-2} -* [ ] {learning-objective-3} - -== Server lifecycle management - -Manage the basic lifecycle of your MCP servers, including editing configurations, pausing to save costs, and deleting. - -See xref:ai-agents:mcp/remote/manage-servers.adoc[]. - -== Resource scaling - -Adjust your MCP server's compute resources to match workload demands and optimize costs. Resource allocation directly affects your billing charges. - -See xref:ai-agents:mcp/remote/scale-resources.adoc[]. - -== Monitoring and observability - -Monitor your MCP server's activity using OpenTelemetry traces. Track tool invocations, measure performance, debug failures, and integrate with observability platforms. - -See xref:ai-agents:mcp/remote/monitor-activity.adoc[]. - -== Next steps - -* xref:ai-agents:mcp/remote/best-practices.adoc[Learn best practices] for building robust tools. -* xref:develop:connect/configuration/secret-management.adoc[Manage secrets] that MCP server tools use. 
-* xref:billing:billing.adoc#remote-mcp-billing-metrics[Review MCP billing] to optimize costs. diff --git a/modules/ai-agents/pages/mcp/remote/best-practices.adoc b/modules/ai-agents/pages/mcp/remote/best-practices.adoc index 28df4084d..81738ab01 100644 --- a/modules/ai-agents/pages/mcp/remote/best-practices.adoc +++ b/modules/ai-agents/pages/mcp/remote/best-practices.adoc @@ -1,7 +1,7 @@ = MCP Tool Design :description: Design effective MCP tool interfaces with clear names, descriptions, and input properties. :page-topic-type: best-practices -:personas: ai_agent_developer +:personas: agent_developer // Reader journey: "I want AI clients to discover and use my tools effectively" // Learning objectives - what readers should be able to do after reading this page: :learning-objective-1: Write tool names and descriptions that help AI clients select the right tool @@ -37,6 +37,6 @@ include::redpanda-connect:ai-agents:example$best-practices/mcp-metadata/search-c == Next steps -* xref:ai-agents:mcp/remote/create-tool.adoc#secrets[Use secrets]: Store credentials securely in the Secrets Store -* xref:ai-agents:mcp/remote/tool-patterns.adoc[]: Find reusable patterns including validation, error handling, and response formatting -* xref:ai-agents:mcp/remote/troubleshooting.adoc[]: Diagnose common issues +* xref:ai-agents:mcp/remote/create-tool.adoc#secrets[Use secrets for credentials] +* xref:ai-agents:mcp/remote/tool-patterns.adoc[] +* xref:ai-agents:mcp/remote/troubleshooting.adoc[] diff --git a/modules/ai-agents/pages/mcp/remote/concepts.adoc b/modules/ai-agents/pages/mcp/remote/concepts.adoc index 16e78912c..db7f22ada 100644 --- a/modules/ai-agents/pages/mcp/remote/concepts.adoc +++ b/modules/ai-agents/pages/mcp/remote/concepts.adoc @@ -2,7 +2,7 @@ :description: Understand the MCP execution model, choose the right component type, and use traces for observability. :page-aliases: ai-agents:mcp/remote/understanding-mcp-tools.adoc :page-topic-type: concepts -:personas: ai_agent_developer, streaming_developer +:personas: agent_developer, streaming_developer // Reader journey: "I want to understand how it works" // Learning objectives - what readers should know after reading this page: :learning-objective-1: Describe the request/response execution model @@ -23,141 +23,23 @@ include::redpanda-connect:ai-agents:partial$mcp/concepts/component-mapping.adoc[ // Execution model - single-sourced from partial include::redpanda-connect:ai-agents:partial$mcp/concepts/execution-model.adoc[] +MCP tools use an agent-initiated execution model where agents invoke tools on-demand. Redpanda also supports pipeline-initiated integration where pipelines call agents using the `a2a_message` processor. For guidance on choosing between these patterns, see xref:ai-agents:agents/integration-overview.adoc[]. + [[component-selection]] == Choose the right component type // Component selection guide - single-sourced from partial include::redpanda-connect:ai-agents:partial$mcp/concepts/component-selection.adoc[] -[[execution-log]] -== Execution log and observability - -Every MCP server automatically emits OpenTelemetry traces to a topic called `redpanda.otel_traces`. These traces provide detailed observability into your MCP server's operations, creating a complete execution log. - -=== Traces and spans - -OpenTelemetry traces provide a complete picture of how a request flows through your system: - -* A _trace_ represents the entire lifecycle of a request (for example, a tool invocation from start to finish). 
-* A _span_ represents a single unit of work within that trace (such as a data processing operation or an external API call). -* A trace contains one or more spans organized hierarchically, showing how operations relate to each other. - -With 100% sampling, every operation is captured, creating a complete execution log that you can use for debugging, monitoring, and performance analysis. - -=== How Redpanda stores traces - -The `redpanda.otel_traces` topic stores OpenTelemetry spans in JSON format, following the https://opentelemetry.io/docs/specs/otel/protocol/[OpenTelemetry Protocol (OTLP)^] specification. A Protobuf schema named `redpanda.otel_traces-value` is also automatically registered with the topic, enabling clients to deserialize trace data correctly. - -The `redpanda.otel_traces` topic and its schema are managed automatically by Redpanda. If you delete either the topic or the schema, they are recreated automatically. However, deleting the topic permanently deletes all trace data, and the topic comes back empty. Do not produce your own data to this topic. It is reserved for OpenTelemetry traces. - -Each span in the execution log represents a specific operation performed by your MCP server, such as: - -* Tool invocation requests -* Data processing operations -* External API calls -* Error conditions -* Performance metrics - -=== Topic configuration and lifecycle - -The `redpanda.otel_traces` topic has a predefined retention policy. Configuration changes to this topic are not supported. If you modify settings, Redpanda reverts them to the default values. - -The topic persists in your cluster even after all MCP servers are deleted, allowing you to retain historical trace data for analysis. - -Trace data may contain sensitive information from your tool inputs and outputs. Consider implementing appropriate glossterm:ACL[,access control lists (ACLs)] for the `redpanda.otel_traces` topic, and review the data in traces before sharing or exporting to external systems. - -=== Understand the trace structure - -Each span captures a unit of work. Here's what a typical MCP tool invocation looks like: - -[,json] ----- -{ - "traceId": "71cad555b35602fbb35f035d6114db54", - "spanId": "43ad6bc31a826afd", - "name": "http_processor", - "attributes": [ - {"key": "city_name", "value": {"stringValue": "london"}}, - {"key": "result_length", "value": {"intValue": "198"}} - ], - "startTimeUnixNano": "1765198415253280028", - "endTimeUnixNano": "1765198424660663434", - "instrumentationScope": {"name": "rpcn-mcp"}, - "status": {"code": 0, "message": ""} -} ----- - -Key elements to understand: - -* **`traceId`**: Links all spans belonging to the same request. Use this to follow a tool invocation through its entire lifecycle. -* **`name`**: The tool name (`http_processor` in this example). This tells you which tool was invoked. -* **`instrumentationScope.name`**: When this is `rpcn-mcp`, the span represents an MCP tool. When it's `redpanda-connect`, it's internal processing. -* **`attributes`**: Context about the operation, like input parameters or result metadata. -* **`status.code`**: `0` means success, `2` means error. - -=== Parent-child relationships - -Traces show how operations relate. 
A tool invocation (parent) may trigger internal operations (children): - -[,json] ----- -{ - "traceId": "71cad555b35602fbb35f035d6114db54", - "spanId": "ed45544a7d7b08d4", - "parentSpanId": "43ad6bc31a826afd", - "name": "http", - "instrumentationScope": {"name": "redpanda-connect"}, - "status": {"code": 0, "message": ""} -} ----- - -The `parentSpanId` links this child span to the parent tool invocation. Both share the same `traceId`, so you can reconstruct the complete operation. - -=== Error events in traces - -When something goes wrong, traces capture error details: - -[,json] ----- -{ - "traceId": "71cad555b35602fbb35f035d6114db54", - "spanId": "ba332199f3af6d7f", - "parentSpanId": "43ad6bc31a826afd", - "name": "http_request", - "events": [ - { - "name": "event", - "timeUnixNano": "1765198420254169629", - "attributes": [{"key": "error", "value": {"stringValue": "type"}}] - } - ], - "status": {"code": 0, "message": ""} -} ----- - -The `events` array captures what happened and when. Use `timeUnixNano` to see exactly when the error occurred within the operation. - -=== Traces compared to audit logs - -OpenTelemetry traces are designed for observability and debugging, not audit logging or compliance. - -Traces provide: - -* Hierarchical view of request flow through your system (parent-child span relationships) -* Detailed timing information for performance analysis -* Ability to reconstruct execution paths and identify bottlenecks -* Insights into how operations flow through distributed systems - -Traces are not: +== Observability -* Immutable audit records for compliance purposes -* Designed for "who did what" accountability tracking +MCP servers automatically emit OpenTelemetry traces for monitoring and debugging. For detailed information about traces, spans, and the trace structure, see xref:ai-agents:observability/concepts.adoc[]. -For monitoring tasks like consuming traces, debugging failures, and measuring performance, see xref:ai-agents:mcp/remote/monitor-activity.adoc[]. +To monitor MCP server activity, consume traces, and debug failures, see xref:ai-agents:mcp/remote/monitor-mcp-servers.adoc[]. == Next steps * xref:ai-agents:mcp/remote/create-tool.adoc[] -* xref:ai-agents:mcp/remote/best-practices.adoc[]: Apply naming and design guidelines -* xref:ai-agents:mcp/remote/tool-patterns.adoc[]: Find reusable patterns -* xref:ai-agents:mcp/remote/troubleshooting.adoc[]: Diagnose common issues +* xref:ai-agents:mcp/remote/best-practices.adoc[] +* xref:ai-agents:mcp/remote/tool-patterns.adoc[] +* xref:ai-agents:mcp/remote/troubleshooting.adoc[] diff --git a/modules/ai-agents/pages/mcp/remote/create-tool.adoc b/modules/ai-agents/pages/mcp/remote/create-tool.adoc index 73a080526..1d60444bf 100644 --- a/modules/ai-agents/pages/mcp/remote/create-tool.adoc +++ b/modules/ai-agents/pages/mcp/remote/create-tool.adoc @@ -1,7 +1,7 @@ = Create an MCP Tool :description: Create an MCP tool with the correct YAML structure, metadata, and parameter mapping. :page-topic-type: how-to -:personas: ai_agent_developer, streaming_developer, data_engineer +:personas: agent_developer, streaming_developer, data_engineer // Reader journey: "I want to create a tool for my AI agent" // Learning objectives - what readers can do after reading this page: :learning-objective-1: Create a tool with the correct structure and MCP metadata @@ -24,16 +24,16 @@ After reading this page, you will be able to: == Create the tool -In Redpanda Cloud, you create tools directly in the Cloud Console or using the Data Plane API. 
+In Redpanda Cloud, you create tools directly in the Cloud UI or using the Data Plane API. [tabs] ====== -Cloud Console:: +Cloud UI:: + -- -. Log in to the link:https://cloud.redpanda.com/[Redpanda Cloud Console^]. +. Log in to the link:https://cloud.redpanda.com/[Redpanda Cloud UI^]. -. Navigate to *Remote MCP* and either create a new MCP server or edit an existing one. +. Navigate to *Agentic AI* > *Remote MCP* and either create a new MCP server or edit an existing one. . In the *Tools* section, click *Add Tool*. @@ -64,25 +64,7 @@ Here's an example using the xref:develop:connect/components/processors/sql_selec [source,yaml] ---- -label: lookup-customer # <1> - -sql_select: # <2> - driver: postgres - dsn: "${secrets.DATABASE_URL}" - table: customers - columns: ["id", "name", "email", "plan"] - where: id = ? - args_mapping: '[this.customer_id]' - -meta: # <3> - mcp: - enabled: true - description: "Look up a customer by ID and return their profile." - properties: - - name: customer_id - type: string - description: "The customer's unique identifier" - required: true +include::ai-agents:example$mcp-tools/processors/lookup_customer.yaml[tag=complete,indent=0] ---- <1> **Label**: Becomes the tool name. @@ -104,17 +86,7 @@ xref:develop:connect/components/processors/about.adoc[Processors] transform, fil .Processor tool [source,yaml] ---- -label: enrich-order - -processors: - - http: - url: "https://api.example.com/lookup" - verb: GET - -meta: - mcp: - enabled: true - description: "Enrich order with customer data" +include::ai-agents:example$mcp-tools/processors/enrich_order.yaml[tag=complete,indent=0] ---- xref:develop:connect/components/inputs/about.adoc[Inputs] read data from sources, xref:develop:connect/components/outputs/about.adoc[outputs] write data to destinations, and xref:develop:connect/components/caches/about.adoc[caches] store and retrieve data. Define these components directly at the top level: @@ -122,59 +94,20 @@ xref:develop:connect/components/inputs/about.adoc[Inputs] read data from sources .Input tool [source,yaml] ---- -label: read-events - -redpanda: # <1> - seed_brokers: ["${REDPANDA_BROKERS}"] - topics: ["events"] - consumer_group: "mcp-reader" - tls: - enabled: true - sasl: - - mechanism: SCRAM-SHA-256 - username: "${secrets.MCP_USERNAME}" - password: "${secrets.MCP_PASSWORD}" - -meta: - mcp: - enabled: true - description: "Read events from Redpanda" +include::ai-agents:example$mcp-tools/inputs/read_events.yaml[tag=complete,indent=0] ---- <1> The component name (`redpanda`) is at the top level, not wrapped in `input:`. 
.Output tool [source,yaml] ---- -label: publish-event - -redpanda: - seed_brokers: ["${REDPANDA_BROKERS}"] - topic: "processed-events" - tls: - enabled: true - sasl: - - mechanism: SCRAM-SHA-256 - username: "${secrets.MCP_USERNAME}" - password: "${secrets.MCP_PASSWORD}" - -meta: - mcp: - enabled: true - description: "Publish event to Redpanda" +include::ai-agents:example$mcp-tools/outputs/publish_event.yaml[tag=complete,indent=0] ---- .Cache tool [source,yaml] ---- -label: session-cache - -memory: - default_ttl: 300s - -meta: - mcp: - enabled: true - description: "In-memory cache for session data" +include::ai-agents:example$mcp-tools/caches/session_cache.yaml[tag=complete,indent=0] ---- Outputs can include a `processors:` section to transform data before publishing: @@ -182,27 +115,7 @@ Outputs can include a `processors:` section to transform data before publishing: .Output tool with processors [source,yaml] ---- -label: publish-with-timestamp - -processors: - - mutation: | - root = this - root.published_at = now() - -redpanda: - seed_brokers: ["${REDPANDA_BROKERS}"] - topic: "processed-events" - tls: - enabled: true - sasl: - - mechanism: SCRAM-SHA-256 - username: "${secrets.MCP_USERNAME}" - password: "${secrets.MCP_PASSWORD}" - -meta: - mcp: - enabled: true - description: "Add timestamp and publish to Redpanda" +include::ai-agents:example$mcp-tools/outputs/publish_with_timestamp.yaml[tag=complete,indent=0] ---- See xref:ai-agents:mcp/remote/tool-patterns.adoc#outputs-with-processors[outputs with processors] for more examples. @@ -216,6 +129,7 @@ The `meta.mcp` block defines how AI clients discover and interact with your tool include::redpanda-connect:ai-agents:partial$mcp/create-tool/mcp-metadata-fields-table.adoc[] +[#mcp-property-fields] ==== Property fields include::redpanda-connect:ai-agents:partial$mcp/create-tool/property-fields-table.adoc[] @@ -239,17 +153,12 @@ Use `this` to access message fields directly in processors like `mutation`, `map [source,yaml] ---- -mutation: | - root.search_query = this.query.lowercase() - root.max_results = this.limit.or(10) +include::ai-agents:example$mcp-tools/snippets/bloblang_this_context.yaml[tag=mutation,indent=0] ---- [source,yaml] ---- -sql_select: - table: orders - where: customer_id = ? AND status = ? - args_mapping: '[this.customer_id, this.status.or("active")]' +include::ai-agents:example$mcp-tools/snippets/bloblang_this_context.yaml[tag=args_mapping,indent=0] ---- === In string fields (interpolation) @@ -258,15 +167,12 @@ Use `${! ... }` interpolation to embed Bloblang expressions inside string values [source,yaml] ---- -http: - url: 'https://api.weather.com/v1/current?city=${! json("city") }&units=${! json("units").or("metric") }' +include::ai-agents:example$mcp-tools/snippets/interpolation.yaml[tag=http_url,indent=0] ---- [source,yaml] ---- -redpanda: - seed_brokers: ["${REDPANDA_BROKERS}"] # <1> - topic: '${! json("topic_name") }' # <2> +include::ai-agents:example$mcp-tools/snippets/interpolation.yaml[tag=redpanda_topic,indent=0] ---- <1> `$\{VAR}` without `!` is environment variable substitution, not Bloblang. <2> `${! ... }` with `!` is Bloblang interpolation that accesses message data. 
@@ -279,25 +185,14 @@ Use `.or(default)` to handle missing optional parameters: [source,yaml] ---- -mutation: | - root.city = this.city # Required - will error if missing - root.units = this.units.or("metric") # Optional with default - root.limit = this.limit.or(10).number() # Optional, converted to number +include::ai-agents:example$mcp-tools/snippets/defaults.yaml[tag=mutation,indent=0] ---- Declare which parameters are required in your `meta.mcp.properties`: [source,yaml] ---- -properties: - - name: city - type: string - description: "City name to look up" - required: true - - name: units - type: string - description: "Temperature units: 'metric' or 'imperial' (default: metric)" - required: false +include::ai-agents:example$mcp-tools/snippets/defaults.yaml[tag=properties,indent=0] ---- [[secrets]] @@ -309,18 +204,10 @@ Reference secrets using `${secrets.SECRET_NAME}` syntax: [source,yaml] ---- -http: - url: "https://api.example.com/data" - headers: - Authorization: "Bearer ${secrets.API_TOKEN}" - -sql_select: - driver: postgres - dsn: "${secrets.DATABASE_URL}" - table: customers +include::ai-agents:example$mcp-tools/snippets/secrets.yaml[tag=example,indent=0] ---- -When you add secret references to your tool configuration, the Cloud Console automatically detects them and provides an interface to create the required secrets. +When you add secret references to your tool configuration, the Cloud UI automatically detects them and provides an interface to create the required secrets. === Secrets best practices @@ -361,63 +248,13 @@ Here's a complete tool that wraps the `http` processor to fetch weather data: [source,yaml] ---- -label: get-weather - -processors: - # Validate and sanitize input - - label: validate_city - mutation: | - root.city = if this.city.or("").trim() == "" { - throw("city is required") - } else { - this.city.trim().lowercase().re_replace_all("[^a-z\\s\\-]", "") - } - root.units = this.units.or("metric") - - # Fetch weather data - - label: fetch_weather - try: - - http: - url: 'https://wttr.in/${! json("city") }?format=j1' - verb: GET - timeout: 10s - - - mutation: | - root.weather = { - "location": this.nearest_area.0.areaName.0.value, - "country": this.nearest_area.0.country.0.value, - "temperature_c": this.current_condition.0.temp_C, - "temperature_f": this.current_condition.0.temp_F, - "condition": this.current_condition.0.weatherDesc.0.value, - "humidity": this.current_condition.0.humidity, - "wind_kph": this.current_condition.0.windspeedKmph - } - - # Handle errors gracefully - - label: handle_errors - catch: - - mutation: | - root.error = true - root.message = "Failed to fetch weather: " + error() - -meta: - mcp: - enabled: true - description: "Get current weather for a city. Returns temperature, conditions, humidity, and wind speed." - properties: - - name: city - type: string - description: "City name (e.g., 'London', 'New York', 'Tokyo')" - required: true - - name: units - type: string - description: "Temperature units: 'metric' or 'imperial' (default: metric)" - required: false +include::ai-agents:example$mcp-tools/processors/get_weather_complete.yaml[tag=complete,indent=0] ---- == Next steps -* xref:ai-agents:mcp/remote/best-practices.adoc[]: Apply naming and design guidelines. -* xref:ai-agents:mcp/remote/tool-patterns.adoc[]: Find patterns for databases, APIs, and Redpanda. -* xref:ai-agents:mcp/remote/troubleshooting.adoc[]: Diagnose common issues. -* xref:develop:connect/components/about.adoc[]: Browse all available components. 
+* xref:ai-agents:agents/quickstart.adoc[] +* xref:ai-agents:mcp/remote/best-practices.adoc[] +* xref:ai-agents:mcp/remote/tool-patterns.adoc[] +* xref:ai-agents:mcp/remote/troubleshooting.adoc[] +* xref:develop:connect/components/about.adoc[] diff --git a/modules/ai-agents/pages/mcp/remote/manage-servers.adoc b/modules/ai-agents/pages/mcp/remote/manage-servers.adoc index d6cb056eb..61d7e66e7 100644 --- a/modules/ai-agents/pages/mcp/remote/manage-servers.adoc +++ b/modules/ai-agents/pages/mcp/remote/manage-servers.adoc @@ -1,7 +1,8 @@ = Manage Remote MCP Servers :description: Learn how to edit, stop, start, and delete MCP servers in Redpanda Cloud. +:page-aliases: ai-agents:mcp/remote/admin-guide.adoc :page-topic-type: how-to -:personas: platform_admin, ai_agent_developer +:personas: platform_admin, agent_developer // Reader journey: "I operate and maintain" // Learning objectives - what readers can accomplish from this page: :learning-objective-1: Edit MCP server configurations @@ -26,10 +27,10 @@ You can update the configuration, resources, or metadata of an MCP server at any [tabs] ===== -Cloud Console:: +Cloud UI:: + -- -. In the Redpanda Cloud Console, navigate to *Remote MCP*. +. In the Redpanda Cloud UI, navigate to *Agentic AI* > *Remote MCP*. . Find the MCP server you want to edit and click its name. . Click *Edit configuration*. . Make your changes. @@ -69,10 +70,10 @@ Stopping a server pauses all tool execution and releases compute resources, but [tabs] ===== -Cloud Console:: +Cloud UI:: + -- -. In the Redpanda Cloud Console, navigate to *Remote MCP*. +. In the Redpanda Cloud UI, navigate to *Agentic AI* > *Remote MCP*. . Find the server you want to stop. . Click the three dots and select *Stop*. . Confirm the action. @@ -100,10 +101,10 @@ Resume a stopped server to restore its functionality. [tabs] ===== -Cloud Console:: +Cloud UI:: + -- -. In the Redpanda Cloud Console, navigate to *Remote MCP*. +. In the Redpanda Cloud UI, navigate to *Agentic AI* > *Remote MCP*. . Find the stopped server. . Click the three dots and select *Start*. . Wait for the status to show *Running* before reconnecting clients. @@ -131,10 +132,10 @@ Deleting a server permanently removes it. You cannot undo this action. Redpanda [tabs] ===== -Cloud Console:: +Cloud UI:: + -- -. In the Redpanda Cloud Console, navigate to *Remote MCP*. +. In the Redpanda Cloud UI, navigate to *Agentic AI* > *Remote MCP*. . Find the server you want to delete. . Click the three dots and select *Delete*. . Confirm the deletion when prompted. @@ -161,6 +162,6 @@ Deletion is immediate and permanent. Make sure you have backed up any important == Next steps -* xref:ai-agents:mcp/remote/scale-resources.adoc[Scale MCP server resources] to optimize performance and costs. -* xref:ai-agents:mcp/remote/monitor-activity.adoc[Monitor MCP server activity] using OpenTelemetry traces. -* xref:ai-agents:mcp/remote/best-practices.adoc[Learn best practices] for building robust tools. 
+* xref:ai-agents:mcp/remote/scale-resources.adoc[] +* xref:ai-agents:mcp/remote/monitor-mcp-servers.adoc[] +* xref:ai-agents:mcp/remote/best-practices.adoc[] diff --git a/modules/ai-agents/pages/mcp/remote/monitor-activity.adoc b/modules/ai-agents/pages/mcp/remote/monitor-activity.adoc deleted file mode 100644 index 600d70e19..000000000 --- a/modules/ai-agents/pages/mcp/remote/monitor-activity.adoc +++ /dev/null @@ -1,113 +0,0 @@ -= Monitor MCP Server Activity -:description: How to consume traces, track tool invocations, measure performance, and debug failures in MCP servers. -:page-topic-type: how-to -:personas: platform_admin, ai_agent_developer, data_engineer -// Reader journey: "I need to accomplish X" -// Learning objectives - what readers can DO with this guide: -:learning-objective-1: Consume traces from the execution log -:learning-objective-2: Track tool invocations and measure performance -:learning-objective-3: Debug tool failures using trace data - -After creating an MCP server, you can monitor its activity using the execution log. - -After reading this page, you will be able to: - -* [ ] {learning-objective-1} -* [ ] {learning-objective-2} -* [ ] {learning-objective-3} - -For conceptual background on traces, spans, and the trace data structure, see xref:ai-agents:mcp/remote/concepts.adoc#execution-log[Execution log and observability]. - -== Prerequisites - -You must have an existing MCP server. If you do not have one, see xref:ai-agents:mcp/remote/quickstart.adoc[]. - -== Consume traces from the execution log - -MCP servers emit OpenTelemetry traces to the `redpanda.otel_traces` topic. You can consume these traces using any Kafka-compatible client or the Redpanda Cloud Console. - -[tabs] -===== -Cloud Console:: -+ --- -. In the Redpanda Cloud Console, navigate to *Topics*. -. Select `redpanda.otel_traces`. -. Click *Messages* to view recent traces. -. Use filters to search for specific trace IDs, span names, or time ranges. --- - -rpk:: -+ --- -Consume the most recent traces: - -[,bash] ----- -rpk topic consume redpanda.otel_traces --offset end -n 10 ----- - -Filter for specific MCP server activity by examining the span attributes. --- - -Data Plane API:: -+ --- -Use the link:/api/doc/cloud-dataplane/[Data Plane API] to programmatically consume traces and integrate with your monitoring pipeline. --- -===== - -== Track tool invocations - -Monitor which tools are being called and how often: - -. Consume traces from `redpanda.otel_traces`. -. Filter spans where `instrumentationScope.name` is `rpcn-mcp`. -. Examine the `name` field to see which tools are being invoked. -. Calculate frequency by counting spans per tool name over time windows. - -Example: To find all invocations of a specific tool, filter for spans where `name` matches your tool name (for example, `weather`, `http_processor`). - -== Measure performance - -Analyze tool execution times: - -. Find spans with `instrumentationScope.name` set to `rpcn-mcp`. -. Calculate duration: `(endTimeUnixNano - startTimeUnixNano) / 1000000` (milliseconds). -. Track percentiles (p50, p95, p99) to identify performance issues. -. Set alerts for durations exceeding acceptable thresholds. - -Example: A span with `startTimeUnixNano: "1765198415253280028"` and `endTimeUnixNano: "1765198424660663434"` has a duration of 9407ms. - -== Debug failures - -Investigate errors and failures: - -. Filter spans where `status.code` is `2` (error). -. Examine `status.message` for error details. -. 
Check the `events` array for error events with timestamps. -. Use `traceId` to correlate related spans and understand the full error context. -. Follow `parentSpanId` relationships to trace the error back to the originating tool. - -Example: A span with `status.code: 2` and `status.message: "connection timeout"` indicates the operation failed due to a timeout. - -== Correlate distributed operations - -Link MCP server activity to downstream effects: - -. Extract `traceId` from tool invocation spans. -. Search for the same `traceId` in other application logs or traces. -. Follow `parentSpanId` relationships to build complete operation timelines. -. Identify bottlenecks across your entire system. - -== Integrate with observability platforms - -The `redpanda.otel_traces` topic stores trace data in OpenTelemetry format. Redpanda does not support direct export to platforms like Grafana Cloud and Datadog due to format compatibility limitations. Redpanda produces one span per topic message, whereas these platforms expect traces in batch format. - -You can consume traces directly from the `redpanda.otel_traces` topic using any Kafka-compatible consumer for custom analysis and processing. - -== Next steps - -* xref:ai-agents:mcp/remote/concepts.adoc#execution-log[Execution logs]: Learn how traces and spans work -* xref:ai-agents:mcp/remote/troubleshooting.adoc[]: Diagnose and fix common issues -* xref:ai-agents:mcp/remote/manage-servers.adoc[]: Manage MCP server lifecycle diff --git a/modules/ai-agents/pages/mcp/remote/monitor-mcp-servers.adoc b/modules/ai-agents/pages/mcp/remote/monitor-mcp-servers.adoc new file mode 100644 index 000000000..fbb331875 --- /dev/null +++ b/modules/ai-agents/pages/mcp/remote/monitor-mcp-servers.adoc @@ -0,0 +1,104 @@ += Monitor MCP Server Activity +:description: Consume traces, track tool invocations, measure performance, and debug failures in MCP servers. +:page-topic-type: how-to +:personas: platform_admin, agent_developer, data_engineer +:learning-objective-1: Consume traces from the redpanda.otel_traces topic +:learning-objective-2: Track tool invocations and measure performance +:learning-objective-3: Debug tool failures using trace data + +Monitor MCP server activity using OpenTelemetry traces emitted to the `redpanda.otel_traces` glossterm:topic[]. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +For conceptual background on traces, spans, and the trace data structure, see xref:ai-agents:observability/concepts.adoc[]. + +== Prerequisites + +You must have an existing MCP server. If you do not have one, see xref:ai-agents:mcp/remote/quickstart.adoc[]. + +== View transcripts in the Cloud UI + +:context: mcp +include::ai-agents:partial$transcripts-ui-guide.adoc[] + +== Analyze traces programmatically + +MCP servers emit OpenTelemetry traces to the `redpanda.otel_traces` topic. Consume these traces to build custom monitoring, track tool usage, and analyze performance. + +=== Consume traces + +[tabs] +===== +Cloud UI:: ++ +-- +. In the Redpanda Cloud UI, navigate to *Topics*. +. Select `redpanda.otel_traces`. +. Click *Messages* to view recent traces. +. Use filters to search for specific trace IDs, span names, or time ranges. +-- + +rpk:: ++ +-- +Consume the most recent traces: + +[,bash] +---- +rpk topic consume redpanda.otel_traces --offset end -n 10 +---- + +Filter for specific MCP server activity by examining the span attributes. 
+-- + +Data Plane API:: ++ +-- +Use the link:/api/doc/cloud-dataplane/[Data Plane API^] to programmatically consume traces and integrate with your monitoring pipeline. +-- +===== + +=== Track tool invocations + +Monitor which tools are being called and how often by filtering spans where `instrumentationScope.name` is `rpcn-mcp`. The `name` field shows which tool was invoked. + +Example: Find all invocations of a specific tool: + +[,bash] +---- +rpk topic consume redpanda.otel_traces --offset start \ + | jq '.value | select(.instrumentationScope.name == "rpcn-mcp" and .name == "weather")' +---- + +=== Measure performance + +Calculate tool execution time using span timestamps: + +[,bash] +---- +Duration (ms) = (endTimeUnixNano - startTimeUnixNano) / 1000000 +---- + +Track percentiles (p50, p95, p99) to identify performance issues and set alerts for durations exceeding acceptable thresholds. + +=== Debug failures + +Filter for error spans where `status.code` is `2`: + +[,bash] +---- +rpk topic consume redpanda.otel_traces --offset start \ + | jq 'select(.status.code == 2)' +---- + +Check `status.message` for error details and the `events` array for error events with timestamps. Use `traceId` to correlate related spans across the distributed system. + +== Next steps + +* xref:ai-agents:observability/concepts.adoc[] +* xref:ai-agents:mcp/remote/troubleshooting.adoc[] +* xref:ai-agents:mcp/remote/manage-servers.adoc[] diff --git a/modules/ai-agents/pages/mcp/remote/overview.adoc b/modules/ai-agents/pages/mcp/remote/overview.adoc index bc3d11845..7e5aac62d 100644 --- a/modules/ai-agents/pages/mcp/remote/overview.adoc +++ b/modules/ai-agents/pages/mcp/remote/overview.adoc @@ -1,7 +1,7 @@ = Remote MCP Server Overview :description: Discover how AI agents can interact with your streaming data and how to connect them to Redpanda Cloud. :page-topic-type: overview -:personas: evaluator, ai_agent_developer +:personas: evaluator, agent_developer // Reader journey: "I'm evaluating this" // Learning objectives - what readers should understand after reading this page: :learning-objective-1: Explain what a Remote MCP server is and how tools differ from pipelines @@ -40,11 +40,11 @@ include::redpanda-connect:ai-agents:partial$mcp/overview/use-cases-table.adoc[] Remote MCP servers sit between AI clients and your data: -. Your AI agent connects to your MCP server using `rpk cloud mcp proxy` or direct authentication +. Your AI agent connects to your MCP server using `rpk cloud mcp proxy` or direct authentication. . A user asks their AI agent something like "What's the weather in London?" -. The server finds the matching tool and runs your Redpanda Connect configuration -. Your configuration fetches data, transforms it, and returns a structured response -. The AI agent gets the data and can use it to answer the user +. The server finds the matching tool and runs your Redpanda Connect configuration. +. Your configuration fetches data, transforms it, and returns a structured response. +. The AI agent gets the data and can use it to answer the user. === What a tool looks like @@ -54,20 +54,7 @@ Here's a minimal example that returns weather data: [source,yaml] ---- -http: - url: "https://wttr.in/${! 
this.city }?format=j1" - verb: GET - -meta: - mcp: - enabled: true - name: get_weather - description: "Get current weather for a city" - properties: - - name: city - type: string - description: "City name" - required: true +include::ai-agents:example$mcp-tools/processors/get_weather_simple.yaml[tag=complete,indent=0] ---- When an AI client asks about weather, it calls this tool with the city name. The tool fetches data from the weather API and returns it. @@ -78,7 +65,8 @@ include::redpanda-connect:ai-agents:partial$mcp/overview/specification-support.a == Next steps * xref:ai-agents:mcp/remote/quickstart.adoc[] -* xref:ai-agents:mcp/remote/concepts.adoc[]: Learn about execution and component types -* xref:ai-agents:mcp/remote/create-tool.adoc[]: Create custom tools step by step +* xref:ai-agents:agents/overview.adoc[] +* xref:ai-agents:mcp/remote/concepts.adoc[] +* xref:ai-agents:mcp/remote/create-tool.adoc[] * link:https://modelcontextprotocol.io/[Model Context Protocol documentation^] diff --git a/modules/ai-agents/pages/mcp/remote/quickstart.adoc b/modules/ai-agents/pages/mcp/remote/quickstart.adoc index a778df103..32a41b5aa 100644 --- a/modules/ai-agents/pages/mcp/remote/quickstart.adoc +++ b/modules/ai-agents/pages/mcp/remote/quickstart.adoc @@ -1,7 +1,7 @@ = Remote MCP Server Quickstart :description: Learn how to extend AI agents with custom tools that interact with your Redpanda data using the Model Context Protocol (MCP). :page-topic-type: tutorial -:personas: ai_agent_developer, streaming_developer, evaluator +:personas: agent_developer, streaming_developer, evaluator // Reader journey: "I want to try it now" // Learning objectives - what readers will achieve by completing this quickstart: :learning-objective-1: Create an MCP server in Redpanda Cloud @@ -176,18 +176,18 @@ curl -X POST "https:///v1/acls" \ [tabs] ===== -Cloud Console:: +Cloud UI:: + -- -. Log in to the link:https://cloud.redpanda.com/[Redpanda Cloud Console^]. +. Log in to the link:https://cloud.redpanda.com/[Redpanda Cloud UI^]. -. Navigate to *Remote MCP*. +. Navigate to *Agentic AI* > *Remote MCP*. + This page shows a list of existing servers. . Click *Create new MCP Server*. In *Server Metadata*, configure the basic information and resources: + -* *Display Name*: A human-friendly name such as `event-data-generator`. This name is shown in the Redpanda Cloud Console. It is not the name of the MCP server itself. +* *Display Name*: A human-friendly name such as `event-data-generator`. This name is shown in the Redpanda Cloud UI. It is not the name of the MCP server itself. * *Description*: Explain what the server does. For example, `Generates fake user event data and publishes it to Redpanda topics`. * *Tags*: Add key/value tags such as `owner=platform` or `env=demo`. The tag names `service_account_id` and `secret_id` are reserved and cannot be used. * *Resources*: Choose a size (XSmall / Small / Medium / Large / XLarge). Larger sizes allow more concurrent requests and faster processing, but cost more. You can change this later. @@ -304,10 +304,10 @@ Now that your MCP server is running with two tools available, you'll connect Cla When you connect Claude Code: -. Claude automatically discovers your `generate_input` and `redpanda_output` tools -. You can ask Claude in natural language to perform tasks using these tools -. Claude decides which tools to call and in what order based on your request -. The Redpanda CLI acts as a secure proxy, forwarding Claude's tool requests to your MCP server in the cloud +. 
Claude automatically discovers your `generate_input` and `redpanda_output` tools. +. You can ask Claude in natural language to perform tasks using these tools. +. Claude decides which tools to call and in what order based on your request. +. The Redpanda CLI acts as a secure proxy, forwarding Claude's tool requests to your MCP server in the cloud. This example uses Claude Code, but the same pattern works with any MCP-compatible client. @@ -389,9 +389,10 @@ For detailed solutions, see xref:ai-agents:mcp/remote/troubleshooting.adoc[]. You've deployed an MCP server and connected Claude Code to your Redpanda cluster. Here's where to go next: -* xref:ai-agents:mcp/remote/concepts.adoc[]: Understand how MCP tools differ from pipelines -* xref:ai-agents:mcp/remote/create-tool.adoc[]: Build production-quality tools with validation -* xref:ai-agents:mcp/remote/best-practices.adoc[]: Apply naming and design guidelines -* xref:ai-agents:mcp/remote/tool-patterns.adoc[]: Find reusable patterns -* xref:ai-agents:mcp/remote/troubleshooting.adoc[]: Diagnose common issues -* xref:ai-agents:mcp/remote/admin-guide.adoc[]: Scale resources, monitor activity, and administer your MCP servers +* xref:ai-agents:agents/quickstart.adoc[] +* xref:ai-agents:mcp/remote/concepts.adoc[] +* xref:ai-agents:mcp/remote/create-tool.adoc[] +* xref:ai-agents:mcp/remote/best-practices.adoc[] +* xref:ai-agents:mcp/remote/tool-patterns.adoc[] +* xref:ai-agents:mcp/remote/troubleshooting.adoc[] +* xref:ai-agents:mcp/remote/admin-guide.adoc[] diff --git a/modules/ai-agents/pages/mcp/remote/scale-resources.adoc b/modules/ai-agents/pages/mcp/remote/scale-resources.adoc index f6bb7c375..424094577 100644 --- a/modules/ai-agents/pages/mcp/remote/scale-resources.adoc +++ b/modules/ai-agents/pages/mcp/remote/scale-resources.adoc @@ -24,10 +24,10 @@ You must have an existing MCP server. If you do not have one, see xref:ai-agents [tabs] ===== -Cloud Console:: +Cloud UI:: + -- -. In the Redpanda Cloud Console, navigate to *Remote MCP*. +. In the Redpanda Cloud UI, navigate to *Agentic AI* > *Remote MCP*. . Find the MCP server you want to scale and click its name. . Click *Edit configuration*. . Under *Resources*, select a new size: diff --git a/modules/ai-agents/pages/mcp/remote/tool-patterns.adoc b/modules/ai-agents/pages/mcp/remote/tool-patterns.adoc index 1348419f1..2e9c658f0 100644 --- a/modules/ai-agents/pages/mcp/remote/tool-patterns.adoc +++ b/modules/ai-agents/pages/mcp/remote/tool-patterns.adoc @@ -2,7 +2,7 @@ :page-aliases: ai-agents:mcp/remote/pipeline-patterns.adoc :description: Catalog of patterns for MCP server tools in Redpanda Cloud. 
:page-topic-type: cookbook -:personas: ai_agent_developer, data_engineer +:personas: agent_developer, data_engineer // Reader journey: "I need an example for X" :learning-objective-1: Find reusable patterns for common MCP tool scenarios :learning-objective-2: Apply validation and error handling patterns for production robustness @@ -16,8 +16,6 @@ After reading this page, you will be able to: * [ ] {learning-objective-2} * [ ] {learning-objective-3} - - [[read-data]] == Read data @@ -32,7 +30,7 @@ Use xref:develop:connect/components/inputs/about.adoc[inputs] to create tools th [source,yaml] ---- -include::ai-agents:example$generate_input.yaml[] +include::ai-agents:example$mcp-tools/inputs/generate_input.yaml[] ---- See also: xref:develop:connect/components/inputs/generate.adoc[`generate` input component] @@ -46,17 +44,7 @@ See also: xref:develop:connect/components/inputs/generate.adoc[`generate` input [source,yaml] ---- -redpanda: - seed_brokers: [ "${REDPANDA_BROKERS}" ] - topics: [ "user-events" ] - consumer_group: "mcp-event-processor" - start_from_oldest: true - tls: - enabled: true - sasl: - - mechanism: "${REDPANDA_SASL_MECHANISM}" - username: "${REDPANDA_SASL_USERNAME}" - password: "${REDPANDA_SASL_PASSWORD}" +include::ai-agents:example$mcp-tools/inputs/consume_redpanda.yaml[tag=component,indent=0] ---- See also: xref:develop:connect/components/inputs/redpanda.adoc[`redpanda` input] @@ -70,23 +58,7 @@ See also: xref:develop:connect/components/inputs/redpanda.adoc[`redpanda` input] [source,yaml] ---- -redpanda: - seed_brokers: [ "${REDPANDA_BROKERS}" ] - topics: [ "sensor-readings" ] - consumer_group: "analytics-processor" - tls: - enabled: true - sasl: - - mechanism: "${REDPANDA_SASL_MECHANISM}" - username: "${REDPANDA_SASL_USERNAME}" - password: "${REDPANDA_SASL_PASSWORD}" - processors: - - mapping: | - root.sensor_id = this.sensor_id - root.avg_temperature = this.readings.map_each(r -> r.temperature).mean() - root.max_temperature = this.readings.map_each(r -> r.temperature).max() - root.reading_count = this.readings.length() - root.window_end = now() +include::ai-agents:example$mcp-tools/inputs/stream_processing.yaml[tag=component,indent=0] ---- See also: xref:develop:connect/components/inputs/redpanda.adoc[`redpanda` input] @@ -105,7 +77,7 @@ Use xref:develop:connect/components/processors/about.adoc[processors] to fetch d [source,yaml] ---- -include::ai-agents:example$http_processor.yaml[] +include::ai-agents:example$mcp-tools/processors/http_processor.yaml[] ---- See also: xref:develop:connect/components/processors/http.adoc[`http` processor], xref:develop:connect/components/processors/mutation.adoc[`mutation` processor] @@ -119,11 +91,27 @@ See also: xref:develop:connect/components/processors/http.adoc[`http` processor] [source,yaml] ---- -include::ai-agents:example$gcp_bigquery_select_processor.yaml[] +include::ai-agents:example$mcp-tools/processors/gcp_bigquery_select_processor.yaml[] ---- See also: xref:develop:connect/components/processors/gcp_bigquery_select.adoc[`gcp_bigquery_select` processor], xref:develop:connect/components/processors/sql_select.adoc[`sql_select` processor] +[[jira-queries]] +=== Query Jira issues + +*When to use:* Fetching tickets by status, checking assignments, finding recent issues, or building AI agents that interact with project management data. + +*Example use cases:* Get open bugs for a sprint, find issues assigned to a user, list recently updated tickets, search by custom fields. 
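+
+For example, the sprint use case might map to a JQL query like the following. This is an illustrative sketch only: the project key is a placeholder, and the `openSprints()` function requires Jira Software.
+
+[source,jql]
+----
+project = MYPROJECT AND issuetype = Bug AND status = Open AND sprint in openSprints() ORDER BY priority DESC
+----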
+ +NOTE: The `jira` processor is available on Dedicated and BYOC clusters. + +[source,yaml] +---- +include::ai-agents:example$mcp-tools/processors/search_jira.yaml[tag=complete,indent=0] +---- + +For more patterns including pagination, custom fields, and creating issues via the HTTP processor, see xref:develop:connect/cookbooks/jira.adoc[]. + [[ai-llm-integration]] === Integrate with AI/LLM services @@ -135,17 +123,7 @@ See also: xref:develop:connect/components/processors/gcp_bigquery_select.adoc[`g [source,yaml] ---- -openai_chat_completion: - api_key: "${secrets.OPENAI_API_KEY}" - model: "gpt-4" - prompt: | - Analyze this customer feedback and provide: - 1. Sentiment (positive/negative/neutral) - 2. Key themes - 3. Actionable insights - - Feedback: ${! json("feedback_text") } - max_tokens: 500 +include::ai-agents:example$mcp-tools/processors/openai_chat.yaml[tag=component,indent=0] ---- See also: xref:develop:connect/components/processors/openai_chat_completion.adoc[`openai_chat_completion`], xref:develop:connect/components/processors/openai_embeddings.adoc[`openai_embeddings`] @@ -154,10 +132,7 @@ See also: xref:develop:connect/components/processors/openai_chat_completion.adoc [source,yaml] ---- -openai_embeddings: - api_key: "${secrets.OPENAI_API_KEY}" - model: "text-embedding-3-small" - text: ${! json("content") } +include::ai-agents:example$mcp-tools/processors/openai_embeddings.yaml[tag=component,indent=0] ---- See also: xref:develop:connect/components/processors/cohere_embeddings.adoc[`cohere_embeddings`], xref:develop:connect/components/processors/gcp_vertex_ai_embeddings.adoc[`gcp_vertex_ai_embeddings`] @@ -176,7 +151,7 @@ Use xref:develop:connect/components/outputs/about.adoc[outputs] to write data to [source,yaml] ---- -include::ai-agents:example$redpanda_output.yaml[] +include::ai-agents:example$mcp-tools/outputs/redpanda_output.yaml[] ---- See also: xref:develop:connect/components/outputs/redpanda.adoc[`redpanda` output] @@ -189,7 +164,7 @@ Output tools can include processors to transform data before publishing. This pa [source,yaml] ---- -include::ai-agents:example$redpanda_output_with_processors.yaml[] +include::ai-agents:example$mcp-tools/outputs/redpanda_output_with_processors.yaml[] ---- [[caching]] @@ -202,13 +177,13 @@ include::ai-agents:example$redpanda_output_with_processors.yaml[] .Redpanda-backed cache [source,yaml] ---- -include::ai-agents:example$redpanda_cache.yaml[] +include::ai-agents:example$mcp-tools/caches/redpanda_cache.yaml[] ---- .In-memory cache [source,yaml] ---- -include::ai-agents:example$memory_cache.yaml[] +include::ai-agents:example$mcp-tools/caches/memory_cache.yaml[] ---- See also: xref:develop:connect/components/caches/memory.adoc[`memory` cache], xref:develop:connect/components/outputs/redpanda.adoc[`redpanda` output] @@ -227,17 +202,7 @@ Use Bloblang and processors to transform, validate, and route data. 
[source,yaml] ---- -mapping: | - # Parse and validate incoming data - root.user_id = this.user_id.or(throw("user_id is required")) - root.timestamp = now().ts_format("2006-01-02T15:04:05Z07:00") - - # Transform and enrich - root.email_domain = this.email.split("@").index(1) - root.is_premium = this.subscription_tier == "premium" - - # Filter sensitive data - root.profile = this.profile.without("ssn", "credit_card") +include::ai-agents:example$mcp-tools/processors/transform_validate.yaml[tag=mapping,indent=0] ---- See also: xref:develop:connect/components/processors/mapping.adoc[`mapping` processor], xref:develop:connect/guides/bloblang/about.adoc[Bloblang guide] @@ -251,30 +216,7 @@ See also: xref:develop:connect/components/processors/mapping.adoc[`mapping` proc [source,yaml] ---- -redpanda: - seed_brokers: [ "${REDPANDA_BROKERS}" ] - topics: [ "order-events" ] - consumer_group: "workflow-orchestrator" - tls: - enabled: true - sasl: - - mechanism: "${REDPANDA_SASL_MECHANISM}" - username: "${REDPANDA_SASL_USERNAME}" - password: "${REDPANDA_SASL_PASSWORD}" - processors: - - switch: - - check: this.event_type == "order_created" - processors: - - http: - url: "${secrets.INVENTORY_API}/reserve" - verb: POST - body: '{"order_id": "${! this.order_id }", "items": ${! json("items") }}' - - check: this.event_type == "payment_confirmed" - processors: - - http: - url: "${secrets.FULFILLMENT_API}/ship" - verb: POST - body: '{"order_id": "${! this.order_id }"}' +include::ai-agents:example$mcp-tools/inputs/event_driven_workflow.yaml[tag=component,indent=0] ---- See also: xref:develop:connect/components/inputs/redpanda.adoc[`redpanda` input] @@ -310,6 +252,7 @@ include::redpanda-connect:ai-agents:partial$mcp/tool-patterns/production-workflo == Next steps -* xref:ai-agents:mcp/remote/create-tool.adoc[]: Step-by-step tool creation guide -* xref:ai-agents:mcp/remote/best-practices.adoc[]: Apply naming and design guidelines -* xref:ai-agents:mcp/remote/troubleshooting.adoc[]: Diagnose and fix common issues +* xref:ai-agents:agents/integration-overview.adoc[] +* xref:ai-agents:mcp/remote/create-tool.adoc[] +* xref:ai-agents:mcp/remote/best-practices.adoc[] +* xref:ai-agents:mcp/remote/troubleshooting.adoc[] diff --git a/modules/ai-agents/pages/mcp/remote/troubleshooting.adoc b/modules/ai-agents/pages/mcp/remote/troubleshooting.adoc index 9c0dc41e6..2dd384758 100644 --- a/modules/ai-agents/pages/mcp/remote/troubleshooting.adoc +++ b/modules/ai-agents/pages/mcp/remote/troubleshooting.adoc @@ -1,7 +1,7 @@ = Troubleshoot Remote MCP Servers :description: Diagnose and fix common issues when building and running Remote MCP servers in Redpanda Cloud. 
:page-topic-type: troubleshooting -:personas: ai_agent_developer, streaming_developer, platform_admin +:personas: agent_developer, streaming_developer, platform_admin // Reader journey: "Something went wrong" // Learning objectives - what readers can do with this page: :learning-objective-1: Diagnose and fix lint and YAML configuration errors @@ -39,8 +39,8 @@ include::redpanda-connect:ai-agents:partial$mcp/troubleshooting/debugging-techni If you're still experiencing issues: -* xref:ai-agents:mcp/remote/create-tool.adoc[]: Review YAML structure rules and metadata fields -* xref:ai-agents:mcp/remote/best-practices.adoc[]: Review naming and metadata design -* xref:ai-agents:mcp/remote/concepts.adoc[]: Review component type selection +* xref:ai-agents:mcp/remote/create-tool.adoc[] +* xref:ai-agents:mcp/remote/best-practices.adoc[] +* xref:ai-agents:mcp/remote/concepts.adoc[] For protocol-level troubleshooting, see the link:https://modelcontextprotocol.io/[MCP documentation^]. diff --git a/modules/ai-agents/pages/observability/concepts.adoc b/modules/ai-agents/pages/observability/concepts.adoc new file mode 100644 index 000000000..6e4bd3eee --- /dev/null +++ b/modules/ai-agents/pages/observability/concepts.adoc @@ -0,0 +1,352 @@ += Transcripts and AI Observability +:description: Understand how Redpanda captures execution traces for agents and MCP servers using OpenTelemetry. +:page-topic-type: concepts +:personas: agent_developer, platform_admin, data_engineer +:learning-objective-1: Explain how traces and spans capture execution flow +:learning-objective-2: Interpret trace structure for debugging and monitoring +:learning-objective-3: Distinguish between observability traces and audit logs + +Redpanda automatically captures execution traces for both AI agents and MCP servers, providing complete observability into how your agentic systems operate. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== What are transcripts + +Every agent and MCP server automatically emits OpenTelemetry traces to a glossterm:topic[] called `redpanda.otel_traces`. These traces provide detailed observability into operations, creating complete transcripts. + +Transcripts capture: + +* Tool invocations and results +* Agent reasoning steps +* Data processing operations +* External API calls +* Error conditions +* Performance metrics + +With 100% sampling, every operation is captured, creating complete transcripts that you can use for debugging, monitoring, and performance analysis. + +== Traces and spans + +OpenTelemetry traces provide a complete picture of how a request flows through your system: + +* A _trace_ represents the entire lifecycle of a request (for example, a tool invocation from start to finish). +* A _span_ represents a single unit of work within that trace (such as a data processing operation or an external API call). +* A trace contains one or more spans organized hierarchically, showing how operations relate to each other. + +== Agent trace hierarchy + +Agent executions create a hierarchy of spans that reflect how agents process requests. Understanding this hierarchy helps you interpret agent behavior and identify where issues occur. + +=== Agent span types + +Agent traces contain these span types: + +[cols="2,3,3", options="header"] +|=== +| Span Type | Description | Use To + +| `ai-agent` +| Top-level span representing the entire agent invocation from start to finish. 
Includes all processing time, from receiving the request through executing the reasoning loop, calling tools, and returning the final response. +| Measure total request duration and identify slow agent invocations. + +| `agent` +| Internal agent processing that represents reasoning and decision-making. Shows time spent in the LLM reasoning loop, including context processing, tool selection, and response generation. Multiple `agent` spans may appear when the agent iterates through its reasoning loop. +| Track reasoning time and identify iteration patterns. + +| `invoke_agent` +| Agent and sub-agent invocation ( in multi-agent architectures). Represents one agent calling another via the A2A protocol. +| Trace calls between root agents and sub-agents, measure cross-agent latency, and identify which sub-agent was invoked. + +| `openai`, `anthropic`, or other LLM providers +| LLM provider API call showing calls to the language model. The span name matches the provider, and attributes typically include the model name (like `gpt-5.2` or `claude-sonnet-4-5`). +| Identify which model was called, measure LLM response time, and debug LLM API errors. + +| `rpcn-mcp` +| MCP tool invocation representing calls to Remote MCP servers. Shows tool execution time, including network latency and tool processing. Child spans with `instrumentationScope.name` set to `redpanda-connect` represent internal Redpanda Connect processing. +| Measure tool execution time and identify slow MCP tool calls. +|=== + +=== Typical agent execution flow + +A simple agent request creates this hierarchy: + +---- +ai-agent (6.65 seconds) +├── agent (6.41 seconds) +│ ├── invoke_agent: customer-support-agent (6.39 seconds) +│ │ └── openai: chat gpt-5.2 (6.2 seconds) +---- + +This shows: + +1. Total agent invocation: 6.65 seconds +2. Agent reasoning: 6.41 seconds +3. Sub-agent call: 6.39 seconds (most of the time) +4. LLM API call: 6.2 seconds (the actual bottleneck) + +Examine span durations to identify where time is spent and optimize accordingly. + +== MCP server trace hierarchy + +MCP server executions create a different hierarchy that reflects tool invocations and internal processing. Understanding this hierarchy helps you debug tool execution and identify performance bottlenecks. + +=== MCP server span types + +MCP server traces contain these span types: + +[cols="2,3,3", options="header"] +|=== +| Span Type | Description | Use To + +| `mcp-{server-id}` +| Top-level span representing the entire MCP server invocation. The server ID uniquely identifies the MCP server instance. This span encompasses all tool execution from request receipt to response completion. +| Measure total MCP server response time and identify slow tool invocations. + +| `service` +| Internal service processing span that appears at multiple levels in the hierarchy. Represents Redpanda Connect service operations including routing, processing, and component execution. +| Track internal processing overhead and identify where time is spent in the service layer. + +| Tool name (e.g., `get_order_status`, `get_customer_history`) +| The specific MCP tool being invoked. This span name matches the tool name defined in the MCP server configuration. +| Identify which tool was called and measure tool-specific execution time. + +| `processors` +| Processor pipeline execution span showing the collection of processors that process the tool's data. Appears as a child of the tool invocation span. +| Measure total processor pipeline execution time. 
+ +| Processor name (e.g., `mapping`, `http`, `branch`) +| Individual processor execution span representing a single Redpanda Connect processor. The span name matches the processor type. +| Identify slow processors and debug processing logic. +|=== + +=== Typical MCP server execution flow + +An MCP tool invocation creates this hierarchy: + +---- +mcp-d5mnvn251oos73 (4.00 seconds) +├── service > get_order_status (4.07 seconds) +│ └── service > processors (43 microseconds) +│ └── service > mapping (18 microseconds) +---- + +This shows: + +1. Total MCP server invocation: 4.00 seconds +2. Tool execution (get_order_status): 4.07 seconds +3. Processor pipeline: 43 microseconds +4. Mapping processor: 18 microseconds (data transformation) + +The majority of time (4+ seconds) is spent in tool execution, while internal processing (mapping) takes only microseconds. This indicates the tool itself (likely making external API calls or database queries) is the bottleneck, not Redpanda Connect's internal processing. + +== Trace layers and scope + +Traces contain multiple layers of instrumentation, from HTTP transport through application logic to external service calls. The `scope.name` field in each span identifies which layer of instrumentation created that span. + +=== Instrumentation layers + +A complete agent trace includes these layers: + +[cols="2,2,4", options="header"] +|=== +| Layer | Scope Name | Purpose + +| HTTP Server +| `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` +| HTTP transport layer receiving requests. Shows request/response sizes, status codes, client addresses, and network details. + +| AI SDK (Agent) +| `github.com/redpanda-data/ai-sdk-go/plugins/otel` +| Agent application logic. Shows agent invocations, LLM calls, tool executions, conversation IDs, token usage, and model details. Includes `gen_ai.*` semantic convention attributes. + +| HTTP Client +| `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` +| Outbound HTTP calls from agent to MCP servers. Shows target URLs, request methods, and response codes. + +| MCP Server +| `rpcn-mcp` +| MCP server tool execution. Shows tool name, input parameters, result size, and execution time. Appears as a separate `service.name` in resource attributes. + +| Redpanda Connect +| `redpanda-connect` +| Internal Redpanda Connect component execution within MCP tools. Shows pipeline and individual component spans. +|=== + +=== How layers connect + +Layers connect through parent-child relationships in a single trace: + +---- +ai-agent-http-server (HTTP Server layer) +└── invoke_agent customer-support-agent (AI SDK layer) + ├── chat gpt-5-nano (AI SDK layer, LLM call 1) + ├── execute_tool get_order_status (AI SDK layer) + │ └── HTTP POST (HTTP Client layer) + │ └── get_order_status (MCP Server layer, different service) + │ └── processors (Redpanda Connect layer) + └── chat gpt-5-nano (AI SDK layer, LLM call 2) +---- + +This shows: + +1. HTTP request arrives at agent +2. Agent invokes sub-agent +3. Agent makes first LLM call to decide what to do +4. Agent executes tool, making HTTP call to MCP server +5. MCP server processes tool through its pipeline +6. Agent makes second LLM call with tool results +7. Response returns through HTTP layer + +=== Cross-service traces + +When agents call MCP tools, the trace spans multiple services. 
Each service has a different `service.name` in the resource attributes: + +* Agent spans: `"service.name": "ai-agent"` +* MCP server spans: `"service.name": "mcp-{server-id}"` + +Both use the same `traceId`, allowing you to follow a request across service boundaries. + +=== Key attributes by layer + +Different layers expose different attributes: + +HTTP Server/Client layer: + +- `http.request.method`, `http.response.status_code` +- `server.address`, `url.path`, `url.full` +- `network.peer.address`, `network.peer.port` +- `http.request.body.size`, `http.response.body.size` + +AI SDK layer: + +- `gen_ai.operation.name`: Operation type (`invoke_agent`, `chat`, `execute_tool`) +- `gen_ai.conversation.id`: Links spans to the same conversation +- `gen_ai.agent.name`: Sub-agent name for multi-agent systems +- `gen_ai.provider.name`, `gen_ai.request.model`: LLM provider and model +- `gen_ai.usage.input_tokens`, `gen_ai.usage.output_tokens`: Token consumption +- `gen_ai.tool.name`, `gen_ai.tool.call.arguments`: Tool execution details +- `gen_ai.input.messages`, `gen_ai.output.messages`: Full LLM conversation context + +MCP Server layer: + +- Tool-specific attributes like `order_id`, `customer_id` +- `result_prefix`, `result_length`: Tool result metadata + +Redpanda Connect layer: + +- Component-specific attributes from your tool configuration + +Use `scope.name` to filter spans by layer when analyzing traces. + +== Understand the trace structure + +Each span captures a unit of work. Here's what a typical MCP tool invocation looks like: + +[,json] +---- +{ + "traceId": "71cad555b35602fbb35f035d6114db54", + "spanId": "43ad6bc31a826afd", + "name": "http_processor", + "attributes": [ + {"key": "city_name", "value": {"stringValue": "london"}}, + {"key": "result_length", "value": {"intValue": "198"}} + ], + "startTimeUnixNano": "1765198415253280028", + "endTimeUnixNano": "1765198424660663434", + "instrumentationScope": {"name": "rpcn-mcp"}, + "status": {"code": 0, "message": ""} +} +---- + +Key elements to understand: + +* `traceId`: Links all spans belonging to the same request. Use this to follow a tool invocation through its entire lifecycle. +* `name`: The tool or operation name (`http_processor` in this example). This tells you which component was invoked. +* `instrumentationScope.name`: When this is `rpcn-mcp`, the span represents an MCP tool. When it's `redpanda-connect`, it's internal processing. +* `attributes`: Context about the operation, like input parameters or result metadata. +* `status.code`: `0` means success, `2` means error. + +=== Parent-child relationships + +Traces show how operations relate. A tool invocation (parent) may trigger internal operations (children): + +[,json] +---- +{ + "traceId": "71cad555b35602fbb35f035d6114db54", + "spanId": "ed45544a7d7b08d4", + "parentSpanId": "43ad6bc31a826afd", + "name": "http", + "instrumentationScope": {"name": "redpanda-connect"}, + "status": {"code": 0, "message": ""} +} +---- + +The `parentSpanId` links this child span to the parent tool invocation. Both share the same `traceId` so you can reconstruct the complete operation. 
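+
+As a minimal sketch of how you might rebuild this hierarchy from the topic, the following command collects every span that shares one `traceId` and prints the fields needed to link parents to children. It assumes each record's `value` field is a JSON-encoded span as shown above, and it uses the trace ID from these examples.
+
+[,bash]
+----
+# Print the parent/child links for a single trace
+TRACE_ID="71cad555b35602fbb35f035d6114db54"
+rpk topic consume redpanda.otel_traces --offset start \
+  | jq --arg id "$TRACE_ID" '.value | fromjson
+      | select(.traceId == $id)
+      | {name, spanId, parentSpanId, scope: .instrumentationScope.name}'
+----
+
+A span whose `parentSpanId` matches another span's `spanId` is its child; the span with no `parentSpanId` is the root of the trace.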
+ +== Error events in traces + +When something goes wrong, traces capture error details: + +[,json] +---- +{ + "traceId": "71cad555b35602fbb35f035d6114db54", + "spanId": "ba332199f3af6d7f", + "parentSpanId": "43ad6bc31a826afd", + "name": "http_request", + "events": [ + { + "name": "event", + "timeUnixNano": "1765198420254169629", + "attributes": [{"key": "error", "value": {"stringValue": "type"}}] + } + ], + "status": {"code": 0, "message": ""} +} +---- + +The `events` array captures what happened and when. Use `timeUnixNano` to see exactly when the error occurred within the operation. + +[[opentelemetry-traces-topic]] +== How Redpanda stores traces + +The `redpanda.otel_traces` topic stores OpenTelemetry spans in JSON format, following the https://opentelemetry.io/docs/specs/otel/protocol/[OpenTelemetry Protocol (OTLP)^] specification. A Protobuf schema named `redpanda.otel_traces-value` is also automatically registered with the topic, enabling clients to deserialize trace data correctly. + +The `redpanda.otel_traces` topic and its schema are managed automatically by Redpanda. If you delete either the topic or the schema, they are recreated automatically. However, deleting the topic permanently deletes all trace data, and the topic comes back empty. Do not produce your own data to this topic. It is reserved for OpenTelemetry traces. + +=== Topic configuration and lifecycle + +The `redpanda.otel_traces` topic has a predefined retention policy. Configuration changes to this topic are not supported. If you modify settings, Redpanda reverts them to the default values. + +The topic persists in your cluster even after all agents and MCP servers are deleted, allowing you to retain historical trace data for analysis. + +Trace data may contain sensitive information from your tool inputs and outputs. Consider implementing appropriate glossterm:ACL[access control lists (ACLs)] for the `redpanda.otel_traces` topic, and review the data in traces before sharing or exporting to external systems. + +== Traces compared to audit logs + +OpenTelemetry traces are designed for observability and debugging, not audit logging or compliance. + +Traces provide: + +* Hierarchical view of request flow through your system (parent-child span relationships) +* Detailed timing information for performance analysis +* Ability to reconstruct execution paths and identify bottlenecks +* Insights into how operations flow through distributed systems + +Traces are not: + +* Immutable audit records for compliance purposes +* Designed for "who did what" accountability tracking + +For compliance and audit requirements, use the session and task topics for agents, which provide records of agent conversations and execution. + +== Next steps + +* xref:ai-agents:agents/monitor-agents.adoc[] +* xref:ai-agents:mcp/remote/monitor-mcp-servers.adoc[] diff --git a/modules/ai-agents/partials/transcripts-ui-guide.adoc b/modules/ai-agents/partials/transcripts-ui-guide.adoc new file mode 100644 index 000000000..5d7d00604 --- /dev/null +++ b/modules/ai-agents/partials/transcripts-ui-guide.adoc @@ -0,0 +1,89 @@ +// ============================================================================= +// PARTIAL: transcripts-ui-guide.adoc +// ============================================================================= +// +// PURPOSE: +// Documents the Transcripts UI interface for both AI agents and MCP servers. +// Single-sources UI navigation and component descriptions that are identical +// across both contexts. 
+// +// INCLUDED BY: +// - cloud-docs: modules/ai-agents/pages/agents/monitor-agents.adoc +// - cloud-docs: modules/ai-agents/pages/mcp/remote/monitor-mcp-servers.adoc +// +// INCLUDE SYNTAX: +// :context: agent +// include::partial$transcripts-ui-guide.adoc[] +// +// :context: mcp +// include::partial$transcripts-ui-guide.adoc[] +// +// ATTRIBUTES USED: +// - context: Controls agent-specific vs MCP-specific content +// Valid values: "agent" | "mcp" +// +// DEPENDENCIES: +// - xref:ai-agents:observability/concepts.adoc#agent-trace-hierarchy[] +// - xref:ai-agents:observability/concepts.adoc#mcp-server-trace-hierarchy[] +// +// CONTENT TYPE: +// UI navigation and interface explanation (procedural context for how-to pages) +// +// ============================================================================= + +=== Navigate the transcripts view + +// Navigation is identical for both contexts +. In the left navigation panel, click *Transcripts*. +ifeval::["{context}" == "agent"] +. Select a recent transcript from your agent executions. +endif::[] +ifeval::["{context}" == "mcp"] +. Select a recent transcript from your MCP server tool invocations. +endif::[] + +The transcripts view displays: + +* *Timeline* (top): Visual history of recent executions with success/error indicators +* *Trace list* (middle): Hierarchical view of traces and spans +* *Summary panel* (right): Detailed metrics when you select a transcript + +// UI component descriptions +==== Timeline visualization + +The timeline at the top shows execution patterns over time: + +* Green bars: Successful executions +* Red bars: Failed executions with errors +* Gray bars: Incomplete traces or traces still loading +* Time range: Displays the last few hours by default + +Use the timeline to spot patterns like error clusters, performance degradation over time, or gaps indicating downtime. + +==== Trace hierarchy + +The trace list shows nested operations with visual duration bars indicating how long each operation took. Click the expand arrows (▶) to drill into nested spans and see the complete execution flow. + +// Link to appropriate concepts section based on context +ifeval::["{context}" == "agent"] +For details on span types, see xref:ai-agents:observability/concepts.adoc#agent-trace-hierarchy[Agent trace hierarchy]. +endif::[] +ifeval::["{context}" == "mcp"] +For details on span types, see xref:ai-agents:observability/concepts.adoc#mcp-server-trace-hierarchy[MCP server trace hierarchy]. +endif::[] + +==== Summary panel + +When you select a transcript, the right panel shows: + +* Duration: Total execution time for this request +* Total Spans: Number of operations in the trace +ifeval::["{context}" == "agent"] +* Token Usage: Input tokens, output tokens, and total (critical for cost tracking) +* LLM Calls: How many times the agent called the language model +* Service: The agent identifier +* Conversation ID: Links to session data topics +endif::[] +ifeval::["{context}" == "mcp"] +* Service: The MCP server identifier +endif::[] diff --git a/modules/billing/pages/billing.adoc b/modules/billing/pages/billing.adoc index f51b7b39d..cf6f32e72 100644 --- a/modules/billing/pages/billing.adoc +++ b/modules/billing/pages/billing.adoc @@ -140,7 +140,7 @@ Pricing per MCP server depends on the compute units you allocate. The cost of a NOTE: Compute units for Remote MCP use the same definition and rates as those for Redpanda Connect. 
-MCP servers automatically emit OpenTelemetry traces to the xref:ai-agents:mcp/remote/monitor-activity.adoc#opentelemetry-traces-topic[`redpanda.otel_traces` topic]. For Serverless clusters, usage of this system-managed traces topic is not billed. You will not incur ingress, egress, storage, or partition charges for trace data. For Dedicated and BYOC clusters, standard billing metrics apply to the traces topic. +MCP servers automatically emit OpenTelemetry traces to the xref:ai-agents:observability/concepts.adoc#opentelemetry-traces-topic[`redpanda.otel_traces` topic]. For Serverless clusters, usage of this system-managed traces topic is not billed. You will not incur ingress, egress, storage, or partition charges for trace data. For Dedicated and BYOC clusters, standard billing metrics apply to the traces topic. == Support plans diff --git a/modules/develop/examples/cookbooks/jira/create-issue.yaml b/modules/develop/examples/cookbooks/jira/create-issue.yaml new file mode 100644 index 000000000..1ed695933 --- /dev/null +++ b/modules/develop/examples/cookbooks/jira/create-issue.yaml @@ -0,0 +1,28 @@ +# tag::config[] +input: + generate: + count: 1 + mapping: | + root.fields = { + "project": {"key": "MYPROJECT"}, + "summary": "Issue created from Redpanda Connect", + "description": { + "type": "doc", + "version": 1, + "content": [{"type": "paragraph", "content": [{"type": "text", "text": "Created via API"}]}] + }, + "issuetype": {"name": "Task"} + } + +pipeline: + processors: + - http: + url: "${secrets.JIRA_BASE_URL}/rest/api/3/issue" + verb: POST + headers: + Content-Type: application/json + Authorization: "Basic ${secrets.JIRA_AUTH_TOKEN}" + +output: + stdout: {} +# end::config[] diff --git a/modules/develop/examples/cookbooks/jira/input-once.yaml b/modules/develop/examples/cookbooks/jira/input-once.yaml new file mode 100644 index 000000000..b8f13b5ca --- /dev/null +++ b/modules/develop/examples/cookbooks/jira/input-once.yaml @@ -0,0 +1,18 @@ +# tag::config[] +input: + generate: + count: 1 + mapping: | + root.jql = "project = MYPROJECT AND status = Open" + root.maxResults = 100 + +pipeline: + processors: + - jira: + base_url: "${secrets.JIRA_BASE_URL}" + username: "${secrets.JIRA_USERNAME}" + api_token: "${secrets.JIRA_API_TOKEN}" + +output: + stdout: {} +# end::config[] diff --git a/modules/develop/examples/cookbooks/jira/input-periodic.yaml b/modules/develop/examples/cookbooks/jira/input-periodic.yaml new file mode 100644 index 000000000..991a40b0d --- /dev/null +++ b/modules/develop/examples/cookbooks/jira/input-periodic.yaml @@ -0,0 +1,19 @@ +# tag::config[] +input: + generate: + interval: 30s + mapping: | + root.jql = "project = MYPROJECT AND updated >= -1h ORDER BY updated DESC" + root.maxResults = 50 + root.fields = ["key", "summary", "status", "assignee", "priority"] + +pipeline: + processors: + - jira: + base_url: "${secrets.JIRA_BASE_URL}" + username: "${secrets.JIRA_USERNAME}" + api_token: "${secrets.JIRA_API_TOKEN}" + +output: + stdout: {} +# end::config[] diff --git a/modules/develop/examples/cookbooks/jira/test-jira-examples.sh b/modules/develop/examples/cookbooks/jira/test-jira-examples.sh new file mode 100755 index 000000000..c795f76ca --- /dev/null +++ b/modules/develop/examples/cookbooks/jira/test-jira-examples.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# +# Test script for Jira cookbook examples +# +# This script validates YAML syntax using `rpk connect lint` +# +# Usage: +# ./test-jira-examples.sh + +set -euo pipefail + +# Colors for output +RED='\033[0;31m' 
+GREEN='\033[0;32m' +NC='\033[0m' + +# Get script directory +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +echo "Testing Jira cookbook examples..." +echo "" + +TOTAL=0 +PASSED=0 +FAILED=0 + +for file in *.yaml; do + if [[ -f "$file" ]]; then + TOTAL=$((TOTAL + 1)) + echo -n " $file... " + + if output=$(rpk connect lint --skip-env-var-check "$file" 2>&1); then + echo -e "${GREEN}PASSED${NC}" + PASSED=$((PASSED + 1)) + else + echo -e "${RED}FAILED${NC}" + echo "$output" | sed 's/^/ /' + FAILED=$((FAILED + 1)) + fi + fi +done + +echo "" +echo "Results: $PASSED/$TOTAL passed" + +if [[ $FAILED -gt 0 ]]; then + echo -e "${RED}Some tests failed${NC}" + exit 1 +else + echo -e "${GREEN}All tests passed${NC}" + exit 0 +fi diff --git a/modules/develop/pages/connect/components/inputs/http_server.adoc b/modules/develop/pages/connect/components/inputs/http_server.adoc new file mode 100644 index 000000000..48b7a3343 --- /dev/null +++ b/modules/develop/pages/connect/components/inputs/http_server.adoc @@ -0,0 +1,3 @@ += http_server +:page-aliases: components:inputs/http_server.adoc +include::redpanda-connect:components:inputs/http_server.adoc[tag=single-source] diff --git a/modules/develop/pages/connect/components/processors/a2a_message.adoc b/modules/develop/pages/connect/components/processors/a2a_message.adoc new file mode 100644 index 000000000..b4067524b --- /dev/null +++ b/modules/develop/pages/connect/components/processors/a2a_message.adoc @@ -0,0 +1,3 @@ += a2a_message +:page-aliases: components:processors/a2a_message.adoc +include::redpanda-connect:components:partial$components/cloud-only/processors/a2a_message.adoc[tag=single-source] diff --git a/modules/develop/pages/connect/configuration/resource-management.adoc b/modules/develop/pages/connect/configuration/resource-management.adoc index 802e1d8fb..c48a3e58e 100644 --- a/modules/develop/pages/connect/configuration/resource-management.adoc +++ b/modules/develop/pages/connect/configuration/resource-management.adoc @@ -125,7 +125,7 @@ To view resources already allocated to a data pipeline: [tabs] ===== -Cloud Console:: +Cloud UI:: + -- . Log in to https://cloud.redpanda.com[Redpanda Cloud^]. @@ -152,7 +152,7 @@ To scale the resources for a pipeline: [tabs] ===== -Cloud Console:: +Cloud UI:: + -- . Log in to https://cloud.redpanda.com[Redpanda Cloud^]. diff --git a/modules/develop/pages/connect/configuration/secret-management.adoc b/modules/develop/pages/connect/configuration/secret-management.adoc index 026616216..ac0fee981 100644 --- a/modules/develop/pages/connect/configuration/secret-management.adoc +++ b/modules/develop/pages/connect/configuration/secret-management.adoc @@ -15,7 +15,7 @@ You can create a secret and reference it in multiple data pipelines on the same [tabs] ===== -Cloud Console:: +Cloud UI:: + -- . Log in to https://cloud.redpanda.com[Redpanda Cloud^]. @@ -71,7 +71,7 @@ NOTE: Changes to secret values do not take effect until a pipeline is restarted. [tabs] ===== -Cloud Console:: +Cloud UI:: + -- . Log in to https://cloud.redpanda.com[Redpanda Cloud^]. @@ -122,7 +122,7 @@ NOTE: Changes do not affect pipelines that are already running. [tabs] ===== -Cloud Console:: +Cloud UI:: + -- . Log in to https://cloud.redpanda.com[Redpanda Cloud^]. @@ -158,7 +158,7 @@ You must include the following values: [tabs] ===== -Cloud Console:: +Cloud UI:: + -- . Go to the **Connect** page, and create a pipeline (or open an existing pipeline to edit). 
diff --git a/modules/develop/pages/connect/cookbooks/jira.adoc b/modules/develop/pages/connect/cookbooks/jira.adoc new file mode 100644 index 000000000..f8015ae1d --- /dev/null +++ b/modules/develop/pages/connect/cookbooks/jira.adoc @@ -0,0 +1,159 @@ += Work with Jira Issues +:description: Learn how to query, filter, and create Jira issues using Redpanda Connect pipelines. +:page-aliases: cookbooks:jira.adoc +:page-topic-type: cookbook +:personas: streaming_developer, data_engineer +:learning-objective-1: Query Jira issues using JQL patterns with the Jira processor +:learning-objective-2: Combine generate input with Jira processor for scheduled queries +:learning-objective-3: Create Jira issues using the HTTP processor and REST API + +The Jira processor enables querying Jira issues using JQL (Jira Query Language) and returning structured data. It’s a processor, so you can use it in pipelines for input-style flows (pair with `generate`) or output-style flows (pair with `drop`). + + +Use this cookbook to: + +* [ ] Query Jira issues on a schedule or on-demand +* [ ] Filter issues using JQL patterns +* [ ] Create Jira issues using the HTTP processor + +== Prerequisites + +The examples in this cookbook use the Secrets Store for Jira credentials. This keeps sensitive credentials secure and separate from your pipeline configuration. + +. link:https://id.atlassian.com/manage-profile/security/api-tokens[Generate a Jira API token^]. + +. Add your Jira credentials to the xref:develop:connect/configuration/secret-management.adoc[Secrets Store]: ++ +- `JIRA_BASE_URL`: Your Jira instance URL (for example, `\https://your-domain.atlassian.net`) +- `JIRA_USERNAME`: Your Jira account email address +- `JIRA_API_TOKEN`: The API token generated from your Atlassian account +- `JIRA_AUTH_TOKEN` (optional, for creating issues): Base64-encoded `username:api_token` string + +== Use Jira as an input + +To use Jira as an input, combine the `generate` input with the Jira processor. This pattern triggers Jira queries at regular intervals or on-demand. + +TIP: Replace `MYPROJECT` in the examples with your actual Jira project key. 
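+
+The examples below write results to `stdout` so you can inspect the returned issues. To land the issues in a Redpanda topic for downstream consumers instead, only the `output` block changes. The following is a minimal sketch that assumes a `jira-issues` topic and broker credentials stored as secrets or environment variables:
+
+[source,yaml]
+----
+output:
+  redpanda:
+    seed_brokers: [ "${REDPANDA_BROKERS}" ]
+    topic: jira-issues
+    tls:
+      enabled: true
+    sasl:
+      - mechanism: "${REDPANDA_SASL_MECHANISM}"
+        username: "${REDPANDA_SASL_USERNAME}"
+        password: "${REDPANDA_SASL_PASSWORD}"
+----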
+
+=== Query Jira periodically
+
+This example queries Jira every 30 seconds for recent issues:
+
+[source,yaml]
+----
+include::develop:example$cookbooks/jira/input-periodic.yaml[tag=config,indent=0]
+----
+
+=== One-time query
+
+For a single query, use `count` instead of `interval`:
+
+[source,yaml]
+----
+include::develop:example$cookbooks/jira/input-once.yaml[tag=config,indent=0]
+----
+
+== Input message format
+
+The Jira processor expects input messages containing valid Jira queries in JSON format:
+
+[source,json]
+----
+{
+  "jql": "project = MYPROJECT AND status = Open",
+  "maxResults": 50,
+  "fields": ["key", "summary", "status", "assignee"]
+}
+----
+
+=== Required fields
+
+- `jql`: The JQL (Jira Query Language) query string
+
+=== Optional fields
+
+- `maxResults`: Maximum number of results to return (default: 50)
+- `fields`: Array of field names to include in the response
+
+== JQL query patterns
+
+Here are common JQL patterns for filtering issues:
+
+=== Recent issues by project
+
+[source,jql]
+----
+project = MYPROJECT AND created >= -7d ORDER BY created DESC
+----
+
+=== Issues assigned to current user
+
+[source,jql]
+----
+assignee = currentUser() AND status != Done
+----
+
+=== Issues by status
+
+[source,jql]
+----
+project = MYPROJECT AND status IN (Open, 'In Progress', 'To Do')
+----
+
+=== Issues by priority
+
+[source,jql]
+----
+project = MYPROJECT AND priority = High ORDER BY created DESC
+----
+
+== Output message format
+
+The Jira processor returns individual issue messages, rather than a response object with an `issues` array.
+
+Each message output by the Jira processor represents a single issue:
+
+[source,json]
+----
+{
+  "id": "12345",
+  "key": "DOC-123",
+  "fields": {
+    "summary": "Example issue",
+    "status": {
+      "name": "In Progress"
+    },
+    "assignee": {
+      "displayName": "John Doe"
+    }
+  }
+}
+----
+
+== Pagination handling
+
+The Jira processor automatically handles pagination internally. The processor:
+
+. Makes the initial request with `startAt=0`.
+. Checks if more results are available.
+. Automatically fetches subsequent pages until all results are retrieved.
+. Outputs each issue as an individual message.
+
+You don't need to handle pagination manually.
+
+== Create and update Jira issues
+
+The Jira processor is read-only and supports querying only. To create or update Jira issues, use the xref:develop:connect/components/processors/http.adoc[`http` processor] with the Jira REST API.
+
+=== Create a Jira issue
+
+[source,yaml]
+----
+include::develop:example$cookbooks/jira/create-issue.yaml[tag=config,indent=0]
+----
+
+== See also
+
+- xref:develop:connect/components/processors/jira.adoc[Jira processor reference]
+- https://developer.atlassian.com/cloud/jira/platform/rest/v3/intro/[Jira REST API documentation^]
+- https://www.atlassian.com/software/jira/guides/jql[JQL query guide^]