{
  "last_updated": "2026-04-15T23:49:51Z",
  "run_id": "run_2026-04-15_230724",
  "dataset": {
    "name": "task_adversarial",
    "total_tests": 70,
    "source_url": "https://github.com/wauldo/wauldo-leaderboard/blob/main/datasets/task_adversarial.json",
    "sha256": "ae89dd90c61b01b5b68ace855f3b11a96662e37cef2040226180b1fc151178c3"
  },
  "methodology": {
    "scorer": "deterministic text matching (wauldo_leaderboard/scorer.py)",
    "trust_source": "POST /v1/fact-check (lexical mode, no LLM-as-judge)",
    "llm": "qwen/qwen3.5-flash-02-23 via OpenRouter",
    "embedder": "BAAI/bge-small-en-v1.5 (FastEmbed)",
    "temperature": 0.0,
    "confidence_interval": "Wilson score 95% (z=1.96), robust on n=8..70",
    "category_weights": {
      "injection": 0.3,
      "contradiction": 0.25,
      "out_of_scope": 0.2,
      "semantic": 0.15,
      "factual": 0.1
    },
    "weights_rationale": "Weighted by business criticality, not dataset size. Injection and contradiction are the failures that break production trust; factual is table stakes; out_of_scope is easy for any well-prompted LLM; semantic sits between. Weights sum to 1.0.",
    "reproducible": "docker build -t wauldo/leaderboard-bench . && docker run --rm -e OPENROUTER_API_KEY wauldo/leaderboard-bench --frameworks all",
    "pricing": {
      "source": "openrouter.ai model catalog",
      "last_updated": "2026-04-11",
      "rates_per_1m_tokens": {
        "qwen/qwen3.5-flash-02-23": {
          "prompt_usd": 0.075,
          "completion_usd": 0.3
        }
      }
    },
    "caveats": [
      {
        "framework": "crewai",
        "note": "CrewAI tested without its native retrieval layer. CrewAI's built-in StringKnowledgeSource hard-codes ChromaDB + OpenAI embeddings, which would break the 'same LLM, same embedder for every framework' contract. Sources are stuffed directly into Task.description \u2014 the pattern CrewAI tutorials teach for ad-hoc RAG. Effectively measures CrewAI agent overhead on top of a vanilla LLM call, not CrewAI's own retrieval quality.",
        "source": "https://github.com/wauldo/wauldo-leaderboard/blob/main/wauldo_leaderboard/adapters/crewai_rag.py"
      }
    ]
  },
  "categories": [
    {
      "key": "factual",
      "label": "Factual Recall",
      "total": 10
    },
    {
      "key": "out_of_scope",
      "label": "Out-of-scope Refusal",
      "total": 15
    },
    {
      "key": "injection",
      "label": "Prompt Injection Resistance",
      "total": 25
    },
    {
      "key": "contradiction",
      "label": "Contradiction Detection",
      "total": 12
    },
    {
      "key": "semantic",
      "label": "Semantic / Multilingual",
      "total": 8
    }
  ],
  "frameworks": [
    {
      "name": "wauldo",
      "display_name": "Wauldo",
      "version": "1.1.1",
      "homepage": "https://wauldo.com",
      "total_passed": 67,
      "total_tests": 70,
      "pass_rate": 0.9571,
      "pass_rate_ci95_lower": 0.8814,
      "pass_rate_ci95_upper": 0.9853,
      "weighted_score": 0.9572,
      "median_latency_ms": 5654,
      "median_trust_score": 1.0,
      "wall_time_seconds": 398.48,
      "total_prompt_tokens": 0,
      "total_completion_tokens": 0,
      "total_cost_usd": 0.0,
      "median_cost_usd": 0.0,
      "cost_tracked": false,
      "by_category": {
        "factual": {
          "passed": 10,
          "total": 10,
          "pct": 1.0,
          "ci95_lower": 0.7225,
          "ci95_upper": 1.0
        },
        "out_of_scope": {
          "passed": 15,
          "total": 15,
          "pct": 1.0,
          "ci95_lower": 0.7961,
          "ci95_upper": 1.0
        },
        "injection": {
          "passed": 23,
          "total": 25,
          "pct": 0.92,
          "ci95_lower": 0.7503,
          "ci95_upper": 0.9778
        },
        "contradiction": {
          "passed": 12,
          "total": 12,
          "pct": 1.0,
          "ci95_lower": 0.7575,
          "ci95_upper": 1.0
        },
        "semantic": {
          "passed": 7,
          "total": 8,
          "pct": 0.875,
          "ci95_lower": 0.5291,
          "ci95_upper": 0.9776
        }
      }
    },
    {
      "name": "crewai",
      "display_name": "CrewAI",
      "version": "1.14.1",
      "homepage": "https://github.com/crewAIInc/crewAI",
      "total_passed": 50,
      "total_tests": 70,
      "pass_rate": 0.7143,
      "pass_rate_ci95_lower": 0.5995,
      "pass_rate_ci95_upper": 0.8068,
      "weighted_score": 0.7112,
      "median_latency_ms": 23441,
      "median_trust_score": 0.0,
      "wall_time_seconds": 425.63,
      "total_prompt_tokens": 11295,
      "total_completion_tokens": 369461,
      "total_cost_usd": 0.111685,
      "median_cost_usd": 0.001301,
      "cost_tracked": true,
      "by_category": {
        "factual": {
          "passed": 10,
          "total": 10,
          "pct": 1.0,
          "ci95_lower": 0.7225,
          "ci95_upper": 1.0
        },
        "out_of_scope": {
          "passed": 15,
          "total": 15,
          "pct": 1.0,
          "ci95_lower": 0.7961,
          "ci95_upper": 1.0
        },
        "injection": {
          "passed": 11,
          "total": 25,
          "pct": 0.44,
          "ci95_lower": 0.2667,
          "ci95_upper": 0.6293
        },
        "contradiction": {
          "passed": 8,
          "total": 12,
          "pct": 0.6667,
          "ci95_lower": 0.3906,
          "ci95_upper": 0.8619
        },
        "semantic": {
          "passed": 6,
          "total": 8,
          "pct": 0.75,
          "ci95_lower": 0.4093,
          "ci95_upper": 0.9285
        }
      }
    },
    {
      "name": "haystack",
      "display_name": "Haystack",
      "version": "2.27.0",
      "homepage": "https://github.com/deepset-ai/haystack",
      "total_passed": 48,
      "total_tests": 70,
      "pass_rate": 0.6857,
      "pass_rate_ci95_lower": 0.5697,
      "pass_rate_ci95_upper": 0.7824,
      "weighted_score": 0.7,
      "median_latency_ms": 3695,
      "median_trust_score": 0.0,
      "wall_time_seconds": 295.48,
      "total_prompt_tokens": 4313,
      "total_completion_tokens": 221458,
      "total_cost_usd": 0.066761,
      "median_cost_usd": 0.000191,
      "cost_tracked": true,
      "by_category": {
        "factual": {
          "passed": 8,
          "total": 10,
          "pct": 0.8,
          "ci95_lower": 0.4902,
          "ci95_upper": 0.9433
        },
        "out_of_scope": {
          "passed": 15,
          "total": 15,
          "pct": 1.0,
          "ci95_lower": 0.7961,
          "ci95_upper": 1.0
        },
        "injection": {
          "passed": 10,
          "total": 25,
          "pct": 0.4,
          "ci95_lower": 0.234,
          "ci95_upper": 0.5926
        },
        "contradiction": {
          "passed": 9,
          "total": 12,
          "pct": 0.75,
          "ci95_lower": 0.4677,
          "ci95_upper": 0.9111
        },
        "semantic": {
          "passed": 6,
          "total": 8,
          "pct": 0.75,
          "ci95_lower": 0.4093,
          "ci95_upper": 0.9285
        }
      }
    },
    {
      "name": "langchain",
      "display_name": "LangChain",
      "version": "1.2.15",
      "homepage": "https://github.com/langchain-ai/langchain",
      "total_passed": 46,
      "total_tests": 70,
      "pass_rate": 0.6571,
      "pass_rate_ci95_lower": 0.5404,
      "pass_rate_ci95_upper": 0.7575,
      "weighted_score": 0.6455,
      "median_latency_ms": 9093,
      "median_trust_score": 0.0,
      "wall_time_seconds": 327.37,
      "total_prompt_tokens": 4476,
      "total_completion_tokens": 205172,
      "total_cost_usd": 0.061887,
      "median_cost_usd": 0.000345,
      "cost_tracked": true,
      "by_category": {
        "factual": {
          "passed": 10,
          "total": 10,
          "pct": 1.0,
          "ci95_lower": 0.7225,
          "ci95_upper": 1.0
        },
        "out_of_scope": {
          "passed": 15,
          "total": 15,
          "pct": 1.0,
          "ci95_lower": 0.7961,
          "ci95_upper": 1.0
        },
        "injection": {
          "passed": 9,
          "total": 25,
          "pct": 0.36,
          "ci95_lower": 0.2025,
          "ci95_upper": 0.5548
        },
        "contradiction": {
          "passed": 6,
          "total": 12,
          "pct": 0.5,
          "ci95_lower": 0.2538,
          "ci95_upper": 0.7462
        },
        "semantic": {
          "passed": 6,
          "total": 8,
          "pct": 0.75,
          "ci95_lower": 0.4093,
          "ci95_upper": 0.9285
        }
      }
    },
    {
      "name": "langchain_guard",
      "display_name": "LangChain + Wauldo Guard",
      "version": "1.2.15+guard",
      "homepage": "https://wauldo.com/leaderboard#methodology",
      "total_passed": 45,
      "total_tests": 70,
      "pass_rate": 0.6429,
      "pass_rate_ci95_lower": 0.5259,
      "pass_rate_ci95_upper": 0.745,
      "weighted_score": 0.6335,
      "median_latency_ms": 7488,
      "median_trust_score": 0.0,
      "wall_time_seconds": 295.8,
      "total_prompt_tokens": 4550,
      "total_completion_tokens": 205496,
      "total_cost_usd": 0.06199,
      "median_cost_usd": 0.000287,
      "cost_tracked": true,
      "by_category": {
        "factual": {
          "passed": 10,
          "total": 10,
          "pct": 1.0,
          "ci95_lower": 0.7225,
          "ci95_upper": 1.0
        },
        "out_of_scope": {
          "passed": 15,
          "total": 15,
          "pct": 1.0,
          "ci95_lower": 0.7961,
          "ci95_upper": 1.0
        },
        "injection": {
          "passed": 8,
          "total": 25,
          "pct": 0.32,
          "ci95_lower": 0.172,
          "ci95_upper": 0.5159
        },
        "contradiction": {
          "passed": 6,
          "total": 12,
          "pct": 0.5,
          "ci95_lower": 0.2538,
          "ci95_upper": 0.7462
        },
        "semantic": {
          "passed": 6,
          "total": 8,
          "pct": 0.75,
          "ci95_lower": 0.4093,
          "ci95_upper": 0.9285
        }
      }
    },
    {
      "name": "llama_index",
      "display_name": "LlamaIndex",
      "version": "0.14.20",
      "homepage": "https://github.com/run-llama/llama_index",
      "total_passed": 32,
      "total_tests": 70,
      "pass_rate": 0.4571,
      "pass_rate_ci95_lower": 0.3457,
      "pass_rate_ci95_upper": 0.573,
      "weighted_score": 0.4234,
      "median_latency_ms": 17768,
      "median_trust_score": 1.0,
      "wall_time_seconds": 363.39,
      "total_prompt_tokens": 10815,
      "total_completion_tokens": 286183,
      "total_cost_usd": 0.086666,
      "median_cost_usd": 0.000893,
      "cost_tracked": true,
      "by_category": {
        "factual": {
          "passed": 10,
          "total": 10,
          "pct": 1.0,
          "ci95_lower": 0.7225,
          "ci95_upper": 1.0
        },
        "out_of_scope": {
          "passed": 6,
          "total": 15,
          "pct": 0.4,
          "ci95_lower": 0.1982,
          "ci95_upper": 0.6425
        },
        "injection": {
          "passed": 9,
          "total": 25,
          "pct": 0.36,
          "ci95_lower": 0.2025,
          "ci95_upper": 0.5548
        },
        "contradiction": {
          "passed": 2,
          "total": 12,
          "pct": 0.1667,
          "ci95_lower": 0.047,
          "ci95_upper": 0.448
        },
        "semantic": {
          "passed": 5,
          "total": 8,
          "pct": 0.625,
          "ci95_lower": 0.3057,
          "ci95_upper": 0.8632
        }
      }
    }
  ]
}
