/**
 * The copy shown inside a single metric tooltip.
 *
 * Exported so UI components can accept a tooltip as a typed prop instead of
 * re-declaring this shape inline.
 */
export interface MetricTooltip {
  /** Short heading rendered at the top of the tooltip. */
  title: string;
  /** Main explanatory sentence(s). */
  body: string;
  /** Optional secondary line rendered below the body (e.g. a caveat). */
  hint?: string;
}

/**
 * Centralized tooltip copy for forecast metrics.
 *
 * Keys follow the `tooltip.<metric_id>` convention and are looked up by id at
 * render time, so the map is intentionally typed as `Record<string, …>` to
 * allow dynamic string keys (a lookup with an unknown id yields `undefined`
 * under `noUncheckedIndexedAccess`).
 */
export const METRIC_TOOLTIPS: Record<string, MetricTooltip> = {
  'tooltip.model_probability': {
    title: 'Model Probability',
    body: "Rain Man's estimated chance (%) that the forecasted outcome occurs, based on our inputs and historical patterns.",
  },
  'tooltip.market_implied': {
    title: 'Market Implied',
    body: "The probability (%) implied by current markets. It's the market's baseline expectation after pricing.",
  },
  'tooltip.edge': {
    title: 'Edge',
    body: "The gap between Rain Man's probability and the market implied probability. Bigger Edge means a larger disagreement vs current markets.",
  },
  'tooltip.confidence_tier': {
    title: 'Confidence Tier',
    body: 'A stability rating for the forecast. Higher tiers reflect stronger agreement across inputs and fewer uncertainty flags.',
  },
  'tooltip.clv': {
    title: 'Closing Line Value (CLV)',
    body: 'CLV measures how your saved forecasts compared to the final market price. Positive CLV means you captured a better price than the close.',
    hint: 'CLV % shows the size of that improvement vs the closing market.',
  },
  'tooltip.perf_win_rate': {
    title: 'Win Rate',
    body: 'Percentage of graded forecasts that resulted in a win. Only forecasts with a qualifying confidence tier are included.',
    hint: 'A higher win rate across a large sample indicates consistent model accuracy.',
  },
  'tooltip.perf_game_win_rate': {
    title: 'Benchmark Win Rate',
    body: 'Percentage of benchmark-graded team forecasts that resulted in a win after pushes are removed from the denominator.',
    hint: 'This is the game forecast ledger only. Props are tracked separately.',
  },
  'tooltip.perf_prop_hit_rate': {
    title: 'Prop Hit Rate',
    body: 'Percentage of graded player props that resulted in a win inside the selected window.',
    hint: 'This view only uses player-prop rows with closing-line tracking.',
  },
  'tooltip.tier_criteria': {
    title: 'Confidence Tier Criteria',
    body: 'Confidence tiers group forecasts by published pick strength on a comparable scale. Game forecasts use Rain Man confidence bands, while prop tier buckets only include current PIFF 3.x rows and legacy lock-tagged rows that can be mapped cleanly.',
    hint: 'Games: A+ >= 85% · A >= 70% · B+ >= 55% · Older incompatible prop generations are excluded from tier buckets.',
  },
  'tooltip.perf_total_forecasts': {
    title: 'Selected Window Sample',
    body: 'The number of graded rows in the active 7-day or 30-day view.',
    hint: 'Props and games are split so the page does not mix incompatible grading systems.',
  },
  'tooltip.perf_avg_clv': {
    title: 'Average CLV',
    body: 'The mean Closing Line Value across the graded player-prop sample in this window.',
  },
  'tooltip.perf_clv_pos': {
    title: 'CLV+ Rate',
    body: 'The percentage of graded player props that achieved positive Closing Line Value. A CLV+ Rate above 50% indicates the model beats the closing market more often than not.',
  },
  'tooltip.perf_roi_status': {
    title: 'ROI Status',
    body: 'Public ROI is intentionally hidden until stake sizing and price history are normalized across benchmark-graded games and prop ledgers.',
    hint: 'Showing a fake blended ROI would be worse than showing none.',
  },
};
