#!/usr/bin/env python3
"""
╔══════════════════════════════════════════════════════════════════════╗
║  🧪 CRASH-TEST 10 JOURS — Comparatif AVANT/APRÈS modifications      ║
║                                                                      ║
║  Scénarios testés :                                                  ║
║    BASELINE : code avant les modifs (no new filters)                 ║
║    +SLIPPAGE : + garde bookTicker (MAX_SLIPPAGE 0.5%)                ║
║    +DEC_FILT : + pre-filtre surge_faible_déclin (FLASH<1.5%+24h<-3%)║
║    +LT_SURGE : + détecteur LONG_TREND_SURGE (36-72min)               ║
║    CURRENT   : tous les changements actifs (= code prod actuel)      ║
║                                                                      ║
║  Données : Binance API publique, klines 1-min, 10 derniers jours     ║
╚══════════════════════════════════════════════════════════════════════╝
"""
import json
import os
import time
import requests
from datetime import datetime, timedelta, timezone
from collections import defaultdict

# ═══════════════════════════════════════════════════════════════════════
# PARAMETERS (extracted from market_spy.py — current production values)
# ═══════════════════════════════════════════════════════════════════════
POSITION_SIZE   = 10.0          # USDT per trade (real prod size)
MAX_POSITIONS   = 1             # Max 1 simultaneous position
MAX_TRADES_HOUR = 3             # Hourly limit — NOTE(review): not enforced in this backtest
DAYS            = 10            # Test period (days)

# ENTRY — spy thresholds (1-min bar ≈ 8 scans × 7s)
SURGE_MIN_PRICE_CHANGE   = 1.0   # FLASH: +1% in 1 candle
SURGE_MIN_PRICE_CHANGE_2 = 1.5   # BREAKOUT: +1.5% in 2 candles
SURGE_MIN_VOLUME_RATIO   = 3.0   # Volume ≥ 3× average
MOMENTUM_MIN_CHANGE_20   = 4.0   # MOMENTUM: +4% over ~2.5 min (20 scans ≈ 2-3 one-min candles)
MOMENTUM_MIN_CHANGE_40   = 6.0   # MOMENTUM: +6% over ~5 min (40 scans ≈ 4-5 one-min candles)
SURGE_MAX_DECLINE_24H    = -15.0 # Baseline: block entry if coin is down more than 15% over 24h
SURGE_MAX_ALREADY_PUMPED = 50.0  # Block entry if coin is already up more than 50% over 24h

# LONG_TREND (TYPE 4 — new detector)
LT_MIN_RISE        = 10.0   # Min +10% between first and last snapshot
LT_SNAPSHOT_BARS   = 6      # One snapshot every 6 candles (≈ 6min)
LT_MIN_SNAPSHOTS   = 6      # Min 6 snapshots → ~36min window
LT_MAX_SNAPSHOTS   = 12     # Max 12 snapshots → ~72min window
LT_COOLDOWN_BARS   = 60     # 60 candles between 2 LT entries on the same coin
LT_MAX_PER_HOUR    = 2      # Max LONG_TREND entries per rolling hour

# EXIT — stop-loss / trailing / time-based thresholds (percentages / minutes)
HARD_SL_PCT              = -1.2     # Hard stop-loss (%)
TRAILING_ACTIVATION      = 1.0      # Peak PnL (%) at which trailing stop activates
MAX_HOLD_MINUTES         = 15       # Maximum holding time (minutes)
STAGNATION_MINUTES       = 10       # Exit if still flat after this many minutes
STAGNATION_THRESHOLD     = 0.5      # |PnL| below this (%) counts as stagnation
MOMENTUM_EXIT_DROPS      = 3        # Consecutive red candles triggering momentum exit
VOLUME_EXIT_SELL_RATIO   = 0.38     # Taker-buy ratio below this → sell-pressure exit
EARLY_SL_MAX_PEAK        = 0.2      # Early SL only applies if peak PnL stayed below this (%)
EARLY_SL_PCT             = -0.5     # Early stop-loss level (%)
# (peak-PnL upper bound %, trailing distance %) pairs, scanned in order by get_trail_pct
TRAIL_TIERS = [
    (0.5, 0.3), (1.0, 0.7), (2.0, 1.0), (3.0, 1.8),
    (5.0, 2.0), (10.0, 2.5), (15.0, 4.0), (999, 5.0)
]

# FLASH_SURGE blacklist (always active, every scenario)
FLASH_SURGE_BLACKLIST = {'NOMUSDT', 'NOMUSDC'}

# On-disk kline cache directory (sibling of this script)
CACHE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "crashtest_cache_10d")

# ═══════════════════════════════════════════════════════════════════════
# COINS TO TEST (25 representative watchlist coins)
# ═══════════════════════════════════════════════════════════════════════
SYMBOLS = [
    # Coins actually traded in prod
    'NOMUSDT', 'ENJUSDT', 'DASHUSDT', 'FLOKIUSDT',
    # High-volume altcoins present in the watchlist
    'SEIUSDT', 'ANKRUSDT', 'ONTUSDT', 'CHRUSDT',
    'BLURUSDT', 'HEIUSDT', 'CETUSUSDT', 'COSUSDT',
    # Majors for context — NOTE(review): comment said BTC/ETH/SOL but only SOL is listed
    'SOLUSDT', 'ADAUSDT', 'DOTUSDT', 'XRPUSDT',
    # Known momentum coins
    'THETAUSDT', 'INJUSDT', 'AXSUSDT', 'WIFUSDT',
    # Volatile small caps
    'KERNELUSDT', 'CTSIUSDT', 'GASUSDT', 'PHAUSDT', 'REZUSDT',
]


# ═══════════════════════════════════════════════════════════════════════
# TÉLÉCHARGEMENT
# ═══════════════════════════════════════════════════════════════════════
def fetch_klines(symbol: str, days: int) -> list:
    """Download 1-min klines for the last *days* days from the public Binance API.

    Results are cached on disk for 2 hours (one JSON file per symbol/period).

    Args:
        symbol: Binance trading pair, e.g. 'SOLUSDT'.
        days:   number of days of history to fetch.

    Returns:
        List of candle dicts (ts/open/high/low/close/volume/quote_volume/
        trades/taker_buy_quote), possibly partial or empty on network failure.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    cache_file = os.path.join(CACHE_DIR, f"{symbol}_1m_{days}d.json")

    # Serve from cache when it is fresher than 2 hours; otherwise evict it.
    if os.path.exists(cache_file):
        age = time.time() - os.path.getmtime(cache_file)
        if age < 7200:
            with open(cache_file) as f:
                return json.load(f)
        os.remove(cache_file)

    end_ms   = int(time.time() * 1000)
    start_ms = end_ms - days * 86400 * 1000
    url = "https://api.binance.com/api/v3/klines"

    all_klines = []
    complete = True          # becomes False if the download aborts mid-way
    cur = start_ms
    while cur < end_ms:
        try:
            # Binance caps klines at 500 per request → page in 499-bar windows.
            resp = requests.get(url, params={
                "symbol": symbol, "interval": "1m",
                "startTime": cur,
                "endTime": min(cur + 499 * 60000, end_ms),
                "limit": 500
            }, timeout=15)
            resp.raise_for_status()
            batch = resp.json()
            if not batch:
                break
            for k in batch:
                all_klines.append({
                    'ts':               int(k[0]),
                    'open':             float(k[1]),
                    'high':             float(k[2]),
                    'low':              float(k[3]),
                    'close':            float(k[4]),
                    'volume':           float(k[5]),
                    'quote_volume':     float(k[7]),
                    'trades':           int(k[8]),
                    'taker_buy_quote':  float(k[10]),
                })
            cur = batch[-1][0] + 60000   # resume right after the last bar
            time.sleep(0.08)             # stay under Binance rate limits
        except Exception as e:
            # Best-effort: log, back off briefly, and return what we have.
            print(f"    ⚠️  {symbol} err: {e}")
            time.sleep(1)
            complete = False
            break

    # BUGFIX: only persist complete, non-empty downloads. Previously a failed
    # or partial fetch was written to the cache and served stale for the next
    # 2 hours, silently shrinking the backtest data set.
    if all_klines and complete:
        with open(cache_file, 'w') as f:
            json.dump(all_klines, f)
    return all_klines


# ═══════════════════════════════════════════════════════════════════════
# HELPERS
# ═══════════════════════════════════════════════════════════════════════
def get_trail_pct(pnl_pct: float, tiers=None) -> float:
    """Return the trailing-stop distance (%) for the current peak PnL.

    The tier table is a list of (upper_threshold, trail_pct) pairs scanned in
    order; the first tier whose threshold exceeds *pnl_pct* wins.

    Args:
        pnl_pct: peak unrealized PnL in percent.
        tiers:   optional tier-table override (defaults to module TRAIL_TIERS);
                 added for testability/generalization, backward-compatible.

    Returns:
        Trailing distance in percent.
    """
    for thr, trail in (TRAIL_TIERS if tiers is None else tiers):
        if pnl_pct < thr:
            return trail
    # Fallback — unreachable with the default table (last threshold is 999).
    return 5.0


def change24h(klines: list, idx: int) -> float:
    """Percent change of close at *idx* vs. the close 1440 bars (24h) earlier.

    Returns 0.0 when less than 24h of history is available or the reference
    close is not strictly positive.
    """
    if idx < 1440:
        return 0.0
    ref_close = klines[idx - 1440]['close']
    if ref_close <= 0:
        return 0.0
    return (klines[idx]['close'] - ref_close) / ref_close * 100


# ═══════════════════════════════════════════════════════════════════════
# DÉTECTION SURGE (5 variantes, activables par flags)
# ═══════════════════════════════════════════════════════════════════════
def detect_surge(klines, idx, symbol,
                 use_slippage_guard=False,
                 use_decline_filter=False,
                 use_long_trend=False,
                 lt_snapshots=None, lt_last_snap_idx=None, lt_cd_idx=None, lt_hour=None):
    """
    Detect a surge signal on klines[idx].

    Runs the entry detectors (FLASH, BREAKOUT, MOMENTUM, optional LONG_TREND)
    followed by the confirmation and pre-filter pipeline, in the same order as
    production. Returns a signal dict or None.

    Args:
        klines: list of 1-min candle dicts (open/high/low/close/quote_volume/
                taker_buy_quote/ts, ...).
        idx: index of the candle to evaluate (requires >= 45 bars of history).
        symbol: trading pair — used for the blacklist and LONG_TREND state.
        use_slippage_guard: simulate the bookTicker guard via the gap to the
            open of candle idx+1 (MAX slippage 0.5%).
        use_decline_filter: pre-filter weak FLASH (<1.5%) when 24h change < -3%.
        use_long_trend: enable TYPE 4 LONG_TREND_SURGE.
        lt_snapshots: {symbol: [close, ...]} rolling snapshot prices.
        lt_last_snap_idx: {symbol: idx} of the last snapshot —
            NOTE(review): accepted but never read in this function.
        lt_cd_idx: {symbol: idx} of the last LONG_TREND entry (cooldown).
        lt_hour: list of past LONG_TREND entry indices (hourly cap).

    Returns:
        dict with keys type/strength/price/p24/ts/vol_ratio, or None.
    """
    # Need history for the 20-bar volume average plus short lookbacks.
    if idx < 45:
        return None

    curr = klines[idx]
    prev = klines[idx - 1]

    # 1- and 2-candle close-to-close changes (%) ending at idx.
    change_1 = (curr['close'] - prev['close']) / prev['close'] * 100
    change_2 = (curr['close'] - klines[idx - 2]['close']) / klines[idx - 2]['close'] * 100

    # Current quote volume vs. the trailing 20-bar average (current bar excluded).
    avg_vol = sum(k['quote_volume'] for k in klines[idx - 20:idx]) / 20
    vol_ratio = curr['quote_volume'] / avg_vol if avg_vol > 0 else 0

    # ── TYPE 1 FLASH: +1% in one candle on 3× volume ──
    surge_type = None
    surge_strength = 0.0

    if change_1 >= SURGE_MIN_PRICE_CHANGE and vol_ratio >= SURGE_MIN_VOLUME_RATIO:
        surge_type = 'FLASH_SURGE'
        surge_strength = change_1

    # ── TYPE 2 BREAKOUT: +1.5% over two candles, still rising, 2× volume ──
    elif change_2 >= SURGE_MIN_PRICE_CHANGE_2 and change_1 >= 0.5 and vol_ratio >= 2.0:
        surge_type = 'BREAKOUT_SURGE'
        surge_strength = change_2

    # ── TYPE 3 MOMENTUM: sustained +4%/3min or +6%/5min move ──
    if surge_type is None and idx >= 5:
        change_3m = (curr['close'] - klines[idx - 3]['close']) / klines[idx - 3]['close'] * 100
        change_5m = (curr['close'] - klines[idx - 5]['close']) / klines[idx - 5]['close'] * 100
        if change_1 >= 0.3:
            mom_detected = change_3m >= MOMENTUM_MIN_CHANGE_20 or change_5m >= MOMENTUM_MIN_CHANGE_40
            if mom_detected and vol_ratio >= 1.5:
                surge_type = 'MOMENTUM_SURGE'
                surge_strength = max(change_3m, change_5m)

    # ── TYPE 4 LONG_TREND (new): slow +10% climb over ~36-72 min of snapshots ──
    if surge_type is None and use_long_trend and lt_snapshots is not None:
        snaps = lt_snapshots.get(symbol, [])
        if len(snaps) >= LT_MIN_SNAPSHOTS:
            oldest_price = snaps[0]
            lt_rise = (curr['close'] - oldest_price) / oldest_price * 100
            if lt_rise >= LT_MIN_RISE and change_1 >= 0.1:
                # Monotonicity: at least 60% of consecutive snapshot
                # transitions must be non-decreasing.
                n_pos = sum(1 for a, b in zip(snaps, snaps[1:]) if b >= a)
                mono = n_pos / (len(snaps) - 1) if len(snaps) > 1 else 0
                if mono >= 0.6:
                    # Cooldown passes if LT never fired for this symbol (0
                    # sentinel) or the last one is >= LT_COOLDOWN_BARS ago.
                    cd_ok = (lt_cd_idx.get(symbol, 0) == 0 or idx - lt_cd_idx[symbol] >= LT_COOLDOWN_BARS)
                    # Hourly cap: fewer than LT_MAX_PER_HOUR LT entries in the last 60 bars.
                    hour_ok = sum(1 for t in (lt_hour or []) if idx - t < 60) < LT_MAX_PER_HOUR
                    if cd_ok and hour_ok:
                        surge_type = 'LONG_TREND_SURGE'
                        surge_strength = round(lt_rise, 2)

    if surge_type is None:
        return None

    # ─── CONFIRM: need >= 2 green candles of the last 3, unless strength >= 3% ───
    if idx >= 3:
        last3 = klines[idx - 2:idx + 1]
        green = sum(1 for k in last3 if k['close'] > k['open'])
        if green < 2 and surge_strength < 3.0:
            return None

    # ─── CONFIRM: taker-buy ratio of the entry candle must be >= 35% ───
    if curr['quote_volume'] > 0:
        buy_r = curr['taker_buy_quote'] / curr['quote_volume']
        if buy_r < 0.35:
            return None

    # ─── CONFIRM: reject long upper wicks (> 2.5× body) on weak surges ───
    body = abs(curr['close'] - curr['open'])
    upper_wick = curr['high'] - max(curr['close'], curr['open'])
    if body > 0 and upper_wick > 2.5 * body and surge_strength < 3.0:
        return None

    # ─── 24h PRE-FILTERS ───
    p24 = change24h(klines, idx)

    # Already pumped: skip coins up more than SURGE_MAX_ALREADY_PUMPED over 24h.
    if p24 > SURGE_MAX_ALREADY_PUMPED:
        return None

    # Downtrend (baseline -15% threshold); strong FLASH (>= 2%) is exempt.
    if p24 < SURGE_MAX_DECLINE_24H:
        if not (surge_type == 'FLASH_SURGE' and surge_strength >= 2.0):
            return None

    # ─── FLASH blacklist (always active) ───
    if surge_type == 'FLASH_SURGE' and symbol in FLASH_SURGE_BLACKLIST:
        return None

    # ─── [NEW] Pre-filter: weak FLASH surge while the coin declines over 24h ───
    if use_decline_filter:
        if surge_type == 'FLASH_SURGE' and surge_strength < 1.5 and p24 < -3.0:
            return None

    # ─── [NEW] Simulated slippage guard ───
    # Approximates the bookTicker guard using the gap between the entry
    # candle's close and the next candle's open (a proxy for the real ask at
    # order time). Rejects entries with > 0.5% simulated slippage.
    if use_slippage_guard and idx + 1 < len(klines):
        next_open = klines[idx + 1]['open']
        simulated_slippage = (next_open - curr['close']) / curr['close'] * 100
        if simulated_slippage > 0.5:
            return None

    return {
        'type':     surge_type,
        'strength': surge_strength,
        'price':    curr['close'],
        'p24':      p24,
        'ts':       curr['ts'],
        'vol_ratio': vol_ratio,
    }


# ═══════════════════════════════════════════════════════════════════════
# SIMULATION EXIT
# ═══════════════════════════════════════════════════════════════════════
def simulate_exit(klines, entry_idx, entry_price, surge_strength):
    """
    Simulate the exit logic for a position opened at klines[entry_idx].

    Walks forward candle by candle (up to MAX_HOLD_MINUTES); inside each candle
    the OHLC values are replayed as four "ticks" in open→high→low→close order —
    an approximation of the intrabar path, since the true tick sequence is
    unknown. End-of-candle rules (stagnation, momentum, reversal, volume,
    max-hold) are then evaluated on the close.

    Args:
        klines: full candle list.
        entry_idx: index of the entry candle.
        entry_price: fill price (entry candle close).
        surge_strength: signal strength (%), scales the early stop-loss.

    Returns:
        (exit_price, exit_idx, reason, pnl_pct) — reason is one of
        INSTANT_REV / EARLY_SL / HARD_SL / TRAILING / STAGNATION /
        MOMENTUM_EXIT / REVERSAL / VOL_ROUGE / MAX_HOLD / TIMEOUT.
    """
    max_price = entry_price
    max_pnl_pct = 0.0
    trailing_stop_price = 0.0

    for i in range(entry_idx + 1, min(entry_idx + MAX_HOLD_MINUTES + 2, len(klines))):
        candle = klines[i]
        hold = i - entry_idx  # bars held so far

        # Intrabar tick replay (approximate path: open, high, low, close).
        for tick_price in [candle['open'], candle['high'], candle['low'], candle['close']]:
            pnl = (tick_price - entry_price) / entry_price * 100

            # Track the running peak for trailing / early-SL decisions.
            if tick_price > max_price:
                max_price = tick_price
                max_pnl_pct = (max_price - entry_price) / entry_price * 100

            # Instant reversal: drops -0.6% within the first bar with no peak.
            if hold <= 1 and max_pnl_pct < 0.05 and pnl <= -0.6:
                return tick_price, i, 'INSTANT_REV', pnl

            # Early SL: the trade never got going — cut it before the hard SL.
            # Level scales with surge strength, floored near the hard SL.
            if hold >= 1 and max_pnl_pct < EARLY_SL_MAX_PEAK:
                early_sl = max(EARLY_SL_PCT, -surge_strength * 0.8, HARD_SL_PCT * 0.9)
                if pnl <= early_sl:
                    return tick_price, i, 'EARLY_SL', pnl

            # Hard stop-loss.
            if pnl <= HARD_SL_PCT:
                return tick_price, i, 'HARD_SL', pnl

            # Trailing stop: active once peak PnL >= TRAILING_ACTIVATION;
            # the stop only ever ratchets upward.
            if max_pnl_pct >= TRAILING_ACTIVATION:
                trail_pct = get_trail_pct(max_pnl_pct)
                new_ts = max_price * (1 - trail_pct / 100)
                trailing_stop_price = max(trailing_stop_price, new_ts)
                if tick_price <= trailing_stop_price:
                    return tick_price, i, 'TRAILING', pnl

        # ── End-of-candle rules (evaluated on the close) ──
        cur_pnl = (candle['close'] - entry_price) / entry_price * 100

        # Stagnation: still flat after STAGNATION_MINUTES.
        if hold >= STAGNATION_MINUTES and abs(cur_pnl) < STAGNATION_THRESHOLD:
            return candle['close'], i, 'STAGNATION', cur_pnl

        # Momentum exit: 3 consecutive red candles while PnL < 5%.
        if i >= entry_idx + 3 and cur_pnl < 5.0:
            reds = sum(1 for k in klines[i - 2:i + 1] if k['close'] < k['open'])
            if reds >= MOMENTUM_EXIT_DROPS:
                return candle['close'], i, 'MOMENTUM_EXIT', cur_pnl

        # Reversal: mid-size peak (3-8%) retraced back below +1%.
        if max_pnl_pct >= 3.0 and cur_pnl < 1.0 and max_pnl_pct < 8.0:
            return candle['close'], i, 'REVERSAL', cur_pnl

        # Sell-pressure exit: taker-buy ratio collapsed while PnL < 1%.
        if hold >= 2 and candle['quote_volume'] > 0:
            buy_r = candle['taker_buy_quote'] / candle['quote_volume']
            if buy_r < VOLUME_EXIT_SELL_RATIO and cur_pnl < 1.0:
                return candle['close'], i, 'VOL_ROUGE', cur_pnl

        # Max hold reached without a strong gain.
        if hold >= MAX_HOLD_MINUTES and cur_pnl < 3.0:
            return candle['close'], i, 'MAX_HOLD', cur_pnl

    # No rule fired (e.g. strong uptrend) — exit at the max-hold close.
    last_idx = min(entry_idx + MAX_HOLD_MINUTES, len(klines) - 1)
    last_price = klines[last_idx]['close']
    pnl = (last_price - entry_price) / entry_price * 100
    return last_price, last_idx, 'TIMEOUT', pnl


# ═══════════════════════════════════════════════════════════════════════
# BACKTEST D'UN SCÉNARIO SUR UN SYMBOLE
# ═══════════════════════════════════════════════════════════════════════
def backtest_symbol(symbol, klines, scenario_flags):
    """
    Backtest one scenario over one symbol's kline history.

    Scans every bar (after a 45-bar warm-up), asks detect_surge() for a
    signal, and replays the exit with simulate_exit(). A 5-bar cooldown after
    each exit prevents immediate re-entry. Only one position at a time is
    simulated (MAX_POSITIONS = 1 semantics follow from the sequential scan).

    Args:
        symbol: trading pair.
        klines: 1-min candle list for that pair.
        scenario_flags: dict with keys use_slippage_guard, use_decline_filter,
            use_long_trend (missing keys default to False).

    Returns:
        List of trade dicts (entry/exit timestamps and prices, surge type and
        strength, exit reason, hold time, PnL in % and USDT).
    """
    trades        = []
    last_exit_idx = 0
    cooldown_bars = 5  # bars to wait after an exit before re-entering

    # LONG_TREND state: per-symbol snapshot prices, cooldown index, hourly log.
    lt_snapshots     = {symbol: []}
    lt_last_snap_idx = {symbol: 0}
    lt_cd_idx        = {symbol: 0}
    lt_hour          = []
    snap_ctr         = 0

    for idx in range(45, len(klines) - MAX_HOLD_MINUTES - 2):
        # Accumulate LONG_TREND snapshots (runs regardless of open trades).
        if scenario_flags.get('use_long_trend'):
            snap_ctr += 1
            if snap_ctr >= LT_SNAPSHOT_BARS:
                snap_ctr = 0
                snaps = lt_snapshots[symbol]
                snaps.append(klines[idx]['close'])
                # Keep only the last LT_MAX_SNAPSHOTS (~72 min window).
                if len(snaps) > LT_MAX_SNAPSHOTS:
                    snaps.pop(0)
                lt_last_snap_idx[symbol] = idx

        # Skip bars inside the post-exit cooldown window.
        if idx <= last_exit_idx + cooldown_bars:
            continue

        sig = detect_surge(
            klines, idx, symbol,
            use_slippage_guard=scenario_flags.get('use_slippage_guard', False),
            use_decline_filter=scenario_flags.get('use_decline_filter', False),
            use_long_trend=scenario_flags.get('use_long_trend', False),
            lt_snapshots=lt_snapshots,
            lt_last_snap_idx=lt_last_snap_idx,
            lt_cd_idx=lt_cd_idx,
            lt_hour=lt_hour,
        )
        if sig is None:
            continue

        entry_price = sig['price']
        exit_price, exit_idx, reason, pnl_pct = simulate_exit(
            klines, idx, entry_price, sig['strength']
        )

        # Update LT state after a LONG_TREND trade: start the cooldown, log
        # the entry for the hourly cap, and reset the snapshot window.
        if sig['type'] == 'LONG_TREND_SURGE' and scenario_flags.get('use_long_trend'):
            lt_cd_idx[symbol] = idx
            lt_hour.append(idx)
            lt_snapshots[symbol].clear()

        pnl_usdt = pnl_pct / 100 * POSITION_SIZE
        trades.append({
            'symbol':   symbol,
            'entry_ts': klines[idx]['ts'],
            'exit_ts':  klines[exit_idx]['ts'],
            'entry_price': entry_price,
            'exit_price':  exit_price,
            'surge_type':  sig['type'],
            'surge_strength': round(sig['strength'], 2),
            'p24h':       round(sig['p24'], 1),
            'vol_ratio':  round(sig['vol_ratio'], 1),
            'reason':     reason,
            'hold_bars':  exit_idx - idx,
            'pnl_pct':    round(pnl_pct, 2),
            'pnl_usdt':   round(pnl_usdt, 4),
        })
        last_exit_idx = exit_idx

    return trades


# ═══════════════════════════════════════════════════════════════════════
# RÉSUMÉ STATISTIQUE
# ═══════════════════════════════════════════════════════════════════════
def summarize(trades, label):
    """Aggregate a list of trade dicts into summary statistics.

    Args:
        trades: trade dicts carrying at least 'pnl_usdt' and 'exit_ts'.
        label:  scenario name echoed back in the result.

    Returns:
        dict with keys label, n, wins, losses, win_rate (%), total_pnl,
        avg_pnl, avg_win, avg_loss, profit_factor, max_dd — always the same
        keys. (BUGFIX: the empty-trades branch previously omitted
        avg_win/avg_loss, returning an inconsistent schema.)
    """
    if not trades:
        print(f"  {label:20} | 0 trades")
        return {
            'label': label, 'n': 0, 'wins': 0, 'losses': 0,
            'win_rate': 0, 'total_pnl': 0, 'avg_pnl': 0,
            'avg_win': 0, 'avg_loss': 0,
            'profit_factor': 0, 'max_dd': 0,
        }
    wins   = [t for t in trades if t['pnl_usdt'] > 0]
    losses = [t for t in trades if t['pnl_usdt'] <= 0]
    total_pnl  = sum(t['pnl_usdt'] for t in trades)
    win_rate   = len(wins) / len(trades) * 100
    avg_win    = sum(t['pnl_usdt'] for t in wins)  / len(wins)   if wins   else 0
    avg_loss   = sum(t['pnl_usdt'] for t in losses)/ len(losses) if losses else 0
    # Profit factor = gross wins / |gross losses|; 99.9 sentinel when lossless.
    loss_sum   = sum(t['pnl_usdt'] for t in losses)
    profit_fac = abs(sum(t['pnl_usdt'] for t in wins) / loss_sum) if loss_sum != 0 else 99.9

    # Max drawdown of the equity curve, trades replayed in exit-time order.
    running, peak, max_dd = 0, 0, 0
    for t in sorted(trades, key=lambda x: x['exit_ts']):
        running += t['pnl_usdt']
        peak   = max(peak, running)
        max_dd = min(max_dd, running - peak)

    return {
        'label': label, 'n': len(trades),
        'wins': len(wins), 'losses': len(losses),
        'win_rate': round(win_rate, 1),
        'total_pnl': round(total_pnl, 4),
        'avg_pnl': round(total_pnl / len(trades), 4),
        'avg_win': round(avg_win, 4),
        'avg_loss': round(avg_loss, 4),
        'profit_factor': round(profit_fac, 2),
        'max_dd': round(max_dd, 4),
    }


def print_result_table(results):
    """Pretty-print one summary row per scenario (dicts from summarize())."""
    header = (
        f"{'Scénario':<22} | {'Trades':>6} | {'WR%':>6} | {'PnL $':>8} | "
        f"{'PF':>5} | {'MaxDD $':>7} | {'avg/trade':>9}"
    )
    print(header)
    print("─" * len(header))
    for row in results:
        # Empty scenarios render as a dash-filled placeholder row.
        if row['n'] == 0:
            print(f"  {row['label']:<20} | {'0':>6} | {'—':>6} | {'—':>8} | {'—':>5} | {'—':>7} | {'—':>9}")
            continue
        win_loss = f"{row['wins']}W/{row['losses']}L"
        body = (
            f"  {row['label']:<20} | {row['n']:>6} | {row['win_rate']:>5.1f}% |"
            f" {row['total_pnl']:>+8.4f} | {row['profit_factor']:>5.2f} | {row['max_dd']:>+7.4f} |"
            f" {row['avg_pnl']:>+9.4f}  ({win_loss})"
        )
        print(body)


def print_blocked_trades(baseline_trades, current_trades, klines_map):
    """Print trades present in BASELINE but blocked in CURRENT, and vice versa.

    A trade is identified across scenarios by its (symbol, entry_ts) pair.
    Shows the 20 worst blocked trades (PnL ascending) and the 10 best trades
    added by LONG_TREND_SURGE (PnL descending).

    Args:
        baseline_trades: trade dicts from the BASELINE scenario.
        current_trades:  trade dicts from the CURRENT scenario.
        klines_map: accepted for interface compatibility — currently unused.
    """
    baseline_keys = {(t['symbol'], t['entry_ts']) for t in baseline_trades}
    current_keys  = {(t['symbol'], t['entry_ts']) for t in current_trades}
    blocked = [t for t in baseline_trades if (t['symbol'], t['entry_ts']) not in current_keys]
    added   = [t for t in current_trades  if (t['symbol'], t['entry_ts']) not in baseline_keys]

    if blocked:
        # BUGFIX: French grammar — "nouveaux filtres" (was "nouvelles filtres").
        print(f"\n  ── Trades BLOQUÉS par les nouveaux filtres ({len(blocked)}) ──")
        w = sum(1 for t in blocked if t['pnl_usdt'] > 0)
        l = len(blocked) - w
        tot = sum(t['pnl_usdt'] for t in blocked)
        print(f"  {w}W / {l}L | PnL évité = {tot:+.4f} $")
        for t in sorted(blocked, key=lambda x: x['pnl_usdt'])[:20]:
            dt = datetime.fromtimestamp(t['entry_ts'] / 1000, tz=timezone.utc).strftime('%d/%m %H:%M')
            print(f"    {t['symbol']:16} {dt}  {t['surge_type']:18} "
                  f"str={t['surge_strength']:.1f}%  24h={t['p24h']:+.1f}%  "
                  f"→ {t['reason']:14}  PnL={t['pnl_usdt']:+.4f}$")

    if added:
        print(f"\n  ── Trades AJOUTÉS par LONG_TREND_SURGE ({len(added)}) ──")
        w = sum(1 for t in added if t['pnl_usdt'] > 0)
        l = len(added) - w
        tot = sum(t['pnl_usdt'] for t in added)
        print(f"  {w}W / {l}L | PnL ajouté = {tot:+.4f} $")
        for t in sorted(added, key=lambda x: -x['pnl_usdt'])[:10]:
            dt = datetime.fromtimestamp(t['entry_ts'] / 1000, tz=timezone.utc).strftime('%d/%m %H:%M')
            print(f"    {t['symbol']:16} {dt}  {t['surge_type']:18} "
                  f"str={t['surge_strength']:.1f}%  → {t['reason']:14}  PnL={t['pnl_usdt']:+.4f}$")


# ═══════════════════════════════════════════════════════════════════════
# MAIN
# ═══════════════════════════════════════════════════════════════════════
def main():
    """Run the full crash-test: download klines, backtest each scenario,
    print comparative statistics, and save detailed results to JSON."""
    print()
    print("╔══════════════════════════════════════════════════════════════════════╗")
    print("║  🧪 CRASH-TEST 10 JOURS — AVANT vs APRÈS modifications              ║")
    print(f"║  Période : {datetime.now(timezone.utc) - timedelta(days=DAYS):%d/%m/%Y} → {datetime.now(timezone.utc):%d/%m/%Y}  ({DAYS} jours, klines 1-min)   ║")
    print(f"║  Coins   : {len(SYMBOLS)} | Position : {POSITION_SIZE:.0f} USDT | SL : {abs(HARD_SL_PCT):.1f}%                  ║")
    print("╚══════════════════════════════════════════════════════════════════════╝")
    print()

    # ── Phase 1: Download 1-min klines for every symbol ──────────────────
    print(f"📥 Téléchargement des données 1-min ({DAYS}j) pour {len(SYMBOLS)} symboles...")
    all_data = {}
    for sym in SYMBOLS:
        klines = fetch_klines(sym, DAYS)
        # Require at least one full day of bars before including the coin.
        if len(klines) >= 1440:
            all_data[sym] = klines
            status = "✅"
        else:
            status = f"⚠️  seulement {len(klines)} bars"
        print(f"  {status} {sym:16} : {len(klines):6} klines")
        time.sleep(0.02)

    print(f"\n  ✅ {len(all_data)}/{len(SYMBOLS)} coins chargés\n")

    # ── Phase 2: Backtest the scenarios ──────────────────────────────────
    # NOTE(review): the module docstring lists 5 scenarios (including
    # '+LT_SURGE' between '+DEC_FILT' and 'CURRENT') but only 4 are defined
    # here — confirm whether that scenario was dropped intentionally.
    SCENARIOS = {
        'BASELINE':   dict(use_slippage_guard=False, use_decline_filter=False, use_long_trend=False),
        '+SLIPPAGE':  dict(use_slippage_guard=True,  use_decline_filter=False, use_long_trend=False),
        '+DEC_FILT':  dict(use_slippage_guard=True,  use_decline_filter=True,  use_long_trend=False),
        'CURRENT':    dict(use_slippage_guard=True,  use_decline_filter=True,  use_long_trend=True),
    }

    scenario_trades = {name: [] for name in SCENARIOS}

    for sym, klines in all_data.items():
        for name, flags in SCENARIOS.items():
            trades = backtest_symbol(sym, klines, flags)
            scenario_trades[name].extend(trades)

    # ── Phase 3: Results ──────────────────────────────────────────────────
    stats = []
    for name in SCENARIOS:
        s = summarize(scenario_trades[name], name)
        stats.append(s)

    print("╔══════════════════════════════════════════════════════════════════════╗")
    print("║  📊 RÉSULTATS COMPARATIFS                                           ║")
    print("╚══════════════════════════════════════════════════════════════════════╝")
    print()
    print_result_table(stats)

    # ── Detail of blocked / added trades (BASELINE vs CURRENT)
    print()
    print("╔══════════════════════════════════════════════════════════════════════╗")
    print("║  🔍 ANALYSE DÉTAILLÉE : BASELINE → CURRENT                         ║")
    print("╚══════════════════════════════════════════════════════════════════════╝")
    print_blocked_trades(scenario_trades['BASELINE'], scenario_trades['CURRENT'], all_data)

    # ── Stats by surge type (CURRENT scenario) ────────────────────────────
    print()
    print("  ── Par type de surge (scénario CURRENT) ──")
    by_surge = defaultdict(list)
    for t in scenario_trades['CURRENT']:
        by_surge[t['surge_type']].append(t)
    for stype, tl in sorted(by_surge.items(), key=lambda x: sum(t['pnl_usdt'] for t in x[1]), reverse=True):
        pnl = sum(t['pnl_usdt'] for t in tl)
        w   = sum(1 for t in tl if t['pnl_usdt'] > 0)
        wr  = w / len(tl) * 100 if tl else 0
        print(f"    {stype:20} | {len(tl):4} trades | WR={wr:5.1f}% | PnL={pnl:+.4f}$")

    # ── Stats by exit reason (CURRENT scenario) ───────────────────────────
    print()
    print("  ── Par raison de sortie (scénario CURRENT) ──")
    by_reason = defaultdict(list)
    for t in scenario_trades['CURRENT']:
        by_reason[t['reason']].append(t)
    for reason, tl in sorted(by_reason.items(), key=lambda x: len(x[1]), reverse=True):
        pnl = sum(t['pnl_usdt'] for t in tl)
        print(f"    {reason:16} | {len(tl):4} trades | PnL={pnl:+.4f}$")

    # ── Top 5 best and worst trades (CURRENT scenario) ────────────────────
    cur_trades_sorted = sorted(scenario_trades['CURRENT'], key=lambda x: x['pnl_usdt'])
    if cur_trades_sorted:
        print()
        print("  ── Top 5 PIRES trades (CURRENT) ──")
        for t in cur_trades_sorted[:5]:
            dt = datetime.fromtimestamp(t['entry_ts'] / 1000, tz=timezone.utc).strftime('%d/%m %H:%M')
            print(f"    {t['symbol']:16} {dt}  {t['surge_type']:18} str={t['surge_strength']:.1f}%  "
                  f"24h={t['p24h']:+.1f}%  → {t['reason']:14}  PnL={t['pnl_usdt']:+.4f}$")

        print()
        print("  ── Top 5 MEILLEURS trades (CURRENT) ──")
        for t in cur_trades_sorted[-5:][::-1]:
            dt = datetime.fromtimestamp(t['entry_ts'] / 1000, tz=timezone.utc).strftime('%d/%m %H:%M')
            print(f"    {t['symbol']:16} {dt}  {t['surge_type']:18} str={t['surge_strength']:.1f}%  "
                  f"24h={t['p24h']:+.1f}%  → {t['reason']:14}  PnL={t['pnl_usdt']:+.4f}$")

    # ── Net BEFORE → AFTER comparison (first vs. last scenario) ───────────
    b = stats[0]   # BASELINE
    c = stats[-1]  # CURRENT
    print()
    print("╔══════════════════════════════════════════════════════════════════════╗")
    print("║  🏆 VERDICT FINAL : BASELINE → CURRENT                              ║")
    print("╚══════════════════════════════════════════════════════════════════════╝")
    if b['n'] > 0 and c['n'] > 0:
        delta_wr  = c['win_rate'] - b['win_rate']
        delta_pnl = c['total_pnl'] - b['total_pnl']
        delta_n   = c['n'] - b['n']
        print(f"  Trades        : {b['n']} → {c['n']}  (Δ {delta_n:+d})")
        print(f"  Win rate      : {b['win_rate']:.1f}% → {c['win_rate']:.1f}%  (Δ {delta_wr:+.1f}pp)")
        print(f"  P&L Total     : {b['total_pnl']:+.4f}$ → {c['total_pnl']:+.4f}$  (Δ {delta_pnl:+.4f}$)")
        print(f"  Profit Factor : {b['profit_factor']:.2f} → {c['profit_factor']:.2f}")
        print(f"  Max Drawdown  : {b['max_dd']:+.4f}$ → {c['max_dd']:+.4f}$")
        print()
        if delta_pnl > 0 and delta_wr > 0:
            print("  ✅ AMÉLIORÉ : les 3 modifications augmentent le PnL ET le WR")
        elif delta_pnl > 0:
            print("  ✅ PnL amélioré (WR légèrement différent)")
        elif delta_wr > 0:
            print("  ⚠️  WR amélioré mais PnL global différent — vérifier la taille d'échantillon")
        else:
            print("  🔴 Attention : les modifications n'améliorent pas sur cet échantillon")
    else:
        print("  ⚠️  Pas assez de trades pour conclure")

    # ── Save detailed results next to this script ─────────────────────────
    output = {
        'generated_at': datetime.now(timezone.utc).isoformat(),
        'period_days': DAYS,
        'symbols': list(all_data.keys()),
        'results': stats,
        'trades_by_scenario': {
            name: scenario_trades[name] for name in SCENARIOS
        }
    }
    out_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "crashtest_10d_results.json")
    with open(out_file, 'w') as f:
        json.dump(output, f, indent=2)
    print(f"\n  💾 Résultats détaillés → crashtest_10d_results.json")
    print()


# CLI entry point.
if __name__ == '__main__':
    main()
