#!/usr/bin/env python3
"""
flair_arrival_lp_v1.py — FLAIR + Arrival Rate LP Algorithm

Нові сигнали з PDF (не RSI/EMA!):
  FLAIR = метрика конкурентності LP у пулі
  Arrival Rate = кількість свапів/годину (retail vs institutional)
  Avg Swap Size = виявляє informed flow

Алгоритм E: FLAIR-GATED LP
  FLAIR ≈ fee_earned / (fee_earned + LVR_loss)
        = fee_rate × vol_rate / (fee_rate × vol_rate + 0.5 × σ²)
  → Deploy коли FLAIR > threshold (fees > LVR)
  → Exit коли FLAIR drops (LVR > fees)

Алгоритм F: ARRIVAL-SIZE SEGMENTED LP  
  retail_flow = events/h × (1 - large_swap_fraction)
  institutional_flow = events/h × large_swap_fraction
  → Retail-dominated: tight range (more fees, less LVR)
  → Institutional-dominated: wide range або idle

Алгоритм G: COMBINED ENSEMBLE (FLAIR + ChatGPT downtrend_harvester)
  Synthesizes best of both approaches:
  FLAIR high + retail: passive_wide  
  FLAIR medium + downtrend: downtrend_harvester
  FLAIR low or volatile: idle (capital preservation)
"""
from __future__ import annotations
import argparse, json, math
from pathlib import Path
import numpy as np
import pandas as pd

SCRIPT_VERSION = "flair_arrival_lp_v1_2026_05_04"


# ── Math ─────────────────────────────────────────────────────────────────────

def sqrt_raw(p, d0=6, d1=18):
    """Convert a human-readable price into the pool's raw sqrt-price.

    The decimal gap 10**(d1-d0) rescales the price into raw token units;
    max(p, 1e-300) guards against division by zero for p == 0.
    Note the inversion: a larger price yields a smaller raw sqrt value.
    """
    raw_ratio = 10 ** (d1 - d0) / max(p, 1e-300)
    return math.sqrt(raw_ratio)

def liquidity_for_capital(cap, p0, lo, up, d0=6, d1=18):
    """Liquidity L such that a [lo, up] position at price p0 is worth `cap`.

    Computes the position value per unit of liquidity for the three cases
    (price below range, above range, inside range) and inverts it.
    Returns 0 when the per-unit value is degenerate (~0), so callers can
    skip entering an impossible position.
    """
    # Raw sqrt-prices; the price->sqrt mapping is inverted, so the upper
    # price bound produces the smaller raw sqrt value.
    s_cur = math.sqrt(10 ** (d1 - d0) / max(p0, 1e-300))
    s_upper = math.sqrt(10 ** (d1 - d0) / max(up, 1e-300))
    s_lower = math.sqrt(10 ** (d1 - d0) / max(lo, 1e-300))

    if p0 <= lo:
        # Below range: all value sits in token1, marked at p0.
        per_unit = (s_lower - s_upper) / 10 ** d1 * p0
    elif p0 >= up:
        # Above range: all value sits in token0.
        per_unit = (s_lower - s_upper) / (s_upper * s_lower) / 10 ** d0
    else:
        # In range: token0 part (above current price) + token1 part (below).
        per_unit = ((s_lower - s_cur) / (s_cur * s_lower) / 10 ** d0
                    + (s_cur - s_upper) / 10 ** d1 * p0)

    return cap / per_unit if per_unit > 1e-300 else 0

def position_value(L, p, lo, up, d0=6, d1=18):
    """Mark-to-market value (token0 units) of liquidity L in [lo, up] at p.

    Splits the position into token0/token1 amounts depending on where the
    price sits relative to the range, then values token1 at price p.
    """
    # Raw sqrt-prices (price->sqrt mapping inverted: higher price, lower sqrt).
    s_cur = math.sqrt(10 ** (d1 - d0) / max(p, 1e-300))
    s_upper = math.sqrt(10 ** (d1 - d0) / max(up, 1e-300))
    s_lower = math.sqrt(10 ** (d1 - d0) / max(lo, 1e-300))

    if p <= lo:
        # Below range: position is entirely token1.
        amt0, amt1 = 0.0, L * (s_lower - s_upper)
    elif p >= up:
        # Above range: position is entirely token0.
        amt0, amt1 = L * (s_lower - s_upper) / (s_upper * s_lower), 0.0
    else:
        # In range: a mix of both tokens.
        amt0 = L * (s_lower - s_cur) / (s_cur * s_lower)
        amt1 = L * (s_cur - s_upper)

    return amt0 / 10 ** d0 + amt1 / 10 ** d1 * p

def max_drawdown(eq):
    """Worst peak-to-trough decline of the equity curve, in percent (<= 0).

    Zero peaks are masked to NaN so they cannot produce a division error;
    nanmin then ignores those entries.
    """
    running_peak = np.maximum.accumulate(eq)
    safe_peak = np.where(running_peak == 0, np.nan, running_peak)
    drawdown = eq / safe_peak - 1
    return float(np.nanmin(drawdown) * 100)


# ── Window features (extended from ChatGPT with size segmentation) ───────────

def window_features_extended(price, volumes, ts, end_idx, lookback_h, capital,
                             fee_rate=0.003):
    """Compute FLAIR / arrival-rate features over a trailing time window.

    Args:
        price, volumes, ts: aligned per-swap arrays (price, swap USD size,
            unix timestamp in seconds).
        end_idx: index of the last swap included in the window.
        lookback_h: window length in hours, counted back from ts[end_idx].
        capital: strategy capital (USD); swaps > 2% of it count as "informed".
        fee_rate: pool fee tier used in the FLAIR fee-yield proxy
            (default 0.003 keeps the historical behavior; pass the pool's
            actual tier to skip any downstream rescaling).

    Returns:
        dict with keys: flair, retail_rate, inst_rate, drift_pct, vol_pct,
        events_h, avg_swap_usd, toxicity, flow_h.
    """
    end_ts = int(ts[end_idx])
    start_ts = end_ts - int(lookback_h * 3600)
    lo = max(0, int(np.searchsorted(ts, start_ts, 'left')))
    p = price[lo:end_idx+1]
    v = volumes[lo:end_idx+1]
    t = ts[lo:end_idx+1]

    if len(p) < 3:
        # Too few swaps to estimate anything. 'flow_h' is included so this
        # dict always has the same keys as the full return below.
        return {'flair': 0.0, 'retail_rate': 0.0, 'inst_rate': 0.0,
                'drift_pct': 0.0, 'vol_pct': 0.0, 'events_h': 0.0,
                'avg_swap_usd': 0.0, 'toxicity': 0.0, 'flow_h': 0.0}

    lr = np.diff(np.log(np.maximum(p, 1e-300)))          # per-swap log returns
    hours = max(1/60, (int(t[-1])-int(t[0]))/3600)       # floor at 1 minute
    drift_pct = float((p[-1]/p[0]-1)*100)                # net move over window
    path_pct = float(np.sum(np.abs(lr))*100)             # total path length
    vol_pct = float(np.std(lr)*np.sqrt(max(1,len(lr)))*100)
    events_h = len(p)/hours

    # Arrival rate segmentation
    flow_h = float(np.sum(v)/hours)
    avg_swap = float(np.mean(v)) if len(v) > 0 else 0.0

    # Large = informed (> 2% of capital)
    large_threshold = capital * 0.02
    large_mask = v > large_threshold
    large_frac = float(large_mask.mean()) if len(v) > 0 else 0.0

    retail_rate = events_h * (1 - large_frac)   # small swaps/h
    inst_rate = events_h * large_frac            # large swaps/h

    # FLAIR approximation (from PDF)
    # fee_yield_rate = fee_rate × flow_per_hour × our_share / capital
    # LVR_rate = 0.5 × σ² (Fritsch & Canidio formula)
    # FLAIR = fee_yield / (fee_yield + LVR) → ranges 0-1
    sigma_per_event = vol_pct/100 / max(len(lr), 1) ** 0.5
    lvr_rate = 0.5 * sigma_per_event ** 2  # per event
    fee_event_rate = fee_rate * avg_swap / max(capital, 1)
    flair = fee_event_rate / (fee_event_rate + lvr_rate) if (fee_event_rate + lvr_rate) > 0 else 0.0

    # Toxicity: net drift relative to total path — high when flow is one-sided.
    toxicity = abs(drift_pct)/max(path_pct, 1e-9)

    return {'flair': float(np.clip(flair, 0, 1)),
            'retail_rate': retail_rate, 'inst_rate': inst_rate,
            'drift_pct': drift_pct, 'vol_pct': vol_pct,
            'events_h': events_h, 'avg_swap_usd': avg_swap,
            'toxicity': float(min(1, toxicity)), 'flow_h': flow_h}


# ── Algorithms ───────────────────────────────────────────────────────────────

def run_algo(prices, volumes, active_liq, ts, capital, fee_rate, d0, d1,
             algo_name, lookback_h, rebalance_h, params):
    """Backtest a single LP algorithm over one per-swap dataset.

    Args:
        prices, volumes, active_liq, ts: aligned per-swap arrays — price,
            swap size (USD), pool active liquidity, unix timestamp (s).
        capital: starting capital (USD).
        fee_rate: pool fee tier as a fraction (e.g. 0.003 = 0.3%).
        d0, d1: token decimals used by the sqrt-price math.
        algo_name: 'flair_gated' [E], 'arrival_size' [F] or 'ensemble_v1' [G].
        lookback_h: feature window length in hours.
        rebalance_h: minimum hours between decision points.
        params: algorithm-specific thresholds (see branch comments below).

    Returns:
        dict of summary stats: return_pct, mdd_pct, equity_end, fees_cum,
        rebalance count, time-in-LP / in-range percentages, and fee-share
        percentiles.
    """
    n = len(prices)
    equity = np.empty(n); cash=capital; fees_cum=0.0
    L=lo=up=0.0; in_lp=False
    share_arr=np.zeros(n); in_range_arr=np.zeros(n,np.int8); in_lp_arr=np.zeros(n,np.int8)
    rebalances=0; last_reb_ts=ts[0]; decisions=[]
    
    # Deploy all of `cap` into a range lo_pct% below / up_pct% above p0.
    # Silently stays out of the pool if the computed liquidity is degenerate.
    def enter(p0, lo_pct, up_pct, cap):
        nonlocal L,lo,up,in_lp,cash
        l=p0*(1-lo_pct/100); u=p0*(1+up_pct/100)
        Lv=liquidity_for_capital(cap,p0,l,u,d0,d1)
        if Lv>0: L,lo,up,in_lp,cash=Lv,l,u,True,0.0
    
    # Unwind the position at price p: position value + accrued fees -> cash.
    # No-op when not in LP, so it is safe to call unconditionally.
    def exit_(p):
        nonlocal L,lo,up,in_lp,cash,fees_cum
        if in_lp:
            cash=position_value(L,p,lo,up,d0,d1)+fees_cum
            fees_cum=0.0; L=lo=up=0.0; in_lp=False
    
    rebal_window = int(rebalance_h * 3600)
    
    for i in range(n):
        p,v,al,t=prices[i],volumes[i],active_liq[i],ts[i]
        
        should_rebal = (t - last_reb_ts) >= rebal_window
        
        # Decision point: at most once per rebal_window; i >= 10 gives the
        # feature window a minimum amount of history before the first call.
        if should_rebal and i >= 10:
            feat = window_features_extended(prices, volumes, ts, i, lookback_h, capital)
            feat['flair'] = feat['flair'] * fee_rate / 0.003  # adjust for actual fee
            
            # Decide based on algorithm
            deploy, lp, up_pct, reason = False, 30.0, 30.0, 'idle'
            
            if algo_name == 'flair_gated':
                # [E] Deploy only when FLAIR > threshold
                flair_thresh = params.get('flair_thresh', 0.4)
                min_events = params.get('min_events_h', 2.0)
                if feat['flair'] > flair_thresh and feat['events_h'] > min_events:
                    # Range based on volatility
                    range_pct = max(10, min(60, feat['vol_pct'] * params.get('vol_mult', 3)))
                    lp, up_pct, deploy, reason = range_pct, range_pct, True, f'flair={feat["flair"]:.2f}'
            
            elif algo_name == 'arrival_size':
                # [F] Segment by retail vs institutional flow
                min_retail = params.get('min_retail_h', 2.0)
                max_inst = params.get('max_inst_h', 1.0)
                if feat['retail_rate'] > min_retail and feat['inst_rate'] < max_inst:
                    # Retail-dominated: tighter range (more fees, less LVR)
                    range_pct = max(8, min(40, feat['vol_pct'] * 2))
                    lp, up_pct, deploy, reason = range_pct, range_pct, True, f'retail={feat["retail_rate"]:.1f}/h'
                elif feat['retail_rate'] > min_retail:
                    # Mixed: wider range
                    range_pct = max(20, min(70, feat['vol_pct'] * 4))
                    lp, up_pct, deploy, reason = range_pct, range_pct, True, f'mixed'
            
            elif algo_name == 'ensemble_v1':
                # [G] Combined: FLAIR + downtrend awareness + arrival
                flair = feat['flair']
                drift = feat['drift_pct']
                events = feat['events_h']
                toxicity = feat['toxicity']
                
                if events < params.get('min_events', 1.0):
                    reason = 'idle-thin'
                elif flair > params.get('flair_hi', 0.5) and drift > -params.get('drift_thresh', 5):
                    # Bull + strong fee evidence → passive wide
                    lp, up_pct, deploy = 60, 60, True; reason = 'passive-wide'
                elif drift < -params.get('drift_thresh', 5) and events > 1:
                    # Downtrend → downtrend harvester (asymmetric)
                    lp = max(40, min(80, abs(drift)*2)); up_pct = min(2, feat['vol_pct']*0.2)
                    deploy = True; reason = f'downtrend-harvest drift={drift:.1f}'
                elif flair > params.get('flair_lo', 0.25) and toxicity < 0.3:
                    # Low toxicity, some fee evidence → conservative
                    range_pct = max(15, min(50, feat['vol_pct']*2.5))
                    lp, up_pct, deploy = range_pct, range_pct, True; reason = 'conservative'
                else:
                    reason = f'idle flair={flair:.2f} tox={toxicity:.2f}'
            
            # NOTE: collected for inspection only; not returned by this function.
            decisions.append({'i': i, 'deploy': deploy, 'reason': reason})
            
            # State transition: flip in/out of LP, or re-center an open range.
            if deploy != in_lp:
                if deploy:
                    exit_(p); enter(p, lp, up_pct, cash); rebalances += 1
                else:
                    exit_(p); rebalances += 1
            elif deploy and in_lp and should_rebal:
                # Re-center range
                cap_now = position_value(L,p,lo,up,d0,d1)+fees_cum
                exit_(p); enter(p, lp, up_pct, cap_now); rebalances += 1
            
            last_reb_ts = t
        
        # Mark-to-market: while in range, accrue our pro-rata share of
        # this swap's fees (share = L / (active_liquidity + L)).
        if in_lp:
            ir=(p>=lo)&(p<=up); in_range_arr[i]=int(ir); in_lp_arr[i]=1
            if ir and al>0:
                sh=L/(al+L); share_arr[i]=sh; fees_cum+=sh*fee_rate*v
            equity[i]=position_value(L,p,lo,up,d0,d1)+fees_cum
        else:
            equity[i]=cash
    
    # Share statistics are computed only over swaps where the position was
    # both open and in range.
    ir_in=share_arr[in_range_arr.astype(bool)&in_lp_arr.astype(bool)]
    return {'algo':algo_name, 'return_pct':(equity[-1]/capital-1)*100,
            'mdd_pct':max_drawdown(equity), 'equity_end':float(equity[-1]),
            'fees_cum':float(fees_cum), 'rebalances':rebalances,
            'time_in_lp':float(in_lp_arr.mean()*100),
            'time_in_range':float(in_range_arr.mean()*100),
            'avg_share':float(ir_in.mean()*100) if len(ir_in) else 0,
            'p99_share':float(np.percentile(ir_in,99)*100) if len(ir_in) else 0,
            'params':str(params)}


def run_all_datasets(npz_paths, out_dir, capital, fee_rate, d0, d1, lookback_h, rebalance_h):
    """Run every algorithm configuration against every .npz dataset.

    Each .npz must contain 'price', 'input_usd', 'active_liquidity' and 'ts'
    arrays of equal length. Writes three CSVs into out_dir: summary.csv
    (all runs), valid.csv (runs passing the risk filter) and cross_pool.csv
    (per-config pass rates across datasets).

    Returns:
        (df, valid, cross) DataFrames; all empty when no dataset produced
        results (previously this case crashed on the column filters below).
    """
    out = Path(out_dir); out.mkdir(parents=True, exist_ok=True)

    # Parameter grid: three variants per algorithm.
    algo_configs = [
        ('flair_gated', {'flair_thresh': 0.3, 'min_events_h': 1.5, 'vol_mult': 3}),
        ('flair_gated', {'flair_thresh': 0.4, 'min_events_h': 2.0, 'vol_mult': 4}),
        ('flair_gated', {'flair_thresh': 0.5, 'min_events_h': 2.0, 'vol_mult': 5}),
        ('arrival_size', {'min_retail_h': 1.0, 'max_inst_h': 2.0}),
        ('arrival_size', {'min_retail_h': 2.0, 'max_inst_h': 1.0}),
        ('arrival_size', {'min_retail_h': 3.0, 'max_inst_h': 0.5}),
        ('ensemble_v1', {'flair_hi': 0.4, 'flair_lo': 0.2, 'min_events': 1.0, 'drift_thresh': 5}),
        ('ensemble_v1', {'flair_hi': 0.5, 'flair_lo': 0.25, 'min_events': 2.0, 'drift_thresh': 8}),
        ('ensemble_v1', {'flair_hi': 0.35, 'flair_lo': 0.15, 'min_events': 1.0, 'drift_thresh': 3}),
    ]

    all_rows = []
    for npz_path in npz_paths:
        name = Path(npz_path).stem
        try:
            z = np.load(npz_path, allow_pickle=False)
            pr=z['price'].astype(np.float64); vol=z['input_usd'].astype(np.float64)
            al=z['active_liquidity'].astype(np.float64); ts=z['ts'].astype(np.int64)
            days=(ts[-1]-ts[0])/86400
            print(f"  {name}: {len(pr)} swaps, {days:.1f}d")
        except Exception as e:
            # Best-effort: a broken/missing dataset is reported and skipped.
            print(f"  SKIP {name}: {e}"); continue

        for algo_name, params in algo_configs:
            try:
                r = run_algo(pr, vol, al, ts, capital, fee_rate, d0, d1,
                             algo_name, lookback_h, rebalance_h, params)
                r['dataset'] = name
                r['days'] = float(days)
                r['annual_pct'] = r['return_pct']/max(days,0.1)*365
                # Reward-to-risk ratio; 0 when the run never drew down.
                r['pnl_mdd'] = abs(r['return_pct'])/abs(r['mdd_pct']) if r['mdd_pct'] else 0
                all_rows.append(r)
            except Exception as e:
                print(f"    ERROR {algo_name}: {e}")

    df = pd.DataFrame(all_rows)
    df.to_csv(out/'summary.csv', index=False)

    # Guard: with zero successful runs the boolean filters below would raise
    # AttributeError (an empty frame has no mdd_pct/pnl_mdd columns).
    if df.empty:
        print(f"\n  Total: 0 results | Valid: 0")
        df.to_csv(out/'valid.csv', index=False)
        return df, df, pd.DataFrame()

    # Risk filter: bounded drawdown, PnL/MDD >= 2, positive return,
    # and a fee share small enough to be realistic.
    valid = df[(df.mdd_pct>=-25)&(df.pnl_mdd>=2)&(df.return_pct>0)&(df.p99_share<10)]
    valid.to_csv(out/'valid.csv', index=False)

    # Cross-pool: count passing per algo
    cross = df.groupby(['algo','params']).apply(lambda g: pd.Series({
        'datasets': len(g), 'passing': ((g.mdd_pct>=-25)&(g.pnl_mdd>=2)&(g.return_pct>0)&(g.p99_share<10)).sum(),
        'avg_return': g.return_pct.mean(), 'worst_mdd': g.mdd_pct.min()
    })).reset_index()
    cross['pass_rate'] = cross['passing']/cross['datasets']
    cross.sort_values('pass_rate', ascending=False).to_csv(out/'cross_pool.csv', index=False)

    print(f"\n  Total: {len(df)} results | Valid: {len(valid)}")
    print(f"  Cross-pool leaders:")
    for _,r in cross.sort_values('pass_rate',ascending=False).head(5).iterrows():
        print(f"    [{r['algo']}] {r['params'][:40]} | pass={r['passing']:.0f}/{r['datasets']:.0f} ({r['pass_rate']*100:.0f}%) | avg_ret={r['avg_return']:.1f}%")

    return df, valid, cross


def main():
    """CLI entry point: parse arguments and launch the backtest sweep."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--npzs', nargs='+', required=True)
    parser.add_argument('--out-dir', required=True)
    parser.add_argument('--capital', type=float, default=600)
    parser.add_argument('--fee-rate', type=float, default=0.003)
    parser.add_argument('--dec0', type=int, default=6)
    parser.add_argument('--dec1', type=int, default=18)
    parser.add_argument('--lookback-h', type=float, default=168)
    parser.add_argument('--rebalance-h', type=float, default=168)
    opts = parser.parse_args()
    print(f"[{SCRIPT_VERSION}]")
    run_all_datasets(opts.npzs, opts.out_dir, opts.capital, opts.fee_rate,
                     opts.dec0, opts.dec1, opts.lookback_h, opts.rebalance_h)

# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
