import numpy as np
import pandas as pd
import shinybroker as sb
import datetime
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import warnings
warnings.filterwarnings('ignore')
Breakout Strategy — QQQ (Nasdaq-100 ETF)
Turtle Trading with Choppiness Index, Volume & Last-Trade Filters
Design and Testing of Algorithmic Trading Systems with Python
1 — Strategy Parameters
All tunable parameters are defined here as named constants so they are easy to find and adjust.
###############################################################
# STRATEGY PARAMETERS — all tuneable values live here
###############################################################
# Donchian Channel lookbacks to evaluate in walk-forward training
N_ENTRY_OPTIONS = [20, 40, 55, 75]
# Donchian Channel lookback for the exit signal (short-term reversal)
N_EXIT = 20
# ATR period for stop-loss calculation
ATR_PERIOD = 14
# Stop loss distance = ATR_STOP_MULT × ATR(14)
ATR_STOP_MULT = 2.0
# Choppiness Index period and threshold
# CHOP > 61.8 → market is fractal noise → skip the trade
CHOP_PERIOD = 14
CHOP_THRESHOLD = 61.8
# Volume filter: today's volume must exceed VOL_MULT × 20-day average
VOL_MA_PERIOD = 20
VOL_MULT = 1.5
# Maximum days to hold a trade before forcing exit at market close
TIMEOUT_DAYS = 20
# Fixed position size in shares — no volatility-based sizing is applied
POSITION_SIZE = 100
# Starting account cash (used to seed the daily ledger)
START_CASH = 100_000
# Walk-forward window sizes (trading days)
TRAIN_DAYS = 252   # ~1 year in-sample
TEST_DAYS = 126    # ~6 months out-of-sample
# Risk-free rate for Sharpe calculation (annualised)
RISK_FREE_RATE = 0.0375
2 — Asset Definition & Data Fetch
We use QQQ (Invesco Nasdaq-100 ETF) — high liquidity, strong trend-following behaviour, frequent clean breakouts, and widely used in academic breakout research.
### contracts
# IBKR contract definitions (shinybroker): US stocks, SMART-routed, USD.
asset = sb.Contract({
    'symbol': 'QQQ',
    'secType': 'STK',
    'exchange': 'SMART',
    'currency': 'USD'
})
# SPY is fetched only as a benchmark for the alpha/beta comparison.
spy = sb.Contract({
    'symbol': 'SPY',
    'secType': 'STK',
    'exchange': 'SMART',
    'currency': 'USD'
})
### fetch 2+ years of daily OHLCV
# Port 7497 is the TWS paper-trading default.  The same client_id is reused
# for both fetches — presumably safe because the calls run sequentially;
# TODO confirm shinybroker disconnects between calls.
raw_daily = sb.fetch_historical_data(
    asset,
    endDateTime   = '',
    durationStr   = '2 Y',
    barSizeSetting= '1 day',
    whatToShow    = 'TRADES',
    useRTH        = True,
    host          = '127.0.0.1',
    port          = 7497,
    client_id     = 9999,
    timeout       = 10,
)['hst_dta']
### fetch SPY for benchmark comparison
spy_daily = sb.fetch_historical_data(
    spy,
    endDateTime   = '',
    durationStr   = '2 Y',
    barSizeSetting= '1 day',
    whatToShow    = 'TRADES',
    useRTH        = True,
    host          = '127.0.0.1',
    port          = 7497,
    client_id     = 9999,
    timeout       = 10,
)['hst_dta']
print(f"QQQ rows: {len(raw_daily)}")
print(f"SPY rows: {len(spy_daily)}")
raw_daily.head()
3 — Data Preparation
### returns part — prepare price DataFrame
def _prepare_ohlcv(raw):
    """Normalise a shinybroker history frame.

    Returns a copy indexed by parsed 'timestamp' (ascending), with
    lowercase column names, restricted to the five OHLCV columns and
    with incomplete rows dropped.
    """
    out = raw.copy()
    out['timestamp'] = pd.to_datetime(out['timestamp'])
    out = out.set_index('timestamp').sort_index()
    out.columns = [c.lower() for c in out.columns]
    return out[['open', 'high', 'low', 'close', 'volume']].dropna()

# QQQ (traded asset) and SPY (benchmark) share the exact same cleaning
# pipeline — previously duplicated inline; factored into one helper.
daily_df = _prepare_ohlcv(raw_daily)
### SPY prep
spy_df = _prepare_ohlcv(spy_daily)
print(f"Date range: {daily_df.index[0].date()} → {daily_df.index[-1].date()}")
print(f"Total trading days: {len(daily_df)}")
daily_df.tail()
4 — Indicator Calculations
We compute all indicators on the full dataset up-front. All indicators are lagged by 1 bar where relevant so no look-ahead bias enters the signals.
### calculations
# ── True Range & ATR(14) ──────────────────────────────────
# TR = max(high − low, |high − prev_close|, |low − prev_close|)
prev_close = daily_df['close'].shift(1)
tr = pd.concat([
    daily_df['high'] - daily_df['low'],
    (daily_df['high'] - prev_close).abs(),
    (daily_df['low'] - prev_close).abs(),
], axis=1).max(axis=1)
# Simple moving average of TR — note this is SMA-ATR, not Wilder's smoothing
daily_df['atr'] = tr.rolling(ATR_PERIOD).mean()
# ── Donchian Channels ─────────────────────────────────────
# Use .shift(1) so today's bar does NOT influence its own signal
for n in N_ENTRY_OPTIONS + [N_EXIT]:
    daily_df[f'dc_high_{n}'] = daily_df['high'].shift(1).rolling(n).max()
    daily_df[f'dc_low_{n}'] = daily_df['low'].shift(1).rolling(n).min()
# ── Choppiness Index ──────────────────────────────────────
# CHOP = 100 × log10(SUM_ATR_n / (HH_n − LL_n)) / log10(n)
# Values > 61.8 = choppy market, < 38.2 = strongly trending
# NOTE(review): unlike the Donchian channels, CHOP uses the CURRENT bar's
# high/low/TR (no shift).  That is consistent with signalling on the close,
# but confirm it matches the "lagged by 1 bar where relevant" claim above.
atr_sum = tr.rolling(CHOP_PERIOD).sum()
hh_n = daily_df['high'].rolling(CHOP_PERIOD).max()
ll_n = daily_df['low'].rolling(CHOP_PERIOD).min()
daily_df['chop'] = (
    # .replace(0, np.nan) guards log10 against a zero range (flat market)
    100 * np.log10(atr_sum / (hh_n - ll_n).replace(0, np.nan))
    / np.log10(CHOP_PERIOD)
)
# ── Volume MA ─────────────────────────────────────────────
# Shifted 1 bar: today's volume is compared to the PRIOR 20-day average
daily_df['vol_ma20'] = daily_df['volume'].shift(1).rolling(VOL_MA_PERIOD).mean()
# ── Log Returns (for performance calcs) ──────────────────
daily_df['log_ret'] = np.log(daily_df['close'] / daily_df['close'].shift(1))
print("Indicators computed.")
print(f"First valid row (all indicators present): {daily_df.dropna().index[0].date()}")
daily_df[['close','atr','chop','vol_ma20']].dropna().tail()
5 — Breakout Detection Function
This is the core of the strategy. It takes a single row of the DataFrame and returns: - +1 → Long breakout signal (buy 100 shares) - -1 → Short breakout signal (short 100 shares) - 0 → No signal
Plain English: A breakout is triggered when today’s close punches through the highest high (or lowest low) of the previous N bars. To avoid false breakouts, we require three conditions to be met simultaneously: 1. Volume confirmation — today’s volume exceeds 1.5× its 20-day average, confirming genuine participation behind the move. 2. Choppiness Index filter — CHOP must be below 61.8, meaning the market is behaving like a directional trend rather than fractal noise. 3. Last-Trade filter — if the most recent completed trade was profitable, we skip the next signal. Markets rarely trend twice in a row without a consolidation period; the trade that just won has likely exhausted that directional energy. (Adapted from the original Turtle Trading rules.)
def detect_breakout(row, n_entry, last_trade_profitable):
    """
    Evaluate one bar for a breakout entry signal.

    The three filters act as guard clauses, in order: last-trade,
    choppiness, then volume.  Only when all pass is the Donchian
    channel consulted.

    Parameters
    ----------
    row : pd.Series — one row of daily_df (current bar)
    n_entry : int — Donchian lookback for entry channel
    last_trade_profitable: bool|None — True if the last closed trade made money;
                           None if no trades have been completed yet

    Returns
    -------
    +1 (long), -1 (short), or 0 (no signal)
    """
    # Filter 3: Last-Trade Filter — stand aside right after a winner
    if last_trade_profitable is True:
        return 0
    # Filter 2: Choppiness Index — no trades in a fractal/choppy regime
    chop = row['chop']
    if pd.isna(chop) or chop > CHOP_THRESHOLD:
        return 0
    # Filter 1: Volume confirmation — demand above-average participation
    vol_ma = row['vol_ma20']
    if pd.isna(vol_ma) or row['volume'] <= VOL_MULT * vol_ma:
        return 0
    # Donchian Channel levels (already shifted, no look-ahead bias)
    upper = row[f'dc_high_{n_entry}']
    lower = row[f'dc_low_{n_entry}']
    if pd.isna(upper) or pd.isna(lower):
        return 0
    close = row['close']
    if close > upper:       # long breakout: close above N-day high
        return 1
    if close < lower:       # short breakout: close below N-day low
        return -1
    return 0
print("detect_breakout() defined — all parameters defined in Section 1.")
6 — Backtest Engine
A single-trade-at-a-time backtest function used both for walk-forward optimisation (on training folds) and for the final out-of-sample run.
def run_backtest(df, n_entry):
    """
    Run a Donchian breakout backtest on df with a given n_entry lookback.

    One position at a time, fixed POSITION_SIZE shares, long or short.
    A signal on bar i's close fills at bar i+1's open; bar i+1 is then
    also checked for exits, so a trade can close on its own entry day.
    A signal on the final bar is dropped (no next bar to fill on).

    Exit rules (in priority order):
      1. Stop-loss: if daily low/high crosses entry ± ATR_STOP_MULT × ATR(14).
         Assumes a fill exactly at the stop price — a gap through the stop
         is not modelled; TODO confirm this simplification is acceptable.
      2. Donchian exit: close crosses N_EXIT-day low (long) or high (short)
      3. Timeout: TIMEOUT_DAYS elapsed → exit at that day's close

    Parameters
    ----------
    df      : pd.DataFrame — OHLCV + indicator columns (a daily_df slice)
    n_entry : int — Donchian lookback for the entry channel

    Returns
    -------
    trades : list of dicts, one per completed trade
    """
    trades = []
    in_trade = False
    direction = 0            # +1 long, -1 short
    entry_date = None
    entry_price = 0.0
    stop_price = 0.0
    days_held = 0
    last_profitable = None   # None = no completed trade yet (filter inactive)
    dates = df.index.tolist()
    for i, dt in enumerate(dates):
        row = df.loc[dt]
        if in_trade:
            days_held += 1
            # ── Exit checks ──────────────────────────
            exit_price = None
            exit_reason = None
            # 1. Stop-loss (use intraday low/high)
            if direction == 1 and row['low'] <= stop_price:
                exit_price = stop_price
                exit_reason = 'stop_loss'
            elif direction == -1 and row['high'] >= stop_price:
                exit_price = stop_price
                exit_reason = 'stop_loss'
            # 2. Donchian exit
            if exit_price is None:
                if direction == 1 and row['close'] < row[f'dc_low_{N_EXIT}']:
                    exit_price = row['close']
                    exit_reason = 'donchian_exit'
                elif direction == -1 and row['close'] > row[f'dc_high_{N_EXIT}']:
                    exit_price = row['close']
                    exit_reason = 'donchian_exit'
            # 3. Timeout
            if exit_price is None and days_held >= TIMEOUT_DAYS:
                exit_price = row['close']
                exit_reason = 'timeout'
            if exit_price is not None:
                # PnL in dollars; log_return is direction-signed so it is
                # positive for a winning short as well
                pnl = direction * (exit_price - entry_price) * POSITION_SIZE
                ret = direction * np.log(exit_price / entry_price)
                # NOTE(review): a LOSING 'donchian_exit' falls through to
                # the 'timeout' label here even though exit_reason records
                # 'donchian_exit' — confirm this 3-way outcome taxonomy
                # (success / stop_loss / timeout) is intentional.
                outcome = 'success' if pnl > 0 else (
                    'stop_loss' if exit_reason == 'stop_loss' else 'timeout'
                )
                # Feeds the Last-Trade filter on the next entry attempt
                last_profitable = pnl > 0
                trades.append({
                    'entry_date'  : entry_date,
                    'exit_date'   : dt,
                    'direction'   : direction,
                    'qty'         : direction * POSITION_SIZE,
                    'entry_price' : entry_price,
                    'exit_price'  : exit_price,
                    'stop_price'  : stop_price,
                    'days_held'   : days_held,
                    'exit_reason' : exit_reason,
                    'outcome'     : outcome,
                    'pnl'         : pnl,
                    'log_return'  : ret,
                })
                in_trade = False
                direction = 0
        else:
            # ── Entry check ──────────────────────────
            signal = detect_breakout(row, n_entry, last_profitable)
            if signal != 0:
                # Enter at next bar open — if next bar exists
                if i + 1 < len(dates):
                    next_dt = dates[i + 1]
                    ep = df.loc[next_dt, 'open']
                    atr_val = row['atr']
                    # Skip unfillable entries (bad open or missing ATR)
                    if pd.isna(ep) or pd.isna(atr_val) or ep <= 0:
                        continue
                    in_trade = True
                    direction = signal
                    entry_date = next_dt
                    entry_price = ep
                    # Stop sits ATR_STOP_MULT ATRs on the adverse side
                    stop_price = ep - signal * ATR_STOP_MULT * atr_val
                    days_held = 0
    # An open position at the end of df is discarded, not marked to market
    return trades
print("run_backtest() defined.")
7 — Walk-Forward Optimisation & Out-of-Sample Test
We roll a 252-day training window followed by a 126-day test window across the full dataset. In each training fold, we test every candidate N in N_ENTRY_OPTIONS and pick the one with the highest Sharpe ratio. We then apply that N to the subsequent test fold (out-of-sample). All reported results come only from test folds.
### calculations — walk-forward
def sharpe_from_trades(trades, rf=RISK_FREE_RATE):
    """Annualised Sharpe ratio from a list of trade dicts.

    Trade log-returns are treated as daily observations and annualised
    with √252 (a rough proxy, per the original comment).  Folds with
    fewer than 3 trades — or zero return dispersion — score -inf so
    they can never win the parameter search.
    """
    if len(trades) < 3:
        return -np.inf
    trade_rets = np.array([t['log_return'] for t in trades])
    dispersion = trade_rets.std()
    if dispersion == 0:
        return -np.inf
    # annualise assuming ~252 / TIMEOUT_DAYS trades/year rough proxy
    return (trade_rets.mean() - rf / 252) / dispersion * np.sqrt(252)
all_trades = []   # master list of OOS trades
wf_results = []   # summary of each fold
dates = daily_df.index
n_days = len(dates)
fold = 0
start = 0
# Roll a [TRAIN_DAYS | TEST_DAYS] window across the data; windows advance
# by TEST_DAYS so the out-of-sample folds tile the timeline without overlap.
while start + TRAIN_DAYS + TEST_DAYS <= n_days:
    train_end = start + TRAIN_DAYS
    test_end = train_end + TEST_DAYS
    train_df = daily_df.iloc[start : train_end]
    test_df = daily_df.iloc[train_end : test_end]
    # ── Optimise N on training fold ──────────────────────
    # Exhaustive search over the candidate lookbacks; ties keep the
    # earlier (shorter) N because the comparison is strict '>'
    best_n = N_ENTRY_OPTIONS[0]
    best_shp = -np.inf
    for n in N_ENTRY_OPTIONS:
        t = run_backtest(train_df, n)
        s = sharpe_from_trades(t)
        if s > best_shp:
            best_shp = s
            best_n = n
    # ── Run OOS test fold with best N ────────────────────
    oos_trades = run_backtest(test_df, best_n)
    # Tag each trade with its fold and the parameter that produced it
    for tr in oos_trades:
        tr['fold'] = fold
        tr['best_n'] = best_n
    all_trades.extend(oos_trades)
    wf_results.append({
        'fold'           : fold,
        'train_start'    : dates[start].date(),
        'train_end'      : dates[train_end - 1].date(),
        'test_start'     : dates[train_end].date(),
        'test_end'       : dates[test_end - 1].date(),
        'best_n'         : best_n,
        'train_sharpe'   : round(best_shp, 3),
        'oos_trade_count': len(oos_trades),
    })
    start += TEST_DAYS   # roll forward by one test-window
    fold += 1
wf_df = pd.DataFrame(wf_results)
print(f"Walk-forward complete: {fold} folds, {len(all_trades)} total OOS trades")
print()
wf_df
8 — Blotter
### blotter
# Consolidated record of every out-of-sample trade, chronological by entry
blotter = pd.DataFrame(all_trades)
blotter['entry_date'] = pd.to_datetime(blotter['entry_date'])
blotter['exit_date'] = pd.to_datetime(blotter['exit_date'])
blotter = blotter.sort_values('entry_date').reset_index(drop=True)
# Human-readable direction label
blotter['direction_label'] = blotter['direction'].map({1: 'Long', -1: 'Short'})
print(f"Total trades: {len(blotter)}")
print(f"Long trades: {(blotter['direction']==1).sum()}")
print(f"Short trades: {(blotter['direction']==-1).sum()}")
print()
blotter[['entry_date','exit_date','direction_label','qty',
         'entry_price','exit_price','days_held','outcome','pnl','log_return']]
9 — Ledger (Daily Mark-to-Market)
### ledger
# Build a daily mark-to-market ledger over the OOS period only.
#   NAV = cash + position × close
# Cash changes only on trade events:
#   entry: pay qty × entry_price  (qty < 0 for shorts → receive proceeds)
#   exit : receive qty × exit_price
oos_start = blotter['entry_date'].min()
oos_mask = (daily_df.index >= oos_start)
oos_df = daily_df[oos_mask].copy()

ledger = pd.DataFrame(index=oos_df.index)
ledger.index.name = 'date'
ledger['position'] = 0
ledger['cash'] = np.nan          # filled below from cumulative trade flows
ledger['mark'] = oos_df['close']
ledger['mkt_value'] = np.nan

# Net cash flow per date, accumulated across all trades touching that date.
# This replaces the original per-trade slice arithmetic, which (a) used the
# deprecated fillna(method='ffill') API and (b) added flows to NaN cells,
# leaving NaNs in the cash column.
cash_flows = pd.Series(0.0, index=ledger.index)
for _, tr in blotter.iterrows():
    qty = tr['qty']
    if tr['entry_date'] in ledger.index:
        ledger.loc[ledger.index >= tr['entry_date'], 'position'] += qty
        cash_flows.loc[tr['entry_date']] -= qty * tr['entry_price']
    if tr['exit_date'] in ledger.index:
        ledger.loc[ledger.index >= tr['exit_date'], 'position'] -= qty
        cash_flows.loc[tr['exit_date']] += qty * tr['exit_price']

# Cash path = starting cash plus cumulative trade cash flows — no NaNs
ledger['cash'] = float(START_CASH) + cash_flows.cumsum()
ledger['mkt_value'] = ledger['position'] * ledger['mark'] + ledger['cash']
# Log returns for performance
ledger['daily_log_ret'] = np.log(ledger['mkt_value'] / ledger['mkt_value'].shift(1))
print("Ledger built.")
ledger[['position','cash','mark','mkt_value']].tail(10)
ledger[['position','cash','mark','mkt_value']].tail(10)
10 — Trade Outcome Analysis
Every trade is classified as one of three outcomes: - Success — trade closed profitably (hit Donchian exit in the right direction, profitably) - Stop-loss — hit the 2×ATR stop before profit target - Timeout — held for 20 days, closed at market
### calculations — outcome breakdown
# Tally of the three outcome labels assigned by run_backtest
outcome_counts = blotter['outcome'].value_counts().reset_index()
outcome_counts.columns = ['outcome', 'count']
print(outcome_counts)
print()
# Win rate counts strictly positive PnL; a zero-PnL trade counts as a loss
win_rate = (blotter['pnl'] > 0).mean()
avg_win = blotter.loc[blotter['pnl'] > 0, 'pnl'].mean()
avg_loss = blotter.loc[blotter['pnl'] < 0, 'pnl'].mean()
# Profit factor = gross profit / gross loss; inf when there are no losers
profit_factor = abs(blotter.loc[blotter['pnl']>0,'pnl'].sum() /
                    blotter.loc[blotter['pnl']<0,'pnl'].sum()) if (blotter['pnl']<0).any() else np.inf
print(f"Win rate: {win_rate:.1%}")
print(f"Avg winning PnL: ${avg_win:,.0f}")
print(f"Avg losing PnL: ${avg_loss:,.0f}")
print(f"Profit factor: {profit_factor:.2f}")
print(f"Avg days held: {blotter['days_held'].mean():.1f}")
11 — Performance Metrics
### calculations — performance metrics
# All metrics are computed from the daily NAV log returns of the ledger
daily_rets = ledger['daily_log_ret'].dropna()
n_days_held = len(daily_rets)
# GMRR (Geometric Mean Rate of Return, annualised)
# mean log return × 252 = annual log return; exp(·) − 1 converts to percent
annual_gm = daily_rets.mean() * 252
pct_annual_gm = np.exp(annual_gm) - 1
# Annual Volatility (of log returns, √252 scaling)
annual_vol = daily_rets.std() * np.sqrt(252)
# Sharpe Ratio — excess is taken in log space: annual log return minus
# log(1 + rf), consistent with the log-return numerator
log_rf = np.log(1 + RISK_FREE_RATE)
excess_ret = annual_gm - log_rf
sharpe_ratio = excess_ret / annual_vol
# Max Drawdown — worst peak-to-trough decline of NAV
nav_series = ledger['mkt_value']
roll_max = nav_series.cummax()
drawdown = (nav_series - roll_max) / roll_max
max_dd = drawdown.min()
# Total return over the whole OOS period
total_log = daily_rets.sum()
total_ret = np.exp(total_log) - 1
print("=" * 45)
print(f" OOS Period: {ledger.index[0].date()} → {ledger.index[-1].date()}")
print(f" Total Trades: {len(blotter)}")
print(f" Win Rate: {win_rate:.1%}")
print(f" Profit Factor: {profit_factor:.2f}")
print("-" * 45)
print(f" Total Return: {total_ret:.2%}")
print(f" Annual GMRR: {pct_annual_gm:.2%}")
print(f" Annual Volatility: {annual_vol:.2%}")
print(f" Sharpe Ratio: {sharpe_ratio:.3f}")
print(f" Max Drawdown: {max_dd:.2%}")
print("=" * 45)
12 — Alpha & Beta vs QQQ and SPY
### calculations — alpha & beta
def asof_close(df, ts):
    # Last available close at or before ts (tolerates non-trading days)
    return df['close'].asof(pd.to_datetime(ts))

bench_data = {
    'QQQ': daily_df,
    'SPY': spy_df
}
# Benchmark log return over each trade's exact holding window
for bench_name, bench_df in bench_data.items():
    bench_rets = []
    for _, row in blotter.iterrows():
        b0 = asof_close(bench_df, row['entry_date'])
        b1 = asof_close(bench_df, row['exit_date'])
        if pd.isna(b0) or pd.isna(b1) or b0 <= 0 or b1 <= 0:
            bench_rets.append(np.nan)
        else:
            bench_rets.append(np.log(b1 / b0))
    blotter[f'ret_{bench_name}'] = bench_rets

valid = blotter.dropna(subset=['log_return', 'ret_QQQ', 'ret_SPY'])
# NOTE(review): np.polyfit(x, y, 1) fits y on x.  With x = strategy returns
# and y = benchmark returns this regresses the BENCHMARK on the strategy —
# the reverse of the conventional CAPM orientation (strategy on benchmark).
# The scatter charts below draw their regression lines with this same
# orientation, so the figures are internally consistent, but the printed
# "alpha/beta" should not be read as standard CAPM alpha/beta.  Confirm
# intent before publishing.
beta_qqq, alpha_qqq = np.polyfit(valid['log_return'], valid['ret_QQQ'], 1)
beta_spy, alpha_spy = np.polyfit(valid['log_return'], valid['ret_SPY'], 1)
print(f"vs QQQ → alpha: {alpha_qqq:.6f} beta: {beta_qqq:.4f}")
print(f"vs SPY → alpha: {alpha_spy:.6f} beta: {beta_spy:.4f}")
13 — Charts
### plotting — NAV / Equity Curve
# Strategy NAV against a flat starting-capital baseline, dark theme.
fig_nav = go.Figure()
fig_nav.add_trace(go.Scatter(
    x=ledger.index,
    y=ledger['mkt_value'],
    name='Strategy NAV',
    mode='lines',
    line=dict(color='#00d4ff', width=2),
))
# Dashed reference line at the initial account value
fig_nav.add_trace(go.Scatter(
    x=ledger.index,
    y=[START_CASH] * len(ledger),
    name='Starting Capital',
    mode='lines',
    line=dict(color='#888888', width=1, dash='dash'),
))
fig_nav.update_layout(
    title='Equity Curve — QQQ Donchian Breakout Strategy (Walk-Forward OOS)',
    xaxis_title='Date',
    yaxis_title='NAV ($)',
    font=dict(color='white'),
    paper_bgcolor='black',
    plot_bgcolor='black',
    xaxis=dict(gridcolor='#333'),
    yaxis=dict(gridcolor='#333'),
    legend=dict(bgcolor='rgba(0,0,0,0)'),
)
fig_nav.show()
### plotting — Drawdown
# Underwater chart: drawdown in percent, area-filled to zero
fig_dd = go.Figure(go.Scatter(
    x=drawdown.index,
    y=drawdown * 100,
    name='Drawdown %',
    mode='lines',
    fill='tozeroy',
    fillcolor='rgba(255,68,68,0.2)',
    line=dict(color='#ff4444', width=1.5),
))
fig_dd.update_layout(
    title='Strategy Drawdown (%)',
    xaxis_title='Date',
    yaxis_title='Drawdown (%)',
    font=dict(color='white'),
    paper_bgcolor='black',
    plot_bgcolor='black',
    xaxis=dict(gridcolor='#333'),
    yaxis=dict(gridcolor='#333'),
)
fig_dd.show()
### plotting — Trade Outcome Histogram
# Outcome → colour mapping, reused by the scatter charts further down
color_map = {'success': '#00d97e', 'stop_loss': '#ff4444', 'timeout': '#f0a500'}
# Overlaid P&L histograms, one colour per outcome class
fig_outcome = px.histogram(
    blotter,
    x='pnl',
    color='outcome',
    color_discrete_map=color_map,
    labels={'pnl': 'Trade P&L ($)', 'outcome': 'Outcome'},
    title='Trade P&L Distribution by Outcome',
    nbins=40,
    barmode='overlay',
    opacity=0.75,
)
fig_outcome.update_layout(
    font=dict(color='white'),
    paper_bgcolor='black',
    plot_bgcolor='black',
    xaxis=dict(gridcolor='#333'),
    yaxis=dict(gridcolor='#333'),
)
fig_outcome.show()
### plotting — Strategy vs QQQ (per-trade log returns)
# Per-trade returns coloured by outcome, with the fitted alpha/beta line
fig_qqq = px.scatter(
    valid,
    x='log_return',
    y='ret_QQQ',
    color='outcome',
    color_discrete_map=color_map,
    labels={'log_return': 'Strategy log return', 'ret_QQQ': 'QQQ log return'},
    title='Strategy Return vs QQQ Return (per-trade, log)',
)
# Regression line spanning the observed strategy-return range
lo, hi = valid['log_return'].min(), valid['log_return'].max()
x_rng = np.linspace(lo, hi, 100)
fig_qqq.add_trace(go.Scatter(
    x=x_rng,
    y=beta_qqq * x_rng + alpha_qqq,
    mode='lines',
    name='Regression',
    line=dict(color='white', dash='dash'),
))
fig_qqq.update_layout(
    font=dict(color='white'),
    paper_bgcolor='black',
    plot_bgcolor='black',
    xaxis=dict(gridcolor='#333'),
    yaxis=dict(gridcolor='#333'),
)
fig_qqq.show()
### plotting — Strategy vs SPY (per-trade log returns)
# Same layout as the QQQ scatter, against the SPY benchmark returns
fig_spy = px.scatter(
    valid,
    x='log_return',
    y='ret_SPY',
    color='outcome',
    color_discrete_map=color_map,
    labels={'log_return': 'Strategy log return', 'ret_SPY': 'SPY log return'},
    title='Strategy Return vs SPY Return (per-trade, log)',
)
lo, hi = valid['log_return'].min(), valid['log_return'].max()
x_rng = np.linspace(lo, hi, 100)
fig_spy.add_trace(go.Scatter(
    x=x_rng,
    y=beta_spy * x_rng + alpha_spy,
    mode='lines',
    name='Regression',
    line=dict(color='white', dash='dash'),
))
fig_spy.update_layout(
    font=dict(color='white'),
    paper_bgcolor='black',
    plot_bgcolor='black',
    xaxis=dict(gridcolor='#333'),
    yaxis=dict(gridcolor='#333'),
)
fig_spy.show()
### plotting — Walk-Forward Summary Table
# Fold-by-fold summary table of the walk-forward optimisation
train_span = wf_df['train_start'].astype(str) + ' → ' + wf_df['train_end'].astype(str)
test_span = wf_df['test_start'].astype(str) + ' → ' + wf_df['test_end'].astype(str)
fig_wf = go.Figure(data=[go.Table(
    header=dict(
        align='center',
        values=['Fold','Train Period','Test Period','Best N','Train Sharpe','OOS Trades'],
        fill_color='#1a1a2e',
        font=dict(color='white', size=12),
    ),
    cells=dict(
        align='center',
        values=[
            wf_df['fold'],
            train_span,
            test_span,
            wf_df['best_n'],
            wf_df['train_sharpe'],
            wf_df['oos_trade_count'],
        ],
        fill_color='#0d0d1a',
        font=dict(color='white', size=11),
    ),
)])
fig_wf.update_layout(
    title='Walk-Forward Fold Summary',
    paper_bgcolor='black',
    font=dict(color='white'),
)
fig_wf.show()
14 — Export Blotter CSV & Charts for Website
### output
# Persist the trade blotter for the website download link
export_cols = [
    'entry_date','exit_date','direction_label','qty',
    'entry_price','exit_price','stop_price','days_held',
    'exit_reason','outcome','pnl','log_return'
]
blotter_export = blotter[export_cols].copy()
blotter_export.to_csv('blotter_qqq_breakout.csv', index=False)
print("blotter_qqq_breakout.csv saved.")
# Save Plotly charts as self-contained HTML for embedding in the website
html_charts = {
    'chart_nav.html'        : fig_nav,
    'chart_drawdown.html'   : fig_dd,
    'chart_outcomes.html'   : fig_outcome,
    'chart_vs_qqq.html'     : fig_qqq,
    'chart_vs_spy.html'     : fig_spy,
    'chart_walkforward.html': fig_wf,
}
for fname, fig in html_charts.items():
    fig.write_html(fname, full_html=False, include_plotlyjs='cdn')
print("All chart HTML files saved.")
15 — Commentary & Interpretation
Strategy Summary
This strategy applies the classic Turtle Trading philosophy — buy new highs, short new lows, ride the trend, cut losses short — to QQQ (Nasdaq-100 ETF), enhanced with three filters designed to reduce the core weakness of the original system: whipsaw losses in choppy markets.
Filter Effectiveness
The Choppiness Index filter is the most theoretically grounded addition. Developed by E.W. Dreiss using fractal geometry principles from Benoit Mandelbrot, a CHOP reading above 61.8 indicates that price action is mathematically closer to random Brownian motion than a directional trend. Entering a breakout in that regime is statistically equivalent to a coin flip — the filter refuses to flip that coin.
The Volume filter addresses a different failure mode: a price print at a new 55-day high on below-average volume usually reflects thin liquidity rather than genuine institutional accumulation. Real breakouts attract real participation.
The Last-Trade filter (borrowed from the original Turtle rules themselves) captures a market microstructure reality: a large winning trend trade exhausts directional energy. The crowd that was driving the move is now sitting on profits, not adding to positions. The next breakout attempt in the same direction tends to be a fade, not a continuation.
Walk-Forward Validity
The walk-forward structure is what separates a real backtest from an optimised fiction. Every trade in the blotter was executed using a parameter (N) that was never seen by the trade it was applied to — it was selected on the preceding year of data only. The consistency of the best-N selections across folds tells us whether the strategy has genuine structural edge or is curve-fitted to noise.
Alpha & Beta Interpretation
If alpha is near zero and beta is also near zero, it means the strategy’s returns are not well-explained by QQQ or SPY movements on a per-trade basis — the edge, if any, is structural (the mechanical entry/exit rules) rather than market-direction exposure. This is expected for a trend-following system that goes both long and short.
16 — Export Charts as JSON for Website
import os
import json

### output — save charts as JSON for the Quarto website
# Create the output folder once; exist_ok avoids an error on re-runs.
os.makedirs('charts', exist_ok=True)
# Build Plotly blotter table for the website
header_labels = ['Entry Date','Exit Date','Direction','Qty',
                 'Entry $','Exit $','Days','Outcome','P&L']
# One column of cell values per header label, pre-formatted for display
cell_columns = [
    blotter_export['entry_date'].astype(str),
    blotter_export['exit_date'].astype(str),
    blotter_export['direction_label'],
    blotter_export['qty'],
    blotter_export['entry_price'].round(2),
    blotter_export['exit_price'].round(2),
    blotter_export['days_held'],
    blotter_export['outcome'],
    blotter_export['pnl'].round(0).astype(int),
]
fig_blotter_web = go.Figure(data=[go.Table(
    header=dict(
        align='center',
        values=header_labels,
        fill_color='#1a1a2e',
        font=dict(color='white', size=11),
    ),
    cells=dict(
        align='center',
        values=cell_columns,
        fill_color=[
            ['#0d0d1a'] * len(blotter_export)
        ],
        font=dict(color='white', size=10),
    ),
)])
fig_blotter_web.update_layout(
    paper_bgcolor='black',
    font=dict(color='white'),
    # Scale the table height with the row count, never below 300px
    height=max(300, 35 * len(blotter_export) + 80)
)
# Registry: JSON filename stem → figure object
chart_map = {
    'chart_nav'        : fig_nav,
    'chart_drawdown'   : fig_dd,
    'chart_outcomes'   : fig_outcome,
    'chart_vs_qqq'     : fig_qqq,
    'chart_vs_spy'     : fig_spy,
    'chart_walkforward': fig_wf,
    'chart_blotter'    : fig_blotter_web,
}
# Serialise each figure to plain JSON so the website can re-hydrate it
for name, fig in chart_map.items():
    payload = fig.to_dict()
    with open(f'charts/{name}.json', 'w') as f:
        json.dump(payload, f)
print("All chart JSON files saved to charts/")
print("Copy the charts/ folder and blotter_qqq_breakout.csv to your Quarto site directory.")