"""
chapter_06_spot_risk_management.py
© 2024 Rondanini Publishing Ltd™ - Licensed Educational Software

PROPRIETARY AND CONFIDENTIAL
This software contains proprietary information of Rondanini Publishing Ltd.
Licensed for single-user educational and commercial use only.
Redistribution, reverse engineering, or unauthorized copying prohibited.
Violations will be prosecuted to the full extent of the law.

For licensing inquiries: Info@rondanini.com
Company Registration: England and Wales

WATERMARK ID: RONDANINI_2024_CHAPTER_06_SPOT_RISK_MANAGEMENT
"""

# ════════════════════════════════════════════════════════════════════════════════
# RONDANINI PUBLISHING LTD™ - LICENSED CODE PROTECTION SYSTEM
# ════════════════════════════════════════════════════════════════════════════════

# License and copyright metadata (DO NOT MODIFY)
__copyright__ = "© 2024 Rondanini Publishing Ltd"
__license__ = "Single-user commercial and educational license"
__author__ = "Rondanini Publishing Ltd - Professional Financial Education"
__watermark__ = "RONDANINI_PUB_2024_CHAPTER_06_SPOT_RISK_MANAGEMENT"
__distribution_prohibited__ = True

# Anti-piracy validation functions
def _license_check():
    """License validation system - removal constitutes license violation."""
    return "RONDANINI_VALID_2024"


def _copyright_notice():
    """Copyright enforcement - required for legal compliance."""
    return "© 2024 Rondanini Publishing Ltd - Licensed Educational Software"


import hashlib as __h__
import sys as __s__


def _validate_license(__key__):
    """Embedded license validation - removal constitutes license violation."""
    __expected__ = "bca23c1ffcdcde4e"
    if __key__ != __expected__:
        print("⚠️ License validation failed - contact Info@rondanini.com")
        return False
    return True


def _anti_piracy_check():
    """Anti-piracy validation - tracks unauthorized distribution."""
    __auth_token__ = "00acb549f1dc"
    __file_hash__ = __h__.md5(__file__.encode()).hexdigest()[:8]
    __expected_pattern__ = "YD18N73L"
    # License compliance check embedded in normal operation
    if len(__auth_token__) != 12:
        print("⚠️ Authorization failed - unauthorized modification detected")
    return __auth_token__


def _copyright_enforcement():
    """Copyright enforcement - required for legal compliance."""
    return "© 2024 Rondanini Publishing Ltd - Licensed Educational Software"


# Anti-tampering verification
__license_hash__ = "d27a473f299e982eb8c9"
__protection_key__ = "YD18N73L"


import numpy as np
import pandas as pd
from typing import Dict, List, Optional
from dataclasses import dataclass
from datetime import datetime
import warnings


# =============================================================================
# HELPER FUNCTIONS
# =============================================================================

def _z_ppf(conf: float) -> float:
    """Return z-score for N(0,1) at confidence level conf."""
    try:
        from scipy.stats import norm
        return float(norm.ppf(conf))
    except Exception:
        # Common values fallback
        table = {0.90: 1.2816, 0.95: 1.6449, 0.975: 1.9600,
                 0.99: 2.3263, 0.995: 2.5758}
        if conf in table:
            return table[conf]
        # Abramowitz-Stegun 26.2.23 rational approximation (abs error < 4.5e-4)
        p = min(max(conf, 1e-10), 1 - 1e-10)
        if p < 0.5:
            return -_z_ppf(1.0 - p)
        t = np.sqrt(-2.0 * np.log(1.0 - p))
        return t - ((2.515517 + 0.802853 * t + 0.010328 * t * t) /
                    (1.0 + 1.432788 * t + 0.189269 * t * t + 0.001308 * t ** 3))


def _split_pair(pair: str) -> tuple:
    """Split currency pair into base and quote."""
    s = pair.replace('/', '').upper()
    return s[:3], s[3:]


# =============================================================================
# DATA STRUCTURES
# =============================================================================

@dataclass
class Position:
    """FX position specification (notional in BASE currency units)."""
    currency_pair: str   # e.g., 'EURUSD' or 'EUR/USD'
    notional: float      # base currency units (absolute size)
    current_rate: float  # quote currency per base
    side: str            # 'LONG' or 'SHORT'

    @property
    def value_usd(self) -> float:
        """
        USD value of the position (absolute, ignoring direction).
        Simplified: assumes USD is either base or quote currency.
        """
        base, quote = _split_pair(self.currency_pair)

        if quote == 'USD':
            # e.g., EUR/USD, GBP/USD - value = notional * rate
            return abs(self.notional * self.current_rate)
        elif base == 'USD':
            # e.g., USD/JPY, USD/CAD - value = notional
            return abs(self.notional)
        else:
            # For non-USD pairs, would need cross rates
            # Simplified assumption for demo
            return abs(self.notional * self.current_rate)

    @property
    def direction_multiplier(self) -> float:
        """Return +1 for LONG, -1 for SHORT."""
        return 1.0 if self.side.upper() == 'LONG' else -1.0


@dataclass
class RiskMetrics:
    """Container for portfolio risk metrics."""
    var_parametric: Optional[Dict[float, float]] = None
    var_historical: Optional[Dict[float, float]] = None
    var_monte_carlo: Optional[Dict[float, float]] = None
    expected_shortfall: Optional[Dict[float, float]] = None

    def __post_init__(self):
        """Replace any unset metric fields with empty dicts."""
        if self.var_parametric is None:
            self.var_parametric = {}
        if self.var_historical is None:
            self.var_historical = {}
        if self.var_monte_carlo is None:
            self.var_monte_carlo = {}
        if self.expected_shortfall is None:
            self.expected_shortfall = {}


# =============================================================================
# PORTFOLIO RISK MANAGER
# =============================================================================

class PortfolioRiskManager:
    """
    Comprehensive FX portfolio risk management system.

    Provides:
    - Value at Risk (VaR) - Parametric, Historical, Monte Carlo
    - Expected Shortfall (ES/CVaR)
    - Stress testing
    - Position limit monitoring
    - Correlation analysis
    """

    def __init__(self):
        """Initialize risk manager."""
        self.positions: Dict[str, Position] = {}
        self.historical_returns: Dict[str, np.ndarray] = {}
        self.correlation_matrix: Optional[pd.DataFrame] = None

    def add_position(self, position_id: str, position: Position):
        """Add a position to the portfolio."""
        self.positions[position_id] = position

    def set_historical_returns(self, currency_pair: str, returns: np.ndarray):
        """
        Set historical returns for a currency pair.

        Parameters:
        -----------
        currency_pair : str
            Currency pair
        returns : np.ndarray
            Array of historical returns (e.g., daily log returns)
        """
        self.historical_returns[currency_pair] = returns

    def set_correlation_matrix(self, correlation_matrix: pd.DataFrame):
        """
        Set correlation matrix for currency pairs.

        Parameters:
        -----------
        correlation_matrix : pd.DataFrame
            Correlation matrix with currency pairs as index/columns
        """
        self.correlation_matrix = correlation_matrix

    def calculate_parametric_var(self, confidence_level: float = 0.95,
                                 time_horizon_days: int = 1) -> Dict:
        """
        Calculate parametric VaR using variance-covariance approach.

        Assumes normal distribution of returns.

        Parameters:
        -----------
        confidence_level : float
            Confidence level (e.g., 0.95 for 95% VaR)
        time_horizon_days : int
            Time horizon in days

        Returns:
        --------
        dict
            VaR and supporting statistics
        """
        if not self.positions:
            return {'error': 'No positions in portfolio'}

        # Calculate portfolio volatility
        total_value = sum(pos.value_usd for pos in self.positions.values())

        if total_value == 0:
            return {'var': 0.0, 'portfolio_value': 0.0}

        # Simplified approach: use average volatility across pairs
        # In production, would use full covariance matrix
        volatilities = []
        weights = []

        for pos_id, pos in self.positions.items():
            pair_clean = pos.currency_pair.replace('/', '').upper()

            if pair_clean in self.historical_returns:
                vol = np.std(self.historical_returns[pair_clean])
                weight = pos.value_usd / total_value
                volatilities.append(vol)
                weights.append(weight * pos.direction_multiplier)

        if not volatilities:
            # Fallback: use typical FX volatility
            portfolio_vol = 0.08 / np.sqrt(252)  # 8% annual → daily
        else:
            # Weighted average volatility (simplified)
            portfolio_vol = np.average(volatilities, weights=np.abs(weights))

        # Scale to time horizon
        horizon_vol = portfolio_vol * np.sqrt(time_horizon_days)

        # Calculate VaR
        z_score = _z_ppf(confidence_level)
        var_usd = total_value * horizon_vol * z_score

        return {
            'var': var_usd,
            'confidence_level': confidence_level,
            'time_horizon_days': time_horizon_days,
            'portfolio_value': total_value,
            'portfolio_volatility': portfolio_vol,
            'var_percent': (var_usd / total_value) * 100 if total_value > 0 else 0
        }

    def calculate_historical_var(self, confidence_level: float = 0.95,
                                 lookback_days: int = 250) -> Dict:
        """
        Calculate historical VaR using historical simulation.

        Non-parametric approach using actual historical returns.

        Parameters:
        -----------
        confidence_level : float
            Confidence level
        lookback_days : int
            Number of historical days to use

        Returns:
        --------
        dict
            VaR, ES, and distribution statistics
        """
        if not self.positions or not self.historical_returns:
            return {'error': 'Insufficient data'}

        # Calculate historical P&L for each scenario
        pnl_scenarios = []
        total_value = sum(pos.value_usd for pos in self.positions.values())

        # Get minimum number of observations across all pairs
        min_obs = min(len(returns) for returns in self.historical_returns.values())
        min_obs = min(min_obs, lookback_days)

        if min_obs < 50:
            return {'error': 'Insufficient historical data'}

        # Calculate P&L for each historical scenario
        for i in range(min_obs):
            scenario_pnl = 0.0

            for pos_id, pos in self.positions.items():
                pair_clean = pos.currency_pair.replace('/', '').upper()

                if pair_clean in self.historical_returns:
                    returns = self.historical_returns[pair_clean]
                    if i < len(returns):
                        ret = returns[-(i + 1)]  # Go backwards in time
                        pnl = pos.value_usd * pos.direction_multiplier * ret
                        scenario_pnl += pnl

            pnl_scenarios.append(scenario_pnl)

        pnl_array = np.array(pnl_scenarios)

        # Calculate VaR (loss is negative P&L)
        var_percentile = (1 - confidence_level) * 100
        var = -np.percentile(pnl_array, var_percentile)

        # Calculate Expected Shortfall (average loss beyond VaR)
        losses = -pnl_array[pnl_array < -var]
        expected_shortfall = np.mean(losses) if len(losses) > 0 else var

        return {
            'var': var,
            'expected_shortfall': expected_shortfall,
            'confidence_level': confidence_level,
            'scenarios': len(pnl_scenarios),
            'portfolio_value': total_value,
            'var_percent': (var / total_value) * 100 if total_value > 0 else 0,
            'worst_loss': -np.min(pnl_array),
            'best_gain': np.max(pnl_array)
        }

    def calculate_monte_carlo_var(self, confidence_level: float = 0.95,
                                  num_simulations: int = 10000,
                                  time_horizon_days: int = 1) -> Dict:
        """
        Calculate VaR using Monte Carlo simulation.

        Parameters:
        -----------
        confidence_level : float
            Confidence level
        num_simulations : int
            Number of scenarios to simulate
        time_horizon_days : int
            Time horizon

        Returns:
        --------
        dict
            VaR and simulation statistics
        """
        if not self.positions or not self.historical_returns:
            return {'error': 'Insufficient data'}

        total_value = sum(pos.value_usd for pos in self.positions.values())
        pnl_simulations = []

        for _ in range(num_simulations):
            scenario_pnl = 0.0

            for pos_id, pos in self.positions.items():
                pair_clean = pos.currency_pair.replace('/', '').upper()

                if pair_clean in self.historical_returns:
                    # Use historical mean and std for simulation
                    returns = self.historical_returns[pair_clean]
                    mu = np.mean(returns)
                    sigma = np.std(returns)

                    # Simulate return
                    simulated_return = np.random.normal(
                        mu * time_horizon_days,
                        sigma * np.sqrt(time_horizon_days)
                    )

                    pnl = pos.value_usd * pos.direction_multiplier * simulated_return
                    scenario_pnl += pnl

            pnl_simulations.append(scenario_pnl)

        pnl_array = np.array(pnl_simulations)

        # Calculate VaR
        var_percentile = (1 - confidence_level) * 100
        var = -np.percentile(pnl_array, var_percentile)

        return {
            'var': var,
            'confidence_level': confidence_level,
            'simulations': num_simulations,
            'portfolio_value': total_value,
            'var_percent': (var / total_value) * 100 if total_value > 0 else 0,
            'mean_pnl': np.mean(pnl_array),
            'std_pnl': np.std(pnl_array)
        }

    def stress_test(self, scenarios: Dict[str, Dict[str, float]]) -> pd.DataFrame:
        """
        Run stress test scenarios on portfolio.

        Parameters:
        -----------
        scenarios : dict
            Dict mapping scenario names to dict of pair -> shock %
            Example: {'EM Selloff': {'EURUSD': -0.05, 'USDJPY': 0.10}}

        Returns:
        --------
        pd.DataFrame
            Stress test results
        """
        rows = []
        gross = float(sum(p.value_usd for p in self.positions.values())) or 1.0

        for name, shocks in scenarios.items():
            pnl = 0.0
            for pid, pos in self.positions.items():
                key = pos.currency_pair.replace('/', '').upper()
                shock = shocks.get(key, 0.0)
                pnl += pos.value_usd * pos.direction_multiplier * shock
            rows.append({
                'scenario': name,
                'pnl_usd': pnl,
                'pnl_pct_gross': pnl / gross
            })

        return pd.DataFrame(rows).sort_values('pnl_usd')

    def calculate_comprehensive(self, levels: List[float] = [0.95, 0.99]) -> RiskMetrics:
        """
        Calculate all risk metrics at once.

        Parameters:
        -----------
        levels : list
            Confidence levels to calculate

        Returns:
        --------
        RiskMetrics
            Complete risk metrics
        """
        m = RiskMetrics()

        for cl in levels:
            m.var_parametric[cl] = self.calculate_parametric_var(
                confidence_level=cl
            ).get('var', 0.0)

            hv = self.calculate_historical_var(confidence_level=cl)
            m.var_historical[cl] = hv.get('var', 0.0)
            m.expected_shortfall[cl] = hv.get('expected_shortfall', 0.0)

            m.var_monte_carlo[cl] = self.calculate_monte_carlo_var(
                confidence_level=cl
            ).get('var', 0.0)

        return m


# =============================================================================
# DEMONSTRATION FUNCTION
# =============================================================================

def demonstrate_risk_management():
    """
    Comprehensive demonstration of FX risk management.
    """
    print("=" * 80)
    print("CHAPTER 6: SPOT FX RISK MANAGEMENT DEMONSTRATION")
    print("=" * 80)
    print()

    # Initialize risk manager
    print("1. INITIALIZING RISK MANAGER")
    print("-" * 80)
    rm = PortfolioRiskManager()
    print("✓ Risk manager initialized")
    print()

    # Add positions
    print("2. PORTFOLIO POSITIONS")
    print("-" * 80)

    positions_data = [
        ('EUR/USD', 'EURUSD', 25_000_000, 1.0850, 'LONG'),
        ('GBP/USD', 'GBPUSD', 15_000_000, 1.2650, 'SHORT'),
        ('USD/JPY', 'USDJPY', 10_000_000, 145.20, 'LONG'),
    ]

    for pair, pair_clean, notional, rate, side in positions_data:
        pos = Position(pair, notional, rate, side)
        rm.add_position(f'POS_{pair_clean}', pos)
        print(f"{pair}: {side} {notional:,.0f} @ {rate:.4f} "
              f"(Value: ${pos.value_usd:,.0f})")

    total_value = sum(p.value_usd for p in rm.positions.values())
    print(f"\nTotal Portfolio Value: ${total_value:,.0f}")
    print()

    # Add historical returns (simulated)
    print("3. LOADING HISTORICAL DATA")
    print("-" * 80)

    np.random.seed(42)
    for pair_clean in ['EURUSD', 'GBPUSD', 'USDJPY']:
        # Simulate 250 days of returns
        returns = np.random.normal(0.0001, 0.008, 250)
        rm.set_historical_returns(pair_clean, returns)

    print("✓ Loaded 250 days of historical returns")
    print()

    # Calculate VaR - Parametric
    print("4. VALUE AT RISK - PARAMETRIC METHOD")
    print("-" * 80)

    var_param = rm.calculate_parametric_var(confidence_level=0.95)
    print("95% VaR (1-day):")
    print(f"  VaR: ${var_param['var']:,.2f}")
    print(f"  Portfolio Value: ${var_param['portfolio_value']:,.2f}")
    print(f"  VaR as % of Portfolio: {var_param['var_percent']:.2f}%")
    print(f"  Portfolio Volatility (daily): {var_param['portfolio_volatility']:.4f}")
    print()

    # Calculate VaR - Historical
    print("5. VALUE AT RISK - HISTORICAL METHOD")
    print("-" * 80)

    var_hist = rm.calculate_historical_var(confidence_level=0.95)
    print("95% VaR (Historical Simulation):")
    print(f"  VaR: ${var_hist['var']:,.2f}")
    print(f"  Expected Shortfall: ${var_hist['expected_shortfall']:,.2f}")
    print(f"  VaR as % of Portfolio: {var_hist['var_percent']:.2f}%")
    print(f"  Worst Historical Loss: ${var_hist['worst_loss']:,.2f}")
    print(f"  Best Historical Gain: ${var_hist['best_gain']:,.2f}")
    print()

    # Calculate VaR - Monte Carlo
    print("6. VALUE AT RISK - MONTE CARLO METHOD")
    print("-" * 80)

    var_mc = rm.calculate_monte_carlo_var(confidence_level=0.95, num_simulations=10000)
    print("95% VaR (Monte Carlo, 10,000 simulations):")
    print(f"  VaR: ${var_mc['var']:,.2f}")
    print(f"  VaR as % of Portfolio: {var_mc['var_percent']:.2f}%")
    print(f"  Mean Simulated P&L: ${var_mc['mean_pnl']:,.2f}")
    print(f"  Std Dev of P&L: ${var_mc['std_pnl']:,.2f}")
    print()

    # VaR comparison
    print("7. VAR METHODOLOGY COMPARISON")
    print("-" * 80)

    print(f"{'Method':<20} {'95% VaR':<20} {'% of Portfolio':<15}")
    print("-" * 55)
    print(f"{'Parametric':<20} ${var_param['var']:>15,.2f} {var_param['var_percent']:>12.2f}%")
    print(f"{'Historical':<20} ${var_hist['var']:>15,.2f} {var_hist['var_percent']:>12.2f}%")
    print(f"{'Monte Carlo':<20} ${var_mc['var']:>15,.2f} {var_mc['var_percent']:>12.2f}%")
    print()

    # Stress testing
    print("8. STRESS TEST SCENARIOS")
    print("-" * 80)

    scenarios = {
        'USD Strength': {'EURUSD': -0.03, 'GBPUSD': -0.03, 'USDJPY': 0.03},
        'USD Weakness': {'EURUSD': 0.03, 'GBPUSD': 0.03, 'USDJPY': -0.03},
        'Risk Off': {'EURUSD': -0.02, 'GBPUSD': -0.04, 'USDJPY': 0.05},
        'EUR Crisis': {'EURUSD': -0.10, 'GBPUSD': -0.02, 'USDJPY': 0.02},
    }

    stress_results = rm.stress_test(scenarios)

    print(f"{'Scenario':<20} {'P&L (USD)':<20} {'% of Portfolio':<15}")
    print("-" * 55)
    for _, row in stress_results.iterrows():
        print(f"{row['scenario']:<20} ${row['pnl_usd']:>15,.2f} "
              f"{row['pnl_pct_gross']:>12.2%}")

    print()
    print("=" * 80)
    print("DEMONSTRATION COMPLETE")
    print("=" * 80)

    return {
        'var_parametric': var_param,
        'var_historical': var_hist,
        'var_monte_carlo': var_mc,
        'stress_results': stress_results,
        'portfolio_value': total_value
    }


# =============================================================================
# EXPECTED OUTPUT AND INTERPRETATION
# =============================================================================
"""
EXPECTED OUTPUT AND INTERPRETATION

The Risk Management System produces comprehensive portfolio analytics:

1. **Portfolio Composition**:
 Example positions (from the demonstration above):
 - EUR/USD: LONG €25M @ 1.0850 (Value: $27.1M)
 - GBP/USD: SHORT £15M @ 1.2650 (Value: $19.0M)
 - USD/JPY: LONG $10M @ 145.20 (Value: $10.0M)
 Total Portfolio Value: ~$56M

2. **Value at Risk (VaR) - Three Methods**:

 **Parametric VaR** (assumes normal distribution):
 - 95% 1-day VaR: ~$0.7M (≈1.3% of portfolio)
 - Fast calculation, suitable for real-time monitoring
 - May understate fat-tail risk in general; here it is the highest of the three
   because the simplified implementation ignores diversification across pairs

 **Historical VaR** (uses actual returns):
 - 95% 1-day VaR: ~$480K (0.86% of portfolio)
 - Expected Shortfall: ~$620K (average loss beyond VaR)
 - Captures actual market behavior
 - Limited by historical sample

 **Monte Carlo VaR** (simulation):
 - 95% 1-day VaR: ~$465K (0.83% of portfolio)
 - Based on 10,000 scenarios
 - Flexible for complex portfolios
 - Computationally intensive

3. **Expected Shortfall (ES/CVaR)**:
 ES = $620K means: "If we breach the 95% VaR threshold (5% worst cases),
 the average loss in those scenarios is $620K."

 ES is preferred over VaR by Basel III because:
 - Measures tail risk severity, not just probability
 - Coherent risk measure (subadditive)
 - Better captures extreme events
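
As a minimal sketch of how the ES figure is produced (mirroring the logic in
`calculate_historical_var` above; the `pnl` array below is hypothetical stand-in
data, not output from this chapter):

```
import numpy as np

pnl = np.random.normal(0, 300_000, 250)      # hypothetical daily P&L scenarios (USD)
conf = 0.95
var = -np.percentile(pnl, (1 - conf) * 100)  # 95% VaR, reported as a positive loss
tail = -pnl[pnl < -var]                      # losses beyond the VaR threshold
es = tail.mean() if len(tail) else var       # Expected Shortfall (average tail loss)
print(f"VaR: {var:,.0f}   ES: {es:,.0f}")    # ES >= VaR by construction
```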

4. **Stress Test Results**:
 Scenario-based analysis complements VaR by showing P&L under specific, severe
 shock combinations (demo portfolio: long EUR/USD, short GBP/USD, long USD/JPY):

 - **USD Strength** (-3% EUR/GBP, +3% JPY): P&L ~ +$0.06M (short GBP offsets long EUR)
 - **USD Weakness** (+3% EUR/GBP, -3% JPY): P&L ~ -$0.06M
 - **Risk Off** (flight to quality): P&L ~ +$0.7M
 - **EUR Crisis** (-10% EUR): P&L ~ -$2.1M (worst case)

 Key insight: EUR Crisis is the worst scenario because the concentrated long EUR
 exposure is only partially offset by the short GBP and long USD/JPY positions

5. **Practical Applications**:

 **For Risk Managers**:
 - Set position limits based on VaR (e.g., max $1M daily VaR)
 - Monitor intraday VaR for early warning
 - Escalate when positions breach 80% of limit

 **For Traders**:
 - Understand risk budget allocation across positions
 - Size new trades to stay within risk appetite
 - Hedge concentrated exposures identified by stress tests

 **For Senior Management**:
 - Report VaR daily to Risk Committee
 - Demonstrate risk controls to regulators
 - Make strategic decisions on risk appetite

The framework provides institutional-grade risk measurement supporting
regulatory compliance (Basel III, Dodd-Frank) and internal governance.
"""


# =============================================================================
# HOW TO READ THIS
# =============================================================================
"""
HOW TO READ THIS CODE

**VaR Methodologies Compared:**

1. **Parametric VaR** (Variance-Covariance):
 Assume normally distributed returns and scale portfolio volatility by a z-score:
 VaR ≈ z · σ · √h · portfolio value (a numeric sketch follows after this comparison)

 Pros:
 - Fast calculation (matrix multiplication)
 - Smooth, continuous risk measures
 - Easy to explain

 Cons:
 - Assumes normality (fat tails underestimated)
 - Linear approximation breaks for options
 - Correlation instability in crisis

 Best for: Daily monitoring, linear portfolios, stable markets

2. **Historical VaR** (Historical Simulation):
 Replay actual historical scenarios

 Pros:
 - No distributional assumptions
 - Captures actual tail events
 - Simple conceptually

 Cons:
 - Limited by sample (past ≠ future)
 - Slow to reflect regime changes
 - Data-intensive

 Best for: Validation, regulatory reporting, complex portfolios

3. **Monte Carlo VaR**:
 Simulate thousands of possible futures

 Pros:
 - Flexible (any distribution, correlations)
 - Captures path-dependence
 - Stress testing capability

 Cons:
 - Computationally expensive
 - Requires calibration
 - Model risk

 Best for: Complex derivatives, exotic options, custom scenarios
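
A minimal numeric sketch of the parametric formula (the portfolio value and
volatility below are illustrative assumptions, chosen only to make the arithmetic
visible):

```
import numpy as np

portfolio_value = 56_000_000   # USD gross exposure (assumed)
daily_vol = 0.008              # ~0.8% daily portfolio volatility (assumed)
z_95 = 1.6449                  # N(0,1) quantile at 95% confidence
horizon_days = 1

var_1d = portfolio_value * daily_vol * np.sqrt(horizon_days) * z_95
print(f"95% 1-day parametric VaR: ${var_1d:,.0f}")   # ≈ $737K on these inputs
```

Historical and Monte Carlo VaR replace the normality assumption with an empirical
or simulated P&L distribution and read off the same percentile.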

**Expected Shortfall vs VaR:**

VaR answers: "How much can I lose with X% probability?"
ES answers: "If I lose more than VaR, how bad is it on average?"

Example:
- 95% VaR = $1M: "5% chance of losing >$1M"
- 95% ES = $1.5M: "In that 5% tail, average loss is $1.5M"

ES is "coherent" because:
- Subadditive: ES(A+B) ≤ ES(A) + ES(B) (encourages diversification)
- Monotonic: More risk → Higher ES
 - Translation invariant: Adding a cash amount reduces ES by exactly that amount
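
To see why subadditivity matters, here is a small simulation sketch using the
standard two-loan textbook construction (hypothetical exposures, unrelated to the
chapter's FX portfolio); with a large sample, ES adds up across the two exposures
while VaR does not:

```
import numpy as np

rng = np.random.default_rng(0)
n = 200_000
# Two independent loans, each losing 100 with 4% probability, else flat
pnl_a = np.where(rng.random(n) < 0.04, -100.0, 0.0)
pnl_b = np.where(rng.random(n) < 0.04, -100.0, 0.0)

def var_es(pnl, conf=0.95):
    k = int(len(pnl) * (1 - conf))                          # tail sample size
    var = max(-np.percentile(pnl, (1 - conf) * 100), 0.0)   # VaR floored at zero
    es = -np.sort(pnl)[:k].mean()                           # average of worst 5% losses
    return var, es

for label, pnl in [('A', pnl_a), ('B', pnl_b), ('A+B', pnl_a + pnl_b)]:
    var, es = var_es(pnl)
    print(f"{label:>4}: 95% VaR = {var:6.1f}   95% ES = {es:6.1f}")

# VaR(A+B) ≈ 100 > VaR(A) + VaR(B) = 0    -> VaR penalises diversification here
# ES(A+B)  ≈ 103 < ES(A) + ES(B) ≈ 160    -> ES remains subadditive
```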

**Stress Testing Strategy:**

Scenarios should test:
1. **Historical crises**: Aug 2015 China, Brexit, COVID-19
2. **Systematic risks**: Global recession, liquidity crisis
3. **Idiosyncratic**: EUR breakup, EM default

Design principles:
- **Plausible**: Could realistically occur
- **Severe**: Beyond normal VaR threshold
- **Relevant**: Impacts portfolio specifically
- **Diverse**: Multiple risk factors
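
For example, a severe-but-plausible scenario set can be written directly as the
shock dictionary accepted by `stress_test` (the scenario names and shock sizes
below are illustrative assumptions; `rm` is a populated PortfolioRiskManager as in
the demonstration above):

```
scenarios = {
    'CNY Deval Redux': {'USDJPY': -0.04, 'EURUSD': -0.02, 'GBPUSD': -0.02},
    'Global Liquidity Crunch': {'EURUSD': -0.05, 'GBPUSD': -0.06, 'USDJPY': 0.07},
}

results = rm.stress_test(scenarios)
print(results[['scenario', 'pnl_usd', 'pnl_pct_gross']])
```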

**Position Limit Framework:**

Limits cascade from board-level appetite:

```
Board Risk Appetite: $5M daily VaR
 ↓
Desk Limit: $1.5M daily VaR
 ↓
Trader Limit: $500K daily VaR
 ↓
Position Limit: Max $50M notional per pair
```

Breaches trigger:
- 80%: Warning (yellow flag)
- 90%: Approval required for new trades (amber flag)
- 100%: Immediate risk reduction (red flag)
- 110%: Forced liquidation (emergency)
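
A minimal sketch of how these escalation bands might be encoded (the thresholds
mirror the list above; the function name and messages are illustrative):

```
def limit_flag(var_usd: float, limit_usd: float) -> str:
    # Map VaR utilisation against a limit to an escalation flag (illustrative)
    utilisation = var_usd / limit_usd
    if utilisation >= 1.10:
        return 'EMERGENCY - forced liquidation'
    if utilisation >= 1.00:
        return 'RED - immediate risk reduction'
    if utilisation >= 0.90:
        return 'AMBER - approval required for new trades'
    if utilisation >= 0.80:
        return 'YELLOW - warning'
    return 'GREEN - within limit'

print(limit_flag(var_usd=460_000, limit_usd=500_000))   # AMBER at 92% utilisation
```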

**Integration with Other Chapters:**

Risk management sits at the center:
- Chapter 2 (VWAP): Execution strategy drives market impact → VaR inputs
- Chapter 5 (TCA): Transaction costs reduce P&L → risk-adjusted returns
- Chapter 7+ (Forwards): Hedges reduce spot exposure → lower VaR

**Regulatory Context:**

Basel III requirements:
- Calculate 99% 10-day VaR for market risk capital
- Stressed VaR using crisis period data
- Incremental Risk Charge (IRC) for default and migration risk
- Comprehensive risk measure for correlation
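
As an illustration of the first requirement, a 99% 10-day figure can be obtained
either directly from the horizon argument or via the common square-root-of-time
approximation (`rm` is assumed to be a populated PortfolioRiskManager):

```
# Direct: pass the regulatory horizon to the parametric calculator
var_99_10d = rm.calculate_parametric_var(confidence_level=0.99,
                                         time_horizon_days=10)['var']

# Square-root-of-time approximation from the 1-day figure
var_99_1d = rm.calculate_parametric_var(confidence_level=0.99)['var']
print(var_99_10d, var_99_1d * 10 ** 0.5)   # the two agree for this linear model
```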

This framework provides building blocks for regulatory VaR while also
supporting day-to-day trading risk management.
"""


if __name__ == "__main__":
    # Run demonstration
    demonstrate_risk_management()

