diff --git a/dev/quantflow.dockerfile b/dev/quantflow.dockerfile
index 1498748..c9c11af 100644
--- a/dev/quantflow.dockerfile
+++ b/dev/quantflow.dockerfile
@@ -20,6 +20,7 @@ COPY mkdocs.yml ./
COPY dev/ ./dev/
COPY docs/ ./docs/
COPY quantflow/ ./quantflow/
+COPY app/ ./app/
RUN uv run ./dev/build-examples
RUN uv run mkdocs build
@@ -31,14 +32,11 @@ WORKDIR /app
# Copy virtualenv from builder
COPY --from=builder /build/.venv /app/.venv
-# Copy application code
+# Copy application code (app/ from builder includes built docs)
COPY quantflow/ ./quantflow/
-COPY app/ ./app/
+COPY --from=builder /build/app ./app
COPY pyproject.toml ./
-# Copy built documentation
-COPY --from=builder /build/app/docs ./app/docs
-
ENV PYTHONPATH=/app
ENV PYTHONUNBUFFERED=1
ENV PATH="/app/.venv/bin:$PATH"
diff --git a/docs/api/options/calibration.md b/docs/api/options/calibration.md
index f385e66..b2eadd8 100644
--- a/docs/api/options/calibration.md
+++ b/docs/api/options/calibration.md
@@ -13,3 +13,5 @@
::: quantflow.options.calibration.heston.DoubleHestonJCalibration
::: quantflow.options.calibration.bns.BNSCalibration
+
+::: quantflow.options.calibration.bns.BNS2Calibration
diff --git a/docs/api/sp/bns.md b/docs/api/sp/bns.md
index ddbe84f..3cca740 100644
--- a/docs/api/sp/bns.md
+++ b/docs/api/sp/bns.md
@@ -1,3 +1,6 @@
# Barndorff-Nielson & Shephard process
::: quantflow.sp.bns.BNS
+
+
+::: quantflow.sp.bns.BNS2
diff --git a/docs/api/sp/index.md b/docs/api/sp/index.md
index 1d72867..a4e5223 100644
--- a/docs/api/sp/index.md
+++ b/docs/api/sp/index.md
@@ -16,7 +16,8 @@ This page gives an overview of all stochastic processes available in the library
|---|---|
| [CIR][quantflow.sp.cir.CIR] | Cox-Ingersoll-Ross square-root diffusion |
| [Vasicek][quantflow.sp.ou.Vasicek] | Gaussian Ornstein-Uhlenbeck process |
-| [GammaOU][quantflow.sp.ou.GammaOU] | Non-Gaussian OU process driven by a Gamma subordinator |
+| [NGOU][quantflow.sp.ou.NGOU] | Generic non-Gaussian OU process driven by a pure-jump Lévy process |
+| [GammaOU][quantflow.sp.ou.GammaOU] | Non-Gaussian OU process with Gamma stationary marginal |
### Jump processes
@@ -33,7 +34,9 @@ This page gives an overview of all stochastic processes available in the library
| [Heston][quantflow.sp.heston.Heston] | Classical square-root stochastic volatility model |
| [HestonJ][quantflow.sp.heston.HestonJ] | Heston model with compound Poisson jumps |
| [DoubleHeston][quantflow.sp.heston.DoubleHeston] | Two independent Heston variance processes |
-| [DoubleHestonJ][quantflow.sp.heston.DoubleHestonJ] | Double Heston with compound Poisson jumps on the second component |
+| [DoubleHestonJ][quantflow.sp.heston.DoubleHestonJ] | Double Heston with compound Poisson jumps on the first component |
+| [BNS][quantflow.sp.bns.BNS] | Barndorff-Nielsen and Shephard model with Gamma-OU variance |
+| [BNS2][quantflow.sp.bns.BNS2] | Two-factor BNS with convex-combination variance |
### Jump diffusion
@@ -41,6 +44,14 @@ This page gives an overview of all stochastic processes available in the library
|---|---|
| [JumpDiffusion][quantflow.sp.jump_diffusion.JumpDiffusion] | Diffusion with compound Poisson jumps |
+### Copulas
+
+| Process | Description |
+|---|---|
+| [Copula][quantflow.sp.copula.Copula] | Abstract base class for bivariate copulas |
+| [IndependentCopula][quantflow.sp.copula.IndependentCopula] | Independence copula $C(u, v) = u v$ |
+| [FrankCopula][quantflow.sp.copula.FrankCopula] | Archimedean Frank copula |
+
## Base classes
::: quantflow.sp.base.StochasticProcess
diff --git a/docs/examples/vol_surface_bns2_calibration.py b/docs/examples/vol_surface_bns2_calibration.py
new file mode 100644
index 0000000..f051f98
--- /dev/null
+++ b/docs/examples/vol_surface_bns2_calibration.py
@@ -0,0 +1,39 @@
+import json
+
+from docs.examples._utils import assets_path, print_model
+from quantflow.options.calibration import BNS2Calibration
+from quantflow.options.pricer import OptionPricer
+from quantflow.options.surface import VolSurface, VolSurfaceInputs, surface_from_inputs
+from quantflow.sp.bns import BNS, BNS2
+
+# Load a saved volatility surface snapshot and build the surface
+with open("docs/examples/volsurface.json") as fp:
+ surface: VolSurface = surface_from_inputs(VolSurfaceInputs(**json.load(fp)))
+
+surface.bs()
+surface.disable_outliers()
+
+# Two-factor BNS: a fast factor for short maturities and a slow one for long.
+# Opposite-sign leverages let one factor lift the OTM-call wing (rho>0) while
+# the other carries the equity-style downside skew (rho<0).
+pricer = OptionPricer(
+ model=BNS2(
+ bns1=BNS.create(vol=0.4, kappa=20.0, decay=20.0, rho=-0.6),
+ bns2=BNS.create(vol=0.5, kappa=0.3, decay=5.0, rho=0.3),
+ weight=0.3,
+ )
+)
+
+calibration: BNS2Calibration[BNS2] = BNS2Calibration(
+ pricer=pricer,
+ vol_surface=surface,
+ moneyness_weight=0.5,
+)
+
+result = calibration.fit()
+print(result.message)
+print_model(calibration.model)
+
+fig = calibration.plot_maturities(max_moneyness=1.5, support=101)
+fig.update_layout(title="BNS2 Calibrated Smiles")
+fig.write_image(assets_path("bns2_calibrated_smile.png"), width=1200)
diff --git a/docs/index.md b/docs/index.md
index adc1441..84dfadc 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,4 +1,4 @@
-#
+#
[](https://badge.fury.io/py/quantflow)
[](https://pypi.org/project/quantflow)
diff --git a/docs/tutorials/bns_calibration.md b/docs/tutorials/bns_calibration.md
index 8e4af10..55be5dd 100644
--- a/docs/tutorials/bns_calibration.md
+++ b/docs/tutorials/bns_calibration.md
@@ -1,16 +1,16 @@
# BNS Volatility Model
This tutorial calibrates the [BNS][quantflow.sp.bns.BNS] stochastic-volatility
-model (Barndorff-Nielsen and Shephard) to an implied volatility surface, using
-the same workflow as the Heston tutorial in
-[Volatility Surface](volatility_surface.md).
+model (Barndorff-Nielsen and Shephard) and its two-factor extension
+[BNS2][quantflow.sp.bns.BNS2] to an implied volatility surface, using the same
+workflow as the Heston tutorial in [Volatility Surface](volatility_surface.md).
BNS is structurally different from Heston. The variance process is a
non-Gaussian Ornstein-Uhlenbeck process driven by a pure-jump Lévy process
(Gamma-OU in this implementation), and the leverage effect is introduced by
correlating the same jumps into the log-price.
-## Model Parameters
+## Single-factor BNS
[BNSCalibration][quantflow.options.calibration.bns.BNSCalibration] fits five
parameters to the surface:
@@ -25,46 +25,38 @@ parameters to the surface:
The BDLP intensity is set as $\lambda = \theta \beta$ so that the stationary
mean of the Gamma-OU variance process equals $\theta$. This gives the same
-$(v_0, \theta)$ parameterisation as Heston.
+$(v_0, \theta)$ parameterisation as Heston. Because the variance is built from
+positive jumps and exponential mean reversion, it stays positive by
+construction; no Feller-style penalty is needed.
-Because the variance is built from positive jumps and exponential mean
-reversion, it stays positive by construction. No Feller-style penalty is
-needed.
-
-## How BNS Fits the Surface
+### How BNS fits the surface
The mechanism that produces a smile in BNS is structurally different from
Heston. Heston relies on a diffusive volatility-of-variance $\sigma$ for the
wings and a spot-variance correlation $\rho$ for the skew, both accumulating
-as $\sqrt{T}$.
-
-BNS instead injects discrete jumps directly into the variance process: each
-jump in $v_t$ is mirrored, scaled by $\rho$, into the log-price. The wing
-thickness is governed by the jump-size distribution (controlled by $\beta$)
-and the skew by $\rho$.
+as $\sqrt{T}$. BNS instead injects discrete jumps directly into the variance
+process: each jump in $v_t$ is mirrored, scaled by $\rho$, into the log-price.
+The wing thickness is governed by the jump-size distribution (controlled by
+$\beta$) and the skew by $\rho$.
A consequence of this structural difference is that the calibrator often
settles at a small $\kappa$ together with a large $\theta$. The time scale of
mean reversion is $1/\kappa$, so when $\kappa$ is small the variance process
barely relaxes towards $\theta$ over the calibration horizon and stays close
-to $v_0$ throughout.
-
-In that regime $\theta$ is only weakly identified by the surface and the
-optimizer can move it freely as long as the jump-driven smile dynamics are
-preserved. The headline number to read in the output is $v_0$, which sets the
-at-the-money level.
+to $v_0$ throughout. In that regime $\theta$ is only weakly identified by the
+surface and the optimizer can move it freely as long as the jump-driven smile
+dynamics are preserved. The headline number to read in the output is $v_0$,
+which sets the at-the-money level.
-## Calibration
+### Calibrated parameters
-The fit reuses [VolModelCalibration][quantflow.options.calibration.base.VolModelCalibration]
-two-stage optimiser from the Heston tutorial: L-BFGS-B for basin search,
-followed by trust-region reflective on the residual vector with parameter
-bounds.
+The fit uses the
+[VolModelCalibration][quantflow.options.calibration.base.VolModelCalibration]
+two-stage optimiser: L-BFGS-B for basin search, followed by trust-region
+reflective on the residual vector with parameter bounds.
--8<-- "docs/examples/output/vol_surface_bns_calibration.out"
-## Calibrated Smile
-
[](../assets/examples/bns_calibrated_smile.png){target="_blank"}
The fit is good for medium and long maturities and visibly off at the front
@@ -74,14 +66,62 @@ Heston-jump-diffusion.
The cause here is structural: BNS adds jumps, but they live in the variance
process, not directly in the log-price. The jump-driven contribution to the
log-price is bounded by the size of the variance jumps multiplied by $|\rho|$,
-which is small for short tenors.
-
-A model with explicit jumps in the log-price (such as
-[HestonJ][quantflow.sp.heston.HestonJ]) or a rough volatility model is better
-suited to the steep short-term skew observed in crypto markets.
+which is small for short tenors. A model with explicit jumps in the log-price
+(such as [HestonJ][quantflow.sp.heston.HestonJ]) or a rough volatility model
+is better suited to the steep short-term skew observed in crypto markets.
-## Code
+### Code
```python
--8<-- "docs/examples/vol_surface_bns_calibration.py"
```
+
+## Two-factor BNS
+
+The original multi-factor BNS extends the single-factor model by replacing the
+variance with a convex combination of independent Gamma-OU processes. With
+weight $w \in [0, 1]$ and a single Brownian motion driving the diffusion,
+
+\begin{equation}
+ \begin{aligned}
+ \sigma^2_t &= w\, v^1_t + (1 - w)\, v^2_t \\
+ dx_t &= \sigma_t\, dw_t
+ + \rho_1\, dz^1_{\kappa_1 t}
+ + \rho_2\, dz^2_{\kappa_2 t}
+ \end{aligned}
+\end{equation}
+
+Pairing a fast-mean-reverting factor with a slow one decouples the
+short-maturity skew from the long-maturity level, in the same spirit as the
+[DoubleHeston][quantflow.sp.heston.DoubleHeston] extension of Heston.
+
+[BNS2Calibration][quantflow.options.calibration.bns.BNS2Calibration] fits
+eleven parameters:
+
+`[v01, theta1, kappa_delta, beta1, rho1, v02, theta2, kappa2, beta2, rho2, w]`
+
+with `kappa1 = kappa2 + kappa_delta` enforcing that the first factor
+mean-reverts faster than the second. Both leverage parameters are free in
+$[-0.9, 0.9]$: a positive $\rho_i$ produces up-jumps in the log-price that
+lift the OTM call wing, while a negative one produces equity-style downside
+skew. There is no warm start, so the optimiser starts from the user-supplied
+initial parameters; pick distinct timescales for `bns1` and `bns2` (and
+consider opposite-sign leverages) to seed a meaningful two-factor fit.
+
+### Calibrated parameters
+
+--8<-- "docs/examples/output/vol_surface_bns2_calibration.out"
+
+[](../assets/examples/bns2_calibrated_smile.png){target="_blank"}
+
+The two-factor variant adds flexibility in the term structure: the fast factor
+absorbs short-dated skew while the slow factor anchors the long end. The
+remaining short-maturity gap is structural in the same way as the single-factor
+case: BNS2 still injects jumps only through the variance process, so the
+log-price wings are bounded by the jump sizes scaled by $|\rho_i|$.
+
+### Code
+
+```python
+--8<-- "docs/examples/vol_surface_bns2_calibration.py"
+```
diff --git a/quantflow/options/calibration/__init__.py b/quantflow/options/calibration/__init__.py
index 2ba19d7..a4c5c2a 100644
--- a/quantflow/options/calibration/__init__.py
+++ b/quantflow/options/calibration/__init__.py
@@ -3,7 +3,7 @@
OptionEntry,
VolModelCalibration,
)
-from .bns import BNSCalibration
+from .bns import BNS2Calibration, BNSCalibration
from .heston import (
DoubleHestonCalibration,
DoubleHestonJCalibration,
@@ -12,6 +12,7 @@
)
__all__ = [
+ "BNS2Calibration",
"BNSCalibration",
"DoubleHestonCalibration",
"DoubleHestonJCalibration",
diff --git a/quantflow/options/calibration/bns.py b/quantflow/options/calibration/bns.py
index 10c8873..99771d9 100644
--- a/quantflow/options/calibration/bns.py
+++ b/quantflow/options/calibration/bns.py
@@ -5,7 +5,7 @@
import numpy as np
from scipy.optimize import Bounds
-from quantflow.sp.bns import BNS
+from quantflow.sp.bns import BNS, BNS2
from .base import VolModelCalibration
@@ -38,7 +38,7 @@ def get_bounds(self) -> Bounds:
v2u = vol_ub**2
return Bounds(
[v2, v2, 1e-3, 1.0, -0.9],
- [v2u, v2u, np.inf, np.inf, 0.0],
+ [v2u, v2u, np.inf, np.inf, 0.9],
)
def get_params(self) -> np.ndarray:
@@ -53,3 +53,85 @@ def set_params(self, params: np.ndarray) -> None:
vp.bdlp.jumps.decay = params[3]
vp.bdlp.intensity = params[1] * params[3]
self.model.rho = params[4]
+
+
+B2 = TypeVar("B2", bound=BNS2)
+
+
+class BNS2Calibration(VolModelCalibration[B2], Generic[B2]):
+ r"""Calibration of the [BNS2][quantflow.sp.bns.BNS2] two-factor BNS model.
+
+ The parameter vector is
+
+ `[v01, theta1, kappa_delta, beta1, rho1, v02, theta2, kappa2, beta2, rho2, w]`
+
+ where `kappa1 = kappa2 + kappa_delta` with `kappa_delta > 0`, enforcing that
+ the first (short-maturity) factor mean-reverts faster than the second, and
+ `w` is the convex-combination weight of the first variance factor. The same
+ $(v_0, \theta)$ parameterisation as
+ [BNSCalibration][quantflow.options.calibration.bns.BNSCalibration] is used
+ for each factor: the BDLP intensity is set as $\lambda_i = \theta_i \beta_i$
+ so the stationary mean of $v^i$ equals $\theta_i$.
+
+ Both leverage parameters are free in $[-0.9, 0.9]$: a positive $\rho_i$
+ produces up-jumps in the log-price that lift the OTM call wing, while a
+ negative one produces equity-style downside skew. The joint fit relies on
+ the user-supplied initial parameters: pick distinct timescales for `bns1`
+ and `bns2` (and consider opposite-sign leverages) to give the optimiser a
+ meaningful two-factor starting point.
+
+ TODO: improve this calibration. The 11-parameter fit is slow (finite-diff
+ Jacobian dominates) and tends to collapse the two timescales into a near
+ single-factor solution unless the initial conditions force them apart.
+ Candidate improvements: analytic Jacobian of the characteristic exponent,
+ a smarter warm start that does not bias the kappas to merge, and tighter
+ bounds on `kappa1` and `beta_i`.
+ """
+
+ def get_bounds(self) -> Bounds:
+ vol_range = self.implied_vol_range()
+ vol_lb = 0.5 * vol_range.lb[0]
+ vol_ub = 1.5 * vol_range.ub[0]
+ v2 = vol_lb**2
+ v2u = vol_ub**2
+ return Bounds(
+ [v2, v2, 1e-4, 1.0, -0.9, v2, v2, 1e-3, 1.0, -0.9, 0.0],
+ [v2u, v2u, np.inf, np.inf, 0.9, v2u, v2u, 5.0, np.inf, 0.9, 1.0],
+ )
+
+ def get_params(self) -> np.ndarray:
+ vp1 = self.model.bns1.variance_process
+ vp2 = self.model.bns2.variance_process
+ kappa_delta = max(vp1.kappa - vp2.kappa, 1e-4)
+ theta1 = vp1.intensity / vp1.beta
+ theta2 = vp2.intensity / vp2.beta
+ return np.asarray(
+ [
+ vp1.rate,
+ theta1,
+ kappa_delta,
+ vp1.beta,
+ self.model.bns1.rho,
+ vp2.rate,
+ theta2,
+ vp2.kappa,
+ vp2.beta,
+ self.model.bns2.rho,
+ self.model.weight,
+ ]
+ )
+
+ def set_params(self, params: np.ndarray) -> None:
+ vp1 = self.model.bns1.variance_process
+ vp1.rate = params[0]
+ vp1.bdlp.jumps.decay = params[3]
+ vp1.bdlp.intensity = params[1] * params[3]
+ self.model.bns1.rho = params[4]
+ vp2 = self.model.bns2.variance_process
+ vp2.rate = params[5]
+ vp2.kappa = params[7]
+ vp2.bdlp.jumps.decay = params[8]
+ vp2.bdlp.intensity = params[6] * params[8]
+ self.model.bns2.rho = params[9]
+ vp1.kappa = vp2.kappa + params[2] # kappa2 + kappa_delta
+ self.model.weight = params[10]
diff --git a/quantflow/sp/bns.py b/quantflow/sp/bns.py
index 2cf6667..0087056 100644
--- a/quantflow/sp/bns.py
+++ b/quantflow/sp/bns.py
@@ -4,6 +4,7 @@
import numpy as np
from pydantic import Field
+from typing_extensions import Annotated, Doc
from ..ta.paths import Paths
from ..utils.types import FloatArrayLike, Vector
@@ -78,6 +79,17 @@ def characteristic_exponent(self, t: FloatArrayLike, u: Vector) -> Vector:
$\rho$ is the leverage parameter, and $(\lambda, \beta)$ are the intensity
and exponential-jump rate of the background driving Lévy process.
"""
+ return self._characteristic_exponent(t, u, 1.0)
+
+ def _characteristic_exponent(
+ self, t: FloatArrayLike, u: Vector, weight: float
+ ) -> Vector:
+ """BNS exponent with the diffusion variance multiplied by `weight`.
+
+ With weight=1 this is the single-factor BNS exponent. With weight<1 it is
+ the contribution of this factor when the price is driven by a single
+ Brownian motion against a convex combination of OU variances (BNS2).
+ """
v = self.variance_process
k = v.kappa
beta = v.beta
@@ -86,9 +98,9 @@ def characteristic_exponent(self, t: FloatArrayLike, u: Vector) -> Vector:
rho = self.rho
iur = 1j * u * rho
- u2 = u * u
- a = iur - 0.5 * u2 / k
- b = 0.5 * u2 * (1 - np.exp(-k * t)) / k
+ u2w = u * u * weight
+ a = iur - 0.5 * u2w / k
+ b = 0.5 * u2w * (1 - np.exp(-k * t)) / k
diffusion = b * v0
bdlp = intensity * (
@@ -101,29 +113,134 @@ def sample(self, n: int, time_horizon: float = 1, time_steps: int = 100) -> Path
return self.sample_from_draws(Paths.normal_draws(n, time_horizon, time_steps))
def sample_from_draws(self, path_dw: Paths, *args: Paths) -> Paths:
- kappa = self.variance_process.kappa
- time_steps = path_dw.time_steps
- time_horizon = path_dw.t
- dt = path_dw.dt
-
- if args:
- path_dz = args[0]
- else:
- # BDLP runs at kappa-rescaled time, so sample over [0, kappa*T]
- path_dz = self.variance_process.bdlp.sample(
- path_dw.samples, kappa * time_horizon, time_steps
+ path_dz = (
+ args[0]
+ if args
+ else self.variance_process.bdlp.sample(
+ path_dw.samples,
+ self.variance_process.kappa * path_dw.t,
+ path_dw.time_steps,
)
+ )
+ v = self._variance_path(path_dz)
+ # Price: x_t = integral sqrt(v) dW + rho * z_{kappa t}
+ diffusion = np.sqrt(v[:-1] * path_dw.dt) * path_dw.data[:-1]
+ paths = np.zeros_like(path_dw.data)
+ paths[1:] = np.cumsum(diffusion, axis=0) + self.rho * path_dz.data[1:]
+ return Paths(t=path_dw.t, data=paths)
- # Variance via the OU recursion v_{i+1} = v_i * e^{-kappa dt} + Delta z_{i+1}
- decay = np.exp(-kappa * dt)
+ def _variance_path(self, path_dz: Paths) -> np.ndarray:
+ """Simulate the OU variance path on the same grid as `path_dz`."""
+ kappa = self.variance_process.kappa
+ decay = np.exp(-kappa * path_dz.dt)
dz = np.diff(path_dz.data, axis=0)
v = np.zeros_like(path_dz.data)
v[0] = self.variance_process.rate
- for i in range(time_steps):
+ for i in range(path_dz.time_steps):
v[i + 1] = v[i] * decay + dz[i]
+ return v
- # Price: x_t = integral sqrt(v) dW + rho * z_{kappa t}
- diffusion = np.sqrt(v[:-1] * dt) * path_dw.data[:-1]
+
+class BNS2(StochasticProcess1D):
+    r"""Two-factor Barndorff-Nielsen & Shephard stochastic volatility model.
+
+ The original multi-factor [BNS](/bibliography#bns) extension drives a single
+ log-price with a single Brownian motion against a convex combination of
+ independent Gamma-OU variances. With weight $w \in [0, 1]$ for the first
+ factor:
+
+ \begin{equation}
+ \begin{aligned}
+ \sigma^2_t &= w\, v^1_t + (1 - w)\, v^2_t \\
+ dx_t &= \sigma_t\, dw_t
+ + \rho_1\, dz^1_{\kappa_1 t}
+ + \rho_2\, dz^2_{\kappa_2 t} \\
+ dv^i_t &= -\kappa_i v^i_t\, dt + dz^i_{\kappa_i t}, \quad i = 1, 2
+ \end{aligned}
+ \end{equation}
+
+ A fast and a slow factor combined this way add flexibility to the term
+ structure of volatility while retaining the analytic tractability of BNS.
+ """
+
+ bns1: BNS = Field(default_factory=BNS, description="First BNS variance factor")
+ bns2: BNS = Field(default_factory=BNS, description="Second BNS variance factor")
+ weight: float = Field(
+ default=0.5,
+ ge=0,
+ le=1,
+ description=(
+ "Weight $w$ of the first variance factor in the convex combination; "
+ "the second factor receives weight $1 - w$"
+ ),
+ )
+
+ def characteristic_exponent(
+ self,
+ t: Annotated[FloatArrayLike, Doc("Time horizon or array of evaluation times")],
+ u: Annotated[Vector, Doc("Characteristic exponent argument")],
+ ) -> Vector:
+ r"""Characteristic exponent as the sum of two weighted BNS exponents.
+
+ Conditional on the variance paths the diffusion is Gaussian with variance
+ $w \int v^1_s\,ds + (1-w) \int v^2_s\,ds$. Independence of the two BDLPs
+ then factorises the unconditional expectation into a product, giving
+
+ \begin{equation}
+ \phi_{x_t,u} = \phi^{(1)}_{x_t,u}\big|_{u^2 \mapsto w u^2}
+ + \phi^{(2)}_{x_t,u}\big|_{u^2 \mapsto (1 - w) u^2}
+ \end{equation}
+
+ where the substitution applies only to the diffusion term ($u^2$) and
+ leaves the leverage term ($i u \rho_i$) unchanged.
+ """
+ w = self.weight
+ return self.bns1._characteristic_exponent(
+ t, u, w
+ ) + self.bns2._characteristic_exponent(t, u, 1.0 - w)
+
+ def sample(
+ self,
+ n: Annotated[int, Doc("Number of sample paths")],
+ time_horizon: Annotated[float, Doc("Time horizon")] = 1,
+ time_steps: Annotated[int, Doc("Number of discrete time steps")] = 100,
+ ) -> Paths:
+ return self.sample_from_draws(Paths.normal_draws(n, time_horizon, time_steps))
+
+ def sample_from_draws(
+ self,
+ path_dw: Annotated[Paths, Doc("Single Brownian motion driving both factors")],
+ *args: Annotated[
+ Paths,
+ Doc("Optional pre-drawn BDLP paths for bns1 and bns2 (in that order)"),
+ ],
+ ) -> Paths:
+ time_horizon = path_dw.t
+ time_steps = path_dw.time_steps
+ n = path_dw.samples
+ path_dz1 = (
+ args[0]
+ if len(args) > 0
+ else self.bns1.variance_process.bdlp.sample(
+ n, self.bns1.variance_process.kappa * time_horizon, time_steps
+ )
+ )
+ path_dz2 = (
+ args[1]
+ if len(args) > 1
+ else self.bns2.variance_process.bdlp.sample(
+ n, self.bns2.variance_process.kappa * time_horizon, time_steps
+ )
+ )
+ v1 = self.bns1._variance_path(path_dz1)
+ v2 = self.bns2._variance_path(path_dz2)
+ w = self.weight
+ sigma2 = w * v1 + (1.0 - w) * v2
+ diffusion = np.sqrt(sigma2[:-1] * path_dw.dt) * path_dw.data[:-1]
paths = np.zeros_like(path_dw.data)
- paths[1:] = np.cumsum(diffusion, axis=0) + self.rho * path_dz.data[1:]
+ paths[1:] = (
+ np.cumsum(diffusion, axis=0)
+ + self.bns1.rho * path_dz1.data[1:]
+ + self.bns2.rho * path_dz2.data[1:]
+ )
return Paths(t=time_horizon, data=paths)
diff --git a/quantflow_tests/test_bns.py b/quantflow_tests/test_bns.py
index 096d193..87705c8 100644
--- a/quantflow_tests/test_bns.py
+++ b/quantflow_tests/test_bns.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from quantflow.sp.bns import BNS
+from quantflow.sp.bns import BNS, BNS2
from quantflow_tests.utils import characteristic_tests
@@ -58,3 +58,50 @@ def test_bns_sample_moments(bns_leverage: BNS) -> None:
float(m.mean_from_characteristic()), abs=0.02
)
assert terminal.std() == pytest.approx(float(m.std_from_characteristic()), abs=0.02)
+
+
+@pytest.fixture
+def bns2() -> BNS2:
+ # Fast-mean-reverting plus slow-mean-reverting factor.
+ return BNS2(
+ bns1=BNS.create(vol=0.3, decay=5, kappa=3, rho=-0.3),
+ bns2=BNS.create(vol=0.4, decay=5, kappa=1, rho=-0.5),
+ weight=0.4,
+ )
+
+
+def test_bns2_characteristic(bns2: BNS2) -> None:
+ assert bns2.bns1.variance_process.kappa > bns2.bns2.variance_process.kappa
+ assert bns2.characteristic(1, 0) == 1
+ m = bns2.marginal(1)
+ characteristic_tests(m)
+ # The leverage term is independent of the diffusion weighting, so the BNS2
+ # mean is just the sum of the per-factor leverage means.
+ m1 = bns2.bns1.marginal(1)
+ m2 = bns2.bns2.marginal(1)
+ expected_mean = float(m1.mean_from_characteristic() + m2.mean_from_characteristic())
+ assert m.mean_from_characteristic() == pytest.approx(expected_mean, abs=1e-6)
+
+
+def test_bns2_collapses_to_single_factor() -> None:
+ # With weight = 1 the second factor only contributes leverage, and removing
+ # that leverage (rho2 = 0) reduces BNS2 to its first-factor BNS.
+ bns1 = BNS.create(vol=0.4, decay=5, kappa=2, rho=-0.4)
+ bns2 = BNS.create(vol=0.5, decay=3, kappa=1, rho=0.0)
+ pair = BNS2(bns1=bns1, bns2=bns2, weight=1.0)
+ u = np.array([0.1, 0.5, 1.0, 2.0, 5.0])
+ np.testing.assert_allclose(
+ pair.characteristic(1, u), bns1.characteristic(1, u), atol=1e-12
+ )
+
+
+def test_bns2_sample_moments(bns2: BNS2) -> None:
+ np.random.seed(42)
+ paths = bns2.sample(5000, time_horizon=1.0, time_steps=200)
+ assert paths.data.shape == (201, 5000)
+ m = bns2.marginal(1)
+ terminal = paths.data[-1]
+ assert terminal.mean() == pytest.approx(
+ float(m.mean_from_characteristic()), abs=0.03
+ )
+ assert terminal.std() == pytest.approx(float(m.std_from_characteristic()), abs=0.03)