// SPF Smart Gateway - Transformer Decoder
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// N stacked decoder layers: masked self-attention → cross-attention → FFN
// Each sublayer has residual connection + layer norm (pre-norm architecture).
// Causal masking prevents attending to future tokens.
// Cross-attention allows decoder to attend to encoder output.
//
// Depends on: tensor.rs, attention.rs, ffn.rs (Layers 0-1)

use crate::tensor::Tensor;
use crate::attention::{AttentionCache, AttentionConfig, MultiHeadAttention};
use crate::ffn::{FfnCache, FfnConfig, FeedForward};

// ============================================================================
// ACTIVATION CACHE (for backward pass — P2-C)
// ============================================================================

/// Cached activations from one decoder layer forward pass (causal/self-only mode)
pub struct DecoderLayerCache {
    /// Input before LN1 (for layer_norm_backward)
    pub ln1_input: Tensor,
    /// Self-attention cache (Q, K, V, attn_weights, input, scale)
    pub self_attn_cache: AttentionCache,
    /// Input before LN3 (before FFN — ln2 unused in causal path)
    pub ln3_input: Tensor,
    /// FFN cache (input, hidden_pre_gelu)
    pub ffn_cache: FfnCache,
    /// Original layer input (for residual backward)
    pub residual_input: Tensor,
}

// ============================================================================
// DECODER CONFIGURATION
// ============================================================================

/// Configuration for the decoder stack
#[derive(Debug, Clone)]
pub struct DecoderConfig {
    /// Number of decoder layers
    pub n_layers: usize,
    /// Model dimension
    pub d_model: usize,
    /// Number of attention heads
    pub n_heads: usize,
    /// Feed-forward hidden dimension
    pub d_ff: usize,
    /// Maximum sequence length
    pub max_seq_len: usize,
    /// Layer norm epsilon
    pub ln_eps: f32,
}

impl DecoderConfig {
    /// SPF Writer default: matches encoder config
    pub fn spf_writer() -> Self {
        Self {
            n_layers: 6,
            d_model: 256,
            n_heads: 8,
            d_ff: 1024,
            max_seq_len: 2048,
            ln_eps: 1e-5,
        }
    }

    /// Smaller config for testing
    pub fn small() -> Self {
        Self {
            n_layers: 2,
            d_model: 64,
            n_heads: 4,
            d_ff: 256,
            max_seq_len: 512,
            ln_eps: 1e-5,
        }
    }
}
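// Usage sketch (illustrative, not part of the API): DecoderConfig is a plain
// Clone-able struct, so a preset can be tweaked with struct-update syntax, as
// the tests at the bottom of this file do:
//
//     let cfg = DecoderConfig { max_seq_len: 128, ..DecoderConfig::small() };
//     let decoder = Decoder::new(cfg, 42);
//     // decoder.num_params() reflects the chosen n_layers / d_model / d_ff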
// ============================================================================
// SINGLE DECODER LAYER
// ============================================================================

/// One decoder layer with three sublayers:
///
/// 1. Masked self-attention (causal — can't see future tokens)
/// 2. Cross-attention (attends to encoder output)
/// 3. Feed-forward network
///
/// Pre-norm architecture:
///   x → LN → MaskedSelfAttn → +residual
///     → LN → CrossAttn → +residual
///     → LN → FFN → +residual
pub struct DecoderLayer {
    /// Masked multi-head self-attention (causal)
    pub self_attn: MultiHeadAttention,
    /// Cross-attention to encoder output
    pub cross_attn: MultiHeadAttention,
    /// Feed-forward network
    pub ffn: FeedForward,
    /// Layer norm before self-attention
    pub ln1_gamma: Tensor,
    pub ln1_beta: Tensor,
    /// Layer norm before cross-attention
    pub ln2_gamma: Tensor,
    pub ln2_beta: Tensor,
    /// Layer norm before FFN
    pub ln3_gamma: Tensor,
    pub ln3_beta: Tensor,
    /// Epsilon for layer norm
    pub ln_eps: f32,
}

impl DecoderLayer {
    /// Initialize a single decoder layer
    pub fn new(d_model: usize, n_heads: usize, d_ff: usize, ln_eps: f32, seed: u64) -> Self {
        let self_attn_config = AttentionConfig {
            d_model,
            n_heads,
            causal: true, // Decoder self-attention is causal
        };
        let cross_attn_config = AttentionConfig {
            d_model,
            n_heads,
            causal: false, // Cross-attention is bidirectional over encoder output
        };
        let ffn_config = FfnConfig { d_model, d_ff };

        Self {
            self_attn: MultiHeadAttention::new(self_attn_config, seed),
            cross_attn: MultiHeadAttention::new(cross_attn_config, seed + 50),
            ffn: FeedForward::new(ffn_config, seed + 100),
            ln1_gamma: Tensor::ones(&[d_model]),
            ln1_beta: Tensor::zeros(&[d_model]),
            ln2_gamma: Tensor::ones(&[d_model]),
            ln2_beta: Tensor::zeros(&[d_model]),
            ln3_gamma: Tensor::ones(&[d_model]),
            ln3_beta: Tensor::zeros(&[d_model]),
            ln_eps,
        }
    }

    /// Forward pass with encoder output for cross-attention
    /// x: decoder input [batch, dec_seq, d_model]
    /// encoder_output: [batch, enc_seq, d_model]
    pub fn forward(&self, x: &Tensor, encoder_output: &Tensor) -> Result<Tensor, String> {
        // 1. Masked self-attention with residual
        let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
        let self_attn_out = self.self_attn.forward(&normed)?;
        let x = x.add(&self_attn_out)?;

        // 2. Cross-attention with residual
        let normed = x.layer_norm(&self.ln2_gamma, &self.ln2_beta, self.ln_eps)?;
        let cross_attn_out = self.cross_attn.forward_cross(&normed, encoder_output)?;
        let x = x.add(&cross_attn_out)?;

        // 3. FFN with residual
        let normed = x.layer_norm(&self.ln3_gamma, &self.ln3_beta, self.ln_eps)?;
        let ffn_out = self.ffn.forward(&normed)?;
        x.add(&ffn_out)
    }

    /// Forward pass without cross-attention (decoder-only / causal LM mode)
    /// Used when running as a causal language model without encoder.
    /// The Researcher transformer uses this mode.
    pub fn forward_self_only(&self, x: &Tensor) -> Result<Tensor, String> {
        // 1. Masked self-attention with residual
        let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
        let self_attn_out = self.self_attn.forward(&normed)?;
        let x = x.add(&self_attn_out)?;

        // Skip cross-attention (no encoder output)

        // 2. FFN with residual (use ln3 — ln2 unused in this path)
        let normed = x.layer_norm(&self.ln3_gamma, &self.ln3_beta, self.ln_eps)?;
        let ffn_out = self.ffn.forward(&normed)?;
        x.add(&ffn_out)
    }
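    // Note: forward_self_only() never touches cross_attn, ln2_gamma, or ln2_beta,
    // so in a purely decoder-only setup those parameters stay at their
    // initialization values; they still count toward num_params() below.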
    /// Forward pass without cross-attention, with cached activations for backward.
    /// Output is IDENTICAL to forward_self_only(). Cache is additional data only.
    pub fn forward_self_only_with_cache(&self, x: &Tensor) -> Result<(Tensor, DecoderLayerCache), String> {
        let residual_input = x.clone();

        // 1. Masked self-attention with residual
        let ln1_input = x.clone();
        let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
        let (self_attn_out, self_attn_cache) = self.self_attn.forward_with_cache(&normed)?;
        let x = x.add(&self_attn_out)?;

        // Skip cross-attention (no encoder output)

        // 2. FFN with residual (use ln3 — ln2 unused in this path)
        let ln3_input = x.clone();
        let normed = x.layer_norm(&self.ln3_gamma, &self.ln3_beta, self.ln_eps)?;
        let (ffn_out, ffn_cache) = self.ffn.forward_with_cache(&normed)?;
        let output = x.add(&ffn_out)?;

        let cache = DecoderLayerCache {
            ln1_input,
            self_attn_cache,
            ln3_input,
            ffn_cache,
            residual_input,
        };

        Ok((output, cache))
    }

    /// Total parameters in this layer
    pub fn num_params(&self) -> usize {
        let d = self.ln1_gamma.numel();
        // self_attn + cross_attn + ffn + 3 layer norms × (gamma + beta)
        self.self_attn.num_params() + self.cross_attn.num_params() + self.ffn.num_params() + 6 * d
    }

    /// Collect all weight tensors
    pub fn weights(&self) -> Vec<&Tensor> {
        let mut w = self.self_attn.weights();
        w.extend(self.cross_attn.weights());
        w.extend(self.ffn.weights());
        w.extend([
            &self.ln1_gamma,
            &self.ln1_beta,
            &self.ln2_gamma,
            &self.ln2_beta,
            &self.ln3_gamma,
            &self.ln3_beta,
        ]);
        w
    }

    /// Collect all weight tensors mutably
    pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
        let mut w = self.self_attn.weights_mut();
        w.extend(self.cross_attn.weights_mut());
        w.extend(self.ffn.weights_mut());
        w.extend([
            &mut self.ln1_gamma,
            &mut self.ln1_beta,
            &mut self.ln2_gamma,
            &mut self.ln2_beta,
            &mut self.ln3_gamma,
            &mut self.ln3_beta,
        ]);
        w
    }
}
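// Backward-pass sketch (hypothetical: the backward functions themselves are not
// defined in this file, so the `backward` call below is an assumed shape, not a
// real API). A training step would run the *_with_cache forward below, then walk
// the per-layer caches in reverse:
//
//     let (output, caches) = decoder.forward_causal_with_cache(&embeddings)?;
//     let mut grad = initial_grad; // d(loss)/d(output), computed by the loss layer
//     for (layer, cache) in decoder.layers.iter().zip(caches.iter()).rev() {
//         // grad = layer.backward(&grad, cache)?; // assumed signature
//     }
//
// Each DecoderLayerCache holds exactly the activations those steps need
// (ln1_input, ln3_input, residual_input, plus the attention and FFN caches).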
// ============================================================================
// DECODER STACK
// ============================================================================

/// Full decoder: positional encoding + N decoder layers + final layer norm.
pub struct Decoder {
    pub config: DecoderConfig,
    /// Positional encoding table [max_seq_len, d_model]
    pub pos_encoding: Tensor,
    /// Stack of decoder layers
    pub layers: Vec<DecoderLayer>,
    /// Final layer norm
    pub final_ln_gamma: Tensor,
    pub final_ln_beta: Tensor,
}

impl Decoder {
    /// Initialize decoder with given config
    pub fn new(config: DecoderConfig, seed: u64) -> Self {
        // Reuse encoder's sinusoidal positional encoding
        let pos_encoding = crate::encoder::sinusoidal_positional_encoding(
            config.max_seq_len,
            config.d_model,
        );

        let layers: Vec<DecoderLayer> = (0..config.n_layers)
            .map(|i| {
                DecoderLayer::new(
                    config.d_model,
                    config.n_heads,
                    config.d_ff,
                    config.ln_eps,
                    seed + (i as u64) * 2000,
                )
            })
            .collect();

        Self {
            final_ln_gamma: Tensor::ones(&[config.d_model]),
            final_ln_beta: Tensor::zeros(&[config.d_model]),
            pos_encoding,
            layers,
            config,
        }
    }

    /// Forward pass with encoder output (encoder-decoder mode)
    /// embeddings: [batch, dec_seq, d_model]
    /// encoder_output: [batch, enc_seq, d_model]
    pub fn forward(
        &self,
        embeddings: &Tensor,
        encoder_output: &Tensor,
    ) -> Result<Tensor, String> {
        let seq_len = embeddings.shape[1];
        if seq_len > self.config.max_seq_len {
            return Err(format!(
                "Sequence length {} exceeds max {}",
                seq_len, self.config.max_seq_len
            ));
        }

        let mut x = self.add_positional_encoding(embeddings)?;
        for layer in &self.layers {
            x = layer.forward(&x, encoder_output)?;
        }
        x.layer_norm(&self.final_ln_gamma, &self.final_ln_beta, self.config.ln_eps)
    }

    /// Forward pass without encoder (decoder-only / causal LM mode)
    /// Used by the Researcher transformer and for autoregressive generation
    pub fn forward_causal(&self, embeddings: &Tensor) -> Result<Tensor, String> {
        let seq_len = embeddings.shape[1];
        if seq_len > self.config.max_seq_len {
            return Err(format!(
                "Sequence length {} exceeds max {}",
                seq_len, self.config.max_seq_len
            ));
        }

        let mut x = self.add_positional_encoding(embeddings)?;
        for layer in &self.layers {
            x = layer.forward_self_only(&x)?;
        }
        x.layer_norm(&self.final_ln_gamma, &self.final_ln_beta, self.config.ln_eps)
    }
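    // Generation sketch (hypothetical: token embedding lookup and the projection
    // to vocabulary logits live outside this module, so `embed`, `project_to_logits`,
    // and `argmax_last` below are assumed helpers, not real APIs). Greedy
    // autoregressive decoding would re-run forward_causal on the growing prefix:
    //
    //     let mut tokens = prompt_tokens.clone();
    //     for _ in 0..max_new_tokens {
    //         let embeddings = embed(&tokens)?;                    // [1, len, d_model]
    //         let hidden = decoder.forward_causal(&embeddings)?;
    //         let next = project_to_logits(&hidden)?.argmax_last(); // assumed helpers
    //         tokens.push(next);
    //     }
    //
    // Causal masking in each layer's self-attention keeps earlier positions'
    // outputs unchanged as the prefix grows.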
    /// Forward pass without encoder, with cached activations for backward.
    /// Output is IDENTICAL to forward_causal(). Caches are additional data only.
    pub fn forward_causal_with_cache(&self, embeddings: &Tensor) -> Result<(Tensor, Vec<DecoderLayerCache>), String> {
        let seq_len = embeddings.shape[1];
        if seq_len > self.config.max_seq_len {
            return Err(format!(
                "Sequence length {} exceeds max {}",
                seq_len, self.config.max_seq_len
            ));
        }

        let mut x = self.add_positional_encoding(embeddings)?;
        let mut layer_caches = Vec::with_capacity(self.layers.len());
        for layer in &self.layers {
            let (out, cache) = layer.forward_self_only_with_cache(&x)?;
            x = out;
            layer_caches.push(cache);
        }
        let output = x.layer_norm(&self.final_ln_gamma, &self.final_ln_beta, self.config.ln_eps)?;
        Ok((output, layer_caches))
    }

    /// Add sinusoidal positional encoding to embeddings
    fn add_positional_encoding(&self, embeddings: &Tensor) -> Result<Tensor, String> {
        let batch = embeddings.shape[0];
        let seq_len = embeddings.shape[1];
        let d_model = embeddings.shape[2];

        let pos_enc = self.pos_encoding.slice(0, seq_len)?;

        let mut data = embeddings.data.clone();
        for b in 0..batch {
            for s in 0..seq_len {
                for d in 0..d_model {
                    data[(b * seq_len + s) * d_model + d] += pos_enc.data[s * d_model + d];
                }
            }
        }
        Tensor::from_data(data, embeddings.shape.clone())
    }

    /// Total parameters in the decoder
    pub fn num_params(&self) -> usize {
        let layer_params: usize = self.layers.iter().map(|l| l.num_params()).sum();
        layer_params + 2 * self.config.d_model // final LN
    }

    /// Collect all weight tensors
    pub fn weights(&self) -> Vec<&Tensor> {
        let mut w: Vec<&Tensor> = Vec::new();
        for layer in &self.layers {
            w.extend(layer.weights());
        }
        w.push(&self.final_ln_gamma);
        w.push(&self.final_ln_beta);
        w
    }

    /// Collect all weight tensors mutably
    pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
        let mut w: Vec<&mut Tensor> = Vec::new();
        for layer in &mut self.layers {
            w.extend(layer.weights_mut());
        }
        w.push(&mut self.final_ln_gamma);
        w.push(&mut self.final_ln_beta);
        w
    }
}

// ============================================================================
// TESTS
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_decoder_layer_with_encoder() {
        let layer = DecoderLayer::new(64, 4, 256, 1e-5, 42);
        let dec_input = Tensor::randn(&[1, 4, 64], 99); // decoder: 4 tokens
        let enc_output = Tensor::randn(&[1, 8, 64], 100); // encoder: 8 tokens
        let out = layer.forward(&dec_input, &enc_output).unwrap();
        assert_eq!(out.shape, vec![1, 4, 64]); // follows decoder seq_len
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_decoder_layer_self_only() {
        let layer = DecoderLayer::new(64, 4, 256, 1e-5, 42);
        let x = Tensor::randn(&[1, 6, 64], 99);
        let out = layer.forward_self_only(&x).unwrap();
        assert_eq!(out.shape, vec![1, 6, 64]);
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_decoder_full_forward() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let dec_emb = Tensor::randn(&[1, 4, 64], 99);
        let enc_out = Tensor::randn(&[1, 8, 64], 100);
        let out = decoder.forward(&dec_emb, &enc_out).unwrap();
        assert_eq!(out.shape, vec![1, 4, 64]);
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_decoder_causal_forward() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let x = Tensor::randn(&[1, 6, 64], 99);
        let out = decoder.forward_causal(&x).unwrap();
        assert_eq!(out.shape, vec![1, 6, 64]);
        assert!(out.data.iter().all(|v| v.is_finite()));
    }
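    // Consistency check (a sketch based on the doc comment for
    // forward_causal_with_cache, which states the output is identical to
    // forward_causal): the cached variant should produce the same tensor and one
    // cache per layer. Assumes both paths are deterministic, which holds for the
    // plain CPU tensor ops used in this file.
    #[test]
    fn test_causal_with_cache_matches_forward_causal() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let x = Tensor::randn(&[1, 6, 64], 99);
        let plain = decoder.forward_causal(&x).unwrap();
        let (cached, caches) = decoder.forward_causal_with_cache(&x).unwrap();
        assert_eq!(cached.shape, plain.shape);
        assert_eq!(cached.data, plain.data); // identical output per the doc comment
        assert_eq!(caches.len(), decoder.layers.len()); // one DecoderLayerCache per layer
    }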
    #[test]
    fn test_decoder_seq_exceeds_max() {
        let config = DecoderConfig {
            max_seq_len: 10,
            ..DecoderConfig::small()
        };
        let decoder = Decoder::new(config, 42);
        let x = Tensor::randn(&[1, 20, 64], 99);
        let enc = Tensor::randn(&[1, 5, 64], 100);
        assert!(decoder.forward(&x, &enc).is_err());
    }

    #[test]
    fn test_decoder_causal_seq_exceeds_max() {
        let config = DecoderConfig {
            max_seq_len: 10,
            ..DecoderConfig::small()
        };
        let decoder = Decoder::new(config, 42);
        let x = Tensor::randn(&[1, 20, 64], 99);
        assert!(decoder.forward_causal(&x).is_err());
    }

    #[test]
    fn test_decoder_num_params() {
        let config = DecoderConfig::small(); // 2 layers, d=64, ff=256
        let decoder = Decoder::new(config, 42);
        let params = decoder.num_params();
        // Each layer: self_attn(16640) + cross_attn(16640) + ffn(33088) + 6×64(LN) = 66752
        // 2 layers + final LN: 2×66752 + 128 = 133632
        assert_eq!(params, 133632);
    }

    #[test]
    fn test_decoder_batch() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let x = Tensor::randn(&[3, 4, 64], 99); // batch=3
        let enc = Tensor::randn(&[3, 6, 64], 100);
        let out = decoder.forward(&x, &enc).unwrap();
        assert_eq!(out.shape, vec![3, 4, 64]);
    }

    #[test]
    fn test_decoder_weights_count() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let weights = decoder.weights();
        // Each layer: 8(self_attn) + 8(cross_attn) + 4(ffn) + 6(LN) = 26
        // 2 layers + 2 final LN = 54
        assert_eq!(weights.len(), 54);
    }

    #[test]
    fn test_decoder_layer_params() {
        let layer = DecoderLayer::new(64, 4, 256, 1e-5, 42);
        // self_attn: 16640, cross_attn: 16640, ffn: 33088, 6×64 LN: 384
        assert_eq!(layer.num_params(), 66752);
    }
}