Refactor: Improve attention mechanism and early stopping
- Refactor the self-attention mechanism in `models.py` to use `nn.MultiheadAttention` for better performance and clarity.
- Disable early stopping check during warmup epochs in `train.py` to improve training stability.
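The `train.py` portion of this change is not shown in the diff below. As a rough sketch of the intent, an early-stopping check gated on warmup might look like the following (the `EarlyStopper` class and the `warmup_epochs` / `patience` names are illustrative assumptions, not the repository's actual code):

```python
# Illustrative sketch only -- train.py is not part of this diff, and these
# names (EarlyStopper, warmup_epochs, patience) are assumed for illustration.

class EarlyStopper:
    """Signals a stop once validation loss has not improved for `patience` epochs."""

    def __init__(self, patience: int = 3):
        self.patience = patience
        self.best_loss = float("inf")
        self.counter = 0

    def step(self, val_loss: float) -> bool:
        if val_loss < self.best_loss:
            self.best_loss = val_loss
            self.counter = 0
        else:
            self.counter += 1
        return self.counter >= self.patience


warmup_epochs = 5
stopper = EarlyStopper(patience=3)

# Stand-in validation losses: noisy while the learning rate warms up, then degrading.
val_losses = [2.0, 2.4, 2.2, 2.5, 2.3, 1.9, 1.8, 1.85, 1.9, 1.95]

for epoch, val_loss in enumerate(val_losses):
    # Skip the early-stopping check entirely during warmup, so noisy early
    # losses cannot terminate training prematurely.
    if epoch < warmup_epochs:
        continue
    if stopper.step(val_loss):
        print(f"early stop at epoch {epoch}")
        break
```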
models.py
@@ -2,57 +2,15 @@ import torch
 import torch.nn as nn
 from torch.nn import functional as F
 from typing import Tuple
 import math
 
-class CausalSelfAttention(nn.Module):
-    """
-    A vanilla multi-head masked self-attention layer with a projection at the end.
-    """
-
-    def __init__(self, n_embd: int, n_head: int, pdrop: float):
-        super().__init__()
-        assert n_embd % n_head == 0
-        # key, query, value projections for all heads
-        self.c_attn = nn.Linear(n_embd, 3 * n_embd)
-        # output projection
-        self.c_proj = nn.Linear(n_embd, n_embd)
-        # regularization
-        self.attn_dropout = nn.Dropout(pdrop)
-        self.resid_dropout = nn.Dropout(pdrop)
-        self.n_head = n_head
-        self.n_embd = n_embd
-
-    def forward(self, x: torch.Tensor, custom_mask: torch.Tensor) -> torch.Tensor:
-        B, L, D = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
-
-        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
-        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
-        k = k.view(B, L, self.n_head, D // self.n_head).transpose(1, 2) # (B, nh, L, hs)
-        q = q.view(B, L, self.n_head, D // self.n_head).transpose(1, 2) # (B, nh, L, hs)
-        v = v.view(B, L, self.n_head, D // self.n_head).transpose(1, 2) # (B, nh, L, hs)
-
-        # causal self-attention; Self-attend: (B, nh, L, hs) x (B, nh, hs, L) -> (B, nh, L, L)
-        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
-
-        # Apply the time-based causal mask
-        att = att.masked_fill(custom_mask.unsqueeze(1) == 0, float('-inf'))
-
-        att = F.softmax(att, dim=-1)
-        att = self.attn_dropout(att)
-        y = att @ v # (B, nh, L, L) x (B, nh, L, hs) -> (B, nh, L, hs)
-        y = y.transpose(1, 2).contiguous().view(B, L, D) # re-assemble all head outputs side by side
-
-        # output projection
-        y = self.resid_dropout(self.c_proj(y))
-        return y
-
 class Block(nn.Module):
     """ an unassuming Transformer block """
 
     def __init__(self, n_embd: int, n_head: int, pdrop: float):
         super().__init__()
+        self.n_head = n_head
         self.ln_1 = nn.LayerNorm(n_embd)
-        self.attn = CausalSelfAttention(n_embd, n_head, pdrop)
+        self.attn = nn.MultiheadAttention(n_embd, n_head, dropout=pdrop, batch_first=True)
         self.ln_2 = nn.LayerNorm(n_embd)
         self.mlp = nn.ModuleDict(dict(
             c_fc = nn.Linear(n_embd, 4 * n_embd),
@@ -62,9 +20,16 @@ class Block(nn.Module):
         ))
         m = self.mlp
         self.mlpf = lambda x: m.dropout(m.c_proj(m.act(m.c_fc(x)))) # MLP forward
+        self.resid_dropout = nn.Dropout(pdrop)
 
     def forward(self, x: torch.Tensor, custom_mask: torch.Tensor) -> torch.Tensor:
-        x = x + self.attn(self.ln_1(x), custom_mask=custom_mask)
+        normed_x = self.ln_1(x)
+
+        attn_mask = ~custom_mask
+        attn_mask = attn_mask.repeat_interleave(self.n_head, dim=0)
+
+        attn_output, _ = self.attn(normed_x, normed_x, normed_x, attn_mask=attn_mask, need_weights=False)
+        x = x + self.resid_dropout(attn_output)
         x = x + self.mlpf(self.ln_2(x))
         return x
 
@@ -190,13 +155,13 @@ class TimeAwareGPT2(nn.Module):
 
         # 5. Generate attention mask
         # The attention mask combines two conditions:
-        # a) Time-based causality: A token i can attend to a token j only if time_seq[j] < time_seq[i].
+        # a) Time-based causality: A token i can attend to a token j only if time_seq[j] <= time_seq[i].
         # b) Padding mask: Do not attend to positions where the event token is 0.
 
         # a) Time-based causal mask
         t_i = time_seq.unsqueeze(-1) # (B, L, 1)
         t_j = time_seq.unsqueeze(1) # (B, 1, L)
-        time_mask = (t_j < t_i)
+        time_mask = (t_j <= t_i)
 
         # b) Padding mask (prevents attending to key positions that are padding)
         padding_mask = (event_seq != 0).unsqueeze(1) # Shape: (B, 1, L)
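A note on the `<` to `<=` change in the last hunk: with the strict inequality, a token at its sequence's earliest timestamp has no admissible keys at all (its entire attention row is masked, and a softmax over an all-`-inf` row produces NaN), and no token can attend to itself or to co-occurring events. The relaxed form guarantees every token can attend at least to events at its own timestamp. A small worked example with made-up timestamps:

```python
import torch

time_seq = torch.tensor([[0, 0, 1, 2]])  # one sequence, two events share t=0

t_i = time_seq.unsqueeze(-1)  # (B, L, 1) query timestamps
t_j = time_seq.unsqueeze(1)   # (B, 1, L) key timestamps

strict = (t_j < t_i)    # old: the t=0 rows are all False -> nothing to attend to
relaxed = (t_j <= t_i)  # new: every token can attend to its own timestamp

print(strict[0].int())
# tensor([[0, 0, 0, 0],
#         [0, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0]])

print(relaxed[0].int())
# tensor([[1, 1, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]])
```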
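For reference on the mask handling in the new `Block.forward`: `nn.MultiheadAttention` treats a boolean `attn_mask` entry of `True` as "this query/key pair is blocked", which is the inverse of the allowed-to-attend `custom_mask` that the removed `CausalSelfAttention` consumed via `masked_fill`, and a 3-D boolean mask must be expanded to shape `(B * num_heads, L, L)`. A minimal sketch of that conversion (shapes and timestamps are invented for illustration):

```python
import torch
import torch.nn as nn

B, L, D, n_head = 2, 4, 8, 2

# custom_mask[b, i, j] == True means query i may attend to key j,
# matching the masked_fill convention of the removed CausalSelfAttention.
time_seq = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
custom_mask = time_seq.unsqueeze(1) <= time_seq.unsqueeze(-1)  # (B, L, L)

# nn.MultiheadAttention uses the opposite convention for boolean masks
# (True = do not attend) and expects one mask per head: (B * n_head, L, L).
attn_mask = (~custom_mask).repeat_interleave(n_head, dim=0)

mha = nn.MultiheadAttention(D, n_head, batch_first=True)
x = torch.randn(B, L, D)
out, _ = mha(x, x, x, attn_mask=attn_mask, need_weights=False)
print(out.shape)  # torch.Size([2, 4, 8])
```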