# train_dpp.py (DDP-ready)
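#
# Example launch (a sketch, assuming a single node with 4 GPUs; adjust to your setup):
#   torchrun --standalone --nproc_per_node=4 train_dpp.py --backend nccl
# torchrun sets WORLD_SIZE, RANK and LOCAL_RANK in the environment, which
# setup_distributed() below reads. Running plain `python train_dpp.py` falls back
# to single-process (non-DDP) training.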
import os
import math
import argparse
import numpy as np
import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import Adam
from torch.utils.data import DataLoader, DistributedSampler
from models import TimeAwareGPT2, CombinedLoss
from utils import PatientEventDataset


# --- Configuration ---
class TrainConfig:
    # Data parameters
    train_data_path = 'ukb_real_train.bin'
    val_data_path = 'ukb_real_val.bin'
    block_length = 24  # Sequence length

    # Model parameters
    n_embd = 256
    n_layer = 8
    n_head = 8
    pdrop = 0.1
    token_pdrop = 0.1

    # Training parameters
    max_epoch = 200
    batch_size = 128
    lr_initial = 6e-4
    lr_final = 6e-5
    warmup_epochs = 10
    early_stopping_patience = 5

    # Loss parameters
    # 0 = padding, 1 = "no event"
    ignored_token_ids = [0, 1]

    # System parameters (device is reassigned per local_rank inside main())
    device = 'cuda' if torch.cuda.is_available() else 'cpu'


def setup_distributed(backend: str = "nccl"):
    """
    Initialize the process group if launched via torchrun with WORLD_SIZE > 1.
    Returns (is_distributed, world_size, rank, local_rank).
    """
    world_size = int(os.environ.get("WORLD_SIZE", "1"))
    is_distributed = world_size > 1
    if is_distributed:
        if not dist.is_initialized():
            dist.init_process_group(backend=backend, init_method="env://")
        rank = dist.get_rank()
        local_rank = int(os.environ.get("LOCAL_RANK", "0"))
        torch.cuda.set_device(local_rank)
    else:
        rank = 0
        local_rank = 0
    return is_distributed, world_size, rank, local_rank


def cleanup_distributed():
    if dist.is_available() and dist.is_initialized():
        dist.destroy_process_group()


def all_reduce_mean(value: float, device, world_size: int):
    """
    Average a per-process Python float (this process's sum/mean) over all
    processes and return the result as a float.
    """
    tensor = torch.tensor([value], dtype=torch.float32, device=device)
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    tensor /= world_size
    return float(tensor.item())
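
# Example (a sketch): with world_size == 4 and per-rank inputs 1.0, 2.0, 3.0 and 4.0,
# all_reduce_mean() returns 2.5 on every rank (requires an initialized process group).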


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--backend", type=str, default="nccl", choices=["nccl", "gloo", "mpi"])
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()

    # Distributed initialization
    is_dist, world_size, rank, local_rank = setup_distributed(args.backend)

    # Basic environment setup
    torch.manual_seed(args.seed + rank)
    np.random.seed(args.seed + rank)
    torch.backends.cudnn.benchmark = True

    config = TrainConfig()
    device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
    config.device = device
    is_main = (rank == 0)

    # --- 1. Data Loading ---
    if is_main:
        print(f"Loading data from {config.train_data_path} and {config.val_data_path}...")
    train_data_arr = np.memmap(config.train_data_path, dtype=np.uint32, mode='r').reshape(-1, 3)
    val_data_arr = np.memmap(config.val_data_path, dtype=np.uint32, mode='r').reshape(-1, 3)

    # Infer vocab_size from the data (max label + 1)
    vocab_size = int(max(train_data_arr[:, 2].max(), val_data_arr[:, 2].max())) + 1
    if is_main:
        print(f"Inferred vocabulary size: {vocab_size}")

    train_dataset = PatientEventDataset(train_data_arr, config.block_length)
    val_dataset = PatientEventDataset(val_data_arr, config.block_length)

    # Distributed samplers
    if is_dist:
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True, drop_last=False)
        val_sampler = DistributedSampler(val_dataset, num_replicas=world_size, rank=rank, shuffle=False, drop_last=False)
    else:
        train_sampler = None
        val_sampler = None

    train_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        num_workers=4,
        pin_memory=True,
        drop_last=False,
        persistent_workers=True,  # workers are reused across epochs (num_workers > 0)
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        sampler=val_sampler,
        num_workers=4,
        pin_memory=True,
        drop_last=False,
        persistent_workers=True,  # workers are reused across epochs (num_workers > 0)
    )

    # --- 2. Model, Optimizer, and Loss Initialization ---
    if is_main:
        print(f"Initializing model on {config.device}...")
    model = TimeAwareGPT2(
        vocab_size=vocab_size,
        n_embd=config.n_embd,
        n_layer=config.n_layer,
        n_head=config.n_head,
        pdrop=config.pdrop,
        token_pdrop=config.token_pdrop
    ).to(device)
    if is_main and hasattr(model, "get_num_params"):
        print(f"Model initialized with {model.get_num_params():.2f}M trainable parameters.")

    loss_fn = CombinedLoss(config.ignored_token_ids)
    optimizer = Adam(model.parameters(), lr=config.lr_initial)

    # Wrap the model with DDP
    if is_dist:
        model = nn.parallel.DistributedDataParallel(
            model,
            device_ids=[local_rank] if device.type == "cuda" else None,
            output_device=local_rank if device.type == "cuda" else None,
            find_unused_parameters=False,
        )

    # --- 3. Training Loop ---
    best_val_loss = float('inf')
    patience_counter = 0

    # Loss histories are collected (and plotted) on the main process only
    train_losses_ce, train_losses_surv, train_losses_total = [], [], []
    val_losses_ce, val_losses_surv, val_losses_total = [], [], []

    if is_main:
        print("Starting training...")

    stop_training = False
    for epoch in range(config.max_epoch):
        # Set the epoch on the distributed sampler so data is reshuffled each epoch
        if is_dist:
            train_sampler.set_epoch(epoch)

        # --- Learning Rate Scheduling ---
        # Constant lr_initial during warmup, then cosine decay to lr_final
        if epoch < config.warmup_epochs:
            lr = config.lr_initial
        else:
            progress = (epoch - config.warmup_epochs) / (config.max_epoch - config.warmup_epochs)
            lr = config.lr_final + 0.5 * (config.lr_initial - config.lr_final) * (1 + math.cos(math.pi * progress))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        # --- Training Phase ---
        if is_main:
            pbar = tqdm.tqdm(train_loader, desc=f"Epoch {epoch+1}/{config.max_epoch} [Train]")
        else:
            pbar = train_loader  # tqdm is disabled on non-main processes

        model.train()
        train_loss_ce_acc, train_loss_surv_acc = 0.0, 0.0
        train_steps = 0

        for batch in pbar:
            event_seq, time_seq = batch
            event_seq = event_seq.to(device, non_blocking=True)
            time_seq = time_seq.to(device, non_blocking=True)

            # Prepare inputs and targets
            input_events = event_seq[:, :-1]
            input_times = time_seq[:, :-1]
            target_events = event_seq[:, 1:]
            target_wait_times = (time_seq[:, 1:] - time_seq[:, :-1]).float()

            # Forward pass
            logits = model(input_events, input_times)
            loss_ce, loss_survival = loss_fn(logits, target_events, target_wait_times)
            loss = loss_ce + loss_survival

            # Backward pass and optimization
            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            optimizer.step()

            train_loss_ce_acc += float(loss_ce.item())
            train_loss_surv_acc += float(loss_survival.item())
            train_steps += 1

            if is_main and isinstance(pbar, tqdm.tqdm):
                pbar.set_postfix({'loss_ce': f'{loss_ce.item():.4f}', 'loss_surv': f'{loss_survival.item():.4f}', 'lr': f'{lr:.2e}'})

        # Per-process means
        avg_train_loss_ce_local = train_loss_ce_acc / max(train_steps, 1)
        avg_train_loss_surv_local = train_loss_surv_acc / max(train_steps, 1)
        # Average across all processes
        if is_dist:
            avg_train_loss_ce = all_reduce_mean(avg_train_loss_ce_local, device, world_size)
            avg_train_loss_surv = all_reduce_mean(avg_train_loss_surv_local, device, world_size)
        else:
            avg_train_loss_ce = avg_train_loss_ce_local
            avg_train_loss_surv = avg_train_loss_surv_local

        if is_main:
            train_losses_ce.append(avg_train_loss_ce)
            train_losses_surv.append(avg_train_loss_surv)
            train_losses_total.append(avg_train_loss_ce + avg_train_loss_surv)

        # --- Validation Phase ---
        if is_main:
            pbar_val = tqdm.tqdm(val_loader, desc=f"Epoch {epoch+1}/{config.max_epoch} [Val]")
        else:
            pbar_val = val_loader

        model.eval()
        val_loss_ce_acc, val_loss_surv_acc = 0.0, 0.0
        val_steps = 0

        with torch.no_grad():
            for batch in pbar_val:
                event_seq, time_seq = batch
                event_seq = event_seq.to(device, non_blocking=True)
                time_seq = time_seq.to(device, non_blocking=True)

                input_events = event_seq[:, :-1]
                input_times = time_seq[:, :-1]
                target_events = event_seq[:, 1:]
                target_wait_times = (time_seq[:, 1:] - time_seq[:, :-1]).float()

                logits = model(input_events, input_times)
                loss_ce, loss_survival = loss_fn(logits, target_events, target_wait_times)

                val_loss_ce_acc += float(loss_ce.item())
                val_loss_surv_acc += float(loss_survival.item())
                val_steps += 1

                if is_main and isinstance(pbar_val, tqdm.tqdm):
                    pbar_val.set_postfix({'loss_ce': f'{loss_ce.item():.4f}', 'loss_surv': f'{loss_survival.item():.4f}'})

        avg_val_loss_ce_local = val_loss_ce_acc / max(val_steps, 1)
        avg_val_loss_surv_local = val_loss_surv_acc / max(val_steps, 1)
        if is_dist:
            avg_val_loss_ce = all_reduce_mean(avg_val_loss_ce_local, device, world_size)
            avg_val_loss_surv = all_reduce_mean(avg_val_loss_surv_local, device, world_size)
        else:
            avg_val_loss_ce = avg_val_loss_ce_local
            avg_val_loss_surv = avg_val_loss_surv_local
        total_val_loss = avg_val_loss_ce + avg_val_loss_surv

        # Print and record on the main process
        if is_main:
            print(f"Epoch {epoch+1} Summary: \n"
                  f"  Train Loss: {avg_train_loss_ce + avg_train_loss_surv:.4f} (CE: {avg_train_loss_ce:.4f}, Surv: {avg_train_loss_surv:.4f})\n"
                  f"  Val Loss: {total_val_loss:.4f} (CE: {avg_val_loss_ce:.4f}, Surv: {avg_val_loss_surv:.4f})\n"
                  f"  Learning Rate: {lr:.6f}")
            val_losses_ce.append(avg_val_loss_ce)
            val_losses_surv.append(avg_val_loss_surv)
            val_losses_total.append(total_val_loss)

        # --- Early Stopping Check (based on the aggregated total_val_loss) ---
        improved = False
        if is_main:
            if total_val_loss < best_val_loss:
                best_val_loss = total_val_loss
                patience_counter = 0
                improved = True
                print(f"Validation loss improved to {best_val_loss:.4f}. Saving checkpoint...")
                # DDP: save the underlying module's state_dict
                state_dict = model.module.state_dict() if isinstance(model, nn.parallel.DistributedDataParallel) else model.state_dict()
                torch.save(state_dict, 'best_model_checkpoint.pt')
            else:
                if epoch >= config.warmup_epochs:
                    patience_counter += 1
                print(f"Validation loss did not improve. Patience: {patience_counter}/{config.early_stopping_patience}")
            stop_training = patience_counter >= config.early_stopping_patience

        # Broadcast the stop flag from rank 0 to all processes so they exit together
        if is_dist:
            flag_tensor = torch.tensor([1 if stop_training else 0], device=device, dtype=torch.int32)
            dist.broadcast(flag_tensor, src=0)
            stop_training = bool(int(flag_tensor.item()))

        if stop_training:
            if is_main:
                print("\nEarly stopping triggered due to no improvement in validation loss.")
            break

    # --- Save Best Model at the End (main process only) ---
    if is_main:
        if best_val_loss != float('inf'):
            print(f"\nTraining finished. Loading best model from checkpoint with validation loss {best_val_loss:.4f}.")
            # For convenience, rebuild a single-device model on the main process,
            # load the checkpoint weights, and save the final copy.
            model_single = TimeAwareGPT2(
                vocab_size=vocab_size,
                n_embd=config.n_embd,
                n_layer=config.n_layer,
                n_head=config.n_head,
                pdrop=config.pdrop,
                token_pdrop=config.token_pdrop
            ).to('cpu')
            model_single.load_state_dict(torch.load('best_model_checkpoint.pt', map_location='cpu'))
            print("Saving final best model to best_model.pt")
            torch.save(model_single.state_dict(), 'best_model.pt')
        else:
            print("\nTraining finished. No best model to save as validation loss never improved.")

        # --- Plot and Save Loss Curves ---
        num_epochs = len(train_losses_total)
        if num_epochs > 0:
            epochs = range(1, num_epochs + 1)
            plt.figure(figsize=(18, 5))

            # Plot CE Loss
            plt.subplot(1, 3, 1)
            plt.plot(epochs, train_losses_ce, label='Train CE')
            plt.plot(epochs, val_losses_ce, label='Val CE')
            plt.title('Cross-Entropy Loss')
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.legend()
            plt.grid(True)

            # Plot Survival Loss
            plt.subplot(1, 3, 2)
            plt.plot(epochs, train_losses_surv, label='Train Survival')
            plt.plot(epochs, val_losses_surv, label='Val Survival')
            plt.title('Survival Loss')
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.legend()
            plt.grid(True)

            # Plot Total Loss
            plt.subplot(1, 3, 3)
            plt.plot(epochs, train_losses_total, label='Train Total')
            plt.plot(epochs, val_losses_total, label='Val Total')
            plt.title('Total Loss')
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.legend()
            plt.grid(True)

            plt.tight_layout()
            plt.savefig('loss_curves.png')
            print("\nLoss curves saved to loss_curves.png")

    # Clean up the distributed process group
    cleanup_distributed()


if __name__ == '__main__':
    main()