improve baseline

2026-01-24 16:15:34 +01:00
parent 3b1d3c0497
commit 57026695d4
10 changed files with 228 additions and 63 deletions

@@ -9,15 +9,52 @@ from architecture import MyModel
 from utils import plot, evaluate_model
 import torch
 import torch.nn as nn
 import numpy as np
 import os
 from torch.utils.data import DataLoader
 from torch.utils.data import Subset
+from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
 import wandb
+
+
+class CombinedLoss(nn.Module):
+    """Combined loss: MSE + L1 + an edge-aware (Sobel) component"""
+
+    def __init__(self, mse_weight=1.0, l1_weight=0.5, edge_weight=0.1):
+        super().__init__()
+        self.mse_weight = mse_weight
+        self.l1_weight = l1_weight
+        self.edge_weight = edge_weight
+        self.mse = nn.MSELoss()
+        self.l1 = nn.L1Loss()
+        # Sobel filters for edge detection
+        sobel_x = torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=torch.float32).view(1, 1, 3, 3)
+        sobel_y = torch.tensor([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=torch.float32).view(1, 1, 3, 3)
+        self.register_buffer('sobel_x', sobel_x.repeat(3, 1, 1, 1))
+        self.register_buffer('sobel_y', sobel_y.repeat(3, 1, 1, 1))
+
+    def edge_loss(self, pred, target):
+        """Compute edge-aware loss using Sobel filters"""
+        pred_edge_x = torch.nn.functional.conv2d(pred, self.sobel_x, padding=1, groups=3)
+        pred_edge_y = torch.nn.functional.conv2d(pred, self.sobel_y, padding=1, groups=3)
+        target_edge_x = torch.nn.functional.conv2d(target, self.sobel_x, padding=1, groups=3)
+        target_edge_y = torch.nn.functional.conv2d(target, self.sobel_y, padding=1, groups=3)
+        edge_loss = self.l1(pred_edge_x, target_edge_x) + self.l1(pred_edge_y, target_edge_y)
+        return edge_loss
+
+    def forward(self, pred, target):
+        mse_loss = self.mse(pred, target)
+        l1_loss = self.l1(pred, target)
+        edge_loss = self.edge_loss(pred, target)
+        total_loss = self.mse_weight * mse_loss + self.l1_weight * l1_loss + self.edge_weight * edge_loss
+        return total_loss
+
+
 def train(seed, testset_ratio, validset_ratio, data_path, results_path, early_stopping_patience, device, learningrate,
           weight_decay, n_updates, use_wandb, print_train_stats_at, print_stats_at, plot_at, validate_at, batchsize,
           network_config: dict):
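For intuition about the new edge term: it runs a depthwise (groups=3) Sobel convolution over prediction and target and compares the resulting gradient maps with L1. A minimal standalone sketch of that component, assuming 3-channel NCHW image tensors (tensor names and sizes here are illustrative, not from the repo):

```python
import torch
import torch.nn.functional as F

# Per-channel Sobel-x kernel; repeat(3, 1, 1, 1) gives one filter per RGB channel,
# so conv2d with groups=3 filters each channel independently (depthwise)
sobel_x = torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]],
                       dtype=torch.float32).view(1, 1, 3, 3).repeat(3, 1, 1, 1)

pred = torch.rand(4, 3, 64, 64)    # hypothetical batch of reconstructions
target = torch.rand(4, 3, 64, 64)  # hypothetical batch of ground-truth images

pred_edges = F.conv2d(pred, sobel_x, padding=1, groups=3)
target_edges = F.conv2d(target, sobel_x, padding=1, groups=3)
edge_term = F.l1_loss(pred_edges, target_edges)
print(pred_edges.shape, edge_term.item())  # torch.Size([4, 3, 64, 64]) and a scalar
```

With edge_weight=0.1 this term only nudges the total loss, but it penalizes blurry reconstructions that plain MSE tolerates.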
@@ -74,11 +111,15 @@ def train(seed, testset_ratio, validset_ratio, data_path, results_path, early_st
     network.to(device)
     network.train()
 
-    # defining the loss
-    mse_loss = torch.nn.MSELoss()
+    # defining the loss - combined loss for better reconstruction
+    combined_loss = CombinedLoss(mse_weight=1.0, l1_weight=0.5, edge_weight=0.1).to(device)
+    mse_loss = torch.nn.MSELoss()  # Keep for evaluation
 
-    # defining the optimizer
-    optimizer = torch.optim.Adam(network.parameters(), lr=learningrate, weight_decay=weight_decay)
+    # defining the optimizer with AdamW for better weight decay handling
+    optimizer = torch.optim.AdamW(network.parameters(), lr=learningrate, weight_decay=weight_decay)
+
+    # Learning rate scheduler for better convergence
+    scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=50, T_mult=2, eta_min=1e-6)
 
     if use_wandb:
         wandb.watch(network, mse_loss, log="all", log_freq=10)
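With T_0=50, T_mult=2, eta_min=1e-6, the schedule cosine-anneals the learning rate down to eta_min over 50 epochs, warm-restarts back to the base rate, then repeats over progressively longer cycles (100, 200, ...). A standalone sketch of that behavior (the Linear model is just a placeholder, not MyModel):

```python
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

model = torch.nn.Linear(8, 8)  # placeholder model, only needed to build an optimizer
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-4)
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=50, T_mult=2, eta_min=1e-6)

for epoch in range(151):
    scheduler.step(epoch)
    if epoch in (0, 49, 50, 149, 150):
        # High right after a restart (0, 50, 150), near eta_min just before one (49, 149)
        print(epoch, scheduler.get_last_lr()[0])
```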
@@ -105,11 +146,15 @@ def train(seed, testset_ratio, validset_ratio, data_path, results_path, early_st
         output = network(input)
 
-        loss = mse_loss(output, target)
+        loss = combined_loss(output, target)
         loss.backward()
+        # Gradient clipping for training stability
+        torch.nn.utils.clip_grad_norm_(network.parameters(), max_norm=1.0)
         optimizer.step()
+        scheduler.step(i + len(loss_list) / len(dataloader_train))
 
         loss_list.append(loss.item())
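Note the ordering inside the loop: the gradient norm is capped after loss.backward() and before optimizer.step(), and the scheduler is advanced by a fractional epoch so the cosine decay moves once per batch rather than once per epoch. A self-contained toy loop with the same ordering (model, data, and names are stand-ins for the script's real objects):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

model = torch.nn.Linear(16, 16)  # stand-in for the real network
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=50, T_mult=2, eta_min=1e-6)
loader = DataLoader(TensorDataset(torch.randn(64, 16), torch.randn(64, 16)), batch_size=8)
criterion = torch.nn.MSELoss()

for epoch in range(3):
    for batch_idx, (x, y) in enumerate(loader):
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        # Cap the global gradient norm at 1.0 before applying the update
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        # epoch + batch fraction -> per-batch scheduler progress
        scheduler.step(epoch + batch_idx / len(loader))
```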
@@ -164,3 +209,5 @@ def train(seed, testset_ratio, validset_ratio, data_path, results_path, early_st
         wandb.summary["testset/loss"] = testset_loss
         wandb.summary["testset/RMSE"] = testset_rmse
         wandb.finish()
+
+    return testset_rmse
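Since the test summary logs both the loss and its RMSE, the conversion is just the square root of the mean squared error; a one-line sketch with a hypothetical value:

```python
import math

testset_loss = 0.0421  # hypothetical mean MSE over the test set
testset_rmse = math.sqrt(testset_loss)  # ~0.205, in the units of the target
```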