--- title: PyTorch losses keywords: fastai sidebar: home_sidebar summary: "Training losses." description: "Training losses." nb_path: "nbs/losses__pytorch.ipynb" ---
import time

import matplotlib.pyplot as plt
import numpy as np
import torch as t
from scipy.stats import hmean
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset
%matplotlib inline
class Model(nn.Module):
    """Single linear layer producing multi-quantile forecasts.

    Maps a flat input of ``in_features`` values to a tensor of shape
    ``(batch, horizon, n_quantiles)`` — one prediction per forecast step
    and quantile. The layer has no bias term.
    """

    def __init__(self, horizon, n_quantiles, in_features=None):
        """
        Parameters
        ----------
        horizon : int
            Number of forecast steps.
        n_quantiles : int
            Number of quantiles predicted per step.
        in_features : int, optional
            Input dimensionality. Defaults to the module-level ``n_obs``
            global, preserving the original notebook behavior for
            existing callers.
        """
        super(Model, self).__init__()
        self.horizon = horizon
        self.n_quantiles = n_quantiles
        if in_features is None:
            # Backward-compat fallback: original code read the global n_obs.
            in_features = n_obs
        self.linear_layer = nn.Linear(in_features=in_features,
                                      out_features=horizon * n_quantiles,
                                      bias=False)

    def forward(self, x):
        """Return predictions reshaped to (batch, horizon, n_quantiles)."""
        y_hat = self.linear_layer(x)
        y_hat = y_hat.view(-1, self.horizon, self.n_quantiles)
        return y_hat
class Data(Dataset):
    """Minimal map-style dataset pairing inputs ``X`` with targets ``Y``.

    Stores references to the given tensors (no copies) and yields
    ``(X[i], Y[i])`` pairs; length is taken from ``Y``'s first dimension.
    """

    def __init__(self, Y, X):
        # Keep direct references to the underlying tensors.
        self.X = X
        self.Y = Y
        self.len = Y.shape[0]

    def __getitem__(self, index):
        # One sample: (input, target).
        sample_x = self.X[index]
        sample_y = self.Y[index]
        return sample_x, sample_y

    def __len__(self):
        return self.len
# Seed the CUDA RNG for reproducibility (t is the torch module).
t.cuda.manual_seed(7)
# Sample data
n_ts = 1000  # number of time series (rows) to simulate
n_obs = horizon = 10  # input length == forecast horizon; Model reads n_obs as a global
mean = 0.0 # to generate random numbers from N(mean, std)
std = 7.0 # to generate random numbers from N(mean, std)
start = 0.05 # First quantile
end = 0.95 # Last quantile
steps = 4 # Number of quantiles
# NOTE(review): start/end/steps appear unused — `quantiles` below is hard-coded
# with the same endpoints; consider t.linspace(start, end, steps) instead.
# Hyperparameters
batch_size = 500
lr = 0.08
epochs = 100
# Sample data
quantiles = t.Tensor([0.0500, 0.3500, 0.6500, 0.9500])
print(f'quantiles:\n{quantiles}')
# Targets ~ N(mean, std); inputs are all-ones so the linear model learns
# per-output constants (one per horizon step and quantile).
Y = t.normal(mean=mean, std=std, size=(n_ts, n_obs))
X = t.ones(size=(n_ts, n_obs))
Y_test = t.normal(mean=mean, std=std, size=(n_ts, horizon))
X_test = t.ones(size=(n_ts, horizon))
print(f'Y.shape: {Y.shape}, X.shape: {X.shape}')
print(f'Y_test.shape: {Y_test.shape}, X_test.shape: {X_test.shape}')
# Training components consumed as globals by train_model below.
model = Model(horizon=horizon, n_quantiles=len(quantiles))
dataset = Data(X=X, Y=Y)
dataloader = DataLoader(dataset=dataset, batch_size=batch_size)
optimizer = optim.Adam(model.parameters(), lr=lr)
def train_model(model, epochs, print_progress=False,
                dataloader=None, optimizer=None, quantiles=None, loss_fn=None):
    """Run a simple training loop and record the in-sample loss trajectory.

    Parameters
    ----------
    model : nn.Module
        Model mapping a batch ``x`` to predictions ``y_hat``.
    epochs : int
        Number of full passes over the dataloader.
    print_progress : bool, optional
        If True, print step index, elapsed time, and current loss each step.
    dataloader, optimizer, quantiles, loss_fn : optional
        Explicit training components. Each defaults to the module-level
        object of the same name (``MQLoss`` for ``loss_fn``), preserving
        the original notebook behavior for existing callers.

    Returns
    -------
    tuple
        ``(model, training_trajectory)`` where ``training_trajectory`` maps
        ``'epoch'`` to recorded global step indices and ``'train_loss'`` to
        the corresponding detached loss values.
    """
    # Backward-compatible fallbacks to the notebook's module-level globals.
    if dataloader is None:
        dataloader = globals()['dataloader']
    if optimizer is None:
        optimizer = globals()['optimizer']
    if quantiles is None:
        quantiles = globals()['quantiles']
    if loss_fn is None:
        loss_fn = MQLoss
    t0 = time.time()  # renamed from `start` to avoid shadowing the global quantile constant
    i = 0  # global step counter across all epochs
    training_trajectory = {'epoch': [],
                           'train_loss': []}
    for epoch in range(epochs):
        for x, y in dataloader:
            i += 1
            y_hat = model(x)
            training_loss = loss_fn(y=y, y_hat=y_hat, quantiles=quantiles)
            # Record less often in later epochs: every (epoch+1)-th global step.
            if i % (epoch + 1) == 0:
                training_trajectory['epoch'].append(i)
                training_trajectory['train_loss'].append(training_loss.detach().numpy())
            optimizer.zero_grad()
            training_loss.backward()
            optimizer.step()
            # Only build the progress string when it will actually be printed.
            if print_progress:
                display_string = 'Step: {}, Time: {:03.3f}, Insample {}: {:.5f}'.format(
                    i,
                    time.time() - t0,
                    "MQLoss",
                    training_loss.cpu().data.numpy())
                print(display_string)
    return model, training_trajectory