---
title: Common components for models
keywords: fastai
sidebar: home_sidebar
summary: "Common functions."
description: "Common functions."
nb_path: "nbs/models_components__common.ipynb"
---
import numpy as np
from sklearn import linear_model
np.random.seed(1)
X1 = np.random.normal(0, 1, (1000,1))
X = np.random.normal(0, 1, (1000, 99))
X = np.concatenate([X1, X], axis=1)
eps = np.random.normal(0, 0.1, (1000))
beta = np.array([1] + [0]*99)
Y = X @ beta.T + eps
Y = np.expand_dims(Y, 1)
print("X.shape", X.shape)
print("beta.shape", beta.shape)
print("Y.shape", Y.shape)
# model = linear_model.Lasso(alpha=0.1)
# model.fit(X, Y)
# print("model.coef_.shape", model.coef_.shape)
# model.coef_
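As a point of comparison, a scikit-learn Lasso on the same data should keep essentially one non-zero coefficient. The sketch below mirrors the commented cell above (same `alpha=0.1`) and simply counts the surviving coefficients; the names `lasso` and `n_zero` are illustrative.

lasso = linear_model.Lasso(alpha=0.1)
lasso.fit(X, Y.ravel())
# Count how many of the 100 coefficients the L1 penalty drives to (near) zero
n_zero = int(np.sum(np.isclose(lasso.coef_, 0.0)))
print(f"non-zero coefficients: {100 - n_zero} / 100")
print(f"coefficient on the informative feature: {lasso.coef_[0]:.3f}")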
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset
import numpy as np
import time
from scipy.stats import hmean
import matplotlib.pyplot as plt
%matplotlib inline
class _Model(nn.Module):
    def __init__(self, in_features, l1_lambda):
        super(_Model, self).__init__()
        self.l1 = L1Regularizer(in_features, l1_lambda)
        self.linear_layer = nn.Linear(in_features=in_features,
                                      out_features=1,
                                      bias=False)

    def forward(self, x):
        x = self.l1(x.float())
        y_hat = self.linear_layer(x)
        return y_hat
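The `L1Regularizer` used by `_Model` is defined elsewhere in this notebook. For readers of this page in isolation, the following is a minimal sketch of the interface the model relies on: a learnable per-feature gate applied elementwise, plus a `regularization()` method returning the scaled L1 norm of those gates. The class name `_L1RegularizerSketch` is hypothetical and not part of the library.

import torch

class _L1RegularizerSketch(nn.Module):
    # Hypothetical stand-in for L1Regularizer, matching the interface used above.
    def __init__(self, in_features, l1_lambda):
        super().__init__()
        self.l1_lambda = l1_lambda
        self.weight = nn.Parameter(torch.ones(in_features))  # one gate per feature

    def forward(self, x):
        return x * self.weight  # elementwise feature scaling

    def regularization(self):
        return self.l1_lambda * self.weight.abs().sum()  # L1 penalty on the gates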
class Data(Dataset):
    # Constructor
    def __init__(self, Y, X):
        self.X = X
        self.Y = Y
        self.len = Y.shape[0]

    # Getter
    def __getitem__(self, index):
        return self.X[index], self.Y[index]

    # Get Length
    def __len__(self):
        return self.len
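As a quick illustration of the `Dataset` wrapper, each item is an `(x, y)` pair that the `DataLoader` below batches; the names `sample_ds`, `x0`, and `y0` are only for this example.

sample_ds = Data(X=X, Y=Y)
x0, y0 = sample_ds[0]
print(len(sample_ds), x0.shape, y0.shape)  # 1000 (100,) (1,)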
model = _Model(in_features=X.shape[1], l1_lambda=0.07)
dataloader = DataLoader(dataset=Data(X=X, Y=Y), batch_size=512)
optimizer = optim.Adam(model.parameters(), lr=0.001)
print(model)
def train_model(model, epochs, print_progress=False):
    # Uses the dataloader and optimizer defined in the cells above (notebook-level globals)
    start = time.time()
    step = 0
    training_trajectory = {'epoch': [],
                           'train_loss': []}
    criterion = nn.MSELoss()

    for epoch in range(epochs):
        for x, y in dataloader:
            x, y = x.float(), y.float()  # Type compatibility
            step += 1
            y_hat = model(x)

            # MSE loss plus the L1 penalty contributed by the regularizer
            training_loss = criterion(y_hat, y) + model.l1.regularization()

            optimizer.zero_grad()
            training_loss.backward()
            optimizer.step()

        if epoch % 100 == 0:
            training_trajectory['epoch'].append(epoch)
            train_loss = training_loss.detach().numpy()
            training_trajectory['train_loss'].append(train_loss)

            display_str = f'epoch: {epoch} step: {step} time: {time.time()-start:03.3f} ** '
            display_str += f'train_loss: {train_loss:.4f}'
            print(display_str)

    return model, training_trajectory
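The loop records the loss every 100 epochs in `training_trajectory`, which the commented plotting cell below consumes. A typical invocation would look like the following (the epoch count is illustrative):

# model, training_trajectory = train_model(model, epochs=1000)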
# plt.plot(training_trajectory['epoch'], training_trajectory['train_loss'])
# plt.xlabel('Epochs')
# plt.ylabel('MSE + L1 Loss')
# plt.grid()
# plt.show()
# model.l1.weight
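Since the synthetic `beta` is known, one way to check the effect of the penalty is to compare the learned gates in `model.l1.weight` with the true coefficients. The sketch below assumes `weight` holds one learnable value per input feature; the threshold is illustrative.

# learned = model.l1.weight.detach().numpy().ravel()
# kept = np.where(np.abs(learned) > 1e-2)[0]          # illustrative threshold
# print("features kept by the L1 gate:", kept)
# print("true non-zero features:      ", np.where(beta != 0)[0])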