---
title: "Scratch: inheritance and mixins"
keywords: fastai
sidebar: home_sidebar
summary: "Scratch notebook exploring BaseModel subclassing, checkpoint saving and loading, per-group learning rates, and backward hooks."
---
{% raw %}
%load_ext autoreload
%autoreload 2
from collections import defaultdict
import os
import torch
import torch.nn as nn

from htools.ml import BaseModel, GRelu, JRelu, variable_lr_optimizer, stats
os.path.exists('scratch_inheritance_and_mixins.ipynb')
True
class HNet(BaseModel):
    
    def __init__(self, x_dim, hidden_dim):
        super().__init__(locals())
        layers = [nn.Linear(x_dim, hidden_dim),
                  nn.LeakyReLU(),
                  nn.Linear(hidden_dim, 3),
                  nn.Softmax(-1)]
        self.layers = nn.Sequential(*layers)
        
    def forward(self, x):
        return self.layers(x)
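`HNet` passes `locals()` straight to `BaseModel.__init__`. Judging from the `from_path` output further down ("Model parameters: {'x_dim': 4, 'hidden_dim': 6}"), BaseModel uses this to record the constructor arguments so a checkpoint can later rebuild the model without repeating them. A minimal sketch of that idea (an assumption about the mechanism, not htools' actual implementation):

import torch.nn as nn

class RecordingModel(nn.Module):
    """Toy stand-in for BaseModel: remember the kwargs a subclass was built with."""

    def __init__(self, init_locals):
        super().__init__()
        # Keep only the constructor arguments, e.g. {'x_dim': 4, 'hidden_dim': 6}.
        self._init_params = {k: v for k, v in init_locals.items()
                             if k not in ('self', '__class__')}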
hnet = HNet(4, 6)
hnet
HNet(
  (layers): Sequential(
    (0): Linear(in_features=4, out_features=6, bias=True)
    (1): LeakyReLU(negative_slope=0.01)
    (2): Linear(in_features=6, out_features=3, bias=True)
    (3): Softmax(dim=-1)
  )
)
x = torch.randint(10, (3, 4), dtype=torch.float)
x
tensor([[7., 5., 8., 2.],
        [1., 3., 8., 2.],
        [9., 7., 4., 0.]])
hnet(x)
tensor([[0.2730, 0.3260, 0.4010],
        [0.1617, 0.3326, 0.5057],
        [0.3660, 0.3071, 0.3270]], grad_fn=<SoftmaxBackward>)
hnet.weight_stats()
[(-0.071, 0.261), (-0.023, 0.379), (0.018, 0.234), (0.197, 0.093)]
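`weight_stats` appears to return one (mean, standard deviation) pair per parameter tensor; with two Linear layers that is four tensors (weight and bias for each). Assuming that interpretation, a rough hand-rolled equivalent would be:

def rough_weight_stats(model):
    # One (mean, std) tuple per parameter tensor, rounded for readability.
    return [(round(p.data.mean().item(), 3), round(p.data.std().item(), 3))
            for p in model.parameters()]

rough_weight_stats(hnet)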
hnet.save(99, '../data')
Epoch 99 weights saved to ../data/model_e99_v2.pth.
hnet2 = HNet.from_path('../data/model_e99.pth')
Epoch 99 weights loaded from ../data/model_e99.pth.
Model parameters: {'x_dim': 4, 'hidden_dim': 6}
Currently in eval mode.
hnet2(x)
tensor([[0.6451, 0.2300, 0.1250],
        [0.4431, 0.2086, 0.3483],
        [0.8782, 0.0980, 0.0237]], grad_fn=<SoftmaxBackward>)
hnet2.weight_stats()
[(-0.005, 0.298), (-0.018, 0.273), (0.013, 0.198), (0.045, 0.127)]
hnet3 = HNet.from_path('../data/model_e99_v2.pth', verbose=False)
hnet3.weight_stats()
[(-0.071, 0.261), (-0.023, 0.379), (0.018, 0.234), (0.197, 0.093)]
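Note the filenames: `save(99, '../data')` wrote to `model_e99_v2.pth`, so BaseModel evidently appends a version suffix rather than overwriting the `model_e99.pth` left over from an earlier run. That is why `hnet2`, loaded from the older file, has different weight stats than `hnet`, while `hnet3`, loaded from the fresh `_v2` file, matches `hnet` exactly. `from_path` also re-instantiates the class from the stored constructor arguments and switches the model to eval mode. A hypothetical sketch of that loading step (the checkpoint key names are assumptions, not htools' actual format):

def load_model(cls, path, verbose=True):
    data = torch.load(path)
    model = cls(**data['params'])             # rebuild with the stored kwargs
    model.load_state_dict(data['state_dict'])
    model.eval()                              # matches "Currently in eval mode."
    if verbose:
        print(f"Model parameters: {data['params']}")
    return model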
class Nested(BaseModel):
    
    def __init__(self, x_dim, hidden):
        super().__init__(locals())
        seq1 = nn.Sequential(nn.Linear(x_dim, hidden),
                             nn.Dropout(),
                             nn.LeakyReLU())
        seq2 = nn.Sequential(nn.Linear(hidden, 1),
                             nn.Sigmoid())
        self.groups = nn.ModuleList([seq1, seq2])
        
    def forward(self, x):
        for group in self.groups:
            x = group(x)
        return x
nested = Nested(4, 8)
nested
Nested(
  (groups): ModuleList(
    (0): Sequential(
      (0): Linear(in_features=4, out_features=8, bias=True)
      (1): Dropout(p=0.5, inplace=False)
      (2): LeakyReLU(negative_slope=0.01)
    )
    (1): Sequential(
      (0): Linear(in_features=8, out_features=1, bias=True)
      (1): Sigmoid()
    )
  )
)
variable_lr_optimizer(nested)
Adam (
Parameter Group 0
    amsgrad: False
    betas: (0.9, 0.999)
    eps: 1e-08
    lr: 0.003
    weight_decay: 0
)
variable_lr_optimizer(groups=nested.groups, lrs=[1, 2])
Adam (
Parameter Group 0
    amsgrad: False
    betas: (0.9, 0.999)
    eps: 1e-08
    lr: 1
    weight_decay: 0

Parameter Group 1
    amsgrad: False
    betas: (0.9, 0.999)
    eps: 1e-08
    lr: 2
    weight_decay: 0
)
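Called with just a model, `variable_lr_optimizer` builds a single parameter group at its default learning rate of 3e-3. Called with `groups` and `lrs`, it creates one parameter group per module group, each with its own learning rate, which is what makes the `ModuleList` split in `Nested` useful (e.g. for discriminative learning rates or freezing part of the network). Building the same two-group optimizer by hand with plain `torch.optim`, as a sketch of what the helper presumably does:

torch.optim.Adam(
    [{'params': group.parameters(), 'lr': lr}
     for group, lr in zip(nested.groups, [1, 2])]
)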

## Train

device = 'cuda' if torch.cuda.is_available() else 'cpu'
device
'cpu'
def train(epochs, train_dl, val_dl, model, criterion, hooks, lr=3e-3, 
          metrics=None, verbose=True):
    for hook in hooks:
        model.register_backward_hook(hook)
        
    optim = variable_lr_optimizer()  # TODO: needs groups/lrs (or a model); see the TypeError below.
    stats = defaultdict(list)

    for epoch in range(epochs):
        model.to(device).train()
        e_loss = 0.0
        e_total = 0
#         for i, (x, y) in enumerate(train_dl):
#             optim.zero_grad()
#             x, y = x.to(device), y.to(device)
#             bs = x.shape[0]
            
#             # Forward pass
#             y_hat = model(x)
#             loss = criterion(y_hat, y, reduction='mean')
            
#             # Backward pass
#             loss.backward()
#             optim.step()
            
#             # Update mini batch stats.
#             e_total += bs
#             e_loss += loss.item() * bs
            
#         # Evaluate on validation set.
#         val_stats = validation_metrics()
        
#         # Update epoch stats.
#         stats['loss'].append(e_loss / e_total)
#         stats['val_loss'].append()
        
        # Print epoch stats.
            
    return stats
def gradient_stats_hook(model, grad_in, grad_out):
    print(stats(grad_out))
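`gradient_stats_hook` follows the `nn.Module` backward-hook signature `(module, grad_input, grad_output)`, so once registered it fires on every backward pass and prints summary stats of the output gradients (assuming htools' `stats` helper accepts the gradient tuple). To see it fire without the full training loop, something like the following should work; note that `register_backward_hook` is deprecated in newer PyTorch versions in favor of `register_full_backward_hook`:

handle = hnet.register_backward_hook(gradient_stats_hook)
out = hnet(x)
out.sum().backward()    # backward pass triggers the hook
handle.remove()         # detach the hook when done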
train(3, None, None, hnet, nn.BCEWithLogitsLoss, [gradient_stats_hook])
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-93-213e077428ec> in <module>
----> 1 train(3, None, None, hnet, nn.BCEWithLogitsLoss, [gradient_stats_hook])

<ipython-input-91-1ddb8bc2cf74> in train(epochs, train_dl, val_dl, model, criterion, hooks, metrics, verbose)
      4         model.register_backward_hook(hook)
      5 
----> 6     optim = variable_lr_optimizer()
      7     stats = defaultdict(list)
      8 

TypeError: variable_lr_optimizer() missing 2 required positional arguments: 'groups' and 'lrs'
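The error is expected given the current body of `train`: `variable_lr_optimizer` is called with no arguments, while (at the time this cell ran) it required `groups` and `lrs`. Passing the model and the `lr` argument through, along the lines of `variable_lr_optimizer(model)` or `variable_lr_optimizer(groups=..., lrs=[...])`, should presumably resolve it; the exact keyword names depend on the current htools signature.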
{% endraw %}