Dual RNN Models

PyTorch Models for Sequential Data
from nbdev.config import get_config
# locate the Wiener-Hammerstein test data shipped with the repository
project_root = get_config().config_file.parent
f_path = project_root / 'test_data/WienerHammerstein'
hdf_files = get_hdf_files(f_path)
init_sz = 300
u = ['u']  # input signal column
y = ['y']  # output signal column
# input block: columns u and y with column shifts [0,-1]; target block: y shifted by -1
# items: sliding windows of length 500+1 with step size 100; files with 'valid' in the path form the validation split
seq = DataBlock(blocks=(SequenceBlock.from_hdf(u+y,TensorSequencesInput,clm_shift=[0,-1]),
                        SequenceBlock.from_hdf(y,TensorSequencesOutput,clm_shift=[-1])),
                 get_items=CreateDict([DfHDFCreateWindows(win_sz=500+1,stp_sz=100,clm='u')]),
                 splitter=ApplyToDict(FuncSplitter(lambda o: 'valid' in str(o))))
db = seq.dataloaders(hdf_files,bs=32,dl_type=TfmdDL)
db.one_batch()[0][0].shape,db.one_batch()[0][1].shape
(torch.Size([500, 2]), torch.Size([500, 2]))
db.show_batch(max_n=1)

State Initializer for State Estimation and Autoregressive RNN for Prediction
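
The models in this section implement a two-stage scheme: a diagnosis (state-estimation) RNN reads the first init_sz steps of the measured inputs and outputs to estimate the system state, and a prognosis RNN is started from that state to predict the remaining outputs. The following pure-PyTorch sketch only illustrates this idea and is not the implementation used here; the class name, the batch-first tensor layout and the non-autoregressive prognosis step are simplifications chosen for brevity.

import torch
import torch.nn as nn

class DualRNNSketch(nn.Module):
    def __init__(self, n_u, n_y, hidden_size=50, init_sz=100):
        super().__init__()
        self.init_sz = init_sz
        # diagnosis RNN: sees measured inputs and outputs of the first init_sz steps
        self.rnn_diag = nn.GRU(n_u + n_y, hidden_size, batch_first=True)
        # prognosis RNN: started from the estimated state, sees only the inputs
        self.rnn_prog = nn.GRU(n_u, hidden_size, batch_first=True)
        self.head = nn.Linear(hidden_size, n_y)

    def forward(self, u, y):
        # state estimation on the initialization window
        xy_init = torch.cat([u[:, :self.init_sz], y[:, :self.init_sz]], dim=-1)
        _, h0 = self.rnn_diag(xy_init)
        # prediction for the remaining steps, starting from the estimated state
        h_prog, _ = self.rnn_prog(u[:, self.init_sz:], h0)
        return self.head(h_prog)

u_sig = torch.randn(8, 500, 1)           # batch of measured input sequences
y_sig = torch.randn(8, 500, 1)           # batch of measured output sequences
DualRNNSketch(1, 1)(u_sig, y_sig).shape  # torch.Size([8, 400, 1])

The classes below add options such as dropout, normalization, multiple layers, exchangeable diagnosis modules and an autoregressive prognosis step on top of this pattern.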


source

Diag_RNN

 Diag_RNN (input_size, output_size, output_layer=1, hidden_size=100,
           rnn_layer=1, linear_layer=1, stateful=False, hidden_p=0.0,
           input_p=0.0, weight_p=0.0, rnn_type='gru',
           ret_full_hidden=False, normalization='', **kwargs)

*RNN-based diagnosis module: a GRU or LSTM stack (selected via rnn_type) with optional dropout (hidden_p, input_p, weight_p) and normalization, followed by linear layers. It maps the measured input and output sequences to a hidden-state estimate and serves as the state initializer for the prediction models below.*
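
Illustrative construction only (the argument values are made up); like the other diagnosis modules it is passed to the prediction models further below through their diag_model argument:

diag_rnn = Diag_RNN(2, 50, 2, rnn_layer=2, hidden_size=50)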


source

Diag_RNN_raw

 Diag_RNN_raw (input_size, output_size, output_layer=1, hidden_size=100,
               rnn_layer=1, linear_layer=1, stateful=False)

*Plain variant of Diag_RNN without dropout or normalization options, used as a lightweight diagnosis module (see the example further below).*


source

DiagLSTM

 DiagLSTM (input_size, output_size, output_layer=1, hidden_size=100,
           rnn_layer=1, linear_layer=1, **kwargs)

*LSTM-based diagnosis module, analogous to Diag_RNN but built on LSTM cells.*
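
There is no DiagLSTM example in this notebook; by analogy with the Diag_RNN_raw and Diag_TCN examples further below it can presumably be plugged into NarProg via diag_model (all argument values here are illustrative):

diag_lstm = DiagLSTM(2, 50, 2)
model = NarProg(1, 2, 1, init_sz=100, linear_layer=1, rnn_layer=2, hidden_size=50, diag_model=diag_lstm)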


source

ARProg_Init

 ARProg_Init (n_u, n_x, n_y, init_sz, hidden_size=100, rnn_layer=1,
              diag_model=None, linear_layer=1, final_layer=0,
              hidden_p=0.0, input_p=0.0, weight_p=0.0, rnn_type='gru',
              ret_full_hidden=False, stateful=False, normalization='',
              **kwargs)

*Autoregressive prognosis RNN with built-in state initialization: the first init_sz steps are used to estimate the hidden state, after which the model predicts autoregressively. n_u, n_x and n_y give the number of input, state and output channels (the example below passes n_x=0), and a custom diagnosis network can be supplied via diag_model.*

model = ARProg_Init(len(u),0,len(y),init_sz=init_sz,rnn_layer=1,hidden_size=50)
lrn = Learner(db,model,loss_func=SkipNLoss(mse,init_sz))
lrn.add_cb(TbpttResetCB())
# lrn.fit(1,lr=3e-3)
lrn.fit_flat_cos(1,3e-3,pct_start=0.2)
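SkipNLoss(mse, init_sz) excludes the initialization window from the training loss, so the model is only judged on the part of the sequence it actually has to predict. A minimal sketch of that idea, assuming batch-first sequence tensors and that the wrapper simply drops the first n time steps of prediction and target before applying the wrapped function:

def skip_n_loss(loss_fn, n):
    # evaluate loss_fn only on the time steps after the first n
    def _inner(pred, targ):
        return loss_fn(pred[:, n:], targ[:, n:])
    return _inner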

NarProg


source

NarProg

 NarProg (prog_input_size, diag_input_size, output_size, init_sz,
          hidden_size=100, rnn_layer=1, diag_model=None, linear_layer=1,
          init_diag_only=False, final_layer=0, hidden_p=0.0, input_p=0.0,
          weight_p=0.0, rnn_type='gru', ret_full_hidden=False,
          stateful=False, normalization='', **kwargs)

*Dual-RNN model that combines a diagnosis module (state estimation on the first init_sz steps) with an autoregressive prognosis module for prediction. prog_input_size and diag_input_size set the number of input channels of the two parts; in the examples below the prognosis module receives only the measured input u, while the diagnosis module additionally receives the measured output. A custom diagnosis network can be supplied via diag_model, and init_diag_only=True restricts the diagnosis module to the initialization window.*


source

NarProgCallback

 NarProgCallback (modules, p_state_sync=10000000.0, p_diag_loss=0.0,
                  p_osp_sync=0, p_osp_loss=0, p_tar_loss=0,
                  sync_type='mse', targ_loss_func=<function mae>,
                  osp_n_skip=None, narprog_model=None, detach=False,
                  **kwargs)

Callback that regularizes the output of the NarProg model.

|                | Type     | Default    | Details |
|----------------|----------|------------|---------|
| modules        |          |            |         |
| p_state_sync   | float    | 10000000.0 | scaling factor for the regularization of the hidden-state deviation between the diag and prog modules |
| p_diag_loss    | float    | 0.0        | scaling factor for the loss computed by passing the diag hidden state through the final layer |
| p_osp_sync     | int      | 0          | scaling factor for the regularization of the hidden-state deviation between the one-step prediction and the diag hidden states |
| p_osp_loss     | int      | 0          | scaling factor for the one-step-prediction loss of the prog module |
| p_tar_loss     | int      | 0          | scaling factor for time activation regularization of the combined diag and prog hidden state over the target sequence length |
| sync_type      | str      | mse        |         |
| targ_loss_func | function | mae        |         |
| osp_n_skip     | NoneType | None       | number of elements to skip before the osp loss is applied; defaults to model.init_sz |
| narprog_model  | NoneType | None       |         |
| detach         | bool     | False      |         |
| kwargs         |          |            |         |
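
The p_state_sync term penalizes disagreement between the hidden states of the diagnosis and prognosis modules (presumably captured from the modules passed as the first argument). A conceptual sketch of such a penalty with sync_type='mse', not the callback's actual code:

import torch.nn.functional as F

def state_sync_penalty(h_diag, h_prog, p_state_sync=1e7):
    # additional loss term pulling the two hidden-state trajectories together
    return p_state_sync * F.mse_loss(h_diag, h_prog)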
model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50)
cb = NarProgCallback([model.rnn_diagnosis,model.rnn_prognosis],
                        p_state_sync=0, 
                        p_diag_loss=0.1,
                        p_osp_sync=0,
                        p_osp_loss=0,
                        sync_type='mse')
lrn = Learner(db,model,loss_func=nn.MSELoss(),cbs=cb,opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50)
cb = NarProgCallback([model.rnn_diagnosis,model.rnn_prognosis],
                        p_state_sync=1e-1, 
                        p_diag_loss=0.0,
                        p_osp_sync=0,
                        p_osp_loss=0.1,
                        sync_type='cos_pow')
lrn = Learner(db,model,loss_func=nn.MSELoss(),cbs=cb,opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
epoch train_loss valid_loss time
0 0.092519 0.047769 00:02

TCN as Diagnosis Module


source

Diag_TCN

 Diag_TCN (input_size, output_size, output_layer, hl_width, mlp_layers=0,
           hl_depth=1, act=<class 'torch.nn.modules.activation.Mish'>,
           bn=False, stateful=False, **kwargs)

*Diagnosis module built from a temporal convolutional network (TCN) instead of an RNN; hl_depth and hl_width set the depth and width of the convolutional hidden layers, mlp_layers adds fully connected layers on top, and act selects the activation (Mish by default).*

diag_tcn = Diag_TCN(2,50,2,hl_depth=6,hl_width=20,mlp_layers=3)
model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50,diag_model=diag_tcn)
cb = NarProgCallback([model.rnn_diagnosis,model.rnn_prognosis],
                        p_state_sync=1e6, 
                        p_diag_loss=0.0,
                        p_osp_sync=0,
                        p_osp_loss=0.1,)
lrn = Learner(db,model,loss_func=nn.MSELoss(),cbs=cb,opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
epoch train_loss valid_loss time
0 0.765931 0.060780 00:02

RNN without linear layer as diagnosis module

diag_rnn = Diag_RNN_raw(2,50,2,stateful=False)
model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50,diag_model=diag_rnn)
cb = NarProgCallback([model.rnn_diagnosis,model.rnn_prognosis],
                        p_state_sync=1e6, 
                        p_diag_loss=0.0,
                        p_osp_sync=0,
                        p_osp_loss=0.1,)
lrn = Learner(db,model,loss_func=nn.MSELoss(),cbs=cb,opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
epoch train_loss valid_loss time
0 0.705823 0.052927 00:02

Fast variant with init_sz diagnosis only

model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50,init_diag_only=True)
lrn = Learner(db,model,loss_func=nn.MSELoss(),opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
epoch train_loss valid_loss time
0 0.045889 0.036955 00:02

source

NarProgCallback_variable_init

 NarProgCallback_variable_init (init_sz_min, init_sz_max, **kwargs)

Callback that varies the initialization window size init_sz between init_sz_min and init_sz_max during training.
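
There is no usage example for this callback in the notebook. Assuming it forwards its remaining keyword arguments to NarProgCallback, a sketch might look like this (all values are illustrative):

cb = NarProgCallback_variable_init(init_sz_min=50, init_sz_max=200,
                                   modules=[model.rnn_diagnosis, model.rnn_prognosis])
lrn = Learner(db, model, loss_func=nn.MSELoss(), cbs=cb, opt_func=ranger)
lrn.fit(1, lr=3e-3)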