Dual RNN Models

from nbdev.config import get_config

project_root = get_config().config_file.parent
f_path = project_root / 'test_data/WienerHammerstein'
hdf_files = get_hdf_files(f_path)
init_sz = 300
u = ['u']
y = ['y']
seq = DataBlock(blocks=(SequenceBlock.from_hdf(u+y,TensorSequencesInput,clm_shift=[0,-1]),
                        SequenceBlock.from_hdf(y,TensorSequencesOutput,clm_shift=[-1])),
                get_items=CreateDict([DfHDFCreateWindows(win_sz=500+1,stp_sz=100,clm='u')]),
                splitter=ApplyToDict(FuncSplitter(lambda o: 'valid' in str(o))))
db = seq.dataloaders(hdf_files,bs=32,dl_type=TfmdDL)
db.one_batch()[0][0].shape,db.one_batch()[0][1].shape

(torch.Size([500, 2]), torch.Size([500, 2]))

db.show_batch(max_n=1)
State Initializer for State Estimation and Autoregressive RNN for Prediction
Diag_RNN
Diag_RNN (input_size, output_size, output_layer=1, hidden_size=100, rnn_layer=1, linear_layer=1, stateful=False, hidden_p=0.0, input_p=0.0, weight_p=0.0, rnn_type='gru', ret_full_hidden=False, normalization='', **kwargs)
*Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 20, 5)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:`to`, etc.

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode.
:vartype training: bool*
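Diag_RNN is used on this page as a diagnosis (state-estimation) module. A minimal forward-pass sketch, assuming the class is already in scope as in the cells below, that it consumes batch-first sequences of shape (batch, seq_len, input_size), and that it may return either a tensor or a tuple that includes a hidden state; the exact conventions are assumptions:

import torch

diag = Diag_RNN(input_size=2, output_size=50, hidden_size=50, rnn_layer=1, rnn_type='gru')
x = torch.randn(8, 500, 2)                       # dummy batch: (batch, seq_len, input_size)
out = diag(x)
out = out[0] if isinstance(out, tuple) else out  # unwrap a possible (output, hidden) tuple
print(out.shape)                                 # expected (8, 500, 50) under these assumptions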
Diag_RNN_raw
Diag_RNN_raw (input_size, output_size, output_layer=1, hidden_size=100, rnn_layer=1, linear_layer=1, stateful=False)
DiagLSTM
DiagLSTM (input_size, output_size, output_layer=1, hidden_size=100, rnn_layer=1, linear_layer=1, **kwargs)
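DiagLSTM is not exercised elsewhere on this page; a hedged instantiation sketch under the same assumptions as above (batch-first sequences, possibly tuple-valued return):

import torch

diag_lstm = DiagLSTM(input_size=2, output_size=50, hidden_size=50, rnn_layer=1)
x = torch.randn(8, 500, 2)                        # dummy batch: (batch, seq_len, input_size)
out = diag_lstm(x)
out = out[0] if isinstance(out, tuple) else out   # unwrap a possible (output, hidden) tuple
print(out.shape)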
ARProg_Init
ARProg_Init (n_u, n_x, n_y, init_sz, hidden_size=100, rnn_layer=1, diag_model=None, linear_layer=1, final_layer=0, hidden_p=0.0, input_p=0.0, weight_p=0.0, rnn_type='gru', ret_full_hidden=False, stateful=False, normalization='', **kwargs)
model = ARProg_Init(len(u),0,len(y),init_sz=init_sz,rnn_layer=1,hidden_size=50)
lrn = Learner(db,model,loss_func=SkipNLoss(mse,init_sz))
lrn.add_cb(TbpttResetCB())
# lrn.fit(1,lr=3e-3)
lrn.fit_flat_cos(1,3e-3,pct_start=0.2)
epoch | train_loss | valid_loss | time |
---|---|---|---|
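SkipNLoss(mse, init_sz) above evaluates the loss only after the initialization window, so the model is not penalized while its internal state is still being estimated from the first init_sz samples. A minimal sketch of that idea (not the library implementation), assuming batch-first tensors of shape (batch, seq_len, features):

import torch

def skip_n_loss(loss_func, n_skip):
    "Wrap loss_func so that the first n_skip time steps are ignored."
    def _inner(pred, targ): return loss_func(pred[:, n_skip:], targ[:, n_skip:])
    return _inner

loss = skip_n_loss(torch.nn.functional.mse_loss, 300)   # ignore the 300-step initialization window
pred, targ = torch.randn(4, 500, 1), torch.randn(4, 500, 1)
print(loss(pred, targ))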
NarProg
NarProg
NarProg (prog_input_size, diag_input_size, output_size, init_sz, hidden_size=100, rnn_layer=1, diag_model=None, linear_layer=1, init_diag_only=False, final_layer=0, hidden_p=0.0, input_p=0.0, weight_p=0.0, rnn_type='gru', ret_full_hidden=False, stateful=False, normalization='', **kwargs)
NarProgCallback
NarProgCallback (modules, p_state_sync=10000000.0, p_diag_loss=0.0, p_osp_sync=0, p_osp_loss=0, p_tar_loss=0, sync_type='mse', targ_loss_func=<function mae>, osp_n_skip=None, narprog_model=None, detach=False, **kwargs)
Callback that regularizes the output of the NarProg model.
Parameter | Type | Default | Details |
---|---|---|---|
modules | | | |
p_state_sync | float | 10000000.0 | scaling factor for the regularization of the hidden-state deviation between the diag and prog modules |
p_diag_loss | float | 0.0 | scaling factor for the loss calculated on the diag hidden state passed to the final layer |
p_osp_sync | int | 0 | scaling factor for the regularization of the hidden-state deviation between the one-step prediction and the diag hidden states |
p_osp_loss | int | 0 | scaling factor for the one-step-prediction loss of the prog module |
p_tar_loss | int | 0 | scaling factor for the temporal activation regularization of the combined diag and prog hidden state over the target sequence length |
sync_type | str | mse | |
targ_loss_func | function | mae | |
osp_n_skip | NoneType | None | number of elements to skip before the one-step prediction is applied; defaults to model.init_sz |
narprog_model | NoneType | None | |
detach | bool | False | |
kwargs | | | |
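The scaling factors above weight penalty terms that are added to the training loss. As an illustration only (not the callback's actual implementation), a hidden-state synchronization term of the kind controlled by p_state_sync and sync_type could look like this, assuming diag and prog hidden states of identical shape:

import torch
import torch.nn.functional as F

def state_sync_penalty(h_diag, h_prog, p_state_sync=1e7, sync_type='mse'):
    "Toy synchronization term between diagnosis and prognosis hidden states."
    if sync_type == 'mse':
        penalty = F.mse_loss(h_prog, h_diag)
    else:                                          # e.g. a cosine-based variant
        penalty = (1 - F.cosine_similarity(h_prog, h_diag, dim=-1)).mean()
    return p_state_sync * penalty

h_diag, h_prog = torch.randn(8, 500, 50), torch.randn(8, 500, 50)   # (batch, seq_len, hidden_size)
extra_loss = state_sync_penalty(h_diag, h_prog, p_state_sync=0.1)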
model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50)
cb = NarProgCallback([model.rnn_diagnosis,model.rnn_prognosis],
                     p_state_sync=0,
                     p_diag_loss=0.1,
                     p_osp_sync=0,
                     p_osp_loss=0,
                     sync_type='mse')
lrn = Learner(db,model,loss_func=nn.MSELoss(),cbs=cb,opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
epoch | train_loss | valid_loss | time |
---|---|---|---|
model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50)
cb = NarProgCallback([model.rnn_diagnosis,model.rnn_prognosis],
                     p_state_sync=1e-1,
                     p_diag_loss=0.0,
                     p_osp_sync=0,
                     p_osp_loss=0.1,
                     sync_type='cos_pow')
lrn = Learner(db,model,loss_func=nn.MSELoss(),cbs=cb,opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
epoch | train_loss | valid_loss | time |
---|---|---|---|
0 | 0.092519 | 0.047769 | 00:02 |
TCN as Diagnosis Module
Diag_TCN
Diag_TCN (input_size, output_size, output_layer, hl_width, mlp_layers=0, hl_depth=1, act=<class 'torch.nn.modules.activation.Mish'>, bn=False, stateful=False, **kwargs)
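A TCN can serve as the diagnosis module because the receptive field of stacked dilated causal convolutions grows exponentially with depth, so a few levels can span the 100-step initialization window used below. A back-of-the-envelope helper; the kernel size, dilation pattern and number of convolutions per level are assumptions for illustration, not the actual configuration of Diag_TCN:

def tcn_receptive_field(n_levels, kernel_size=3):
    "Receptive field of a dilated causal conv stack with dilations 1, 2, 4, ... and two convolutions per level (assumed)."
    rf = 1
    for i in range(n_levels):
        rf += 2 * (kernel_size - 1) * 2**i
    return rf

print(tcn_receptive_field(6))   # 253 steps under these assumptions, comfortably covering init_sz=100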
diag_tcn = Diag_TCN(2,50,2,hl_depth=6,hl_width=20,mlp_layers=3)
model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50,diag_model=diag_tcn)
cb = NarProgCallback([model.rnn_diagnosis,model.rnn_prognosis],
                     p_state_sync=1e6,
                     p_diag_loss=0.0,
                     p_osp_sync=0,
                     p_osp_loss=0.1)
lrn = Learner(db,model,loss_func=nn.MSELoss(),cbs=cb,opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
epoch | train_loss | valid_loss | time |
---|---|---|---|
0 | 0.765931 | 0.060780 | 00:02 |
RNN without linear layer as diagnosis module
diag_rnn = Diag_RNN_raw(2,50,2,stateful=False)
model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50,diag_model=diag_rnn)
cb = NarProgCallback([model.rnn_diagnosis,model.rnn_prognosis],
                     p_state_sync=1e6,
                     p_diag_loss=0.0,
                     p_osp_sync=0,
                     p_osp_loss=0.1)
lrn = Learner(db,model,loss_func=nn.MSELoss(),cbs=cb,opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
epoch | train_loss | valid_loss | time |
---|---|---|---|
0 | 0.705823 | 0.052927 | 00:02 |
Fast variant with init_sz diagnosis only
model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50,init_diag_only=True)
lrn = Learner(db,model,loss_func=nn.MSELoss(),opt_func=ranger)
lrn.add_cb(TbpttResetCB())
lrn.fit(1,lr=3e-3)
epoch | train_loss | valid_loss | time |
---|---|---|---|
0 | 0.045889 | 0.036955 | 00:02 |
NarProgCallback_variable_init
NarProgCallback_variable_init (init_sz_min, init_sz_max, **kwargs)
Callback that reports progress after every epoch to the Ray Tune logger.
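A hedged construction sketch, based only on the signature above; that the remaining NarProgCallback arguments (e.g. modules) are forwarded through **kwargs, and the chosen init_sz_min/init_sz_max values, are assumptions rather than confirmed behaviour:

model = NarProg(1,2,1,init_sz=100,linear_layer=1,rnn_layer=2,hidden_size=50)
cb = NarProgCallback_variable_init(init_sz_min=50, init_sz_max=150,
                                   modules=[model.rnn_diagnosis, model.rnn_prognosis])
lrn = Learner(db, model, loss_func=nn.MSELoss(), cbs=cb, opt_func=ranger)
lrn.fit(1, lr=3e-3)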