---
title: SASRec
keywords: fastai
sidebar: home_sidebar
summary: "Self-Attentive Sequential Recommendation Model."
description: "Self-Attentive Sequential Recommendation Model."
nb_path: "nbs/models/models.sasrec.ipynb"
---
{% raw %}
{% endraw %} {% raw %}
{% endraw %} {% raw %}

class SASRec[source]

SASRec(args) :: Module

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing them to be nested in a tree structure. You can assign the submodules as regular attributes:

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call to(), etc.

training (bool): whether this module is in training or evaluation mode.

{% endraw %} {% raw %}
{% endraw %} {% raw %}
class Args:
    bert_max_len = 8          # maximum sequence length
    bert_hidden_units = 4     # embedding / transformer hidden size
    bert_num_heads = 2        # attention heads per block
    bert_head_size = 4        # projection size per head
    bert_num_blocks = 4       # number of transformer blocks
    bert_dropout = 0.2        # dropout on embeddings and feed-forward layers
    bert_attn_dropout = 0.2   # dropout on attention weights
    num_items = 10            # item vocabulary size (excluding the padding index)

args = Args()
model = SASRec(args)
model.parameters  # the repr of the bound method also shows the full module tree
<bound method Module.parameters of SASRec(
  (embedding): SASEmbedding(
    (token): TokenEmbedding(11, 4, padding_idx=0)
    (position): PositionalEmbedding(
      (pe): Embedding(9, 4)
    )
    (dropout): Dropout(p=0.2, inplace=False)
  )
  (model): SASModel(
    (transformer_blocks): ModuleList(
      (0): SASTransformerBlock(
        (layer_norm): LayerNorm()
        (attention): SASMultiHeadedAttention(
          (linear_layers): ModuleList(
            (0): Linear(in_features=4, out_features=8, bias=True)
            (1): Linear(in_features=4, out_features=8, bias=True)
            (2): Linear(in_features=4, out_features=8, bias=True)
          )
          (attention): Attention()
          (dropout): Dropout(p=0.2, inplace=False)
          (layer_norm): LayerNorm()
        )
        (feed_forward): SASPositionwiseFeedForward(
          (conv1): Conv1d(4, 16, kernel_size=(1,), stride=(1,))
          (activation): ReLU()
          (dropout): Dropout(p=0.2, inplace=False)
          (conv2): Conv1d(16, 4, kernel_size=(1,), stride=(1,))
          (layer_norm): LayerNorm()
        )
      )
      (1): SASTransformerBlock(
        (layer_norm): LayerNorm()
        (attention): SASMultiHeadedAttention(
          (linear_layers): ModuleList(
            (0): Linear(in_features=4, out_features=8, bias=True)
            (1): Linear(in_features=4, out_features=8, bias=True)
            (2): Linear(in_features=4, out_features=8, bias=True)
          )
          (attention): Attention()
          (dropout): Dropout(p=0.2, inplace=False)
          (layer_norm): LayerNorm()
        )
        (feed_forward): SASPositionwiseFeedForward(
          (conv1): Conv1d(4, 16, kernel_size=(1,), stride=(1,))
          (activation): ReLU()
          (dropout): Dropout(p=0.2, inplace=False)
          (conv2): Conv1d(16, 4, kernel_size=(1,), stride=(1,))
          (layer_norm): LayerNorm()
        )
      )
      (2): SASTransformerBlock(
        (layer_norm): LayerNorm()
        (attention): SASMultiHeadedAttention(
          (linear_layers): ModuleList(
            (0): Linear(in_features=4, out_features=8, bias=True)
            (1): Linear(in_features=4, out_features=8, bias=True)
            (2): Linear(in_features=4, out_features=8, bias=True)
          )
          (attention): Attention()
          (dropout): Dropout(p=0.2, inplace=False)
          (layer_norm): LayerNorm()
        )
        (feed_forward): SASPositionwiseFeedForward(
          (conv1): Conv1d(4, 16, kernel_size=(1,), stride=(1,))
          (activation): ReLU()
          (dropout): Dropout(p=0.2, inplace=False)
          (conv2): Conv1d(16, 4, kernel_size=(1,), stride=(1,))
          (layer_norm): LayerNorm()
        )
      )
      (3): SASTransformerBlock(
        (layer_norm): LayerNorm()
        (attention): SASMultiHeadedAttention(
          (linear_layers): ModuleList(
            (0): Linear(in_features=4, out_features=8, bias=True)
            (1): Linear(in_features=4, out_features=8, bias=True)
            (2): Linear(in_features=4, out_features=8, bias=True)
          )
          (attention): Attention()
          (dropout): Dropout(p=0.2, inplace=False)
          (layer_norm): LayerNorm()
        )
        (feed_forward): SASPositionwiseFeedForward(
          (conv1): Conv1d(4, 16, kernel_size=(1,), stride=(1,))
          (activation): ReLU()
          (dropout): Dropout(p=0.2, inplace=False)
          (conv2): Conv1d(16, 4, kernel_size=(1,), stride=(1,))
          (layer_norm): LayerNorm()
        )
      )
    )
  )
)>
{% endraw %}
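As a quick smoke test, the instantiated model can be run on a small batch of padded item-ID sequences. This is a minimal sketch: it assumes `SASRec.forward` accepts a `LongTensor` of shape `(batch_size, bert_max_len)` and returns per-position hidden states of shape `(batch_size, bert_max_len, bert_hidden_units)`; check the notebook source for the exact signature.

{% raw %}
import torch

# two left-padded sequences of item ids (0 is the padding index, ids <= num_items)
seqs = torch.tensor([
    [0, 0, 0, 1, 2, 3, 4, 5],
    [0, 0, 6, 7, 8, 9, 1, 2],
])

model.eval()                  # disable dropout for a deterministic check
with torch.no_grad():
    out = model(seqs)         # assumed output: hidden states of shape (2, 8, 4)
print(out.shape)
print(sum(p.numel() for p in model.parameters()))  # total trainable parameter count
{% endraw %}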