Example #1
 def _build_network(self):
     # Category Embedding layers
     self.cat_embedding_layers = nn.ModuleList(
         [
             nn.Embedding(cardinality, self.hparams.embedding_dim)
             for cardinality in self.hparams.categorical_cardinality
         ]
     )
     if self.hparams.batch_norm_continuous_input:
         self.normalizing_batch_norm = nn.BatchNorm1d(self.hparams.continuous_dim)
     # Continuous Embedding Layer
     self.cont_embedding_layer = nn.Embedding(
         self.hparams.continuous_dim, self.hparams.embedding_dim
     )
     if self.hparams.embedding_dropout != 0 and self.embedding_cat_dim != 0:
         self.embed_dropout = nn.Dropout(self.hparams.embedding_dropout)
     # Deep Layers
     _curr_units = self.hparams.embedding_dim
     if self.hparams.deep_layers:
         activation = getattr(nn, self.hparams.activation)
         # Linear Layers
         layers = []
         for units in self.hparams.layers.split("-"):
             layers.extend(
                 _linear_dropout_bn(
                     self.hparams,
                     _curr_units,
                     int(units),
                     activation,
                     self.hparams.dropout,
                 )
             )
             _curr_units = int(units)
         self.linear_layers = nn.Sequential(*layers)
     # Projection to Multi-Headed Attention Dims
     self.attn_proj = nn.Linear(_curr_units, self.hparams.attn_embed_dim)
     _initialize_layers(self.hparams, self.attn_proj)
     # Multi-Headed Attention Layers
     self.self_attns = nn.ModuleList(
         [
             nn.MultiheadAttention(
                 self.hparams.attn_embed_dim,
                 self.hparams.num_heads,
                 dropout=self.hparams.attn_dropouts,
             )
             for _ in range(self.hparams.num_attn_blocks)
         ]
     )
     if self.hparams.has_residuals:
         self.V_res_embedding = torch.nn.Linear(
             _curr_units,
             self.hparams.attn_embed_dim * self.hparams.num_attn_blocks
             if self.hparams.attention_pooling
             else self.hparams.attn_embed_dim,
         )
     self.output_dim = (
         self.hparams.continuous_dim + self.hparams.categorical_dim
     ) * self.hparams.attn_embed_dim
     if self.hparams.attention_pooling:
         self.output_dim = self.output_dim * self.hparams.num_attn_blocks
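The snippet above only builds the modules. To make the shape bookkeeping concrete, here is a standalone sketch (not the library's actual forward pass) of how such an attention stack could consume per-feature embeddings. The tensor shapes, the attention-pooling concatenation, and the omission of the V_res_embedding residual branch are assumptions made purely to illustrate why output_dim comes out to (continuous_dim + categorical_dim) * attn_embed_dim, multiplied by num_attn_blocks when attention pooling is enabled.

    # Standalone sketch, not the library's forward pass; all shapes are assumed.
    import torch
    import torch.nn as nn

    batch, num_features = 32, 10          # num_features = continuous_dim + categorical_dim
    embed_dim, attn_embed_dim, num_heads, num_blocks = 16, 32, 2, 3

    x = torch.randn(batch, num_features, embed_dim)    # one embedding per feature
    attn_proj = nn.Linear(embed_dim, attn_embed_dim)
    self_attns = nn.ModuleList(
        [nn.MultiheadAttention(attn_embed_dim, num_heads) for _ in range(num_blocks)]
    )

    # nn.MultiheadAttention defaults to the (seq_len, batch, embed) layout
    cross_term = attn_proj(x).transpose(0, 1)          # (num_features, batch, attn_embed_dim)
    pooled = []
    for attn in self_attns:
        cross_term, _ = attn(cross_term, cross_term, cross_term)   # self-attention block
        pooled.append(cross_term)

    # with attention_pooling every block's output is kept, otherwise only the last one
    out = torch.cat(pooled, dim=-1).transpose(0, 1).reshape(batch, -1)
    print(out.shape)   # torch.Size([32, 960]) == (batch, num_features * attn_embed_dim * num_blocks)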
Example #2
 def _build_network(self):
     # Backbone
     self.backbone = CategoryEmbeddingBackbone(self.hparams)
     # Adding the last layer
     self.head = nn.Linear(
         self.backbone.output_dim, self.hparams.output_dim
     )  # output_dim auto-calculated from other config
     _initialize_layers(
         self.hparams.activation, self.hparams.initialization, self.head
     )
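Examples #2, #4 and #5 pass the configured activation and initialization strings to _initialize_layers, while #1, #3 and #6 pass the whole hparams object; the helper itself is not shown in any of the snippets. Below is a hypothetical stand-in with the three-argument signature, only to show the kind of work such a helper typically does. It is not the library's implementation.

    # Hypothetical stand-in for a three-argument initializer; not the library's code.
    import torch.nn as nn

    def init_linear_layers(activation: str, initialization: str, module: nn.Module) -> None:
        nonlinearity = "leaky_relu" if activation == "LeakyReLU" else "relu"
        for m in module.modules():
            if isinstance(m, nn.Linear):
                if initialization == "kaiming":
                    nn.init.kaiming_normal_(m.weight, nonlinearity=nonlinearity)
                elif initialization == "xavier":
                    nn.init.xavier_normal_(m.weight)
                else:
                    nn.init.normal_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)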
Example #3
 def _build_network(self):
     # Backbone
     self.backbone = AutoIntBackbone(self.hparams)
     self.dropout = nn.Dropout(self.hparams.dropout)
     # Adding the last layer
     self.output_layer = nn.Linear(
         self.backbone.output_dim, self.hparams.output_dim
     )  # output_dim auto-calculated from other config
     _initialize_layers(self.hparams, self.output_layer)
Example #4
 def _build_network(self):
     # Backbone
     self.backbone = AutoIntBackbone(self.hparams)
     # Head
     self.head = nn.Sequential(
         nn.Dropout(self.hparams.dropout),
         nn.Linear(self.backbone.output_dim, self.hparams.output_dim),
     )
     _initialize_layers(
         self.hparams.activation, self.hparams.initialization, self.head
     )
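Examples #3 and #4 wire up the same AutoInt head in two ways: #3 keeps dropout as a separate attribute, while #4 folds dropout and the linear layer into a single nn.Sequential. The forward methods are not part of these snippets, but the quick check below (with made-up dimensions) shows the two layouts compute the same function.

    # Quick equivalence check with made-up dimensions; the real forward methods are not shown above.
    import torch
    import torch.nn as nn

    backbone_out = torch.randn(8, 64)              # stand-in for self.backbone(x)
    dropout, linear = nn.Dropout(0.1), nn.Linear(64, 2)
    head = nn.Sequential(dropout, linear)          # Example #4 layout

    head.eval()                                    # disable dropout so both paths are deterministic
    assert torch.allclose(head(backbone_out), linear(dropout(backbone_out)))  # Example #3 layout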
Example #5
 def _build_network(self):
     # Backbone
     self.backbone = FTTransformerBackbone(self.hparams)
     # Adding the last layer
     self.head = nn.Sequential(
         nn.Dropout(self.hparams.out_ff_dropout),
         nn.Linear(self.backbone.output_dim, self.hparams.output_dim),
     )
     _initialize_layers(
         self.hparams.out_ff_activation,
         self.hparams.out_ff_initialization,
         self.head,
     )
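None of these _build_network methods are meant to be called by hand; in pytorch_tabular they are driven through the high-level TabularModel API. The sketch below follows the documented usage pattern, but the column names and synthetic data are invented here, and the exact config fields may differ between library versions.

    # End-to-end sketch of the high-level API; data and column names are made up.
    import numpy as np
    import pandas as pd
    from pytorch_tabular import TabularModel
    from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig
    from pytorch_tabular.models import FTTransformerConfig

    train_df = pd.DataFrame({
        "num_a": np.random.randn(256),
        "cat_a": np.random.choice(["x", "y", "z"], size=256),
        "target": np.random.randint(0, 2, size=256),
    })

    tabular_model = TabularModel(
        data_config=DataConfig(
            target=["target"], continuous_cols=["num_a"], categorical_cols=["cat_a"]
        ),
        model_config=FTTransformerConfig(task="classification"),
        optimizer_config=OptimizerConfig(),
        trainer_config=TrainerConfig(max_epochs=3),
    )
    tabular_model.fit(train=train_df)   # builds a network like the one above, then trains it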
Example #6
 def _build_network(self):
     # Embedding layers
     self.embedding_layers = nn.ModuleList(
         [nn.Embedding(x, y) for x, y in self.hparams.embedding_dims]
     )
     # Continuous Layers
     if self.hparams.batch_norm_continuous_input:
         self.normalizing_batch_norm = nn.BatchNorm1d(self.hparams.continuous_dim)
     # Backbone
     self.backbone = FeedForwardBackbone(self.hparams)
     # Adding the last layer
     self.output_layer = nn.Linear(
         self.backbone.output_dim, self.hparams.output_dim
     )  # output_dim auto-calculated from other config
     _initialize_layers(self.hparams, self.output_layer)
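Example #6's forward pass is not shown, so the sketch below assumes the usual pattern for this kind of model: embed each categorical column with its own nn.Embedding, batch-normalize the continuous block, and concatenate everything into the vector a feed-forward backbone would consume. Dimensions and batch size are invented for illustration.

    # Assumed input-assembly step; not the library's forward method.
    import torch
    import torch.nn as nn

    embedding_dims = [(10, 4), (7, 3)]          # (cardinality, embedding size) per categorical column
    continuous_dim = 5
    embedding_layers = nn.ModuleList([nn.Embedding(c, d) for c, d in embedding_dims])
    normalizing_batch_norm = nn.BatchNorm1d(continuous_dim)

    categorical = torch.randint(0, 7, (32, 2))  # (batch, num_categorical) integer codes
    continuous = torch.randn(32, continuous_dim)

    embedded = [emb(categorical[:, i]) for i, emb in enumerate(embedding_layers)]
    x = torch.cat(embedded + [normalizing_batch_norm(continuous)], dim=1)
    print(x.shape)   # torch.Size([32, 12]) == (batch, 4 + 3 + continuous_dim)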