Example #1
import copy
from typing import Optional, Sequence

import torch

# create_embeddings, create_mlp and _initialize_embeddings_weights are helpers
# defined elsewhere in the source repository; the enclosing class name below is
# an assumption for context, since the snippet shows only the __init__ method.
class DlrmBottom(torch.nn.Module):
    def __init__(self,
                 num_numerical_features: int,
                 categorical_feature_sizes: Sequence[int],
                 bottom_mlp_sizes: Optional[Sequence[int]] = None,
                 embedding_type: str = "multi_table",
                 embedding_dim: int = 128,
                 hash_indices: bool = False,
                 use_cpp_mlp: bool = False,
                 fp16: bool = False,
                 device: str = "cuda"):
        super().__init__()
        # The bottom MLP output is concatenated with the embedding vectors, so
        # its last layer must match the embedding width.
        assert bottom_mlp_sizes is None or embedding_dim == bottom_mlp_sizes[-1], \
            "The last bottom MLP layer must have same size as embedding."
        self._embedding_dim = embedding_dim
        self._categorical_feature_sizes = copy.copy(categorical_feature_sizes)
        self._fp16 = fp16

        # Embedding tables for the categorical features; layout is controlled
        # by embedding_type, with optional index hashing and fp16 storage.
        self.embeddings = create_embeddings(embedding_type,
                                            categorical_feature_sizes,
                                            embedding_dim, device,
                                            hash_indices, fp16)
        # The bottom MLP is optional: with no sizes given, an empty ModuleList
        # is used in its place.
        self.mlp = (create_mlp(num_numerical_features, bottom_mlp_sizes,
                               use_cpp_mlp).to(device)
                    if bottom_mlp_sizes else torch.nn.ModuleList())

        self._initialize_embeddings_weights(self.embeddings,
                                            categorical_feature_sizes)
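A minimal usage sketch for this constructor, assuming the class is importable as DlrmBottom and the helper factories from the source repository are available; all feature counts and layer widths below are illustrative, not values from the original:

# Hypothetical instantiation: 13 numerical features, three categorical
# features, and a bottom MLP whose last layer matches embedding_dim.
bottom = DlrmBottom(
    num_numerical_features=13,
    categorical_feature_sizes=[1000, 500, 42],
    bottom_mlp_sizes=[512, 256, 128],  # last entry must equal embedding_dim
    embedding_dim=128,
    device="cuda",
)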
Example #2
from typing import Sequence

from torch import nn

# create_mlp, Interaction and _initialize_weights come from elsewhere in the
# source repository; the class name DlrmTop is assumed (only __init__ is shown).
class DlrmTop(nn.Module):
    def __init__(self, top_mlp_sizes: Sequence[int], interaction: Interaction, use_cpp_mlp: bool = False):
        super().__init__()

        self.interaction = interaction
        # Hidden top-MLP layers; the final projection is a separate Linear layer.
        self.mlp = create_mlp(interaction.num_interactions, top_mlp_sizes[:-1], use_cpp_mlp)
        self.out = nn.Linear(top_mlp_sizes[-2], top_mlp_sizes[-1])

        self._initialize_weights()
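And a matching sketch for the top model; the interaction object is assumed to expose a num_interactions attribute, as the constructor above requires, and the sizes are again illustrative. The last entry of top_mlp_sizes is the output width (e.g. 1 for a single logit):

# Hypothetical instantiation; `interaction` must provide .num_interactions.
top = DlrmTop(
    top_mlp_sizes=[1024, 512, 1],  # hidden widths plus a 1-unit output layer
    interaction=interaction,
)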