Example #1
    def __init__(self, config, dataset):
        super(TransRec, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']

        # load dataset info
        self.n_users = dataset.user_num

        self.user_embedding = nn.Embedding(self.n_users,
                                           self.embedding_size,
                                           padding_idx=0)
        self.item_embedding = nn.Embedding(self.n_items,
                                           self.embedding_size,
                                           padding_idx=0)
        self.bias = nn.Embedding(self.n_items, 1,
                                 padding_idx=0)  # Beta popularity bias
        self.T = nn.Parameter(torch.zeros(
            self.embedding_size))  # average user representation 'global'

        self.bpr_loss = BPRLoss()
        self.emb_loss = EmbLoss()
        self.reg_loss = RegLoss()

        # parameters initialization
        self.apply(xavier_normal_initialization)
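The snippet above only sets up the parameters; the scoring itself is not shown. In the TransRec model a candidate item is usually scored by its popularity bias minus the distance between the translated user state and the item embedding. A hedged sketch of that rule, reusing the tensors defined above (the helper name and shapes are assumptions, not part of the original code):

import torch

def transrec_score(user_e, last_item_e, T, item_e, item_bias):
    # user_e, last_item_e, item_e: [B, embedding_size]; item_bias: [B, 1]
    # translated state: global vector T + per-user translation + last consumed item
    state = user_e + T + last_item_e
    # higher popularity bias and smaller L2 distance -> higher score
    return item_bias.squeeze(-1) - torch.norm(state - item_e, p=2, dim=-1)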
Example #2
    def __init__(self, config, dataset):
        super(TransRecF, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']

        # load dataset info
        self.n_users = dataset.user_num

        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size, padding_idx=0)
        self.item_embedding = nn.Embedding(self.n_items, self.embedding_size, padding_idx=0)
        self.bias = nn.Embedding(self.n_items, 1, padding_idx=0)  # Beta popularity bias
        self.T = nn.Parameter(torch.zeros(self.embedding_size))  # average user representation 'global'
        # load feature-related parameters info
        self.selected_features = config['selected_features']
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        self.pooling_mode = config['pooling_mode']
        self.layer_norm_eps = config['layer_norm_eps']
        self.device = config['device']
        self.num_feature_field = len(config['selected_features'])

        # define layers and loss
        self.dropout = nn.Dropout(self.hidden_dropout_prob)
        self.bpr_loss = BPRLoss()
        self.emb_loss = EmbLoss()
        self.reg_loss = RegLoss()
        self.feature_embed_layer = FeatureSeqEmbLayer(
            dataset, self.embedding_size, self.selected_features, self.pooling_mode, self.device
        )
        self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=self.layer_norm_eps)
        self.concat_layer = nn.Linear(self.embedding_size * (1 + self.num_feature_field), self.embedding_size)

        # parameters initialization
        self.apply(xavier_normal_initialization)
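The forward pass is not part of the snippet. Given the layers defined above, one plausible way to fuse an item embedding with its feature embeddings is to concatenate them, project through concat_layer, and normalize; this is only a sketch under that assumption (the method name is hypothetical), not the model's actual code:

    def fuse_item_and_features(self, item_emb, feature_emb):
        # item_emb: [B, embedding_size]; feature_emb: [B, num_feature_field, embedding_size]
        x = torch.cat([item_emb.unsqueeze(1), feature_emb], dim=1)  # [B, 1 + F, H]
        x = x.view(x.size(0), -1)                                   # [B, (1 + F) * H]
        x = self.concat_layer(x)                                    # [B, H]
        return self.dropout(self.LayerNorm(x))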
Example #3
    def __init__(self, config, dataset):
        super(Caser, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.loss_type = config['loss_type']
        self.n_h = config['nh']
        self.n_v = config['nv']
        self.dropout_prob = config['dropout_prob']
        self.reg_weight = config['reg_weight']

        # load dataset info
        self.n_users = dataset.user_num

        # define layers and loss
        self.user_embedding = nn.Embedding(self.n_users,
                                           self.embedding_size,
                                           padding_idx=0)
        self.item_embedding = nn.Embedding(self.n_items,
                                           self.embedding_size,
                                           padding_idx=0)

        # vertical conv layer
        self.conv_v = nn.Conv2d(in_channels=1,
                                out_channels=self.n_v,
                                kernel_size=(self.max_seq_length, 1))

        # horizontal conv layer
        lengths = [i + 1 for i in range(self.max_seq_length)]
        self.conv_h = nn.ModuleList([
            nn.Conv2d(in_channels=1,
                      out_channels=self.n_h,
                      kernel_size=(i, self.embedding_size)) for i in lengths
        ])

        # fully-connected layer
        self.fc1_dim_v = self.n_v * self.embedding_size
        self.fc1_dim_h = self.n_h * len(lengths)
        fc1_dim_in = self.fc1_dim_v + self.fc1_dim_h
        self.fc1 = nn.Linear(fc1_dim_in, self.embedding_size)
        self.fc2 = nn.Linear(self.embedding_size + self.embedding_size,
                             self.embedding_size)

        self.dropout = nn.Dropout(self.dropout_prob)
        self.ac_conv = nn.ReLU()
        self.ac_fc = nn.ReLU()
        self.reg_loss = RegLoss()

        if self.loss_type == 'BPR':
            self.loss_fct = BPRLoss()
        elif self.loss_type == 'CE':
            self.loss_fct = nn.CrossEntropyLoss()
        else:
            raise NotImplementedError(
                "Make sure 'loss_type' in ['BPR', 'CE']!")

        # parameters initialization
        self.apply(self._init_weights)
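The fully-connected input size follows directly from the two convolution branches: the vertical convolution keeps n_v channels per embedding dimension, and each of the max_seq_length horizontal kernels contributes n_h channels after pooling. A quick dimension check with illustrative values (not defaults):

embedding_size, n_v, n_h, max_seq_length = 64, 4, 16, 50   # illustrative values only
fc1_dim_v = n_v * embedding_size        # 256: flattened vertical conv output
fc1_dim_h = n_h * max_seq_length        # 800: one horizontal conv per kernel height 1..max_seq_length
fc1_dim_in = fc1_dim_v + fc1_dim_h      # 1056: input size of fc1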
Example #4
    def __init__(self, config, dataset):
        super(NextItNet, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.residual_channels = config['embedding_size']
        self.block_num = config['block_num']
        self.dilations = config['dilations'] * self.block_num
        self.kernel_size = config['kernel_size']
        self.reg_weight = config['reg_weight']
        self.loss_type = config['loss_type']

        # define layers and loss
        self.item_embedding = nn.Embedding(self.n_items,
                                           self.embedding_size,
                                           padding_idx=0)

        # residual blocks; dilations in blocks: [1, 2, 4, 8, 1, 2, 4, 8, ...]
        rb = [
            ResidualBlock_b(self.residual_channels,
                            self.residual_channels,
                            kernel_size=self.kernel_size,
                            dilation=dilation) for dilation in self.dilations
        ]
        self.residual_blocks = nn.Sequential(*rb)

        # fully-connected layer
        self.final_layer = nn.Linear(self.residual_channels,
                                     self.embedding_size)

        if self.loss_type == 'BPR':
            self.loss_fct = BPRLoss()
        elif self.loss_type == 'CE':
            self.loss_fct = nn.CrossEntropyLoss()
        else:
            raise NotImplementedError(
                "Make sure 'loss_type' in ['BPR', 'CE']!")
        self.reg_loss = RegLoss()

        # parameters initialization
        self.apply(self._init_weights)
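Note that config['dilations'] * self.block_num simply repeats the dilation pattern, which is what produces the [1, 2, 4, 8, 1, 2, 4, 8, ...] schedule mentioned in the comment. For example (values are illustrative):

dilations, block_num = [1, 2, 4, 8], 2   # illustrative values
print(dilations * block_num)             # [1, 2, 4, 8, 1, 2, 4, 8]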
Example #5
    def __init__(self, config, dataset):
        super(DCN, self).__init__(config, dataset)

        # load parameters info
        self.mlp_hidden_size = config['mlp_hidden_size']
        self.cross_layer_num = config['cross_layer_num']
        self.reg_weight = config['reg_weight']
        self.dropout_prob = config['dropout_prob']

        # define layers and loss
        # init weight and bias of each cross layer
        self.cross_layer_w = nn.ParameterList(
            nn.Parameter(
                torch.randn(self.num_feature_field *
                            self.embedding_size).to(self.device))
            for _ in range(self.cross_layer_num))
        self.cross_layer_b = nn.ParameterList(
            nn.Parameter(
                torch.zeros(self.num_feature_field *
                            self.embedding_size).to(self.device))
            for _ in range(self.cross_layer_num))

        # size of mlp hidden layer
        size_list = [self.embedding_size * self.num_feature_field] + self.mlp_hidden_size
        # size of cross network output
        in_feature_num = self.embedding_size * self.num_feature_field + self.mlp_hidden_size[-1]

        self.mlp_layers = MLPLayers(size_list,
                                    dropout=self.dropout_prob,
                                    bn=True)
        self.predict_layer = nn.Linear(in_feature_num, 1)
        self.reg_loss = RegLoss()
        self.sigmoid = nn.Sigmoid()
        self.loss = nn.BCELoss()

        # parameters initialization
        self.apply(self._init_weights)
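The cross weights and biases created above are applied once per cross layer; in DCN the standard update is x_{l+1} = x_0 * (x_l · w_l) + b_l + x_l. A hedged sketch of that loop over the parameters defined in __init__ (the model's own cross-network method may differ in detail):

    def cross_network(self, x_0):
        # x_0: [B, num_feature_field * embedding_size]
        x_l = x_0
        for i in range(self.cross_layer_num):
            xl_w = torch.matmul(x_l, self.cross_layer_w[i])              # [B], one scalar per sample
            x_l = x_0 * xl_w.unsqueeze(1) + self.cross_layer_b[i] + x_l  # [B, D]
        return x_l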