Example #1
    def __init__(self, config, dataset):
        super(TransRecF, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']

        # load dataset info
        self.n_users = dataset.user_num

        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size, padding_idx=0)
        self.item_embedding = nn.Embedding(self.n_items, self.embedding_size, padding_idx=0)
        self.bias = nn.Embedding(self.n_items, 1, padding_idx=0)  # Beta popularity bias
        self.T = nn.Parameter(torch.zeros(self.embedding_size))  # global average user representation
        self.selected_features = config['selected_features']
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        self.pooling_mode = config['pooling_mode']
        self.dropout = nn.Dropout(self.hidden_dropout_prob)
        self.layer_norm_eps = config['layer_norm_eps']
        self.device = config['device']
        self.num_feature_field = len(config['selected_features'])
        self.bpr_loss = BPRLoss()
        self.emb_loss = EmbLoss()
        self.reg_loss = RegLoss()
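        # FeatureSeqEmbLayer embeds the selected item features at each position
        # of the sequence, pooled according to pooling_mode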
        self.feature_embed_layer = FeatureSeqEmbLayer(
            dataset, self.embedding_size, self.selected_features, self.pooling_mode, self.device
        )
        self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=self.layer_norm_eps)
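        # project the concatenated item and feature embeddings back to embedding_size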
        self.concat_layer = nn.Linear(self.embedding_size * (1 + self.num_feature_field), self.embedding_size)
        # parameters initialization
        self.apply(xavier_normal_initialization)
Example #2
    def __init__(self, config, dataset):

        super(SHANF, self).__init__(config, dataset)

        # load the dataset information
        self.n_users = dataset.num(self.USER_ID)
        self.device = config['device']
        self.selected_features = config['selected_features']
        self.pooling_mode = config['pooling_mode']
        self.num_feature_field = len(config['selected_features'])
        # load the parameter information
        self.embedding_size = config["embedding_size"]
        self.short_item_length = config["short_item_length"]  # the length of the short session items
        assert self.short_item_length <= self.max_seq_length, \
            "short_item_length can't be longer than max_seq_length"
        self.reg_weight = config["reg_weight"]

        # define layers and loss
        self.item_embedding = nn.Embedding(self.n_items,
                                           self.embedding_size,
                                           padding_idx=0)
        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)

        # attention parameters for the long-term user representation
        self.long_w = nn.Linear(self.embedding_size, self.embedding_size)
        # create the bias directly on the target device: calling .to() on an
        # nn.Parameter can return a plain Tensor, which would no longer be
        # registered as a model parameter
        self.long_b = nn.Parameter(uniform_(
            tensor=torch.zeros(self.embedding_size, device=self.device),
            a=-np.sqrt(3 / self.embedding_size),
            b=np.sqrt(3 / self.embedding_size)),
                                   requires_grad=True)
        # attention parameters for fusing the long- and short-term representations
        self.long_short_w = nn.Linear(self.embedding_size, self.embedding_size)
        self.long_short_b = nn.Parameter(uniform_(
            tensor=torch.zeros(self.embedding_size, device=self.device),
            a=-np.sqrt(3 / self.embedding_size),
            b=np.sqrt(3 / self.embedding_size)),
                                         requires_grad=True)

        self.relu = nn.ReLU()
        self.feature_embed_layer = FeatureSeqEmbLayer(dataset,
                                                      self.embedding_size,
                                                      self.selected_features,
                                                      self.pooling_mode,
                                                      self.device)
        self.concat_layer = nn.Linear(
            self.embedding_size * (1 + self.num_feature_field),
            self.embedding_size)
        self.loss_type = config['loss_type']
        if self.loss_type == 'BPR':
            self.loss_fct = BPRLoss()
        elif self.loss_type == 'CE':
            self.loss_fct = nn.CrossEntropyLoss()
        else:
            raise NotImplementedError(
                "Make sure 'loss_type' in ['BPR', 'CE']!")

        # init the parameter of the model
        self.apply(self.init_weights)
Example #3
    def __init__(self, config, dataset):
        super(FDSA, self).__init__(config, dataset)

        # load parameters info
        self.n_layers = config['n_layers']
        self.n_heads = config['n_heads']
        self.hidden_size = config['hidden_size']  # same as embedding_size
        self.inner_size = config['inner_size']  # the dimensionality of the feed-forward layer
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        self.attn_dropout_prob = config['attn_dropout_prob']
        self.hidden_act = config['hidden_act']
        self.layer_norm_eps = config['layer_norm_eps']

        self.selected_features = config['selected_features']
        self.pooling_mode = config['pooling_mode']
        self.device = config['device']
        self.num_feature_field = len(config['selected_features'])

        self.initializer_range = config['initializer_range']
        self.loss_type = config['loss_type']

        # define layers and loss
        self.item_embedding = nn.Embedding(self.n_items, self.hidden_size, padding_idx=0)
        self.position_embedding = nn.Embedding(self.max_seq_length, self.hidden_size)

        self.feature_embed_layer = FeatureSeqEmbLayer(dataset, self.hidden_size, self.selected_features,
                                                      self.pooling_mode, self.device)

        self.item_trm_encoder = TransformerEncoder(n_layers=self.n_layers, n_heads=self.n_heads,
                                                   hidden_size=self.hidden_size, inner_size=self.inner_size,
                                                   hidden_dropout_prob=self.hidden_dropout_prob,
                                                   attn_dropout_prob=self.attn_dropout_prob,
                                                   hidden_act=self.hidden_act, layer_norm_eps=self.layer_norm_eps)

        # vanilla attention adaptively weights the embeddings of the different feature fields
        self.feature_att_layer = VanillaAttention(self.hidden_size, self.hidden_size)
        # For simplicity, we use the same architecture for item_trm and feature_trm
        self.feature_trm_encoder = TransformerEncoder(n_layers=self.n_layers, n_heads=self.n_heads,
                                                      hidden_size=self.hidden_size, inner_size=self.inner_size,
                                                      hidden_dropout_prob=self.hidden_dropout_prob,
                                                      attn_dropout_prob=self.attn_dropout_prob,
                                                      hidden_act=self.hidden_act, layer_norm_eps=self.layer_norm_eps)

        self.LayerNorm = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
        self.dropout = nn.Dropout(self.hidden_dropout_prob)
        self.concat_layer = nn.Linear(self.hidden_size * 2, self.hidden_size)
        if self.loss_type == 'BPR':
            self.loss_fct = BPRLoss()
        elif self.loss_type == 'CE':
            self.loss_fct = nn.CrossEntropyLoss()
        else:
            raise NotImplementedError("Make sure 'loss_type' in ['BPR', 'CE']!")

        # parameters initialization
        self.apply(self._init_weights)
Example #4
    def __init__(self, config, dataset):
        super(GRU4RecF, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.hidden_size = config['hidden_size']
        self.num_layers = config['num_layers']
        self.dropout_prob = config['dropout_prob']

        self.selected_features = config['selected_features']
        self.pooling_mode = config['pooling_mode']
        self.device = config['device']
        self.num_feature_field = len(config['selected_features'])

        self.loss_type = config['loss_type']

        # define layers and loss
        self.item_embedding = nn.Embedding(self.n_items,
                                           self.embedding_size,
                                           padding_idx=0)
        self.feature_embed_layer = FeatureSeqEmbLayer(dataset,
                                                      self.embedding_size,
                                                      self.selected_features,
                                                      self.pooling_mode,
                                                      self.device)
        self.item_gru_layers = nn.GRU(
            input_size=self.embedding_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            bias=False,
            batch_first=True,
        )
        # For simplicity, we use the same architecture for item_gru and feature_gru
        self.feature_gru_layers = nn.GRU(
            input_size=self.embedding_size * self.num_feature_field,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            bias=False,
            batch_first=True,
        )
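        # project the concatenated outputs of the item GRU and the feature GRU
        # back to embedding_size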
        self.dense_layer = nn.Linear(self.hidden_size * 2, self.embedding_size)
        self.dropout = nn.Dropout(self.dropout_prob)
        if self.loss_type == 'BPR':
            self.loss_fct = BPRLoss()
        elif self.loss_type == 'CE':
            self.loss_fct = nn.CrossEntropyLoss()
        else:
            raise NotImplementedError(
                "Make sure 'loss_type' in ['BPR', 'CE']!")

        # parameters initialization
        self.apply(xavier_normal_initialization)
        self.other_parameter_name = ['feature_embed_layer']
Example #5
    def __init__(self, config, dataset):
        super(NARMFF, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.hidden_size = config['hidden_size']
        self.n_layers = config['n_layers']
        self.dropout_probs = config['dropout_probs']
        self.device = config['device']

        self.selected_features = config['selected_features']
        self.pooling_mode = config['pooling_mode']
        self.num_feature_field = len(config['selected_features'])
        self.loss_type = config['loss_type']
        self.feature_embed_layer = FeatureSeqEmbLayer(dataset,
                                                      self.hidden_size,
                                                      self.selected_features,
                                                      self.pooling_mode,
                                                      self.device)
        # define layers and loss
        self.item_embedding = nn.Embedding(self.n_items,
                                           self.embedding_size,
                                           padding_idx=0)
        self.emb_dropout = nn.Dropout(self.dropout_probs[0])
        # input is the item embedding concatenated with the feature embeddings;
        # the hard-coded factor of 3 suggests the item embedding is joined with
        # two feature embeddings of the same size
        self.gru = nn.GRU(self.embedding_size * 3,
                          self.hidden_size,
                          self.n_layers,
                          bias=False,
                          batch_first=True)
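        # NARM-style attention: a_1/a_2 project the GRU hidden states and v_t
        # maps them to scalar attention scores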
        self.a_1 = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.a_2 = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.v_t = nn.Linear(self.hidden_size, 1, bias=False)
        self.ct_dropout = nn.Dropout(self.dropout_probs[1])
        self.b = nn.Linear(2 * self.hidden_size,
                           self.embedding_size,
                           bias=False)
        self.concat_layer = nn.Linear(
            self.hidden_size * (1 + self.num_feature_field), self.hidden_size)

        if self.loss_type == 'BPR':
            self.loss_fct = BPRLoss()
        elif self.loss_type == 'CE':
            self.loss_fct = nn.CrossEntropyLoss()
        else:
            raise NotImplementedError(
                "Make sure 'loss_type' in ['BPR', 'CE']!")

        # parameters initialization
        self.apply(self._init_weights)
Example #6
    def __init__(self, config, dataset):
        super(STAMPF, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']

        # define layers and loss
        self.item_embedding = nn.Embedding(self.n_items, self.embedding_size, padding_idx=0)
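        # STAMP attention network: w1/w2/w3 project the session items, the last
        # click and the session memory; w0 maps the result to a scalar weight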
        self.w1 = nn.Linear(self.embedding_size, self.embedding_size, bias=False)
        self.w2 = nn.Linear(self.embedding_size, self.embedding_size, bias=False)
        self.w3 = nn.Linear(self.embedding_size, self.embedding_size, bias=False)
        self.w0 = nn.Linear(self.embedding_size, 1, bias=False)
        self.b_a = nn.Parameter(torch.zeros(self.embedding_size), requires_grad=True)
        self.mlp_a = nn.Linear(self.embedding_size, self.embedding_size, bias=True)
        self.mlp_b = nn.Linear(self.embedding_size, self.embedding_size, bias=True)
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()
        self.loss_type = config['loss_type']
        self.selected_features = config['selected_features']
        self.pooling_mode = config['pooling_mode']
        self.device = config['device']
        self.num_feature_field = len(config['selected_features'])
        self.feature_embed_layer = FeatureSeqEmbLayer(
            dataset, self.embedding_size, self.selected_features, self.pooling_mode, self.device
        )
        self.concat_layer = nn.Linear(self.embedding_size * (1 + self.num_feature_field), self.embedding_size)

        if self.loss_type == 'BPR':
            self.loss_fct = BPRLoss()
        elif self.loss_type == 'CE':
            self.loss_fct = nn.CrossEntropyLoss()
        else:
            raise NotImplementedError("Make sure 'loss_type' in ['BPR', 'CE']!")

        # parameters initialization
        self.apply(self._init_weights)
Example #7
    def __init__(self, config, dataset):
        super(SASRecF, self).__init__(config, dataset)

        # load parameters info
        self.n_layers = config['n_layers']
        self.n_heads = config['n_heads']
        self.hidden_size = config['hidden_size']  # same as embedding_size
        self.inner_size = config['inner_size']  # the dimensionality of the feed-forward layer
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        self.attn_dropout_prob = config['attn_dropout_prob']
        self.hidden_act = config['hidden_act']
        self.layer_norm_eps = config['layer_norm_eps']

        self.selected_features = config['selected_features']
        self.pooling_mode = config['pooling_mode']
        self.device = config['device']
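        # non-FLOAT_SEQ feature fields count as one embedding each, while a
        # FLOAT_SEQ field contributes dataset.num(field) values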
        self.num_feature_field = sum(
            1 if dataset.field2type[field] != FeatureType.FLOAT_SEQ else
            dataset.num(field) for field in config['selected_features'])

        self.initializer_range = config['initializer_range']
        self.loss_type = config['loss_type']

        # define layers and loss
        self.item_embedding = nn.Embedding(self.n_items,
                                           self.hidden_size,
                                           padding_idx=0)
        self.position_embedding = nn.Embedding(self.max_seq_length,
                                               self.hidden_size)
        self.feature_embed_layer = FeatureSeqEmbLayer(dataset,
                                                      self.hidden_size,
                                                      self.selected_features,
                                                      self.pooling_mode,
                                                      self.device)

        self.trm_encoder = TransformerEncoder(
            n_layers=self.n_layers,
            n_heads=self.n_heads,
            hidden_size=self.hidden_size,
            inner_size=self.inner_size,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attn_dropout_prob=self.attn_dropout_prob,
            hidden_act=self.hidden_act,
            layer_norm_eps=self.layer_norm_eps)

        self.concat_layer = nn.Linear(
            self.hidden_size * (1 + self.num_feature_field), self.hidden_size)

        self.LayerNorm = nn.LayerNorm(self.hidden_size,
                                      eps=self.layer_norm_eps)
        self.dropout = nn.Dropout(self.hidden_dropout_prob)

        if self.loss_type == 'BPR':
            self.loss_fct = BPRLoss()
        elif self.loss_type == 'CE':
            self.loss_fct = nn.CrossEntropyLoss()
        else:
            raise NotImplementedError(
                "Make sure 'loss_type' in ['BPR', 'CE']!")

        # parameters initialization
        self.apply(self._init_weights)
        self.other_parameter_name = ['feature_embed_layer']
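
All seven constructors follow the same RecBole pattern: read hyperparameters from config, build an item embedding plus a FeatureSeqEmbLayer over selected_features, select the loss via loss_type, and initialize the weights. Below is a minimal usage sketch, assuming RecBole's standard quick-start API (Config, create_dataset, data_preparation, Trainer); the dataset name ml-100k and its item feature field 'class' are illustrative assumptions.

from recbole.config import Config
from recbole.data import create_dataset, data_preparation
from recbole.model.sequential_recommender import SASRecF
from recbole.trainer import Trainer

# the config keys mirror the hyperparameters read in the constructors above;
# 'class' is assumed to be an item feature field available in the dataset
config = Config(
    model='SASRecF',
    dataset='ml-100k',
    config_dict={
        'selected_features': ['class'],  # item feature fields to embed
        'pooling_mode': 'mean',          # pooling for multi-valued features
        'loss_type': 'CE',               # 'BPR' or 'CE', as checked in __init__
    },
)
dataset = create_dataset(config)
train_data, valid_data, test_data = data_preparation(config, dataset)

model = SASRecF(config, dataset).to(config['device'])
trainer = Trainer(config, model)
trainer.fit(train_data, valid_data)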