Example no. 1
0
    def __init__(
        self,
        mask_mat,
        input_size,
        rnn_hidden_size,
        att_hidden_size=(80, 40),
        activation='sigmoid',
        softmax_stag=False,
        gru='GRU'
    ):
        """Interest evolving layer: sequence attention followed by a GRU variant.

        Args:
            mask_mat: mask matrix forwarded to the attention layer.
            input_size: per-step input feature size of the recurrent network.
            rnn_hidden_size: hidden size of the recurrent network.
            att_hidden_size: hidden layer sizes of the attention MLP.
            activation: activation name used inside the attention MLP.
            softmax_stag: whether the attention layer normalizes with softmax.
            gru: recurrent variant, one of 'GRU', 'AIGRU', 'AGRU', 'AUGRU'.

        Raises:
            NotImplementedError: if ``gru`` is not one of the supported variants.
        """
        super(InterestEvolvingLayer, self).__init__()

        if gru not in ('GRU', 'AIGRU', 'AGRU', 'AUGRU'):
            # Fail fast: the original code silently left attention_layer and
            # dynamic_rnn unset for unknown variants, which would only surface
            # later as an AttributeError in forward().
            raise NotImplementedError('gru type [{}] is not supported'.format(gru))

        self.mask_mat = mask_mat
        self.gru = gru

        # The last positional flag differs per variant: False for plain 'GRU',
        # True for the attention-weighted variants (presumably return_seq_weight
        # — matches the keyword used elsewhere in this file; confirm in
        # SequenceAttLayer's signature).
        self.attention_layer = SequenceAttLayer(
            mask_mat, att_hidden_size, activation, softmax_stag, gru != 'GRU'
        )

        if gru in ('GRU', 'AIGRU'):
            # Plain and input-weighted variants use the stock GRU.
            self.dynamic_rnn = nn.GRU(input_size=input_size, hidden_size=rnn_hidden_size, batch_first=True)
        else:
            # 'AGRU' / 'AUGRU' need a custom cell that consumes attention scores.
            self.dynamic_rnn = DynamicRNN(input_size=input_size, hidden_size=rnn_hidden_size, gru=gru)
Example no. 2
0
    def __init__(self, config, dataset):
        """Build the DIN model: embedding layer, sequence attention, prediction MLP.

        Args:
            config: configuration mapping with field names and hyper-parameters
                (``LABEL_FIELD``, ``embedding_size``, ``mlp_hidden_size``,
                ``device``, ``pooling_mode``, ``dropout_prob``).
            dataset: dataset object providing the user and item feature tables.
        """
        super(DIN, self).__init__(config, dataset)

        # get field names and parameter values from config
        self.LABEL_FIELD = config['LABEL_FIELD']
        self.embedding_size = config['embedding_size']
        self.mlp_hidden_size = config['mlp_hidden_size']
        self.device = config['device']
        self.pooling_mode = config['pooling_mode']
        self.dropout_prob = config['dropout_prob']

        self.types = ['user', 'item']
        self.user_feat = dataset.get_user_feature()
        self.item_feat = dataset.get_item_feature()

        # init MLP layer sizes. The DNN input concatenates 3 item-feature
        # groups, the attention MLP input 4 (presumably query, key and their
        # interactions — confirm against SequenceAttLayer).
        num_item_feature = len(self.item_feat.interaction.keys())
        self.dnn_list = [3 * num_item_feature * self.embedding_size] + self.mlp_hidden_size
        self.att_list = [4 * num_item_feature * self.embedding_size] + self.mlp_hidden_size

        # Position indices [0, max_seq_length) as a (1, L) tensor used by the
        # attention layer to mask padded steps. NOTE(review): self.max_seq_length
        # is not set here — presumably assigned by the parent class; verify.
        mask_mat = torch.arange(self.max_seq_length).to(self.device).view(1, -1)
        self.attention = SequenceAttLayer(
            mask_mat, self.att_list, activation='Sigmoid', softmax_stag=False, return_seq_weight=False
        )
        self.dnn_mlp_layers = MLPLayers(self.dnn_list, activation='Dice', dropout=self.dropout_prob, bn=True)

        self.embedding_layer = ContextSeqEmbLayer(dataset, self.embedding_size, self.pooling_mode, self.device)
        self.dnn_predict_layers = nn.Linear(self.mlp_hidden_size[-1], 1)
        self.sigmoid = nn.Sigmoid()
        self.loss = nn.BCELoss()

        self.apply(self._init_weights)