Example #1
0
File: lstur.py  Project: mindis/recommender
    def _build_newsencoder(self, embedding_layer):
        """Build the CNN-based news encoder of LSTUR.

        Args:
            embedding_layer(obj): a word embedding layer shared across encoders.

        Return:
            obj: the news encoder of LSTUR.
        """
        hparams = self.hparams
        title_input = keras.Input(shape=(hparams.doc_size,), dtype="int32")

        # Embed the word ids and regularize before the convolution.
        embedded_title = embedding_layer(title_input)
        hidden = layers.Dropout(hparams.dropout)(embedded_title)

        # 1-D convolution over the word axis extracts local n-gram features.
        hidden = layers.Conv1D(
            hparams.filter_num,
            hparams.window_size,
            activation=hparams.cnn_activation,
            padding="same",
            bias_initializer=keras.initializers.Zeros(),
            kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
        )(hidden)
        hidden = layers.Dropout(hparams.dropout)(hidden)

        # Re-impose the padding mask (lost through dropout/conv) so the
        # attention pooling ignores padded positions.
        masked = layers.Masking()(
            OverwriteMasking()([hidden, ComputeMasking()(title_input)])
        )
        title_vector = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(masked)

        return keras.Model(title_input, title_vector, name="news_encoder")
Example #2
0
    def _build_newsencoder(self, embedding_layer):
        """Build a news encoder that fuses title and body representations.

        Args:
            embedding_layer(obj): a word embedding layer shared by the
                title and body sub-encoders.

        Return:
            obj: the news encoder model.
        """
        hparams = self.hparams
        # One flat input packs title ids, body ids and two category ids.
        packed_input = keras.Input(
            shape=(hparams.title_size + hparams.body_size + 2,), dtype="int32"
        )

        # Slice the packed vector back into its per-field id sequences.
        title_ids = layers.Lambda(lambda x: x[:, : hparams.title_size])(packed_input)
        body_ids = layers.Lambda(
            lambda x: x[:, hparams.title_size : hparams.title_size + hparams.body_size]
        )(packed_input)

        # Encode each field with its dedicated sub-encoder.
        title_repr = self._build_titleencoder(embedding_layer)(title_ids)
        body_repr = self._build_bodyencoder(embedding_layer)(body_ids)

        # Stack the field vectors, then pool them with additive attention
        # into a single news representation.
        field_stack = layers.Concatenate(axis=-2)([title_repr, body_repr])
        news_repr = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(field_stack)

        return keras.Model(packed_input, news_repr, name="news_encoder")
Example #3
0
    def _build_bodyencoder(self, embedding_layer):
        """build body encoder of NAML news encoder.

        Args:
            embedding_layer(obj): a word embedding layer.

        Return:
            obj: the body encoder of NAML.
        """
        hparams = self.hparams
        body_input = keras.Input(shape=(hparams.body_size,), dtype="int32")

        # Pipeline: embedding -> dropout -> n-gram CNN -> dropout ->
        # additive attention pooling.
        hidden = embedding_layer(body_input)
        hidden = layers.Dropout(hparams.dropout)(hidden)
        hidden = layers.Conv1D(
            hparams.filter_num,
            hparams.window_size,
            activation=hparams.cnn_activation,
            padding="same",
            bias_initializer=keras.initializers.Zeros(),
            kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
        )(hidden)
        hidden = layers.Dropout(hparams.dropout)(hidden)
        body_vector = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(hidden)

        # Add a length-1 axis so field vectors can later be stacked along
        # a "field" dimension and attended over together.
        body_vector = layers.Reshape((1, hparams.filter_num))(body_vector)

        return keras.Model(body_input, body_vector, name="body_encoder")
Example #4
0
    def _build_userencoder(self, newsencoder):
        """The main function to create user encoder of NAML.

        Args:
            newsencoder(obj): the news encoder of NAML.

        Return:
            obj: the user encoder of NAML.
        """
        hparams = self.hparams
        # Each history entry packs title + body + 2 category ids.
        history_input = keras.Input(
            shape=(hparams.his_size, hparams.title_size + hparams.body_size + 2),
            dtype="int32",
        )

        # Encode every clicked news item with the shared news encoder, then
        # pool the sequence into one user vector with additive attention.
        clicked_news_repr = layers.TimeDistributed(newsencoder)(history_input)
        user_vector = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(
            clicked_news_repr
        )

        return keras.Model(history_input, user_vector, name="user_encoder")
Example #5
0
    def _build_newsencoder(self, embedding_layer):
        """The main function to create news encoder of NRMS.

        Args:
            embedding_layer(obj): a word embedding layer.

        Return:
            obj: the news encoder of NRMS.
        """
        hparams = self.hparams
        title_input = keras.Input(shape=(hparams.title_size,), dtype="int32")
        embedded_title = embedding_layer(title_input)

        # Dropout -> multi-head self-attention over words -> dropout ->
        # additive attention pooling into a single news vector.
        hidden = layers.Dropout(hparams.dropout)(embedded_title)
        hidden = SelfAttention(hparams.head_num, hparams.head_dim, seed=self.seed)(
            [hidden, hidden, hidden]
        )
        hidden = layers.Dropout(hparams.dropout)(hidden)
        title_vector = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(hidden)

        return keras.Model(title_input, title_vector, name="news_encoder")
Example #6
0
    def _build_userencoder(self, titleencoder, entityencoder, contextencoder):
        """Create a user encoder from title, entity and context encoders.

        Args:
            titleencoder(obj): the news title encoder.
            entityencoder(obj): optional encoder for title entities; may be None.
            contextencoder(obj): optional encoder for entity contexts; only
                used when ``entityencoder`` is also given. May be None.

        Return:
            obj: the user encoder model.
        """
        hparams = self.hparams
        his_input_title = keras.Input(
            shape=(hparams.his_size, hparams.title_size), dtype="int32"
        )

        # Encode each clicked title, then let the clicks attend over each other.
        clicked_title_repr = layers.TimeDistributed(
            titleencoder, name='news_time_distributed'
        )(his_input_title)
        y = SelfAttention(hparams.head_num, hparams.head_dim, seed=self.seed)(
            [clicked_title_repr, clicked_title_repr, clicked_title_repr]
        )

        if entityencoder is not None:
            # Entity ids mirror the title layout: one id per title position.
            his_input_title_entity = keras.Input(
                shape=(hparams.his_size, hparams.title_size), dtype="int32"
            )
            clicked_entity_repr = layers.TimeDistributed(
                entityencoder, name='entity_time_distributed'
            )(his_input_title_entity)
            entity_y = SelfAttention(
                hparams.head_num, hparams.head_dim, seed=self.seed
            )([clicked_entity_repr, clicked_entity_repr, clicked_entity_repr])

            if contextencoder is not None:
                # The context encoder consumes the same entity id input.
                clicked_context_repr = layers.TimeDistributed(
                    contextencoder, name='context_time_distributed'
                )(his_input_title_entity)
                context_y = SelfAttention(
                    hparams.head_num, hparams.head_dim, seed=self.seed
                )([clicked_context_repr, clicked_context_repr, clicked_context_repr])
                y = layers.Concatenate()([y, entity_y, context_y])
            else:
                y = layers.Concatenate()([y, entity_y])

        # Additive attention pools the click sequence into one user vector.
        user_present = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y)

        if entityencoder is not None:
            return keras.Model(
                inputs=[his_input_title, his_input_title_entity],
                outputs=user_present,
                name="user_encoder",
            )
        return keras.Model(his_input_title, user_present, name="user_encoder")
Example #7
0
    def _build_newsencoder(self, embedding_layer):
        """The main function to create news encoder of NAML.

        The news encoder is composed of a title encoder, body encoder,
        vert encoder and subvert encoder, fused with multi-head
        self-attention followed by additive attention.

        Args:
            embedding_layer(obj): a word embedding layer.

        Return:
            obj: the news encoder of NAML.
        """
        hparams = self.hparams
        # One flat input packs: title ids | body ids | vert id | subvert id.
        packed_input = keras.Input(
            shape=(hparams.title_size + hparams.body_size + 2,), dtype="int32"
        )

        # Slice the packed vector into its four fields.
        title_ids = layers.Lambda(
            lambda x: x[:, : hparams.title_size]
        )(packed_input)
        body_ids = layers.Lambda(
            lambda x: x[:, hparams.title_size : hparams.title_size + hparams.body_size]
        )(packed_input)
        vert_id = layers.Lambda(
            lambda x: x[:, hparams.title_size + hparams.body_size :
                        hparams.title_size + hparams.body_size + 1]
        )(packed_input)
        subvert_id = layers.Lambda(
            lambda x: x[:, hparams.title_size + hparams.body_size + 1 :]
        )(packed_input)

        # Encode each field with its dedicated sub-encoder.
        title_repr = self._build_titleencoder(embedding_layer)(title_ids)
        body_repr = self._build_bodyencoder(embedding_layer)(body_ids)
        vert_repr = self._build_vertencoder()(vert_id)
        subvert_repr = self._build_subvertencoder()(subvert_id)

        # Stack the four field vectors along a new "field" axis.
        field_stack = layers.Concatenate(axis=-2)(
            [title_repr, body_repr, vert_repr, subvert_repr]
        )

        # Multi-head attention lets the fields exchange information; additive
        # attention then pools them into a single news vector.
        hidden = layers.Dropout(hparams.dropout)(field_stack)
        hidden = SelfAttention(hparams.head_num, hparams.head_dim, seed=self.seed)(
            [hidden, hidden, hidden]
        )
        hidden = layers.Dropout(hparams.dropout)(hidden)
        news_repr = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(hidden)

        return keras.Model(packed_input, news_repr, name="news_encoder")
Example #8
0
    def _build_titleencoder(self, embedding_layer):
        """Build the title encoder (same architecture as the LSTUR news
        encoder, with its own hyper-parameters).

        Args:
            embedding_layer(obj): a word embedding layer.

        Return:
            obj: the title encoder model.
        """
        hparams = self.hparams
        title_input = keras.Input(shape=(hparams.title_size,), dtype="int32")

        # Pipeline: embedding -> dropout -> n-gram CNN -> dropout ->
        # additive attention pooling.
        hidden = embedding_layer(title_input)
        hidden = layers.Dropout(hparams.dropout)(hidden)
        hidden = layers.Conv1D(
            hparams.filter_num,
            hparams.window_size,
            activation=hparams.cnn_activation,
            padding="same",
            bias_initializer=keras.initializers.Zeros(),
            kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
        )(hidden)
        hidden = layers.Dropout(hparams.dropout)(hidden)
        title_vector = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(hidden)

        # Add a length-1 axis so field vectors can be stacked downstream.
        title_vector = layers.Reshape((1, hparams.filter_num))(title_vector)

        return keras.Model(title_input, title_vector, name="title_encoder")
Example #9
0
    def _build_userencoder(self, titleencoder):
        """The main function to create user encoder of NRMS.

        Args:
            titleencoder(obj): the news encoder of NRMS.

        Return:
            obj: the user encoder of NRMS.
        """
        hparams = self.hparams
        history_input = keras.Input(
            shape=(hparams.his_size, hparams.title_size), dtype="int32"
        )

        # Encode every clicked title, let the clicks attend over one another,
        # then pool into a single user vector with additive attention.
        clicked_repr = layers.TimeDistributed(titleencoder)(history_input)
        attended = SelfAttention(hparams.head_num, hparams.head_dim, seed=self.seed)(
            [clicked_repr, clicked_repr, clicked_repr]
        )
        user_vector = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(attended)

        return keras.Model(history_input, user_vector, name="user_encoder")
Example #10
0
    def _build_bodyencoder(self, embedding_layer):
        """Build the body encoder of the news encoder.

        Args:
            embedding_layer(obj): a word embedding layer.

        Return:
            obj: the body encoder model.
        """
        hparams = self.hparams
        word_ids = keras.Input(shape=(hparams.body_size,), dtype="int32")
        embedded = embedding_layer(word_ids)

        # CNN over word embeddings with dropout on both sides, then additive
        # attention pools the word positions into one vector.
        features = layers.Dropout(hparams.dropout)(embedded)
        features = layers.Conv1D(
            hparams.filter_num,
            hparams.window_size,
            activation=hparams.cnn_activation,
            padding="same",
            bias_initializer=keras.initializers.Zeros(),
            kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
        )(features)
        features = layers.Dropout(hparams.dropout)(features)
        pooled = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(features)

        # Length-1 axis lets callers stack this with other field vectors.
        pooled = layers.Reshape((1, hparams.filter_num))(pooled)

        return keras.Model(word_ids, pooled, name="body_encoder")
	def _build_lsturnaml(self):
        hparams = self.hparams

        his_input_title = keras.Input(
            shape=(hparams.his_size, hparams.title_size), dtype="int32"
        )
        his_input_body = keras.Input(
            shape=(hparams.his_size, hparams.body_size), dtype="int32"
        )

        pred_input_title = keras.Input(
            shape=(hparams.npratio + 1, hparams.title_size), dtype="int32"
        )
        pred_input_body = keras.Input(
            shape=(hparams.npratio + 1, hparams.body_size), dtype="int32"
        )
        pred_input_title_one = keras.Input(
            shape=(1, hparams.title_size,), dtype="int32"
        )
        pred_input_body_one = keras.Input(shape=(1, hparams.body_size,), dtype="int32")
        his_title_body = layers.Concatenate(axis=-1)(
            [his_input_title, his_input_body]
        )
        pred_title_body = layers.Concatenate(axis=-1)(
            [pred_input_title, pred_input_body]
        )

        pred_title_body_one = layers.Concatenate(axis=-1)(
            [
                pred_input_title_one,
                pred_input_body_one,
            ]
        )
        pred_title_body_one = layers.Reshape((-1,))(pred_title_body_one)

        embedding_layer = layers.Embedding(
            self.word2vec_embedding.shape[0],
            hparams.word_emb_dim,
            weights=[self.word2vec_embedding],
            trainable=True,
        )

        self.newsencoder = self._build_newsencoder(embedding_layer)
        self.userencoder = self._build_userencoder(self.newsencoder)

        user_present = self.userencoder(his_title_body)
        news_present = layers.TimeDistributed(self.newsencoder)(pred_title_body)
        news_present_one = self.newsencoder(pred_title_body_one)

        preds = layers.Dot(axes=-1)([news_present, user_present])
        preds = layers.Activation(activation="softmax")(preds)

        pred_one = layers.Dot(axes=-1)([news_present_one, user_present])
        pred_one = layers.Activation(activation="sigmoid")(pred_one)

        model = keras.Model(
            [
                his_input_title,
                his_input_body,
                pred_input_title,
                pred_input_body,
            ],
            preds,
        )

        scorer = keras.Model(
            [
                his_input_title,
                his_input_body,
                pred_input_title_one,
                pred_input_body_one,
            ],
            pred_one,
        )

        return model, scorer
Example #11
0
File: lstur.py  Project: zihua/recommenders
    def _build_userencoder(self, titleencoder, type="ini"):
        """The main function to create user encoder of LSTUR.

        LSTUR combines a long-term user representation (a per-user trainable
        embedding) with a short-term one (a GRU over clicked-news vectors).

        Args:
            titleencoder(obj): the news encoder of LSTUR.
            type(str): "ini" seeds the GRU's initial state with the long-term
                embedding; "con" concatenates the GRU output with it and
                projects back to ``gru_unit`` dimensions.

        Return:
            obj: the user encoder of LSTUR.

        Raises:
            ValueError: if ``type`` is neither "ini" nor "con".
        """
        # BUG FIX: the original block re-encoded the history after the
        # if/elif and overwrote `user_present` with an AttLayer2 pooling,
        # discarding the GRU-based long/short-term user modeling and making
        # both `type` and `user_indexes` inert. Those lines are removed.
        hparams = self.hparams
        his_input_title = keras.Input(shape=(hparams.his_size,
                                             hparams.doc_size),
                                      dtype="int32")
        user_indexes = keras.Input(shape=(1, ), dtype="int32")

        # Long-term user representation: one trainable embedding per user id,
        # initialized to zeros.
        user_embedding_layer = layers.Embedding(
            hparams.user_num,
            hparams.gru_unit,
            trainable=True,
            embeddings_initializer="zeros",
        )

        long_u_emb = layers.Reshape(
            (hparams.gru_unit, ))(user_embedding_layer(user_indexes))
        # Encode each clicked news item with the shared title encoder.
        click_title_presents = layers.TimeDistributed(titleencoder)(
            his_input_title)

        if type == "ini":
            # Short-term GRU whose initial state is the long-term embedding;
            # masking skips all-zero (padded) history slots.
            user_present = layers.GRU(
                hparams.gru_unit,
                kernel_initializer=keras.initializers.glorot_uniform(
                    seed=self.seed),
                recurrent_initializer=keras.initializers.glorot_uniform(
                    seed=self.seed),
                bias_initializer=keras.initializers.Zeros(),
            )(
                layers.Masking(mask_value=0.0)(click_title_presents),
                initial_state=[long_u_emb],
            )
        elif type == "con":
            # Short-term GRU output concatenated with the long-term embedding,
            # then projected back to gru_unit dimensions.
            short_uemb = layers.GRU(
                hparams.gru_unit,
                kernel_initializer=keras.initializers.glorot_uniform(
                    seed=self.seed),
                recurrent_initializer=keras.initializers.glorot_uniform(
                    seed=self.seed),
                bias_initializer=keras.initializers.Zeros(),
            )(layers.Masking(mask_value=0.0)(click_title_presents))

            user_present = layers.Concatenate()([short_uemb, long_u_emb])
            user_present = layers.Dense(
                hparams.gru_unit,
                bias_initializer=keras.initializers.Zeros(),
                kernel_initializer=keras.initializers.glorot_uniform(
                    seed=self.seed),
            )(user_present)
        else:
            # Previously an unknown type silently fell through; fail loudly
            # now that the attention fallback is gone.
            raise ValueError(
                "type must be 'ini' or 'con', got %r" % (type,))

        model = keras.Model([his_input_title, user_indexes],
                            user_present,
                            name="user_encoder")
        return model