Example #1
    def build_train_graph(self, policy_action):
        # tf.get_variable_scope().reuse_variables()
        label_seq, dilate_input = self._expand_model_graph(self.input_train,
                                                           policy_action,
                                                           train=True)

        if self.using_negative_sampling:
            logits_2d = tf.reshape(dilate_input, [-1, self.dilated_channels])
            label_flat = tf.reshape(label_seq, [-1, 1])  # [B*(SessLen-1), 1]
            num_sampled = int(self.negative_sampling_ratio * self.item_size)
            loss = tf.nn.sampled_softmax_loss(
                self.softmax_w,
                self.softmax_b,
                labels=label_flat,
                inputs=logits_2d,
                num_sampled=num_sampled,
                num_classes=self.item_size,
            )  # [B*(SessLen-1)]
        else:
            logits = conv1d(tf.nn.relu(dilate_input),
                            output_channels=self.item_size,
                            name="logits")  # [B,SessLen-1,ItemSize]
            logits_2d = tf.reshape(
                logits, [-1, self.item_size])  # [B*(SessLen-1),ItemSize]
            label_flat = tf.reshape(label_seq, [-1])  # [B*(SessLen-1)]
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=label_flat, logits=logits_2d)  # [B*(SessLen-1)]

        self.loss = tf.reduce_mean(loss)
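
Note: tf.nn.sampled_softmax_loss returns one loss value per row of inputs (shape [N]), not an [N, 1] column. A minimal standalone sketch of its shape contract follows; the sizes and names (batch_x_steps, channels, num_sampled) are illustrative stand-ins, not identifiers from the model above.

import tensorflow as tf

batch_x_steps = 8    # stands in for B*(SessLen-1)
channels = 16        # stands in for dilated_channels
item_size = 100      # item vocabulary size
num_sampled = 20     # e.g. int(negative_sampling_ratio * item_size)

softmax_w = tf.Variable(tf.random.normal([item_size, channels]))
softmax_b = tf.Variable(tf.zeros([item_size]))

inputs = tf.random.normal([batch_x_steps, channels])  # hidden states
labels = tf.random.uniform([batch_x_steps, 1], maxval=item_size,
                           dtype=tf.int64)            # class ids, [N, 1]

loss = tf.nn.sampled_softmax_loss(
    weights=softmax_w, biases=softmax_b,
    labels=labels, inputs=inputs,
    num_sampled=num_sampled, num_classes=item_size,
)
print(loss.shape)  # (8,): one loss per (batch, step) position
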
Example #2
    def build_test_graph(self, source_hidden):
        tf.get_variable_scope().reuse_variables()

        # score only the last position in the session: [B, 1, C]
        logits = conv1d(tf.nn.relu(source_hidden[:, -1:, :]),
                        self.target_item_size,
                        name="logits")
        logits_2d = tf.reshape(logits, [-1, self.target_item_size])  # [B, TargetItemSize]

        self.test_probs = tf.nn.softmax(logits_2d)  # [B, TargetItemSize]
Example #3
    def build_train_graph(self, source_hidden):
        # source_hidden: [B, L, C]
        logits = conv1d(tf.nn.relu(source_hidden[:, -1:, :]),
                        output_channels=self.target_item_size,
                        name="logits")
        logits_2d = tf.reshape(logits, [-1, self.target_item_size])  # [B, TargetItemSize]

        label_flat = tf.reshape(self.target, [-1])  # [B]
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_flat, logits=logits_2d)  # [B]

        self.train_loss = tf.reduce_mean(loss)  # scalar
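
The full-softmax branches in Examples #1, #3, and #5 all rely on the same contract: tf.nn.sparse_softmax_cross_entropy_with_logits takes integer class ids of rank N and logits of rank N+1, and returns one loss per position. A minimal sketch with illustrative sizes (n stands in for B or B*(SessLen-1)):

import tensorflow as tf

n, item_size = 8, 100
logits_2d = tf.random.normal([n, item_size])
label_flat = tf.random.uniform([n], maxval=item_size, dtype=tf.int64)

loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=label_flat, logits=logits_2d)  # [n], one loss per position
train_loss = tf.reduce_mean(loss)         # scalar
print(loss.shape, train_loss.shape)       # (8,) ()
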
Example #4
    def _get_test_result(self, dilate_input, label_seq):
        if self.using_negative_sampling:
            # no conv "logits" layer exists under negative sampling; rebuild
            # the full logits from the sampled-softmax parameters: h @ W^T + b
            logits_2d = tf.reshape(dilate_input[:, -1:, :],
                                   [-1, self.dilated_channels])  # [B, C]
            logits_2d = tf.matmul(logits_2d, tf.transpose(self.softmax_w))
            logits_2d = tf.nn.bias_add(logits_2d,
                                       self.softmax_b)  # [B, ItemSize]
        else:
            logits = conv1d(tf.nn.relu(dilate_input[:, -1:, :]),
                            self.item_size,
                            name="logits")
            logits_2d = tf.reshape(logits, [-1, self.item_size])  # [B, ItemSize]

        label_flat = tf.reshape(label_seq[:, -1], [-1])  # [B,]
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_flat, logits=logits_2d)  # [B,]

        loss = tf.reduce_mean(loss)
        probs = tf.nn.softmax(logits_2d)  # [B, ItemSize]
        return loss, probs
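
Under negative sampling, training never builds the conv1d "logits" layer, so Example #4 reconstructs full logits at test time from the sampled-softmax parameters as h @ W^T + b, which yields the same scores a full softmax layer would produce. A minimal sketch of that reconstruction, with illustrative sizes (batch, channels, last_hidden are stand-in names):

import tensorflow as tf

batch, channels, item_size = 4, 16, 100
softmax_w = tf.random.normal([item_size, channels])  # [ItemSize, C], as in training
softmax_b = tf.zeros([item_size])
last_hidden = tf.random.normal([batch, channels])    # plays the role of dilate_input[:, -1, :]

logits = tf.nn.bias_add(
    tf.matmul(last_hidden, tf.transpose(softmax_w)), softmax_b)  # [B, ItemSize]
probs = tf.nn.softmax(logits)
print(probs.shape)  # (4, 100)
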
Example #5
    def build_train_graph(self):

        label_seq, dilate_input = self._expand_model_graph(self.input_train, train=True)

        # label_seq: [B, SessLen - 1]
        # dilate_input: [B, SessLen - 1, DilatedChannels]

        if self.using_negative_sampling:
            logits_2d = tf.reshape(
                dilate_input, [-1, self.channels]
            )  # [B*(SessLen-1), DilatedChannels]

            label_flat = tf.reshape(label_seq, [-1, 1])  # [B*(SessLen-1), 1]
            num_sampled = int(self.negative_sampling_ratio * self.item_size)
            loss = tf.nn.sampled_softmax_loss(
                self.softmax_w,
                self.softmax_b,
                labels=label_flat,
                inputs=logits_2d,
                num_sampled=num_sampled,
                num_classes=self.item_size,
            )  # [B*(SessLen-1)]
        else:
            logits = conv1d(
                tf.nn.relu(dilate_input), output_channels=self.item_size, name="logits"
            )  # [B,SessLen-1,ItemSize]
            logits_2d = tf.reshape(
                logits, [-1, self.item_size]
            )  # [B*(SessLen-1),ItemSize]
            label_flat = tf.reshape(label_seq, [-1])  # [B*(SessLen-1)]
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=label_flat, logits=logits_2d
            )  # [B*(SessLen-1)]

        loss_train = tf.reduce_mean(loss)  # mean training loss, scalar
        return loss_train