# TensorFlow 1.x API; the remaining helpers (dynamic_rnn, VecAttGRUCell,
# din_fcn_attention, dice, prelu, get_rnn_sum) and the constants
# EMBEDDING_DIM, HIDDEN_SIZE, ATTENTION_SIZE are project-local utilities.
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import GRUCell


def get_mask_zero_embedded(var_em, var_ph):
    # Embedding lookup that zeroes the vectors of padding ids (id == 0).
    mask = tf.equal(var_ph, 0)
    # Broadcast ~mask over the embedding axis: [B, T] -> [B, T, EMBEDDING_DIM].
    mask2 = tf.concat(
        [tf.expand_dims(~mask, -1) for _ in range(EMBEDDING_DIM)], -1)

    rst = tf.where(
        mask2, tf.nn.embedding_lookup(var_em, var_ph),
        tf.zeros([tf.shape(var_ph)[0],
                  tf.shape(var_ph)[1], EMBEDDING_DIM]))
    return rst
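
# Usage sketch for get_mask_zero_embedded (illustrative; the demo_* names are
# hypothetical, not from the source). Positions whose id is 0 come back as
# all-zero rows:
demo_table = tf.get_variable("demo_emb", [100, EMBEDDING_DIM])
demo_ids = tf.constant([[3, 5, 0, 0]])            # [B=1, T=4]; 0 is padding
demo_emb = get_mask_zero_embedded(demo_table, demo_ids)
# demo_emb: [1, 4, EMBEDDING_DIM]; the last two rows are all zeros.
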
# NOTE: build_fcn_net is a method extracted from the model class; it relies
# on attributes such as self.graph, self.target_ph, self.core_type_ph and
# self.lr_ph defined elsewhere in that class.
    def build_fcn_net(self, inp, use_dice=False):
        with self.graph.as_default():
            self.saver = tf.train.Saver(max_to_keep=1)

            with tf.name_scope("Out"):
                bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1')
                dnn1 = tf.layers.dense(bn1, 200, activation=None, name='f1')
                # Dice (DIN's data-adaptive activation) or PReLU, per layer.
                if use_dice:
                    dnn1 = dice(dnn1, name='dice_1')
                else:
                    dnn1 = prelu(dnn1, 'prelu1')

                dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='f2')
                if use_dice:
                    dnn2 = dice(dnn2, name='dice_2')
                else:
                    dnn2 = prelu(dnn2, 'prelu2')
                dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='f3')
                # Epsilon keeps tf.log(self.y_hat) finite in the loss below.
                self.y_hat = tf.nn.softmax(dnn3) + 1e-8

            with tf.name_scope('Metrics'):
                # Up-weight targets of core-type samples by a factor of 1.2
                # per class; stored on self, while the loss below uses the
                # unweighted targets.
                coe = tf.constant([1.2, 1.2])
                coe_mask = tf.equal(self.core_type_ph, 1)
                coe_mask2 = tf.concat(
                    [tf.expand_dims(coe_mask, -1) for _ in range(2)], -1)
                self.target_ph_coe = tf.where(coe_mask2, self.target_ph * coe,
                                              self.target_ph)

                # Cross-entropy loss and optimizer initialization
                ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
                self.loss = ctr_loss
                # tf.summary.scalar('loss', self.loss)
                self.optimizer = tf.train.AdamOptimizer(
                    learning_rate=self.lr_ph).minimize(self.loss)
                # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_ph).minimize(self.loss)
                # Accuracy: fraction of entries where the rounded softmax
                # matches the one-hot target (element-wise over both classes).
                self.accuracy = tf.reduce_mean(
                    tf.cast(tf.equal(tf.round(self.y_hat), self.target_ph),
                            tf.float32))
                # tf.summary.scalar('accuracy', self.accuracy)

            self.merged = tf.summary.merge_all()
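
# Minimal standalone sketch of the Out/Metrics wiring above (constants are
# made up for illustration): a two-class softmax with a small epsilon so
# tf.log never sees an exact zero, and the element-wise cross-entropy loss.
demo_logits = tf.constant([[2.0, 0.5], [0.1, 1.0]])
demo_y_hat = tf.nn.softmax(demo_logits) + 1e-8
demo_target = tf.constant([[1.0, 0.0], [0.0, 1.0]])   # one-hot labels
demo_loss = -tf.reduce_mean(tf.log(demo_y_hat) * demo_target)
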
def get_self_or_expand_dims(aim):
    # Append a trailing axis and cast to float32: [B, T] -> [B, T, 1].
    return tf.cast(tf.expand_dims(aim, -1), tf.float32)
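
# Usage sketch (illustrative): adds a trailing feature axis so a per-step
# scalar can be concatenated with [B, T, N] embeddings.
demo_counts = tf.constant([[1, 2, 0], [3, 0, 0]])     # [B=2, T=3] int32
demo_feat = get_self_or_expand_dims(demo_counts)      # [2, 3, 1] float32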
def get_history_din_embedded(self):
    # TODO: add mask info for this operation
    # Fourteen days of history; each day has chapter and section id
    # placeholders stored on self.
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    # Embed each day's chapter and section ids, zeroing padded (id 0) slots.
    for suffix, table in (("chap", self.chapters_embeddings_var),
                          ("sec", self.sections_embeddings_var)):
        for fir in his_days:
            key = "history_" + fir + "_" + suffix + "_ph"
            embed_key = "history_" + fir + "_" + suffix + "_embedded"
            setattr(self, embed_key,
                    get_mask_zero_embedded(table, getattr(self, key)))

    # Pool each day's embeddings into one vector per day, oldest day first.
    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"
        key_s = "history_" + fir + "_sec_embedded"
        history_cha_embedded.append(get_rnn_sum(getattr(self, key_c),
                                                "rnncha"))
        history_sec_embedded.append(get_rnn_sum(getattr(self, key_s),
                                                "rnnsec"))

    # Stacking the day list is time-major (T*B*N); transpose to batch-major
    # (B*T*N) for the RNN layers below.
    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])
    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    # DIEN layer 1: a GRU over the concatenated chapter+section day vectors
    # extracts the user's interest state at each day.
    with tf.name_scope('rnn_1'):
        rnn_outputs, _ = dynamic_rnn(GRUCell(HIDDEN_SIZE * 2),
                                     inputs=tf.concat(
                                         [history_cha_emb, history_sec_emb],
                                         axis=-1),
                                     sequence_length=self.history_len_ph,
                                     dtype=tf.float32,
                                     scope="gru1")
    # Attention over the interest states, queried by today's item embedding.
    with tf.name_scope('Attention_layer_1'):
        query = tf.concat([
            get_rnn_sum(self.today_chapters_embedded, "rnncha"),
            get_rnn_sum(self.today_sections_embedded, "rnnsec")
        ], axis=-1)
        att_outputs, alphas = din_fcn_attention(query,
                                                rnn_outputs,
                                                ATTENTION_SIZE,
                                                self.history_mask_cha_ph,
                                                scope="1_1",
                                                softmax_stag=1,
                                                stag='1_1',
                                                mode='LIST',
                                                return_alphas=True)
    # Interest evolution: VecAttGRUCell (AUGRU) scales its update gate by the
    # attention scores so steps relevant to today's item dominate the state.
    with tf.name_scope('rnn_2'):
        rnn_outputs2, final_state2 = dynamic_rnn(
            VecAttGRUCell(HIDDEN_SIZE * 2),
            inputs=rnn_outputs,
            att_scores=tf.expand_dims(alphas, -1),
            sequence_length=self.history_len_ph,
            dtype=tf.float32,
            scope="gru2")

    return final_state2
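
# Data-flow summary for get_history_din_embedded (as wired above):
#   1. Each day's chapter/section ids are embedded with zeroed padding and
#      pooled by get_rnn_sum into one vector per day.
#   2. The per-day vectors are stacked time-major and transposed to B*T*N.
#   3. gru1 encodes the 14-day sequence, din_fcn_attention scores each step
#      against today's chapter+section embedding, and gru2 (VecAttGRUCell)
#      folds those scores into its state update; its final state summarizes
#      the user's evolved interest.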