Code example #1
    def build_fcn_net(self, inp, use_dice=False):
        # Fully connected prediction tower on top of the concatenated features
        # (TensorFlow 1.x API: tf.layers / tf.train).
        with self.graph.as_default():
            self.saver = tf.train.Saver(max_to_keep=1)

            with tf.name_scope("Out"):
                # Three-layer MLP head: 200 -> 80 -> 2 logits, with Dice or PReLU
                # activations selected by `use_dice`. Note that bn1 below is built
                # without a training flag, so it normalizes with its moving statistics.
                bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1')
                dnn1 = tf.layers.dense(bn1, 200, activation=None, name='f1')
                if use_dice:
                    dnn1 = dice(dnn1, name='dice_1')
                else:
                    dnn1 = prelu(dnn1, 'prelu1')

                dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='f2')
                if use_dice:
                    dnn2 = dice(dnn2, name='dice_2')
                else:
                    dnn2 = prelu(dnn2, 'prelu2')
                dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='f3')
                # Epsilon keeps tf.log(self.y_hat) finite in the loss below.
                self.y_hat = tf.nn.softmax(dnn3) + 1e-8

            with tf.name_scope('Metrics'):
                # Cross-entropy loss: target_ph holds the one-hot click labels.
                ctr_loss = - tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
                self.loss = ctr_loss
                # tf.summary.scalar('loss', self.loss)
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr_ph).minimize(self.loss)
                # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_ph).minimize(self.loss)
                # Accuracy metric
                self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(self.y_hat), self.target_ph), tf.float32))
                # tf.summary.scalar('accuracy', self.accuracy)

            self.merged = tf.summary.merge_all()
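
The `dice` and `prelu` activation helpers are assumed to be defined elsewhere in the module. A minimal sketch of both is shown below, following the Dice activation described in the DIN paper (Zhou et al., 2018); the scope names and initializer values are illustrative assumptions, not the original module's code.

import tensorflow as tf

def prelu(x, scope=''):
    # PReLU: max(0, x) + alpha * min(0, x), with a learnable alpha per channel.
    with tf.variable_scope('prelu_' + scope):
        alpha = tf.get_variable('alpha', x.get_shape()[-1],
                                initializer=tf.constant_initializer(0.1),
                                dtype=tf.float32)
        return tf.maximum(0.0, x) + alpha * tf.minimum(0.0, x)

def dice(x, name=''):
    # Dice: rescale x by a probability p derived from the batch-normalized
    # input, blending x and alpha * x depending on p.
    with tf.variable_scope('dice_' + name):
        alpha = tf.get_variable('alpha', x.get_shape()[-1],
                                initializer=tf.constant_initializer(0.0),
                                dtype=tf.float32)
        x_normed = tf.layers.batch_normalization(x, center=False, scale=False,
                                                 name='bn')
        p = tf.sigmoid(x_normed)
        return p * x + (1.0 - p) * alpha * x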
Code example #2
import tensorflow as tf

# EMBEDDING_DIM is assumed to be a module-level constant.
def get_mask_zero_embedded(var_em, var_ph):
    # Treat id 0 as padding: return a zero vector instead of embedding row 0.
    mask = tf.equal(var_ph, 0)  # [batch, seq_len], True where padded
    # Broadcast the inverted mask across the embedding dimension.
    mask2 = tf.tile(tf.expand_dims(~mask, -1), [1, 1, EMBEDDING_DIM])
    rst = tf.where(
        mask2, tf.nn.embedding_lookup(var_em, var_ph),
        tf.zeros([tf.shape(var_ph)[0],
                  tf.shape(var_ph)[1], EMBEDDING_DIM]))
    return rst
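
A minimal usage sketch, assuming EMBEDDING_DIM = 4 and a small embedding table; it checks that positions holding the padding id 0 come back as all-zero vectors:

import numpy as np

EMBEDDING_DIM = 4  # assumed value for this example

var_em = tf.get_variable('item_emb', [10, EMBEDDING_DIM])  # 10 ids, dim 4
var_ph = tf.placeholder(tf.int32, [None, None])            # [batch, seq_len], 0 = padding

emb = get_mask_zero_embedded(var_em, var_ph)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(emb, feed_dict={var_ph: np.array([[3, 5, 0, 0]])})
    # out[0, :2] are learned vectors for ids 3 and 5; out[0, 2:] are all zeros.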