    def build_fcn_net(self, inp, use_dice=False):
        with self.graph.as_default():
            with tf.name_scope("Out"):
                bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1')
                dnn1 = tf.layers.dense(bn1, 200, activation=None, name='f1')
                if use_dice:
                    dnn1 = dice(dnn1, name='dice_1')
                else:
                    dnn1 = prelu(dnn1, 'prelu1')

                dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='f2')
                if use_dice:
                    dnn2 = dice(dnn2, name='dice_2')
                else:
                    dnn2 = prelu(dnn2, 'prelu2')
                dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='f3')
                self.y_hat = tf.nn.softmax(dnn3) + 1e-8  # small epsilon keeps tf.log finite

            with tf.name_scope('Metrics'):
                # Cross-entropy loss between the softmax output and the
                # one-hot labels in target_ph
                ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
                self.loss = ctr_loss
                # tf.summary.scalar('loss', self.loss)
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr_ph).minimize(self.loss)
                # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_ph).minimize(self.loss)
                # Accuracy metric
                self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(self.y_hat), self.target_ph), tf.float32))
                # tf.summary.scalar('accuracy', self.accuracy)

            self.merged = tf.summary.merge_all()
            # Create the saver after the network is built so the new FCN
            # variables are included in checkpoints.
            self.saver = tf.train.Saver(max_to_keep=1)
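All of these build_fcn_net variants call prelu and dice activation helpers defined elsewhere in their repositories. A minimal sketch of both, modeled on DIN/DIEN-style reference code (the initializer values and the learnable per-unit alpha are assumptions):

import tensorflow as tf

def prelu(x, name='prelu'):
    # Parametric ReLU: a learned slope for the negative part.
    with tf.variable_scope(name):
        alpha = tf.get_variable('alpha', x.get_shape()[-1],
                                initializer=tf.constant_initializer(0.1),
                                dtype=tf.float32)
    return tf.maximum(0.0, x) + alpha * tf.minimum(0.0, x)

def dice(x, name='dice'):
    # DICE: a sigmoid gate over the batch-normalized input blends the
    # identity with an alpha-scaled copy of x.
    with tf.variable_scope(name):
        alpha = tf.get_variable('alpha', x.get_shape()[-1],
                                initializer=tf.constant_initializer(0.0),
                                dtype=tf.float32)
    x_normed = tf.layers.batch_normalization(x, center=False, scale=False,
                                             name=name + '_bn')
    gate = tf.sigmoid(x_normed)
    return gate * x + (1.0 - gate) * alpha * x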
Example #2
    def build_fcn_net(self, inp, use_dice=False):
        bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1')
        dnn1 = tf.layers.dense(bn1, 200, activation=None, name='f1')

        if use_dice:
            dnn1 = dice(dnn1, name='dice_1')
        else:
            dnn1 = prelu(dnn1, 'prelu1')

        dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='f2')
        if use_dice:
            dnn2 = dice(dnn2, name='dice_2')
        else:
            dnn2 = prelu(dnn2, name='prelu2')

        dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='f3')
        self.y_hat = tf.nn.softmax(dnn3) + 1e-8  # small epsilon keeps tf.log finite

        with tf.name_scope('Metrics'):
            ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
            self.loss = ctr_loss
            if self.use_negsampling:
                self.loss += self.aux_loss
            tf.summary.scalar('loss', self.loss)
            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.lr).minimize(self.loss)

            self.accuracy = tf.reduce_mean(
                tf.cast(tf.equal(tf.round(self.y_hat), self.target_ph),
                        tf.float32))
            tf.summary.scalar('accuracy', self.accuracy)

        self.merged = tf.summary.merge_all()
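When use_negsampling is set, this variant adds self.aux_loss to the objective. In DIEN-style models that term is a binary cross-entropy that asks the hidden state at each step to score the actually clicked next item above a sampled negative; a hedged sketch (the layer name, shapes, and 1e-8 epsilon are assumptions):

def auxiliary_loss(h_states, click_seq, noclick_seq, mask):
    # h_states: (B, T, H) hidden states; click_seq / noclick_seq:
    # (B, T, E) embeddings of the true next item and a sampled negative.
    click_input = tf.concat([h_states, click_seq], -1)
    noclick_input = tf.concat([h_states, noclick_seq], -1)
    click_p = tf.layers.dense(click_input, 1, activation=tf.nn.sigmoid,
                              name='aux_fc', reuse=tf.AUTO_REUSE)
    noclick_p = tf.layers.dense(noclick_input, 1, activation=tf.nn.sigmoid,
                                name='aux_fc', reuse=tf.AUTO_REUSE)
    # mask zeroes out padded steps of variable-length sequences.
    click_loss = -tf.log(click_p + 1e-8)[:, :, 0] * mask
    noclick_loss = -tf.log(1.0 - noclick_p + 1e-8)[:, :, 0] * mask
    return tf.reduce_mean(click_loss + noclick_loss)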
Example #3
    def dis(self, x, training):
        # Discriminator: an initial conv layer followed by three
        # conv -> prelu -> maxpool -> dropout blocks and a dense head.
        x = tf.reshape(x, shape=[-1, self.shape, self.shape, 3])
        scope = 'dis_'
        layer = lrelu(conv2d(x, self.weights[scope + 'w_conv1'])
                      + self.biases[scope + 'b_conv1'])

        for i in range(1, 4):
            name = scope + 'w_conv' + str(i + 1)
            conv = prelu(conv2d(layer, self.weights[name])
                         + self.biases[scope + 'b_conv' + str(i + 1)], name)
            conv = maxpool2d(conv)
            conv = tf.nn.dropout(conv, self.keep_rate)
            layer = conv

        # Three 2x poolings shrink each spatial dimension by a factor of 8.
        fc = tf.reshape(layer, [-1, int(self.shape / 8) * int(self.shape / 8) * 256])
        fc = lrelu(tf.matmul(fc, self.weights[scope + 'w_fc']) + self.biases[scope + 'b_fc'])
        fc = tf.nn.dropout(fc, self.keep_rate)

        # Map the tanh output from [-1, 1] to a [0, 1] real/fake score.
        output = tf.matmul(fc, self.weights[scope + 'out']) + self.biases[scope + 'out']
        output = (tanh(output) + 1.0) * 0.5

        return output
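dis and the gen examples below lean on small wrappers around TF1 ops. One plausible set of definitions, as a sketch (the stride, pool size, and leak slope are assumptions):

import tensorflow as tf
from tensorflow import tanh

def conv2d(x, w):
    # Stride-1, shape-preserving convolution.
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

def maxpool2d(x):
    # 2x2 max pooling, halving each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')

def lrelu(x, alpha=0.2):
    # Leaky ReLU with a fixed negative slope.
    return tf.maximum(alpha * x, x)

def batch_norm(x, training, name):
    return tf.layers.batch_normalization(x, training=training, name=name)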
Example #4
    def gen(self, x, training):
        # Generator branch: a residual conv stack whose depth depends on
        # self.scope_name, ending in a subpixel (pixel-shuffle) 2x upscale.
        x = tf.reshape(x, shape=[-1, self.shape, self.shape, 3])
        scope = str(self.scope_name) + '_'

        layer_1 = conv2d(
            x,
            self.weights[scope + 'w_conv1']) + self.biases[scope + 'b_conv1']
        layer_1 = lrelu(layer_1)

        layer_2 = conv2d(
            layer_1,
            self.weights[scope + 'w_conv2']) + self.biases[scope + 'b_conv2']
        layer_2 = batch_norm(layer_2, training=training, name=scope + 'conv2')
        layer_2 = prelu(layer_2, scope + 'w_conv2')

        layer_3 = conv2d(
            layer_2,
            self.weights[scope + 'w_conv3']) + self.biases[scope + 'b_conv3']
        layer_3 = batch_norm(layer_3, training=training, name=scope + 'conv3')
        layer_3 = prelu(layer_3, scope + 'w_conv3')

        layer_4 = conv2d(
            layer_3,
            self.weights[scope + 'w_conv4']) + self.biases[scope + 'b_conv4']
        layer_4 = batch_norm(layer_4, training=training, name=scope + 'conv4')
        layer_4 = prelu(layer_4, scope + 'w_conv4') + layer_3

        if self.scope_name == 1:
            # Shallowest branch: one extra residual merge, then upscale.
            layer_4 = prelu(layer_4, scope + 'w_conv4_res') + layer_2
            layer_5 = conv2d(layer_4, self.weights[
                scope + 'w_conv5']) + self.biases[scope + 'b_conv5']
            layer_5 = subpixel2d(layer_5,
                                 [-1, self.shape * 2, self.shape * 2, 64])
            layer_5 = prelu(layer_5, scope + 'w_conv5')
            layer = layer_5

        elif self.scope_name == 2:
            # Mid-depth branch: two more residual blocks before the upscale.
            layer_5 = conv2d(layer_4, self.weights[
                scope + 'w_conv5']) + self.biases[scope + 'b_conv5']
            layer_5 = batch_norm(layer_5,
                                 training=training,
                                 name=scope + 'conv5')
            layer_5 = prelu(layer_5, scope + 'w_conv5') + layer_4

            layer_6 = conv2d(layer_5, self.weights[
                scope + 'w_conv6']) + self.biases[scope + 'b_conv6']
            layer_6 = batch_norm(layer_6,
                                 training=training,
                                 name=scope + 'conv6')
            layer_6 = prelu(layer_6, scope + 'w_conv6') + layer_5
            layer_6 = prelu(layer_6, scope + 'w_conv6_res') + layer_2

            layer_7 = conv2d(layer_6, self.weights[
                scope + 'w_conv7']) + self.biases[scope + 'b_conv7']
            layer_7 = subpixel2d(layer_7,
                                 [-1, self.shape * 2, self.shape * 2, 64])
            layer_7 = prelu(layer_7, scope + 'w_conv7')
            layer = layer_7

        elif self.scope_name == 3:
            # Deepest branch: four more residual blocks before the upscale.
            layer_5 = conv2d(layer_4, self.weights[
                scope + 'w_conv5']) + self.biases[scope + 'b_conv5']
            layer_5 = batch_norm(layer_5,
                                 training=training,
                                 name=scope + 'conv5')
            layer_5 = prelu(layer_5, scope + 'w_conv5') + layer_4

            layer_6 = conv2d(layer_5, self.weights[
                scope + 'w_conv6']) + self.biases[scope + 'b_conv6']
            layer_6 = batch_norm(layer_6,
                                 training=training,
                                 name=scope + 'conv6')
            layer_6 = prelu(layer_6, scope + 'w_conv6') + layer_5

            layer_7 = conv2d(layer_6, self.weights[
                scope + 'w_conv7']) + self.biases[scope + 'b_conv7']
            layer_7 = batch_norm(layer_7,
                                 training=training,
                                 name=scope + 'conv7')
            layer_7 = prelu(layer_7, scope + 'w_conv7') + layer_6

            layer_8 = conv2d(layer_7, self.weights[
                scope + 'w_conv8']) + self.biases[scope + 'b_conv8']
            layer_8 = batch_norm(layer_8,
                                 training=training,
                                 name=scope + 'conv8')
            layer_8 = prelu(layer_8, scope + 'w_conv8') + layer_7
            layer_8 = prelu(layer_8, scope + 'w_conv8_res') + layer_2

            layer_9 = conv2d(layer_8, self.weights[
                scope + 'w_conv9']) + self.biases[scope + 'b_conv9']
            layer_9 = subpixel2d(layer_9,
                                 [-1, self.shape * 2, self.shape * 2, 64])
            layer_9 = prelu(layer_9, scope + 'w_conv9')
            layer = layer_9

        output = conv2d(
            layer, self.weights[scope + 'out']) + self.biases[scope + 'out']
        # Map the tanh output from [-1, 1] to the [0, 255] pixel range.
        output = 127.5 * (1.0 + tanh(output))
        return output
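The subpixel2d call that closes each branch is a pixel shuffle: it trades channel depth for spatial resolution. A minimal sketch via tf.depth_to_space, assuming a fixed 2x block size and using the shape the callers pass in only as a reshape target:

import tensorflow as tf

def subpixel2d(x, output_shape):
    # e.g. (B, H, W, 256) -> (B, 2H, 2W, 64) for an upscale factor of 2.
    return tf.reshape(tf.depth_to_space(x, 2), output_shape)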
Example #5
    def gen_model(self, x, train):
        # Progressive generator: build the 2x, 4x, and 8x branches the
        # requested final resolution calls for, reusing coarser outputs.
        if self.final_res % 2 == 0:
            gen_1 = generator(self.weights, self.biases, 1, self.shape)
            out_1 = gen_1.gen(x, train)
            out = out_1

        if self.final_res % 4 == 0:
            gen_2 = generator(self.weights, self.biases, 2, self.shape)
            out_2 = gen_2.gen(x, train)
            out_2 = tf.concat([out_2, out_1], 3)

            out_2 = conv2d(
                out_2, self.weights['2_w_conv9']) + self.biases['2_b_conv9']
            out_2 = batch_norm(out_2, train, name='concat_1')
            out_2 = prelu(out_2, 'concat_batchnorm1')

            out_2 = conv2d(
                out_2, self.weights['2_w_conv10']) + self.biases['2_b_conv10']
            out_2 = subpixel2d(out_2, [-1, self.shape * 4, self.shape * 4, 64])
            out_2 = prelu(out_2, 'concat_subpixel1')

            out_2 = conv2d(out_2,
                           self.weights['4x_out']) + self.biases['4x_out']
            out_2 = 127.5 * (tanh(out_2) + 1)
            out = out_2

        if self.final_res % 8 == 0:
            gen_3 = generator(self.weights, self.biases, 3, self.shape)
            out_3 = gen_3.gen(x, train)
            out_3 = tf.concat([out_3, out_1], 3)

            out_3 = conv2d(
                out_3, self.weights['3_w_conv11']) + self.biases['3_b_conv11']
            out_3 = batch_norm(out_3, train, name='concat_2')
            out_3 = prelu(out_3, 'concat_batchnorm2')

            out_3 = conv2d(
                out_3, self.weights['3_w_conv12']) + self.biases['3_b_conv12']
            out_3 = subpixel2d(out_3, [-1, self.shape * 4, self.shape * 4, 64])
            out_3 = prelu(out_3, 'concat_subpixel2')

            out_3 = conv2d(
                out_3, self.weights['3_w_conv13']) + self.biases['3_b_conv13']
            out_3 = lrelu(out_3)
            out_3 = tf.concat([out_3, out_2], 3)

            out_3 = conv2d(
                out_3, self.weights['3_w_conv14']) + self.biases['3_b_conv14']
            out_3 = batch_norm(out_3, train, name='concat_3')
            out_3 = prelu(out_3, 'concat_batchnorm3')

            out_3 = conv2d(
                out_3, self.weights['3_w_conv15']) + self.biases['3_b_conv15']
            out_3 = subpixel2d(out_3, [-1, self.shape * 8, self.shape * 8, 64])
            out_3 = prelu(out_3, 'concat_subpixel3')

            out_3 = conv2d(out_3,
                           self.weights['8x_out']) + self.biases['8x_out']
            out_3 = 127.5 * (tanh(out_3) + 1)
            out = out_3

        return out
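As a usage sketch, gen_model can be driven straight from a low-resolution batch; the surrounding class (called network here) and the placeholder shapes are assumptions:

lr_images = tf.placeholder(tf.float32, [None, 32, 32, 3])  # low-res input
is_training = tf.placeholder(tf.bool)

net = network(final_res=8, shape=32)  # hypothetical constructor
# 8x-upscaled images, scaled to [0, 255] by the final tanh mapping.
sr_images = net.gen_model(lr_images, is_training)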