Example No. 1
File: unet.py  Project: obskor/spw
    def neural_net(self):
        with tf.name_scope('down'):
            channel_n = self.model_channel
            conv1 = layer.conv2D('conv1_1', self.X, channel_n, [3, 3], [1, 1],
                                 'same')
            conv1 = layer.BatchNorm('BN1-1', conv1, self.training)
            conv1 = layer.p_relu('act1_1', conv1)
            conv1 = layer.conv2D('conv1_2', conv1, channel_n, [3, 3], [1, 1],
                                 'same')
            conv1 = layer.BatchNorm('BN1-2', conv1, self.training)
            conv1 = layer.p_relu('act1_2', conv1)
            # print(conv1.shape)
            pool1 = layer.maxpool('pool1', conv1, [2, 2], [2, 2],
                                  'same')  # 128 x 128
            # print(pool1.shape)

            channel_n *= 2  # 32
            conv2 = layer.conv2D('conv2_1', pool1, channel_n, [3, 3], [1, 1],
                                 'same')
            conv2 = layer.BatchNorm('BN2-1', conv2, self.training)
            conv2 = layer.p_relu('act2_1', conv2)
            conv2 = layer.conv2D('conv2_2', conv2, channel_n, [3, 3], [1, 1],
                                 'same')
            conv2 = layer.BatchNorm('BN2-2', conv2, self.training)
            conv2 = layer.p_relu('act2_2', conv2)
            # print(conv2.shape)
            pool2 = layer.maxpool('pool2', conv2, [2, 2], [2, 2],
                                  'same')  # 64 x 64
            # print(pool2.shape)

            channel_n *= 2  # 64
            conv3 = layer.conv2D('conv3_1', pool2, channel_n, [3, 3], [1, 1],
                                 'same')
            conv3 = layer.BatchNorm('BN3-1', conv3, self.training)
            conv3 = layer.p_relu('act3_1', conv3)
            conv3 = layer.conv2D('conv3_2', conv3, channel_n, [3, 3], [1, 1],
                                 'same')
            conv3 = layer.BatchNorm('BN3-2', conv3, self.training)
            conv3 = layer.p_relu('act3_2', conv3)
            # print(conv3.shape)
            pool3 = layer.maxpool('pool3', conv3, [2, 2], [2, 2],
                                  'same')  # 32 x 32
            # print(pool3.shape)

            channel_n *= 2  # 128
            conv4 = layer.conv2D('conv4_1', pool3, channel_n, [3, 3], [1, 1],
                                 'same')
            conv4 = layer.BatchNorm('BN4-1', conv4, self.training)
            conv4 = layer.p_relu('act4_1', conv4)
            conv4 = layer.conv2D('conv4_2', conv4, channel_n, [3, 3], [1, 1],
                                 'same')
            conv4 = layer.BatchNorm('BN4-2', conv4, self.training)
            conv4 = layer.p_relu('act4_2', conv4)
            # print(conv4.shape)
            pool4 = layer.maxpool('pool4', conv4, [2, 2], [2, 2],
                                  'same')  # 16 x 16
            # print(pool4.shape)

            channel_n *= 2  # 256
            conv5 = layer.conv2D('conv5_1', pool4, channel_n, [3, 3], [1, 1],
                                 'same')
            conv5 = layer.BatchNorm('BN5-1', conv5, self.training)
            conv5 = layer.p_relu('act5_1', conv5)
            conv5 = layer.conv2D('conv5_2', conv5, channel_n, [3, 3], [1, 1],
                                 'same')
            conv5 = layer.BatchNorm('BN5-2', conv5, self.training)
            conv5 = layer.p_relu('act5_2', conv5)
            # print(conv5.shape)

        with tf.name_scope('up'):
            up4 = layer.deconv2D('deconv4', conv5,
                                 [3, 3, channel_n // 2, channel_n],
                                 [-1, 32, 32, channel_n // 2], [1, 2, 2, 1],
                                 'SAME')
            up4 = tf.reshape(up4, shape=[-1, 32, 32, channel_n // 2])
            # up4 = layer.BatchNorm('deBN4', up4, self.training)
            up4 = layer.p_relu('deact4', up4)
            # print(up4.shape)
            up4 = layer.concat('concat4', [up4, conv4], 3)
            # print(up4.shape)

            channel_n //= 2  # 128
            conv4 = layer.conv2D('uconv4_1', up4, channel_n, [3, 3], [1, 1],
                                 'same')
            conv4 = layer.BatchNorm('uBN4-1', conv4, self.training)
            conv4 = layer.p_relu('uact4-1', conv4)
            conv4 = layer.conv2D('uconv4_2', conv4, channel_n, [3, 3], [1, 1],
                                 'same')
            conv4 = layer.BatchNorm('uBN4-2', conv4, self.training)
            conv4 = layer.p_relu('uact4-2', conv4)
            # print(conv4.shape)

            up3 = layer.deconv2D('deconv3', conv4,
                                 [3, 3, channel_n // 2, channel_n],
                                 [-1, 64, 64, channel_n // 2], [1, 2, 2, 1],
                                 'SAME')
            up3 = tf.reshape(up3, shape=[-1, 64, 64, channel_n // 2])
            # up3 = layer.BatchNorm('deBN3', up3, self.training)
            up3 = layer.p_relu('deact3', up3)
            # print(up3.shape)
            up3 = layer.concat('concat3', [up3, conv3], 3)
            # print(up3.shape)

            channel_n //= 2  # 64
            conv3 = layer.conv2D('uconv3_1', up3, channel_n, [3, 3], [1, 1],
                                 'same')
            conv3 = layer.BatchNorm('uBN3-1', conv3, self.training)
            conv3 = layer.p_relu('uact3-1', conv3)
            conv3 = layer.conv2D('uconv3_2', conv3, channel_n, [3, 3], [1, 1],
                                 'same')
            conv3 = layer.BatchNorm('uBN3-2', conv3, self.training)
            conv3 = layer.p_relu('uact3-2', conv3)
            # print(conv3.shape)

            up2 = layer.deconv2D('deconv2', conv3,
                                 [3, 3, channel_n // 2, channel_n],
                                 [-1, 128, 128, channel_n // 2], [1, 2, 2, 1],
                                 'SAME')
            up2 = tf.reshape(up2, shape=[-1, 128, 128, channel_n // 2])
            # up2 = layer.BatchNorm('deBN2', up2, self.training)
            up2 = layer.p_relu('deact2', up2)
            # print(up2.shape)
            up2 = layer.concat('concat2', [up2, conv2], 3)
            # print(up2.shape)

            channel_n //= 2  # 32
            conv2 = layer.conv2D('uconv2_1', up2, channel_n, [3, 3], [1, 1],
                                 'same')
            conv2 = layer.BatchNorm('uBN2-1', conv2, self.training)
            conv2 = layer.p_relu('uact2-1', conv2)
            conv2 = layer.conv2D('uconv2_2', conv2, channel_n, [3, 3], [1, 1],
                                 'same')
            conv2 = layer.BatchNorm('uBN2-2', conv2, self.training)
            conv2 = layer.p_relu('uact2-2', conv2)
            # print(conv2.shape)

            up1 = layer.deconv2D('deconv1', conv2,
                                 [3, 3, channel_n // 2, channel_n],
                                 [-1, 256, 256, channel_n // 2], [1, 2, 2, 1],
                                 'SAME')
            up1 = tf.reshape(up1, shape=[-1, 256, 256, channel_n // 2])
            # up1 = layer.BatchNorm('deBN1', up1, self.training)
            up1 = layer.p_relu('deact1', up1)
            # print(up1.shape)
            up1 = layer.concat('concat1', [up1, conv1], 3)
            # print(up1.shape)

            channel_n //= 2  # 16
            conv1 = layer.conv2D('uconv1_1', up1, 16, [3, 3], [1, 1], 'same')
            conv1 = layer.BatchNorm('uBN1-1', conv1, self.training)
            conv1 = layer.p_relu('uact1-1', conv1)
            conv1 = layer.conv2D('uconv1_2', conv1, 16, [3, 3], [1, 1], 'same')
            conv1 = layer.BatchNorm('uBN1-2', conv1, self.training)
            conv1 = layer.p_relu('uact1-2', conv1)

            out_seg = layer.conv2D('uconv1', conv1, 2, [1, 1], [1, 1], 'same')
            # out_seg = layer.BatchNorm('out_BN', out_seg, self.training)
            out_seg = tf.nn.relu(out_seg)
            print(out_seg.shape)

        return out_seg
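The `layer` helper module is project-specific and is not shown in these listings. Judging only from the call sites above, a minimal TF 1.x sketch of what `conv2D`, `BatchNorm`, `p_relu`, and `deconv2D` might wrap could look like the following; this is an assumption for illustration, and the actual obskor/spw implementation may use different defaults and initializers.

import tensorflow as tf

# Hypothetical helpers mirroring the (name, input, channels, kernel, stride, padding)
# call pattern seen above; not taken from the project source.
def conv2D(name, x, channels, kernel, stride, padding='same'):
    # Plain 2-D convolution.
    return tf.layers.conv2d(x, channels, kernel, strides=stride,
                            padding=padding, name=name)

def BatchNorm(name, x, training):
    # Batch normalization whose statistics update only while training is True.
    return tf.layers.batch_normalization(x, training=training, name=name)

def p_relu(name, x):
    # Parametric ReLU with one learnable slope per channel.
    with tf.variable_scope(name):
        alpha = tf.get_variable('alpha', shape=x.get_shape()[-1:],
                                initializer=tf.constant_initializer(0.2))
        return tf.maximum(0.0, x) + alpha * tf.minimum(0.0, x)

def deconv2D(name, x, filter_shape, output_shape, strides, padding='SAME'):
    # Transposed convolution; filter_shape is [k, k, out_channels, in_channels].
    with tf.variable_scope(name):
        w = tf.get_variable('w', filter_shape,
                            initializer=tf.truncated_normal_initializer(stddev=0.02))
        return tf.nn.conv2d_transpose(x, w, output_shape, strides, padding=padding)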
Example No. 2
    def neural_net(self):
        with tf.name_scope('input'):

            self.global_step = tf.Variable(0, trainable=False)
            self.drop_rate = tf.placeholder(tf.float32)
            self.training = tf.placeholder(tf.bool)
            self.X = tf.placeholder(tf.float32, [None, 32, 32, 3], name='X')

            self.Y = tf.placeholder(tf.int32, [None, 10], name='Y')

        with tf.name_scope('layer1'):
            self.layer = layer.conv2D('conv1-1', self.X, 30, [1, 5], [1, 1])
            self.layer = layer.BatchNorm('bn1', self.layer, self.training)
            self.layer = layer.p_relu('active1-1', self.layer)
            # print(self.layer.shape)
            self.layer = layer.conv2D('conv1-2', self.layer, 30, [5, 1],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn2', self.layer, self.training)
            self.layer = layer.p_relu('active1-2', self.layer)
            # print(self.layer.shape)
            self.layer = layer.maxpool('mp1', self.layer, [2, 2], [2, 2])
            # print(self.layer.shape)

        with tf.name_scope('layer2'):
            self.layer = layer.conv2D('conv2-1', self.layer, 90, [1, 3],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn3', self.layer, self.training)
            self.layer = layer.p_relu('active2-1', self.layer)
            # print(self.layer.shape)
            self.layer = layer.conv2D('conv2-2', self.layer, 90, [3, 1],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn4', self.layer, self.training)
            self.layer = layer.p_relu('active2-2', self.layer)
            # print(self.layer.shape)
            self.layer = layer.maxpool('mp2', self.layer, [2, 2], [2, 2])
            # print(self.layer.shape)

        with tf.name_scope('layer3'):
            self.layer = layer.conv2D('conv3-1', self.layer, 270, [1, 2],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn5', self.layer, self.training)
            self.layer = layer.p_relu('active3-1', self.layer)
            # print(self.layer.shape)
            self.layer = layer.conv2D('conv3-2', self.layer, 270, [2, 1],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn6', self.layer, self.training)
            self.layer = layer.p_relu('active3-2', self.layer)
            # print(self.layer.shape)

        with tf.name_scope('middle_flow'):
            self.m_layer = self.layer
            for i in range(8):
                self.residual = self.m_layer
                self.m_layer = layer.s_conv2D('s_conv' + str(i) + '-1',
                                              self.m_layer, 540, [1, 1],
                                              [1, 1], 'same')
                self.m_layer = layer.BatchNorm('bn' + str(i) + '1',
                                               self.m_layer, self.training)
                self.m_layer = layer.p_relu('m_active' + str(i) + '-1',
                                            self.m_layer)

                self.m_layer = layer.s_conv2D('s_conv' + str(i) + '-2',
                                              self.m_layer, 540, [3, 3],
                                              [1, 1], 'same')
                self.m_layer = layer.BatchNorm('bn' + str(i) + '2',
                                               self.m_layer, self.training)
                self.m_layer = layer.p_relu('m_active' + str(i) + '-2',
                                            self.m_layer)

                self.m_layer = layer.dropout('m_dp', self.m_layer,
                                             self.drop_rate, self.training)

                self.m_layer = layer.s_conv2D('s_conv' + str(i) + '-3',
                                              self.m_layer, 540, [3, 3],
                                              [1, 1], 'same')
                self.m_layer = layer.BatchNorm('bn' + str(i) + '3',
                                               self.m_layer, self.training)
                self.m_layer = layer.p_relu('m_active' + str(i) + '-3',
                                            self.m_layer)
                # print(self.m_layer.shape)

                self.m_layer = layer.s_conv2D('s_conv' + str(i) + '-4',
                                              self.m_layer, 270, [1, 1],
                                              [1, 1], 'same')
                self.m_layer = layer.add(self.m_layer,
                                         self.residual,
                                         name='add' + str(i))
                self.m_layer = layer.p_relu('m_active' + str(i) + '-4',
                                            self.m_layer)
                # print(self.m_layer.shape)

        with tf.name_scope('Global_Average_Pooling'):
            self.layer = layer.s_conv2D('reduce_channel', self.m_layer, 10,
                                        [1, 1], [1, 1])
            # print(self.layer.shape)
            self.layer = layer.averagepool('avp', self.layer, [5, 5], [1, 1])
            # print(self.layer.shape)
            self.logits = tf.squeeze(self.layer, [1, 2], name='logits')
            # print(self.logits.shape)

        with tf.name_scope('optimizer'):

            # one-hot int32 labels are cast to float, as the cross-entropy op requires
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=self.logits,
                    labels=tf.cast(self.Y, tf.float32)))

            # self.l2_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

            # self.loss = tf.add(self.loss, self.l2_loss)

            self.init_learning = 0.01

            self.decay_step = 2500

            self.decay_rate = 0.9

            self.exponential_decay_learning_rate = tf.train.exponential_decay(
                learning_rate=self.init_learning,
                global_step=self.global_step,
                decay_steps=self.decay_step,
                decay_rate=self.decay_rate,
                staircase=True,
                name='learning_rate')

            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.exponential_decay_learning_rate,
                epsilon=0.0001)

            self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            with tf.control_dependencies(self.update_ops):
                self.trainer = self.optimizer.minimize(
                    self.loss, global_step=self.global_step, name='train')

            self.accuracy = tf.reduce_mean(
                tf.cast(
                    tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1)),
                    tf.float32))

            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('lr', self.exponential_decay_learning_rate)
            tf.summary.scalar('accuracy', self.accuracy)

            self.merged = tf.summary.merge_all()
            self.writer = tf.summary.FileWriter('./logs', self.sess.graph)
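A minimal sketch of how a graph built by this neural_net() would typically be driven in TF 1.x. This is not part of the source: `net` stands for an instance whose neural_net() has already been called and whose net.sess is an open tf.Session, and the random NumPy batch is a stand-in for real 32x32 RGB data.

import numpy as np
import tensorflow as tf

# Dummy batch for illustration only.
xs = np.random.rand(8, 32, 32, 3).astype(np.float32)           # images [N, 32, 32, 3]
ys = np.eye(10, dtype=np.int32)[np.random.randint(10, size=8)]  # one-hot labels [N, 10]

net.sess.run(tf.global_variables_initializer())
for step in range(100):
    # One optimization step; BN statistics update because training=True.
    _, loss_val, summary = net.sess.run(
        [net.trainer, net.loss, net.merged],
        feed_dict={net.X: xs, net.Y: ys,
                   net.drop_rate: 0.3, net.training: True})
    net.writer.add_summary(summary, step)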
Example No. 3
    def neural_net(self):
        with tf.name_scope('input'):

            self.global_step = tf.Variable(0, trainable=False)
            self.drop_rate = tf.placeholder(tf.float32)
            self.training = tf.placeholder(tf.bool)

            self.X = tf.placeholder(tf.float32, [None, 32, 32, 3], name='X')
            self.Y = tf.placeholder(tf.int32, [None, 10], name='Y')


        with tf.name_scope('layer1') as scope:
            X = layer.conv2D('conv1-1', self.X, 30, [7, 7], [1, 1], 'same')  # 32 x 32
            X = layer.BatchNorm('BN1-1', X, self.training)
            X = layer.p_relu('active1-1', X)
            print(X.shape)
            X = layer.maxpool('mp1', X, [2, 2], [2, 2])  # 16 x 16
            print(X.shape)

        with tf.name_scope('layer2') as scope:
            X = layer.conv2D('conv2-1', X, 60, [3, 3], [1, 1], 'same')  # 16 x 16
            X = layer.BatchNorm('BN2-1', X, self.training)
            X = layer.p_relu('active2-1', X)
            print(X.shape)
            X = layer.maxpool('mp2', X, [2, 2], [2, 2])  # 8 x 8
            print(X.shape)

        with tf.name_scope('layer3') as scope:
            X = layer.conv2D('conv3-1', X, 120, [3, 3], [1, 1], 'same')  # 8 x 8
            X = layer.BatchNorm('BN3-1', X, self.training)
            X = layer.p_relu('active3-1', X)
            print(X.shape)
            # X = layer.maxpool('mp3', X, [2, 2], [2, 2])  # 4 x 4

        with tf.name_scope('bottleneck1') as scope:
            RX = layer.conv2D('rconv1-1', X, 120, [1, 1], [1, 1], 'same')
            RX = layer.BatchNorm('rBN1-1', RX, self.training)

            X = layer.conv2D('bconv1-1', X, 60, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN1-1', X, self.training)
            X = layer.p_relu('bactive1-1', X)
            X = layer.conv2D('bconv1-2', X, 60, [3, 3], [1, 1], 'same')
            X = layer.BatchNorm('bBN1-2', X, self.training)
            X = layer.p_relu('bactive1-2', X)
            # X = layer.dropout('dp1', X, self.drop_rate, self.training)
            X = layer.conv2D('bconv1-3', X, 120, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN1-3', X, self.training)
            X = layer.add(X, RX, name='add1')
            X = layer.p_relu('bactive1-3', X)
            print(X.shape)

        with tf.name_scope('bottleneck2') as scope:
            RX = layer.conv2D('rconv2-1', X, 240, [1, 1], [1, 1], 'same')
            RX = layer.BatchNorm('rBN2-1', RX, self.training)

            X = layer.conv2D('bconv2-1', X, 120, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN2-1', X, self.training)
            X = layer.p_relu('bactive2-1', X)
            X = layer.conv2D('bconv2-2', X, 120, [3, 3], [1, 1], 'same')
            X = layer.BatchNorm('bBN2-2', X, self.training)
            X = layer.p_relu('bactive2-2', X)
            # X = layer.dropout('dp2', X, self.drop_rate, self.training)
            X = layer.conv2D('bconv2-3', X, 240, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN2-3', X, self.training)
            X = layer.add(X, RX, name='add2')
            X = layer.p_relu('bactive2-3', X)
            print(X.shape)

        with tf.name_scope('bottleneck3') as scope:
            RX = layer.conv2D('rconv3-1', X, 360, [1, 1], [1, 1], 'same')
            RX = layer.BatchNorm('rBN3-1', RX, self.training)

            X = layer.conv2D('bconv3-1', X, 240, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN3-1', X, self.training)
            X = layer.p_relu('bactive3-1', X)
            X = layer.conv2D('bconv3-2', X, 240, [3, 3], [1, 1], 'same')
            X = layer.BatchNorm('bBN3-2', X, self.training)
            X = layer.p_relu('bactive3-2', X)
            # X = layer.dropout('dp3', X, self.drop_rate, self.training)
            X = layer.conv2D('bconv3-3', X, 360, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN3-3', X, self.training)
            X = layer.add(X, RX, name='add3')
            X = layer.p_relu('bactive3-3', X)
            print(X.shape)

        with tf.name_scope('bottleneck4') as scope:
            RX = layer.conv2D('rconv4-1', X, 480, [1, 1], [1, 1], 'same')
            RX = layer.BatchNorm('rBN4-1', RX, self.training)

            X = layer.conv2D('bconv4-1', X, 360, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN4-1', X, self.training)
            X = layer.p_relu('bactive4-1', X)
            X = layer.conv2D('bconv4-2', X, 360, [3, 3], [1, 1], 'same')
            X = layer.BatchNorm('bBN4-2', X, self.training)
            X = layer.p_relu('bactive4-2', X)
            # X = layer.dropout('dp4', X, self.drop_rate, self.training)
            X = layer.conv2D('bconv4-3', X, 480, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN4-3', X, self.training)
            X = layer.add(X, RX, name='add4')
            X = layer.p_relu('bactive4-3', X)
            print(X.shape)
        '''
        with tf.name_scope('bottleneck5') as scope:
            RX = layer.conv2D('rconv5-1', X, 720, [1, 1], [1, 1], 'same')
            RX = layer.BatchNorm('rBN5-1', RX, self.training)

            X = layer.conv2D('bconv5-1', X, 480, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN5-1', X, self.training)
            X = layer.p_relu('bactive5-1', X)
            X = layer.conv2D('bconv5-2', X, 480, [3, 3], [1, 1], 'same')
            X = layer.BatchNorm('bBN5-2', X, self.training)
            X = layer.p_relu('bactive5-2', X)
            X = layer.dropout('dp5', X, self.drop_rate, self.training)
            X = layer.conv2D('bconv5-3', X, 720, [1, 1], [1, 1], 'same')
            X = layer.BatchNorm('bBN5-3', X, self.training)
            X = layer.add(X, RX, name='add5')
            X = layer.p_relu('bactive5-3', X)
            print(X.shape)
        '''

        with tf.name_scope('GAP') as scope:
            X = layer.conv2D('GAP_1', X, 10, [1, 1], [1, 1], 'same')
            print(X.shape)
            X = layer.averagepool('avp', X, [8, 8], [1, 1])
            print(X.shape)
            self.logits = tf.squeeze(X, [1, 2])
            print(self.logits.shape)

        with tf.name_scope('optimizer'):

            # one-hot int32 labels are cast to float, as the cross-entropy op requires
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=self.logits, labels=tf.cast(self.Y, tf.float32)))

            self.l2_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

            self.loss = tf.add(self.loss, self.l2_loss)

            self.init_learning = 0.01

            self.decay_step = 2500

            self.decay_rate = 0.9

            self.exponential_decay_learning_rate = tf.train.exponential_decay(
                learning_rate=self.init_learning,
                global_step=self.global_step,
                decay_steps=self.decay_step,
                decay_rate=self.decay_rate,
                staircase=True,
                name='learning_rate'
            )

            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.exponential_decay_learning_rate)

            self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            with tf.control_dependencies(self.update_ops):
                self.trainer = self.optimizer.minimize(self.loss, global_step=self.global_step, name='train')

            self.accuracy = tf.reduce_mean(
                tf.cast(tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1)), tf.float32))

            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('lr', self.exponential_decay_learning_rate)
            tf.summary.scalar('accuracy', self.accuracy)

            self.merged = tf.summary.merge_all()
            self.writer = tf.summary.FileWriter('./logs', self.sess.graph)
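Example No. 3 is the only listing that actually adds the REGULARIZATION_LOSSES collection into its loss; that collection is only non-empty if the convolution wrappers register a weight penalty. A hedged sketch (an assumption, not taken from the project) of how that registration looks in TF 1.x:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
# A kernel_regularizer is what populates tf.GraphKeys.REGULARIZATION_LOSSES.
h = tf.layers.conv2d(x, 30, [3, 3], padding='same',
                     kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4),
                     name='reg_conv')
reg_terms = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.add_n(reg_terms)  # scalar sum of all registered weight penalties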
Example No. 4
    def neural_net(self):

        with tf.name_scope('input'):
            self.global_step = tf.Variable(0, trainable=False)
            self.drop_rate = tf.placeholder(tf.float32)
            self.training = tf.placeholder(tf.bool)
            self.X = tf.placeholder(tf.float32, [None, 224, 224, 1], name='X')
            self.Y = tf.placeholder(tf.float32, [None, 224, 224, 2], name='Y')
            self.batch_size = tf.placeholder(tf.int32)

        with tf.name_scope('down'):
            conv1 = layer.conv2D('conv1_1', self.X, 32, [3, 3], [1, 1], 'same')
            conv1 = layer.BatchNorm('BN1-1', conv1, self.training)
            conv1 = layer.p_relu('act1_1', conv1)
            conv1 = layer.conv2D('conv1_2', conv1, 32, [3, 3], [1, 1], 'same')
            conv1 = layer.BatchNorm('BN1-2', conv1, self.training)
            conv1 = layer.p_relu('act1_2', conv1)
            # print(conv1.shape)
            pool1 = layer.maxpool('pool1', conv1, [2, 2], [2, 2],
                                  'same')  # 112 x 112
            # print(pool1.shape)

            conv2 = layer.conv2D('conv2_1', pool1, 64, [3, 3], [1, 1], 'same')
            conv2 = layer.BatchNorm('BN2-1', conv2, self.training)
            conv2 = layer.p_relu('act2_1', conv2)
            conv2 = layer.conv2D('conv2_2', conv2, 64, [3, 3], [1, 1], 'same')
            conv2 = layer.BatchNorm('BN2-2', conv2, self.training)
            conv2 = layer.p_relu('act2_2', conv2)
            # print(conv2.shape)
            pool2 = layer.maxpool('pool2', conv2, [2, 2], [2, 2],
                                  'same')  # 56 x 56
            # print(pool2.shape)

            conv3 = layer.conv2D('conv3_1', pool2, 128, [3, 3], [1, 1], 'same')
            conv3 = layer.BatchNorm('BN3-1', conv3, self.training)
            conv3 = layer.p_relu('act3_1', conv3)
            conv3 = layer.conv2D('conv3_2', conv3, 128, [3, 3], [1, 1], 'same')
            conv3 = layer.BatchNorm('BN3-2', conv3, self.training)
            conv3 = layer.p_relu('act3_2', conv3)
            # print(conv3.shape)
            pool3 = layer.maxpool('pool3', conv3, [2, 2], [2, 2],
                                  'same')  # 28 x 28
            # print(pool3.shape)

            conv4 = layer.conv2D('conv4_1', pool3, 256, [3, 3], [1, 1], 'same')
            conv4 = layer.BatchNorm('BN4-1', conv4, self.training)
            conv4 = layer.p_relu('act4_1', conv4)
            conv4 = layer.conv2D('conv4_2', conv4, 256, [3, 3], [1, 1], 'same')
            conv4 = layer.BatchNorm('BN4-2', conv4, self.training)
            conv4 = layer.p_relu('act4_2', conv4)
            # print(conv4.shape)
            pool4 = layer.maxpool('pool4', conv4, [2, 2], [2, 2],
                                  'same')  # 14 x 14
            # print(pool4.shape)

            conv5 = layer.conv2D('conv5_1', pool4, 512, [3, 3], [1, 1], 'same')
            conv5 = layer.BatchNorm('BN5-1', conv5, self.training)
            conv5 = layer.p_relu('act5_1', conv5)
            conv5 = layer.conv2D('conv5_2', conv5, 512, [3, 3], [1, 1], 'same')
            conv5 = layer.BatchNorm('BN5-2', conv5, self.training)
            conv5 = layer.p_relu('act5_2', conv5)
            # print(conv5.shape)

        with tf.name_scope('up'):
            up4 = layer.deconv2D('deconv4', conv5, [3, 3, 256, 512],
                                 [self.batch_size, 28, 28, 256], [1, 2, 2, 1],
                                 'SAME')
            up4 = layer.BatchNorm('deBN4', up4, self.training)
            up4 = layer.p_relu('deact4', up4)
            # print(up4.shape)
            up4 = layer.concat('concat4', [up4, conv4], 3)
            # print(up4.shape)
            conv4 = layer.conv2D('uconv4_1', up4, 256, [3, 3], [1, 1], 'same')
            conv4 = layer.BatchNorm('uBN4-1', conv4, self.training)
            conv4 = layer.p_relu('uact4-1', conv4)
            conv4 = layer.conv2D('uconv4_2', conv4, 256, [3, 3], [1, 1],
                                 'same')
            conv4 = layer.BatchNorm('uBN4-2', conv4, self.training)
            conv4 = layer.p_relu('uact4-2', conv4)
            # print(conv4.shape)

            up3 = layer.deconv2D('deconv3', conv4, [3, 3, 128, 256],
                                 [self.batch_size, 56, 56, 128], [1, 2, 2, 1],
                                 'SAME')
            up3 = layer.BatchNorm('deBN3', up3, self.training)
            up3 = layer.p_relu('deact3', up3)
            # print(up3.shape)
            up3 = layer.concat('concat3', [up3, conv3], 3)
            # print(up3.shape)
            conv3 = layer.conv2D('uconv3_1', up3, 128, [3, 3], [1, 1], 'same')
            conv3 = layer.BatchNorm('uBN3-1', conv3, self.training)
            conv3 = layer.p_relu('uact3-1', conv3)
            conv3 = layer.conv2D('uconv3_2', conv3, 128, [3, 3], [1, 1],
                                 'same')
            conv3 = layer.BatchNorm('uBN3-2', conv3, self.training)
            conv3 = layer.p_relu('uact3-2', conv3)
            # print(conv3.shape)

            up2 = layer.deconv2D('deconv2', conv3, [3, 3, 64, 128],
                                 [self.batch_size, 112, 112, 64], [1, 2, 2, 1],
                                 'SAME')
            up2 = layer.BatchNorm('deBN2', up2, self.training)
            up2 = layer.p_relu('deact2', up2)
            # print(up2.shape)
            up2 = layer.concat('concat2', [up2, conv2], 3)
            # print(up2.shape)
            conv2 = layer.conv2D('uconv2_1', up2, 64, [3, 3], [1, 1], 'same')
            conv2 = layer.BatchNorm('uBN2-1', conv2, self.training)
            conv2 = layer.p_relu('uact2-1', conv2)
            conv2 = layer.conv2D('uconv2_2', conv2, 64, [3, 3], [1, 1], 'same')
            conv2 = layer.BatchNorm('uBN2-2', conv2, self.training)
            conv2 = layer.p_relu('uact2-2', conv2)
            # print(conv2.shape)

            up1 = layer.deconv2D('deconv1', conv2, [3, 3, 32, 64],
                                 [self.batch_size, 224, 224, 32], [1, 2, 2, 1],
                                 'SAME')
            up1 = layer.BatchNorm('deBN1', up1, self.training)
            up1 = layer.p_relu('deact1', up1)
            # print(up1.shape)
            up1 = layer.concat('concat1', [up1, conv1], 3)
            # print(up1.shape)
            conv1 = layer.conv2D('uconv1_1', up1, 32, [3, 3], [1, 1], 'same')
            conv1 = layer.BatchNorm('uBN1-1', conv1, self.training)
            conv1 = layer.p_relu('uact1-1', conv1)
            conv1 = layer.conv2D('uconv1_2', conv1, 32, [3, 3], [1, 1], 'same')
            conv1 = layer.BatchNorm('uBN1-2', conv1, self.training)
            conv1 = layer.p_relu('uact1-2', conv1)

            out_seg = layer.conv2D('uconv1', conv1, 2, [1, 1], [1, 1], 'same')
            out_seg = layer.BatchNorm('out_BN', out_seg, self.training)
            out_seg = layer.p_relu('out_act', out_seg)
            # print(out_seg.shape)

        with tf.name_scope('optimizer'):

            # self.output = tl.act.pixel_wise_softmax(out_seg)

            # self.loss = 1 - tl.cost.dice_coe(self.output, self.Y)

            # self.loss = tl.cost.dice_hard_coe(self.output, self.Y)

            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=out_seg,
                                                        labels=self.Y))

            #self.l2_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

            #self.loss = tf.add(self.loss,self.l2_loss)

            self.init_learning = 0.01

            self.decay_step = 5000

            self.decay_rate = 0.9

            self.exponential_decay_learning_rate = tf.train.exponential_decay(
                learning_rate=self.init_learning,
                global_step=self.global_step,
                decay_steps=self.decay_step,
                decay_rate=self.decay_rate,
                staircase=True,
                name='learning_rate')

            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.exponential_decay_learning_rate,
                epsilon=0.00001)

            self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            with tf.control_dependencies(self.update_ops):
                self.trainer = self.optimizer.minimize(
                    self.loss, global_step=self.global_step)

            self.out = tf.nn.softmax(out_seg)

            self.predicted, _ = tf.split(self.out, [1, 1], 3)

            self.truth, _ = tf.split(self.Y, [1, 1], 3)

            self.accuracy = layer.iou_coe(output=self.predicted,
                                          target=self.truth)
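layer.iou_coe is not shown in the listing; the commented-out TensorLayer calls above suggest it mirrors a hard IoU (Jaccard) coefficient over the predicted foreground channel. A hedged stand-in, assuming that behaviour, might look like:

import tensorflow as tf

# Hypothetical stand-in for layer.iou_coe; the project's helper may differ.
def iou_coe(output, target, threshold=0.5, smooth=1e-5):
    # Binarize the predicted and ground-truth foreground probability maps.
    pred = tf.cast(output > threshold, tf.float32)
    truth = tf.cast(target > threshold, tf.float32)
    inter = tf.reduce_sum(pred * truth, axis=[1, 2, 3])                    # per-image intersection
    union = tf.reduce_sum(tf.cast(pred + truth >= 1, tf.float32),
                          axis=[1, 2, 3])                                  # per-image union
    return tf.reduce_mean((inter + smooth) / (union + smooth))             # batch-mean IoU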
Example No. 5
    def neural_net(self):
        with tf.name_scope('input'):

            self.global_step = tf.Variable(0, trainable=False)

            self.drop_rate = tf.placeholder(tf.float32)

            self.training = tf.placeholder(tf.bool)

            self.init_learning = 0.01

            self.decay_step = 5000

            self.decay_rate = 0.9

            self.X = tf.placeholder(tf.float32, [None, 32, 32, 3], name='X')
            self.Y = tf.placeholder(tf.int32, [None, 10], name='Y')

        with tf.name_scope('dense_layer1'):

            X1 = layer.conv2D('conv1', self.X, 32, [3, 3], [1, 1], 'same')
            X1 = layer.BatchNorm('bn1', X1, self.training)
            X1 = layer.p_relu('act1', X1)
            # print(X1.shape)

            X2 = layer.conv2D('conv2', X1, 32, [3, 3], [1, 1], 'same')
            X2 = layer.BatchNorm('bn2', X2, self.training)
            X2 = layer.p_relu('act2', X2)
            # print(X2.shape)

            X3 = layer.concat('concat1', [X1, X2])
            # print(X3.shape)
            X3 = layer.conv2D('conv3', X3, 32, [3, 3], [1, 1], 'same')
            X3 = layer.BatchNorm('bn3', X3, self.training)
            X3 = layer.p_relu('act3', X3)
            # print(X3.shape)

            X4 = layer.concat('concat2', [X1, X2, X3])
            # print(X4.shape)
            X4 = layer.conv2D('conv4', X4, 32, [3, 3], [1, 1], 'same')
            X4 = layer.BatchNorm('bn4', X4, self.training)
            X4 = layer.p_relu('act4', X4)
            # print(X4.shape)

            X5 = layer.concat('concat3', [X1, X2, X3, X4])
            # print(X5.shape)
            X5 = layer.conv2D('conv5', X5, 32, [3, 3], [1, 1], 'same')
            X5 = layer.BatchNorm('bn5', X5, self.training)
            X5 = layer.p_relu('act5', X5)
            # print(X5.shape)

        with tf.name_scope('middle_node1'):
            X5 = layer.conv2D('mconv1', X5, 64, [1, 1], [1, 1])
            # print(X5.shape)
            X5 = layer.maxpool('mp1', X5, [2, 2], [2, 2])
            # print(X5.shape)

        with tf.name_scope('dense_layer2'):
            X6 = layer.conv2D('conv6', X5, 64, [3, 3], [1, 1], 'same')
            X6 = layer.BatchNorm('bn6', X6, self.training)
            X6 = layer.p_relu('act6', X6)
            # print(X6.shape)

            X7 = layer.conv2D('conv7', X6, 64, [3, 3], [1, 1], 'same')
            X7 = layer.BatchNorm('bn7', X7, self.training)
            X7 = layer.p_relu('act7', X7)
            # print(X7.shape)

            X8 = layer.concat('concat4', [X6, X7])
            # print(X8.shape)
            X8 = layer.conv2D('conv8', X8, 64, [3, 3], [1, 1], 'same')
            X8 = layer.BatchNorm('bn8', X8, self.training)
            X8 = layer.p_relu('act8', X8)
            # print(X8.shape)

            X9 = layer.concat('concat5', [X6, X7, X8])
            # print(X9.shape)
            X9 = layer.conv2D('conv9', X9, 64, [3, 3], [1, 1], 'same')
            X9 = layer.BatchNorm('bn9', X9, self.training)
            X9 = layer.p_relu('act9', X9)
            # print(X9.shape)

            X10 = layer.concat('concat6', [X6, X7, X8, X9])
            # print(X10.shape)
            X10 = layer.conv2D('conv10', X10, 64, [3, 3], [1, 1], 'same')
            X10 = layer.BatchNorm('bn10', X10, self.training)
            X10 = layer.p_relu('act10', X10)
            # print(X10.shape)

        with tf.name_scope('middle_node2'):
            X10 = layer.conv2D('mconv2', X10, 128, [1, 1], [1, 1])
            # print(X10.shape)
            X10 = layer.maxpool('mp2', X10, [2, 2], [2, 2])
            # print(X10.shape)

        with tf.name_scope('dense_layer3'):
            X11 = layer.conv2D('conv11', X10, 128, [3, 3], [1, 1], 'same')
            X11 = layer.BatchNorm('bn11', X11, self.training)
            X11 = layer.p_relu('act11', X11)
            # print(X11.shape)

            X12 = layer.conv2D('conv12', X11, 128, [3, 3], [1, 1], 'same')
            X12 = layer.BatchNorm('bn12', X12, self.training)
            X12 = layer.p_relu('act12', X12)
            # print(X12.shape)

            X13 = layer.concat('concat7', [X11, X12])
            # print(X13.shape)
            X13 = layer.conv2D('conv13', X13, 128, [3, 3], [1, 1], 'same')
            X13 = layer.BatchNorm('bn13', X13, self.training)
            X13 = layer.p_relu('act13', X13)
            # print(X13.shape)

            X14 = layer.concat('concat8', [X11, X12, X13])
            # print(X14.shape)
            X14 = layer.conv2D('conv14', X14, 128, [3, 3], [1, 1], 'same')
            X14 = layer.BatchNorm('bn14', X14, self.training)
            X14 = layer.p_relu('act14', X14)
            # print(X14.shape)

            X15 = layer.concat('concat9', [X11, X12, X13, X14])
            # print(X15.shape)
            X15 = layer.conv2D('conv15', X15, 128, [3, 3], [1, 1], 'same')
            X15 = layer.BatchNorm('bn15', X15, self.training)
            X15 = layer.p_relu('act15', X15)
            # print(X15.shape)

        with tf.name_scope('middle_node3'):
            X15 = layer.conv2D('mconv3', X15, 256, [1, 1], [1, 1])
            # print(X15.shape)

        with tf.name_scope('dense_layer4'):
            X16 = layer.conv2D('conv16', X15, 256, [3, 3], [1, 1], 'same')
            X16 = layer.BatchNorm('bn16', X16, self.training)
            X16 = layer.p_relu('act16', X16)
            # print(X16.shape)

            X17 = layer.conv2D('conv17', X16, 256, [3, 3], [1, 1], 'same')
            X17 = layer.BatchNorm('bn17', X17, self.training)
            X17 = layer.p_relu('act17', X17)
            # print(X17.shape)

            X18 = layer.concat('concat10', [X16, X17])
            # print(X18.shape)
            X18 = layer.conv2D('conv18', X18, 256, [3, 3], [1, 1], 'same')
            X18 = layer.BatchNorm('bn18', X18, self.training)
            X18 = layer.p_relu('act18', X18)
            # print(X18.shape)

            X19 = layer.concat('concat11', [X16, X17, X18])
            # print(X19.shape)
            X19 = layer.conv2D('conv19', X19, 256, [3, 3], [1, 1], 'same')
            X19 = layer.BatchNorm('bn19', X19, self.training)
            X19 = layer.p_relu('act19', X19)
            # print(X19.shape)

            X20 = layer.concat('concat12', [X16, X17, X18, X19])
            # print(X20.shape)
            X20 = layer.conv2D('conv20', X20, 256, [3, 3], [1, 1], 'same')
            X20 = layer.BatchNorm('bn20', X20, self.training)
            X20 = layer.p_relu('act20', X20)
            # print(X20.shape)

        with tf.name_scope('GAP') as scope:
            X_gap = layer.s_conv2D('GAP_1', X20, 10, [1, 1], [1, 1], 'same')
            # print(X_gap.shape)
            X_gap = layer.averagepool('avp', X_gap, [8, 8], [1, 1])
            # print(X_gap.shape)
            self.logits = tf.squeeze(X_gap, [1, 2])
            # print(self.logits.shape)

        with tf.name_scope('optimizer'):

            # one-hot int32 labels are cast to float, as the cross-entropy op requires
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=self.logits,
                    labels=tf.cast(self.Y, tf.float32)))

            # self.l2_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

            # self.loss = tf.add(self.loss, self.l2_loss)

            self.init_learning = 0.01

            self.decay_step = 5000

            self.decay_rate = 0.9

            self.exponential_decay_learning_rate = tf.train.exponential_decay(
                learning_rate=self.init_learning,
                global_step=self.global_step,
                decay_steps=self.decay_step,
                decay_rate=self.decay_rate,
                staircase=True,
                name='learning_rate')

            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.exponential_decay_learning_rate,
                epsilon=0.0001)

            self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            with tf.control_dependencies(self.update_ops):
                self.trainer = self.optimizer.minimize(
                    self.loss, global_step=self.global_step, name='train')

            self.accuracy = tf.reduce_mean(
                tf.cast(
                    tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1)),
                    tf.float32))
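s_conv2D, used in the middle flow of Example No. 2 and in this GAP head, is also project-specific. Its name suggests a depthwise-separable convolution, so one plausible TF 1.x mapping (an assumption, not the project's actual helper) is:

import tensorflow as tf

def s_conv2D(name, x, channels, kernel, stride, padding='valid'):
    # Depthwise convolution followed by a 1x1 pointwise convolution
    # (Xception-style separable conv).
    return tf.layers.separable_conv2d(x, channels, kernel, strides=stride,
                                      padding=padding, name=name)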
Example No. 6
yarr = yarr[:1000]

# graphing variables
xbar = []
ybar = []
correct = 0
loss = 0

# training loop
for epoch in range(10):
    for image in range(1000):
        # convolution operation with max pooling
        input = layer.convert_to_2d_image(xarr[image])
        conv0 = layer.conv2d(input, filter1)
        relu0 = layer.RELU(conv0)
        max0 = layer.maxpool(relu0)
        conv1 = layer.conv2d(max0, filter2)
        relu1 = layer.RELU(conv1)
        max1 = layer.maxpool(relu1)
        # fully connected
        l0 = layer.flatten(max1)
        l0 = layer.dropout(l0, .5)
        z = layer.forward_connected(l0, syn0, bias0)
        l1 = layer.RELU(z)
        l1 = layer.dropout(l1, .5)
        l2 = layer.forward_connected(l1, syn1, bias1)
        l2 = layer.softmax(l2)
        # define target matrix
        target = np.zeros([10, 1])
        target[int(yarr[image])][0] = 1
        # calculate cost