Example #1
def SharePart(input, drop_out_rate):
    
    def pre_process(input):
        # subtract the per-channel ImageNet means (Mean is listed in B, G, R order)
        rgb_scaled = input
        Mean = [103.939, 116.779, 123.68]

        red, green, blue = tf.split(rgb_scaled, 3, 3)
        # note: despite the name, the channels are concatenated back in R, G, B order
        bgr = tf.concat([
                red - Mean[2],
                green - Mean[1],
                blue - Mean[0]], 3)
        return bgr
    
    input = pre_process(input)
    
    with tf.variable_scope('Share_Part'):
        
        conv1 = l.conv2d('conv1', input, (11, 11), 96, strides=[1, 4, 4, 1], decay=(0.0, 0.0), pad='VALID', Init=MODEL_INIT['conv1'])
        maxpool1 = l.max_pooling('maxpool1', conv1, 3, 2)
        norm1 = tf.nn.lrn(maxpool1, depth_radius=2, alpha=2e-05, beta=0.75, name='norm1')

        conv2 = l.conv2d_with_group('conv2', norm1, (5, 5), 256, 2, decay=(0.0, 0.0), pad='SAME', Init=MODEL_INIT['conv2'])
        maxpool2 = l.max_pooling('maxpool2', conv2, 3, 2)
        norm2 = tf.nn.lrn(maxpool2, depth_radius=2, alpha=2e-05, beta=0.75, name='norm2')

        conv3 = l.conv2d('conv3', norm2, (3, 3), 384, pad='SAME', Init=MODEL_INIT['conv3'])

        conv4 = l.conv2d_with_group('conv4', conv3, (3, 3), 384, 2, pad='SAME', Init=MODEL_INIT['conv4'])

        conv5 = l.conv2d_with_group('conv5', conv4, (3, 3), 256, 2, pad='SAME', Init=MODEL_INIT['conv5'])
        maxpool5 = l.max_pooling('maxpool5', conv5, 3, 2)
        print(maxpool5.shape)
    
        # flatten the conv feature maps for the fully connected layers
        dim = 1
        shape = maxpool5.get_shape().as_list()
        for d in shape[1:]:
            dim *= d

        reshape = tf.reshape(maxpool5, [-1, dim])
    
        fc6 = l.fully_connect('fc6', reshape, 4096, Init=MODEL_INIT['fc6'])
        fc6 = l.dropout('drop_6', fc6, drop_out_rate)
        fc7 = l.fully_connect('fc7', fc6, 4096, Init=MODEL_INIT['fc7'])
        fc7 = l.dropout('drop_7', fc7, drop_out_rate)
        
    return fc7
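
# Usage sketch (an assumption, not part of the original snippet): feed an
# AlexNet-sized RGB image batch plus a dropout rate, e.g.
#   images = tf.placeholder(tf.float32, [None, 227, 227, 3])
#   shared_features = SharePart(images, 0.5)  # 4096-d fc7 features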
Example #2
		train_loss = train(model, train_set)
		vali_loss = validation(model, vali_set)
		accuracy = test(model, test_set)

		print("epoch:", epoch, "\ttrain_loss:", train_loss, "\tvali_loss:", vali_loss, "\taccuracy:", accuracy)


lr = 0.01

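# Model: three conv(3x3)/ReLU/maxpool(2x2)/dropout(0.6) blocks with 32, 64 and
# 128 filters, followed by flatten and a 10-way affine head trained with
# softmax cross-entropy (built below).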
model = net.model(optimizer.Adam(lr=lr)) # 30 66
#model = net.model(optimizer.GradientDescent(lr=lr))  # ~32% accuracy at epoch 30 when the train/validation/test sets have 128 samples each

model.add(nn.conv2d(filters=32, kernel_size=[3,3], strides=[1,1], w_init=init.he))
model.add(nn.relu())
model.add(nn.maxpool2d(kernel_size=[2,2], strides=[2,2]))
model.add(nn.dropout(0.6))

model.add(nn.conv2d(filters=64, kernel_size=[3,3], strides=[1,1], w_init=init.he))
model.add(nn.relu())
model.add(nn.maxpool2d(kernel_size=[2,2], strides=[2,2]))
model.add(nn.dropout(0.6))

model.add(nn.conv2d(filters=128, kernel_size=[3,3], strides=[1,1], w_init=init.he))
model.add(nn.relu())
model.add(nn.maxpool2d(kernel_size=[2,2], strides=[2,2]))
model.add(nn.dropout(0.6))

model.add(nn.flatten())
model.add(nn.affine(out_dim=10, w_init=init.he))

model.add_loss(nn.softmax_cross_entropy_with_logits())
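
# Training-loop sketch (assumed; it mirrors the fragment at the top of this
# example, with train/validation/test and the data sets coming from the same
# project):
#   for epoch in range(30):
#       train_loss = train(model, train_set)
#       vali_loss = validation(model, vali_set)
#       accuracy = test(model, test_set)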
Example #3
    def neural_net(self):
        with tf.name_scope('input'):

            self.global_step = tf.Variable(0, trainable=False)
            self.drop_rate = tf.placeholder(tf.float32)
            self.training = tf.placeholder(tf.bool)
            self.X = tf.placeholder(tf.float32, [None, 32, 32, 3], name='X')

            self.Y = tf.placeholder(tf.int32, [None, 10], name='Y')

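        # layer1 and layer2 each stack two factorized convolutions (1xk followed
        # by kx1) with batch normalization and PReLU, then 2x2 max pooling;
        # layer3 repeats the pattern without pooling.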
        with tf.name_scope('layer1'):
            self.layer = layer.conv2D('conv1-1', self.X, 30, [1, 5], [1, 1])
            self.layer = layer.BatchNorm('bn1', self.layer, self.training)
            self.layer = layer.p_relu('active1-1', self.layer)
            # print(self.layer.shape)
            self.layer = layer.conv2D('conv1-2', self.layer, 30, [5, 1],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn2', self.layer, self.training)
            self.layer = layer.p_relu('active1-2', self.layer)
            # print(self.layer.shape)
            self.layer = layer.maxpool('mp1', self.layer, [2, 2], [2, 2])
            # print(self.layer.shape)

        with tf.name_scope('layer2'):
            self.layer = layer.conv2D('conv2-1', self.layer, 90, [1, 3],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn3', self.layer, self.training)
            self.layer = layer.p_relu('active2-1', self.layer)
            # print(self.layer.shape)
            self.layer = layer.conv2D('conv2-2', self.layer, 90, [3, 1],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn4', self.layer, self.training)
            self.layer = layer.p_relu('active2-2', self.layer)
            # print(self.layer.shape)
            self.layer = layer.maxpool('mp2', self.layer, [2, 2], [2, 2])
            # print(self.layer.shape)

        with tf.name_scope('layer3'):
            self.layer = layer.conv2D('conv3-1', self.layer, 270, [1, 2],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn5', self.layer, self.training)
            self.layer = layer.p_relu('active3-1', self.layer)
            # print(self.layer.shape)
            self.layer = layer.conv2D('conv3-2', self.layer, 270, [2, 1],
                                      [1, 1])
            self.layer = layer.BatchNorm('bn6', self.layer, self.training)
            self.layer = layer.p_relu('active3-2', self.layer)
            # print(self.layer.shape)

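        # Middle flow: 8 residual blocks. Each expands to 540 channels with a 1x1
        # separable conv, applies two 3x3 separable convs (with dropout between
        # them), projects back to 270 channels with a 1x1 conv, and adds the skip
        # connection before a final PReLU.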
        with tf.name_scope('middle_flow'):
            self.m_layer = self.layer
            for i in range(8):
                self.residual = self.m_layer
                self.m_layer = layer.s_conv2D('s_conv' + str(i) + '-1',
                                              self.m_layer, 540, [1, 1],
                                              [1, 1], 'same')
                self.m_layer = layer.BatchNorm('bn' + str(i) + '1',
                                               self.m_layer, self.training)
                self.m_layer = layer.p_relu('m_active' + str(i) + '-1',
                                            self.m_layer)

                self.m_layer = layer.s_conv2D('s_conv' + str(i) + '-2',
                                              self.m_layer, 540, [3, 3],
                                              [1, 1], 'same')
                self.m_layer = layer.BatchNorm('bn' + str(i) + '2',
                                               self.m_layer, self.training)
                self.m_layer = layer.p_relu('m_active' + str(i) + '-2',
                                            self.m_layer)

                self.m_layer = layer.dropout('m_dp', self.m_layer,
                                             self.drop_rate, self.training)

                self.m_layer = layer.s_conv2D('s_conv' + str(i) + '-3',
                                              self.m_layer, 540, [3, 3],
                                              [1, 1], 'same')
                self.m_layer = layer.BatchNorm('bn' + str(i) + '3',
                                               self.m_layer, self.training)
                self.m_layer = layer.p_relu('m_active' + str(i) + '-3',
                                            self.m_layer)
                # print(self.m_layer.shape)

                self.m_layer = layer.s_conv2D('s_conv' + str(i) + '-4',
                                              self.m_layer, 270, [1, 1],
                                              [1, 1], 'same')
                self.m_layer = layer.add(self.m_layer,
                                         self.residual,
                                         name='add' + str(i))
                self.m_layer = layer.p_relu('m_active' + str(i) + '-4',
                                            self.m_layer)
                # print(self.m_layer.shape)

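        # Classification head: a 1x1 separable conv reduces the features to 10
        # channels, average pooling collapses the remaining spatial dimensions,
        # and the squeeze yields the per-class logits.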
        with tf.name_scope('Global_Average_Pooling'):
            self.layer = layer.s_conv2D('reduce_channel', self.m_layer, 10,
                                        [1, 1], [1, 1])
            # print(self.layer.shape)
            self.layer = layer.averagepool('avp', self.layer, [5, 5], [1, 1])
            # print(self.layer.shape)
            self.logits = tf.squeeze(self.layer, [1, 2], name='logits')
            # print(self.logits.shape)

        with tf.name_scope('optimizer'):

            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,
                                                        labels=self.Y))

            # self.l2_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

            # self.loss = tf.add(self.loss, self.l2_loss)

            self.init_learning = 0.01

            self.decay_step = 2500

            self.decay_rate = 0.9

            self.exponential_decay_learning_rate = tf.train.exponential_decay(
                learning_rate=self.init_learning,
                global_step=self.global_step,
                decay_steps=self.decay_step,
                decay_rate=self.decay_rate,
                staircase=True,
                name='learning_rate')

            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.exponential_decay_learning_rate,
                epsilon=0.0001)

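            # Batch-norm moving statistics are registered in UPDATE_OPS; running
            # them under control_dependencies keeps them in step with training.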
            self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            with tf.control_dependencies(self.update_ops):
                self.trainer = self.optimizer.minimize(
                    self.loss, global_step=self.global_step, name='train')

            self.accuracy = tf.reduce_mean(
                tf.cast(
                    tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1)),
                    tf.float32))

            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('lr', self.exponential_decay_learning_rate)
            tf.summary.scalar('accuracy', self.accuracy)

            self.merged = tf.summary.merge_all()
            self.writer = tf.summary.FileWriter('./logs', self.sess.graph)
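Example #4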
    def dropout(self, keep_prob):
        self.res = L.dropout(self.res, keep_prob)
        self.res_size = self.res.get_shape()

        return [self.res, self.res_size]
Example #5
    def model(self, image_batch=None, label_batch=None):
        """创建网络graph"""
        # 1st Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv1 = layer.conv_block(image_batch,
                                      11,
                                      11,
                                      64,
                                      2,
                                      2,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block1')
        self.pool1 = layer.max_pool(self.conv1,
                                    3,
                                    3,
                                    2,
                                    2,
                                    padding='SAME',
                                    name='pool1')

        # 2nd Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv2 = layer.conv_block(self.pool1,
                                      7,
                                      7,
                                      96,
                                      1,
                                      1,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block2')
        self.pool2 = layer.max_pool(self.conv2,
                                    3,
                                    3,
                                    2,
                                    2,
                                    padding='SAME',
                                    name='pool2')

        # 3rd Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv3 = layer.conv_block(self.pool2,
                                      5,
                                      5,
                                      96,
                                      1,
                                      1,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block3')
        self.pool3 = layer.max_pool(self.conv3,
                                    3,
                                    3,
                                    1,
                                    1,
                                    padding='SAME',
                                    name='pool3')

        # 4th Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv4 = layer.conv_block(self.pool3,
                                      3,
                                      3,
                                      96,
                                      1,
                                      1,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block4')
        self.pool4 = layer.max_pool(self.conv4,
                                    3,
                                    3,
                                    1,
                                    1,
                                    padding='SAME',
                                    name='pool4')

        # 5th Layer: fully connected-BatchNorm-ReLU -> Dropout
        self.fc1 = layer.fc(self.pool4,
                            256,
                            initializer=self.initializer,
                            relu=True,
                            is_training=self.is_training,
                            norm=self.norm,
                            name='fc1')
        self.dropout1 = layer.dropout(self.fc1,
                                      self.keep_prob,
                                      name='dropout1')

        # 6th Layer: fully connected layer
        self.fc2 = layer.fc(self.dropout1,
                            10,
                            initializer=self.initializer,
                            relu=False,
                            is_training=self.is_training,
                            norm=None,
                            name='fc2')

        if label_batch is not None:
            loss = self.netloss(self.fc2, label_batch)
            correct_prediction = tf.equal(tf.argmax(self.fc2, 1), label_batch)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return loss, accuracy
            #return loss,accuracy,self.fc2,tf.argmax(self.fc2,1),label_batch
        else:
            # at prediction time label_batch is None, so no loss needs to be returned
            #return self.fc2
            return tf.argmax(self.fc2, 1)
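
        # Usage sketch (assumed; an already-constructed instance `net` supplies
        # is_training, norm, initializer and keep_prob):
        #   loss, accuracy = net.model(image_batch, label_batch)  # training graph
        #   predictions = net.model(image_batch)                  # inference graph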
Example #6
loss = 0

# training loop
for epoch in range(10):
    for image in range(1000):
        # convolution operation with max pooling
        input = layer.convert_to_2d_image(xarr[image])
        conv0 = layer.conv2d(input, filter1)
        relu0 = layer.RELU(conv0)
        max0 = layer.maxpool(relu0)
        conv1 = layer.conv2d(max0, filter2)
        relu1 = layer.RELU(conv1)
        max1 = layer.maxpool(relu1)
        # fully connected
        l0 = layer.flatten(max1)
        l0 = layer.dropout(l0, .5)
        z = layer.forward_connected(l0, syn0, bias0)
        l1 = layer.RELU(z)
        l1 = layer.dropout(l1, .5)
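        # both dropout calls use a rate of 0.5; dropout regularizes this training
        # loop and would normally be disabled at inference time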
        l2 = layer.forward_connected(l1, syn1, bias1)
        l2 = layer.softmax(l2)
        # define target matrix
        target = np.zeros([10, 1])
        target[int(yarr[image])][0] = 1
        # calculate cost
        loss += np.abs(layer.cost(l2, target))
        print(
            str(loss) + " " + str(int(yarr[image])) + " " +
            str(np.argmax(l2)) + " " + str(image))

        # backprop fully connected