Example 1
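All the snippets below are TF1-era TF-Slim code; a minimal sketch of the imports they assume (module paths are an assumption and depend on your checkout):

import tensorflow as tf

slim = tf.contrib.slim                        # TF-Slim ships inside TF 1.x
from tensorflow.contrib.slim.nets import vgg  # or `from nets import vgg` in the models repo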
 def testModelVariables(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = tf.random_uniform((batch_size, height, width, 3))
         vgg.vgg_a(inputs, num_classes)
         expected_names = [
             'vgg_a/conv1/conv1_1/weights',
             'vgg_a/conv1/conv1_1/biases',
             'vgg_a/conv2/conv2_1/weights',
             'vgg_a/conv2/conv2_1/biases',
             'vgg_a/conv3/conv3_1/weights',
             'vgg_a/conv3/conv3_1/biases',
             'vgg_a/conv3/conv3_2/weights',
             'vgg_a/conv3/conv3_2/biases',
             'vgg_a/conv4/conv4_1/weights',
             'vgg_a/conv4/conv4_1/biases',
             'vgg_a/conv4/conv4_2/weights',
             'vgg_a/conv4/conv4_2/biases',
             'vgg_a/conv5/conv5_1/weights',
             'vgg_a/conv5/conv5_1/biases',
             'vgg_a/conv5/conv5_2/weights',
             'vgg_a/conv5/conv5_2/biases',
             'vgg_a/fc6/weights',
             'vgg_a/fc6/biases',
             'vgg_a/fc7/weights',
             'vgg_a/fc7/biases',
             'vgg_a/fc8/weights',
             'vgg_a/fc8/biases',
         ]
         model_variables = [v.op.name for v in slim.get_model_variables()]
         self.assertSetEqual(set(model_variables), set(expected_names))
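The names above are exactly what a checkpoint restore needs; a minimal sketch, assuming a hypothetical checkpoint path:

init_fn = slim.assign_from_checkpoint_fn(
    '/tmp/vgg_a.ckpt',                      # hypothetical path
    slim.get_model_variables('vgg_a'))      # the variables listed above
with tf.Session() as sess:
    init_fn(sess)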
Example 2
 def testNoClasses(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = None
     with self.test_session():
         inputs = tf.random_uniform((batch_size, height, width, 3))
         net, end_points = vgg.vgg_a(inputs, num_classes)
         expected_names = [
             'vgg_a/conv1/conv1_1',
             'vgg_a/pool1',
             'vgg_a/conv2/conv2_1',
             'vgg_a/pool2',
             'vgg_a/conv3/conv3_1',
             'vgg_a/conv3/conv3_2',
             'vgg_a/pool3',
             'vgg_a/conv4/conv4_1',
             'vgg_a/conv4/conv4_2',
             'vgg_a/pool4',
             'vgg_a/conv5/conv5_1',
             'vgg_a/conv5/conv5_2',
             'vgg_a/pool5',
             'vgg_a/fc6',
             'vgg_a/fc7',
         ]
         self.assertSetEqual(set(end_points.keys()), set(expected_names))
         self.assertTrue(net.op.name.startswith('vgg_a/fc7'))
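With num_classes=None the fc8 head is omitted and `net` holds the fc7 activations, so vgg_a can serve as a feature extractor; a minimal sketch (the pooling step is an assumption, not part of the test):

features, _ = vgg.vgg_a(inputs, num_classes=None, is_training=False)
features = tf.reduce_mean(features, [1, 2])  # -> [batch_size, 4096]; pooling choice assumed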
Example 3
 def testForward(self):
     batch_size = 1
     height, width = 224, 224
     with self.test_session() as sess:
         inputs = tf.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_a(inputs)
         sess.run(tf.initialize_all_variables())
         output = sess.run(logits)
         self.assertTrue(output.any())
Example 4
 def testFullyConvolutional(self):
     batch_size = 1
     height, width = 256, 256
     num_classes = 1000
     with self.test_session():
         inputs = tf.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
         self.assertEqual(logits.op.name, 'vgg_a/fc8/BiasAdd')
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, 2, 2, num_classes])
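With spatial_squeeze=False the head emits a 2x2 grid of logits for 256x256 inputs; one prediction per image can be recovered by averaging over the spatial dimensions, the same reduction Example 8 below applies:

logits = tf.reduce_mean(logits, [1, 2])  # -> [batch_size, num_classes]
predictions = tf.argmax(logits, 1)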
Example 5
 def testBuild(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = tf.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_a(inputs, num_classes)
         self.assertEqual(logits.op.name, 'vgg_a/fc8/squeezed')
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
Example 6
 def testEvaluation(self):
     batch_size = 2
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         eval_inputs = tf.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_a(eval_inputs, num_classes, is_training=False)
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
         predictions = tf.argmax(logits, 1)
         self.assertListEqual(predictions.get_shape().as_list(),
                              [batch_size])
Example 7
 def testEndPoints(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = tf.random_uniform((batch_size, height, width, 3))
         _, end_points = vgg.vgg_a(inputs, num_classes)
         expected_names = [
             'vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1',
             'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2',
             'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2',
             'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2',
             'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8'
         ]
         self.assertSetEqual(set(end_points.keys()), set(expected_names))
Example 8
 def testTrainEvalWithReuse(self):
     train_batch_size = 2
     eval_batch_size = 1
     train_height, train_width = 224, 224
     eval_height, eval_width = 256, 256
     num_classes = 1000
     with self.test_session():
         train_inputs = tf.random_uniform(
             (train_batch_size, train_height, train_width, 3))
         logits, _ = vgg.vgg_a(train_inputs, num_classes)
         self.assertListEqual(logits.get_shape().as_list(),
                              [train_batch_size, num_classes])
         tf.get_variable_scope().reuse_variables()
         eval_inputs = tf.random_uniform(
             (eval_batch_size, eval_height, eval_width, 3))
         logits, _ = vgg.vgg_a(eval_inputs,
                               is_training=False,
                               spatial_squeeze=False)
         self.assertListEqual(logits.get_shape().as_list(),
                              [eval_batch_size, 2, 2, num_classes])
         logits = tf.reduce_mean(logits, [1, 2])
         predictions = tf.argmax(logits, 1)
          self.assertListEqual(predictions.get_shape().as_list(),
                               [eval_batch_size])
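The same weight sharing can also be written with an explicit variable scope; a minimal sketch (the 'model' scope name is an assumption):

with tf.variable_scope('model'):
    train_logits, _ = vgg.vgg_a(train_inputs)
with tf.variable_scope('model', reuse=True):
    eval_logits, _ = vgg.vgg_a(eval_inputs, is_training=False)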
Example 9
    def _buildGraph(self):
        x_in = tf.placeholder(
            tf.float32,
            shape=[
                None,  # enables variable batch size
                self.input_dim[0]
            ],
            name="x")
        x_in_reshape = tf.reshape(
            x_in, [-1, self.input_dim[1], self.input_dim[2], 3])

        dropout = tf.placeholder_with_default(1., shape=[], name="dropout")

        y_in = tf.placeholder(dtype=tf.int8, name="y")

        onehot_labels = tf.one_hot(indices=tf.cast(y_in, tf.int32), depth=2)

        is_train = tf.placeholder_with_default(True, shape=[], name="is_train")

        # NOTE: stock slim vgg_a returns (net, end_points); this three-value
        # unpack assumes a locally modified vgg.
        logits, nett, ww = vgg.vgg_a(x_in_reshape,
                                     num_classes=2,
                                     is_training=is_train,
                                     dropout_keep_prob=dropout,
                                     spatial_squeeze=True,
                                     scope='vgga')

        pred = tf.nn.softmax(logits, name="prediction")

        global_step = tf.Variable(0, trainable=False)

        pred_cost = tf.losses.softmax_cross_entropy(
            onehot_labels=onehot_labels, logits=logits)

        tf.summary.scalar("InceptionV3_cost", pred_cost)

        train_op = tf.contrib.layers.optimize_loss(
            loss=pred_cost,
            learning_rate=self.learning_rate,
            global_step=global_step,
            optimizer="Adam")

        merged_summary = tf.summary.merge_all()

        return (x_in, dropout, is_train, y_in, logits, nett, ww, pred,
                pred_cost, global_step, train_op, merged_summary)
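A hypothetical training step using the tensors this method returns (`model`, `batch_x`, and `batch_y` are assumptions):

(x_in, dropout, is_train, y_in, logits, nett, ww, pred,
 pred_cost, global_step, train_op, merged) = model._buildGraph()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, cost = sess.run([train_op, pred_cost],
                       feed_dict={x_in: batch_x,   # flattened images, shape [n, input_dim[0]]
                                  y_in: batch_y,   # integer labels in {0, 1}
                                  dropout: 0.5})   # is_train defaults to True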
Example 10
def run_training():

    # 1. Create log and model directories named by the current datetime.
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    models_dir = os.path.join("saved_models", subdir, "models")
    if not os.path.isdir(models_dir):  # create the model directory if it doesn't exist
        os.makedirs(models_dir)
    logs_dir = os.path.join("saved_models", subdir, "logs")
    if not os.path.isdir(logs_dir):  # create the log directory if it doesn't exist
        os.makedirs(logs_dir)
    # topn dir holds copies of the models with the best test accuracy
    topn_models_dir = os.path.join("saved_models", subdir, "topn")
    if not os.path.isdir(topn_models_dir):  # create the topn directory if it doesn't exist
        os.makedirs(topn_models_dir)
    # touch the accuracy log so later appends always have a file to open
    open(os.path.join(topn_models_dir, "topn_acc.txt"), "a+").close()


    # 2. Load the datasets and define placeholders.
    conf = config.get_config()
    train_dataset = input_dataset.TFRecordDataset(conf)
    train_iterator, train_next_element = train_dataset.generateDataset(
        dataset_path=conf.train_dataset_path, batch_size=conf.batch_size)
    test_dataset = input_dataset.TFRecordDataset(conf)
    test_iterator, test_next_element = test_dataset.generateDataset(
        dataset_path=conf.test_dataset_path, batch_size=conf.batch_size, test_mode=1)

    phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
    images_placeholder = tf.placeholder(name='input', shape=[None, conf.input_img_height,conf.input_img_width, 3], dtype=tf.float32)
    labels_placeholder = tf.placeholder(name='labels', shape=[None], dtype=tf.int64)

    # Create the model.
    #with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(batch_norm_updates_collections=None)):
        #predictions, end_points = mobilenet_v1.mobilenet_v1(images_placeholder,is_training=phase_train_placeholder,num_classes=3,prediction_fn=False)

    with slim.arg_scope(vgg.vgg_arg_scope()):
        predictions, end_points = vgg.vgg_a(
            images_placeholder, num_classes=3,
            is_training=phase_train_placeholder)  # `predictions` holds raw logits

    output = tf.argmax(predictions, 1, name="output")

    softmax_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=predictions, labels=labels_placeholder),
        name="loss")
    tf.add_to_collection('losses', softmax_loss)

    correct_prediction = tf.equal(tf.argmax(predictions, 1), labels_placeholder)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Decay the learning rate exponentially as training progresses.
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(
        conf.learning_rate, global_step,
        conf.learning_rate_decay_step, conf.learning_rate_decay_rate,
        staircase=True)

    # Total loss = softmax loss + any regularization losses.
    custom_loss = tf.get_collection("losses")
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_loss = tf.add_n(custom_loss + regularization_losses, name='total_loss')


    # Optimize the total loss and update the weights.
    train_op = tf.train.AdamOptimizer(
        learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1).minimize(
            total_loss, global_step=global_step)
    #train_op = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True).minimize(total_loss, global_step=global_step)

    saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=5)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        for epoch in range(conf.max_nrof_epochs):
            sess.run(train_iterator.initializer)
            use_time = 0  # accumulate step time across display_iter iterations
            while True:
                try:
                    images_train, labels_train = sess.run(train_next_element)

                    start_time = time.time()
                    input_dict = {phase_train_placeholder: True,
                                  images_placeholder: images_train,
                                  labels_placeholder: labels_train}
                    step, lr, train_loss, _, train_accuracy = sess.run(
                        [global_step, learning_rate, total_loss, train_op, accuracy],
                        feed_dict=input_dict)

                    end_time = time.time()
                    use_time += (end_time - start_time)

                    # Display training progress.
                    if step % conf.display_iter == 0:
                        print("step:%d lr:%f time:%.3f total_loss:%.3f acc:%.3f epoch:%d"
                              % (step, lr, use_time, train_loss, train_accuracy, epoch))
                        use_time = 0
                    # Periodically checkpoint and evaluate on the test set.
                    if step % conf.test_save_iter == 0:
                        filename_ckpt = os.path.join(models_dir, "%d.ckpt" % step)
                        saver.save(sess, filename_ckpt)
                        #evaluate(models_dir)
                        sess.run(test_iterator.initializer)
                        total_acc = 0
                        test_cnt = 0
                        
                        while True:
                            try:
                                test_img, test_label = sess.run(test_next_element)
                                test_cnt += 1  # count only batches actually fetched
                                fd = {images_placeholder: test_img,
                                      labels_placeholder: test_label,
                                      phase_train_placeholder: False}
                                acc = sess.run(accuracy, feed_dict=fd)
                                total_acc += acc
                            except tf.errors.OutOfRangeError:
                                valid_acc = (total_acc * 1.0 / max(test_cnt, 1)) * 100  # guard empty test set
                                print("test accuracy %.2f" % valid_acc)
                                with open(os.path.join(topn_models_dir, "topn_acc.txt"), "a+") as tmp_f:
                                    tmp_f.write("step : %d  accuracy : %f\n" % (step, valid_acc))
                                if valid_acc > conf.topn_threshold:
                                    # copy the checkpoint files that beat the threshold into topn
                                    for suffix in (".meta", ".index", ".data-00000-of-00001"):
                                        shutil.copyfile(
                                            os.path.join(models_dir, "%d.ckpt%s" % (step, suffix)),
                                            os.path.join(topn_models_dir, "%d.ckpt%s" % (step, suffix)))
                                break
                        
                except tf.errors.OutOfRangeError:
                    print("End of epoch")
                    break
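Once run_training has written a checkpoint, inference can reuse the tensor names defined above ('input', 'output', 'phase_train'); a minimal sketch with a hypothetical path:

ckpt = 'saved_models/20200101-000000/models/1000.ckpt'  # hypothetical path
with tf.Session() as sess:
    saver = tf.train.import_meta_graph(ckpt + '.meta')
    saver.restore(sess, ckpt)
    g = tf.get_default_graph()
    preds = sess.run(g.get_tensor_by_name('output:0'),
                     feed_dict={g.get_tensor_by_name('input:0'): images,  # `images` assumed
                                g.get_tensor_by_name('phase_train:0'): False})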