Example #1
# Assumes module-level constants BATCH_SIZE, N_CLASSES, MAX_STEP and
# learning_rate, plus the project-specific input_data and model modules.
import os

import numpy as np
import tensorflow as tf

import input_data
import model


def train():

    train_dir = '/home/daijiaming/Galaxy/data3/trainset/'
    train_label_dir = '/home/daijiaming/Galaxy/data3/train_label.csv'
    test_dir = '/home/daijiaming/Galaxy/data3/testset/'
    test_label_dir = '/home/daijiaming/Galaxy/data3/test_label.csv'

    train_log_dir = '/home/daijiaming/Galaxy/Dieleman/logs/train/'
    val_log_dir = '/home/daijiaming/Galaxy/Dieleman/logs/val/'
    
    tra_image_batch, tra_label_batch, tra_galaxyid_batch = input_data.read_galaxy11(
        data_dir=train_dir, label_dir=train_label_dir, batch_size=BATCH_SIZE)
    val_image_batch, val_label_batch, val_galaxyid_batch = input_data.read_galaxy11_test(
        data_dir=test_dir, label_dir=test_label_dir, batch_size=BATCH_SIZE)

    x = tf.placeholder(tf.float32, [BATCH_SIZE, 64, 64, 3])
    y_ = tf.placeholder(tf.float32, [BATCH_SIZE, N_CLASSES])
    keep_prob = tf.placeholder(tf.float32)  # dropout keep probability

    logits, fc_output = model.inference(x, BATCH_SIZE, N_CLASSES, keep_prob)

    loss = model.loss(logits, y_)
    accuracy = model.accuracy(logits, y_)

    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = model.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()
       
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)    
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)
    
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
                
            tra_images, tra_labels = sess.run([tra_image_batch, tra_label_batch])
            _, tra_loss, tra_acc, summary_str = sess.run(
                [train_op, loss, accuracy, summary_op],
                feed_dict={x: tra_images, y_: tra_labels, keep_prob: 0.5})
            
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, tra_loss: %.4f, tra_accuracy: %.2f%%' %
                      (step, tra_loss, tra_acc))
                tra_summary_writer.add_summary(summary_str, step)
                
            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run([val_image_batch, val_label_batch])
                val_loss, val_acc, summary_str = sess.run(
                    [loss, accuracy, summary_op],
                    feed_dict={x: val_images, y_: val_labels, keep_prob: 1.0})
                print('**  Step %d, test_loss = %.4f, test_accuracy = %.2f%%  **' %
                      (step, val_loss, val_acc))
                val_summary_writer.add_summary(summary_str, step)
                    
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
                
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        
    coord.join(threads)
    sess.close()
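
All three examples depend on a queue-based input pipeline behind input_data.read_galaxy11 / read_galaxy11_test, which is not shown. Below is a minimal sketch of what such a function could look like, assuming the label CSV stores one row per image as "galaxy_id,label_0,...,label_N" and images are named "<galaxy_id>.jpg" (both assumptions, not the original code):

# Hypothetical sketch of the assumed input pipeline; the real
# input_data module is not part of these examples.
import os
import numpy as np
import tensorflow as tf

def read_galaxy11(data_dir, label_dir, batch_size):
    rows = np.loadtxt(label_dir, delimiter=',')
    ids = rows[:, 0].astype(np.int64)
    labels = rows[:, 1:].astype(np.float32)
    paths = [os.path.join(data_dir, '%d.jpg' % i) for i in ids]

    # The slice queue feeds the threads launched by
    # tf.train.start_queue_runners in the training loop above.
    path, label, galaxy_id = tf.train.slice_input_producer(
        [tf.constant(paths), tf.constant(labels), tf.constant(ids)],
        shuffle=True)
    image = tf.image.decode_jpeg(tf.read_file(path), channels=3)
    image = tf.cast(tf.image.resize_images(image, [64, 64]), tf.float32)
    return tf.train.batch([image, label, galaxy_id], batch_size=batch_size)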
Example #2
# Assumes module-level constants BATCH_SIZE, N_CLASSES and IS_PRETRAIN,
# plus the project-specific input_data, VGG and tools modules.
import math

import numpy as np
import tensorflow as tf

import input_data
import tools
import VGG


def evaluate():
    with tf.Graph().as_default():

        log_dir = '/home/daijiaming/Galaxy/VGG16/logs/train/'
        # Point test_dir/test_label_dir at testset/, test1000/ or trainset/
        # (with the matching label CSV and n_test) to score another split.
        test_dir = '/home/daijiaming/Galaxy/data3/train1000/'
        test_label_dir = '/home/daijiaming/Galaxy/data3/train1000_label.csv'
        n_test = 1000

        val_image_batch, val_label_batch, val_galaxyid_batch = input_data.read_galaxy11_test(
            data_dir=test_dir, label_dir=test_label_dir, batch_size=BATCH_SIZE)

        x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 64, 64, 3])
        y_ = tf.placeholder(tf.int32, shape=[BATCH_SIZE, N_CLASSES])
        keep_prob = tf.placeholder(tf.float32)

        logits, fc_output = VGG.VGG16N(x, N_CLASSES, keep_prob, IS_PRETRAIN)

        correct = tools.num_correct_prediction(logits, y_)
        accuracy = tools.accuracy(logits, y_)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                num_step = int(math.ceil(n_test / BATCH_SIZE))
                # The queue wraps around, so num_sample can exceed n_test
                # when n_test is not a multiple of BATCH_SIZE.
                num_sample = num_step * BATCH_SIZE
                step = 0
                total_correct = 0
                total_acc = 0.0
                while step < num_step and not coord.should_stop():
                    test_images, test_labels = sess.run(
                        [val_image_batch, val_label_batch])
                    batch_correct, test_acc, batch_logits = sess.run(
                        [correct, accuracy, logits],
                        feed_dict={
                            x: test_images,
                            y_: test_labels,
                            keep_prob: 1.0
                        })
                    total_correct += np.sum(batch_correct)
                    total_acc += test_acc
                    if step == 0:
                        a = test_labels
                        b = batch_logits
                    else:
                        a = np.concatenate((a, test_labels))
                        b = np.concatenate((b, batch_logits))
                    step += 1
                avg_acc = total_acc / num_step
                print('Average batch accuracy = %.4f' % avg_acc)
                print('Total testing samples: %d' % num_sample)
                print('Total correct predictions: %d' % total_correct)
                print('Average accuracy: %.4f%%' %
                      (100.0 * total_correct / num_sample))
                np.savetxt('./labels1000.csv', a, delimiter=',')
                np.savetxt('./logits1000.csv', b, delimiter=',')
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
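
Because evaluate() dumps the per-image labels and logits to CSV, the run can be re-scored offline. A short sketch using the file names from the example above, assuming the labels are one-hot (or probability) vectors:

import numpy as np

labels = np.loadtxt('./labels1000.csv', delimiter=',')
logits = np.loadtxt('./logits1000.csv', delimiter=',')

# Top-1 accuracy: compare the argmax of the logits with the argmax
# of the label vectors, row by row.
pred = np.argmax(logits, axis=1)
true = np.argmax(labels, axis=1)
print('Offline top-1 accuracy: %.4f' % np.mean(pred == true))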
Example #3
# Assumes module-level constants DATA_DIR, BATCH_SIZE, N_CLASSES, MAX_STEP
# and learning_rate, plus the project-specific input_data and resnet_v2
# modules (slim comes from tf.contrib).
import math
import os

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim

import input_data
import resnet_v2


def evaluate():
    with tf.Graph().as_default():

        log_dir = DATA_DIR + 'logs/train/'
        test_dir = DATA_DIR + 'testset/'
        test_label_dir = DATA_DIR + 'test_label.csv'
        # n_test: number of images in the test set; change to match your data.
        n_test = 2879

        val_image_batch, val_label_batch = input_data.read_galaxy11_test(
            data_dir=test_dir, label_dir=test_label_dir, batch_size=BATCH_SIZE)

        x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 64, 64, 3])
        y_ = tf.placeholder(tf.int32, shape=[BATCH_SIZE, N_CLASSES])

        with slim.arg_scope(resnet_v2.resnet_arg_scope()):
            # is_training=False: freeze batch-norm statistics for evaluation.
            logits, end_points, output0, output1 = resnet_v2.resnet_v2_26_2(
                x, N_CLASSES, is_training=False)

        correct = resnet_v2.num_correct_prediction(logits, y_)
        accuracy = resnet_v2.accuracy(logits, y_)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                num_step = int(math.ceil(n_test / BATCH_SIZE))
                # The queue wraps around, so num_sample can exceed n_test
                # when n_test is not a multiple of BATCH_SIZE.
                num_sample = num_step * BATCH_SIZE
                step = 0
                total_correct = 0
                total_acc = 0.0
                while step < num_step and not coord.should_stop():
                    test_images, test_labels = sess.run(
                        [val_image_batch, val_label_batch])
                    batch_correct, test_acc, batch_logits = sess.run(
                        [correct, accuracy, logits],
                        feed_dict={
                            x: test_images,
                            y_: test_labels
                        })
                    total_correct += np.sum(batch_correct)
                    total_acc += test_acc
                    if step == 0:
                        a = test_labels
                        b = batch_logits
                    else:
                        a = np.concatenate((a, test_labels))
                        b = np.concatenate((b, batch_logits))
                    step += 1
                avg_acc = total_acc / num_step
                print('Average batch accuracy = %.4f' % avg_acc)
                print('Total testing samples: %d' % num_sample)
                print('Total correct predictions: %d' % total_correct)
                print('Average accuracy: %.4f%%' %
                      (100.0 * total_correct / num_sample))
                np.savetxt('labels2879.csv', a, delimiter=',')
                np.savetxt('logits2879.csv', b, delimiter=',')
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)


def train():

    train_dir = DATA_DIR + 'trainset/'
    train_label_dir = DATA_DIR + 'train_label.csv'
    test_dir = DATA_DIR + 'testset/'
    test_label_dir = DATA_DIR + 'test_label.csv'

    train_log_dir = DATA_DIR + 'logs/train/'
    val_log_dir = DATA_DIR + 'logs/val/'

    tra_image_batch, tra_label_batch = input_data.read_galaxy11(
        data_dir=train_dir, label_dir=train_label_dir, batch_size=BATCH_SIZE)
    val_image_batch, val_label_batch = input_data.read_galaxy11_test(
        data_dir=test_dir, label_dir=test_label_dir, batch_size=BATCH_SIZE)

    x = tf.placeholder(tf.float32, [BATCH_SIZE, 64, 64, 3])
    y_ = tf.placeholder(tf.float32, [BATCH_SIZE, N_CLASSES])

    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
        logits, end_points, output0, output1 = resnet_v2.resnet_v2_26_2(
            x, N_CLASSES, is_training=True)

    loss = resnet_v2.loss(logits, y_)
    accuracy = resnet_v2.accuracy(logits, y_)

    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = resnet_v2.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run(
                [tra_image_batch, tra_label_batch])
            _, tra_loss, tra_acc, summary_str = sess.run(
                [train_op, loss, accuracy, summary_op],
                feed_dict={
                    x: tra_images,
                    y_: tra_labels
                })

            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, tra_loss: %.4f, tra_accuracy: %.2f%%' %
                      (step, tra_loss, tra_acc))
                tra_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run(
                    [val_image_batch, val_label_batch])
                val_loss, val_acc, summary_str = sess.run(
                    [loss, accuracy, summary_op],
                    feed_dict={
                        x: val_images,
                        y_: val_labels
                    })
                print('**  Step %d, test_loss = %.4f, test_accuracy = %.2f%%  **'
                      % (step, val_loss, val_acc))
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
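
The Coordinator/queue-runner plumbing used in all three examples is the legacy TF1 input API; from TF 1.4 onward the same batching can be written with tf.data. A rough equivalent, a sketch only, reusing the hypothetical path/label layout from the read_galaxy11 sketch after Example #1:

import tensorflow as tf

def galaxy_batches(paths, labels, batch_size):
    # paths: list of image file names; labels: float32 array, one row per image.
    ds = tf.data.Dataset.from_tensor_slices((paths, labels))

    def _load(path, label):
        image = tf.image.decode_jpeg(tf.read_file(path), channels=3)
        image = tf.cast(tf.image.resize_images(image, [64, 64]), tf.float32)
        return image, label

    # shuffle/map/batch/repeat replaces slice_input_producer + tf.train.batch,
    # with no Coordinator or queue-runner threads to manage.
    ds = ds.shuffle(10000).map(_load).batch(batch_size).repeat()
    return ds.make_one_shot_iterator().get_next()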