Example #1
def evaluate():
    with tf.Graph().as_default():

        #        log_dir = 'C://Users//kevin//Documents//tensorflow//VGG//logsvgg//train//'
        log_dir = './logs/train/'
        test_dir = '.'

        data, labels = input_data.read_data(data_dir=test_dir,
                                            is_train=False,
                                            batch_size=BATCH_SIZE,
                                            shuffle=False,
                                            n_test=n_test)

        logits = FNET.FNET(data,
                           N_CLASSES,
                           IS_PRETRAIN,
                           train=False,
                           droprate=1)
        correct = tools.num_correct_prediction(logits, labels)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                num_step = int(math.floor(n_test / BATCH_SIZE))
                num_sample = num_step * BATCH_SIZE
                step = 0
                total_correct = 0
                while step < num_step and not coord.should_stop():
                    batch_correct = sess.run(correct)
                    total_correct += np.sum(batch_correct)
                    step += 1
                print('Total testing samples: %d' % num_sample)
                print('Total correct predictions: %d' % total_correct)
                print('Average accuracy: %.2f%%' %
                      (100 * total_correct / num_sample))
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
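
Every example on this page relies on a helper, tools.num_correct_prediction(logits, labels), whose definition is never shown. A minimal sketch of what it presumably does, assuming one-hot labels (the actual tools module in these projects may differ):

import tensorflow as tf

# Sketch only; not the actual tools module used by these projects.
def num_correct_prediction(logits, labels):
    # Assumes one-hot labels of shape [batch, n_classes]; for integer labels,
    # compare tf.argmax(logits, 1) with tf.cast(labels, tf.int64) instead.
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    return tf.reduce_sum(tf.cast(correct, tf.int32))  # a count, not a ratio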
Example #2
File: train.py Project: Syneh/homework1
def evaluate():
    with tf.Graph().as_default():
        
#        log_dir = 'C://Users//kevin//Documents//tensorflow//VGG//logsvgg//train//'
        data_dir = '/home/viplab/Desktop/Syneh/HW01/data/'
        train_log_dir = '/home/viplab/Desktop/Syneh/HW01/code/train_log'
        val_log_dir = '/home/viplab/Desktop/Syneh/HW01/code/train_log/val'
        n_test = 12776
                
        val_image_list, val_label_list = input_data.read_files(data_dir=data_dir,
                                                 is_train=False)
        val_image_batch, val_label_batch = input_data.get_batch( val_image_list,
                                                                 val_label_list,
                                                                 IMG_W,
                                                                 IMG_H,
                                                                 BATCH_SIZE,
                                                                 CAPACITY)

        logits = VGG.VGG16N(val_image_batch, N_CLASSES, IS_PRETRAIN)
        correct = tools.num_correct_prediction(logits, val_label_batch)
        saver = tf.train.Saver(tf.global_variables())
        
        with tf.Session() as sess:
            
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(train_log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return
        
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess = sess, coord = coord)
            
            try:
                print('\nEvaluating......')
                num_step = int(math.floor(n_test / BATCH_SIZE))
                num_sample = num_step*BATCH_SIZE
                step = 0
                total_correct = 0
                while step < num_step and not coord.should_stop():
                    batch_correct = sess.run(correct)
                    total_correct += np.sum(batch_correct)
                    step += 1
                print('Total testing samples: %d' %num_sample)
                print('Total correct predictions: %d' %total_correct)
                print('Average accuracy: %.2f%%' %(100*total_correct/num_sample))
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
Example #3
def val():
    num_test = 10000
    data_dir = '/home/rong/something_for_deep/cifar-10-batches-bin'
    train_log_dir = './logs/train'

    image_batch, label_batch = input_data.read_cifar10(data_dir,
                                                       is_train=False,
                                                       batch_size=BATCH_SIZE,
                                                       shuffle=False)
    vgg16 = model.VGG16()
    logits = vgg16.build(image_batch, NUM_CLASSES, False)
    saver = tf.train.Saver()
    correct_per_batch = tools.num_correct_prediction(logits, label_batch)

    with tf.Session() as sess:
        print('Reading checkpoints')
        ckpt = tf.train.get_checkpoint_state(train_log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')
            return

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            print('\nEvaluating......')
            num_step = int(math.floor(num_test / BATCH_SIZE))
            num_sample = num_step * BATCH_SIZE
            step = 0
            total_correct = 0
            while step < num_step and not coord.should_stop():
                batch_correct = sess.run(correct_per_batch)
                total_correct += np.sum(batch_correct)
                step += 1
                if step % 10 == 0:
                    print('Testing samples: %d' % (step * BATCH_SIZE))
                    print('Correct predictions: %d' % total_correct)
                    print('Average accuracy: %.2f%%' % (100 * total_correct /
                                                        (step * BATCH_SIZE)))
            print('Total testing samples: %d' % num_sample)
            print('Total correct predictions: %d' % total_correct)
            print('Average accuracy: %.2f%%' %
                  (100 * total_correct / num_sample))
        except Exception as e:
            coord.request_stop(e)
        finally:
            coord.request_stop()
            coord.join(threads)
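
The running tally these loops keep by hand (summing per-batch correct counts, then dividing at the end) can also be expressed with the built-in streaming metric in TensorFlow 1.x. A minimal sketch, with illustrative placeholder shapes and assuming integer labels:

import tensorflow as tf

labels = tf.placeholder(tf.int64, shape=[None])        # illustrative inputs
logits = tf.placeholder(tf.float32, shape=[None, 10])

predictions = tf.argmax(logits, axis=1)
accuracy, update_op = tf.metrics.accuracy(labels=labels, predictions=predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # the metric's counters are local variables
    # per batch: sess.run(update_op, feed_dict={labels: ..., logits: ...})
    # at the end: final_accuracy = sess.run(accuracy)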
Example #4
def evaluate():
    with tf.Graph().as_default():

        log_dir = 'logs/train/'
        test_data_dir = 'F:\\flowersdata\\tfrecord\\test\\testdata.tfrecords*'
        n_test = 502

        #read test
        val_image_batch, val_label_batch = input_data.read_TFRecord(
            data_dir=test_data_dir,
            batch_size=EVA_BATCH_SIZE,
            shuffle=False,
            in_classes=N_CLASSES)

        logits = VGG.VGG16N(val_image_batch, N_CLASSES, IS_PRETRAIN)
        correct = tools.num_correct_prediction(logits, val_label_batch)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                num_step = int(math.floor(n_test / EVA_BATCH_SIZE))
                num_sample = num_step * EVA_BATCH_SIZE
                step = 0
                total_correct = 0
                while step < num_step and not coord.should_stop():
                    batch_correct = sess.run(correct)
                    total_correct += np.sum(batch_correct)
                    step += 1
                print('Total testing samples: %d' % num_sample)
                print('Total correct predictions: %d' % total_correct)
                print('Average accuracy: %.2f%%' %
                      (100 * total_correct / num_sample))
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
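
Several of these examples run on Windows (see the backslash paths in Examples #4, #5, and #14) yet parse the global step with ckpt.model_checkpoint_path.split('/'), which fails silently if the stored path uses backslashes. A separator-agnostic sketch (on Windows, os.path.basename accepts both separators):

import os

def step_from_checkpoint(model_checkpoint_path):
    # e.g. 'logs\\train\\model.ckpt-8000' -> 'model.ckpt-8000' -> '8000'
    return os.path.basename(model_checkpoint_path).split('-')[-1]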
Example #5
def whole_validate(ckpt_path):
    feature_dict = input_data.get_feature_dict('D:/data/data.csv', 'CRC death')
    log_dir = 'C:\\Users\\xy31\\PycharmProjects\\VGG\\logs\\train\\'
    val_path = 'D:\\data\\1-16\\whole_val\\'
    collections = os.listdir(val_path)
    collection_correct = 0

    x = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMG_W, IMG_H, IMG_D))
    labels = tf.placeholder(tf.int16, shape=(BATCH_SIZE, N_CLASSES))
    logits = VGG.VGG16(x, N_CLASSES, keep_prob=1, is_pretrain=False)
    saver = tf.train.Saver(tf.global_variables())
    correct = tools.num_correct_prediction(logits, labels)

    with tf.Session() as sess:
        print('Reading check points')
        saver.restore(sess, ckpt_path)
        print('Loading successful')

        for collection in collections:
            # print('validating ' + collection)
            collection_path = os.path.join(val_path, collection)
            tile_list = os.listdir(collection_path)
            n_tiles = len(tile_list)
            num_step = int(math.floor(n_tiles / BATCH_SIZE))
            num_sample = num_step * BATCH_SIZE
            num_correct = 0
            for step in range(num_step):
                val_image_batch, val_label_batch = input_data.read_local_data_CRCdeath(
                    data_dir=data_dir,  # module-level constant (assumed), not defined in this function
                    is_train=False,
                    batch_size=BATCH_SIZE,
                    step=step,
                    feature_dict=feature_dict,
                    n_classes=N_CLASSES,
                    name_list=tile_list)
                val_labels = sess.run(val_label_batch)
                val_images = val_image_batch
                batch_correct = sess.run(correct,
                                         feed_dict={
                                             x: val_images,
                                             labels: val_labels
                                         })
                num_correct += np.sum(batch_correct)

            print(collection + ' accuracy: %.2f%%' %
                  (100 * num_correct / num_sample))
            if num_correct >= num_sample / 2:
                collection_correct += 1

        collection_accuracy = collection_correct / len(collections)

        print('Collection-level accuracy: %.2f%%' %
              (100 * collection_accuracy))
Example #6
def evaluate():
    with tf.Graph().as_default():
        log_dir = 'C:/3_5th/VGG_model/logs/train/'  # training log dir, i.e., the trained parameters
        test_dir = r'C:\data_libary\cifar-10-batches-bin'
        n_test = 10000

        images, labels = input_data.read_cifar10(data_dir=test_dir,
                                                 is_train=False,
                                                 batch_size=BATCH_SIZE,
                                                 shuffle=False)

        logits = VGG.VGG16N(images, N_CLASSES, IS_PRETRAIN)
        correct = tools.num_correct_prediction(logits, labels)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                print("Checkpoint file found")
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                num_step = int(math.floor(n_test / BATCH_SIZE))
                num_sample = num_step * BATCH_SIZE
                step = 0
                total_correct = 0
                while step < num_step and not coord.should_stop():
                    batch_correct = sess.run(correct)
                    total_correct += np.sum(batch_correct)
                    step += 1
                print('Total testing samples: %d' % num_sample)
                print('Total correct predictions: %d' % total_correct)
                print('Average accuracy: %.2f%%' %
                      (100 * total_correct / num_sample))
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
Example #7
def evaluate():
    with tf.Graph().as_default():

        log_dir = './logs2/train/'
        test_dir = '/content/data/'
        n_test = 10000

        test_image_batch, test_label_batch = input_data.read_cifar10(
            test_dir, is_train=False, batch_size=BATCH_SIZE, shuffle=False)

        logits = VGG.MyResNet(test_image_batch, N_CLASSES, IS_PRETRAIN)
        correct = tools.num_correct_prediction(logits, test_label_batch)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:

            print('Reading checkpoint...')
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Load success, global step: %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating...')
                # floor, not ceil: the queue delivers fixed-size batches and
                # wraps around, so an extra step would score some samples twice
                num_step = int(math.floor(n_test / BATCH_SIZE))
                num_example = num_step * BATCH_SIZE
                step = 0
                total_correct = 0
                while step < num_step and not coord.should_stop():
                    batch_correct = sess.run(correct)
                    total_correct += np.sum(batch_correct)
                    step += 1

                print("Total test examples: %d" % num_example)
                print("Total correct predictions: %d" % total_correct)
                print("Average accuracy: %.2f%%" %
                      (100 * total_correct / num_example))
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
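
Most examples here take math.floor(n_test / BATCH_SIZE) steps, deliberately dropping the remainder samples. Worked through for the usual CIFAR-10 numbers:

import math

n_test, BATCH_SIZE = 10000, 32
num_step = int(math.floor(n_test / BATCH_SIZE))  # 312 full batches
num_sample = num_step * BATCH_SIZE               # 9984 samples actually scored
# math.ceil would give 313 steps = 10016 runs: the queue wraps around and
# re-serves early samples, so some images are scored twice, biasing accuracy.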
Example #8
def evaluate():
    with tf.Graph().as_default():
        
        log_dir = './logs/vgg16_logs/train/'
        data_dir = './data/cifar-10-batches-bin/'
        n_test = 10000
                
        images, labels = input_data.read_cifar10(data_dir=data_dir,
                                                 is_train=False,
                                                 batch_size=BATCH_SIZE,
                                                 shuffle=False)

        logits = VGG.VGG16N(images, N_CLASSES, IS_PRETRAIN)  # shape of logits: [BATCH_SIZE, N_CLASSES]
        correct = tools.num_correct_prediction(logits, labels)  # per-batch count of correct predictions
        saver = tf.train.Saver(tf.global_variables())
        
        with tf.Session() as sess:
            
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return
        
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            
            try:
                print('\nEvaluating......')
                # math.floor() returns the largest integer <= its argument
                num_step = int(math.floor(n_test / BATCH_SIZE))  # num_step = 312
                num_sample = num_step*BATCH_SIZE   # num_sample = 9984
                step = 0
                total_correct = 0
                while step < num_step and not coord.should_stop():
                    batch_correct = sess.run(correct)    # correct predictions in this batch
                    total_correct += np.sum(batch_correct)  # accumulate the running total
                    step += 1
                print('Total testing samples: %d' % num_sample)
                print('Total correct predictions: %d' % total_correct)
                print('Average accuracy: %.2f%%' % (100*total_correct/num_sample))
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
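
If those dropped remainder samples matter, a tf.train.batch-based pipeline can emit a smaller final batch instead. A sketch under the assumption that the input pipeline is built on tf.train.batch with a finite number of epochs (the constant tensors below only stand in for real per-example ops):

import tensorflow as tf

BATCH_SIZE = 32  # illustrative

# Stand-ins for per-example tensors that a real one-epoch reader would produce
# (e.g. via tf.train.string_input_producer(..., num_epochs=1)).
image = tf.zeros([32, 32, 3], dtype=tf.float32)
label = tf.zeros([], dtype=tf.int64)

images, labels = tf.train.batch([image, label],
                                batch_size=BATCH_SIZE,
                                allow_smaller_final_batch=True)
# The eval loop then runs until tf.errors.OutOfRangeError instead of a fixed
# num_step, so every test sample is scored exactly once.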
Example #9
def evaluate():
    with tf.Graph().as_default():
        
#        log_dir = 'C://Users//kevin//Documents//tensorflow//VGG//logsvgg//train//'
        log_dir = 'C:/Users/kevin/Documents/tensorflow/VGG/logs/train/'
        test_dir = './data/cifar-10-batches-bin/'
        n_test = 10000
                
        images, labels = input_data.read_cifar10(data_dir=test_dir,
                                                    is_train=False,
                                                    batch_size= BATCH_SIZE,
                                                    shuffle=False)

        logits = VGG.VGG16N(images, N_CLASSES, IS_PRETRAIN)
        correct = tools.num_correct_prediction(logits, labels)
        saver = tf.train.Saver(tf.global_variables())
        
        with tf.Session() as sess:
            
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return
        
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess = sess, coord = coord)
            
            try:
                print('\nEvaluating......')
                num_step = int(math.floor(n_test / BATCH_SIZE))
                num_sample = num_step*BATCH_SIZE
                step = 0
                total_correct = 0
                while step < num_step and not coord.should_stop():
                    batch_correct = sess.run(correct)
                    total_correct += np.sum(batch_correct)
                    step += 1
                print('Total testing samples: %d' %num_sample)
                print('Total correct predictions: %d' %total_correct)
                print('Average accuracy: %.2f%%' %(100*total_correct/num_sample))
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
Example #10
def evaluate():
    tf.reset_default_graph()
    val_image, val_label = load_tfrecord(shuffle=False,
                                         path="test.tfrecords",
                                         batch_size=BATCH_SIZE)

    logits = net.CNN(val_image, N_CLASSES, is_pretrain=True)
    correct = tools.num_correct_prediction(logits, val_label)
    saver = tf.train.Saver(tf.global_variables())

    with tf.Session() as sess:
        # tf.get_variable_scope().reuse_variables()
        print("Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(train_log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')
            return

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            print('\nEvaluating......')
            num_step = int(math.floor(n_test / BATCH_SIZE))
            num_sample = num_step * BATCH_SIZE
            step = 0
            total_correct = 0
            while step < num_step and not coord.should_stop():
                batch_correct = sess.run(correct)
                total_correct += np.sum(batch_correct)
                step += 1
            print('Total testing samples: %d' % num_sample)
            print('Total correct predictions: %d' % total_correct)
            print('Average accuracy: %.2f%%' %
                  (100 * total_correct / num_sample))

        except tf.errors.OutOfRangeError:
            print('Done reading')
        finally:
            coord.request_stop()
            coord.join(threads)
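
For reference, the Coordinator/queue-runner lifecycle that all of these evaluation loops follow, reduced to a runnable skeleton (num_step is illustrative):

import tensorflow as tf

num_step = 10  # illustrative

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        step = 0
        while step < num_step and not coord.should_stop():
            step += 1              # sess.run(...) on the eval ops goes here
    except tf.errors.OutOfRangeError:
        pass                       # finite-epoch input pipelines end up here
    finally:
        coord.request_stop()       # ask the runner threads to stop...
        coord.join(threads)        # ...and wait for them to finish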
Example #11
def evaluate():
    with tf.Graph().as_default():
        BATCH_SIZE = 5
        N_CLASSES = 146
        log_dir = '/home/hadoop/Desktop/My-TensorFlow-tutorials-master/VGG face segmentation  recognition/logss/va_shuffle/'
        #log_dir = '/home/hadoop/Desktop/My-TensorFlow-tutorials-master/VGG face segmentation  recognition/logs/train/'
        #data_dir2 = '/home/hadoop/Desktop/My-TensorFlow-tutorials-master/VGG face segmentation  recognition/data/segmentation/test/'
        data_dir2 = '/home/hadoop/Desktop/My-TensorFlow-tutorials-master/VGG face segmentation  recognition/data/data1/testold/'

        image, label = notMNIST_input.get_file(data_dir2)
        image_batch, label_batch = notMNIST_input.get_batch(image,
                                                            label,
                                                            IMG_W,
                                                            IMG_H,
                                                            BATCH_SIZE,
                                                            capacity,
                                                            shuffle=False)
        x = tf.placeholder(tf.float32,
                           shape=[BATCH_SIZE, IMG_W, IMG_H, 3],
                           name='place_x')
        y_ = tf.placeholder(tf.int64, shape=[BATCH_SIZE], name='place_y')
        image_batch = tf.cast(image_batch, dtype=tf.float32)
        label_batch = tf.cast(label_batch, dtype=tf.int64)

        logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)

        correct = tools.num_correct_prediction(logits, y_)  # per-batch correct count, not a ratio
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                #                global_step=4500
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                num_step = int(math.floor(len(image) / BATCH_SIZE))
                #                num_sample = num_step*BATCH_SIZE
                step = 0
                total_correct = 0
                while step < num_step and not coord.should_stop():
                    #                    print(step)
                    x_train_a, y_train_a = sess.run([image_batch, label_batch])
                    batch_correct = sess.run(correct,
                                             feed_dict={
                                                 x: x_train_a,
                                                 y_: y_train_a
                                             })
                    print("batch_correct:", batch_correct)
                    total_correct += np.sum(batch_correct)
                    print("total_correct:", total_correct)
                    step += 1
                num_sample = num_step * BATCH_SIZE
                print('Total testing samples: %d' % num_sample)
                print('Total correct predictions: %d' % total_correct)
                print('Average accuracy: %.2f%%' %
                      (100 * total_correct / num_sample))
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
Example #12
def evaluate1():

    with tf.Graph().as_default():
        log_dir = './logs/train_st2/'  # 'C:/3_5th/VGG_model/logs/train/'  # training log dir, i.e., the trained parameters
        # test_dir = './/cifar10_data//cifar-10-batches-bin//'
        test_dir = './data'
        n_test = 3000

        input_images, input_labels, input_labels1 = input_data.read_cifar10(
            data_dir=test_dir,
            is_train=True,
            batch_size=BATCH_SIZE,
            shuffle=True)
        # mean_data = np.mean(mnist.train.images, axis=0)
        logits, features = VGG.VGG16N(input_images, N_CLASSES, IS_PRETRAIN)
        correct = tools.num_correct_prediction(logits, input_labels)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                print("Checkpoint file found")
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                # j=0
                d = []
                tt = np.array([])

                for step in np.arange(1):
                    if coord.should_stop():
                        break
                    feat = np.array([])
                    images, labels = sess.run([input_images, input_labels])
                    mean_data = np.mean(images, axis=0)
                    # label=[]
                    # np.concatenate((array,q),axis=0)
                    b = np.transpose(np.nonzero(labels))[:, 1]  # one-hot labels -> class indices
                    # d = b
                    d = np.concatenate((d, b), axis=0)
                    # labels=labels.tolist()
                    # print(d)
                    if step == 0:
                        tt = sess.run(
                            features,
                            feed_dict={input_images: images - mean_data})
                    else:
                        feat = sess.run(
                            features,
                            feed_dict={input_images: images - mean_data})

                    if step != 0:
                        tt = np.concatenate([tt, feat])

                fig = plt.figure(figsize=(16, 9))
                # fig = plt.figure()
                # ax = Axes3D(fig)
                #aa = TSNE(n_components=2).fit_transform(tt)
                pca = PCA(n_components=2)
                pca.fit(tt)
                aa = pca.transform(tt)
                #io.savemat('zongtnse.mat', {'matrix': aa})
                #lda = LinearDiscriminantAnalysis(n_components=2)
                #lda.fit(tt, d)
                #aa = lda.transform(tt)
                np.save('save_pca', aa)
                #aa = TSNE(n_components=2).fit_transform(tt)

                #print(aa[d==0,0].flatten())
                #np.save('vgg-9', aa)
                # ax.scatter(aa[:,0],aa[:,1],aa[:,2],c=labels1)
                # f = plt.figure()
                c = [
                    '#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
                    '#ff00ff', '#990000', '#999900', '#009900'
                ]
                for i in range(9):
                    plt.plot((aa[d == i, 0].flatten()) / 100.0,
                             (aa[d == i, 1].flatten()) / 100.0,
                             '.',
                             markersize=10,
                             c=c[i])

                plt.legend(['1', '2', '3', '4', '5', '6', '7', '8', '9'])
                # plt.xlim(-10, 10)
                # plt.ylim(-10,10)
                plt.grid()
                plt.show()
                # plt.close(fig)

            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
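
Example #12 calls plotting and PCA routines whose imports are not shown; given the names it uses, the file presumably opens with something like this (the project-local modules are assumptions):

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.decomposition import PCA

import input_data  # the project's own data module (assumed)
import VGG         # the project's own model module (assumed)
import tools       # the project's own helper module (assumed)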
Example #13
def evaluate():
    with tf.Graph().as_default():

        log_dir = '/home/daijiaming/Galaxy/VGG16/logs/train/'
        #        test_dir='/home/daijiaming/Galaxy/data3/testset/'
        #        test_label_dir='/home/daijiaming/Galaxy/data3/test_label.csv'
        #        test_dir='/home/daijiaming/Galaxy/data3/test1000/'
        #        test_label_dir='/home/daijiaming/Galaxy/data3/test1000_label.csv'
        test_dir = '/home/daijiaming/Galaxy/data3/train1000/'
        test_label_dir = '/home/daijiaming/Galaxy/data3/train1000_label.csv'
        #        test_dir='/home/daijiaming/Galaxy/data3/trainset/'
        #        test_label_dir='/home/daijiaming/Galaxy/data3/train_label.csv'
        n_test = 1000

        val_image_batch, val_label_batch, val_galalxyid_batch = input_data.read_galaxy11_test(
            data_dir=test_dir, label_dir=test_label_dir, batch_size=BATCH_SIZE)

        x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 64, 64, 3])
        y_ = tf.placeholder(tf.int32, shape=[BATCH_SIZE, N_CLASSES])
        keep_prob = tf.placeholder(tf.float32)

        logits, fc_output = VGG.VGG16N(x, N_CLASSES, keep_prob, IS_PRETRAIN)

        correct = tools.num_correct_prediction(logits, y_)
        accuracy = tools.accuracy(logits, y_)
        #        top_k_op = tf.nn.in_top_k(predictions=logits,targets=y_, k=1)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                num_step = int(math.ceil(n_test / BATCH_SIZE))
                num_sample = num_step * BATCH_SIZE
                step = 0
                total_correct = 0
                total_acc = 0.0
                #                true_count = 0
                while step < num_step and not coord.should_stop():
                    test_images, test_labels = sess.run(
                        [val_image_batch, val_label_batch])
                    batch_correct, test_acc, batch_logits = sess.run(
                        [correct, accuracy, logits],
                        feed_dict={
                            x: test_images,
                            y_: test_labels,
                            keep_prob: 1
                        })
                    #                    print('test_acc = %.3f' % test_acc)
                    total_correct += np.sum(batch_correct)
                    total_acc = total_acc + test_acc
                    #                    print('total_acc = %.3f' % total_acc)
                    #                    true_count += np.sum(predictions)
                    if step == 0:
                        a = test_labels
                        b = batch_logits
                    if step >= 1:
                        a = np.concatenate((a, test_labels))
                        b = np.concatenate((b, batch_logits))
                    step += 1
#                precision = true_count / num_sample
                aver_acc = total_acc / num_step
                print('Aver acc = %.4f' % aver_acc)
                print('Total testing samples: %d' % num_sample)
                print('Total correct predictions: %d' % total_correct)
                print('Average accuracy: %.4f%%' %
                      (100 * total_correct / num_sample))
                np.savetxt('./labels1000.csv', a, delimiter=',')
                np.savetxt('./logits1000.csv', b, delimiter=',')
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
Example #14
def get_cnn_output(data_type, ckpt_path):
    feature_dict = input_data.get_feature_dict('D:/data/data.csv', 'CRC death')
    log_dir = 'C:\\Users\\xy31\\PycharmProjects\\VGG\\logs\\train\\'
    train_path = os.path.join(data_dir, data_type)
    list_alltiles = os.listdir(train_path)
    x = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMG_W, IMG_H, IMG_D))
    labels = tf.placeholder(tf.int16, shape=(BATCH_SIZE, N_CLASSES))
    logits = VGG.VGG16(x, N_CLASSES, keep_prob=1, is_pretrain=False)
    saver = tf.train.Saver(tf.global_variables())
    correct = tools.num_correct_prediction(logits, labels)

    data_dict = {}
    output_dict = {}

    with tf.Session() as sess:
        print('Reading check points')
        saver.restore(sess, ckpt_path)
        print('Loading successful')

        for tile_name in list_alltiles:
            image_index = tile_name[12:16]  # image id at a fixed position in the tile filename
            aug_index = tile_name[23]       # augmentation id, likewise position-based
            if image_index not in data_dict.keys():
                data_dict[image_index] = dict()
                data_dict[image_index][aug_index] = [tile_name]
            elif aug_index not in data_dict[image_index].keys():
                data_dict[image_index][aug_index] = [tile_name]
            else:
                data_dict[image_index][aug_index].append(tile_name)

            if image_index not in output_dict:
                output_dict[image_index] = {}

            if aug_index not in output_dict[image_index]:
                output_dict[image_index][aug_index] = []

        progress_index = 1

        for key_image in data_dict.keys():
            aug_dict = data_dict[key_image]
            for key_aug in aug_dict:
                tile_names = aug_dict[key_aug]
                n_padded = 0
                while len(tile_names) % BATCH_SIZE != 0:
                    tile_names.append(tile_names[0])
                    n_padded += 1
                n_tiles = len(tile_names)
                num_step = int(n_tiles / BATCH_SIZE)
                for step in range(num_step):
                    batch_tiles = tile_names[step * BATCH_SIZE:(step + 1) *
                                             BATCH_SIZE]
                    if data_type == 'train':
                        is_train = True
                    elif data_type == 'validate':
                        is_train = False
                    else:
                        return 1
                    val_image_batch, val_label_batch = input_data.read_local_data_CRCdeath(
                        data_dir=data_dir,
                        is_train=is_train,
                        batch_size=BATCH_SIZE,
                        step=step,
                        feature_dict=feature_dict,
                        n_classes=N_CLASSES,
                        name_list=tile_names)
                    val_images = val_image_batch
                    logits_array = sess.run(logits, feed_dict={x: val_images})
                    logits_array = logits_array.tolist()
                    if step != num_step - 1:
                        for i in range(BATCH_SIZE):
                            output_dict[key_image][key_aug].append(
                                logits_array[i])
                    else:
                        for i in range(BATCH_SIZE - n_padded):
                            output_dict[key_image][key_aug].append(
                                logits_array[i])

            print('finished %d file(s) of %d files ' %
                  (progress_index, len(data_dict.keys())))
            progress_index += 1

        with open('cnn_output_' + data_type + '.json', 'w') as file_output:
            file_output.write(json.dumps(output_dict, indent=4))
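
Example #14 pads each tile list up to a multiple of BATCH_SIZE, then drops the logits of the padded tiles on the last step. The bookkeeping, worked through for a hypothetical BATCH_SIZE of 4:

BATCH_SIZE = 4                                      # hypothetical
tile_names = ['t1', 't2', 't3', 't4', 't5', 't6']   # 6 real tiles

n_padded = 0
while len(tile_names) % BATCH_SIZE != 0:
    tile_names.append(tile_names[0])                # pad by repeating the first tile
    n_padded += 1
num_step = len(tile_names) // BATCH_SIZE
# tile_names is now 8 long, n_padded == 2, num_step == 2; on the final step only
# BATCH_SIZE - n_padded == 2 real logits are kept, so the duplicated tiles never
# reach output_dict.
print(num_step, n_padded)  # -> 2 2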