Example #1
def eval(data_file=None, crop_size=None, ckpt_file=None, batch_size=128):
    with tf.Graph().as_default():
        if FLAGS.use_fp16:
            FP = tf.float16
        else:
            FP = tf.float32
        eval_input = CNNEvalInput(data_file, crop_size=crop_size)
        images = tf.placeholder(
            FP,
            shape=[batch_size, crop_size[0], crop_size[1], 1],
            name='image')
        # inference
        logits = cnn_model.inference(images)

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()

        # run graph in session
        with tf.Session() as sess:
            init = tf.global_variables_initializer()  # op that initializes all variables
            sess.run(init)
            print('restore sess with %s' % ckpt_file)
            saver.restore(sess, ckpt_file)

            n_frames = eval_input.data.shape[0]
            probs = np.zeros((n_frames, 2))
            count = 0
            start = time.time()
            while True:
                batch_images, ending = eval_input.next_batch(
                    batch_size=batch_size)
                logits_value = sess.run(logits,
                                        feed_dict={
                                            images: batch_images,
                                        })
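                # exp_logits has shape [batch_size, 2]; dividing its transpose by the
                # per-row sums broadcasts to [2, batch_size], and prob.T below restores
                # the [batch_size, 2] layout.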
                exp_logits = np.exp(logits_value)
                prob = exp_logits.T / np.sum(exp_logits, axis=1)

                if ending:
                    probs[count:] = prob.T[:n_frames - count]
                    break
                else:
                    probs[count:count + batch_size] = prob.T
                    count += batch_size
            end = time.time()
            print('time elapsed %.3f' % (end - start))
            return probs
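For reference, a minimal sketch of how eval() above might be invoked, plus the softmax it computes per batch written in a numerically stabler NumPy form; the file names, crop size, and checkpoint path are hypothetical placeholders, and FLAGS is assumed to be defined elsewhere in the module.

probs = eval(data_file='frames.npy',                   # hypothetical input file
             crop_size=(96, 96),                       # hypothetical crop size
             ckpt_file='train_dir/model.ckpt-10000',   # hypothetical checkpoint
             batch_size=128)
predictions = np.argmax(probs, axis=1)

def stable_softmax(logits_value):
    # Same normalization as exp_logits / sum(exp_logits), shifted for stability.
    shifted = logits_value - logits_value.max(axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)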
Example #2
def evaluate_one_image(image_array):
    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        # Build the graph from a placeholder so the fed image is actually used.
        x = tf.placeholder(tf.float32, shape=[64, 64, 3])

        image = tf.cast(x, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 64, 64, 3])

        logit = cnn_model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        # you need to change the directories to yours.
        logs_train_dir = '/Users/gore/Desktop/object-detection/output/'

        saver = tf.train.Saver()

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a crack with probability %.6f' %
                      prediction[0, 0])
            else:
                print('This is a background with probability %.6f' %
                      prediction[0, 1])
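evaluate_one_image() expects a 64x64x3 array; a small sketch of preparing one from disk with Pillow (the image path is a hypothetical placeholder):

import numpy as np
from PIL import Image

img = Image.open('patch.jpg').convert('RGB').resize((64, 64))  # hypothetical path
image_array = np.array(img)
evaluate_one_image(image_array)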
Example #3
def main():

    # Placeholders
    learning_rate = tf.placeholder(tf.float32)
    keep_prob = tf.placeholder(tf.float32)
    images = tf.placeholder(tf.float32,
                            [None, image_height, image_width, num_channels])
    labels = tf.placeholder(tf.int64, [None])
    phase = tf.placeholder(tf.bool, [])

    with tf.device('/gpu:%d' % gpu_number):
        logits = cnn_model.inference(images,
                                     phase=phase,
                                     dropout_rate=keep_prob)
        var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        loss = get_loss(logits, labels)
        # FILL IN. Obtain accuracy of given batch of data.
        with tf.name_scope('prediction'):
            correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('train_accuracy', accuracy)

    apply_gradient_op = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(
            loss)  # YOU MAY MODIFY THE OPTIMIZER

    # Summary list
    tf.summary.scalar('Total Loss', loss)
    tf.summary.image('Input', images, max_outputs=batch_size)
    for var in var_list:
        tf.summary.histogram(var.op.name, var)

    # Initialize
    init = tf.global_variables_initializer()
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(init)
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(tensorboard_path, sess.graph)
    saver = tf.train.Saver()

    # Start from scratch or load model
    if not use_pretrained_model:
        lr = init_lr
        epoch_num = 0
    else:
        lr = np.load('learning_rate.npy')
        epoch_num = np.load('epoch_num.npy')
        saver.restore(sess, model_save_path + '/model')

    trlist = list(open('train.list', 'r'))
    testlist = list(open('test.list', 'r'))

    test_accuracy = []
    train_accuracy = []
    train_loss = []
    # Start training
    print "Start training..."
    for i in range(epoch_num, max_epoch):

        # Update learning rate if required

        shuffle(trlist)
        _acc = 0
        epoch_start_time = time.time()
        for pos in range(0, len(trlist), batch_size):

            # Load batch data
            t = time.time()
            batch_images, batch_labels = read_data(
                trlist, range(pos, min(pos + batch_size, len(trlist))))
            dt = time.time() - t

            # Train with batch
            t = time.time()
            _, cost, acc = sess.run(
                [apply_gradient_op, loss, accuracy],
                feed_dict={
                    images: batch_images,
                    labels: batch_labels,
                    learning_rate: lr,
                    phase: True,
                    keep_prob: 0.8
                })
            # print('Epoch: %d, Item : %d, Loss: %.5f, Train Accuracy: %.2f, Data Time: %.2f, Network Time: %.2f' %(i, pos, cost, acc, dt, time.time()-t))
            train_loss.append(cost)
            train_accuracy.append(acc)
            _acc += acc * len(batch_labels)

        # lr = 0.95 * lr
        # Test, Save model
        # FILL IN. Obtain test_accuracy on the entire test set and append it to variable test_accuracy.
        num_test_examples = len(testlist)
        total_accuracy = 0
        for offset in range(0, num_test_examples, batch_size):
            test_images, test_labels = read_data(
                testlist, range(offset, min(offset + batch_size,
                                            len(testlist))))
            acc = sess.run(
                accuracy,
                feed_dict={
                    images: test_images,
                    labels: test_labels,
                    phase: False,
                    keep_prob: 1.0
                })
            total_accuracy += (acc * len(test_labels))
        test_acc = total_accuracy / num_test_examples
        test_accuracy.append(test_acc)
        print('Epoch: %d, Train Accuracy: %.2f, Test Accuracy: %.2f, Epoch Time: %.2f' %
              (i, _acc / len(trlist), test_acc, time.time() - epoch_start_time))

        np.save('test_accuracy.npy', test_accuracy)
        sio.savemat('test_accuracy.mat',
                    mdict={'test_accuracy': test_accuracy})
        np.save('train_accuracy.npy', train_accuracy)
        sio.savemat('train_accuracy.mat',
                    mdict={'train_accuracy': train_accuracy})
        np.save('train_loss.npy', train_loss)
        sio.savemat('train_loss.mat', mdict={'train_loss': train_loss})
        np.save('learning_rate.npy', lr)
        np.save('epoch_num.npy', i)
        saver.save(sess, model_save_path + '/model')

    print('Training done.')
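get_loss() is not shown in this example; one plausible definition, assuming the labels are integer class indices, is a mean sparse softmax cross-entropy:

def get_loss(logits, labels):
    # Assumed implementation; the original project may differ.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)
    return tf.reduce_mean(cross_entropy, name='total_loss')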
Example #4
def train():
    batch_size = FLAGS.batch_size
    crop_size = FLAGS.crop_size

    with tf.Graph().as_default():
        global_step = tf.Variable(0, name='global_step', trainable=False)
        # input
        train_input = CNNTrainInput(FLAGS.data_file)

        images = tf.placeholder(tf.float32,
                                shape=[batch_size, crop_size, crop_size, 1],
                                name='image')
        labels = tf.placeholder(tf.int64, shape=[batch_size], name='label')
        # Display the training images in the visualizer.

        image_slices = tf.slice(images, [0, 0, 0, 0],
                                [int(batch_size), crop_size, crop_size, 1],
                                name='central_slice')
        tf.summary.image('image_slices', image_slices, max_outputs=10)

        # inference
        logits = cnn_model.inference(images)

        # calculate accuracy and error rate
        prediction = tf.argmax(logits, 1)
        correct_prediction = tf.equal(prediction, labels)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        error_rate = 1 - accuracy
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('error_rate', error_rate)

        # train to minimize loss
        loss = cnn_model.loss(logits, labels)
        lr = tf.placeholder(tf.float64, name='learning_rate')
        tf.summary.scalar('learning_rate', lr)
        train_op = cnn_model.train(loss, lr, global_step)

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()

        # run graph in session
        with tf.Session() as sess:
            init = tf.global_variables_initializer()  # op that initializes all variables
            sess.run(init)
            merged = tf.summary.merge_all()
            writer = tf.summary.FileWriter('%s' % FLAGS.train_dir, sess.graph)

            if FLAGS.load_ckpt:
                ckpt_file = '%s/model.ckpt-%d' % \
                    (FLAGS.train_dir, FLAGS.ckpt_step)
                print('restore sess with %s' % ckpt_file)
                saver.restore(sess, ckpt_file)

            start = time.time()
            for step in range(FLAGS.max_steps):
                batch_images, batch_labels = train_input.next_batch(
                    batch_size=batch_size)
                if FLAGS.shuffle:
                    idx = np.arange(batch_size)
                    np.random.shuffle(idx)
                    batch_images = batch_images[idx]
                    batch_labels = batch_labels[idx]
                lr_value = FLAGS.learning_rate * pow(
                    FLAGS.decay_factor, (step / FLAGS.decay_steps))
                _, err, g_step, loss_value, summary = sess.run(
                    [train_op, error_rate, global_step, loss, merged],
                    feed_dict={
                        labels: batch_labels,
                        images: batch_images,
                        lr: lr_value,
                    })

                if step % FLAGS.log_frequency == 0 and step != 0:
                    end = time.time()
                    duration = end - start
                    examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
                    sec_per_batch = float(duration / FLAGS.log_frequency)
                    format_str = (
                        '%s: step %d, loss = %.2f, err = %.4f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), g_step, loss_value,
                                        err, examples_per_sec, sec_per_batch))
                    writer.add_summary(summary, g_step)
                    # Save the variables to disk.
                    saver.save(sess,
                               '%s/model.ckpt' % FLAGS.train_dir,
                               global_step=g_step)
                    start = end
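The learning rate above is decayed by hand in Python (lr_value = learning_rate * decay_factor ** (step / decay_steps)) and fed through a placeholder; an equivalent in-graph alternative, assuming the same FLAGS names, would be:

lr = tf.train.exponential_decay(FLAGS.learning_rate,
                                global_step,
                                decay_steps=FLAGS.decay_steps,
                                decay_rate=FLAGS.decay_factor,
                                staircase=False)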
Example #5
def main():

    # Placeholders
    #starter_learning_rate = tf.placeholder(tf.float32)
    keep_prob = tf.placeholder(tf.float32)
    images = tf.placeholder(tf.float32,
                            [None, image_height, image_width, num_channels])
    labels = tf.placeholder(tf.int64, [None])
    phase = tf.placeholder(tf.bool, [])

    with tf.device('/gpu:%d' % gpu_number):
        logits = cnn_model.inference(images,
                                     phase=phase,
                                     dropout_rate=keep_prob)
        var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        loss = get_loss(logits, labels)
        # FILL IN. Obtain accuracy of given batch of data.
        correct_pred = tf.equal(tf.argmax(logits, 1), labels)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    if use_pretrained_model:
        global_step_ini = np.load('global_step.npy')
    else:
        global_step_ini = 0
    # modify learning rate
    global_step = tf.get_variable(
        'global_step', [],
        initializer=tf.constant_initializer(global_step_ini),
        trainable=False)
    learning_rate = tf.train.exponential_decay(init_lr,
                                               global_step,
                                               decay_steps=1000,
                                               decay_rate=0.95)
    tf.summary.scalar('Learning Rate', learning_rate)
    # end of modificaiton

    apply_gradient_op = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(
            loss, global_step=global_step)  # YOU MAY MODIFY THE OPTIMIZER

    # Summary list
    tf.summary.scalar('Total Loss', loss)
    tf.summary.image('Input', images, max_outputs=batch_size)
    for var in var_list:
        tf.summary.histogram(var.op.name, var)

    # Initialize
    init = tf.global_variables_initializer()
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(init)
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(tensorboard_path + "/train",
                                         sess.graph)
    test_writer = tf.summary.FileWriter(tensorboard_path + "/test", sess.graph)
    saver = tf.train.Saver(tf.trainable_variables())

    # Start from scratch or load model
    if not use_pretrained_model:
        lr = init_lr
        epoch_num = 0
    else:
        lr = np.load('learning_rate.npy')
        epoch_num = np.load('epoch_num.npy')
        saver.restore(sess, model_save_path + '/model')

    trlist = list(open('train.list', 'r'))
    testlist = list(open('test.list', 'r'))

    test_accuracy = []
    train_accuracy = []
    train_loss = []
    # Start training
    for i in range(epoch_num, max_epoch):

        # Update learning rate if required

        shuffle(trlist)

        for pos in range(0, len(trlist), batch_size):

            # Load batch data
            t = time.time()
            batch_images, batch_labels = read_data(
                trlist, range(pos, min(pos + batch_size, len(trlist))))
            dt = time.time() - t

            # Train with batch
            t = time.time()
            _, cost, acc, now_lr, g_step, summary = sess.run(
                [apply_gradient_op, loss, accuracy, learning_rate, global_step, merged],
                feed_dict={
                    images: batch_images,
                    labels: batch_labels,
                    phase: True,
                    keep_prob: 0.8
                })
            print(
                'Epoch: %d, Item : %d, lr: %.5f, Loss: %.5f, Train Accuracy: %.2f, Data Time: %.2f, Network Time: %.2f'
                % (i, pos, now_lr, cost, acc, dt, time.time() - t))
            train_loss.append(cost)
            train_accuracy.append(acc)

            train_writer.add_summary(summary, g_step)
        # Test, Save model
        # FILL IN. Obtain test_accuracy on the entire test set and append it to variable test_accuracy.
        batch_images, batch_labels = read_data(testlist, range(len(testlist)))
        summary, acc = sess.run(
            [merged, accuracy],
            feed_dict={
                images: batch_images,
                labels: batch_labels,
                phase: False,
                keep_prob: 1.0
            })
        global_step_check = sess.run(global_step)
        test_writer.add_summary(summary, global_step_check)
        print('*****************************************')
        print('test accuracy %.2f' % acc)
        test_accuracy.append(acc)
        np.save('test_accuracy.npy', test_accuracy)
        sio.savemat('test_accuracy.mat',
                    mdict={'test_accuracy': test_accuracy})
        np.save('train_accuracy.npy', train_accuracy)
        sio.savemat('train_accuracy.mat',
                    mdict={'train_accuracy': train_accuracy})
        np.save('train_loss.npy', train_loss)
        sio.savemat('train_loss.mat', mdict={'train_loss': train_loss})
        np.save('learning_rate.npy', lr)
        np.save('global_step.npy', global_step_check)
        np.save('epoch_num.npy', i)
        saver.save(sess, model_save_path + '/model')

    print('Training done.')
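read_data(), used in Examples #3 and #5 but not shown, presumably maps list entries to (images, labels) arrays; a rough sketch, assuming each line of train.list/test.list is "<image_path> <label>" and that image_width and image_height are the globals used above:

import numpy as np
from PIL import Image

def read_data(file_list, indices):
    # Assumed list format: "<image_path> <label>" per line.
    imgs, lbls = [], []
    for idx in indices:
        path, label = file_list[idx].strip().split()
        img = Image.open(path).convert('RGB').resize((image_width, image_height))
        imgs.append(np.array(img, dtype=np.float32) / 255.0)
        lbls.append(int(label))
    return np.stack(imgs), np.array(lbls, dtype=np.int64)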
Example #6
def train(dataset):
    """Train this network model.
    Args:
        datasets: contain two elements: a tensor for feature,shape is [batch,height,width,channels]
                , and a tensor for label, shape is [batch, label_num].
    Return:
    """
    global GRID_FEATURE_SHAPE
    global FLAGS

    with tf.Graph().as_default():
        global_step = tf.Variable(2, trainable=False)
        """Build network."""
        x = tf.placeholder(
            tf.float32,
            shape=[None, GRID_FEATURE_SHAPE[0], GRID_FEATURE_SHAPE[1],
                   GRID_FEATURE_SHAPE[2]])  # [batch, height, width, depth]
        y_ = tf.placeholder(tf.float32, shape=[None])  # [batch]
        keep_prob = tf.placeholder(tf.float32, shape=[])
        logits = cnn_model.inference(x, keep_prob)
        """get variable"""
        # For Debug
        """build loss function and optimizer."""
        loss = cnn_model.loss(logits, y_)
        train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
        """build accuracy function"""
        # for column
        #pdb.set_trace()
        prediction = tf.equal(tf.argmax(logits, 1), tf.cast(y_, tf.int64))
        accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
        """train and save new model."""
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            """init saver and load old model."""
            saver = tf.train.Saver()
            ckpt = tf.train.get_checkpoint_state(CFLAGS.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                print('Found old model %s, trying to restore it.' %
                      ckpt.model_checkpoint_path)
                saver.restore(sess, ckpt.model_checkpoint_path)
            """training"""
            print('Begin training...')
            for i in range(global_step.eval(), 10000):
                batch = dataset.next_batch(FLAGS.batch_size)
                #tmp = sess.run(logits,feed_dict={x:batch[0], y_:batch[1], keep_prob:0.5})
                #pdb.set_trace()
                train_step.run(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    keep_prob: 0.5
                })
                # show train accuracy
                if (i + 1) % 10 == 0:
                    train_accuracy = accuracy.eval(feed_dict={
                        x: batch[0],
                        y_: batch[1],
                        keep_prob: 1.0
                    })
                    print('step %d, training accuracy %g' % (i, train_accuracy))
                # save checkpoint
                if (i + 1) % CFLAGS.checkpoint_steps == 0:
                    sess.run(tf.assign(global_step, i + 1))
                    saver.save(sess,
                               CFLAGS.checkpoint_dir + CFLAGS.checkpoint_file,
                               global_step=global_step)

            print('End training...')
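FLAGS and CFLAGS are assumed to be defined elsewhere in the project; a typical tf.app.flags setup for the names referenced above could look like this (the default values are placeholders):

flags = tf.app.flags
flags.DEFINE_integer('batch_size', 64, 'Batch size for training.')
flags.DEFINE_string('checkpoint_dir', './checkpoints/', 'Directory for checkpoints.')
flags.DEFINE_string('checkpoint_file', 'model.ckpt', 'Checkpoint file name.')
flags.DEFINE_integer('checkpoint_steps', 100, 'Save a checkpoint every N steps.')
FLAGS = flags.FLAGS  # CFLAGS would be a similar object defined in another module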
Example #7
learning_rate = 0.0001  # usually kept below 0.0001

# Get batches
train_dir = '/Users/gore/Desktop/object-detection/output'  # path to the training samples
logs_train_dir = '/Users/gore/Desktop/object-detection/output'  # path for storing logs
# logs_test_dir = 'E:/Re_train/image_data/test'  # path for storing logs

# train, train_label = input_data.get_files(train_dir)
train, train_label, val, val_label = image_input.get_files(train_dir, 0.3)
# Training data and labels
train_batch, train_label_batch = image_input.get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
# Test data and labels
val_batch, val_label_batch = image_input.get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

# Define the training ops
train_logits = cnn_model.inference(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = cnn_model.losses(train_logits, train_label_batch)
train_op = cnn_model.trainning(train_loss, learning_rate)
train_acc = cnn_model.evaluation(train_logits, train_label_batch)

# Define the test ops
test_logits = cnn_model.inference(val_batch, BATCH_SIZE, N_CLASSES)
test_loss = cnn_model.losses(test_logits, val_label_batch)
test_acc = cnn_model.evaluation(test_logits, val_label_batch)

# Merge all summaries for logging
summary_op = tf.summary.merge_all()

# Create a session
sess = tf.Session()
# Create a writer for the log files
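The example breaks off after creating the session; a hedged sketch of how such queue-based training loops typically continue (MAX_STEP and the log/save intervals are placeholders):

train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()

sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

try:
    for step in range(MAX_STEP):  # MAX_STEP is a placeholder
        if coord.should_stop():
            break
        _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
        if step % 100 == 0:
            print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                  (step, tra_loss, tra_acc * 100.0))
            train_writer.add_summary(sess.run(summary_op), step)
        if step % 2000 == 0 or (step + 1) == MAX_STEP:
            saver.save(sess, logs_train_dir + '/model.ckpt', global_step=step)
except tf.errors.OutOfRangeError:
    print('Done training -- epoch limit reached')
finally:
    coord.request_stop()
coord.join(threads)
sess.close()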