Code Example #1
def generator_npz(data, batch_size, img_ch, img_cols, img_rows):
    # Loop forever, yielding (x, y) batches; every array in `data` is sliced
    # along its first axis in steps of batch_size.
    while True:
        for it in range(0, data[0].shape[0], batch_size):
            x, y = load_batch([l[it:it + batch_size] for l in data], img_ch,
                              img_cols, img_rows)
            yield x, y
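
A brief usage note: because the generator loops over the arrays forever, it pairs naturally with Keras' fit_generator. A minimal sketch, assuming train_data comes from load_data_from_npz() as in Code Example #2 (steps_per_epoch and epochs here are illustrative):

import numpy as np

# One epoch covers all samples, so round the step count up.
steps = int(np.ceil(train_data[0].shape[0] / batch_size))
model.fit_generator(
    generator_npz(train_data, batch_size, img_ch, img_cols, img_rows),
    steps_per_epoch=steps,
    epochs=10)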
Code Example #2
def test_small(args):

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.dev

    dataset_path = ""
    print("Dataset: {}".format(dataset_path))

    weights_path = ""
    print("Weights: {}".format(weights_path))

    # image parameters
    img_cols = 64
    img_rows = 64
    img_ch = 3

    # test parameters
    batch_size = args.batch_size

    # model
    model = get_eye_tracker_model(img_ch, img_cols, img_rows)

    # model summary
    model.summary()

    # weights
    print("Loading weights...")
    model.load_weights(weights_path)

    # data
    train_data, val_data = load_data_from_npz(dataset_path)

    print("Loading testing data...")
    x, y = load_batch([l[:] for l in val_data], img_ch, img_cols, img_rows)
    print("Done.")

    predictions = model.predict(x=x, batch_size=batch_size, verbose=1)

    # print and analyze predictions
    err_x = []
    err_y = []
    for i, prediction in enumerate(predictions):
        print("PR: {} {}".format(prediction[0], prediction[1]))
        print("GT: {} {} \n".format(y[i][0], y[i][1]))

        err_x.append(abs(prediction[0] - y[i][0]))
        err_y.append(abs(prediction[1] - y[i][1]))

    # mean absolute error
    mae_x = np.mean(err_x)
    mae_y = np.mean(err_y)

    # standard deviation
    std_x = np.std(err_x)
    std_y = np.std(err_y)

    # final results
    print("MAE: {} {} ({} samples)".format(mae_x, mae_y, len(y)))
    print("STD: {} {} ({} samples)".format(std_x, std_y, len(y)))
Code Example #3
def run():
    #Create log_dir for evaluation information
    if not os.path.exists(log_eval):
        os.mkdir(log_eval)

    #Just construct the graph from scratch again
    with tf.Graph().as_default() as graph:
        tf.logging.set_verbosity(tf.logging.INFO)
        #Get the dataset first and load one batch of images and labels tensors.
        #is_training is set to False below so that evaluation preprocessing is used.
        dataset = get_split('train', dataset_dir, file_pattern)
        images, _, labels = load_batch(dataset, batch_size=batch_size, height=image_size, width=image_size)

        #Create some information about the training steps
        num_batches_per_epoch = dataset.num_samples // batch_size
        num_steps_per_epoch = num_batches_per_epoch

        #Now create the inference model but set is_training=False
        with slim.arg_scope(inception_resnet_v2_arg_scope()):
            logits, end_points = inception_resnet_v2(images, num_classes = dataset.num_classes, is_training = False)

        #Get all the variables to restore from the checkpoint file and create the saver to restore them
        variables_to_restore = slim.get_variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        def restore_fn(sess):
            return saver.restore(sess, checkpoint_file)

        #Define only the metrics to track; no loss is needed for evaluation
        predictions = tf.argmax(end_points['Predictions'], 1)
        accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(predictions, labels)
        metrics_op = tf.group(accuracy_update)

        #Create the global step and an increment op for monitoring
        global_step = get_or_create_global_step()
        global_step_op = tf.assign(global_step, global_step + 1) #no apply_gradients op here, so increment the global_step manually
        

        #Create an evaluation step function
        def eval_step(sess, metrics_op, global_step):
            '''
            Takes in a session, runs the metrics op, and logs some timing information.
            '''
            start_time = time.time()
            _, global_step_count, accuracy_value = sess.run([metrics_op, global_step_op, accuracy])
            time_elapsed = time.time() - start_time

            #Log some information
            logging.info('Global Step %s: Streaming Accuracy: %.4f (%.2f sec/step)', global_step_count, accuracy_value, time_elapsed)

            return accuracy_value


        #Define some scalar quantities to monitor
        tf.summary.scalar('Test_Accuracy', accuracy)
        my_summary_op = tf.summary.merge_all()

        #Get your supervisor
        sv = tf.train.Supervisor(logdir = log_eval, summary_op = None, saver = None, init_fn = restore_fn)

        #Now we are ready to run in one session
        with sv.managed_session() as sess:
            for step in range(num_steps_per_epoch * num_epochs):
                sess.run(sv.global_step)
                #Print vital information at the start of every epoch
                if step % num_batches_per_epoch == 0:
                    logging.info('Epoch: %s/%s', step // num_batches_per_epoch + 1, num_epochs)
                    logging.info('Current Streaming Accuracy: %.4f', sess.run(accuracy))
                    
                #Compute summaries every 10 steps and continue evaluating
                if step % 10 == 0:
                    eval_step(sess, metrics_op = metrics_op, global_step = sv.global_step)
                    summaries = sess.run(my_summary_op)
                    sv.summary_computed(sess, summaries)
                    

                #Otherwise just run as per normal
                else:
                    eval_step(sess, metrics_op = metrics_op, global_step = sv.global_step)

            #At the end of all the evaluation, show the final accuracy
            logging.info('Final Streaming Accuracy: %.4f', sess.run(accuracy))
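
A note on the metric: streaming_accuracy accumulates over the whole session, which is why the code reads it again at epoch boundaries and once more at the very end. A minimal TF1 sketch of the same idea with explicit counters (the placeholders and variable names below are illustrative, not from this project):

import tensorflow as tf

labels_ph = tf.placeholder(tf.int64, [None])
preds_ph = tf.placeholder(tf.int64, [None])
total = tf.Variable(0.0, trainable=False)  # running count of correct predictions
count = tf.Variable(0.0, trainable=False)  # running count of examples seen
correct = tf.reduce_sum(tf.cast(tf.equal(preds_ph, labels_ph), tf.float32))
update_op = tf.group(tf.assign_add(total, correct),
                     tf.assign_add(count, tf.cast(tf.size(labels_ph), tf.float32)))
# Value at any step reflects all batches evaluated so far, not just the latest.
running_accuracy = total / tf.maximum(count, 1.0)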
Code Example #4
File: train.py  Project: rakesh4real/MAPNet
def train():

    tf.global_variables_initializer().run()

    could_load, checkpoint_counter = load()
    if could_load:
        start_epoch = checkpoint_counter // num_batches
        start_batch_id = checkpoint_counter - start_epoch * num_batches
        counter = checkpoint_counter
        print("Checkpoint loaded successfully")

    else:
        start_epoch = 0
        start_batch_id = 0
        counter = 1
        print("train from scratch...")

    train_iter = []
    train_loss = []
    IOU = 0.65
    # utils.count_params()
    print("Total train image:{}".format(len(train_img)))
    print("Total validate image:{}".format(len(valid_img)))
    print("Total epoch:{}".format(args.num_epochs))
    print("Batch size:{}".format(args.batch_size))
    print("Learning rate:{}".format(args.learning_rate))
    print("Checkpoint step:{}".format(args.checkpoint_step))

    print("Data Argument:")
    print("h_flip: {}".format(args.h_flip))
    print("v_flip: {}".format(args.v_flip))
    print("rotate: {}".format(args.rotation))
    print("clip size: {}".format(args.clip_size))
    loss_tmp = []
    for i in range(start_epoch, args.num_epochs):
        epoch_time = time.time()
        id_list = np.random.permutation(len(train_img))

        for j in range(start_batch_id, num_batches):
            img_d = []
            lab_d = []

            for ind in range(args.batch_size):
                idx = id_list[j * args.batch_size + ind]
                img_d.append(train_img[idx])
                lab_d.append(train_label[idx])

            x_batch, y_batch = load_batch(img_d, lab_d)
            feed_dict = {img: x_batch,
                         label: y_batch,
                         is_training: True}

            _, loss, pred1 = sess.run([train_step, sigmoid_cross_entropy_loss, pred], feed_dict=feed_dict)

            loss_tmp.append(loss)
            if counter % 100 == 0:
                tmp = np.median(loss_tmp)
                train_iter.append(counter)
                train_loss.append(tmp)
                print('Epoch', i, '|Iter', counter, '|Loss', tmp)
                loss_tmp.clear()

            counter += 1
        start_batch_id = 0
        print('Time:', time.time() - epoch_time)

        # saver.save(sess, './checkpoint/model.ckpt', global_step=counter)

        if i > args.start_valid and (i - args.start_valid) % args.valid_step == 0:
            val_iou = validation()
            print("Last IOU value: {}".format(IOU))
            print("New IOU value: {}".format(val_iou))
            if val_iou > IOU:
                print("Saving the checkpoint...")
                saver.save(sess, './checkpoint/model.ckpt', global_step=counter, write_meta_graph=True)
                IOU = val_iou
    saver.save(sess, './checkpoint/model.ckpt', global_step=counter)
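
A quick illustration of the resume arithmetic at the top of train(): the saved counter encodes both the completed epochs and the within-epoch batch index (the numbers below are hypothetical):

num_batches = 500
checkpoint_counter = 1250
start_epoch = checkpoint_counter // num_batches                    # 2 full epochs completed
start_batch_id = checkpoint_counter - start_epoch * num_batches    # resume at batch 250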
Code Example #5
import time
import os

import tensorflow as tf

from inception_resnet_v2 import inception_resnet_v2_arg_scope, inception_resnet_v2
from param import *

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
slim = tf.contrib.slim

if not os.path.exists(log_dir):
    os.mkdir(log_dir)

with tf.Graph().as_default() as graph:
    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = get_split('train', dataset_dir, file_pattern)
    images, _, labels = load_batch(dataset, batch_size=batch_size, height=image_size, width=image_size)

    with slim.arg_scope(inception_resnet_v2_arg_scope()):
        logits, end_points = inception_resnet_v2(images, num_classes=dataset.num_classes)
        #end_points['Predictions'] = tf.nn.softmax(net, name='Predictions')

    variables_to_restore = slim.get_variables_to_restore(exclude = exclude_list)

    predictions = tf.argmax(end_points['Predictions'], 1)
    probabilities = end_points['Predictions']
    accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(predictions, labels)
    metrics_op = tf.group(accuracy_update, probabilities)

    #m_loss =  moment_loss(end_points['group_map'])
    m_loss = group_loss(end_points['group_map'])