Exemplo n.º 1
0
def pred():
    """Run inference over the test set and print per-class accuracy stats.

    Uses module-level helpers `input_fn`, `mobilenetv2`, `load` and `args`.
    Assumes 14 classes — TODO confirm against the dataset definition.
    """
    batch_s = 100

    tf.reset_default_graph()
    sess = tf.Session()

    # one-epoch test pipeline
    dataset = input_fn(train=False, batch_size=batch_s, num_epochs=1)
    iterator = dataset.make_one_shot_iterator()
    image_batch, label_batch = iterator.get_next()

    logits, endpoints = mobilenetv2(image_batch, 14, is_train=False)

    # evaluate model, for classification
    correct_pred = tf.equal(tf.argmax(endpoints, 1),
                            tf.cast(label_batch, tf.int64))
    acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    saver = tf.train.Saver()
    print('[*] Try to load trained model...')
    could_load, step = load(sess, saver, args.checkpoint_dir)

    accum = 0
    objNum = np.zeros(14, dtype=float)            # samples seen per class
    correctNum = np.zeros((14, 14), dtype=float)  # confusion matrix, row = ground truth
    for i in range(50):
        try:
            la, _res, _pred, _acc = sess.run(
                [label_batch, endpoints, correct_pred, acc])

            # accumulate per-class counts and the confusion matrix
            max_pro = np.argmax(_res, axis=1)
            for j in range(la.size):
                objNum[la[j]] += 1
                if _pred[j]:
                    correctNum[la[j], la[j]] += 1
                else:
                    correctNum[la[j], max_pro[j]] += 1

            print(_acc * batch_s)
            accum = accum + _acc * batch_s

        except tf.errors.OutOfRangeError:
            # BUG FIX: the one-epoch iterator is exhausted; stop iterating.
            # The original kept looping and printed this message repeatedly.
            print("end of dataset")
            break

    print(accum)
    print("number of each class:", objNum)
    print(
        "number of correctly classified, row is ground truth, column is result:",
        correctNum)
    print("acc of each class:", np.divide(np.diag(correctNum), objNum))
    sess.close()
Exemplo n.º 2
0
def test(img_input):
    """Predict landmarks for a single input image.

    Args:
        img_input: one image array; a batch axis is prepended before feeding.

    Returns:
        The landmark values computed by the session (list with one array).
        BUG FIX: the original returned the symbolic `landmarks` tensor, which
        is unusable once the session is closed; return the fetched values.
    """
    img_exp = img_input[np.newaxis, :]
    print(img_exp.shape)
    landmarks, _, img = mobilenetv2(None, is_train=args.is_train)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        # restore the newest checkpoint from the module-level check_point_dir
        saver.restore(sess, tf.train.latest_checkpoint(check_point_dir))
        landmarks_out = sess.run([landmarks], feed_dict={img: img_exp})
        print('landmarks:', landmarks_out)
    return landmarks_out
Exemplo n.º 3
0
def evaluate_one_image(pic):
    '''Test one image against the saved models and parameters.

    Args:
        pic: argument forwarded to `get_one_image` to obtain the image array.

    Returns:
        int: index of the most probable class (argmax of the softmax output).
    '''
    image_array = get_one_image(pic)

    with tf.Graph().as_default():
        N_CLASSES = 2

        # Build the inference graph directly on the loaded image array.
        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 224, 224, 3])
        logit = mobilenet_v2.mobilenetv2(image, N_CLASSES)
        logit = tf.nn.softmax(logit)

        # you need to change the directories to yours.
        logs_train_dir = '/home/pdd/pdwork/CV_BiShe/compression/mobileNetV2_TF/myself/log/'

        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # step number is the suffix after the last '-' in the path
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
#                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            # BUG FIX: the graph is built from `image_array` above, so the
            # original `x` placeholder was dead and feeding it had no effect;
            # the placeholder and its feed were removed.
            prediction = sess.run(logit)
            max_index = np.argmax(prediction)
            return max_index
Exemplo n.º 4
0
def main():
    """Train mobilenetv2 on TFRecord data read through an input queue."""
    height = args.height
    width = args.width

    sess = tf.Session()

    # read queue: glob all tfrecords and read batches via queue runners
    glob_pattern = os.path.join(args.dataset_dir, '*.tfrecord')
    tfrecords_list = glob.glob(glob_pattern)
    filename_queue = tf.train.string_input_producer(tfrecords_list, num_epochs=None)
    img_batch, label_batch = get_batch(filename_queue, args.batch_size)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    inputs = tf.placeholder(tf.float32, [None, height, width, 3], name='input')
    # BUG FIX: the original fed `inputs` with the symbolic `img_batch` tensor
    # (feeding a tf.Tensor raises TypeError) while the loss read `label_batch`
    # from a separate dequeue, so images and labels were mismatched. Fetch one
    # matched batch per step and feed both through placeholders instead.
    labels = tf.placeholder(tf.int64, [None], name='label')

    logits, pred = mobilenetv2(inputs, num_classes=args.num_classes, is_train=args.is_train)

    # loss
    loss_ = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    # L2 regularization
    l2_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    total_loss = loss_ + l2_loss

    # evaluate model, for classification
    correct_pred = tf.equal(tf.argmax(pred, 1), labels)
    acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # learning rate decay
    base_lr = tf.constant(args.learning_rate)
    lr_decay_step = args.num_samples // args.batch_size * 2  # every epoch
    global_step = tf.placeholder(dtype=tf.float32, shape=())
    lr = tf.train.exponential_decay(base_lr, global_step=global_step,
                                    decay_steps=lr_decay_step,
                                    decay_rate=args.lr_decay)
    # optimizer; run batch-norm update ops before every training step
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(
            learning_rate=lr, beta1=args.beta1).minimize(total_loss)

    # summary
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('accuracy', acc)
    tf.summary.scalar('learning_rate', lr)
    summary_op = tf.summary.merge_all()

    # summary writer
    writer = tf.summary.FileWriter(args.logs_dir, sess.graph)

    sess.run(tf.global_variables_initializer())

    # saver for save/restore model
    saver = tf.train.Saver()
    # load pretrained model
    step = 0
    if not args.renew:
        print('[*] Try to load trained model...')
        could_load, step = load(sess, saver, args.checkpoint_dir)

    max_steps = int(args.num_samples / args.batch_size * args.epoch)

    print('START TRAINING...')
    for _step in range(step + 1, max_steps + 1):
        start_time = time.time()
        # one matched (image, label) batch from the queue
        _img, _label = sess.run([img_batch, label_batch])
        feed_dict = {global_step: _step, inputs: _img, labels: _label}
        # train
        _, _lr = sess.run([train_op, lr], feed_dict=feed_dict)
        # print logs and write summary
        if _step % 10 == 0:
            _summ, _loss, _acc = sess.run([summary_op, total_loss, acc],
                                          feed_dict=feed_dict)
            writer.add_summary(_summ, _step)
            print('global_step:{0}, time:{1:.3f}, lr:{2:.8f}, acc:{3:.6f}, loss:{4:.6f}'.format
                  (_step, time.time() - start_time, _lr, _acc, _loss))

        # save model
        if _step % 10 == 0:
            save_path = saver.save(sess, os.path.join(args.checkpoint_dir, args.model_name),
                                   global_step=_step)
            print('Current model saved in ' + save_path)

    tf.train.write_graph(sess.graph_def, args.checkpoint_dir, args.model_name + '.pb')
    save_path = saver.save(sess, os.path.join(args.checkpoint_dir, args.model_name),
                           global_step=max_steps)
    print('Final model saved in ' + save_path)
    # stop the queue-runner threads before closing the session
    coord.request_stop()
    coord.join(threads)
    sess.close()
    print('FINISHED TRAINING.')
Exemplo n.º 5
0
def main():
    """Train the landmark-regression model, evaluating on a dev set periodically."""
    # read queue
    glob_pattern = os.path.join(args.dataset_dir, '9w_train.tfrecords')
    dev_record_file = os.path.join(args.dataset_dir, '1w_val.tfrecords')
    print('path', glob_pattern)
    # feedable iterator: switch between train/eval datasets via `handle`
    parser = get_record_parser()
    train_dataset = get_batch_dataset(glob_pattern, parser)
    eval_dataset = get_batch_dataset(dev_record_file, parser)
    handle = tf.placeholder(dtype=tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(handle,
                                                   train_dataset.output_types,
                                                   train_dataset.output_shapes)
    train_iterator = train_dataset.make_one_shot_iterator()
    eval_iterator = eval_dataset.make_one_shot_iterator()

    net_out, landmarks, _ = mobilenetv2(iterator, is_train=args.is_train)
    # Euclidean loss between predicted and ground-truth landmarks
    euclidean_loss = tf.sqrt(
        tf.reduce_mean(
            tf.reduce_sum(tf.square(
                tf.subtract(tf.cast(landmarks, tf.float32), net_out)),
                          axis=1) / 2))
    l2_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    total_loss = euclidean_loss + l2_loss

    # learning rate decay
    base_lr = tf.constant(args.learning_rate)
    lr_decay_step = args.num_samples // args.batch_size * 2  # every epoch
    global_step = tf.placeholder(dtype=tf.float32, shape=())
    lr = tf.train.exponential_decay(base_lr,
                                    global_step=global_step,
                                    decay_steps=lr_decay_step,
                                    decay_rate=args.lr_decay)

    # optimizer
    train_op = tf.train.AdamOptimizer(learning_rate=lr,
                                      beta1=args.beta1).minimize(total_loss)

    step = 0
    max_steps = int(args.num_samples / args.batch_size * args.epoch)

    print('START TRAINING...')
    with tf.Session() as sess:
        writer = tf.summary.FileWriter(args.logs_dir, sess.graph)
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        train_handle = sess.run(train_iterator.string_handle())
        eval_handle = sess.run(eval_iterator.string_handle())

        for _step in range(step + 1, max_steps + 1):
            start_time = time.time()

            _, _lr, _loss = sess.run([train_op, lr, total_loss],
                                     feed_dict={
                                         global_step: _step,
                                         handle: train_handle
                                     })
            # print logs and write summary
            if _step % 50 == 0:
                loss_sum = tf.Summary(value=[
                    tf.Summary.Value(tag="train/loss", simple_value=_loss),
                ])
                writer.add_summary(loss_sum, _step)
                writer.flush()
                # BUG FIX: the original format string had no {3} placeholder,
                # so the loss value passed to .format() was never printed.
                print(
                    'global_step:{0}, time:{1:.3f}, lr:{2:.8f}, loss:{3:.6f}'.format(
                        _step,
                        time.time() - start_time, _lr, _loss))
            # save model
            if _step % 500 == 0:
                save_path = saver.save(sess,
                                       os.path.join(args.checkpoint_dir,
                                                    args.model_name),
                                       global_step=_step)
                print('Current model saved in ' + save_path)

            # evaluation on the dev set
            if _step % 200 == 0:
                losses = []
                for _ in tqdm(range(1, args.eval_batch_size)):
                    _loss = sess.run([total_loss],
                                     feed_dict={
                                         global_step: _step,
                                         handle: eval_handle
                                     })
                    losses.append(_loss)
                t_loss = np.mean(losses)
                # BUG FIX: same missing {3} placeholder as above.
                print(
                    'global_step:{0}, time:{1:.3f}, lr:{2:.8f}, loss:{3:.6f}'.format(
                        _step,
                        time.time() - start_time, _lr, t_loss))
                loss_sum = tf.Summary(value=[
                    tf.Summary.Value(tag="eval/loss", simple_value=t_loss),
                ])
                writer.add_summary(loss_sum, _step)
                writer.flush()

        tf.train.write_graph(sess.graph_def, args.checkpoint_dir,
                             args.model_name + '.pb')
        save_path = saver.save(sess,
                               os.path.join(args.checkpoint_dir,
                                            args.model_name),
                               global_step=max_steps)
        print('Final model saved in ' + save_path)
    print('FINISHED TRAINING.')
import tensorflow as tf
import tensorflow.contrib.slim as slim
from mobilenet_v2 import mobilenetv2, mobilenet_arg_scope
from cnn_utils import create_readable_names_for_imagenet_labels
import cv2
import numpy as np

# Build an ImageNet-inference graph for a single HWC uint8 image.
inputs = tf.placeholder(tf.uint8, [None, None, 3])
images = tf.expand_dims(inputs, 0)
# scale uint8 pixels to [-1, 1]
images = tf.cast(images, tf.float32) / 128. - 1
images.set_shape((None, None, None, 3))
images = tf.image.resize_images(images, (224, 224))

with slim.arg_scope(mobilenet_arg_scope(is_training=False)):
    logits, endpoints = mobilenetv2(images)

# Restore using exponential moving average since it produces (1.5-2%) higher
# accuracy
ema = tf.train.ExponentialMovingAverage(0.999)
# renamed from `vars`: never shadow the `vars` builtin
ema_vars = ema.variables_to_restore()

saver = tf.train.Saver(ema_vars)

# debug: list every variable in the graph
print(len(tf.global_variables()))
for var in tf.global_variables():
    print(var)
checkpoint_path = "mobilenet_v2_1.ckpt"
image_file = "panda.jpg"
with tf.Session() as sess:
    saver.restore(sess, checkpoint_path)
Exemplo n.º 7
0
#glob_pattern = os.path.join(args.dataset_dir, '*.tfrecord')
#tfrecords_list = glob.glob(glob_pattern)
#filename_queue = tf.train.string_input_producer(tfrecords_list, num_epochs=None)
#img_batch, label_batch = get_batch(filename_queue, args.batch_size)
# Training-setup fragment: builds the input pipeline and the loss/accuracy graph.
# NOTE(review): `sess`, `width`, `height`, `input_data` and `learning_rate` must
# be defined earlier in the file — they are not visible in this chunk.
train_dir = '/home/pdd/pdwork/CV_BiShe/picture/picTest/'

train, train_label = input_data.get_files(train_dir)

# presumably batch_size=100 and queue capacity=10000 — confirm get_batch's signature
img_batch, label_batch = input_data.get_batch(train, train_label, width,
                                              height, 100, 10000)
##
# start the queue-runner threads that feed img_batch / label_batch
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

#inputs = tf.placeholder(tf.float32, [None, height, width, 3], name='input')
# two-class classifier built directly on the queued batch
logits, pred = mobilenetv2(img_batch, num_classes=2, is_train=True)

# loss
loss_ = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_batch,
                                                   logits=logits))
# L2 regularization
l2_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
total_loss = loss_ + l2_loss

# evaluate model, for classification
correct_pred = tf.equal(tf.argmax(pred, 1), tf.cast(label_batch, tf.int64))
acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# learning rate decay
base_lr = tf.constant(learning_rate)
Exemplo n.º 8
0
def run_training():
    """Train mobilenetv2, resuming from a checkpoint when available, and stop
    early once training accuracy stays above 0.95 for 30 consecutive steps."""
    tf.reset_default_graph()

    sess = tf.Session()

    # input pipeline
    iterator = input_fn(train=True,
                        batch_size=args.batch_size,
                        num_epochs=args.epoch)
    image_batch, label_batch = iterator.get_next()

    logits, pred = mobilenetv2(image_batch,
                               num_classes=args.num_classes,
                               is_train=True)

    # loss
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_batch,
                                                       logits=logits))

    # L2 regularization
    l2_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    total_loss = loss + l2_loss

    # learning rate decay
    base_lr = tf.constant(args.learning_rate)
    lr_decay_step = args.num_samples // args.batch_size * 2  # every epoch
    global_step = tf.placeholder(dtype=tf.float32, shape=())
    lr = tf.train.exponential_decay(base_lr,
                                    global_step=global_step,
                                    decay_steps=lr_decay_step,
                                    decay_rate=args.lr_decay)

    # optimizer; run batch-norm update ops before every training step
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(
            learning_rate=lr, beta1=args.beta1).minimize(total_loss)

    # evaluate model, for classification with training data
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.cast(label_batch, tf.int64))
    acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # summary
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('accuracy', acc)
    tf.summary.scalar('learning_rate', lr)
    summary_op = tf.summary.merge_all()

    # summary writer
    writer = tf.summary.FileWriter(args.logs_dir, sess.graph)

    sess.run(tf.global_variables_initializer())

    # saver for save/restore model
    saver = tf.train.Saver()
    # BUG FIX: `step = 0` used to come AFTER the checkpoint load, discarding
    # the restored step so training always restarted from step 1. Initialize
    # first and let a successful load overwrite it.
    step = 0
    if not args.renew:
        print('[*] Try to load trained model...')
        could_load, step = load(sess, saver, args.checkpoint_dir)

    max_steps = int(args.num_samples / args.batch_size * args.epoch)
    print('max step:', max_steps)

    print('START TRAINING...')
    con_step = 0  # consecutive high-accuracy steps, for early stopping
    _acc = 0
    _step = step  # keep `_step` defined even if the loop body never runs
    for _step in range(step + 1, max_steps + 1):
        start_time = time.time()
        feed_dict = {global_step: _step}

        _, _lr, _acc = sess.run([train_op, lr, acc], feed_dict=feed_dict)

        # print logs and write summary
        if _step % 10 == 0:
            feed_dict = {global_step: _step}
            _summ, _loss, _acc, _pred = sess.run(
                [summary_op, total_loss, acc, correct_pred],
                feed_dict=feed_dict)
            writer.add_summary(_summ, _step)
            print(
                'global_step:{0}, time:{1:.3f}, lr:{2:.8f}, acc:{3:.6f}, loss:{4:.6f}'
                .format(_step,
                        time.time() - start_time, _lr, _acc, _loss))

        # save model
        if _step % 50 == 0:
            save_path = saver.save(sess,
                                   os.path.join(args.checkpoint_dir,
                                                args.model_name),
                                   global_step=_step)
            print('Current model saved in ' + save_path)

        print("acc:", _acc)
        # early stop after 30 consecutive steps with accuracy above 0.95
        if _acc > 0.95:
            con_step = con_step + 1
            if con_step >= 30:
                break
        else:
            con_step = 0

    tf.train.write_graph(sess.graph_def, args.checkpoint_dir,
                         args.model_name + '.pb')
    save_path = saver.save(sess,
                           os.path.join(args.checkpoint_dir, args.model_name),
                           global_step=_step)

    print('Final model saved in ' + save_path)
    sess.close()
    print('FINISHED TRAINING.')
Exemplo n.º 9
0
    labels.append(13)
  else:
    sys.exit("Unexpected image name!")


# Shuffle image paths and labels together so each pair stays aligned.
# NOTE(review): `addrs`, `labels`, `shuffle`, `args` and `ckpt_dir` come from
# earlier in the file — not visible in this chunk.
c = list(zip(addrs, labels))
shuffle(c)
addrs, labels = zip(*c)

# model_file

tf.reset_default_graph()

images = tf.placeholder(tf.float32, [None, args.width, args.height, 3], name='inputs')

# build the 14-class classifier in inference mode
logits, pred = mobilenetv2(images, 14, is_train=False)

sess = tf.Session()
saver=tf.train.Saver()

# restore the most recent checkpoint if one exists
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if ckpt and ckpt.model_checkpoint_path:
    ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
    # ckpt_name = 'mobilenetv2-625'
    saver.restore(sess, os.path.join(ckpt_dir, ckpt_name))
    print("[*] Success to read {}".format(ckpt_name))
else:
    print("[*] Failed to find a checkpoint")

# accuracy accumulator for the evaluation loop that follows this chunk
acc = 0