Example #1
def run_training():

    # you need to change the directories to yours.
    #train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
    train_dir = 'D:/tensorflow/mydata/cat_dog2/'  #My dir--20170727-csq
    #logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'
    logs_train_dir = 'D:/tensorflow/mylog/cat_dog2/'
    train, train_label = input_data.get_files(train_dir)
    print(train)
    print(train_label)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    print(train_batch)
    print(train_label_batch)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    print(summary_op)

    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
        saver = tf.train.Saver()

        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            for step in np.arange(MAX_STEP):
                print(step)

                if coord.should_stop():
                    break
                _, tra_loss, tra_acc = sess.run(
                    [train_op, train_loss, train__acc])

                if step % cnt_summary == 0:
                    print(
                        'Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                        (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % cnt_cache == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(logs_train_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()

        coord.join(threads)
        sess.close()
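
The function above also depends on module-level imports and hyperparameter constants (IMG_W, IMG_H, BATCH_SIZE, CAPACITY, N_CLASSES, MAX_STEP, learning_rate, cnt_summary, cnt_cache) that are not shown in this listing. A minimal sketch with assumed placeholder values, not the author's actual settings:

import os
import numpy as np
import tensorflow as tf
import input_data   # the author's input pipeline module (not shown here)
import model        # the author's CNN definition module (not shown here)

# Assumed hyperparameters; the real values are not part of this listing.
N_CLASSES = 2        # cats vs. dogs
IMG_W = 208          # images are resized to IMG_W x IMG_H before training
IMG_H = 208
BATCH_SIZE = 16
CAPACITY = 2000      # queue capacity used by input_data.get_batch
MAX_STEP = 10000
learning_rate = 0.0001
cnt_summary = 50     # write a TensorBoard summary every cnt_summary steps
cnt_cache = 2000     # save a checkpoint every cnt_cache steps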
Example #2
def evaluate_one_image():
    '''Test one image against the saved models and parameters.
    '''

    print('evaluate_one_image enter')

    # you need to change the directories to yours.
    #train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
    train_dir = 'D:/tensorflow/mypic/cat.2.jpg'
    #train, train_label = input_data.get_files(train_dir)
    #image_array = get_one_image(train)
    image_array = get_one_image1(train_dir)
    print(image_array)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        # Build the graph from a placeholder so that the image fed at run
        # time is the one actually being classified.
        x = tf.placeholder(tf.float32, shape=[208, 208, 3])
        image = tf.image.per_image_standardization(x)
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)

        logit = tf.nn.softmax(logit)

        # you need to change the directories to yours.
        #logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'
        logs_train_dir = 'D:/tensorflow/mylog/cat_dog2/'

        saver = tf.train.Saver()

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a cat with probability %.6f' % prediction[0, 0])
            else:
                print('This is a dog with probability %.6f' % prediction[0, 1])
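
evaluate_one_image() relies on a get_one_image1() helper that is not included in this listing. A plausible minimal version, assuming it only loads one image file and resizes it to the 208 x 208 RGB array the graph expects:

import numpy as np
from PIL import Image

def get_one_image1(img_path):
    # Assumed helper, not the author's code: load a single image from disk
    # and return it as a 208 x 208 x 3 array.
    image = Image.open(img_path).convert('RGB')
    image = image.resize([208, 208])
    return np.array(image)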


#%%

##run_training()
#evaluate_one_image()


def evaluate():
    '''Evaluate the saved model and parameters on batches read from a TFRecords file.
    '''

    # you need to change the directories to yours.
    #    img_dir = '/home/hjxu/PycharmProjects/01_cats_vs_dogs/222.jpg'
    #    image_array = get_one_img(img_dir)

    with tf.Graph().as_default():

        # you need to change the directories to yours.
        logs_train_dir = 'recordstrain/'
        tfrecords_file = 'tfrecords2'
        train_batch, train_batch_d, train_label_batch = cr.read_and_decode(
            tfrecords_file, batch_size=BATCH_SIZE)
        train_batch = tf.cast(train_batch, dtype=tf.float32)
        train_batch_d = tf.cast(train_batch_d, dtype=tf.float32)
        train_label_batch = tf.cast(train_label_batch, dtype=tf.int64)
        train_logits = model1.inference(train_batch, BATCH_SIZE, N_CLASSES)
        train_loss = model1.losses(train_logits, train_label_batch)
        train_op = model1.trainning(train_loss, learning_rate)
        train__acc = model1.evaluation(train_logits, train_label_batch)
        m = np.empty([MAX_STEP])  # per-step accuracy, averaged at the end
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            try:
                for step in np.arange(MAX_STEP):
                    _, tra_loss, tra_acc = sess.run(
                        [train_op, train_loss, train__acc])
                    if coord.should_stop():
                        break
                    if step % 1 == 0:
                        print(
                            'test: train loss = %.2f, train accuracy = %.2f%%'
                            % (tra_loss, tra_acc * 100.0))
                        m[step] = tra_acc
                _, tra_loss, tra_acc = sess.run(
                    [train_op, train_loss, train__acc])
            finally:
                coord.request_stop()
            coord.join(threads)
            sess.close()
            print(np.mean(m))
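
evaluate() reads its batches through cr.read_and_decode(), which is also not part of this listing. The sketch below is only an assumed implementation of such a TFRecords reader; the feature names, image shape, and the meaning of the second returned batch are guesses rather than the author's code:

import tensorflow as tf

def read_and_decode(tfrecords_file, batch_size):
    # Assumed reader: parse serialized examples and return three shuffled
    # batch tensors (image, a second data field, and the integer label).
    filename_queue = tf.train.string_input_producer([tfrecords_file])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'data_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.reshape(tf.decode_raw(features['image_raw'], tf.uint8),
                       [208, 208, 3])
    data = tf.reshape(tf.decode_raw(features['data_raw'], tf.uint8),
                      [208, 208, 3])
    label = tf.cast(features['label'], tf.int32)
    image_batch, data_batch, label_batch = tf.train.shuffle_batch(
        [image, data, label],
        batch_size=batch_size,
        num_threads=2,
        capacity=2000,
        min_after_dequeue=500)
    return image_batch, data_batch, label_batch
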
def test_one_image():
    """
    Test one image with the saved models and parameters
    """

    test_image = get_one_image(test_dir)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        # Build the graph from a placeholder so that the image fed at run
        # time is the one actually being classified.
        x = tf.placeholder(tf.float32, shape=[208, 208, 3])
        image = tf.image.per_image_standardization(x)
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model1.inference(image, BATCH_SIZE, N_CLASSES)

        logit = tf.nn.softmax(logit)

        saver = tf.train.Saver()

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(train_logs_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: test_image})
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a cat with probability %.6f' % prediction[0, 0])
            else:
                print('This is a dog with probability %.6f' % prediction[0, 1])
Example #5
def run_training():

    logs_train_dir = 'G:\\dataS-UNIWARD0.4\\logs\\train'
    logs_val_dir = 'G:\\dataS-UNIWARD0.4\\logs\\val'

    tfrecords_traindir = 'G:\\dataS-UNIWARD0.4\\S_UNIWARD0.4train.tfrecords'
    tfrecords_valdir = 'G:\\dataS-UNIWARD0.4\\S_UNIWARD0.4val.tfrecords'

    # Get image/label batches from the TFRecords files
    train_batch, train_label_batch = input_data1.read_and_decode(
        tfrecords_traindir, BATCH_SIZE)
    val_batch, val_label_batch = input_data1.read_and_decode(
        tfrecords_valdir, BATCH_SIZE)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 256, 256, 1])
    y_ = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

    logits = model1.inference(x, BATCH_SIZE, N_CLASSES)
    loss = model1.losses(logits, y_)
    acc = model1.evaluation(logits, y_)
    train_op = model1.trainning(loss, learning_rate)

    sess = tf.Session()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    summary_op = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    val_writer = tf.summary.FileWriter(logs_val_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run([train_batch, train_label_batch])
            _, tra_loss, tra_acc = sess.run([train_op, loss, acc],
                                            feed_dict={
                                                x: tra_images,
                                                y_: tra_labels
                                            })
            if step % 2 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                # summary_str = sess.run(summary_op)
                # train_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run([val_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, acc],
                                             feed_dict={
                                                 x: val_images,
                                                 y_: val_labels
                                             })
                print(
                    '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %
                    (step, val_loss, val_acc * 100.0))
                # summary_str = sess.run(summary_op)
                # val_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()


def training():

    train, train_label, val, val_label = input_data.get_files(train_dir, RATIO)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    val_batch, val_label_batch = input_data.get_batch(val, val_label, IMG_W,
                                                      IMG_H, BATCH_SIZE,
                                                      CAPACITY)

    logits = model1.inference(train_batch, BATCH_SIZE, N_CLASSES)
    loss = model1.losses(logits, train_label_batch)
    train_op = model1.trainning(loss, learning_rate)
    acc = model1.evaluation(logits, train_label_batch)

    # x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    # y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE])

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(train_logs_dir, sess.graph)
        val_writer = tf.summary.FileWriter(val_logs_dir, sess.graph)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                tra_images, tra_labels = sess.run(
                    [train_batch, train_label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, acc])
                # feed_dict={x:tra_images, y_:tra_labels})
                if step % 50 == 0:
                    print(
                        'Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                        (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % 200 == 0 or (step + 1) == MAX_STEP:
                    val_loss, val_acc = sess.run([loss, acc])

                    print(
                        '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **'
                        % (step, val_loss, val_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    val_writer.add_summary(summary_str, step)

                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(train_logs_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
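
All of the examples above call into a model/model1 module that provides inference, losses, trainning, and evaluation. Its implementation is not part of this listing; the following is only a minimal stand-in with the same signatures (a tiny CNN, softmax cross-entropy loss, an Adam optimizer, and top-1 accuracy), shown here just to make the training loops above easier to read:

import tensorflow as tf

def inference(images, batch_size, n_classes):
    # Assumed stand-in network: one conv layer, one pooling layer, and one
    # fully connected layer producing unnormalized class scores.
    conv = tf.layers.conv2d(images, filters=16, kernel_size=3,
                            padding='same', activation=tf.nn.relu)
    pool = tf.layers.max_pooling2d(conv, pool_size=2, strides=2)
    flat = tf.reshape(pool, [batch_size, -1])
    logits = tf.layers.dense(flat, n_classes, name='softmax_linear')
    return logits

def losses(logits, labels):
    # Softmax cross-entropy on integer labels, averaged over the batch.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    loss = tf.reduce_mean(cross_entropy, name='loss')
    tf.summary.scalar('loss', loss)
    return loss

def trainning(loss, learning_rate):
    # One optimization step; the global step counter tracks progress.
    optimizer = tf.train.AdamOptimizer(learning_rate)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    return optimizer.minimize(loss, global_step=global_step)

def evaluation(logits, labels):
    # Fraction of examples whose top-1 prediction matches the label.
    correct = tf.nn.in_top_k(logits, labels, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
    return accuracy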