Example #1
import os
import time

import numpy as np
import tensorflow as tf

import input_data
import model

# IMG_W, IMG_H, BATCH_SIZE, CAPACITY, MAX_STEP, N_CLASSES and learning_rate
# are module-level constants (values as in Example #5)

def run_training():
    train_dir = 'E:/Code/Dog vs Cat/train/'
    logs_train_dir = 'E:/Code/Dog vs Cat/log/'

    train, train_label = input_data.get_file(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    time_start = time.time()
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    time_end = time.time()
    train_time = time_end - time_start
    print("train time:", train_time)
    coord.join(threads)
    sess.close()
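All of the examples on this page call input_data.get_file and input_data.get_batch, but the module itself is not shown. A minimal sketch of what such a TF1 queue-based input module typically looks like (the file layout and label scheme here are assumptions):

import os
import tensorflow as tf

def get_file(file_dir):
    # walk file_dir and collect image paths with an integer label;
    # assumption: the class is encoded in the file name, e.g. 'cat.0.jpg'
    images, labels = [], []
    for name in os.listdir(file_dir):
        images.append(os.path.join(file_dir, name))
        labels.append(0 if name.startswith('cat') else 1)
    return images, labels

def get_batch(image_list, label_list, img_w, img_h, batch_size, capacity):
    # build a TF1 input queue that decodes, resizes and standardizes
    # images, then groups them into batches
    images = tf.cast(image_list, tf.string)
    labels = tf.cast(label_list, tf.int32)
    input_queue = tf.train.slice_input_producer([images, labels])
    label = input_queue[1]
    image = tf.image.decode_jpeg(tf.read_file(input_queue[0]), channels=3)
    image = tf.image.resize_image_with_crop_or_pad(image, img_h, img_w)
    image = tf.image.per_image_standardization(image)
    image_batch, label_batch = tf.train.batch(
        [image, label], batch_size=batch_size, capacity=capacity)
    return image_batch, tf.reshape(label_batch, [batch_size])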
Example #2
def train():

    image_list, label_list = input_data.get_file(TRAIN_DIR)
    image_batch, label_batch = input_data.get_batch(image_list, label_list,
                                                    IMG_H, IMG_W, BATCH_SIZE,
                                                    CAPACITY)
    logits = model_fcn.inference(image_batch, True)
    train_loss = model_fcn.loss(logits, label_batch)
    train_op = model_fcn.backpropagation(train_loss, LEARNING_RATE)
    accuracy = model_fcn.accuracy(logits, label_batch)

    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(LOGS_DIR, sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        coord = tf.train.Coordinator()  # coordinator for the input-queue threads
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                # fetch the accuracy tensor as-is and scale in Python;
                # running accuracy * 100.0 inside sess.run would add a
                # new op to the graph on every iteration
                _, loss, tra_acc = sess.run([train_op, train_loss, accuracy])

                if step % 50 == 0:
                    print('step %d, train loss = %.2f, accuracy = %.2f%%' %
                          (step, loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(LOGS_DIR, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
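Examples #1, #2 and #5 likewise assume a companion model module exposing inference, losses/loss, training/backpropagation and evaluation/accuracy functions. A rough sketch under those assumptions, using a toy one-layer classifier rather than the CNN the originals presumably define:

import tensorflow as tf

def inference(images, batch_size, n_classes):
    # toy stand-in: flatten the image batch and apply one fully
    # connected layer producing the class logits
    flat = tf.reshape(images, [batch_size, -1])
    dim = flat.get_shape()[1].value
    weights = tf.get_variable('softmax_w', [dim, n_classes],
                              initializer=tf.truncated_normal_initializer(stddev=0.005))
    biases = tf.get_variable('softmax_b', [n_classes],
                             initializer=tf.constant_initializer(0.1))
    return tf.matmul(flat, weights) + biases

def losses(logits, labels):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    return tf.reduce_mean(cross_entropy)

def training(loss, learning_rate):
    return tf.train.AdamOptimizer(learning_rate).minimize(loss)

def evaluation(logits, labels):
    # fraction of examples whose top-1 prediction matches the label
    correct = tf.nn.in_top_k(logits, labels, 1)
    return tf.reduce_mean(tf.cast(correct, tf.float32))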
Example #3
def train():
    pre_trained_weights = './vgg_weights/vgg16.npy'
    train_dir = 'E:/Code/Dog vs Cat/train/'
    train_log_dir = './logs_vgg/'

    with tf.name_scope('input'):
        image_list, label_list = input_data.get_file(train_dir)
        image_batch, label_batch = input_data.get_batch(image_list, label_list,
                                                        IMG_W, IMG_H,
                                                        BATCH_SIZE, CAPACITY)


    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_H, IMG_W, 3])
    y_ = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

    logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)

    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # load the parameter file, assign the parameters, skip the specific layers
    tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    # val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run([image_batch, label_batch])
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={x: tra_images, y_: tra_labels})
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' % (step, tra_loss, tra_acc))
                # the summaries depend on the placeholders, so feed them here too
                summary_str = sess.run(summary_op,
                                       feed_dict={x: tra_images, y_: tra_labels})
                tra_summary_writer.add_summary(summary_str, step)

            # if step % 200 == 0 or (step + 1) == MAX_STEP:
            #     val_images, val_labels = sess.run([val_image_batch, val_label_batch])
            #     val_loss, val_acc = sess.run([loss, accuracy],
            #                                  feed_dict={x: val_images, y_: val_labels})
            #     print('**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' % (step, val_loss, val_acc))
            #
            #     summary_str = sess.run(summary_op)
            #     val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
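Example #3's tools.load_with_skip is also not shown. A plausible minimal implementation, assuming the .npy weight file maps each layer name to a [weights, biases] pair and the graph defines matching variable scopes:

import numpy as np
import tensorflow as tf

def load_with_skip(data_path, session, skip_layers):
    # assign pretrained parameters layer by layer, skipping the listed
    # scopes (e.g. fc6/fc7/fc8, which are re-trained from scratch)
    data_dict = np.load(data_path, encoding='latin1', allow_pickle=True).item()
    for layer_name in data_dict:
        if layer_name in skip_layers:
            continue
        with tf.variable_scope(layer_name, reuse=True):
            for var_name, value in zip(('weights', 'biases'), data_dict[layer_name]):
                session.run(tf.get_variable(var_name).assign(value))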
Example #4
summary_op = tf.summary.merge([
    tf.summary.scalar('loss', cost),
    tf.summary.scalar('accuracy', accuracy),
])

init = tf.global_variables_initializer()
# Launch the graph
writer = tf.summary.FileWriter(tf_logs)
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    writer.add_graph(sess.graph)

    # Keep training until reach max iterations
    progress = tqdm(range(training_iters))
    train_images, train_labels = input_data.get_file(train_dir)
    # build the input pipeline once; calling get_batch inside the loop
    # would add new queue ops to the graph on every iteration
    batch_x, batch_y = input_data.get_batch(train_images, train_labels,
                                            image_w, image_h, 10, 50)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for batch_idx in progress:
        # evaluate both queue tensors in one run so the images and labels
        # come from the same dequeued batch
        batch_xs, batch_ys = sess.run([batch_x, batch_y])
        # Run optimization op (backprop)
        sess.run(optimizer,
                 feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})

        if batch_idx % display_step == 0:
            summary, loss, acc = sess.run(
                [summary_op, cost, accuracy],
                # conventionally disable dropout when evaluating
                # (the exact keep_prob here is an assumption)
                feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
            writer.add_summary(summary, batch_idx)
            # show the current loss/accuracy on the tqdm progress bar
            progress.set_description('loss %.4f, acc %.4f' % (loss, acc))

    coord.request_stop()
    coord.join(threads)
Example #5
import tensorflow as tf

import input_data
import model

N_CLASSES = 2
IMG_W = 208
IMG_H = 208
BATCH_SIZE = 16
CAPACITY = 2000
MAX_STEP = 10000
learning_rate = 0.0001
# raw strings so the backslashes in the Windows paths are not read as escapes
train_dir = r'E:\Jupyter\catanddog\ALLPetImages'
#  train_dir = r'E:\PyCharmProject\mycatvsdog\PetImages'
logs_train_dir = r'E:\PyCharmProject\mycatvsdog\log'

train, train_label = input_data.get_file(train_dir)
train_batch, train_label_batch = input_data.get_batch(train, train_label,
                                                      IMG_W, IMG_H, BATCH_SIZE,
                                                      CAPACITY)

train_logits = model.mynn_inference(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = model.losses(train_logits, train_label_batch)
train_op = model.training(train_loss, learning_rate)
train_acc = model.evaluation(train_logits, train_label_batch)

summary_op = tf.compat.v1.summary.merge_all()

#  line chart (loss/accuracy curves)
step_list = list(range(100))
cnn_list1 = []
cnn_list2 = []
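The snippet ends before the plotting code; presumably cnn_list1 and cnn_list2 are filled with loss and accuracy values during training, one point per recorded step to match step_list. A sketch of how the lists might then be plotted, assuming matplotlib:

import matplotlib.pyplot as plt

# plot the collected training curves after the run finishes
plt.plot(step_list, cnn_list1, label='train loss')
plt.plot(step_list, cnn_list2, label='train accuracy')
plt.xlabel('recorded step')
plt.legend()
plt.show()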
Example #6
import input_data as data
train_dir = r'/Volumes/learn/learning-package/project/code_git/data_preprocessing/Training/'

IMG_W = 28
IMG_H = 28
TRAIN_BATCH_SIZE = 32

# walk the images under the training-set folder and return the
# list of image paths and the list of labels
train, train_labels = data.get_file(train_dir)
# produce the batch tensors used for training
train_batch, train_label_batch = data.get_batch(train, train_labels,
                                                IMG_W, IMG_H, TRAIN_BATCH_SIZE)
print(train_label_batch)
print('\n')
print(train_batch)
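Note that the two print calls above only describe the tensors (shape and dtype); to see actual batch values, the tensors have to be run in a session with the queue runners started. A minimal sketch:

import tensorflow as tf

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    images, labels = sess.run([train_batch, train_label_batch])
    print(images.shape, labels)  # e.g. (32, 28, 28, 3) and 32 integer labels
    coord.request_stop()
    coord.join(threads)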