Code example #1
def eval():
    keep_prob = tf.placeholder(tf.float32)
    images, labels = input.get_data('eval', FLAGS.batch_size)
    hypothesis, cross_entropy, train_step = model.make_network(images, labels, keep_prob)

    with tf.Session() as sess:
        saver = tf.train.Saver()

        if tf.gfile.Exists(FLAGS.checkpoint_dir + '/model.ckpt'):
            saver.restore(sess, FLAGS.checkpoint_dir + '/model.ckpt')
        else:
            print('Cannot find checkpoint file: ' + FLAGS.checkpoint_dir + '/model.ckpt')
            return

        delta = datetime.timedelta()
        max_steps = 10
        true_count = 0.
        total_sample_count = max_steps * FLAGS.batch_size

        top_k_op = tf.nn.in_top_k(hypothesis, labels, 1)
        tf.train.start_queue_runners(sess=sess)

        for i in range(0, max_steps):
            start = datetime.datetime.now()
            predictions = sess.run(top_k_op, feed_dict={keep_prob: 1.0})
            true_count += np.sum(predictions)
            delta += datetime.datetime.now() - start

    print('total sample count: %d' % total_sample_count)
    print('precision @ 1: %f' % (true_count / total_sample_count))
    print('average time per batch: %f seconds' %
          ((delta.seconds + delta.microseconds / 1E6) / max_steps))
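For reference, tf.nn.in_top_k used above returns one boolean per example, True when the true label is among the k highest logits; summing the booleans gives true_count. A minimal self-contained check with made-up logits (TF 1.x):

import tensorflow as tf

# three examples, three classes; row i holds the logits for example i
logits = tf.constant([[0.1, 0.8, 0.1],
                      [0.9, 0.05, 0.05],
                      [0.2, 0.3, 0.5]])
labels = tf.constant([1, 0, 1])  # true class per example

correct = tf.nn.in_top_k(logits, labels, 1)  # True where label is the argmax

with tf.Session() as sess:
    print(sess.run(correct))  # [ True  True False]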
Code example #2
def run_evaluating():

    eval_data, eval_label = input_data.get_files(FLAGS.eval_dir)
    eval_batch, eval_label_batch = input_data.get_batch(
        eval_data, eval_label, FLAGS.height, FLAGS.width, FLAGS.batch_size,
        FLAGS.capacity)

    keep_prob = tf.placeholder(tf.float32)

    hypothesis, cross_entropy, eval_step = model.make_network(
        eval_batch, eval_label_batch, keep_prob)

    cost_sum = tf.summary.scalar("cost_eval", cross_entropy)

    eval_accuracy = tf.nn.in_top_k(hypothesis, eval_label_batch, 1)
    eval_acc = model.evaluation(hypothesis, eval_label_batch)

    saver = tf.train.Saver()

    print('Start Evaluation......')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        total_sample_count = FLAGS.eval_steps * FLAGS.batch_size
        true_count = 0

        writer = tf.summary.FileWriter(FLAGS.log_dir)
        writer.add_graph(sess.graph)  # Show the graph

        merge_sum = tf.summary.merge_all()

        saver.restore(sess, './CNN_Homework/logs/model.ckpt-36000')

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for step in np.arange(FLAGS.eval_steps):  # matches total_sample_count
            # fetch everything in one run so every tensor sees the same batch,
            # and skip the optimizer step during evaluation
            summary, eval_loss, predictions, accuracy = sess.run(
                [merge_sum, cross_entropy, eval_accuracy, eval_acc],
                feed_dict={keep_prob: 1.0})
            writer.add_summary(summary, global_step=step)

            true_count = true_count + np.sum(predictions)

            if step % 10 == 0:
                print('step : %d, loss : %f, eval_accuracy : %f' %
                      (step, eval_loss, accuracy * 100))

        coord.request_stop()
        coord.join(threads)

        print('precision : %f' % (true_count / total_sample_count))

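Hard-coding the step number in the restore path above ('model.ckpt-36000') ties the script to one particular training run. tf.train.latest_checkpoint resolves the newest checkpoint recorded in a directory; a minimal sketch, assuming the checkpoints live under FLAGS.log_dir:

ckpt_path = tf.train.latest_checkpoint(FLAGS.log_dir)  # e.g. '.../model.ckpt-36000'
if ckpt_path is not None:
    saver.restore(sess, ckpt_path)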
Code example #3
def run_training():
    # reset the graph before creating any ops; placeholders created earlier
    # would otherwise belong to a discarded graph
    tf.reset_default_graph()

    train, train_label = input_data.get_files(FLAGS.train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, FLAGS.height, FLAGS.width, FLAGS.batch_size,
        FLAGS.capacity)

    keep_prob = tf.placeholder(tf.float32)

    hypothesis, cross_entropy, train_step = model.make_network(
        train_batch, train_label_batch, keep_prob)

    cost_sum = tf.summary.scalar("cost", cross_entropy)

    train_acc = model.evaluation(hypothesis, train_label_batch)

    with tf.Session() as sess:
        summary_op = tf.summary.merge_all()
        # logs_dir, logs_train_dir and MAX_STEP are module-level names
        # not shown in this snippet
        train_writer = tf.summary.FileWriter(logs_dir, sess.graph)
        saver = tf.train.Saver()

        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                _, tra_loss, tra_acc = sess.run(
                    [train_step, cross_entropy, train_acc],
                    feed_dict={keep_prob: 0.7})

                if step % 50 == 0:
                    print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                          (step, tra_loss, tra_acc * 100.0))
                    # the cost summary depends on cross_entropy, so it needs
                    # the dropout placeholder as well
                    summary_str = sess.run(summary_op,
                                           feed_dict={keep_prob: 0.7})
                    train_writer.add_summary(summary_str, step)

                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
            coord.join(threads)
Code example #4
File: train.py  Project: JunjaK/deeplearning-lab-
def run_training():
    train, train_label = input_data.get_files(FLAGS.train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, FLAGS.height, FLAGS.width, FLAGS.batch_size,
        FLAGS.capacity)

    keep_prob = tf.placeholder(tf.float32)

    hypothesis, cross_entropy, train_step = model.make_network(
        train_batch, train_label_batch, keep_prob)

    cost_sum = tf.summary.scalar("cost", cross_entropy)
    saver = tf.train.Saver()

    train_acc = model.evaluation(hypothesis, train_label_batch)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        writer = tf.summary.FileWriter(FLAGS.log_dir)
        writer.add_graph(sess.graph)  # Show the graph

        merge_sum = tf.summary.merge_all()

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for step in np.arange(FLAGS.max_steps + 1):
            _, summary, tra_loss, tra_acc = sess.run(
                [train_step, merge_sum, cross_entropy, train_acc],
                feed_dict={keep_prob: 0.7})
            writer.add_summary(summary, global_step=step)

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
            if step % 2000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        coord.request_stop()
        coord.join(threads)

Code example #5
def train_model(remove_features, label_names, train_path, val_path, batch_size,
                seq_len, num_epoch, ctx, load_epoch, name):
    import os
    from test import predict, score, make_submission
    data_names = [
        i[:-4] for i in os.listdir(train_path)
        if i.endswith('.npy') and not (i[:-4] in remove_features)
    ]
    train_iter = SeqDataIter(train_path,
                             batch_size,
                             data_names=data_names,
                             label_names=label_names,
                             shuffle=True,
                             usampling=True,
                             seq_len=seq_len)
    val_iter = SeqDataIter(val_path,
                           batch_size,
                           data_names=data_names,
                           label_names=label_names,
                           shuffle=False,
                           max_len=batch_size * 1000,
                           seq_len=seq_len)

    sym = make_network(train_iter, seq_len)
    sym = add_loss(sym)

    # train(), exp_dir and test_path are assumed to be defined at module
    # level; they do not appear in this snippet
    model = train(sym,
                  train_iter,
                  val_iter,
                  name=name,
                  load_epoch=load_epoch,
                  batch_size=batch_size,
                  exp_dir=exp_dir)

    test_iter = DataIter(test_path,
                         batch_size,
                         data_names=data_names,
                         label_names=[],
                         shuffle=False)
    score(val_iter, 7, name)
    prediction = predict(test_iter, 7, name)
    make_submission(prediction, name)
Code example #6
def train():
    keep_prob = tf.placeholder(tf.float32)
    images, labels = input.get_data('train', FLAGS.batch_size)
    hypothesis, cross_entropy, train_step = model.make_network(images, labels, keep_prob)

    with tf.Session() as sess:
        saver = tf.train.Saver()

        if tf.gfile.Exists(FLAGS.checkpoint_dir + '/model.ckpt'):
            saver.restore(sess, FLAGS.checkpoint_dir + '/model.ckpt')
        else:
            # initialize_all_variables() is deprecated in TF 1.x
            sess.run(tf.global_variables_initializer())

        tf.train.start_queue_runners(sess=sess)

        for step in range(FLAGS.max_steps):
            sess.run(train_step, feed_dict={keep_prob: 0.7})
            print(step, sess.run(cross_entropy, feed_dict={keep_prob: 1.0}))

            if step % 100 == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess, FLAGS.checkpoint_dir + '/model.ckpt')
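The tf.gfile.Exists(... + '/model.ckpt') test used in examples #1 and #6 fails with V2-format checkpoints, which are written as several files (model.ckpt-NNN.index, model.ckpt-NNN.data-...) plus a checkpoint index file. Examples #8 and #10 below use tf.train.get_checkpoint_state, which reads that index file; the restore-or-initialize pattern is a small sketch that drops into the same session block as above:

ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
    saver.restore(sess, ckpt.model_checkpoint_path)  # resume from the latest checkpoint
else:
    sess.run(tf.global_variables_initializer())      # cold start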
Code example #7
def run_trainuating():

    train_data, train_label, val_data, val_label = input_data.get_files(
        FLAGS.train_dir, FLAGS.ratio)
    #train_data, train_label = input_data.get_files(FLAGS.train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train_data, train_label, FLAGS.height, FLAGS.width, FLAGS.batch_size,
        FLAGS.capacity)
    val_batch, val_label_batch = input_data.get_batch(val_data, val_label,
                                                      FLAGS.height,
                                                      FLAGS.width,
                                                      FLAGS.batch_size,
                                                      FLAGS.capacity)

    keep_prob = tf.placeholder(tf.float32)

    train_hypothesis, train_cross_entropy, train_step = model.make_network(
        train_batch, train_label_batch, keep_prob)
    val_hypothesis, val_cross_entropy, val_step = model.val_make_network(
        val_batch, val_label_batch, keep_prob)

    cost_sum_train = tf.summary.scalar("cost_train", train_cross_entropy)
    cost_sum_val = tf.summary.scalar("cost_val", val_cross_entropy)

    train_acc = model.evaluation(train_hypothesis, train_label_batch)
    val_acc = model.evaluation(val_hypothesis, val_label_batch)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # passing sess.graph to the FileWriter constructor already writes the
        # graph, so no separate add_graph call is needed
        train_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
        val_writer = tf.summary.FileWriter(FLAGS.val_dir, sess.graph)

        merge_sum = tf.summary.merge_all()

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for step in np.arange(FLAGS.max_steps + 1):
            _, train_summary, train_loss, train_accuracy = sess.run(
                [train_step, merge_sum, train_cross_entropy, train_acc],
                feed_dict={keep_prob: 0.7})
            train_writer.add_summary(train_summary, global_step=step)

            if step % 50 == 0:
                print('step : %d, loss : %f, train_accuracy : %f' %
                      (step, train_loss, train_accuracy * 100))
            if step % 200 == 0:
                _, val_summary, val_loss, val_accuracy = sess.run(
                    [val_step, merge_sum, val_cross_entropy, val_acc],
                    feed_dict={keep_prob: 1.0})  # disable dropout for validation
                print(
                    '   **  step : %d, val_loss : %f, val_accuracy : %f  **' %
                    (step, val_loss, val_accuracy * 100))
                val_writer.add_summary(val_summary, global_step=step)

            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        coord.request_stop()
        coord.join(threads)

Code example #8
def evaluating():

    keep_prob = tf.placeholder(tf.float32)

    # eval_dir, train_logs_dir and eval_log_dir are module-level names
    # not shown in this snippet
    eval_image, eval_label = input_data.get_eval_files(eval_dir)
    eval_batch, eval_label_batch = input_data.get_batch(
        eval_image, eval_label, FLAGS.height, FLAGS.width,
        FLAGS.eval_batch_size, FLAGS.capacity)

    logits = model.make_network(eval_batch, eval_label_batch, keep_prob)
    loss = model.losses(logits, eval_label_batch)
    # an evaluation pass must not run an optimizer step, so no training op here
    acc = model.evaluation(logits, eval_label_batch)

    x = tf.placeholder(
        tf.float32,
        shape=[FLAGS.eval_batch_size, FLAGS.height, FLAGS.width, 3])
    y_ = tf.placeholder(tf.int16, shape=[FLAGS.eval_batch_size])

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(train_logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            #saver.restore(sess, './DeepLearning/vgg16/logs/model.ckpt')
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        tf.summary.image("ver1", eval_batch, 32)
        im = tf.summary.image("ver2", eval_batch, 32)

        summary_op = tf.summary.merge_all()
        eval_writer = tf.summary.FileWriter(eval_log_dir, sess.graph)

        try:
            for step in np.arange(FLAGS.eval_step):
                if coord.should_stop():
                    break
                e_images, e_labels = sess.run([eval_batch, eval_label_batch])
                _, e_loss, e_acc = sess.run([eval_op, loss, acc],
                                            feed_dict={
                                                x: e_images,
                                                y_: e_labels
                                            })

                summary_image = sess.run(tf.summary.image(
                    "ver3", e_images, 32))

                if step % 10 == 0:
                    print('Step %d, eval loss = %f, eval accuracy = %f%%' %
                          (step, e_loss, e_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    im = sess.run(im)

                    eval_writer.add_summary(summary_str, step)
                    eval_writer.add_summary(summary_image, step)
                    eval_writer.add_summary(im, step)

        except tf.errors.OutOfRangeError:
            print('Done evaluating -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
Code example #9
import numpy as np
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split

# get_minibatch, get_minibatch_grad, make_network, predict and accuracy are
# assumed to be defined elsewhere in the project; only sgd() is shown here


def sgd(model, X_train, y_train, gamma=0.9):  # gamma: momentum coefficient
    minibatches = get_minibatch(X_train, y_train)

    # one velocity buffer per layer, initialised to zero
    velocity = {layer: np.zeros_like(model[layer]) for layer in model}

    for _ in range(100):
        idx = np.random.randint(0, len(minibatches))
        X_mini, y_mini = minibatches[idx]

        grad = get_minibatch_grad(model, X_mini, y_mini)

        # classical momentum: v <- gamma*v + lr*g, then theta <- theta + v
        for layer in grad:
            velocity[layer] = gamma * velocity[layer] + 1e-3 * grad[layer]
            model[layer] += velocity[layer]

    return model


if __name__ == '__main__':
    X, y = make_moons(n_samples=5000, random_state=42, noise=0.1)
    x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=123)

    mean_accuracy = []

    for j in range(15):
        model = make_network()
        model = sgd(model, x_train, y_train)
        y_pred = predict(model, x_test, y_test)

        acc = accuracy(y_test, y_pred)
        mean_accuracy.append(acc)

    print(np.mean(mean_accuracy))
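The inner loop above is classical momentum: v <- gamma*v + lr*g followed by theta <- theta + v (the snippet treats the gradient as an ascent direction, hence the +=). A tiny standalone numpy illustration with made-up numbers, showing the velocity approaching lr / (1 - gamma) under a constant gradient:

import numpy as np

gamma, lr = 0.9, 1e-3           # momentum coefficient and learning rate
theta = np.zeros(1)             # a single parameter
velocity = np.zeros_like(theta)

for step in range(1, 4):
    grad = np.ones(1)           # pretend the gradient is constant
    velocity = gamma * velocity + lr * grad
    theta += velocity
    print(step, velocity, theta)
# velocity -> lr / (1 - gamma) = 0.01 as steps accumulate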
Code example #10
def training():

    keep_prob = tf.placeholder(tf.float32)

    # train_dir, train_logs_dir and val_dir are module-level names
    # not shown in this snippet
    train, train_label, val, val_label = input_data.get_files(
        train_dir, FLAGS.ratio)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, FLAGS.height, FLAGS.width, FLAGS.batch_size,
        FLAGS.capacity)
    val_batch, val_label_batch = input_data.get_batch(val, val_label,
                                                      FLAGS.height,
                                                      FLAGS.width,
                                                      FLAGS.batch_size,
                                                      FLAGS.capacity)

    logits = model.make_network(train_batch, train_label_batch, keep_prob)
    loss = model.losses(logits, train_label_batch)
    train_op = model.trainning(loss, FLAGS.learning_rate)
    acc = model.evaluation(logits, train_label_batch)

    x = tf.placeholder(tf.float32,
                       shape=[FLAGS.batch_size, FLAGS.height, FLAGS.width, 3])
    y_ = tf.placeholder(tf.int16, shape=[FLAGS.batch_size])

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(train_logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            #saver.restore(sess, './DeepLearning/vgg16/logs/model.ckpt')
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(train_logs_dir, sess.graph)
        val_writer = tf.summary.FileWriter(val_dir, sess.graph)

        try:
            for step in np.arange(FLAGS.max_steps):
                if coord.should_stop():
                    break
                tra_images, tra_labels = sess.run(
                    [train_batch, train_label_batch])
                # Note: loss/acc were built on train_batch, so feeding x and
                # y_ has no effect here (see the sketch after this example);
                # keep_prob, however, must be fed.
                _, tra_loss, tra_acc = sess.run([train_op, loss, acc],
                                                feed_dict={
                                                    keep_prob: 0.7,
                                                    x: tra_images,
                                                    y_: tra_labels
                                                })
                if step % 10 == 0:
                    print('Step %d, train loss = %f, train accuracy = %f%%' %
                          (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op,
                                           feed_dict={keep_prob: 0.7})
                    train_writer.add_summary(summary_str, step)

                if step % 200 == 0 or (step + 1) == FLAGS.max_steps:
                    val_images, val_labels = sess.run(
                        [val_batch, val_label_batch])
                    val_loss, val_acc = sess.run([loss, acc],
                                                 feed_dict={
                                                     keep_prob: 1.0,  # no dropout for validation
                                                     x: val_images,
                                                     y_: val_labels
                                                 })
                    print(
                        '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **'
                        % (step, val_loss, val_acc * 100.0))
                    summary_str = sess.run(summary_op,
                                           feed_dict={keep_prob: 1.0})
                    val_writer.add_summary(summary_str, step)

                if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                    checkpoint_path = os.path.join(train_logs_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
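A pitfall shared by Code examples #8 and #10: the network is built directly on the queue tensors (eval_batch, train_batch), so feeding x and y_ changes nothing; loss and acc always read freshly dequeued batches, and the fetched val_images are silently ignored. For the feed to matter, the graph has to be built on the placeholders themselves; a minimal sketch reusing the names from example #10:

x = tf.placeholder(tf.float32,
                   shape=[FLAGS.batch_size, FLAGS.height, FLAGS.width, 3])
y_ = tf.placeholder(tf.int16, shape=[FLAGS.batch_size])

# build the network on the placeholders so feed_dict actually routes the
# dequeued numpy batches into the graph
logits = model.make_network(x, y_, keep_prob)
loss = model.losses(logits, y_)
acc = model.evaluation(logits, y_)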