Example 1
def evaluate(mnist, net_model):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, 784], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')

        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels
        }

        y = cnn.inference(net_model, x, None, train=False)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # moving_average_decay is a module-level constant in the source file.
        variable_averages = tf.train.ExponentialMovingAverage(
            moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            # model_save_path is a module-level constant in the source file.
            ckpt = tf.train.get_checkpoint_state(model_save_path)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # The step number is the suffix of the checkpoint filename.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("Step: %s, accuracy: %g" % (global_step, accuracy_score))
            else:
                print('No checkpoint file found')
                return
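
The key move in this example is restoring the exponential-moving-average shadow values in place of the raw weights. A standalone sketch of that mechanism under TF 1.x (the variable name is illustrative, not from the example):

import tensorflow as tf

v = tf.Variable(0.0, name='v')
ema = tf.train.ExponentialMovingAverage(0.99)
ema_op = ema.apply([v])  # creates the shadow variable 'v/ExponentialMovingAverage'

# variables_to_restore() maps the shadow's checkpoint name back onto v,
# so a Saver built from it loads the averaged value into v on restore.
saver = tf.train.Saver(ema.variables_to_restore())
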
Example 2
def train():
    net = models.facial_keypoint_net.copy()
    regularization_rate = 0.0001

    x = tf.placeholder("float", shape=[None, 96, 96, 1])
    y_ = tf.placeholder("float", shape=[None, 30])
    # keep_prob is never passed to cnn.inference, so feeding it below is a
    # harmless extra feed under TF 1.x.
    keep_prob = tf.placeholder("float")
    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)

    y_conv = cnn.inference(net, x, regularizer, train=True)
    # Note: any regularization losses collected by cnn.inference are not
    # added to this objective; only the raw RMSE is minimized.
    rmse = tf.sqrt(tf.reduce_mean(tf.square(y_ - y_conv)))

    VALIDATION_SIZE = 100  # validation set size
    EPOCHS = 100  # number of training epochs
    BATCH_SIZE = 64  # batch size; a slightly larger batch trains more stably
    EARLY_STOP_PATIENCE = 10  # patience (in epochs) for early stopping

    train_step = tf.train.AdamOptimizer(1e-5).minimize(rmse)

    best_validation_loss = 1000000.0
    current_epoch = 0

    # input_data() is a data-loading helper defined elsewhere in the repo.
    X, y = input_data()
    X_valid, y_valid = X[:VALIDATION_SIZE], y[:VALIDATION_SIZE]
    X_train, y_train = X[VALIDATION_SIZE:], y[VALIDATION_SIZE:]

    TRAIN_SIZE = X_train.shape[0]
    train_index = list(range(TRAIN_SIZE))
    random.shuffle(train_index)
    X_train, y_train = X_train[train_index], y_train[train_index]

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        print('begin training..., train dataset size:{0}'.format(TRAIN_SIZE))
        for i in range(EPOCHS):
            random.shuffle(train_index)  # reshuffling every epoch improves results
            X_train, y_train = X_train[train_index], y_train[train_index]

            for j in range(0, TRAIN_SIZE, BATCH_SIZE):
                # print('epoch {0}, train {1} samples done...'.format(i, j))

                train_step.run(feed_dict={x: X_train[j:j + BATCH_SIZE],
                                          y_: y_train[j:j + BATCH_SIZE], keep_prob: 0.5})

            # Computing train_loss over the full training set hangs on a weak
            # machine, so it is left commented out.
            # train_loss = rmse.eval(feed_dict={x:X_train, y_:y_train, keep_prob: 1.0})
            validation_loss = rmse.eval(feed_dict={x: X_valid, y_: y_valid, keep_prob: 1.0})

            # Multiplying by 96 reports the loss in pixel units.
            print('epoch {0} done! validation loss: {1}'.format(i, validation_loss * 96.0))
            if validation_loss < best_validation_loss:
                best_validation_loss = validation_loss
                current_epoch = i
                saver.save(sess, 'model/model.ckpt')  # save the best model as soon as it appears
            elif (i - current_epoch) >= EARLY_STOP_PATIENCE:
                print('early stopping')
                break
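
The patience rule above is easy to test in isolation. A framework-free sketch of the same logic (val_losses stands in for the per-epoch validation RMSE; the function name is made up for illustration):

def early_stop_index(val_losses, patience=10):
    best, best_epoch = float('inf'), 0
    for epoch, loss in enumerate(val_losses):
        if loss < best:
            best, best_epoch = loss, epoch  # new best: reset the clock
        elif epoch - best_epoch >= patience:
            return epoch  # no improvement for `patience` epochs: stop
    return len(val_losses) - 1

print(early_stop_index([0.9, 0.5, 0.4, 0.41, 0.42], patience=2))  # -> 4
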
Example 3
def train(mnist, model_save_path):
    lenet = models.le_net.copy()
    learning_rate_base = 0.2
    learning_rate_decay = 0.99
    regularization_rate = 0.0001
    training_steps = 30000
    moving_average_decay = 0.99

    x = tf.placeholder(tf.float32, lenet[0]['size'], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, lenet[-1]['size'][1]],
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)
    y = cnn.inference(lenet, x, regularizer, train=True)

    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        learning_rate_base, global_step,
        mnist.train.num_examples / lenet[0]['size'][0], learning_rate_decay)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(training_steps):
            xs, ys = mnist.train.next_batch(lenet[0]['size'][0])
            # Reshape flat MNIST vectors into LeNet's 4-D input shape.
            reshaped_xs = np.reshape(xs, tuple(lenet[0]['size']))
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: reshaped_xs,
                                               y_: ys
                                           })
            if i % 1000 == 0:
                print("Steps:%d,Loss:%g" % (step, loss_value))
                saver.save(sess, model_save_path, global_step=global_step)
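
For reference, tf.train.exponential_decay with staircase=False computes learning_rate * decay_rate ** (global_step / decay_steps). A plain-Python sketch, assuming MNIST's 55000 training examples and a hypothetical batch size of 100:

def exponential_decay(base_lr, global_step, decay_steps, decay_rate):
    # Same formula as tf.train.exponential_decay with staircase=False.
    return base_lr * decay_rate ** (global_step / decay_steps)

# With Example 3's constants, ten epochs of steps decay the rate by 0.99**10.
print(exponential_decay(0.2, 5500, 55000 / 100, 0.99))  # ~0.1809
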
Example 4
def train(mnist, net_model):
    learning_rate_base = 0.08
    learning_rate_decay = 0.99
    training_steps = 30000
    x = tf.placeholder(tf.float32, net_model[0]['size'], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, net_model[-1]['size'][1]],
                        name='y-input')

    y = cnn.inference(net_model, x, None, train=True)

    global_step = tf.Variable(0, trainable=False)
    # moving_average_decay is a module-level constant in the source file.
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    loss = tf.reduce_mean(cross_entropy)

    learning_rate = tf.train.exponential_decay(
        learning_rate_base, global_step,
        mnist.train.num_examples / net_model[0]['size'][0],
        learning_rate_decay)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        loss_base = 1.0  # running best loss; checkpoint only on improvement
        tf.global_variables_initializer().run()

        for i in range(1, training_steps + 1):
            xs, ys = mnist.train.next_batch(net_model[0]['size'][0])
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })

            if i % 1000 == 0:
                print("Steps:%d, Loss:%g" % (step, loss_value))
                if loss_value < loss_base:
                    loss_base = loss_value
                    # model_save_path and model_name are module-level
                    # constants in the source file.
                    saver.save(sess,
                               os.path.join(model_save_path, model_name),
                               global_step=global_step)
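
Both training examples feed one-hot labels and use tf.argmax(y_, 1) to turn them into the class indices that sparse_softmax_cross_entropy_with_logits expects. A small TF 1.x check that the sparse and dense forms agree (the constants are illustrative):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1]])
one_hot = tf.constant([[1.0, 0.0, 0.0]])

sparse = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.argmax(one_hot, 1))
dense = tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=logits, labels=one_hot)

with tf.Session() as sess:
    print(sess.run([sparse, dense]))  # both ~[0.317]
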
Example 5
def main():
    net = models.facial_keypoint_net.copy()
    SAVE_PATH = './model/model.ckpt'

    x = tf.placeholder("float", shape=[None, 96, 96, 1])
    keep_prob = tf.placeholder("float")

    img = cv2.imread('test.bmp')
    if img is None:
        print('could not read test.bmp')
        return
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (96, 96), interpolation=cv2.INTER_CUBIC)

    # Normalize pixel values to [0, 1] and add a channel dimension.
    img_x = img.reshape(96, 96, 1) / 255.0
    X = [img_x]

    y_conv = cnn.inference(net, x, None, train=False)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, SAVE_PATH)
        y_batch = y_conv.eval(feed_dict={x: X, keep_prob: 1.0})

        test = y_batch[0]
        plt.imshow(img, cmap='gray')
        # The 30 outputs are interleaved (x, y) pairs in normalized coordinates.
        for i in range(0, 30, 2):
            xx = 96 * test[i]
            yy = 96 * test[i + 1]
            plt.plot(xx, yy, '*')

        plt.savefig('out.png')

        print('predict test image done!')
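
A vectorized NumPy equivalent of the decoding done in that plotting loop (test stands in for y_batch[0]):

import numpy as np

test = np.random.rand(30)            # stand-in for one prediction vector
points = test.reshape(15, 2) * 96.0  # 15 (x, y) keypoints in pixel coordinates
xs, ys = points[:, 0], points[:, 1]  # what the loop plots one pair at a time
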
Example 6
def evaluate(mnist, model_save_path):
    fcnet = models.fcnet()
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
    y = cnn.inference(fcnet, x, None, train=False)

    validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()

    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(model_save_path)
        if ckpt and ckpt.model_checkpoint_path:
            print(ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
            accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
            print("Accuracy:%g" % (accuracy_score))
        else:
            print('No checkpoint file found')
            return
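
A minimal driver sketch for this evaluator, assuming the deprecated TF 1.x tensorflow.examples.tutorials.mnist helper and checkpoints saved under ./model/ (both paths are assumptions, not part of the example):

from tensorflow.examples.tutorials.mnist import input_data

if __name__ == '__main__':
    # one_hot=True because accuracy compares argmax over one-hot labels.
    mnist = input_data.read_data_sets('./mnist_data', one_hot=True)
    evaluate(mnist, './model/')
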