Example #1
def train_mnist_model():
    with tf.Session(config=session_config) as sess:
        mnist = create_inputs.read_data_sets(dataset_path, one_hot=True)
        # Compute the number of batches per epoch
        n_batch = mnist.train.num_examples // batch_size
        # Build the network
        x = tf.placeholder(tf.float32, [None, 784])
        y = tf.placeholder(tf.float32, [None, class_num])
        prediction, _ = model(x, input_shape, class_num)
        # Compute the loss
        loss = dk.cross_entropy_loss(prediction, y)
        # Set up the optimizer
        global_step, train_step = dk.set_optimizer(
            lr_range, num_batches_per_epoch=n_batch, loss=loss)
        # Compute the accuracy
        accuracy = dk.get_acc(prediction, y)
        # Initialize variables and start the input threads
        coord, threads = dk.init_variables_and_start_thread(sess)
        # Restore the model
        saver, start_epoch = dk.restore_model(sess, ckpt, restore_model=False)
        # Set up training summaries
        summary_dict = {'loss': loss, 'accuracy': accuracy}
        summary_writer, summary_op = dk.set_summary(sess, logdir, summary_dict)
        # Training loop (epoch = 50)
        for epoch_th in range(epoch):
            for bat in range(n_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                # Train one step
                _, loss_value, acc_value, summary_str, step = sess.run(
                    [train_step, loss, accuracy, summary_op, global_step],
                    feed_dict={
                        x: batch_xs,
                        y: batch_ys
                    })
                # Display the results
                dk.print_message(epoch_th, bat, n_batch, step, loss_value,
                                 acc_value)
                # Save the summary
                if (step + 1) % 20 == 0:
                    summary_writer.add_summary(summary_str, step)

            # Save the model
            if (epoch_th + 1) % 50 == 0:
                print('saving model...')
                saver.save(sess,
                           os.path.join(ckpt,
                                        'model_{}.ckpt'.format(epoch_th)),
                           global_step=global_step)

        dk.stop_threads(coord, threads)
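
The loss and accuracy helpers above come from the custom dk module, whose source is not shown here. Below is a minimal sketch of what dk.cross_entropy_loss and dk.get_acc could look like on top of stock TF 1.x ops, assuming prediction holds raw logits and the labels are one-hot (which matches how the examples feed them); the real dk implementation may differ.

import tensorflow as tf

def cross_entropy_loss(prediction, labels):
    # Mean softmax cross-entropy over the batch (prediction = raw logits).
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels,
                                                   logits=prediction))

def get_acc(prediction, labels):
    # Fraction of samples whose arg-max class matches the one-hot label.
    correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
    return tf.reduce_mean(tf.cast(correct, tf.float32))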
Example #2
def evaluate_result():
    with tf.Session(config=session_config) as sess:
        test_x, test_y = create_inputs(is_train)
        # test_y = tf.one_hot(test_y, depth=class_num, axis=1, dtype=tf.int32)
        # Build the network
        x = tf.placeholder(tf.float32,
                           shape=[
                               input_shape[0], input_shape[1], input_shape[2],
                               input_shape[3]
                           ])
        y = tf.placeholder(tf.float32, shape=[input_shape[0], class_num])

        prediction, endpoint = model(x, input_shape, class_num)
        # Print the model structure
        dk.print_model_struct(endpoint)
        # Compute the accuracy
        accuracy = dk.get_acc(prediction, y)
        # Initialize variables and start the input threads
        coord, threads = dk.init_variables_and_start_thread(sess)
        # Restore the model
        saver = dk.restore_model(sess, ckpt, restore_model=restore_model)
        # Show the number of parameters
        dk.show_parament_numbers()
        start_epoch = 0
        acc_list = []

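        # Evaluate on the training split or the test split, depending on is_train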
        if is_train:
            print('train_number:', train_number)
            n_batch_train = int(train_number // batch_size)
            n_batch_total = n_batch_train
        else:
            print('test_number:', test_number)
            n_batch_test = int(test_number // batch_size)
            n_batch_total = n_batch_test
        for epoch_n in range(start_epoch, epoch):
            for n_batch in range(n_batch_total):
                batch_x, batch_y = sess.run([test_x, test_y])
                # Evaluate one batch
                acc_value = sess.run(accuracy,
                                     feed_dict={
                                         x: batch_x,
                                         y: batch_y
                                     })
                # Display the results
                print('epoch_n:', epoch_n, ' n_batch:', n_batch, ' acc_value:',
                      acc_value)
                acc_list.append(acc_value)
        result = np.mean(acc_list)
        print('final result: ', result)
        dk.stop_threads(coord, threads)
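
Example #2 leans on dk's session bookkeeping: variable initialization plus queue-runner threads, checkpoint restoring, and thread shutdown. A plausible minimal version built from standard TF 1.x calls is sketched below; note that in Examples #1 and #3 dk.restore_model also returns a starting epoch, so the real helper's signature differs from this sketch.

import tensorflow as tf

def init_variables_and_start_thread(sess):
    # Initialize global/local variables and start the input queue runners.
    sess.run(tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer()))
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    return coord, threads

def restore_model(sess, ckpt_dir, restore_model=True):
    # Restore the latest checkpoint in ckpt_dir when requested.
    saver = tf.train.Saver()
    if restore_model:
        latest = tf.train.latest_checkpoint(ckpt_dir)
        if latest is not None:
            saver.restore(sess, latest)
    return saver

def stop_threads(coord, threads):
    # Ask the queue runners to stop and wait for them to finish.
    coord.request_stop()
    coord.join(threads)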
Example #3
def train_cifar10_model():
    with tf.Session(config=session_config) as sess:
        # Input pipeline entry point
        train_x, train_y = create_inputs(is_train)
        x = tf.placeholder(tf.float32, shape=input_shape)
        y = tf.placeholder(tf.float32, shape=label_shape)
        # Build the network
        prediction, _ = model(x, input_shape, class_num)
        # Compute the loss
        loss = dk.cross_entropy_loss(prediction, y)
        # Set up the optimizer
        global_step, train_step = dk.set_optimizer(
            lr_range, num_batches_per_epoch=n_batch_train, loss=loss)
        # Compute the accuracy
        accuracy = dk.get_acc(prediction, y)
        # Initialize variables and start the input threads
        coord, threads = dk.init_variables_and_start_thread(sess)
        # Set up training summaries
        summary_dict = {'loss': loss, 'accuracy': accuracy}
        summary_writer, summary_op = dk.set_summary(sess, logdir, summary_dict)
        # Restore the model
        saver, start_epoch = dk.restore_model(sess, ckpt, restore_model=False)
        # Show the number of parameters
        dk.show_parament_numbers()
        start_epoch = 0
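        # When resuming, recover the starting epoch from the restored global
        # step, rounded down to the last multiple of save_epoch_n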
        if restore_model:
            step = sess.run(global_step)
            start_epoch = int(
                step / n_batch_train / save_epoch_n) * save_epoch_n
        # Training loop
        total_step = n_batch_train * epoch
        for epoch_n in range(epoch):
            pre_index = 0
            since = time.time()
            acc_value_list = []
            for n_batch in range(n_batch_train):
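                # Slice the next batch; the final slice takes whatever remains
                # of the 50000 CIFAR-10 training images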
                if pre_index + batch_size < 50000:
                    batch_x = train_x[pre_index:pre_index + batch_size]
                    batch_y = train_y[pre_index:pre_index + batch_size]
                else:
                    batch_x = train_x[pre_index:]
                    batch_y = train_y[pre_index:]
                # Train one step
                _, loss_value, acc_value, summary_str, step = sess.run(
                    [train_step, loss, accuracy, summary_op, global_step],
                    feed_dict={
                        x: batch_x,
                        y: batch_y
                    })
                # Display the results
                dk.print_message(epoch_n, n_batch, n_batch_train, step,
                                 loss_value, acc_value)
                # Save the summary
                if (step + 1) % 20 == 0:
                    summary_writer.add_summary(summary_str, step)
                pre_index += batch_size
                # Record the batch accuracy
                acc_value_list.append(acc_value)

            # Show progress, per-batch time, and the min/max/mean accuracy
            seconds_mean = (time.time() - since) / n_batch_train
            dk.print_progress_and_time_massge(seconds_mean, step, total_step,
                                              acc_value_list)

            # Save the model
            if (epoch_n + 1) % save_epoch_n == 0:
                print('epoch_n: {} saving model...'.format(epoch_n))
                saver.save(sess,
                           os.path.join(ckpt, 'model_{}.ckpt'.format(epoch_n)),
                           global_step=global_step)

        dk.stop_threads(coord, threads)
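
Example #3 drives training through dk.set_optimizer (which owns global_step and the learning-rate schedule derived from lr_range) and dk.set_summary. The sketch below shows one way these could be assembled from TF 1.x primitives; the Adam optimizer and the exponential decay policy are assumptions, since the exact format of lr_range is not visible in these examples.

import tensorflow as tf

def set_optimizer(lr_range, num_batches_per_epoch, loss):
    # global_step lets the schedule and the checkpoints track total steps.
    global_step = tf.train.get_or_create_global_step()
    # Assumed schedule: exponential decay starting at lr_range[0], stepping
    # once per epoch; the real dk helper may use lr_range differently.
    learning_rate = tf.train.exponential_decay(lr_range[0],
                                               global_step,
                                               decay_steps=num_batches_per_epoch,
                                               decay_rate=0.95,
                                               staircase=True)
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    return global_step, train_step

def set_summary(sess, logdir, summary_dict):
    # One scalar summary per tracked tensor, merged into a single op.
    for name, tensor in summary_dict.items():
        tf.summary.scalar(name, tensor)
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(logdir, sess.graph)
    return summary_writer, summary_op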
Example #4
n_batch_train = int(train_number // batch_size)
n_batch_test = int(test_number // batch_size)
session_config = dk.set_gpu()

with tf.Session(config=session_config) as sess:
    test_x, test_y = create_inputs(is_train)
    test_y = tf.one_hot(test_y, depth=class_num, axis=1, dtype=tf.float32)
    # Build the network
    x = tf.placeholder(
        tf.float32,
        shape=[input_shape[0], input_shape[1], input_shape[2], input_shape[3]])
    y = tf.placeholder(tf.float32, shape=[input_shape[0], class_num])
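    # keep_prob=1 presumably disables dropout for evaluation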
    prediction = cnn_L4(x, input_shape, class_num, keep_prob=1)
    # Compute the accuracy
    accuracy = dk.get_acc(prediction, y)
    # Initialize variables and start the input threads
    coord, threads = dk.init_variables_and_start_thread(sess)
    # Restore the model
    saver = dk.restore_model(sess, ckpt, restore_model=restore_model)
    # Show the number of parameters
    dk.show_parament_numbers()
    start_epoch = 0
    acc_list = []
    print('train_number:', train_number)
    print('test_number:', test_number)
    if is_train:
        n_batch_total = n_batch_train
    else:
        n_batch_total = n_batch_test
    for epoch_n in range(start_epoch, epoch):