Example #1
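These snippets are excerpts from larger training scripts, so none of them carry their own imports; cfg, generator, model, and input_data are project-local modules, and constants such as IMG_W, BATCH_SIZE, MAX_STEP, and learning_rate are defined elsewhere in each project. A minimal sketch of the shared imports they assume:

    import os
    import time
    import datetime

    import numpy as np
    import tensorflow as tf  # TF 1.x; the queue-runner API used throughout was removed in TF 2
    from tqdm import tqdm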
def train():
    """Train the model."""
    # NOTE: X and Y are fed via feed_dict below, but model.model() is built on
    # train_batch, so these placeholders are never wired into the graph.
    X = tf.placeholder(tf.float32, [
        cfg.CFG['batch_size'], cfg.CFG['image_size'], cfg.CFG['image_size'],
        cfg.CFG['image_channel']
    ],
                       name='train-input')
    Y = tf.placeholder(tf.float32, [None], name='label-input')
    train, train_label = generator.get_files('./dataset/')

    train_batch, train_label_batch = generator.get_batches(
        train, train_label, cfg.CFG["image_size"], cfg.CFG["image_size"],
        cfg.CFG["batch_size"], 20)

    train_logits = model.model(train_batch, cfg.CFG["batch_size"],
                               cfg.CFG["classes"])

    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, cfg.CFG["learning_rate"])
    train_acc = model.evaluation(train_logits, train_label_batch)

    sess = tf.Session()
    saver = tf.train.Saver(max_to_keep=5)
    sess.run(tf.global_variables_initializer())

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        ptar = tqdm(range(50))
        for step in ptar:
            if coord.should_stop():
                break

            image_data, label = sess.run([train_batch, train_label_batch])

            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc],
                                            feed_dict={
                                                X: image_data,
                                                Y: label
                                            })

            ptar.set_description(
                "iteration %d: train loss = %.4f, train accuracy = %.4f"
                % (step, tra_loss, tra_acc))

            checkpoint_path = (cfg.CFG['model_path'] +
                               'flame_dector_%.4f.ckpt' % tra_acc)
            # print(checkpoint_path)
            saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training')
    finally:
        coord.request_stop()
    coord.join(threads)
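In Example #1, model.model() is built directly on train_batch, so the X and Y placeholders that get fed are never part of the computed subgraph. A hedged sketch of wiring them in (an assumption about intent, not the author's code):

    train_logits = model.model(X, cfg.CFG['batch_size'], cfg.CFG['classes'])
    train_loss = model.losses(train_logits, Y)
    # the feed_dict now actually drives the fetched ops:
    _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc],
                                    feed_dict={X: image_data, Y: label})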
Example #2
def run_training():
    # dataset
    train_dir = r'D:\Python\mnist\test_my\big_0_1_4/'  # My dir--20170727-csq
    # logs_train_dir holds the data written during training, for viewing in TensorBoard
    logs_train_dir = r'D:\PyCharm_code\Ai\Tensorflow_mooc_note\6\MinstNew\logs\train/'

    # get the image and label lists
    train, train_label = input_data.get_files(train_dir)
    # build batches
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    # feed the batches into the model
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # get the loss
    train_loss = model.losses(train_logits, train_label_batch)
    # train
    train_op = model.trainning(train_loss, learning_rate)
    # get the accuracy
    train__acc = model.evaluation(train_logits, train_label_batch)
    # merge all summaries
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    # write summaries
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        # restore the latest checkpoint once, before the training loop
        ckpt = tf.train.get_checkpoint_state(logs_train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                # save the model every 2000 steps, into checkpoint_path
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
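Because the restore happens against logs_train_dir, training can also resume its step counter from the checkpoint filename; a short sketch of that pattern (Example #22 below does the same thing):

    ckpt = tf.train.get_checkpoint_state(logs_train_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # checkpoint paths end in '-<global_step>', e.g. 'model.ckpt-2000'
        step = int(ckpt.model_checkpoint_path.split('-')[-1])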
Example #3
def train():
    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()
        images, labels = input.createBatch(tfrecords_name, batch_size)
        logits = model.inference(images, batch_size, n_classes)
        loss = model.loss(logits, labels)
        accuracy = model.evaluation(logits, labels)
        train_op = model.trainning(loss, learning_rate, global_step)

        class _LoggerHook(tf.train.SessionRunHook):
            """Logs loss and runtime."""
            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def before_run(self, run_context):
                self._step += 1
                # ask the session to also return the loss and accuracy values
                return tf.train.SessionRunArgs([loss, accuracy])

            def after_run(self, run_context, run_values):
                if self._step % log_frequency == 0:
                    current_time = time.time()
                    duration = current_time - self._start_time
                    self._start_time = current_time
                    [loss_value, accuracy_value] = run_values.results
                    examples_per_sec = log_frequency * batch_size / duration
                    sec_per_batch = float(duration / log_frequency)
                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), self._step, loss_value,
                                        examples_per_sec, sec_per_batch))
                    print('Accuracy = %.2f' % accuracy_value)

        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=train_dir,
                hooks=[
                    tf.train.StopAtStepHook(last_step=max_steps),
                    tf.train.NanTensorHook(loss),
                    tf.train.SummarySaverHook(
                        save_steps=5,
                        output_dir=board_dir,
                        summary_op=tf.summary.merge_all()),
                    _LoggerHook()
                ]) as mon_sess:
            # MonitoredTrainingSession already manages a coordinator and queue
            # runners internally, so this explicit one is redundant here
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=mon_sess, coord=coord)
            while not mon_sess.should_stop():
                mon_sess.run(train_op)
                #print('dont stop')
            coord.request_stop()
            coord.join(threads)
Example #4
def run_training():
    # dataset
    train_dir = '/Users/xcliang/PycharmProjects/cats_vs_dogs/data/train/'  # My dir--20170727-csq
    # logs_train_dir stores the data written during training, for viewing in TensorBoard
    logs_train_dir = '/Users/xcliang/PycharmProjects/cats_vs_dogs/data/saveNet'

    # Get the image and label lists
    train, train_label = input_data.get_files(train_dir)
    # Generate batch
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    # Feed the batches into the model
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # Get loss
    train_loss = model.losses(train_logits, train_label_batch)
    # train
    train_op = model.trainning(train_loss, learning_rate)
    # Get accuracy
    train__acc = model.evaluation(train_logits, train_label_batch)
    # merge summary
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    # save summary
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                # Save the model every 2000 steps and save the model in checkpoint_path
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
Example #5
def run_training():
    '''
    tf.train.Coordinator and tf.train.start_queue_runners both apparently need
    to be set up before sess.run, or nothing will run.
    The try/except/finally block below is boilerplate; use it as-is.
    '''

    # you need to change the directories to yours.
    train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
    logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'

    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()  # works with the queue runners below so threads shut down cleanly if an error occurs
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        # When done, ask the threads to stop.
        coord.request_stop()

    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
Example #6
def run_training():

    # you need to change the directories to yours.
    s_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+YuanTu'
    T_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+X_mid'
    logs_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+'
    s_train, s_train_label = input_data.get_files(s_train_dir)
    s_train_batch, s_train_label_batch = input_data.get_batch(
        s_train, s_train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    T_train, T_train_label = input_data.get_files(T_train_dir)

    T_train_batch, T_train_label_batch = input_data.get_batch(
        T_train, T_train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(s_train_batch, T_train_batch, BATCH_SIZE,
                                   N_CLASSES)
    train_loss = model.losses(train_logits, s_train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, s_train_label_batch)

    summary_op = tf.summary.merge_all()  # summary op
    sess = tf.Session()  # create the session
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()  # save op

    sess.run(tf.global_variables_initializer())  # initialize all variables
    coord = tf.train.Coordinator()  # multi-thread coordinator
    threads = tf.train.start_queue_runners(
        sess=sess, coord=coord)  # start the queue runners

    # start the training loop
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                # run the summary op and write out the result
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 800 == 0 or (step + 1) == MAX_STEP:
                # save the current model and weights to logs_train_dir; global_step is the current iteration
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #7
def fit():
    train_logits1, train_logits2, train_logits3, train_logits4, train_logits5, train_logits6, train_logits7 = model.inference(
        image_holder, keep_prob)

    train_loss1, train_loss2, train_loss3, train_loss4, train_loss5, train_loss6, train_loss7 = model.losses(
        train_logits1, train_logits2, train_logits3, train_logits4,
        train_logits5, train_logits6, train_logits7, label_holder)
    train_op1, train_op2, train_op3, train_op4, train_op5, train_op6, train_op7 = model.trainning(
        train_loss1, train_loss2, train_loss3, train_loss4, train_loss5,
        train_loss6, train_loss7, learning_rate)

    train_acc = model.evaluation(train_logits1, train_logits2, train_logits3,
                                 train_logits4, train_logits5, train_logits6,
                                 train_logits7, label_holder)

    input_image = tf.summary.image('input', image_holder)

    summary_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))

    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    start_time1 = time.time()
    for step in range(count):
        x_batch, y_batch = get_batch()
        start_time2 = time.time()
        time_str = datetime.datetime.now().isoformat()
        feed_dict = {
            image_holder: x_batch,
            label_holder: y_batch,
            keep_prob: 0.5
        }
        _, _, _, _, _, _, _, tra_loss1, tra_loss2, tra_loss3, tra_loss4, tra_loss5, tra_loss6, tra_loss7, acc, summary_str = sess.run(
            [
                train_op1, train_op2, train_op3, train_op4, train_op5,
                train_op6, train_op7, train_loss1, train_loss2, train_loss3,
                train_loss4, train_loss5, train_loss6, train_loss7, train_acc,
                summary_op
            ], feed_dict)
        train_writer.add_summary(summary_str, step)
        duration = time.time() - start_time2
        tra_all_loss = tra_loss1 + tra_loss2 + tra_loss3 + tra_loss4 + tra_loss5 + tra_loss6 + tra_loss7

        # print(y_batch)  # debug only: check that training samples and labels match

        if step % 10 == 0:
            sec_per_batch = float(duration)
            print('%s : Step %d,train_loss = %.2f,acc= %.2f,sec/batch=%.3f' %
                  (time_str, step, tra_all_loss, acc, sec_per_batch))

        if step % 10000 == 0 or (step + 1) == count:
            checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)
    sess.close()
    print(time.time() - start_time1)
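Running seven separate train ops per step works, but a hedged alternative (an assumption, since model.trainning's internals are not shown) is to sum the seven losses and optimize them with a single op:

    total_loss = tf.add_n([train_loss1, train_loss2, train_loss3, train_loss4,
                           train_loss5, train_loss6, train_loss7])
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)
    # then one sess.run([train_op, total_loss, train_acc, summary_op], feed_dict) per step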
Example #8
def run_training():

    #train set path
    train_dir = '/raidHDD/experimentData/Dev/Knife/hackthon/upup7/'
    #output model path
    logs_train_dir = '/raidHDD/experimentData/Dev/Knife/hackthon/modelX'
    if removeLogFIle:
        if os.path.exists(logs_train_dir):
            for logFile in os.listdir(logs_train_dir):
                os.remove("{0}/{1}".format(logs_train_dir, logFile))
            print("Delete Log file success...")
    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.8  # cap GPU memory usage at 80%
    sess = tf.Session(config=config)
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
                train_writer.flush()
            # only save the final model
            if (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
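per_process_gpu_memory_fraction reserves a fixed share of GPU memory up front; the alternative used in Example #10 below lets the allocation grow on demand instead:

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)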
Example #9
def run_training():
    train_dir = "/home/sxy/PycharmProjects/defect2/data/train"
    logs_train_dir = "/home/sxy/PycharmProjects/defect2/logs/train-4"
    tf.reset_default_graph()
    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)
    # test-set accuracy (disabled)
    # test_dir="/home/sxy/PycharmProjects/defect2/data/test"
    # test,test_label=input_data.get_files(test_dir)
    # test_batch,test_label_batch=input_data.get_batch(test,
    #                                                       test_label,
    #                                                       IMG_W,
    #                                                       IMG_H,
    #                                                       BATCH_SIZE,
    #                                                       CAPACITY)
    # test_logits = model.inference(test_batch, BATCH_SIZE, N_CLASSES)
    # train_loss = model.losses(test_logits, test_label_batch)
    # train_op = model.trainning(train_loss, learning_rate)
    # train_acc = model.evaluation(test_logits, test_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print("Step %d, train loss = %.2f, train accuracy = %.2f%%" %
                      (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, "model.ckpt")
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached.")
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #10
File: trainning.py  Project: HanSolo09/MCNN
def run_training():
    starttime = datetime.datetime.now()
    # you need to change the directories to yours.
    train_dir = 'D:\\Thesis\\sample_patch\\6110_4_0\\*\\*.tif'
    logs_train_dir = 'D:\\Thesis\\classify\\6110_4_0\\logs\\train'

    train_image, train_label = input_data.new_getfiles(train_dir)
    train_image = input_data.read_images(train_image, R_Size)

    train_batch, train_label_batch = input_data.get_batch(train_image,
                                                          train_label,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)  # allow dynamic GPU memory growth
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in np.arange(MAX_STEP):
            # re-check the coordinator on every iteration
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 1 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)


            if step % 100 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
    endtime = datetime.datetime.now()
    print((endtime - starttime).seconds)
Example #11
def run_training():

    # call get_files() from the input_data module to obtain image_list and label_list
    train, train_label = input_data.get_files(train_dir)
    # obtain image_batch and label_batch
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    # run the forward pass to obtain the logits
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # compute the loss
    train_loss = model.losses(train_logits, train_label_batch)
    # optimize against the loss
    train_op = model.trainning(train_loss, learning_rate)
    # compute the classification accuracy from the logits
    train__acc = model.evaluation(train_logits, train_label_batch)
    # merge the graph and the training summaries together
    summary_op = tf.summary.merge_all()
    # create a session
    sess = tf.Session()
    # write the training logs into the logs_train_dir folder
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    # saver for the variables
    saver = tf.train.Saver()
    # initialize the variables before training
    sess.run(tf.global_variables_initializer())
    # create a thread coordinator to manage every thread started in the session
    coord = tf.train.Coordinator()
    # start the enqueue threads; normally one enqueue thread per core
    # (how many threads enqueuing actually uses is set in tf.train.batch)
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            # coord.should_stop() reports whether all threads should stop; once
            # every file in the queue has been dequeued, an OutOfRangeError is
            # raised and all threads in the session should be stopped
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])
            # print the loss and accuracy every 50 steps
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            # save the trained model every 2000 steps
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    # raised when the end of the file queue is reached
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()  # issue the stop command to all threads

    coord.join(threads)  # join the threads back and wait for them to finish
    sess.close()
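A short sketch of where the OutOfRangeError described above typically originates (an assumption about the input pipeline, since input_data.get_batch() is project-local): a filename queue created with a fixed number of epochs raises it once every file has been dequeued.

    filename_queue = tf.train.string_input_producer(image_list, num_epochs=1)
    # num_epochs is tracked in a local variable, so this is required as well:
    sess.run(tf.local_variables_initializer())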
Example #12
def run_training():

    # you need to change the directories to yours.
    train_dir = './data/train/train/'
    logs_train_dir = './logs/'

    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    with tf.name_scope("training"):
        train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
        train_loss = model.losses(train_logits, train_label_batch)
        train_op = model.trainning(train_loss, learning_rate)
        train__acc = model.evaluation(train_logits, train_label_batch)

        summary_op = tf.summary.merge_all()

        sess = tf.Session()
        train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
        saver = tf.train.Saver()

        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                _, tra_loss, tra_acc = sess.run(
                    [train_op, train_loss, train__acc])

                if step % 20 == 0:
                    print(
                        'Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                        (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(logs_train_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path,
                               global_step=step)  # save the model and its weights to logs_train_dir

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')

        finally:
            coord.request_stop()
        coord.join(threads)
        sess.close()
Example #13
def run_training1():

    # you need to change the directories to yours.
    #    train_dir = '/home/hjxu/PycharmProjects/01_cats_vs_dogs/data/train/'
    logs_train_dir = '/home/scy/eclipse-workspace/PythonProject/src/12yue3ri/logs/'
    #
    #    train, train_label = input_data.get_files(train_dir)
    tfrecords_file = '/home/scy/eclipse-workspace/PythonProject/src/12yue3ri/my_train2.tfrecords'
    train_batch, train_label_batch = cr.read_and_decode(tfrecords_file,
                                                        batch_size=BATCH_SIZE)
    train_batch = tf.cast(train_batch, dtype=tf.float32)
    train_label_batch = tf.cast(train_label_batch, dtype=tf.int64)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 5 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
        saver.save(sess, 'net/mydataset_net.ckpt')


#             if step % 4 == 0 or (step + 1) == MAX_STEP:
#                 checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
#                 saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
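cr.read_and_decode() above is project-local; a minimal sketch of what such a helper usually looks like (the feature names 'image_raw'/'label' and the 208x208x3 shape are assumptions):

    def read_and_decode(tfrecords_file, batch_size):
        queue = tf.train.string_input_producer([tfrecords_file])
        _, serialized = tf.TFRecordReader().read(queue)
        features = tf.parse_single_example(
            serialized,
            features={'image_raw': tf.FixedLenFeature([], tf.string),
                      'label': tf.FixedLenFeature([], tf.int64)})
        image = tf.decode_raw(features['image_raw'], tf.uint8)
        image = tf.reshape(image, [208, 208, 3])  # assumed image shape
        return tf.train.batch([image, features['label']],
                              batch_size=batch_size)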
Example #14
File: training.py  Project: lllttzz/my_code
def run_training():
    train_dir = './train/'  # training data location
    logs_train_dir = './save_model/'  # where the trained model is stored

    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES, True)  # forward pass
    train_loss = model.losses(train_logits, train_label_batch)  # set up the loss
    train_op = model.trainning(train_loss, learning_rate=learning_rate)  # training op
    train__acc = model.evaluation(train_logits, train_label_batch)  # training accuracy

    summary_op = tf.summary.merge_all()  # merge op that emits all summary data at once
    sess = tf.Session()  # create the session
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)  # TensorBoard log writer
    saver = tf.train.Saver()  # model saver

    sess.run(tf.global_variables_initializer())  # initialize all variables
    coord = tf.train.Coordinator()  # thread coordinator
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    csvfile = open('csv.csv', 'w', newline='')
    writer = csv.writer(csvfile)
    writer.writerow(['name','label'])

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %(step, tra_loss, tra_acc*100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    csvfile.close()
    sess.close()
Example #15
def train():

    img, label = data.read_and_decode("train.tfrecords")
    img_batch, label_batch = data.get_batch(img, label, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(img_batch)
    # s = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'ALexNet')
    # print(s)
    # for v in s:
    #     print(v.name)
    # exit(0)

    train_loss = model.losses(train_logits, label_batch)
    train_op = model.trainning(train_loss, learning_rate)

    summary_op = tf.summary.merge_all()

    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # debug preview of one batch, left disabled so the training loop
        # below is reachable:
        # im, la = sess.run([img_batch, label_batch])
        # print(im.shape, la.shape)
        # cv2.imshow("asdas", im[0, :, :, :])
        # cv2.waitKey(0)
        try:
            for step in range(MAX_STEP):
                if coord.should_stop():
                    break
                _, tra_loss = sess.run([train_op, train_loss])

                if step % 50 == 0:
                    print('Step %d, train loss = %.2f' % (step, tra_loss))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(logs_train_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
        sess.close()
Example #16
def train():
    with tf.device('/cpu:0'):
        img, label = data.read_and_decode([tf_file1, tf_file2])
        img_batch, label_batch = data.get_batch(img, label, BATCH_SIZE,
                                                CAPACITY)
    train_logits, train_weight = model.inference(img_batch)
    # s = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'ALexNet')
    # print(s)
    # for v in s:
    #     print(v.name)
    # exit(0)

    train_loss = model.losses(train_logits, label_batch)
    train_op = model.trainning(train_loss, learning_rate)

    summary_op = tf.summary.merge_all()

    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        load_parameters("Alexnet.npy", sess, 'Alexnet')
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            for step in range(MAX_STEP):
                if coord.should_stop():
                    break
                _, tra_loss = sess.run([train_op, train_loss])

                if step % 50 == 0:
                    # tra_weight.imshow()
                    print('Step %d, train loss = %.2f' % (step, tra_loss))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(logs_train_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
        sess.close()
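load_parameters() is the author's helper for pretrained AlexNet weights; a minimal sketch of how such .npy loading is often written (the {layer: [weights, biases]} file layout and the 'weights'/'biases' variable names are assumptions):

    def load_parameters(npy_path, session, scope):
        data = np.load(npy_path, encoding='latin1', allow_pickle=True).item()
        for layer, (w, b) in data.items():
            with tf.variable_scope('%s/%s' % (scope, layer), reuse=True):
                session.run(tf.get_variable('weights').assign(w))
                session.run(tf.get_variable('biases').assign(b))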
Example #17
def run_train(image_batch, label_batch, n_class, batch_size, checkpoint_dir,
              lr, MAX_STEP):
    print('{:*^70}'.format('【train starting!】'))
    global result
    if args.inference == '2D':
        result = inference(image_batch, batch_size, n_class)
    elif args.inference == '1D':
        result = inference1D(image_batch, batch_size, n_class)
    else:
        print('【Unknown inference type: %s】' % args.inference)

    print('{:*^70}'.format('DEBUG'))
    train_loss = losses(result[-1], label_batch)
    train_op = trainning(train_loss, lr)
    train__acc = evaluation(result[-1], label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(checkpoint_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(checkpoint_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()

    print('{:*^70}'.format(' train ending! '))
Example #18
def run_training():
    DIR_PRE = os.getcwd() + '/'
    train_dir = DIR_PRE + 'data/train/'
    logs_train_dir = DIR_PRE + 'logs/train/'
    os.makedirs(train_dir, exist_ok=True)
    os.makedirs(logs_train_dir, exist_ok=True)

    # get the list of all image files and their corresponding labels
    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #19
def run_training():
    
    # you need to change the directories to yours.
    train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
    logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'
    
    train, train_label = input_data.get_files(train_dir)
    
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE, 
                                                          CAPACITY)      
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)        
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)
       
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()
    
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])
               
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
                
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        
    coord.join(threads)
    sess.close()
Example #20
def run_training():

    # you need to change the directories to yours.
    # train_dir = '/home/kevin/tensorflow/hams_vs_hots/data/train/'
    train_dir = 'D:/workspace/uploadPicJudge3Class/train/'
    # logs_train_dir = '/home/kevin/tensorflow/hams_vs_hots/logs/train/'
    logs_train_dir = 'D:/workspace/uploadPicJudge3Class/logs/'

    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 10 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0) + '  ' +
                      datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S'))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 100 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #21
def run_training():
    train_dir = "./data/TRAIN/"
    logs_train_dir = "./logs/"

    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)


    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)


    summary_op = tf.summary.merge_all()
    sess = tf.Session()

    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 100 == 0:
                print('Step:', step, 'train loss:', tra_loss, 'train accuracy:', tra_acc)
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if tra_acc > 0.95 and step > 6000:
                checkpoint_path = os.path.join(logs_train_dir, "model")
                saver.save(sess, checkpoint_path, global_step=step)
                print("train success!")
                print('Step:', step, 'train loss:', tra_loss, 'train accuracy:', tra_acc)
                # request_stop() only takes effect when should_stop() is
                # checked at the top of the next iteration
                coord.request_stop()
    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached.")
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #22
def run_training(w1, b1, w2, b2, w3, b3, w4, b4, w5, b5):
    train, train_label = get_data.get_zacao_input(train_dir, label_dir)
    train_batch, train_label_batch = get_data.get_batch_jpg(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES, w1, b1,
                                   w2, b2, w3, b3, w4, b4, w5, b5)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)
    summary_op = tf.summary.merge_all()
    sess = tf.Session()

    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    step = 0
    ckpt = tf.train.get_checkpoint_state(logs_train_dir)
    if ckpt and ckpt.model_checkpoint_path:
        step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Loading success, global_step is %d' % step)
        step += 1

    try:
        while step < MAX_STEP:
            if coord.should_stop():
                break
            _, tra_loss, tra_acc, summary_str = sess.run(
                [train_op, train_loss, train__acc, summary_op])
            train_writer.add_summary(summary_str, step)

            if step % 50 == 0 or step + 1 == MAX_STEP:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
            if step % 1000 == 0 or step + 1 == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model')
                saver.save(sess, checkpoint_path, global_step=step)
            step += 1
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
Example #23
def run_training1():

    logs_train_dir = dataDir + '/logs/recordstrain/'
    #
    #    train, train_label = input_data.get_files(train_dir)
    tfrecords_file = 'Idcard_train.tfrecords'
    train_batch, train_label_batch = read_and_decode(tfrecords_file,
                                                     batch_size=BATCH_SIZE)
    train_batch = tf.cast(train_batch, dtype=tf.float32)
    train_label_batch = tf.cast(train_label_batch, dtype=tf.int64)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #24
def run_training():
    print('let us begin....')
    train_dir = '../data/train/'
    logs_train_dir = './train/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_H, IMG_W, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, lr)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()  # merge all summaries
    sess = tf.Session()

    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                print('coord stop!')
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print('Step: %d, train_loss = %.2f, train_accuracy = %.2f\n' % (
                    step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2500 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #25
def run_training():
    train_dir = '/home/fengyu/python_workspace/cats_vs_dogs/picData/train/train/'
    logs_train_dir = '/home/fengyu/python_workspace/cats_vs_dogs/logs/'

    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(train_batch, BATCH_SIZE, n_classes)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_accuracy = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()

    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_accuracy = sess.run(
                [train_op, train_loss, train_accuracy])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, accuracy = %.2f' % (
                    step, tra_loss, tra_accuracy))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                check_point_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, check_point_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #26
def run_training():

    train_dir = 'F://CAE_CNN//data//train_imgs//'
    # writes files that can be inspected with TensorBoard
    logs_train_dir = 'F://CAE_CNN//log//train//'

    # read the data
    train, train_label = input_data.get_files(train_dir)
    # build batches
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    # feed the batch into the model
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)

    train_loss = model.losses(train_logits, train_label_batch)
    # training op
    train_op = model.trainning(train_loss, learning_rate)

    train__acc = model.evaluation(train_logits, train_label_batch)
    # merge all log summaries into one op
    summary_op = tf.summary.merge_all()

    # create a session
    sess = tf.Session()
    # create a writer for the log files
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    # create a saver to store the trained model
    saver = tf.train.Saver()
    # initialize all nodes
    sess.run(tf.global_variables_initializer())

    # queue monitoring
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for step in np.arange(MAX_STEP):
        _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])
        # print the current loss and acc every 2 steps, and write the log
        if step % 2 == 0:
            print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                  (step, tra_loss, tra_acc * 100.0))
            summary_str = sess.run(summary_op)
            train_writer.add_summary(summary_str, step)
        # save the trained model every 2000 steps
        if step % 2000 == 0 or (step + 1) == MAX_STEP:
            checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)

    # stop and join the queue threads, then close the session
    coord.request_stop()
    coord.join(threads)
    sess.close()
Example #27
def run_training():
    # you need to change the directories to yours.
    tf.reset_default_graph()  # must run before any ops are added to the graph

    logs_train_dir = './logs/train/'

    train_batch, train_label_batch = read_and_decode('./data/train.tfrecords',
                                                     batch_size=BATCH_SIZE)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join('./model', 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
    print("training is done")
예제 #28
0
def run_training():
    train_dir = '/home/zhangch/RSS/train_set/'
    logs_train_dir = '/home/zhangch/RSS/log/'

    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver(max_to_keep=2100)

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 100 == 0:
                print("Step %d,train loss = %.2f,train accuracy = %.2f" %
                      (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
예제 #29
0
def run_training():
    train_dir = "E:/tensorflow_model/cats_vs_dogs/data/train/"
    logs_train_dir = "E:/tensorflow_model/cats_vs_dogs/logs/"

    # train and train_label receive the list of image paths and the list of labels
    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 100 == 0:
                print("Step %d, train loss = %.2f, train accuracy = %.2f" %
                      (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, "model.ckpt")
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached.")
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
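Note: input_data.get_files/get_batch, relied on throughout these examples, are not shown. The sketch below follows the common queue-based batching pattern; the JPEG decoding and standardization steps are assumptions, not the projects' exact code.

import tensorflow as tf

def get_batch(image_list, label_list, image_W, image_H, batch_size, capacity):
    image = tf.cast(image_list, tf.string)
    label = tf.cast(label_list, tf.int32)
    # a slice producer feeds (path, label) pairs into an input queue
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents, channels=3)
    # crop/pad and standardize so every element has the same shape and scale
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    image = tf.image.per_image_standardization(image)
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=4,
                                              capacity=capacity)
    return image_batch, tf.reshape(label_batch, [batch_size])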
예제 #30
0
File: training.py  Project: zyqgmzyq/cube
def train(tf_file, logs_train_dir):
    filenames = tf.placeholder(tf.string, shape=[None])
    training_filenames = tf_file
    with tf.device('/cpu:0'):
        iterator = data.read_and_decode(filenames, BATCH_SIZE, True)
    sess = tf.Session()
    sess.run(iterator.initializer, feed_dict={filenames: training_filenames})
    tra_img, tra_label = iterator.get_next()
    train_logits, train_weight = model.inference(tra_img)
    train_loss = model.losses(train_logits, tra_label)
    train_op = model.trainning(train_loss, learning_rate)
    summary_op = tf.summary.merge_all()
    sess.run(tf.global_variables_initializer())
    # load the pretrained weights after initialization so they are not overwritten
    load_parameters("Alexnet.npy", sess, 'Alexnet')
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in range(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss = sess.run([train_op, train_loss])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f' % (step, tra_loss))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)

    sess.close()
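Note: load_parameters("Alexnet.npy", sess, 'Alexnet') above is project code that is not shown. A minimal sketch of a typical .npy weight loader follows; the dict layout {layer_name: [weights, biases]} and the variable names 'weights'/'biases' are assumptions.

import numpy as np
import tensorflow as tf

def load_parameters(weight_file, session, scope):
    # the .npy file is assumed to hold a pickled dict of per-layer parameters
    weights_dict = np.load(weight_file, encoding='bytes', allow_pickle=True).item()
    for layer_name, params in weights_dict.items():
        # reuse the existing variables and assign the pretrained values
        with tf.variable_scope(scope + '/' + layer_name, reuse=True):
            session.run(tf.get_variable('weights').assign(params[0]))
            session.run(tf.get_variable('biases').assign(params[1]))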
예제 #31
0
def running_train(data_dir, log_dir, max_step=10000, batch_size=16, lr=0.05):
    '''Train the model on images under data_dir, writing logs and checkpoints to log_dir.'''
    image_batch, label_batch = input_data.read_files(data_dir,
                                                     batchsize=batch_size)
    print(label_batch)
    logits = model.inference(image_batch, batch_size=batch_size)
    loss = model.losses(logits, label_batch)
    train_op = model.trainning(loss, lr=lr)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(log_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(max_step):
            if coord.should_stop():
                break
            _, tra_loss = sess.run([train_op, loss])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f' % (step, tra_loss))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == max_step:
                checkpoint_path = os.path.join(log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
예제 #32
0
def training():

    train, train_label, val, val_label = input_data.get_files(train_dir, RATIO)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    val_batch, val_label_batch = input_data.get_batch(
        val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE])

    # build the graph on the placeholders so both train and val batches can be fed
    logits = model.inference(x, BATCH_SIZE, N_CLASSES)
    loss = model.losses(logits, y_)
    train_op = model.trainning(loss, learning_rate)
    acc = model.evaluation(logits, y_)


    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess= sess, coord=coord)

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(train_logs_dir, sess.graph)
        val_writer = tf.summary.FileWriter(val_logs_dir, sess.graph)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                tra_images, tra_labels = sess.run([train_batch, train_label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, acc],
                                                feed_dict={x: tra_images, y_: tra_labels})
                if step % 50 == 0:
                    print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %(step, tra_loss, tra_acc*100.0))
                    summary_str = sess.run(summary_op, feed_dict={x: tra_images, y_: tra_labels})
                    train_writer.add_summary(summary_str, step)

                if step % 200 == 0 or (step + 1) == MAX_STEP:
                    val_images, val_labels = sess.run([val_batch, val_label_batch])
                    val_loss, val_acc = sess.run([loss, acc],
                                                 feed_dict={x:val_images, y_:val_labels})
                    print('**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %(step, val_loss, val_acc*100.0))
                    summary_str = sess.run(summary_op, feed_dict={x: val_images, y_: val_labels})
                    val_writer.add_summary(summary_str, step)

                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(train_logs_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
예제 #33
0
def run_training():

    # you need to change the directories to yours.
    s_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+YuanTu'
    T_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+X_mid'
    logs_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+'
    s_train, s_train_label = input_data.get_files(s_train_dir)
    s_train_batch, s_train_label_batch = input_data.get_batch(s_train,
                                                              s_train_label,
                                                              IMG_W,
                                                              IMG_H,
                                                              BATCH_SIZE,
                                                              CAPACITY)
    T_train, T_train_label = input_data.get_files(T_train_dir)

    T_train_batch, T_train_label_batch = input_data.get_batch(T_train,
                                                              T_train_label,
                                                              IMG_W,
                                                              IMG_H,
                                                              BATCH_SIZE,
                                                              CAPACITY)

    train_logits = model.inference(s_train_batch, T_train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, s_train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, s_train_label_batch)

    summary_op = tf.summary.merge_all()  # merge all summaries
    sess = tf.Session()  # create the session
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()  # saver for checkpoints

    sess.run(tf.global_variables_initializer())  # initialize all variables
    coord = tf.train.Coordinator()  # set up the thread coordinator
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # start the queue runners

    # start the training loop
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                # run the summary op and write the result
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 800 == 0 or (step + 1) == MAX_STEP:
                # save the current model and weights to logs_train_dir; global_step is the current step
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
예제 #34
0
s_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+YuanTu'
T_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+X_mid'
logs_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+'
s_train, s_train_label = input_data.get_files(s_train_dir)
#print(s_train)
s_train_batch, s_train_label_batch = input_data.get_batch(s_train,
                                                          s_train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE, 
                                                          CAPACITY) 
#print(s_train_label_batch)
T_train, T_train_label = input_data.get_files(T_train_dir)
    
T_train_batch, T_train_label_batch = input_data.get_batch(T_train,
                                                          T_train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE, 
                                                          CAPACITY) 
#print(len(s_train),len(s_train_label),len(T_train),len(T_train_label))
#print(s_train_batch,s_train_label_batch,T_train_batch,s_train_label_batch)
train_logits = model.inference(s_train_batch,T_train_batch, BATCH_SIZE, N_CLASSES)
train_loss = model.losses(train_logits, s_train_label_batch)        
train_op = model.trainning(train_loss, learning_rate)
#correct_prediction = tf.equal(tf.argmax(train_logits,1),tf.argmax(y_,1))
labels_max = tf.reduce_max(s_train_label_batch)

sess = tf.Session()
# the queue runners must be started, otherwise sess.run on the batch op blocks forever
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
print(sess.run(labels_max))
coord.request_stop()
coord.join(threads)
sess.close()
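Note: printing tf.reduce_max of the label batch is a quick sanity check that the labels fall inside [0, N_CLASSES). An in-graph version of the same check, sketched here as an assumption rather than part of the original project, would gate the batch on an assertion:

# fail fast if any label is out of range for the classifier
assert_op = tf.Assert(tf.less(labels_max, N_CLASSES), [labels_max])
with tf.control_dependencies([assert_op]):
    checked_labels = tf.identity(s_train_label_batch)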