Example #1
import tensorflow as tf  # TF 1.x graph-mode API

# config, cls_dict, yolo3_net and calc_loss are project-local modules that
# this snippet assumes are importable.


def main():
    inputs = tf.placeholder(
        dtype=tf.float32,
        shape=[None, config.img_h, config.img_w, config.img_ch])
    y_true_l = tf.placeholder(dtype=tf.float32,
                              shape=[
                                  None, config.img_h // 8, config.img_w // 8,
                                  3, 5 + len(cls_dict.label_num_dict)
                              ])
    y_true_m = tf.placeholder(dtype=tf.float32,
                              shape=[
                                  None, config.img_h // 16, config.img_w // 16,
                                  3, 5 + len(cls_dict.label_num_dict)
                              ])
    y_true_s = tf.placeholder(dtype=tf.float32,
                              shape=[
                                  None, config.img_h // 32, config.img_w // 32,
                                  3, 5 + len(cls_dict.label_num_dict)
                              ])
    y_true = [y_true_l, y_true_m, y_true_s]

    fm1, fm2, fm3 = yolo3_net(inputs=inputs)
    # anchors = [[20, 30], [40, 50], [60, 70]]
    y_pred = [fm3, fm2, fm1]
    calc_loss.build_loss(y_pred=y_pred, y_true=y_true)
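
The three placeholder shapes follow the usual YOLOv3 target layout: one grid cell per output stride (8, 16 and 32), three anchor boxes per cell, and 5 + num_classes values per anchor (box x, y, w, h, objectness, then the class scores). A minimal sketch of that shape arithmetic, using an illustrative 416x416 input and 80 classes (both assumptions, not values from the snippet):

img_h, img_w, num_classes = 416, 416, 80   # illustrative values only
anchors_per_cell = 3
values_per_anchor = 5 + num_classes        # x, y, w, h, objectness + classes

for stride in (8, 16, 32):                 # large, medium, small grids
    grid_shape = [None, img_h // stride, img_w // stride,
                  anchors_per_cell, values_per_anchor]
    print('stride %2d -> %s' % (stride, grid_shape))
# stride  8 -> [None, 52, 52, 3, 85]
# stride 16 -> [None, 26, 26, 3, 85]
# stride 32 -> [None, 13, 13, 3, 85]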
Example #2
import tensorflow as tf  # TF 1.x Estimator API

# model_design_nn and calc_loss are project-local modules assumed here.


def yolo3_model(mode, feature, label, batch_size):
    # is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    fm1, fm2, fm3 = model_design_nn.yolo3_net(inputs=feature)
    y_pred = [fm3, fm2, fm1]

    if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
        # calc YOLOv3 detection loss
        loss = calc_loss.build_loss(y_pred=y_pred, y_true=label)

        # # calc l2 loss
        # l2_loss = tf.Variable(initial_value=0, dtype=tf.float32, trainable=False)
        # for scope_name in ['CNN_Net', 'RNN_Net', 'FC_Net']:
        #     net_tv = tf.trainable_variables(scope=scope_name)
        #     r_lambda = 0.001
        #     regularization_cost = r_lambda * tf.reduce_sum([tf.nn.l2_loss(v) for v in net_tv])
        #     loss += regularization_cost
        #     l2_loss += regularization_cost

        return y_pred, loss  # , l2_loss, net_out, tensor_dict, seq_len

    else:
        # inference
        loss = None
        # l2_loss = None
        return y_pred, loss  # , l2_loss, net_out, tensor_dict, seq_len
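
yolo3_model has the shape of a tf.estimator model-function body. A minimal sketch of how it could be wrapped into a full model_fn returning an EstimatorSpec, reusing yolo3_model from above; the optimizer, learning rate and prediction keys below are assumptions, not taken from the snippet:

def model_fn(features, labels, mode):
    # batch_size is unused by yolo3_model above, so pass a placeholder value
    y_pred, loss = yolo3_model(mode, features, labels, batch_size=None)

    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(
            loss=loss, global_step=global_step)
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)

    # PREDICT: expose the three raw feature maps under hypothetical keys
    predictions = {'fm3': y_pred[0], 'fm2': y_pred[1], 'fm1': y_pred[2]}
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)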
Example #3
import os

import tensorflow as tf  # TF 1.x graph-mode API

# config, data_loader, model_design_nn and calc_loss are project-local
# modules assumed to be importable.


def main():
    # init directory
    if not os.path.exists(config.model_save_dir):
        os.mkdir(config.model_save_dir)
    if not os.path.exists(config.summary_save_dir):
        os.mkdir(config.summary_save_dir)

    # data_load
    Data_Gen = data_loader.DataGenerator(img_dir=config.img_dir,
                                         img_h=config.img_h,
                                         img_w=config.img_w,
                                         img_ch=config.img_ch,
                                         batch_size=config.batch_size)
    Data_Gen.build_data()
    generator = Data_Gen.next_batch()

    # define train model
    images = tf.placeholder(
        dtype=tf.float32,
        shape=[None, config.img_h, config.img_w, config.img_ch])
    labels = tf.placeholder(dtype=tf.int32, shape=[None])
    logits = model_design_nn.cls_net(inputs=images, is_training=True)
    cls_loss = calc_loss.build_loss(logits=logits, labels=labels)

    # l2_loss
    l2_loss = tf.Variable(initial_value=0, dtype=tf.float32, trainable=False)
    for scope_name in ['CNN_Module', 'FC_Module']:
        module_train_vars = tf.trainable_variables(scope=scope_name)
        regularization_cost = tf.reduce_sum(
            [tf.nn.l2_loss(var) for var in module_train_vars])
        l2_loss += regularization_cost * config.l2_loss_lambda
    loss = cls_loss + l2_loss

    tf.summary.scalar("Loss/0_total_loss", loss)
    tf.summary.scalar("Loss/1_cls_loss", cls_loss)
    tf.summary.scalar("Loss/2_l2_loss", l2_loss)

    # summary_op
    summary_op = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(config.summary_save_dir)

    # train_op
    global_step = tf.train.get_or_create_global_step()
    train_op = tf.train.AdamOptimizer(
        learning_rate=config.learning_rate).minimize(loss=loss,
                                                     global_step=global_step)

    sess = tf.Session()
    saver = tf.train.Saver(max_to_keep=3)
    ckpt_path = tf.train.latest_checkpoint(config.model_save_dir)
    print('latest_checkpoint_path: ', ckpt_path)
    if ckpt_path is not None:
        saver.restore(sess, ckpt_path)
        prev_step = int(ckpt_path.split('-')[-1])
    else:
        init_op = tf.global_variables_initializer()  # initialize_all_variables is deprecated
        sess.run(init_op)
        prev_step = -1

    # train
    with sess.as_default():
        for i in range(config.train_steps):
            _inputs, _outputs = next(generator)
            _img_tensor = _inputs['images']
            _label_tensor = _outputs['labels']
            # print(_img_tensor.shape)
            # sess.run returns the serialized summary, not an op, so name it accordingly
            _loss, _, _summary = sess.run([loss, train_op, summary_op],
                                          feed_dict={
                                              images: _img_tensor,
                                              labels: _label_tensor
                                          })
            print('step: ', prev_step + 1 + i, 'loss: ', _loss)

            train_writer.add_summary(_summary, prev_step + 1 + i)
            train_writer.flush()

            if i % config.save_n_iters == 0:
                saver.save(sess=sess,
                           save_path=os.path.join(config.model_save_dir,
                                                  'model.ckpt'),
                           global_step=global_step)
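
The loop above relies only on next(generator) yielding an (inputs, outputs) pair of dicts keyed by 'images' and 'labels'. A stand-in generator honoring that contract can be handy for smoke-testing the graph without real data; the shapes and class count below are hypothetical:

import numpy as np

def fake_next_batch(batch_size=8, img_h=64, img_w=64, img_ch=3, num_classes=10):
    # Yields random batches matching the (inputs, outputs) dict contract above.
    while True:
        images = np.random.rand(batch_size, img_h, img_w, img_ch).astype(np.float32)
        labels = np.random.randint(0, num_classes, size=batch_size).astype(np.int32)
        yield {'images': images}, {'labels': labels}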
Example #4
import os

import tensorflow as tf  # TF 1.x graph-mode API

# config, load_dataset, model_design_nn and calc_loss are project-local
# modules assumed to be importable.


def train_ver1():

    anchors = [[12, 10], [18, 15], [22, 22], [29, 20], [29, 29], [37, 24],
               [36, 34], [40, 43], [44, 55]]

    DataSet = load_dataset.Read_Tfrecord(
        tfrecord_dir='/data/data/weche_tfrecords', anchors=anchors)

    config.batch_size = 2
    imgs, y_true = DataSet.make_batch(batch_size=config.batch_size)

    fm1, fm2, fm3 = model_design_nn.yolo3_net(inputs=imgs)
    # anchors = [[20, 30], [40, 50], [60, 70]]
    y_pred = [fm3, fm2, fm1]
    loss = calc_loss.build_loss(y_pred=y_pred, y_true=y_true)

    tf.summary.scalar('train_batch_statistics/total_loss', loss[0])
    tf.summary.scalar('train_batch_statistics/loss_xy', loss[1])
    tf.summary.scalar('train_batch_statistics/loss_wh', loss[2])
    tf.summary.scalar('train_batch_statistics/loss_conf', loss[3])
    tf.summary.scalar('train_batch_statistics/loss_class', loss[4])

    # summary_op
    summary_op = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./train_summary')

    # train_op
    global_step = tf.train.get_or_create_global_step()
    # add update_op for slim.bn
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(
            loss=loss[0], global_step=global_step)

    sess = tf.Session()
    saver = tf.train.Saver(max_to_keep=3)
    model_save_dir = './run_output'
    if not os.path.exists(model_save_dir):
        os.mkdir(model_save_dir)
    ckpt_path = tf.train.latest_checkpoint(model_save_dir)
    print('latest_checkpoint_path: ', ckpt_path)
    if ckpt_path is not None:
        saver.restore(sess, ckpt_path)
        prev_step = int(ckpt_path.split('-')[-1])
    else:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        prev_step = -1

    with sess.as_default():
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(100):
            _loss, _, _summary = sess.run([loss[0], train_op, summary_op])
            print('step: ', prev_step + 1 + i, 'loss: ', _loss)

            # train_writer.add_summary(_summary, prev_step + 1 + i)
            # train_writer.flush()

            if i % 10 == 0:
                saver.save(sess=sess,
                           save_path=os.path.join(model_save_dir,
                                                  'model.ckpt'),
                           global_step=global_step)

        coord.request_stop()
        coord.join(threads=threads)

    sess.close()
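
Here build_loss returns a tuple indexed as (total, xy, wh, conf, class), which is why the scalars above read loss[0] through loss[4]. The Coordinator/start_queue_runners scaffolding only matters if Read_Tfrecord uses the old queue-based input pipeline; with tf.data it is a no-op. A minimal tf.data-style make_batch that could feed the same graph is sketched below; the TFRecord feature key and decode logic are assumptions, and the y_true target construction that the real load_dataset performs is omitted:

def make_image_batch(tfrecord_files, batch_size, img_h=416, img_w=416):
    def _parse(example_proto):
        # hypothetical feature spec; decodes images only, no y_true targets
        feats = tf.parse_single_example(
            example_proto, {'image_raw': tf.FixedLenFeature([], tf.string)})
        img = tf.image.decode_jpeg(feats['image_raw'], channels=3)
        img = tf.image.resize_images(img, [img_h, img_w])
        return tf.cast(img, tf.float32) / 255.0

    dataset = (tf.data.TFRecordDataset(tfrecord_files)
               .map(_parse)
               .repeat()
               .batch(batch_size))
    return dataset.make_one_shot_iterator().get_next()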