Example #1
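Benchmarks the two batch pre-processing implementations, process_batch and process_batch_2, on a batch of 32 VOC images with all augmentations enabled.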
def test_preprocess():
    import time

    # Load a batch of VOC images/labels and build the config from the
    # host-specific path config plus the COCO-80 model config
    # (loader, parscfg and platform are assumed to be module-level imports).
    image_data, label_dict, _ = loader.load_VOC(batch_size=32)
    config = parscfg.ConfigParser(
        'configs/{}_path.cfg'.format(platform.node()), 'configs/coco80.cfg')

    # Strides of the three YOLO detection scales.
    stride_list = [32, 16, 8]

    # Pre-processing pipeline with flip, crop, color and affine augmentation.
    pre = PreProcess(dataflow=image_data,
                     rescale_shape_list=[416, 320],
                     stride_list=stride_list,
                     prior_list=config.anchors,
                     n_class=config.n_class,
                     h_flip=True,
                     crop=True,
                     color=True,
                     affine=True,
                     max_num_bbox_per_im=45)

    # Compare the run time of the two pre-processing implementations.
    start_time = time.time()
    im, gt_mask_batch, true_boxes = pre.process_batch(output_scale=[320, 320])
    print(time.time() - start_time)

    start_time = time.time()
    pre.process_batch_2(output_scale=[320, 320])
    print(time.time() - start_time)
Example #2
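Builds the VOC training and validation input pipelines, then iterates the training generator for several epochs, printing batch shapes and timings and drawing the ground-truth boxes of the first image in each batch.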
def test_input():
    import time
    import src.utils.viz as viz

    config = parscfg.ConfigParser(
        'configs/{}_path.cfg'.format(platform.node()), 'configs/coco80.cfg')

    # Training/validation data generators with multi-scale rescaling and
    # augmentation (loader, parscfg and platform are module-level imports).
    label_dict, category_index, train_data_generator, valid_data_generator = loader.load_VOC(
        rescale_shape_list=config.mutliscale,
        net_stride_list=[32, 16, 8],
        prior_anchor_list=config.anchors,
        n_class=config.n_class,
        batch_size=1,
        buffer_size=4,
        num_parallel_preprocess=2,
        h_flip=True,
        crop=True,
        color=True,
        affine=True,
        max_num_bbox_per_im=45)
    print(valid_data_generator.batch_data)

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())

        # Iterate the training generator for 10 epochs; resetting the iterator
        # with reset_scale=True re-samples the image rescale (multi-scale).
        for epoch in range(10):
            print('epoch: {}'.format(epoch))
            train_data_generator.init_iterator(sess, reset_scale=True)
            while True:
                try:
                    # train_data_generator.init_iterator(sess, reset_scale=True)
                    # train_data_generator.reset_im_scale()
                    start_time = time.time()
                    t = sess.run(train_data_generator.batch_data)
                    print(time.time() - start_time)
                    print(t[0].shape)
                    print(t[-1].shape)
                    # Draw the ground-truth boxes on the first image of the batch.
                    viz.draw_bounding_box(t[0][0] * 255,
                                          t[2][0],
                                          label_list=None,
                                          box_type='xyxy')
                except tf.errors.OutOfRangeError:
                    # Iterator exhausted: end of the epoch.
                    break
Example #3
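Loads a two-image VOC batch, rescales it to 320, and computes the YOLO target anchors for the ground-truth boxes.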
def test_target_anchor():
    import src.utils.viz as viz

    config = parscfg.ConfigParser(
        'configs/{}_path.cfg'.format(platform.node()), 'configs/coco80.cfg')

    # Load a small VOC batch, rescale the images to 320 and fetch one batch
    # (loader, parscfg, platform, np and bboxgt are module-level imports).
    image_data, label_dict, _ = loader.load_VOC(batch_size=2)
    rescale_shape = 320
    image_data.reset_image_rescale(rescale=rescale_shape)
    batch_data = image_data.next_batch_dict()
    gt_bbox_para = np.array([bbox[1:] for bbox in batch_data['label'][0]])
    gt_bbox_label = [bbox[0] for bbox in batch_data['label'][0]]

    # Assign each ground-truth box to its matching prior anchor on the
    # three detection scales.
    stride_list = [32, 16, 8]
    target = bboxgt.TargetAnchor([416, 320], stride_list, config.anchors,
                                 config.n_class)
    gt, target_anchor_batch = target.get_yolo_target_anchor(
        batch_data['label'], batch_data['boxes'], batch_data['shape'],
        rescale_shape, True)
Example #4
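Runs a COCO-pretrained YOLOv3 model in inference mode over a set of test images and writes the detections to the configured save path.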
def detect():
    config = parscfg.ConfigParser('configs/config_path.cfg',
                                  'configs/coco80.cfg')

    label_dict, category_index = loader.load_coco80_label_yolo()
    # Create a Dataflow object for test images
    image_data = loader.read_image(im_name=config.im_name,
                                   n_channel=config.n_channel,
                                   data_dir=config.data_dir,
                                   batch_size=config.test_bsize,
                                   rescale=config.im_rescale)

    # YOLOv3 in inference mode: both the feature extractor and the detector
    # are frozen and initialized from the COCO-pretrained weights.
    test_model = YOLOv3(
        bsize=config.test_bsize,
        n_channel=config.n_channel,
        n_class=config.n_class,
        anchors=config.anchors,
        feature_extractor_trainable=False,
        detector_trainable=False,
        pre_trained_path=config.coco_pretrained_path,
    )
    test_model.create_test_model()

    sessconfig = tf.ConfigProto()
    sessconfig.gpu_options.allow_growth = True  # allocate GPU memory on demand
    with tf.Session(config=sessconfig) as sess:
        sess.run(tf.global_variables_initializer())

        # Detect objects in every test image and write results to config.save_path.
        test_model.predict_epoch_or_step(sess,
                                         image_data,
                                         config.im_rescale,
                                         config.obj_score_thr,
                                         config.nms_iou_thr,
                                         label_dict,
                                         category_index,
                                         config.save_path,
                                         run_type='epoch')
Example #5
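Fine-tunes the YOLOv3 detection head on Pascal VOC with multi-scale training, a stepped learning-rate schedule, per-epoch validation, and checkpointing.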
def train():
    FLAGS = get_args()
    config = parscfg.ConfigParser('configs/config_path.cfg', 'configs/voc.cfg')

    # Training and validation data generators with multi-scale rescaling and
    # augmentation; 85% of the data is used for training.
    label_dict, category_index, train_data_generator, valid_data_generator = loader.load_VOC(
        data_dir=config.train_data_dir,
        rescale_shape_list=config.mutliscale,
        net_stride_list=[32, 16, 8],
        prior_anchor_list=config.anchors,
        train_percentage=0.85,
        n_class=config.n_class,
        batch_size=config.train_bsize,
        buffer_size=4,
        num_parallel_preprocess=8,
        h_flip=True,
        crop=True,
        color=True,
        affine=True,
        max_num_bbox_per_im=57)

    # Training model: the feature extractor is frozen and only the detection
    # head is trained on top of the pre-trained features.
    train_model = YOLOv3(
        n_channel=config.n_channel,
        n_class=config.n_class,
        category_index=category_index,
        anchors=config.anchors,
        bsize=config.train_bsize,
        ignore_thr=config.ignore_thr,
        obj_weight=config.obj_weight,
        nobj_weight=config.nobj_weight,
        feature_extractor_trainable=False,
        detector_trainable=True,
        pre_trained_path=config.yolo_feat_pretrained_path,
    )
    train_model.create_train_model(train_data_generator.batch_data)

    # Validation model: same configuration, but with both the feature
    # extractor and the detector frozen.
    valid_model = YOLOv3(
        n_channel=config.n_channel,
        n_class=config.n_class,
        anchors=config.anchors,
        category_index=category_index,
        bsize=config.train_bsize,
        ignore_thr=config.ignore_thr,
        obj_weight=config.obj_weight,
        nobj_weight=config.nobj_weight,
        feature_extractor_trainable=False,
        detector_trainable=False,
        pre_trained_path=config.yolo_feat_pretrained_path,
    )
    valid_model.create_valid_model(valid_data_generator.batch_data)

    # Testing (disabled)
    # test_scale = 416
    # image_data = loader.read_image(
    #     im_name=config.im_name,
    #     n_channel=config.n_channel,
    #     data_dir=config.data_dir,
    #     batch_size=config.test_bsize,
    #     rescale=test_scale)

    writer = tf.summary.FileWriter(config.save_path)
    saver = tf.train.Saver(max_to_keep=5)
    # saver = tf.train.Saver(
    #     var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='style_net'))
    sessconfig = tf.ConfigProto()
    sessconfig.gpu_options.allow_growth = True
    with tf.Session(config=sessconfig) as sess:
        sess.run(tf.global_variables_initializer())
        sess.graph.finalize()
        writer.add_graph(sess.graph)

        for i in range(150):
            # Stepped learning-rate schedule: divide the base learning rate
            # by 10 from epoch 50 and by 100 from epoch 100.
            if i >= 100:
                lr = FLAGS.lr / 100.
            elif i >= 50:
                lr = FLAGS.lr / 10.
            else:
                lr = FLAGS.lr

            # test_model.predict_epoch_or_step(
            #     sess,
            #     image_data,
            #     test_scale,
            #     config.obj_score_thr,
            #     config.nms_iou_thr,
            #     label_dict,
            #     category_index,
            #     config.save_path,
            #     run_type='epoch')

            # Re-sample the training image scale every 10 epochs (multi-scale training).
            if i > 0 and i % 10 == 0:
                train_data_generator.init_iterator(sess, reset_scale=True)
            else:
                train_data_generator.init_iterator(sess, reset_scale=False)

            train_model.train_epoch(sess, lr, summary_writer=writer)

            # Validate after each training epoch.
            valid_data_generator.init_iterator(sess)
            valid_model.valid_epoch(sess, summary_writer=writer)

            # Save a checkpoint for this epoch.
            saver.save(sess, '{}/yolov3_epoch_{}'.format(config.save_path, i))

            # if i > 0 and i % 10 == 0:
            #     train_data_generator.reset_im_scale()

    writer.close()
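
For reference, the stepped learning-rate schedule hard-coded in train() can be factored into a small helper. This is an illustrative sketch only; stepped_lr is not part of the repository:

def stepped_lr(base_lr, epoch):
    # Divide the base learning rate by 10 from epoch 50 and by 100 from
    # epoch 100, mirroring the if/elif branches in the training loop above.
    if epoch >= 100:
        return base_lr / 100.
    elif epoch >= 50:
        return base_lr / 10.
    return base_lr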