Example #1
import math
import os
from multiprocessing import Process, Queue

from tqdm import tqdm

# config, misc_utils and inference are assumed to come from the surrounding
# repository; they are not standard-library modules.

def eval_all(args):
    # checkpoint directory and evaluation output directory from the config
    saveDir = config.model_dir
    evalDir = config.eval_dir
    misc_utils.ensure_dir(evalDir)
    model_file = os.path.join(saveDir,
                              'epoch_{}.pkl'.format(args.resume_weights))
    assert os.path.exists(model_file)
    # load data
    records = misc_utils.load_json_lines(config.eval_source)
    # split the records across devices and run inference in parallel processes
    num_records = len(records)
    num_devs = args.devices
    num_image = math.ceil(num_records / num_devs)
    result_queue = Queue(1000)
    procs = []
    all_results = []
    for i in range(num_devs):
        start = i * num_image
        end = min(start + num_image, num_records)
        split_records = records[start:end]
        proc = Process(target=inference,
                       args=(model_file, i, split_records, result_queue))
        proc.start()
        procs.append(proc)
    pbar = tqdm(total=num_records, ncols=50)
    # drain the result queue before joining the workers (a full queue would
    # otherwise block the children and deadlock the join below)
    for i in range(num_records):
        t = result_queue.get()
        all_results.append(t)
        pbar.update(1)
    for p in procs:
        p.join()
    fpath = os.path.join(evalDir, 'dump-{}.json'.format(args.resume_weights))
    misc_utils.save_json_lines(all_results, fpath)
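
The examples on this page revolve around load_json_lines / save_json_lines from the project's misc_utils module. Judging by how they are used (one record per line, e.g. CrowdHuman-style .odgt annotation files), a minimal sketch of such helpers could look like the following; the actual implementation in the repository may differ.

import json

def load_json_lines(fpath):
    # read a JSON-lines file: one JSON object per line
    with open(fpath, 'r') as f:
        return [json.loads(line) for line in f if line.strip()]

def save_json_lines(records, fpath):
    # write records back out, one JSON object per line
    with open(fpath, 'w') as f:
        for record in records:
            f.write(json.dumps(record) + '\n')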
Example #2
def run_test():
    parser = argparse.ArgumentParser()
    # parser.add_argument('--resume_weights', '-r', default=None, type=str)
    parser.add_argument('--start_epoch', '-s', default=30, type=int)
    parser.add_argument('--end_epoch', '-e', default=50, type=int)
    parser.add_argument('--devices', '-d', default=1, type=int)
    args = parser.parse_args()
    # eval_all(args)

    # model and evaluation directories
    model_dir = config.model_dir
    eval_dir = config.eval_dir
    ensure_dir(eval_dir)
    records = load_json_lines(config.eval_source)

    start_epoch, end_epoch = args.start_epoch, args.end_epoch
    for epoch in range(start_epoch, end_epoch):
        model_file = osp.join(model_dir, 'epoch-{}.pkl'.format(epoch))
        if not osp.exists(model_file):
            continue
        results = eval_all(model_file, records, args)
        
        fpath = osp.join(eval_dir, 'epoch-{}.human'.format(epoch))
        save_json_lines(results, fpath)
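
Examples #2 and #4 also call ensure_dir, which presumably just creates the output directory if it does not exist yet; a one-line sketch under that assumption:

import os

def ensure_dir(path):
    # create the directory (and any parents) if it does not already exist
    os.makedirs(path, exist_ok=True)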
Example #3
def __init__(self, config, if_train):
    # dataset-class __init__ (excerpt): pick train or eval settings from config
    if if_train:
        self.training = True
        source = config.train_source
        self.short_size = config.train_image_short_size
        self.max_size = config.train_image_max_size
    else:
        self.training = False
        source = config.eval_source
        self.short_size = config.eval_image_short_size
        self.max_size = config.eval_image_max_size
    self.records = load_json_lines(source)
    self.config = config
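
For context, here is a hypothetical instantiation of the class this __init__ belongs to; the class name Dataset is assumed for illustration and does not come from the source.

# hypothetical usage; the real class name comes from the surrounding repo
train_data = Dataset(config, if_train=True)    # reads config.train_source
eval_data = Dataset(config, if_train=False)    # reads config.eval_source
print('loaded {} training records'.format(len(train_data.records)))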
Example #4
def run_test():

    parser = argparse.ArgumentParser()
    parser.add_argument('--start_epoch', '-s', default=30, type=int)
    parser.add_argument('--end_epoch', '-e', default=50, type=int)
    parser.add_argument('--devices', '-d', default=1, type=int)
    args = parser.parse_args()

    # model and evaluation directories
    model_dir = config.model_dir
    eval_dir = config.eval_dir
    ensure_dir(eval_dir)
    records = load_json_lines(config.eval_source)

    start_epoch, end_epoch = args.start_epoch, args.end_epoch
    for epoch in range(start_epoch, end_epoch):
        model_file = osp.join(model_dir, 'epoch-{}.pkl'.format(epoch))
        if not osp.exists(model_file):
            continue
        print('Processing {}'.format(osp.basename(model_file)))
        results = eval_all(model_file, records, args)
        
        fpath = osp.join(eval_dir, 'epoch-{}.human'.format(epoch))
        save_json_lines(results, fpath)
Example #5
def train_dataset(seed=config.seed_dataprovider):
    # endless generator: yields one padded, resized training batch at a time
    root = config.image_folder
    source = config.train_source
    batch_per_gpu = config.batch_per_gpu
    short_size = config.train_image_short_size
    max_size = config.train_image_max_size
    records = misc_utils.load_json_lines(source)
    nr_files = len(records)
    print('training image number: {}'.format(nr_files))
    np.random.seed(seed)
    np.random.shuffle(records)
    file_idx = 0
    while file_idx < nr_files:
        batch_records = []
        batch_images_list = []
        hw_stat = np.zeros((batch_per_gpu, 2), np.int32)
        for i in range(batch_per_gpu):
            record = records[file_idx]
            batch_records.append(record)
            image_path = os.path.join(root, record['ID'] + '.png')
            img = misc_utils.load_img(image_path)
            batch_images_list.append(img.copy())
            hw_stat[i, :] = img.shape[:2]
            file_idx += 1
            if file_idx >= nr_files:
                file_idx = 0
                np.random.shuffle(records)

        batch_image_height = np.max(hw_stat[:, 0])
        batch_image_width = np.max(hw_stat[:, 1])
        is_batch_ok = True
        batch_resized_height, batch_resized_width = get_hw_by_short_size(
            batch_image_height, batch_image_width, short_size, max_size)
        batch_images = np.zeros((batch_per_gpu, 3, max_size, max_size),
                                dtype=np.float32)
        batch_gts = np.zeros(
            (batch_per_gpu, config.max_boxes_of_image, config.nr_box_dim),
            dtype=np.float32)
        batch_info = np.zeros((batch_per_gpu, 6), dtype=np.float32)

        for i in range(batch_per_gpu):
            record = batch_records[i]
            img = batch_images_list[i]
            gt_boxes = misc_utils.load_gt(record, 'gtboxes', 'fbox',
                                          config.class_names)
            # keep only boxes with non-negative width and height
            keep = (gt_boxes[:, 2] >= 0) * (gt_boxes[:, 3] >= 0)
            gt_boxes = gt_boxes[keep, :]
            nr_gtboxes = gt_boxes.shape[0]
            if nr_gtboxes == 0:
                is_batch_ok = False
                break
            # convert boxes from (x, y, w, h) to (x1, y1, x2, y2)
            gt_boxes[:, 2:4] += gt_boxes[:, :2]
            padded_image = pad_image(img, batch_image_height,
                                     batch_image_width, config.image_mean)
            original_height, original_width, channels = padded_image.shape
            resized_image, scale = resize_img_by_short_and_max_size(
                padded_image, short_size, max_size)
            gt_boxes[:, 0:4] *= scale
            resized_gt = gt_boxes
            # random horizontal flip with probability 0.5
            if np.random.randint(2) == 1:
                resized_image, resized_gt = flip_image_and_boxes(
                    resized_image, resized_gt)
            resized_image = resized_image.transpose(2, 0, 1).astype(np.float32)
            batch_images[i, :, :int(resized_image.shape[1]), :int(
                resized_image.shape[2])] = resized_image
            batch_gts[i, :nr_gtboxes] = resized_gt
            batch_info[i, :] = (resized_image.shape[1], resized_image.shape[2],
                                scale, original_height, original_width,
                                nr_gtboxes)
        if not is_batch_ok:
            continue
        yield dict(data=batch_images, boxes=batch_gts, im_info=batch_info)
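
Example #5 leans on two resize helpers whose behavior can be inferred from their names and call sites: scale the image so that its shorter side reaches short_size, while capping the longer side at max_size. A minimal sketch under that assumption (the repository's own implementations may differ):

import cv2

def get_hw_by_short_size(im_height, im_width, short_size, max_size):
    # scale so the shorter side becomes short_size, but never let the
    # longer side exceed max_size
    scale = short_size / min(im_height, im_width)
    if max(im_height, im_width) * scale > max_size:
        scale = max_size / max(im_height, im_width)
    return int(round(im_height * scale)), int(round(im_width * scale))

def resize_img_by_short_and_max_size(img, short_size, max_size):
    # resize the image and return both the result and the applied scale factor
    h, w = img.shape[:2]
    new_h, new_w = get_hw_by_short_size(h, w, short_size, max_size)
    scale = new_h / h
    resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    return resized, scale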