Ejemplo n.º 1
0
def convert_annotations(data,
                        path_prefix,
                        num_sample,
                        nproc,
                        start_img_id=0,
                        start_ann_id=0):
    """Renumber COCO-style image/annotation entries in place.

    Args:
        data (dict): COCO-style dict with 'images' and 'annotations' keys.
        path_prefix (str): Prefix forwarded to ``modify_image_info``.
        num_sample (int): Sample count forwarded to ``modify_annotation``.
        nproc (int): Number of worker processes; values > 1 run in parallel.
        start_img_id (int): First image id to assign.
        start_ann_id (int): First annotation id to assign.

    Returns:
        dict: The same ``data`` dict, updated in place.
    """
    image_fn = partial(modify_image_info,
                       path_prefix=path_prefix,
                       start_img_id=start_img_id)
    ann_fn = partial(modify_annotation,
                     num_sample=num_sample,
                     start_img_id=start_img_id,
                     start_ann_id=start_ann_id)
    if nproc > 1:
        data['annotations'] = mmcv.track_parallel_progress(
            ann_fn, data['annotations'], nproc=nproc)
        data['images'] = mmcv.track_parallel_progress(
            image_fn, data['images'], nproc=nproc)
    else:
        data['annotations'] = mmcv.track_progress(ann_fn, data['annotations'])
        data['images'] = mmcv.track_progress(image_fn, data['images'])
    # Single fixed category: text.
    data['categories'] = [{'id': 1, 'name': 'text'}]
    return data
Ejemplo n.º 2
0
def main():
    """Convert Cityscapes polygon annotations to label images and write a
    filename list per split.

    Reads ``*_polygons.json`` files under the ground-truth directory,
    converts them (optionally in parallel), then writes ``train/val/test``
    split lists into the output directory.
    """
    args = parse_args()
    cityscapes_path = args.cityscapes_path
    out_dir = args.out_dir if args.out_dir else cityscapes_path
    mmcv.mkdir_or_exist(out_dir)

    gt_dir = osp.join(cityscapes_path, args.gt_dir)

    poly_files = []
    for poly in mmcv.scandir(gt_dir, '_polygons.json', recursive=True):
        poly_file = osp.join(gt_dir, poly)
        poly_files.append(poly_file)
    if args.nproc > 1:
        mmcv.track_parallel_progress(convert_json_to_label, poly_files,
                                     args.nproc)
    else:
        mmcv.track_progress(convert_json_to_label, poly_files)

    split_names = ['train', 'val', 'test']

    for split in split_names:
        filenames = []
        for poly in mmcv.scandir(
                osp.join(gt_dir, split), '_polygons.json', recursive=True):
            filenames.append(poly.replace('_gtFine_polygons.json', ''))
        with open(osp.join(out_dir, f'{split}.txt'), 'w') as f:
            # FIX: the generator previously reused `f` as its loop variable,
            # shadowing the file handle; use a distinct name.
            f.writelines(name + '\n' for name in filenames)
Ejemplo n.º 3
0
def main():
    """Validate every dataset sample and record broken files.

    Loads the config, prepares the output log file, then runs
    ``DatasetValidator.valid_idx`` on each sample index, optionally in
    parallel.
    """
    # parse cfg and args
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # Recreate the output file that collects the broken-file list.
    output_path = Path(args.out_path)
    if not output_path.parent.exists():
        raise Exception('log_file parent directory not found.')
    if output_path.exists():
        os.remove(output_path)
    output_path.touch()

    # do valid
    validator = DatasetValidator(cfg, output_path, args.phase)
    indices = list(range(len(validator)))

    if args.num_process > 1:
        # Replicate Pool.map's default chunksize heuristic.
        chunksize, remainder = divmod(len(validator), args.num_process * 8)
        if remainder:
            chunksize += 1

        track_parallel_progress(validator.valid_idx,
                                indices,
                                args.num_process,
                                chunksize=chunksize,
                                keep_order=False)
    else:
        track_progress(validator.valid_idx, indices)

    print_info(output_path)
Ejemplo n.º 4
0
def main():
    """Convert COCO 2014 annotations to trainID label images.

    Creates the output directory layout, checks the train/test split sizes
    against ``COCO_LEN``, then converts both splits (optionally in
    parallel).
    """
    args = parse_args()
    coco_path = args.coco_path
    nproc = args.nproc

    out_dir = args.out_dir or coco_path
    out_img_dir = osp.join(out_dir, 'images')
    out_mask_dir = osp.join(out_dir, 'annotations')

    for subdir in ('train2014', 'test2014'):
        mmcv.mkdir_or_exist(osp.join(out_img_dir, subdir))
        mmcv.mkdir_or_exist(osp.join(out_mask_dir, subdir))

    train_list, test_list = generate_coco_list(coco_path)
    assert (len(train_list) +
            len(test_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format(
                len(train_list), len(test_list))

    def _converter(is_train):
        # Build the per-split conversion callable once; replaces four
        # duplicated partial(...) expressions.
        return partial(
            convert_to_trainID,
            in_img_dir=osp.join(coco_path, 'images'),
            in_ann_dir=osp.join(coco_path, 'annotations'),
            out_img_dir=out_img_dir,
            out_mask_dir=out_mask_dir,
            is_train=is_train)

    if nproc > 1:
        mmcv.track_parallel_progress(_converter(True), train_list, nproc=nproc)
        mmcv.track_parallel_progress(_converter(False), test_list, nproc=nproc)
    else:
        mmcv.track_progress(_converter(True), train_list)
        mmcv.track_progress(_converter(False), test_list)

    print('Done!')
Ejemplo n.º 5
0
def collect_annotations(files, dataset, nproc=1):
    """Gather per-image annotation info for a dataset.

    Args:
        files (list): Tuples of (image_file, groundtruth_file).
        dataset (str): Dataset name, icdar2015 or icdar2017.
        nproc (int): Number of processes used to collect annotations.

    Returns:
        list: Image information dicts.
    """
    assert isinstance(files, list)
    assert isinstance(dataset, str)
    assert dataset
    assert isinstance(nproc, int)

    loader = partial(load_img_info, dataset=dataset)
    # Only fan out to multiple processes when more than one was requested.
    if nproc > 1:
        return mmcv.track_parallel_progress(loader, files, nproc=nproc)
    return mmcv.track_progress(loader, files)
Ejemplo n.º 6
0
def cvt_annotations(devkit_path, years, split, out_file):
    """Parse VOC XML annotations for the given years/split and dump them.

    Args:
        devkit_path (str): Root of the VOCdevkit directory.
        years (list | int | str): VOC year(s) to convert.
        split (str): Image-set split name, e.g. 'train'.
        out_file (str): Destination file; a 'json' suffix triggers COCO
            conversion.

    Returns:
        list | None: Parsed annotations, or None if a filelist is missing.
    """
    if not isinstance(years, list):
        years = [years]
    annotations = []
    for year in years:
        filelist = osp.join(devkit_path,
                            f'VOC{year}/ImageSets/Main/{split}.txt')
        if not osp.isfile(filelist):
            print(f'filelist does not exist: {filelist}, '
                  f'skip voc{year} {split}')
            return
        img_names = mmcv.list_from_file(filelist)
        # Pair each XML annotation path with its relative image path.
        pairs = [(osp.join(devkit_path,
                           f'VOC{year}/Annotations/{img_name}.xml'),
                  f'VOC{year}/JPEGImages/{img_name}.jpg')
                 for img_name in img_names]
        annotations.extend(mmcv.track_progress(parse_xml, pairs))
    if out_file.endswith('json'):
        annotations = cvt_to_coco_json(annotations)
    mmcv.dump(annotations, out_file)
    return annotations
Ejemplo n.º 7
0
def cvt_annotations(devkit_path, years, split, out_file):
    """Parse VOC XML annotations for the given years/split and dump them.

    Args:
        devkit_path (str): Root of the VOCdevkit directory.
        years (list | int | str): VOC year(s) to convert.
        split (str): Image-set split name, e.g. 'train'.
        out_file (str): Path the annotation list is dumped to.

    Returns:
        list | None: Parsed annotations, or None if a filelist is missing.
    """
    if not isinstance(years, list):
        years = [years]
    annotations = []
    for year in years:
        # f-strings for consistency with the sibling implementation;
        # the produced strings are identical to the old .format() calls.
        filelist = osp.join(devkit_path,
                            f'VOC{year}/ImageSets/Main/{split}.txt')
        if not osp.isfile(filelist):
            print(f'filelist does not exist: {filelist}, '
                  f'skip voc{year} {split}')
            return
        img_names = mmcv.list_from_file(filelist)
        xml_paths = [
            osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml')
            for img_name in img_names
        ]
        img_paths = [
            f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names
        ]
        part_annotations = mmcv.track_progress(parse_xml,
                                               list(zip(xml_paths, img_paths)))
        annotations.extend(part_annotations)
    mmcv.dump(annotations, out_file)
    return annotations
Ejemplo n.º 8
0
def collect_annotations(files, nproc=1):
    """Load annotation images with ``img2coco``, optionally in parallel.

    Args:
        files (list): Annotation image files to load.
        nproc (int): Number of worker processes.

    Returns:
        list: Results returned by ``img2coco`` for each file.
    """
    print('Loading annotation images')
    if nproc > 1:
        return mmcv.track_parallel_progress(img2coco, files, nproc=nproc)
    return mmcv.track_progress(img2coco, files)
Ejemplo n.º 9
0
def main():
    """Convert COCO 2017 annotation masks to trainID label images.

    Collects the original (non-converted) mask files for both splits,
    checks the total against ``COCO_LEN``, then converts each split
    (optionally in parallel).
    """
    args = parse_args()
    coco_path = args.coco_path
    nproc = args.nproc

    out_dir = args.out_dir or coco_path
    out_img_dir = osp.join(out_dir, 'images')
    out_mask_dir = osp.join(out_dir, 'annotations')

    mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'train2017'))
    mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'val2017'))

    if out_dir != coco_path:
        shutil.copytree(osp.join(coco_path, 'images'), out_img_dir)

    def _list_masks(subdir):
        # Only original masks; skip files already converted to trainIds.
        files = glob(osp.join(coco_path, 'annotations', subdir, '*.png'))
        return [file for file in files if '_labelTrainIds' not in file]

    train_list = _list_masks('train2017')
    test_list = _list_masks('val2017')
    assert (len(train_list) +
            len(test_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format(
                len(train_list), len(test_list))

    def _converter(is_train):
        # One factory replaces four duplicated partial(...) expressions.
        return partial(convert_to_trainID,
                       out_mask_dir=out_mask_dir,
                       is_train=is_train)

    if nproc > 1:
        mmcv.track_parallel_progress(_converter(True), train_list, nproc=nproc)
        mmcv.track_parallel_progress(_converter(False), test_list, nproc=nproc)
    else:
        mmcv.track_progress(_converter(True), train_list)
        mmcv.track_progress(_converter(False), test_list)

    print('Done!')
Ejemplo n.º 10
0
def test_track_progress_list():
    """Check the exact progress-bar output and the returned results."""
    buf = StringIO()
    result = mmcv.track_progress(sleep_1s, [1, 2, 3], bar_width=3, file=buf)
    expected = ('[   ] 0/3, elapsed: 0s, ETA:'
                '\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA:     2s'
                '\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA:     1s'
                '\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA:     0s\n')
    assert buf.getvalue() == expected
    assert result == [1, 2, 3]
Ejemplo n.º 11
0
def test_track_progress_list(capsys):
    """Check the progress-bar output captured via pytest's capsys."""
    result = mmcv.track_progress(sleep_1s, [1, 2, 3], bar_width=3)
    captured, _ = capsys.readouterr()
    expected = ('[   ] 0/3, elapsed: 0s, ETA:'
                '\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA:     2s'
                '\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA:     1s'
                '\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA:     0s\n')
    assert captured == expected
    assert result == [1, 2, 3]
Ejemplo n.º 12
0
def collect_annotations(files, nproc=1):
    """Load annotation images with ``load_img_info``, optionally in parallel.

    Args:
        files (list): Annotation files to load.
        nproc (int): Number of worker processes.

    Returns:
        list: Results returned by ``load_img_info`` for each file.
    """
    print("Loading annotation images")
    if nproc > 1:
        return mmcv.track_parallel_progress(load_img_info, files, nproc=nproc)
    return mmcv.track_progress(load_img_info, files)
Ejemplo n.º 13
0
def main():
    """Generate PASCAL-Context segmentation labels and split lists.

    For each of the 'train' and 'val' splits: build the Detail API handle,
    generate categorical label images, and write the sorted filename list
    into VOC2010/ImageSets/SegmentationContext.
    """
    args = parse_args()
    devkit_path = args.devkit_path
    if args.out_dir is None:
        out_dir = osp.join(devkit_path, 'VOC2010', 'SegmentationClassContext')
    else:
        out_dir = args.out_dir
    json_path = args.json_path
    mmcv.mkdir_or_exist(out_dir)
    img_dir = osp.join(devkit_path, 'VOC2010', 'JPEGImages')

    split_dir = osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext')
    mmcv.mkdir_or_exist(split_dir)

    # Identical pipeline for both splits; the previous code duplicated it.
    for split in ('train', 'val'):
        detail = Detail(json_path, img_dir, split)
        img_ids = detail.getImgs()
        name_list = mmcv.track_progress(
            partial(generate_labels, detail=detail, out_dir=out_dir), img_ids)
        with open(osp.join(split_dir, f'{split}.txt'), 'w') as f:
            f.writelines(line + '\n' for line in sorted(name_list))

    print('Done!')
Ejemplo n.º 14
0
def process(json_dir,
            img_dir,
            out_dir,
            tasks=None,
            nproc=1,
            recog_format='jsonl',
            warp=False):
    """Convert labelme JSON annotations into detection/recognition files.

    Args:
        json_dir (str): Directory containing labelme ``*.json`` files.
        img_dir (str): Directory with the corresponding images.
        out_dir (str): Output directory (created if missing).
        tasks (list, optional): Tasks to generate. Defaults to ``['det']``;
            ``None`` is used as the default to avoid the mutable default
            argument pitfall.
        nproc (int): Number of worker processes.
        recog_format (str): Extension for recognition label files.
        warp (bool): Also emit warped recognition labels when True.
    """
    if tasks is None:
        tasks = ['det']
    mmcv.mkdir_or_exist(out_dir)

    json_file_list = glob.glob(osp.join(json_dir, '*.json'))

    parse_labelme_json_func = partial(parse_labelme_json,
                                      img_dir=img_dir,
                                      out_dir=out_dir,
                                      tasks=tasks,
                                      recog_format=recog_format,
                                      warp_flag=warp)

    if nproc <= 1:
        total_results = mmcv.track_progress(parse_labelme_json_func,
                                            json_file_list)
    else:
        total_results = mmcv.track_parallel_progress(parse_labelme_json_func,
                                                     json_file_list,
                                                     keep_order=True,
                                                     nproc=nproc)

    # Each result is (det_lines, recog_crop_lines, recog_warp_lines).
    total_det_line_json_list = []
    total_recog_crop_line_str = []
    total_recog_warp_line_str = []
    for res in total_results:
        total_det_line_json_list.extend(res[0])
        if 'recog' in tasks:
            total_recog_crop_line_str.extend(res[1])
            total_recog_warp_line_str.extend(res[2])

    # out_dir was already created above; the duplicate mkdir was removed.
    det_out_file = osp.join(out_dir, 'instances_training.txt')
    list_to_file(det_out_file, total_det_line_json_list)

    if 'recog' in tasks:
        recog_out_file_crop = osp.join(out_dir, f'train_label.{recog_format}')
        list_to_file(recog_out_file_crop, total_recog_crop_line_str)
        if warp:
            recog_out_file_warp = osp.join(out_dir,
                                           f'warp_train_label.{recog_format}')
            list_to_file(recog_out_file_warp, total_recog_warp_line_str)
Ejemplo n.º 15
0
def collect_annotations(files, nproc=1):
    """Collect the annotation information.

    Args:
        files (list): Tuples of (image_file, groundtruth_file).
        nproc (int): Number of processes used to collect annotations.

    Returns:
        list: Image information dicts.
    """
    assert isinstance(files, list)
    assert isinstance(nproc, int)

    if nproc > 1:
        return mmcv.track_parallel_progress(load_img_info, files, nproc=nproc)
    return mmcv.track_progress(load_img_info, files)
Ejemplo n.º 16
0
def collect_annotations(files, split, nproc=1):
    """Collect the annotation information for one dataset split.

    Args:
        files (list): Tuples of (image_file, groundtruth_file).
        split (str): Dataset split, namely 'training' or 'test'.
        nproc (int): Number of processes used to collect annotations.

    Returns:
        list: Image information dicts.
    """
    assert isinstance(files, list)
    assert isinstance(split, str)
    assert isinstance(nproc, int)

    loader = partial(load_img_info, split=split)
    if nproc > 1:
        return mmcv.track_parallel_progress(loader, files, nproc=nproc)
    return mmcv.track_progress(loader, files)
Ejemplo n.º 17
0
def main():
    """Program for making Audi's A2D2 dataset compatible with mmsegmentation.

    NOTE: The input argument path must be the ABSOLUTE PATH to the dataset
          - NOT the symbolically linked one (i.e. data/a2d2)!

    Segmentation label conversion:
        The A2D2 labels are instance segmentations (i.e. car_1, car_2, ...),
        while semantic segmentation requires categorical segmentations.

        The function 'convert_TYPE_trainids()' converts all instance
        segmentation to their corresponding categorical segmentation and saves
        them as new label image files.

        Conversion type options
            A2D2: Generates segmentations using inherent categories.
            Cityscapes: Generates segmentations according to the categories and
                        indexing (i.e. 'trainIds') as in Cityscapes.

    Directory restructuring:
        A2D2 files are not arranged in the required 'train/val/test' directory
        structure.

        The function 'restructure_a2d2_directory' creates a new compatible
        directory structure in the root directory, and fills it with symbolic
        links or file copies to the input and segmentation label images.

    Example usage:
        python tools/convert_datasets/a2d2.py path/to/camera_lidar_semantic
    """
    args = parse_args()
    a2d2_path = args.a2d2_path
    out_dir = args.out_dir if args.out_dir else a2d2_path
    mmcv.mkdir_or_exist(out_dir)

    # Create a list of filepaths to all original labels
    # NOTE: Original label files have a number before '.png'
    label_filepaths = glob.glob(osp.join(a2d2_path, '*/label/*/*[0-9].png'))

    # Convert segmentation images to the chosen 'trainIds' values
    if args.convert:
        seg_choice = args.choice
        # Dispatch table replaces the duplicated if/elif branch structure.
        converters = {
            'cityscapes': convert_cityscapes_trainids,
            'a2d2': convert_a2d2_trainids,
        }
        if seg_choice not in converters:
            # FIX: raise with an informative message instead of a bare
            # ValueError.
            raise ValueError(f'Unknown segmentation choice: {seg_choice!r}')
        convert_fn = converters[seg_choice]
        if args.nproc > 1:
            mmcv.track_parallel_progress(convert_fn, label_filepaths,
                                         args.nproc)
        else:
            mmcv.track_progress(convert_fn, label_filepaths)

    # Restructure directory structure into 'img_dir' and 'ann_dir'
    if args.restruct:
        restructure_a2d2_directory(out_dir, args.val, args.test, args.symlink)
Ejemplo n.º 18
0
# -*- coding: utf-8 -* -
'''
mmcv创建进度条
参考:https://mmcv.readthedocs.io/en/latest/utils.html#progressbar
'''
import mmcv
import time


def do_task(i):
    """Simulate one second of work, then return ``i + 1``."""
    time.sleep(1)
    incremented = i + 1
    return incremented


# Run the tasks one by one, printing a progress bar.
tasks = list(range(10))
results = mmcv.track_progress(do_task, tasks)
print(results)

# Run the tasks with 8 worker processes, printing a progress bar.
results = mmcv.track_parallel_progress(do_task, tasks, 8)
print(results)
Ejemplo n.º 19
0
    # test ping results.
    ping_result = multithread_ping_ssrserver(ssr_config)
    valid_index = [
        i for i, v in enumerate(ping_result) if v[0] <= speedtest_ping_timeout
        and '回国' not in ssr_config[i]['remarks'] and '打机' not in ssr_config[i]
        ['remarks'] and '游戏' not in ssr_config[i]['remarks']
    ]
    ping_result_str = '%d/%d servers are ping accessible!!!\n%.2f%% ping pass rate!!!' % (
        len(valid_index), len(ssr_config), 100.0 *
        (float(len(valid_index)) / len(ssr_config)))
    ssr_config = [ssr_config[i] for i in valid_index]
    ping_result = [ping_result[i] for i in valid_index]

    # test speed
    results_list = track_progress(outwall_speed_test, ssr_config)
    for i in range(len(results_list)):
        results_list[i]['remarks'] = ssr_config[i]['remarks']
        results_list[i]['ssr_config'] = ssr_config[i]
        results_list[i]['ping'] = ping_result[i][0]

    if len(results_list) == 0:
        print('No available ssr server!!!')
    else:
        results_list = sorted(results_list,
                              key=lambda s:
                              (-s['www.google.com']['success_rate'], s[
                                  'www.google.com']['ave_time']))
        results_list = [
            r for r in results_list if r['www.google.com']['success_rate'] != 0
        ]