Code example #1
File: pascal2coco.py Project: ixhorse/visdrone
    def convert(self, devkit_path, split):
        """Converts PASCAL VOC annotations to MSCOCO format. """
        split_file = osp.join(devkit_path,
                              'ImageSets/Main/{}.txt'.format(split))
        ann_dir = osp.join(devkit_path, 'Annotations')

        name_list = mmcv.list_from_file(split_file)

        # copy image
        split_imgdir = os.path.join(coco_imgdir, split)
        if os.path.exists(split_imgdir):
            shutil.rmtree(split_imgdir)
        os.mkdir(split_imgdir)
        img_list = [
            os.path.join(image_dir, name + '.jpg') for name in name_list
        ]
        with concurrent.futures.ThreadPoolExecutor() as executor:
            executor.map(_copy, img_list, [split_imgdir] * len(img_list))

        images, annotations = [], []
        ann_id = 1
        for id, name in enumerate(name_list):
            image_id = id

            xml_file = osp.join(ann_dir, name + '.xml')

            with open(xml_file, 'r') as f:
                ann_dict = xmltodict.parse(f.read(), force_list=('object', ))

            if 'object' in ann_dict['annotation']:
                # Add image item.
                image = self.get_img_item(name + '.jpg', image_id,
                                          ann_dict['annotation']['size'])
                images.append(image)

                for obj in ann_dict['annotation']['object']:
                    # Add annotation item.
                    annotation = self.get_ann_item(obj, image_id, ann_id)
                    annotations.append(annotation)
                    ann_id += 1
            else:
                logger.warning('{} does not have any object'.format(name))

        categories = []
        for name, id in self.cat2id.items():
            # Add category item.
            category = self.get_cat_item(name, id)
            categories.append(category)

        ann = OrderedDict()
        ann['images'] = images
        ann['type'] = 'instances'
        ann['annotations'] = annotations
        ann['categories'] = categories

        save_file = os.path.join(coco_dir,
                                 'annotations/instances_{}.json'.format(split))
        logger.info('Saving annotations to {}'.format(save_file))
        with open(save_file, 'w') as f:
            json.dump(ann, f)
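For reference, the JSON written above follows the standard COCO 'instances' layout. The exact fields produced by get_img_item, get_ann_item and get_cat_item are not shown in this snippet, so the sketch below only illustrates the assumed layout with made-up values:

# Illustrative COCO 'instances' layout; all values here are invented.
ann = {
    'images': [
        {'file_name': '000001.jpg', 'id': 0, 'width': 640, 'height': 480},
    ],
    'type': 'instances',
    'annotations': [
        {'id': 1, 'image_id': 0, 'category_id': 1,
         'bbox': [10, 20, 30, 40], 'area': 1200, 'iscrowd': 0},
    ],
    'categories': [
        {'id': 1, 'name': 'pedestrian'},
    ],
}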
Code example #2
def main():
    labels_list = ['SteelPipe', 'rebar']
    voc_test_file = '/diskb/GlodonDataset/rebar-steelpipe/v0.1/test-rebar/VOC2007/ImageSets/Main/test.txt'
    image_id_list = mmcv.list_from_file(voc_test_file)

    category_comp4_data_map = {key: {} for key in labels_list}

    comp4_path = '/data/wangtf/Projects/darknet-AlexeyAB/work_dirs/results/comp4_det_test_rebar.txt'
    category = os.path.basename(comp4_path).split('.')[0].split('_')[-1]
    image_id_bbox_map = load_comp4(comp4_path)
    category_comp4_data_map[category] = image_id_bbox_map

    json_data = []
    for image_id in image_id_list:
        one_image_json_data = []
        for label in labels_list:
            if image_id not in category_comp4_data_map[label]:
                one_image_json_data.append([])
            else:
                one_image_json_data.append(category_comp4_data_map[label][image_id])
        json_data.append(one_image_json_data)

    save_path = os.path.dirname(comp4_path) + '/result_test.json'
    with open(save_path, 'w') as f:
        json.dump(json_data, f)
Code example #3
    def load_annotations(self, ann_file):
        """Load annotation from XML style ann_file.

        Args:
            ann_file (str): Path of XML file.

        Returns:
            list[dict]: Annotation info from XML file.
        """

        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = f'JPEGImages/{img_id}.jpg'
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = 0
            height = 0
            if size is not None:
                width = int(size.find('width').text)
                height = int(size.find('height').text)
            else:
                img_path = osp.join(self.img_prefix, 'JPEGImages',
                                    '{}.jpg'.format(img_id))
                img = Image.open(img_path)
                width, height = img.size
            data_infos.append(
                dict(id=img_id, filename=filename, width=width, height=height))

        return data_infos
Code example #4
def loadtxt(txt_file):
    lines = mmcv.list_from_file(txt_file)
    if len(lines) == 0:
        # np.int was removed in NumPy 1.24; the builtin int is the same dtype
        return np.zeros([0, 8], dtype=int)
    lines = [list(map(int, line.split(',')[:-1])) for line in lines]
    lines = np.array(lines, dtype=int)
    return lines
def get_ann(img, gt_path):
    # h, w = img.shape[0:2]
    h, w = img.height, img.width
    lines = mmcv.list_from_file(gt_path)
    bboxes = []
    words = []
    for line in lines:
        line = line.encode('utf-8').decode('utf-8-sig')
        line = line.replace('\xef\xbb\xbf', '')

        gt = line.split(' ')

        # np.float was removed in NumPy 1.24; the builtin float is equivalent
        w_ = float(gt[4])
        h_ = float(gt[5])
        x1 = float(gt[2]) + w_ / 2.0
        y1 = float(gt[3]) + h_ / 2.0
        theta = float(gt[6]) / math.pi * 180

        bbox = cv2.boxPoints(((x1, y1), (w_, h_), theta))
        # print(np.asarray(bbox.reshape(-1)))
        bbox = np.array(adjust_box_sort(np.asarray(
            bbox.reshape(-1)))) / ([w * 1.0, h * 1.0] * 4)
        # bbox = bbox.reshape(-1)

        bboxes.append(bbox)
        words.append('???')
    return np.array(bboxes), words
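As a quick check of the rotated-box handling above: cv2.boxPoints expands a ((cx, cy), (w, h), angle) rectangle into its four corner points, which get_ann then flattens to 8 values and normalizes by the image size. The numbers below are arbitrary:

import cv2

# a 20x10 box centered at (50, 40), rotated by 30 degrees
pts = cv2.boxPoints(((50.0, 40.0), (20.0, 10.0), 30.0))
print(pts.shape)  # (4, 2), flattened to 8 coordinates by get_ann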
Code example #6
def multi_selsa_gpu_dump(model, cfg):
    file_list = mmcv.list_from_file('.txt')
    assert len(file_list) % 9 == 0

    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    for i in range(len(file_list)):
        files_cur_video_trip = file_list[i * 9:(i + 1) * 9]

        data_piped = []
        for j, file_ in enumerate(files_cur_video_trip):
            if j % 3 == 0:
                data_piped.append([])
            img_ = mmcv.imread(file_)
            data = dict(img=img_)
            data = test_pipeline(data)

            data_piped[j // 3].append(data)

        data_piped = scatter(collate([data_piped], samples_per_gpu=9),
                             [device])[0]

        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data_piped)

    return result
Code example #7
File: kie_test_imgs.py Project: open-mmlab/mmocr
def save_results(model, img_meta, gt_bboxes, result, out_dir):
    assert 'filename' in img_meta, ('Please add "filename" '
                                    'to "meta_keys" in config.')
    assert 'ori_texts' in img_meta, ('Please add "ori_texts" '
                                     'to "meta_keys" in config.')

    out_json_file = osp.join(out_dir,
                             osp.basename(img_meta['filename']) + '.json')

    idx_to_cls = {}
    if model.module.class_list is not None:
        for line in mmcv.list_from_file(model.module.class_list):
            class_idx, class_label = line.strip().split()
            idx_to_cls[int(class_idx)] = class_label

    json_result = [{
        'text': text,
        'box': box,
        'pred': idx_to_cls.get(pred.argmax(-1).cpu().item(),
                               pred.argmax(-1).cpu().item()),
        'conf': pred.max(-1)[0].cpu().item()
    } for text, box, pred in zip(img_meta['ori_texts'], gt_bboxes,
                                 result['nodes'])]

    mmcv.dump(json_result, out_json_file)
def get_ann(gt_path):
    lines = mmcv.list_from_file(gt_path)
    polygon_list = []
    words = []
    boxes = []
    for line in lines:
        line = line.encode('utf-8').decode('utf-8-sig')
        line = line.replace('\xef\xbb\xbf', '')

        gt = line.split(' ')

        # np.float was removed in NumPy 1.24; the builtin float is equivalent
        w_ = float(gt[4])
        h_ = float(gt[5])
        x1 = float(gt[2]) + w_ / 2.0
        y1 = float(gt[3]) + h_ / 2.0
        theta = float(gt[6]) / math.pi * 180

        bbox = cv2.boxPoints(((x1, y1), (w_, h_), theta))
        # print(np.asarray(bbox.reshape(-1)))
        x, y, w, h = cv2.boundingRect(bbox)
        boxes.append([max(x, 0), max(y, 0), max(x + w, 0), max(y + h, 0)])

        bbox = np.array(np.asarray(bbox.reshape(-1)))
        # bbox = bbox.reshape(-1)

        polygon_list.append(bbox)
        words.append('???')
    return boxes, polygon_list
Code example #9
    def load_annotations(self, img_prefix, ann_file):
        '''
        @description:
        @param:
            ann_file: VOC2007/train.txt | VOC2012/train.txt
        @return:
        '''
        img_infos = []
        img_ids = mmcv.list_from_file(osp.join(self.data_root, ann_file))
        img_prefix = osp.join(self.data_root, img_prefix)
        for img_id in img_ids:
            filename = f'{img_prefix}/JPEGImages/{img_id}.jpg'
            xml_path = f'{img_prefix}/Annotations/{img_id}.xml'
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            img_infos.append(
                dict(id=img_id,
                     filename=filename,
                     xml_path=xml_path,
                     width=width,
                     height=height))
        return img_infos
Code example #10
def prepare_train_img_infos(cache_path, img_list_path=None):
    if img_list_path is not None:
        img_names = mmcv.list_from_file(img_list_path)
        img_names = [img_name + '.jpg' for img_name in img_names]
    else:
        img_names = [
            img_name
            for img_name in mmcv.utils.scandir(train_img_root, '.jpg')
        ]

    img_infos = []
    print('Loading images...')
    for i, img_name in enumerate(img_names):
        if i % 1000 == 0:
            print('%d / %d' % (i, len(img_names)))
        img_path = train_img_root + img_name
        ann_file = img_name.replace('.jpg', '.json')

        try:
            h, w, _ = mmcv.imread(img_path).shape
            img_info = dict(filename=img_name,
                            height=h,
                            width=w,
                            annfile=ann_file)
            img_infos.append(img_info)
        except Exception:
            print('Load image error when generating img_infos: %s' % img_path)

    with open(cache_path, 'w') as f:
        mmcv.dump(img_infos, f, file_format='json', ensure_ascii=False)
Code example #11
def cvt_annotations(devkit_path, years, split, out_file):
    if not isinstance(years, list):
        years = [years]
    annotations = []
    for year in years:
        filelist = osp.join(devkit_path,
                            f'VOC{year}/ImageSets/Main/{split}.txt')
        if not osp.isfile(filelist):
            print(f'filelist does not exist: {filelist}, '
                  f'skip voc{year} {split}')
            return
        img_names = mmcv.list_from_file(filelist)
        xml_paths = [
            osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml')
            for img_name in img_names
        ]
        img_paths = [
            f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names
        ]
        part_annotations = mmcv.track_progress(parse_xml,
                                               list(zip(xml_paths, img_paths)))
        annotations.extend(part_annotations)
    if out_file.endswith('json'):
        annotations = cvt_to_coco_json(annotations)
    mmcv.dump(annotations, out_file)
    return annotations
Code example #12
File: toby.py Project: GiangHLe/mmdetection
    def load_annotations(self, ann_file):
        ann_list = mmcv.list_from_file(ann_file)

        data_infos = []
        for i, ann_line in enumerate(ann_list):
            if ann_line != '#':
                continue

            img_shape = ann_list[i + 2].split(' ')
            width = int(img_shape[0])
            height = int(img_shape[1])
            bbox_number = int(ann_list[i + 3])

            bboxes = []
            labels = []
            for anns in ann_list[i + 4:i + 4 + bbox_number]:
                anns = anns.split(' ')
                bboxes.append([float(ann) for ann in anns[:4]])
                labels.append(int(anns[4]))

            data_infos.append(
                dict(
                    filename=ann_list[i + 1],
                    width=width,
                    height=height,
                    ann=dict(
                        bboxes=np.array(bboxes).astype(np.float32),
                        labels=np.array(labels).astype(np.int64))
                ))

        return data_infos
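The parser above implies a block-oriented annotation file: a '#' marker line, then the filename, the image size, the box count, and that many 'x1 y1 x2 y2 label' lines. A hypothetical file in this inferred layout:

#
imgs/0001.jpg
640 480
2
10.0 20.0 110.0 220.0 0
50.0 60.0 150.0 260.0 1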
Code example #13
File: wider_face.py Project: zzzzlalala/mmdetection
    def load_annotations(self, ann_file):
        """Load annotation from WIDERFace XML style annotation file.

        Args:
            ann_file (str): Path of XML file.

        Returns:
            list[dict]: Annotation info from XML file.
        """

        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = f'{img_id}.jpg'
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            folder = root.find('folder').text
            data_infos.append(
                dict(
                    id=img_id,
                    filename=osp.join(folder, filename),
                    width=width,
                    height=height))

        return data_infos
Code example #14
File: custom.py Project: kuangbixia/R-CNN
    @classmethod
    def get_classes(cls, classes=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.

        Returns:
            tuple[str] or list[str]: Names of categories of the dataset.
        """
        if classes is None:
            return cls.CLASSES

        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        return class_names
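When classes is a string, mmcv.list_from_file simply returns the stripped lines of that file, so a class-list file holds one name per line. A minimal sketch (the path is illustrative):

import mmcv

with open('/tmp/classes.txt', 'w') as f:
    f.write('person\ncar\nbicycle\n')

print(mmcv.list_from_file('/tmp/classes.txt'))
# ['person', 'car', 'bicycle']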
Code example #15
def cvt_annotations(devkit_path, years, split, out_file):
    if not isinstance(years, list):
        years = [years]
    annotations = []
    for year in years:
        filelist = osp.join(devkit_path,
                            'VOC{}/ImageSets/Main/{}.txt'.format(year, split))
        if not osp.isfile(filelist):
            print('filelist does not exist: {}, skip voc{} {}'.format(
                filelist, year, split))
            return
        img_names = mmcv.list_from_file(filelist)
        xml_paths = [
            osp.join(devkit_path,
                     'VOC{}/Annotations/{}.xml'.format(year, img_name))
            for img_name in img_names
        ]
        img_paths = [
            'VOC{}/JPEGImages/{}.jpg'.format(year, img_name)
            for img_name in img_names
        ]
        part_annotations = mmcv.track_progress(parse_xml,
                                               list(zip(xml_paths, img_paths)))
        annotations.extend(part_annotations)
    mmcv.dump(annotations, out_file)
    return annotations
Code example #16
File: kie_dataset.py Project: quincylin1/mmocr-1
    def __init__(self,
                 ann_file,
                 loader,
                 dict_file,
                 img_prefix='',
                 pipeline=None,
                 norm=10.,
                 directed=False,
                 test_mode=True,
                 **kwargs):
        super().__init__(ann_file,
                         loader,
                         pipeline,
                         img_prefix=img_prefix,
                         test_mode=test_mode)
        assert osp.exists(dict_file)

        self.norm = norm
        self.directed = directed
        self.dict = {
            '': 0,
            **{
                line.rstrip('\r\n'): ind
                for ind, line in enumerate(mmcv.list_from_file(dict_file), 1)
            }
        }
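The dict built in this constructor maps each character in dict_file to a 1-based index, reserving 0 for the empty string. A standalone sketch of the same pattern, with an illustrative path and contents:

import mmcv

with open('/tmp/dict.txt', 'w') as f:
    f.write('a\nb\nc\n')

char2id = {
    '': 0,
    **{
        line.rstrip('\r\n'): ind
        for ind, line in enumerate(mmcv.list_from_file('/tmp/dict.txt'), 1)
    }
}
print(char2id)  # {'': 0, 'a': 1, 'b': 2, 'c': 3}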
Code example #17
File: self_voc.py Project: BadUncleBoy/MMDETECTION
    def load_annotations(self, ann_file):
        # load image list from file
        lines = mmcv.list_from_file(self.ann_file)

        data_infos = []

        for line in lines:
            annos = line.strip().split(" ")

            img_name = annos[1]
            width, height = int(annos[2]), int(annos[3])
            bboxes = [int(each) for each in annos[4:]]

            data_info = dict(filename=img_name, width=width, height=height)

            gt_bboxes = []
            gt_labels = []
            gt_bboxes_ignore = []
            gt_labels_ignore = []
            for x in range(len(bboxes) // 5):
                gt_labels.append(bboxes[x * 5])
                gt_bboxes.append(bboxes[x * 5 + 1:x * 5 + 5])

            data_anno = dict(bboxes=np.array(gt_bboxes,
                                             dtype=np.float32).reshape(-1, 4),
                             labels=np.array(gt_labels, dtype=np.int64),
                             bboxes_ignore=np.array(gt_bboxes_ignore,
                                                    dtype=np.float32).reshape(
                                                        -1, 4),
                             labels_ignore=np.array(gt_labels_ignore,
                                                    dtype=np.int64))

            data_info.update(ann=data_anno)
            data_infos.append(data_info)
        return data_infos
Code example #18
File: object_2d.py Project: houwenbo87/mmdetection
    def load_annotations(self, ann_file):
        self.label_path = {}
        self.img_infos = []
        xml_files = mmcv.list_from_file(ann_file)
        # bar = Bar('load labels', max=len(xml_files), suffix='%(percent)d%%')
        bar = Bar('load labels', max=len(xml_files))
        for id, xml_file in enumerate(xml_files):
            bar.next()
            xml_path = osp.join(self.img_prefix, xml_file)
            try:
                tree = ET.parse(xml_path)
            except Exception:
                print('Can not open {}\n'.format(xml_path))
            else:
                root = tree.getroot()
                impath = root.find('path').text
                fullpath = osp.join(self.img_prefix, impath)
                if not osp.isfile(fullpath):
                    continue
                size = root.find('size')
                width = int(size.find('width').text)
                height = int(size.find('height').text)
                # id = root.find('filename').text
                self.img_infos.append(
                    dict(id=id, filename=impath, width=width, height=height))
                self.label_path[id] = xml_path
        bar.finish()
        print('total label num: {}'.format(len(self.label_path)))
        return self.img_infos
Code example #19
    def load_annotations(self, ann_file):
        cat2label = {k: i for i, k in enumerate(self.CLASSES)}
        # load image list from file
        image_list = mmcv.list_from_file(self.ann_file)
    
        data_infos = []
        # convert annotations to middle format
        for image_id in image_list:
            filename = f'{self.img_prefix}/{image_id}.jpg'
            image = mmcv.imread(filename)
            height, width = image.shape[:2]
    
            data_info = dict(filename=f'{image_id}.jpg', width=width, height=height)
    
            # load annotations
            label_prefix = self.img_prefix.replace('image_2', 'label_2')
            lines = mmcv.list_from_file(osp.join(label_prefix, f'{image_id}.txt'))
    
            content = [line.strip().split(' ') for line in lines]
            bbox_names = [x[0] for x in content]
            bboxes = [[float(info) for info in x[1:5]] for x in content]
    
            gt_bboxes = []
            gt_labels = []
            gt_bboxes_ignore = []
            gt_labels_ignore = []
    
            # filter 'DontCare'
            for bbox_name, bbox in zip(bbox_names, bboxes):
                if bbox_name in cat2label:
                    gt_labels.append(cat2label[bbox_name])
                    gt_bboxes.append(bbox)
                else:
                    gt_labels_ignore.append(-1)
                    gt_bboxes_ignore.append(bbox)

            data_anno = dict(
                bboxes=np.array(gt_bboxes, dtype=np.float32).reshape(-1, 4),
                labels=np.array(gt_labels, dtype=np.int64),
                bboxes_ignore=np.array(gt_bboxes_ignore,
                                       dtype=np.float32).reshape(-1, 4),
                labels_ignore=np.array(gt_labels_ignore, dtype=np.int64))

            data_info.update(ann=data_anno)
            data_infos.append(data_info)

        return data_infos
Code example #20
    def load_annotations(self, ann_file):
        self.project = sly.Project(self.img_prefix, sly.OpenMode.READ)
        anno_list = mmcv.list_from_file(ann_file)
        print('data loading ...')
        img_infos = mmcv.track_parallel_progress(self._load_ann, anno_list, 16)
        print('data loading finished !!!')

        return img_infos
Code example #21
    def load_annotations(self, ann_file):
        img_ids = mmcv.list_from_file(self.ann_file)
        self.img_ids = img_ids
        pool = Pool(12)
        img_infos = pool.map(self._load_annotations, img_ids)
        pool.close()
        pool.join()
        print("\nload success with %d samples in load_annotations" %
              len(img_infos))
        return img_infos
Code example #22
def parse_val_list(ann_dir):
    """Parse the txt file of ImageNet VID val dataset."""
    img_list = osp.join(ann_dir, 'Lists/VID_val_videos.txt')
    img_list = mmcv.list_from_file(img_list)
    val_infos = defaultdict(list)
    for info in img_list:
        info = info.split(' ')
        val_infos[info[0]] = dict(num_frames=int(info[-1]))
    return val_infos
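Judging from the parsing above, each line of VID_val_videos.txt begins with a video key and ends with its frame count; the fields in between are not used here. A minimal sketch with a made-up line, assuming the function and its imports above are in scope:

import os
import os.path as osp
import tempfile

ann_dir = tempfile.mkdtemp()
os.makedirs(osp.join(ann_dir, 'Lists'))
with open(osp.join(ann_dir, 'Lists', 'VID_val_videos.txt'), 'w') as f:
    f.write('val/ILSVRC2015_val_00000000 1 0 464\n')

print(parse_val_list(ann_dir))
# defaultdict(<class 'list'>, {'val/ILSVRC2015_val_00000000': {'num_frames': 464}})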
Code example #23
File: lyft_eval.py Project: zhanggefan/mmdetection3d
def load_lyft_gts(lyft, data_root, eval_split, logger=None):
    """Loads ground truth boxes from database.

    Args:
        lyft (:obj:`LyftDataset`): Lyft class in the sdk.
        data_root (str): Root of data for reading splits.
        eval_split (str): Name of the split for evaluation.
        logger (logging.Logger | str | None): Logger used for printing
            related information during evaluation. Default: None.

    Returns:
        list[dict]: List of annotation dictionaries.
    """
    split_scenes = mmcv.list_from_file(osp.join(data_root,
                                                f'{eval_split}.txt'))

    # Read out all sample_tokens in DB.
    sample_tokens_all = [s['token'] for s in lyft.sample]
    assert len(sample_tokens_all) > 0, 'Error: Database has no samples!'

    if eval_split == 'test':
        # Check that you aren't trying to cheat :)
        assert len(lyft.sample_annotation) > 0, \
            'Error: You are trying to evaluate on the test set ' \
            'but you do not have the annotations!'

    sample_tokens = []
    for sample_token in sample_tokens_all:
        scene_token = lyft.get('sample', sample_token)['scene_token']
        scene_record = lyft.get('scene', scene_token)
        if scene_record['name'] in split_scenes:
            sample_tokens.append(sample_token)

    all_annotations = []

    print_log('Loading ground truth annotations...', logger=logger)
    # Load annotations and filter predictions and annotations.
    for sample_token in mmcv.track_iter_progress(sample_tokens):
        sample = lyft.get('sample', sample_token)
        sample_annotation_tokens = sample['anns']
        for sample_annotation_token in sample_annotation_tokens:
            # Get label name in detection task and filter unused labels.
            sample_annotation = \
                lyft.get('sample_annotation', sample_annotation_token)
            detection_name = sample_annotation['category_name']
            if detection_name is None:
                continue
            annotation = {
                'sample_token': sample_token,
                'translation': sample_annotation['translation'],
                'size': sample_annotation['size'],
                'rotation': sample_annotation['rotation'],
                'name': detection_name,
            }
            all_annotations.append(annotation)

    return all_annotations
Code example #24
    def __init__(self, img_prefix, ann_file, batch_size, device_id, num_gpus):
        self.images_dir = img_prefix
        self.batch_size = batch_size
        self.files = mmcv.list_from_file(ann_file)
        self.data_set_len = len(self.files)
        self.files = self.files[self.data_set_len * device_id //
                                num_gpus:self.data_set_len * (device_id + 1) //
                                num_gpus]
        self.n = self.data_set_len
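The slicing above hands each GPU a contiguous shard of the file list; the integer division keeps the shards disjoint while covering every file. A quick sanity check of that arithmetic with made-up numbers:

files = list(range(10))
n = len(files)
num_gpus = 3
for device_id in range(num_gpus):
    shard = files[n * device_id // num_gpus:n * (device_id + 1) // num_gpus]
    print(device_id, shard)
# 0 [0, 1, 2]
# 1 [3, 4, 5]
# 2 [6, 7, 8, 9]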
Code example #25
def main():

    username = getpass.getuser()
    xml_dir = osp.join('/media/', username,
                       'Data/DoubleCircle/datasets/CVC/annotations-xml/')
    pkl_dir = osp.join('/media/', username,
                       'Data/DoubleCircle/datasets/CVC/annotations-pkl/')
    txt_dir = osp.join('/media/', username,
                       'Data/DoubleCircle/datasets/CVC/imageSets/')
    img_dir = osp.join('/media/', username,
                       'Data/DoubleCircle/datasets/CVC/images')
    mmcv.mkdir_or_exist(pkl_dir)

    # all train images
    train_filelist = osp.join(txt_dir, 'train-all.txt')
    img_all_names = mmcv.list_from_file(train_filelist)
    xml_all_paths = [
        osp.join(xml_dir, '{}.xml'.format(img_name))
        for img_name in img_all_names
    ]
    img_all_paths = [
        osp.join(img_dir, '{}.png'.format(img_name))
        for img_name in img_all_names
    ]
    flags = ['train' for _ in img_all_names]
    train_annotations = track_progress_yuan(
        parse_xml, list(zip(xml_all_paths, img_all_paths, flags)))
    mmcv.dump(train_annotations, osp.join(pkl_dir, 'train-all.pkl'))

    # all test images
    test_filelist = osp.join(txt_dir, 'test-all.txt')
    img_all_names = mmcv.list_from_file(test_filelist)
    xml_all_paths = [
        osp.join(xml_dir, '{}.xml'.format(img_name))
        for img_name in img_all_names
    ]
    img_all_paths = [
        osp.join(img_dir, '{}.png'.format(img_name))
        for img_name in img_all_names
    ]
    flags = ['test' for _ in img_all_names]
    test_annotations = track_progress_yuan(
        parse_xml, list(zip(xml_all_paths, img_all_paths, flags)))
    mmcv.dump(test_annotations, osp.join(pkl_dir, 'test-all.pkl'))
Code example #26
def read_movie_list(path):
    """ read movie list from txt file or json.
    """

    if path.endswith('txt'):
        return mmcv.list_from_file(path)
    elif path.endswith('json'):
        return mmcv.load(path)
    else:
        raise ValueError('File must be `txt` or `json` file.')
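A quick check of both branches, assuming mmcv is importable and read_movie_list above is in scope; the file names are illustrative:

import json

with open('/tmp/movies.txt', 'w') as f:
    f.write('movie_a\nmovie_b\n')
with open('/tmp/movies.json', 'w') as f:
    json.dump(['movie_a', 'movie_b'], f)

assert read_movie_list('/tmp/movies.txt') == ['movie_a', 'movie_b']
assert read_movie_list('/tmp/movies.json') == ['movie_a', 'movie_b']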
Code example #27
File: script.py Project: PaddlePaddle/Contrib
        def get_pred(pred_path):
            pred = []
            lines = mmcv.list_from_file(pred_path)
            for line in lines:
                line = line.split(',')
                bbox = [int(line[i]) for i in range(len(line) - 1)]
                word = line[-1]
                pred.append((bbox, word))

            return pred
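The split above implies each prediction line is a run of comma-separated integer coordinates followed by the recognized word. A tiny illustration with a made-up line:

line = '10,20,110,20,110,60,10,60,hello'.split(',')
bbox = [int(line[i]) for i in range(len(line) - 1)]
word = line[-1]
print(bbox, word)  # [10, 20, 110, 20, 110, 60, 10, 60] hello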
Code example #28
def main():
    username = getpass.getuser()
    xml_dir = osp.join('/home/' + username +
                       '/WangCK/Data/datasets/CVC/annotations-xml/')
    pkl_dir = osp.join('/home/' + username +
                       '/WangCK/Data/datasets/CVC/annotations-pkl/')
    txt_dir = osp.join('/home/' + username +
                       '/WangCK/Data/datasets/CVC/imageSets/')
    img_dir = osp.join('/home/' + username +
                       '/WangCK/Data/datasets/CVC/images')
    mmcv.mkdir_or_exist(pkl_dir)

    # all train images
    train_filelist = osp.join(txt_dir, 'train-all.txt')
    img_all_names = mmcv.list_from_file(train_filelist)
    xml_paths = [
        osp.join(xml_dir, img_name.replace('.txt', '.xml'))
        for img_name in img_all_names
    ]
    img_paths = [
        osp.join(img_dir, img_name.replace('.txt', '.jpg'))
        for img_name in img_all_names
    ]
    flags = ['train' for _ in img_all_names]
    train_annotations = track_progress_kai(
        parse_xml, list(zip(xml_paths, img_paths, flags)))
    mmcv.dump(train_annotations, osp.join(pkl_dir, 'train-all.pkl'))

    # all test images
    test_filelist = osp.join(txt_dir, 'test-all.txt')
    img_all_names = mmcv.list_from_file(test_filelist)
    xml_paths = [
        osp.join(xml_dir, img_name.replace('.txt', '.xml'))
        for img_name in img_all_names
    ]
    img_paths = [
        osp.join(img_dir, img_name.replace('.txt', '.jpg'))
        for img_name in img_all_names
    ]
    flags = ['test' for _ in img_all_names]
    test_annotations = track_progress_kai(
        parse_xml, list(zip(xml_paths, img_paths, flags)))
    mmcv.dump(test_annotations, osp.join(pkl_dir, 'test-all.pkl'))
Code example #29
    def load_annotations(self, ann_file):
        vid_infos_list = []
        vid_ids = mmcv.list_from_file(ann_file)

        def _train_get_vid_id(_id_line):
            _4d_8d, _1, _start_ind, _num_raw_frames = _id_line.split(' ')
            _start_ind = int(_start_ind)
            _end_ind = int(_num_raw_frames)
            _num_frames = _end_ind - _start_ind
            return _4d_8d, _start_ind, _end_ind, _num_frames

        def _val_get_vid_id(_id_line):
            _vid_id, _start_ind, _end_ind, _num_frames = _id_line.split(' ')
            return _vid_id, int(_start_ind), int(_end_ind), int(_num_frames)

        if vid_ids[0].split('/')[0] == 'train':
            vid_id_func = _train_get_vid_id
        elif vid_ids[0].split('/')[0] == 'val':
            raise NotImplementedError
            # vid_id_func = _val_get_vid_id
        else:
            raise ValueError("Unknown prefix in annoation txt file.")

        for id_line in vid_ids:
            # Probe first frame to get info
            vid_id, start_ind, end_ind, num_frames = vid_id_func(id_line)
            foldername = f'Data/VID/{vid_id}.JPEG'
            xml_path = Path(self.img_prefix
                            ) / f'Annotations/VID/{vid_id}/{start_ind:06d}.xml'
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            vid_infos_list.append(
                dict(id=vid_id,
                     filename=foldername,
                     width=width,
                     height=height,
                     start_ind=start_ind,
                     end_ind=end_ind,
                     num_frames=num_frames))
        vid_dict = dict()
        for vid_info in vid_infos_list:
            _id = vid_info['id']
            if _id not in vid_dict:
                vid_dict[_id] = dict(id=_id,
                                     filename=vid_info['filename'],
                                     width=vid_info['width'],
                                     height=vid_info['height'],
                                     frames=[vid_info['start_ind']])
            else:
                vid_dict[_id]['frames'].append(vid_info['start_ind'])

        return list(vid_dict.values())
Code example #30
def main():
    xml_dir = '/media/ser606/Data/DoubleCircle/CVC/annotations-xml/'
    json_dir = '/media/ser606/Data/DoubleCircle/CVC/annotations-json/'
    txt_dir = '/media/ser606/Data/DoubleCircle/CVC/imageSets/'
    img_dir = '/media/ser606/Data/DoubleCircle/CVC/images/'
    mmcv.mkdir_or_exist(json_dir)

    # all train images
    train_filelist = osp.join(txt_dir, 'train-all.txt')
    img_all_names = mmcv.list_from_file(train_filelist)
    img_all_names = [
        img_name.replace('annotations/', '') for img_name in img_all_names
    ]
    xml_all_paths = [
        osp.join(xml_dir, img_name.replace('.txt', '.xml'))
        for img_name in img_all_names
    ]
    img_all_paths = [
        osp.join(img_dir, img_name.replace('.txt', '.jpg'))
        for img_name in img_all_names
    ]
    coco_train = CoCoData(xml_all_paths, img_all_paths,
                          osp.join(json_dir, 'train-all.json'))
    coco_train.convert()

    # all test images
    test_filelist = osp.join(txt_dir, 'test-all.txt')
    img_test_names = mmcv.list_from_file(test_filelist)
    img_test_names = [
        img_name.replace('annotations/', '') for img_name in img_test_names
    ]
    xml_test_paths = [
        osp.join(xml_dir, img_name.replace('.txt', '.xml'))
        for img_name in img_test_names
    ]
    img_test_paths = [
        osp.join(img_dir, img_name.replace('.txt', '.jpg'))
        for img_name in img_test_names
    ]
    coco_test = CoCoData(xml_test_paths, img_test_paths,
                         osp.join(json_dir, 'test-all.json'))
    coco_test.convert()