Code Example #1
def generate_names(base_name, count):
    name = sly_fs.get_file_name(base_name)
    ext = sly_fs.get_file_ext(base_name)

    names = [base_name]
    for idx in range(1, count):
        names.append('{}_{:02d}{}'.format(name, idx, ext))

    return names
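A minimal usage sketch (not part of the library source), assuming sly_fs.get_file_name strips the extension and sly_fs.get_file_ext returns it with the leading dot:

# Hypothetical call; the expected result is shown in the comment
names = generate_names('photo.jpg', 3)
# -> ['photo.jpg', 'photo_01.jpg', 'photo_02.jpg']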
Code Example #2
def gen_video_stream_name(file_name, stream_index):
    '''
    Create a name for a video stream from the given file name and stream index.
    :param file_name: str
    :param stream_index: int
    :return: str
    '''
    return "{}_stream_{}_{}{}".format(get_file_name(file_name), stream_index,
                                      rand_str(5), get_file_ext(file_name))
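A hedged usage sketch; the 5-character suffix below is illustrative only, since rand_str(5) produces a random string:

# Hypothetical call; the random part of the output differs on every run
gen_video_stream_name('movie.mp4', 0)
# -> e.g. 'movie_stream_0_k3f9q.mp4'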
Code Example #3
File: pascal_voc.py Project: supervisely/supervisely
def save_project_as_pascal_voc_detection(save_path, project: Project):
    import pascal_voc_writer
    
    # Create a Pascal VOC folder structure for every dataset in the project
    for dataset in project.datasets:
        pascal_dataset_path = os.path.join(save_path, dataset.name)

        images_dir = os.path.join(pascal_dataset_path, 'JPEGImages')
        anns_dir = os.path.join(pascal_dataset_path, 'Annotations')
        lists_dir = os.path.join(pascal_dataset_path, 'ImageSets/Layout')

        fs_utils.mkdir(pascal_dataset_path)
        for subdir in ['ImageSets',  # Train list, Val list, etc.
                       'ImageSets/Layout',
                       'Annotations',
                       'JPEGImages']:
            fs_utils.mkdir(os.path.join(pascal_dataset_path, subdir))

        samples_by_tags = defaultdict(list)  # TRAIN: [img_1, img2, ..]

        for item_name in dataset:
            img_path, ann_path = dataset.get_item_paths(item_name)
            no_ext_name = fs_utils.get_file_name(item_name)
            pascal_img_path = os.path.join(images_dir, no_ext_name + OUT_IMG_EXT)
            pascal_ann_path = os.path.join(anns_dir, no_ext_name + XML_EXT)

            if item_name.endswith(OUT_IMG_EXT):
                fs_utils.copy_file(img_path, pascal_img_path)
            else:
                img = image_utils.read(img_path)
                image_utils.write(pascal_img_path, img)

            ann = Annotation.load_json_file(ann_path, project_meta=project.meta)

            # Read tags for images lists generation
            for tag in ann.img_tags:
                samples_by_tags[tag.name].append((no_ext_name, len(ann.labels)))

            writer = pascal_voc_writer.Writer(path=pascal_img_path,
                                              width=ann.img_size[1],
                                              height=ann.img_size[0])

            for label in ann.labels:
                obj_class = label.obj_class
                rect: Rectangle = label.geometry.to_bbox()
                writer.addObject(name=obj_class.name,
                                 xmin=rect.left,
                                 ymin=rect.top,
                                 xmax=rect.right,
                                 ymax=rect.bottom)
            writer.save(pascal_ann_path)

        save_images_lists(lists_dir, samples_by_tags)
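A hedged usage sketch for the exporter above, assuming Project and OpenMode are imported from the library; both paths are placeholders:

# Hypothetical invocation: read an existing Supervisely project and export it
project = Project('/data/sly_project', OpenMode.READ)
save_project_as_pascal_voc_detection('/data/pascal_export', project)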
Code Example #4
File: image_api.py Project: supervisely/supervisely
    def _get_free_name(exist_check_fn, name):
        res_title = name
        suffix = 1

        name_without_ext = get_file_name(name)
        ext = get_file_ext(name)

        while exist_check_fn(res_title):
            res_title = '{}_{:03d}{}'.format(name_without_ext, suffix, ext)
            suffix += 1
        return res_title
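A hedged sketch of the calling pattern: exist_check_fn is just a callable, so an in-memory set of taken names works for illustration (the method is shown as a plain function call, and the names are made up):

# Hypothetical exist-check backed by a set of names that are already taken
taken = {'cat.png', 'cat_001.png'}
_get_free_name(lambda n: n in taken, 'cat.png')
# -> 'cat_002.png'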
Code Example #5
def generate_free_name(used_names, possible_name, with_ext=False):
    res_name = possible_name
    new_suffix = 1
    while res_name in set(used_names):
        if with_ext is True:
            res_name = '{}_{:02d}{}'.format(
                sly_fs.get_file_name(possible_name), new_suffix,
                sly_fs.get_file_ext(possible_name))
        else:
            res_name = '{}_{:02d}'.format(possible_name, new_suffix)
        new_suffix += 1
    return res_name
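A minimal usage sketch (made-up names) showing both suffix modes:

# Hypothetical calls; used_names is any in-memory collection of taken names
generate_free_name(['img.png', 'img_01.png'], 'img.png', with_ext=True)
# -> 'img_02.png'
generate_free_name(['ds', 'ds_01'], 'ds')
# -> 'ds_02'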
Code Example #6
File: task_api.py Project: supervisely/supervisely
    def upload_dtl_archive(self, task_id, archive_path, progress_cb=None):
        encoder = MultipartEncoder({
            'id': str(task_id).encode('utf-8'),
            'name': get_file_name(archive_path),
            'archive': (os.path.basename(archive_path),
                        open(archive_path, 'rb'),
                        'application/x-tar')
        })

        def callback(monitor_instance):
            read_mb = monitor_instance.bytes_read / 1024.0 / 1024.0
            if progress_cb is not None:
                progress_cb(read_mb)

        monitor = MultipartEncoderMonitor(encoder, callback)
        self._api.post('tasks.upload.dtl_archive', monitor)
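A hedged usage sketch of the progress-callback wiring, assuming the method is exposed through the API client as api.task; the task id and archive path are placeholders:

# Hypothetical call; progress_cb receives the number of megabytes read so far
api.task.upload_dtl_archive(task_id, '/tmp/dtl_graph.tar',
                            progress_cb=lambda read_mb: print('uploaded {:.1f} MB'.format(read_mb)))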
Code Example #7
    def get_free_name(self, team_id, path):
        directory = Path(path).parent
        name = get_file_name(path)
        ext = get_file_ext(path)
        res_name = name
        suffix = 0

        def _combine(suffix: int = None):
            res = "{}/{}".format(directory, res_name)
            if suffix is not None:
                res += "_{:03d}".format(suffix)
            if ext:
                res += "{}".format(ext)
            return res

        res_path = _combine()
        while self.exists(team_id, res_path):
            res_path = _combine(suffix)
            suffix += 1
        return res_path
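A hedged usage sketch, assuming the method lives on the team-files API object; team_id and the path are placeholders:

# Hypothetical call; returns e.g. '/reports/summary_000.json' if '/reports/summary.json' is taken
free_path = api.file.get_free_name(team_id, '/reports/summary.json')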
Code Example #8
def download_pointcloud_project(api,
                                project_id,
                                dest_dir,
                                dataset_ids=None,
                                download_items=True,
                                log_progress=False):
    LOG_BATCH_SIZE = 1

    key_id_map = KeyIdMap()

    project_fs = PointcloudProject(dest_dir, OpenMode.CREATE)

    meta = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_fs.set_meta(meta)

    datasets_infos = []
    if dataset_ids is not None:
        for ds_id in dataset_ids:
            datasets_infos.append(api.dataset.get_info_by_id(ds_id))
    else:
        datasets_infos = api.dataset.get_list(project_id)

    for dataset in datasets_infos:
        dataset_fs = project_fs.create_dataset(dataset.name)
        pointclouds = api.pointcloud.get_list(dataset.id)

        ds_progress = None
        if log_progress:
            ds_progress = Progress('Downloading dataset: {!r}'.format(dataset.name),
                                   total_cnt=len(pointclouds))
        for batch in batched(pointclouds, batch_size=LOG_BATCH_SIZE):
            pointcloud_ids = [pointcloud_info.id for pointcloud_info in batch]
            pointcloud_names = [
                pointcloud_info.name for pointcloud_info in batch
            ]

            ann_jsons = api.pointcloud.annotation.download_bulk(
                dataset.id, pointcloud_ids)

            for pointcloud_id, pointcloud_name, ann_json in zip(
                    pointcloud_ids, pointcloud_names, ann_jsons):
                if pointcloud_name != ann_json[ApiField.NAME]:
                    raise RuntimeError(
                        "Error in api.pointcloud.annotation.download_bulk: broken order"
                    )

                pointcloud_file_path = dataset_fs.generate_item_path(
                    pointcloud_name)
                if download_items is True:
                    api.pointcloud.download_path(pointcloud_id,
                                                 pointcloud_file_path)

                    related_images_path = dataset_fs.get_related_images_path(
                        pointcloud_name)
                    related_images = api.pointcloud.get_list_related_images(
                        pointcloud_id)
                    for rimage_info in related_images:
                        name = rimage_info[ApiField.NAME]

                        if not has_valid_ext(name):
                            new_name = get_file_name(
                                name)  # to fix cases like .png.json
                            if has_valid_ext(new_name):
                                name = new_name
                                rimage_info[ApiField.NAME] = name
                            else:
                                raise RuntimeError(
                                    'Something is wrong with the photo context '
                                    'filenames. Please contact support.')

                        rimage_id = rimage_info[ApiField.ID]

                        path_img = os.path.join(related_images_path, name)
                        path_json = os.path.join(related_images_path,
                                                 name + ".json")

                        api.pointcloud.download_related_image(
                            rimage_id, path_img)
                        dump_json_file(rimage_info, path_json)

                else:
                    touch(pointcloud_file_path)

                dataset_fs.add_item_file(pointcloud_name,
                                         pointcloud_file_path,
                                         ann=PointcloudAnnotation.from_json(
                                             ann_json, project_fs.meta,
                                             key_id_map),
                                         _validate_item=False)

            if ds_progress is not None:
                ds_progress.iters_done_report(len(batch))

    project_fs.set_key_id_map(key_id_map)
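A hedged driver sketch for the entry point above, assuming Api is the library's client class; the server address, token, and project id are placeholders:

# Hypothetical call: download point clouds, related images, and annotations to a local folder
api = Api('https://app.supervise.ly', 'YOUR_API_TOKEN')
download_pointcloud_project(api, project_id=123, dest_dir='/data/pcd_project',
                            download_items=True, log_progress=True)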