Example #1
    def _convert_json_info(self, info: dict):
        if info is None:
            return None
        temp_ext = None
        field_values = []
        for field_name in self.__class__._info_sequence:
            if field_name == ApiField.EXT:
                continue
            field_values.append(info[field_name])
            if field_name == ApiField.MIME:
                temp_ext = info[field_name].split('/')[1]
                field_values.append(temp_ext)
        for idx, field_name in enumerate(self.__class__._info_sequence):
            if field_name == ApiField.NAME:
                cur_ext = get_file_ext(field_values[idx])
                if not cur_ext:
                    field_values[idx] = "{}.{}".format(field_values[idx],
                                                       temp_ext)
                    break

                cur_ext = cur_ext.replace(".", "").lower()
                if temp_ext == 'jpeg' and cur_ext in ['jpg', 'jpeg']:
                    break

                if temp_ext not in field_values[idx]:
                    field_values[idx] = "{}.{}".format(field_values[idx],
                                                       temp_ext)
                break
        return self.__class__.Info._make(field_values)
Example #2
    def _convert_json_info(self, info: dict, skip_missing=True):
        if info is None:
            return None
        temp_ext = None
        field_values = []
        for field_name in self.info_sequence():
            if field_name == ApiField.EXT:
                continue
            if skip_missing is True:
                val = info.get(field_name, None)
            else:
                val = info[field_name]
            field_values.append(val)
            if field_name == ApiField.MIME:
                temp_ext = val.split('/')[1]
                field_values.append(temp_ext)
        for idx, field_name in enumerate(self.info_sequence()):
            if field_name == ApiField.NAME:
                cur_ext = get_file_ext(field_values[idx]).replace(".", "").lower()
                if not cur_ext:
                    field_values[idx] = "{}.{}".format(field_values[idx], temp_ext)
                    break
                if temp_ext == 'jpeg' and cur_ext in ['jpg', 'jpeg', 'mpo']:
                    break
                if temp_ext != cur_ext:
                    field_values[idx] = "{}.{}".format(field_values[idx], temp_ext)
                break
        return self.InfoType(*field_values)
Example #3
    def _read(self):
        if not dir_exists(self.img_dir):
            raise FileNotFoundError('Image directory not found: {!r}'.format(
                self.img_dir))
        if not dir_exists(self.ann_dir):
            raise FileNotFoundError(
                'Annotation directory not found: {!r}'.format(self.ann_dir))

        ann_paths = list_files(self.ann_dir, [ANN_EXT])
        img_paths = list_files(self.img_dir, image.SUPPORTED_IMG_EXTS)

        ann_names = set(get_file_name(path) for path in ann_paths)
        img_names = {
            get_file_name(path): get_file_ext(path)
            for path in img_paths
        }

        if len(img_names) == 0 or len(ann_names) == 0:
            raise RuntimeError('Dataset {!r} is empty'.format(self.name))
        if ann_names != set(img_names.keys()):
            raise RuntimeError(
                'File names in dataset {!r} are inconsistent'.format(
                    self.name))

        self._items_exts = img_names
Example #4
def gen_video_stream_name(file_name, stream_index):
    '''
    Create a name for a video stream from the given file name and stream index.
    :param file_name: str
    :param stream_index: int
    :return: str
    '''
    return "{}_stream_{}_{}{}".format(get_file_name(file_name), stream_index,
                                      rand_str(5), get_file_ext(file_name))
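For illustration, a hedged usage sketch (the file name is hypothetical, and the 5-character suffix produced by rand_str is random, so the exact output varies):

stream_name = gen_video_stream_name('movie.mp4', 2)
# e.g. 'movie_stream_2_kq3xz.mp4'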
Example #5
def generate_names(base_name, count):
    name = sly_fs.get_file_name(base_name)
    ext = sly_fs.get_file_ext(base_name)

    names = [base_name]
    for idx in range(1, count):
        names.append('{}_{:02d}{}'.format(name, idx, ext))

    return names
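A small sketch of the result, assuming sly_fs.get_file_name / sly_fs.get_file_ext split 'photo.jpg' into 'photo' and '.jpg' (hypothetical file name):

names = generate_names('photo.jpg', 3)
# -> ['photo.jpg', 'photo_01.jpg', 'photo_02.jpg']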
Example #6
    def _get_free_name(exist_check_fn, name):
        res_title = name
        suffix = 1

        name_without_ext = get_file_name(name)
        ext = get_file_ext(name)

        while exist_check_fn(res_title):
            res_title = '{}_{:03d}{}'.format(name_without_ext, suffix, ext)
            suffix += 1
        return res_title
Example #7
def generate_free_name(used_names, possible_name, with_ext=False):
    res_name = possible_name
    new_suffix = 1
    while res_name in set(used_names):
        if with_ext is True:
            res_name = '{}_{:02d}{}'.format(
                sly_fs.get_file_name(possible_name), new_suffix,
                sly_fs.get_file_ext(possible_name))
        else:
            res_name = '{}_{:02d}'.format(possible_name, new_suffix)
        new_suffix += 1
    return res_name
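A minimal sketch of the collision handling, with hypothetical names and the same sly_fs helpers as above:

used = ['img.png', 'img_01.png']
generate_free_name(used, 'img.png', with_ext=True)    # -> 'img_02.png'
generate_free_name(used, 'photo.png', with_ext=True)  # -> 'photo.png' (no clash, returned as-is)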
Example #8
    def download(self, team_id, remote_path, local_save_path, cache: FileCache = None, progress_cb=None):
        if cache is None:
            self._download(team_id, remote_path, local_save_path, progress_cb)
        else:
            file_info = self.get_info_by_path(team_id, remote_path)
            if file_info.hash is None:
                self._download(team_id, remote_path, local_save_path, progress_cb)
            else:
                cache_path = cache.check_storage_object(file_info.hash, get_file_ext(remote_path))
                if cache_path is None:
                    # file not in cache
                    self._download(team_id, remote_path, local_save_path, progress_cb)
                    if file_info.hash != get_file_hash(local_save_path):
                        raise KeyError(f"Remote and local hashes are different (team id: {team_id}, file: {remote_path})")
                    cache.write_object(local_save_path, file_info.hash)
                else:
                    cache.read_object(file_info.hash, local_save_path)
                    if progress_cb is not None:
                        progress_cb(get_file_size(local_save_path))
Example #9
def load_font(font_file_name: str,
              font_size: int = 12) -> ImageFont.FreeTypeFont:
    """
    Set global font true-type for drawing.
    Args:
        font_file_name: name of font file (example: 'DejaVuSansMono.ttf')
        font_size: selected font size
    Returns:
        loaded from file font
    """
    if get_file_ext(font_file_name) == FONT_EXTENSION:
        font_path = _get_font_path_by_name(font_file_name)
        if (font_path is not None) and file_exists(font_path):
            return ImageFont.truetype(font_path, font_size, encoding='utf-8')
        else:
            raise ValueError(
                'Font file "{}" not found in system paths. Try to set another font.'
                .format(font_file_name))
    else:
        raise ValueError('Only TrueType fonts are supported!')
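A usage sketch, assuming FONT_EXTENSION is '.ttf' and _get_font_path_by_name can locate the DejaVu font on the system:

font = load_font('DejaVuSansMono.ttf', font_size=16)
# The returned ImageFont.FreeTypeFont can be passed to PIL drawing calls, e.g.
# ImageDraw.Draw(img).text((10, 10), 'label', font=font)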
Example #10
    def get_free_name(self, team_id, path):
        directory = Path(path).parent
        name = get_file_name(path)
        ext = get_file_ext(path)
        res_name = name
        suffix = 0

        def _combine(suffix: int = None):
            res = "{}/{}".format(directory, res_name)
            if suffix is not None:
                res += "_{:03d}".format(suffix)
            if ext:
                res += "{}".format(ext)
            return res

        res_path = _combine()
        while self.exists(team_id, res_path):
            res_path = _combine(suffix)
            suffix += 1
        return res_path
Example #11
    def _add_img_file(self, item_name, img_path):
        img_ext = get_file_ext(img_path)
        dst_img_path = self.deprecated_make_img_path(item_name, img_ext)
        if img_path != dst_img_path:  # used only by the agent + API while downloading a project
            copy_file(img_path, dst_img_path)
        self._items_exts[item_name] = img_ext
Example #13
    parser = argparse.ArgumentParser(description='Inference REST client for standalone Supervisely models.')
    parser.add_argument('--server-url', required=True)
    parser.add_argument('--request-type', required=True, choices=SUPPORTED_REQUEST_TYPES)
    parser.add_argument('--in-image', default='')
    parser.add_argument('--out-json', default='')
    args = parser.parse_args()

    request_url = args.server_url + '/' + MODEL + '/' + args.request_type

    response_json = None
    if args.request_type == GET_OUTPUT_META:
        response = requests.post(request_url)
    elif args.request_type == INFERENCE:
        with open(args.in_image, 'rb') as fin:
            img_bytes = fin.read()
        img_ext = sly_fs.get_file_ext(args.in_image)
        encoder = MultipartEncoder({IMAGE: (args.in_image, io.BytesIO(img_bytes), 'application/octet-stream')})
        response = requests.post(request_url, data=encoder, headers={'Content-Type': encoder.content_type})
    else:
        raise ValueError(
            'Unknown model request type: {!r}. Only the following request types are supported: {!r}.'.format(
                args.request_type, SUPPORTED_REQUEST_TYPES))

    response.raise_for_status()
    response_str = json.dumps(response.json(), indent=4, sort_keys=True)

    if args.out_json:
        with open(args.out_json, 'w') as fout:
            fout.write(response_str)
    else:
        print(response_str)
Example #14
    def img_to_hash(item):
        img, name = item[0], item[1]
        return sly_image.get_hash(img, get_file_ext(name))
Example #15
    def img_to_bytes_stream(item):
        img, name = item[0], item[1]
        img_bytes = sly_image.write_bytes(img, get_file_ext(name))
        return io.BytesIO(img_bytes)
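For illustration, a hypothetical call to the helper above, assuming sly_image.write_bytes encodes a NumPy RGB array using the given extension:

import numpy as np

img = np.zeros((64, 64, 3), dtype=np.uint8)       # blank RGB image (hypothetical data)
stream = img_to_bytes_stream((img, 'blank.png'))  # io.BytesIO holding the PNG-encoded bytes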
Example #16
def from_sl_to_cityscapes(api: sly.Api, task_id, context, state, app_logger):
    def get_image_and_ann():
        mkdir(image_dir_path)
        mkdir(ann_dir)
        image_path = os.path.join(image_dir_path, image_name)
        api.image.download_path(image_id, image_path)
        image_ext_to_png(image_path)

        mask_color, mask_label, poly_json = from_ann_to_cityscapes_mask(
            ann, name2id, app_logger, train_val_flag)
        # dump_json_file(poly_json,
        #                os.path.join(ann_dir, get_file_name(base_image_name) + cityscapes_polygons_suffix))
        # write(
        #     os.path.join(ann_dir,
        #                  get_file_name(base_image_name) + cityscapes_color_suffix), mask_color)
        # write(
        #     os.path.join(ann_dir,
        #                  get_file_name(base_image_name) + cityscapes_labels_suffix), mask_label)

        dump_json_file(
            poly_json,
            os.path.join(
                ann_dir,
                get_file_name(base_image_name).replace('_leftImg8bit', '') +
                cityscapes_polygons_suffix))
        write(
            os.path.join(
                ann_dir,
                get_file_name(base_image_name).replace('_leftImg8bit', '') +
                cityscapes_color_suffix), mask_color)
        write(
            os.path.join(
                ann_dir,
                get_file_name(base_image_name).replace('_leftImg8bit', '') +
                cityscapes_labels_suffix), mask_label)

    project_name = api.project.get_info_by_id(PROJECT_ID).name
    ARCHIVE_NAME = '{}_{}_Cityscapes.tar.gz'.format(PROJECT_ID, project_name)
    meta_json = api.project.get_meta(PROJECT_ID)
    meta = sly.ProjectMeta.from_json(meta_json)
    has_bitmap_poly_shapes = False
    for obj_class in meta.obj_classes:
        if obj_class.geometry_type not in possible_geometries:
            app_logger.warn(
                f'Cityscapes format supports only bitmap and polygon classes, {obj_class.geometry_type} will be skipped'
            )
        else:
            has_bitmap_poly_shapes = True

    if has_bitmap_poly_shapes is False:
        raise Exception(
            'Input project does not contain bitmap or polygon classes')
        my_app.stop()

    RESULT_ARCHIVE = os.path.join(my_app.data_dir, ARCHIVE_NAME)
    RESULT_DIR = os.path.join(my_app.data_dir, RESULT_DIR_NAME)
    result_images_train = os.path.join(RESULT_DIR, images_dir_name,
                                       default_dir_train)
    result_images_val = os.path.join(RESULT_DIR, images_dir_name,
                                     default_dir_val)
    result_images_test = os.path.join(RESULT_DIR, images_dir_name,
                                      default_dir_test)
    result_anns_train = os.path.join(RESULT_DIR, annotations_dir_name,
                                     default_dir_train)
    result_anns_val = os.path.join(RESULT_DIR, annotations_dir_name,
                                   default_dir_val)
    result_anns_test = os.path.join(RESULT_DIR, annotations_dir_name,
                                    default_dir_test)
    sly.fs.mkdir(RESULT_DIR)
    app_logger.info("Cityscapes Dataset folder has been created")

    class_to_id = []
    name2id = {}
    for idx, obj_class in enumerate(meta.obj_classes):
        if obj_class.geometry_type not in possible_geometries:
            continue
        curr_class = {}
        curr_class['name'] = obj_class.name
        curr_class['id'] = idx + 1
        curr_class['color'] = obj_class.color
        class_to_id.append(curr_class)
        name2id[obj_class.name] = (idx + 1, idx + 1, idx + 1)

    dump_json_file(class_to_id, os.path.join(RESULT_DIR, 'class_to_id.json'))
    app_logger.info("Writing classes with colors to class_to_id.json file")

    datasets = api.dataset.get_list(PROJECT_ID)
    for dataset in datasets:
        images_dir_path_train = os.path.join(result_images_train, dataset.name)
        images_dir_path_val = os.path.join(result_images_val, dataset.name)
        images_dir_path_test = os.path.join(result_images_test, dataset.name)
        anns_dir_path_train = os.path.join(result_anns_train, dataset.name)
        anns_dir_path_val = os.path.join(result_anns_val, dataset.name)
        anns_dir_path_test = os.path.join(result_anns_test, dataset.name)

        images = api.image.get_list(dataset.id)
        progress = sly.Progress(
            'Convert images and anns from dataset {}'.format(dataset.name),
            len(images), app_logger)
        if len(images) < 3:
            app_logger.warn(
                'Number of images in {} dataset is less than 3, val and train directories for this dataset will not be created'
                .format(dataset.name))

        image_ids = [image_info.id for image_info in images]
        base_image_names = [image_info.name for image_info in images]
        # image_names = [
        #     get_file_name(image_info.name) + cityscapes_images_suffix + get_file_ext(image_info.name) for
        #     image_info in images
        # ]

        image_names = [
            get_file_name(image_info.name.replace('_leftImg8bit', '')) + \
            cityscapes_images_suffix + get_file_ext(image_info.name) for image_info in images
        ]

        ann_infos = api.annotation.download_batch(dataset.id, image_ids)
        anns = [
            sly.Annotation.from_json(ann_info.annotation, meta)
            for ann_info in ann_infos
        ]

        splitter = get_tags_splitter(anns)
        curr_splitter = {'train': 0, 'val': 0, 'test': 0}

        for ann, image_id, image_name, base_image_name in zip(
                anns, image_ids, image_names, base_image_names):
            train_val_flag = True
            try:
                split_name = ann.img_tags.get('split').value
                if split_name == 'train':
                    image_dir_path = images_dir_path_train
                    ann_dir = anns_dir_path_train
                elif split_name == 'val':
                    image_dir_path = images_dir_path_val
                    ann_dir = anns_dir_path_val
                else:
                    image_dir_path = images_dir_path_test
                    ann_dir = anns_dir_path_test
                    train_val_flag = False
            except:
                ann_tags = [tag.name for tag in ann.img_tags]
                separator_tags = list(set(ann_tags) & set(possible_tags))
                if len(separator_tags) > 1:
                    app_logger.warn(
                        '''There is more than one separator tag for {} image. {}
                    tag will be used for the split'''.format(
                            image_name, separator_tags[0]))

                if len(separator_tags) >= 1:
                    if separator_tags[0] == 'train':
                        image_dir_path = images_dir_path_train
                        ann_dir = anns_dir_path_train
                    elif separator_tags[0] == 'val':
                        image_dir_path = images_dir_path_val
                        ann_dir = anns_dir_path_val
                    else:
                        image_dir_path = images_dir_path_test
                        ann_dir = anns_dir_path_test
                        train_val_flag = False

                if len(separator_tags) == 0:
                    if curr_splitter['test'] == splitter['test']:
                        curr_splitter = {'train': 0, 'val': 0, 'test': 0}
                    if curr_splitter['train'] < splitter['train']:
                        curr_splitter['train'] += 1
                        image_dir_path = images_dir_path_train
                        ann_dir = anns_dir_path_train
                    elif curr_splitter['val'] < splitter['val']:
                        curr_splitter['val'] += 1
                        image_dir_path = images_dir_path_val
                        ann_dir = anns_dir_path_val
                    elif curr_splitter['test'] < splitter['test']:
                        curr_splitter['test'] += 1
                        image_dir_path = images_dir_path_test
                        ann_dir = anns_dir_path_test
                        train_val_flag = False

            get_image_and_ann()

            progress.iter_done_report()

    sly.fs.archive_directory(RESULT_DIR, RESULT_ARCHIVE)
    app_logger.info("Result directory is archived")

    upload_progress = []
    remote_archive_path = "/cityscapes_format/{}/{}".format(
        task_id, ARCHIVE_NAME)

    def _print_progress(monitor, upload_progress):
        if len(upload_progress) == 0:
            upload_progress.append(
                sly.Progress(message="Upload {!r}".format(ARCHIVE_NAME),
                             total_cnt=monitor.len,
                             ext_logger=app_logger,
                             is_size=True))
        upload_progress[0].set_current_value(monitor.bytes_read)

    file_info = api.file.upload(
        team_id=TEAM_ID,
        src=RESULT_ARCHIVE,
        dst=remote_archive_path,
        progress_cb=lambda m: _print_progress(m, upload_progress))

    app_logger.info("Uploaded to Team-Files: {!r}".format(
        file_info.full_storage_url))
    api.task.set_output_archive(task_id,
                                file_info.id,
                                ARCHIVE_NAME,
                                file_url=file_info.full_storage_url)

    my_app.stop()
Example #17
def image_ext_to_png(im_path):
    if get_file_ext(im_path) != '.png':
        im = Image.open(im_path).convert('RGB')
        im.save(im_path[:-1 * len(get_file_ext(im_path))] + '.png')
        silent_remove(im_path)


def write(path, img):
    ensure_base_path(path)
    validate_ext(get_file_ext(path))
    img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)
    return cv2.imwrite(path, img)
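A short usage sketch for these helpers (hypothetical paths), assuming get_file_ext returns the extension with a leading dot as elsewhere in these examples:

import numpy as np

image_ext_to_png('/tmp/photo.jpg')  # re-encodes the file as /tmp/photo.png and removes the .jpg
write('/tmp/labels.png', np.zeros((32, 32, 3), dtype=np.uint8))  # writes an RGB array via OpenCV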
Example #19
    def _get_suffix(self, path):
        return sly_fs.get_file_ext(path)