    def convert_to_csv(self, input_data, output_dir, is_dir=True, **kwargs):
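        """Export annotations to a single ``result.csv`` inside ``output_dir``.

        Input fields are copied as-is; output values are prettified and, when
        they are not plain strings, JSON-encoded. Extra ``**kwargs`` are passed
        through to ``pandas.DataFrame.to_csv``.
        """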
        self._check_format(Format.CSV)
        ensure_dir(output_dir)
        output_file = os.path.join(output_dir, 'result.csv')
        records = []
        item_iterator = self.iter_from_dir if is_dir else self.iter_from_json_file

        for item in item_iterator(input_data):
            record = deepcopy(item['input'])
            if item.get('id') is not None:
                record['id'] = item['id']
            for name, value in item['output'].items():
                pretty_value = self._prettify(value)
                record[name] = (
                    pretty_value if isinstance(pretty_value, str)
                    else json.dumps(pretty_value, ensure_ascii=False)
                )
            record['annotator'] = _get_annotator(item)
            record['annotation_id'] = item['annotation_id']
            record['created_at'] = item['created_at']
            record['updated_at'] = item['updated_at']
            record['lead_time'] = item['lead_time']
            if 'agreement' in item:
                record['agreement'] = item['agreement']
            records.append(record)

        pd.DataFrame.from_records(records).to_csv(output_file,
                                                  index=False,
                                                  **kwargs)

    def convert_to_json_min(self, input_data, output_dir, is_dir=True):
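        """Export annotations to a minimal ``result.json`` inside ``output_dir``,
        keeping prettified output values as native JSON types.
        """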
        self._check_format(Format.JSON_MIN)
        ensure_dir(output_dir)
        output_file = os.path.join(output_dir, 'result.json')
        records = []
        item_iterator = self.iter_from_dir if is_dir else self.iter_from_json_file

        for item in item_iterator(input_data):
            record = deepcopy(item['input'])
            if item.get('id') is not None:
                record['id'] = item['id']
            for name, value in item['output'].items():
                record[name] = self._prettify(value)
            record['annotator'] = _get_annotator(item, int_id=True)
            record['annotation_id'] = item['annotation_id']
            record['created_at'] = item['created_at']
            record['updated_at'] = item['updated_at']
            record['lead_time'] = item['lead_time']
            if 'agreement' in item:
                record['agreement'] = item['agreement']
            records.append(record)

        with io.open(output_file, mode='w', encoding='utf8') as fout:
            json.dump(records, fout, indent=2, ensure_ascii=False)

    def convert_to_voc(self, input_data, output_dir, output_image_dir=None, is_dir=True):
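        """Export rectangle annotations to Pascal VOC XML: one
        ``Annotations/<image>.xml`` file per task, with source images downloaded
        into ``output_image_dir`` (``<output_dir>/images`` by default) when they
        are not available locally.
        """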

        self._check_format(Format.VOC)
        ensure_dir(output_dir)
        if output_image_dir is not None:
            ensure_dir(output_image_dir)
            output_image_dir_rel = output_image_dir
        else:
            output_image_dir = os.path.join(output_dir, 'images')
            os.makedirs(output_image_dir, exist_ok=True)
            output_image_dir_rel = 'images'

        def create_child_node(doc, tag, attr, parent_node):
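            # Append a <tag>text</tag> element to parent_node.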
            child_node = doc.createElement(tag)
            text_node = doc.createTextNode(attr)
            child_node.appendChild(text_node)
            parent_node.appendChild(child_node)

        data_key = self._data_keys[0]
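        # data_key is the first configured data key; its value is used as the
        # image path/URL for each task.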
        item_iterator = self.iter_from_dir(input_data) if is_dir else self.iter_from_json_file(input_data)
        for item_idx, item in enumerate(item_iterator):
            if not item['output']:
                logger.warning('No annotations found for item #' + str(item_idx))
                continue
            image_path = item['input'][data_key]
            annotations_dir = os.path.join(output_dir, 'Annotations')
            os.makedirs(annotations_dir, exist_ok=True)

            channels = 3
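            # 3 channels is the fallback; updated below if the downloaded image can be read.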
            if not os.path.exists(image_path):
                try:
                    image_path = download(
                        image_path, output_image_dir, project_dir=self.project_dir,
                        upload_dir=self.upload_dir, return_relative_path=True, download_resources=self.download_resources)
                except Exception:
                    logger.error('Unable to download {image_path}. The item {item} will be skipped'.format(
                        image_path=image_path, item=item), exc_info=True)
                else:
                    full_image_path = os.path.join(output_image_dir, os.path.basename(image_path))
                    # retrieve number of channels from downloaded image
                    try:
                        _, _, channels = get_image_size_and_channels(full_image_path)
                    except Exception:
                        logger.warning(f"Can't read channels from image {image_path}")

            image_name = os.path.splitext(os.path.basename(image_path))[0]

            # concatenate results over all tag names
            bboxes = []
            for key in item['output']:
                bboxes += item['output'][key]

            if len(bboxes) == 0:
                logger.warning(f'Empty bboxes for {item["output"]}')
                continue

            if 'original_width' not in bboxes[0] or 'original_height' not in bboxes[0]:
                logger.warning(f'original_width or original_height not found in {image_name}')
                continue

            width, height = bboxes[0]['original_width'], bboxes[0]['original_height']
            xml_filepath = os.path.join(annotations_dir, image_name + '.xml')

            my_dom = xml.dom.getDOMImplementation()
            doc = my_dom.createDocument(None, 'annotation', None)
            root_node = doc.documentElement
            create_child_node(doc, 'folder', output_image_dir_rel, root_node)
            create_child_node(doc, 'filename', image_name, root_node)

            source_node = doc.createElement('source')
            create_child_node(doc, 'database', 'MyDatabase', source_node)
            create_child_node(doc, 'annotation', 'COCO2017', source_node)
            create_child_node(doc, 'image', 'flickr', source_node)
            create_child_node(doc, 'flickrid', 'NULL', source_node)
            create_child_node(doc, 'annotator', _get_annotator(item, ''), source_node)
            root_node.appendChild(source_node)

            owner_node = doc.createElement('owner')
            create_child_node(doc, 'flickrid', 'NULL', owner_node)
            create_child_node(doc, 'name', 'Label Studio', owner_node)
            root_node.appendChild(owner_node)
            size_node = doc.createElement('size')
            create_child_node(doc, 'width', str(width), size_node)
            create_child_node(doc, 'height', str(height), size_node)
            create_child_node(doc, 'depth', str(channels), size_node)
            root_node.appendChild(size_node)
            create_child_node(doc, 'segmented', '0', root_node)

            for bbox in bboxes:
                key = 'rectanglelabels' if 'rectanglelabels' in bbox else ('labels' if 'labels' in bbox else None)
                if key is None or len(bbox[key]) == 0:
                    continue

                name = bbox[key][0]
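                # Label Studio stores rectangle geometry as percentages of the
                # original image size; convert to absolute pixel coordinates.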
                x = int(bbox['x'] / 100 * width)
                y = int(bbox['y'] / 100 * height)
                w = int(bbox['width'] / 100 * width)
                h = int(bbox['height'] / 100 * height)

                object_node = doc.createElement('object')
                create_child_node(doc, 'name', name, object_node)
                create_child_node(doc, 'pose', 'Unspecified', object_node)
                create_child_node(doc, 'truncated', '0', object_node)
                create_child_node(doc, 'difficult', '0', object_node)
                bndbox_node = doc.createElement('bndbox')
                create_child_node(doc, 'xmin', str(x), bndbox_node)
                create_child_node(doc, 'ymin', str(y), bndbox_node)
                create_child_node(doc, 'xmax', str(x + w), bndbox_node)
                create_child_node(doc, 'ymax', str(y + h), bndbox_node)

                object_node.appendChild(bndbox_node)
                root_node.appendChild(object_node)

            with io.open(xml_filepath, mode='w', encoding='utf8') as fout:
                doc.writexml(fout, addindent=' ' * 4, newl='\n', encoding='utf-8')

    def convert_to_coco(self, input_data, output_dir, output_image_dir=None, is_dir=True):
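        """Export rectangle and polygon annotations to COCO format as a single
        ``result.json``, downloading source images into ``output_image_dir``
        (``<output_dir>/images`` by default) when they are not available locally.
        """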
        self._check_format(Format.COCO)
        ensure_dir(output_dir)
        output_file = os.path.join(output_dir, 'result.json')
        if output_image_dir is not None:
            ensure_dir(output_image_dir)
        else:
            output_image_dir = os.path.join(output_dir, 'images')
            os.makedirs(output_image_dir, exist_ok=True)
        images, annotations = [], []
        categories, category_name_to_id = self._get_labels()
        data_key = self._data_keys[0]
        item_iterator = self.iter_from_dir(input_data) if is_dir else self.iter_from_json_file(input_data)
        for item_idx, item in enumerate(item_iterator):
            if not item['output']:
                logger.warning('No annotations found for item #' + str(item_idx))
                continue
            image_path = item['input'][data_key]
            if not os.path.exists(image_path):
                try:
                    image_path = download(image_path, output_image_dir, project_dir=self.project_dir,
                                          return_relative_path=True, upload_dir=self.upload_dir,
                                          download_resources=self.download_resources)
                except Exception:
                    logger.error('Unable to download {image_path}. The item {item} will be skipped'.format(
                        image_path=image_path, item=item
                    ), exc_info=True)

            # concatenate results over all tag names
            labels = []
            for key in item['output']:
                labels += item['output'][key]

            if len(labels) == 0:
                logger.warning(f'No labels found in {item["output"]}')
                continue

            first = True

            for label in labels:

                category_name = None
                for key in ['rectanglelabels', 'polygonlabels', 'labels']:
                    if key in label and len(label[key]) > 0:
                        category_name = label[key][0]
                        break

                if category_name is None:
                    logger.warning("Unknown label type or labels are empty: " + str(label))
                    continue

                # get image sizes
                if first:

                    if 'original_width' not in label or 'original_height' not in label:
                        logger.warning(f'original_width or original_height not found in {image_path}')
                        continue

                    width, height = label['original_width'], label['original_height']
                    image_id = len(images)
                    images.append({
                        'width': width,
                        'height': height,
                        'id': image_id,
                        'file_name': image_path
                    })
                    first = False

                category_id = category_name_to_id[category_name]

                annotation_id = len(annotations)

                if 'rectanglelabels' in label or 'labels' in label:
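                    # COCO bboxes are [x, y, width, height] in absolute pixels,
                    # converted from Label Studio's percentage coordinates.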
                    x = int(label['x'] / 100 * width)
                    y = int(label['y'] / 100 * height)
                    w = int(label['width'] / 100 * width)
                    h = int(label['height'] / 100 * height)

                    annotations.append({
                        'id': annotation_id,
                        'image_id': image_id,
                        'category_id': category_id,
                        'segmentation': [],
                        'bbox': [x, y, w, h],
                        'ignore': 0,
                        'iscrowd': 0,
                        'area': w * h,
                    })
                elif "polygonlabels" in label:
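                    # COCO polygon segmentation is a flat [x1, y1, x2, y2, ...] list
                    # of absolute pixel coordinates.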
                    points_abs = [(x / 100 * width, y / 100 * height) for x, y in label["points"]]
                    x, y = zip(*points_abs)

                    annotations.append({
                        'id': annotation_id,
                        'image_id': image_id,
                        'category_id': category_id,
                        'segmentation': [[coord for point in points_abs for coord in point]],
                        'bbox': get_polygon_bounding_box(x, y),
                        'ignore': 0,
                        'iscrowd': 0,
                        'area': get_polygon_area(x, y)
                    })
                else:
                    raise ValueError("Unknown label type")

                if os.getenv('LABEL_STUDIO_FORCE_ANNOTATOR_EXPORT'):
                    annotations[-1].update({'annotator': _get_annotator(item)})

        with io.open(output_file, mode='w', encoding='utf8') as fout:
            json.dump({
                'images': images,
                'categories': categories,
                'annotations': annotations,
                'info': {
                    'year': datetime.now().year,
                    'version': '1.0',
                    'description': '',
                    'contributor': 'Label Studio',
                    'url': '',
                    'date_created': str(datetime.now())
                }
            }, fout, indent=2)
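
    # Usage sketch (illustrative only): assuming a Converter instance built from a
    # labeling config and project directory (constructor not shown in this section),
    # the exporters above can be driven like so:
    #
    #   converter = Converter(config=label_config_xml, project_dir='.')
    #   converter.convert_to_csv('export/', 'out/csv', is_dir=True)
    #   converter.convert_to_coco('export.json', 'out/coco', is_dir=False)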