Example #1
    def post(self):
        """ Creates a dataset """
        args = dataset_create.parse_args()
        name = args['name']
        categories = args.get('categories', [])

        category_ids = CategoryModel.bulk_create(categories)

        try:
            dataset = DatasetModel(name=name, categories=category_ids)
            dataset.save()
        except NotUniqueError:
            return {'message': 'Dataset already exists. Check the undo tab to fully delete the dataset.'}, 400

        return query_util.fix_ids(dataset)
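The dataset_create parser used above is defined elsewhere in the project. A minimal sketch of what it could look like, assuming Flask-RESTPlus reqparse; the field names come from the handler, everything else is an assumption:

from flask_restplus import reqparse

# Hypothetical request parser matching the fields read in Example #1.
dataset_create = reqparse.RequestParser()
dataset_create.add_argument('name', required=True, location='json',
                            help='Name of the dataset to create')
dataset_create.add_argument('categories', type=list, default=[], location='json',
                            help='Category names to attach to the new dataset')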
Example #2
    def post(self, from_id, to_id):
        args = copy_annotations.parse_args()
        category_ids = args.get('category_ids')

        image_from = current_user.images.filter(id=from_id).first()
        image_to = current_user.images.filter(id=to_id).first()

        if image_from is None or image_to is None:
            return {'success': False, 'message': 'Invalid image ids'}, 400

        if image_from == image_to:
            return {'success': False, 'message': 'Cannot copy self'}, 400

        if image_from.width != image_to.width or image_from.height != image_to.height:
            return {
                'success': False,
                'message': 'Image sizes do not match'
            }, 400

        if category_ids is None:
            category_ids = DatasetModel.objects(
                id=image_from.dataset_id).first().categories

        query = AnnotationModel.objects(image_id=image_from.id,
                                        category_id__in=category_ids,
                                        deleted=False)

        return {'annotations_created': image_to.copy_annotations(query)}
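The copy_annotations parser is likewise not shown; a minimal sketch, again assuming a Flask-RESTPlus reqparse parser that accepts an optional list of category ids:

from flask_restplus import reqparse

# Hypothetical parser for the category_ids filter read in Example #2.
copy_annotations = reqparse.RequestParser()
copy_annotations.add_argument('category_ids', type=int, action='append',
                              location='json', required=False,
                              help='Restrict the copy to annotations of these categories')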
Example #3
 def import_json_to_all_dataset(self, dataset_id_list, dataset_source_folder_path):
     for dataset in DatasetModel.find_datasets_by_id_list(dataset_id_list):
         result = format_mount_directory(dataset_source_folder_path, dataset.name)\
             .flat_map(lambda path: self.execute(dataset.id, path))
         if result.is_failure():
             return result
     return Result.success(dataset_id_list)
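Examples #3, #5 and #17 chain their steps through a Result object via success(), is_failure() and flat_map(). The class itself is project code that is not shown; a minimal railway-style sketch consistent with those calls (every detail here is an assumption):

class Result:
    def __init__(self, value=None, error=None):
        self.value = value
        self.error = error

    @classmethod
    def success(cls, value):
        return cls(value=value)

    @classmethod
    def failure(cls, error):
        return cls(error=error)

    def is_failure(self):
        return self.error is not None

    def flat_map(self, fn):
        # Apply fn only when the previous step succeeded; otherwise propagate the failure.
        return fn(self.value) if not self.is_failure() else self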
Example #4
    def get(self, dataset_id):
        dataset = DatasetModel.objects(id=dataset_id).first()

        if not dataset:
            return {'message': 'Invalid dataset ID'}, 400

        return dataset.scan()
Example #5
 def check_labelme_json(self, dataset_id_list, dataset_source_folder_path):
     for dataset in DatasetModel.find_datasets_by_id_list(dataset_id_list):
         result = format_mount_directory(dataset_source_folder_path, dataset.name)\
             .flat_map(lambda path: self.find_labelme_json(path)\
             .flat_map(lambda labelme_json: LabelChecker.check_string(labelme_json)))
         if result.is_failure():
             return result
     return Result.success('')
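format_mount_directory, shared by Examples #3 and #5, is also project code. A hedged guess at its behaviour, building on the Result sketch above: join the mount folder with the dataset name and fail when that directory does not exist.

import os

def format_mount_directory(source_folder_path, dataset_name):
    path = os.path.join(source_folder_path, dataset_name)
    if not os.path.isdir(path):
        return Result.failure('Directory not found: {}'.format(path))
    return Result.success(path)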
Example #6
    def get(self, refset_id):

        refset = DatasetModel.objects(id=refset_id).first()
        
        if not refset:
            return {'message': 'Invalid refset ID'}, 400
        
        return refset.scan()
Example #7
    def get(self, image_id):
        """ Called when loading from the annotator client """
        image = ImageModel.objects(id=image_id)\
            .exclude('events').first()

        if image is None:
            return {'success': False, 'message': 'Could not load image'}, 400

        # TODO: filter by the current user's datasets once login handling is in place
        # dataset = current_user.datasets.filter(id=image.dataset_id).first()
        dataset = DatasetModel.objects(id=image.dataset_id).first()
        # add condition if dataset is_public?
        if dataset is None:
            return {'success': False, 'message': 'Could not find associated dataset'}, 400

        categories = CategoryModel.objects(deleted=False)\
            .in_bulk(dataset.categories).items()

        # Get next and previous image
        images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
        pre = images.filter(file_name__lt=image.file_name).order_by('-file_name').first()
        nex = images.filter(file_name__gt=image.file_name).order_by('file_name').first()

        preferences = {}
        if not Config.LOGIN_DISABLED and current_user.is_authenticated:
            # TODO: revisit once per-user login handling is finalized
            preferences = current_user.preferences

        # Generate data about the image to return to client
        data = {
            'image': query_util.fix_ids(image),
            'categories': [],
            'dataset': query_util.fix_ids(dataset),
            'preferences': preferences,
            'permissions': {
                'dataset': dataset.permissions(current_user),
                'image': image.permissions(current_user)
            }
        }

        data['image']['previous'] = pre.id if pre else None
        data['image']['next'] = nex.id if nex else None

        for category in categories:
            category = query_util.fix_ids(category[1])

            category_id = category.get('id')
            annotations = AnnotationModel.objects(image_id=image_id, category_id=category_id, deleted=False)\
                .exclude('events').all()

            category['show'] = True
            category['visualize'] = False
            category['annotations'] = [] if annotations is None else query_util.fix_ids(annotations)
            data.get('categories').append(category)

        return data
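query_util.fix_ids appears throughout these handlers. It serializes a MongoEngine document or queryset into plain dicts that expose an id key instead of Mongo's _id; a rough sketch of that behaviour (the real helper lives in the project's query_util module):

import json

def fix_ids(q):
    data = json.loads(q.to_json())

    def _fix(obj):
        if '_id' in obj:
            raw = obj.pop('_id')
            # ObjectIds serialize as {"$oid": "..."}; plain integer ids pass through as-is.
            obj['id'] = raw.get('$oid', raw) if isinstance(raw, dict) else raw
        return obj

    return [_fix(o) for o in data] if isinstance(data, list) else _fix(data)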
Example #8
    def delete(self, dataset_id):
        """ Deletes dataset by ID (only owners)"""

        dataset = DatasetModel.objects(id=dataset_id, deleted=False).first()

        if dataset is None:
            return {"message": "Invalid dataset id"}, 400
        
        if not current_user.can_delete(dataset):
            return {"message": "You do not have permission to delete the dataset"}, 403

        dataset.update(set__deleted=True, set__deleted_date=datetime.datetime.now())
        return {"success": True}
Example #9
    def get(self, dataset_id):

        args = export.parse_args()
        categories = args.get('categories')

        if len(categories) == 0:
            categories = []

        if len(categories) > 0 or isinstance(categories, str):
            categories = [int(c) for c in categories.split(',')]

        dataset = DatasetModel.objects(id=dataset_id).first()

        if not dataset:
            return {'message': 'Invalid dataset ID'}, 400

        return dataset.export_coco(categories=categories)
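The export parser is expected to hand categories back as a comma-separated string, so e.g. "1,5,9" becomes [1, 5, 9] above while an empty value falls through to an empty list. A minimal sketch of such a parser, assuming Flask-RESTPlus reqparse with an empty-string default:

from flask_restplus import reqparse

export = reqparse.RequestParser()
export.add_argument('categories', type=str, default='', location='args',
                    help='Comma-separated category ids to include in the export')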
Example #10
    def get(self, dataset_id):

        args = export.parse_args()
        categories = args.get('categories')
        export_format = args.get('export_format')

        if len(categories) == 0:
            categories = []
        if len(categories) > 0 or isinstance(categories, str):
            categories = [int(c) for c in categories.split(',')]

        dataset = DatasetModel.objects(id=dataset_id).first()
        if not dataset:
            return {'message': 'Invalid dataset ID'}, 400
        if export_format == "coco":
            return dataset.export_coco(categories=categories)
        elif export_format == "tfrecord":
            return dataset.export_tf_record(
                train_shards=args.get('tfrecord_train_num_shards'),
                val_shards=args.get('tfrecord_val_num_shards'),
                test_shards=args.get('tfrecord_test_num_shards'),
                categories=categories,
                validation_set_size=args.get('validation_size'),
                testing_set_size=args.get('testing_size'))
Example #11
 def find_dataset_by(self, dataset_id):
     return DatasetModel.find_by(dataset_id)
Example #12
 def execute(self, dataset_id, categories):
     dataset = DatasetModel.find_by(dataset_id)
     category_id_list = CategoryModel.bulk_create(categories)
     # dataset.update(set__categories=category_id_list)
     dataset.update_categories(category_id_list)
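CategoryModel.bulk_create, used in Examples #1 and #12, is not shown either. A hedged MongoEngine sketch of what it could do: create any missing categories by name and return the ids for all of them (field names and uniqueness are assumptions):

from mongoengine import Document, StringField

class CategoryModel(Document):
    name = StringField(required=True, unique=True)

    @classmethod
    def bulk_create(cls, category_names):
        ids = []
        for name in category_names or []:
            category = cls.objects(name=name).first()
            if category is None:
                category = cls(name=name).save()  # save() returns the saved document
            ids.append(category.id)
        return ids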
Example #13
 def create_dataset(self, dataset_name):
     return DatasetModel(name=dataset_name, categories=[])
Example #14
 def find_datasets_by_stripID(self, stripId):
     key = stripId + "/"
     return DatasetModel.find_by_name_contain(key)
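find_by_name_contain (used here and in Example #18) is assumed to be a substring lookup on the dataset name; a minimal MongoEngine sketch:

from mongoengine import Document, StringField, BooleanField

class DatasetModel(Document):
    name = StringField(required=True, unique=True)
    deleted = BooleanField(default=False)

    @classmethod
    def find_by_name_contain(cls, key):
        # e.g. key "123/" matches datasets named "123/scene-01", "123/scene-02", ...
        return cls.objects(name__contains=key, deleted=False)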
Example #15
 def execute(self, dataset_id, dataset_source_folder_path):
     dataset = DatasetModel.find_by(dataset_id)
     self.move_content_from_source_to_dataset(dataset, dataset_source_folder_path)
     self.image_repository.create_images_from(dataset.id)
     return self.scan_annotation_from_json(dataset_id, dataset_source_folder_path)
Example #16
 def delete_dataset(self, dataset):
     DatasetModel.delete_by_id(dataset.id)
     self.delete_whole_directory(dataset)
Example #17
 def to_dataset_id(self, name):
     dataset = DatasetModel.find_by_name(name)
     if dataset:  # exist
         return Result.success(dataset.id)
     else:
         return self.create_new_dataset(name)
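create_new_dataset on the else branch above is not shown; presumably it saves a fresh DatasetModel and wraps its id in a Result, roughly like this sketch (which reuses the Result sketch from Example #3):

 def create_new_dataset(self, name):
     dataset = DatasetModel(name=name, categories=[])
     dataset.save()
     return Result.success(dataset.id)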
Example #18
 def find_datasets_by(self, stripID):
     # known bug: the id needs to match the "123/" prefix exactly
     key = stripID + "/"
     return DatasetModel.find_by_name_contain(key)
Example #19
def get_image_coco(image_id):
    """
    Generates coco for an image

    :param image: ImageModel
    :return: Coco in dictionary format
    """
    image = ImageModel.objects(id=image_id)\
        .only(*ImageModel.COCO_PROPERTIES)
    
    image = fix_ids(image)[0]
    dataset = DatasetModel.objects(id=image.get('dataset_id')).first()

    bulk_categories = CategoryModel.objects(id__in=dataset.categories, deleted=False) \
        .only(*CategoryModel.COCO_PROPERTIES)

    print(bulk_categories)

    db_annotations = AnnotationModel.objects(deleted=False, image_id=image_id)
    categories = []
    annotations = []

    for category in fix_ids(bulk_categories):

        category_annotations = db_annotations\
            .filter(category_id=category.get('id'))\
            .only(*AnnotationModel.COCO_PROPERTIES)
        
        if category_annotations.count() == 0:
            continue
        
        category_annotations = fix_ids(category_annotations)
        for annotation in category_annotations:

            has_segmentation = len(annotation.get('segmentation', [])) > 0
            has_keypoints = len(annotation.get('keypoints', [])) > 0

            if has_segmentation or has_keypoints:

                if has_keypoints:
                    arr = np.array(annotation.get('keypoints', []))
                    arr = arr[2::3]
                    annotation['num_keypoints'] = len(arr[arr > 0])
                
                annotations.append(annotation)

        if len(category.get('keypoint_labels')) > 0:
            category['keypoints'] = category.pop('keypoint_labels')
            category['skeleton'] = category.pop('keypoint_edges')
        else:
            del category['keypoint_edges']
            del category['keypoint_labels']
        
        categories.append(category)

    coco = {
        "images": [image],
        "categories": categories,
        "annotations": annotations
    }

    return coco
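The keypoint handling above relies on the COCO convention that keypoints are stored as flat [x1, y1, v1, x2, y2, v2, ...] triples: slicing with [2::3] isolates the visibility flags, and the non-zero ones are counted as num_keypoints. A tiny self-contained illustration:

import numpy as np

keypoints = [100, 200, 2, 0, 0, 0, 150, 220, 1]  # three (x, y, visibility) triples
arr = np.array(keypoints)[2::3]                  # -> array([2, 0, 1]), the visibility flags
print(len(arr[arr > 0]))                         # -> 2 labelled keypoints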
Example #20
 def set_dataset_by(self, dataset_id):
     self.dataset = DatasetModel.find_by(dataset_id)