def crop(path, dataset_id):
    """Split the image at *path* into four equal quadrants and register them.

    Saves <base>_{topleft,topright,bottomleft,bottomright}.png next to the
    original file and creates an ImageModel record for each crop.
    """
    pil_image = Image.open(path)
    directory = os.path.dirname(path)
    base_name = os.path.basename(path)

    width, height = pil_image.size
    # Integer midpoints: the original used `/`, producing float crop boxes.
    crop_width = width // 2
    crop_height = height // 2

    boxes = [
        (0, 0, crop_width, crop_height),           # top left
        (crop_width, 0, width, crop_height),       # top right
        (0, crop_height, crop_width, height),      # bottom left
        (crop_width, crop_height, width, height),  # bottom right
    ]
    suffixes = ["topleft.png", "topright.png", "bottomleft.png", "bottomright.png"]
    file_names = [os.path.join(directory, f"{base_name}_{suffix}")
                  for suffix in suffixes]

    # Save each quadrant, then register it in the database.
    for box, file_name in zip(boxes, file_names):
        pil_image.crop(box).save(file_name)
    for file_name in file_names:
        ImageModel.create_from_path(file_name, dataset_id).save()
def on_any_event(self, event):
    """Keep the image database in sync with a filesystem event.

    Handles creation, move, and deletion of watched image files; ignores
    directories, hidden paths, and files outside the watched pattern.
    """
    if event.event_type == "moved":
        path = event.dest_path
    else:
        path = event.src_path

    # Guard clauses: skip directories, hidden path segments ('/.xxx/'),
    # and extensions that don't match the watched pattern.
    if event.is_directory:
        return
    if re.search(r'\/\..*?\/', path) is not None:
        return
    if not path.lower().endswith(self.pattern):
        return

    self._log(f'File {path} for {event.event_type}')
    image = ImageModel.objects(path=event.src_path).first()

    if image is None and event.event_type != 'deleted':
        self._log(f'Adding new file to database: {path}')
        ImageModel.create_from_path(path).save()
    elif event.event_type == 'moved':
        self._log(f'Moving image from {event.src_path} to {path}')
        image.update(path=path)
    elif event.event_type == 'deleted':
        self._log(f'Deleting image from database {path}')
        ImageModel.objects(path=path).delete()
def post(self):
    """Create an image from an uploaded file inside an optional sub-folder.

    Returns the serialized ImageModel, or a 400 payload if the target file
    already exists.
    """
    args = image_upload.parse_args()
    image = args['image']

    folder = args['folder']
    if len(folder) > 0:
        # Normalize away leading slashes so the folder is always joined
        # relative to DATASET_DIRECTORY (the original expression only
        # stripped a single leading '/').
        folder = folder.lstrip('/')

    directory = os.path.join(Config.DATASET_DIRECTORY, folder)
    path = os.path.join(directory, image.filename)

    if os.path.exists(path):
        return {'message': 'file already exists'}, 400

    # exist_ok avoids the exists()/makedirs() race of the original.
    os.makedirs(directory, exist_ok=True)

    pil_image = Image.open(io.BytesIO(image.read()))

    image_model = ImageModel(file_name=image.filename,
                             width=pil_image.size[0],
                             height=pil_image.size[1],
                             path=path)
    image_model.save()
    pil_image.save(path)

    image.close()
    pil_image.close()
    return query_util.fix_ids(image_model)
def get(self, image_id):
    """ Called when loading from the annotator client.

    Returns the image document plus its dataset, categories with their
    annotations, user preferences, permissions, and prev/next image ids
    for navigation; 400 if the image or its dataset cannot be found.
    """
    # 'events' is excluded to keep the payload small.
    image = ImageModel.objects(id=image_id)\
        .exclude('events').first()

    if image is None:
        return {'success': False, 'message': 'Could not load image'}, 400

    # Only datasets visible to the current user are considered.
    dataset = current_user.datasets.filter(id=image.dataset_id).first()
    if dataset is None:
        return {
            'success': False,
            'message': 'Could not find associated dataset'
        }, 400

    # in_bulk returns {id: CategoryModel}; items() yields (id, model) pairs.
    categories = CategoryModel.objects(deleted=False)\
        .in_bulk(dataset.categories).items()

    # Get next and previous image (ordered by file_name within the dataset).
    images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
    pre = images.filter(
        file_name__lt=image.file_name).order_by('-file_name').first()
    nex = images.filter(
        file_name__gt=image.file_name).order_by('file_name').first()

    preferences = {}
    if not Config.LOGIN_DISABLED:
        preferences = current_user.preferences

    # Generate data about the image to return to client
    data = {
        'image': query_util.fix_ids(image),
        'categories': [],
        'dataset': query_util.fix_ids(dataset),
        'preferences': preferences,
        'permissions': {
            'dataset': dataset.permissions(current_user),
            'image': image.permissions(current_user)
        }
    }
    data['image']['previous'] = pre.id if pre else None
    data['image']['next'] = nex.id if nex else None

    for category in categories:
        # category is an (id, model) tuple; [1] is the CategoryModel.
        category = query_util.fix_ids(category[1])
        category_id = category.get('id')

        annotations = AnnotationModel.objects(image_id=image_id,
                                              category_id=category_id,
                                              deleted=False)\
            .exclude('events').all()

        # Client-side display defaults.
        category['show'] = True
        category['visualize'] = False
        category[
            'annotations'] = [] if annotations is None else query_util.fix_ids(
                annotations)
        data.get('categories').append(category)

    return data
def create_image_by_path(self, path):
    """Create an ImageModel for *path* in this object's dataset.

    Raises:
        ValueError: if an image already exists at the path, or creation fails.
    """
    if not path.endswith(ImageModel.PATTERN):
        return

    db_image = ImageModel.objects(path=path).first()
    if db_image is not None:
        raise ValueError('Image Should Not Exist In This Path: ' + path)

    try:
        ImageModel.create_from_path(path, self.dataset.id).save()
    except Exception as error:
        # str(): the original concatenated int(self.dataset.id) into the
        # message, which raised a TypeError and masked the real failure.
        # Also narrowed from a bare `except:`.
        raise ValueError('Create Image Failed Given Path: ' + path +
                         ', Dataset Id: ' + str(self.dataset.id)) from error
def get(self, dataset_id):
    """Delete every annotation in the dataset and reset image counters."""
    dataset_generate.parse_args()

    target = current_user.datasets.filter(id=dataset_id,
                                          deleted=False).first()
    if target is None:
        return {"message": "Invalid dataset id"}, 400

    # Drop all annotations, then mark each image as un-annotated.
    AnnotationModel.objects(dataset_id=target.id).delete()
    ImageModel.objects(dataset_id=target.id).update(
        set__annotated=False, set__num_annotations=0)

    return {'success': True}
def create_image_by_path(path):
    """Register *path* as a new image in the enclosing dataset.

    Relies on `dataset` and `task` from the enclosing scope.

    Returns:
        int: 1 if a new ImageModel was created, 0 otherwise.
    """
    if not path.endswith(ImageModel.PATTERN):
        return 0
    if ImageModel.objects(path=path).first() is not None:
        # Already in the database; nothing to do.
        return 0
    try:
        ImageModel.create_from_path(path, dataset.id).save()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; unreadable files are logged and skipped.
        task.warning(f"Could not read {path}")
        return 0
    task.info(f"New file found: {path}")
    return 1
def get(self, dataset_id):
    """Reset annotation metadata to the dataset default and clear image metadata."""
    dataset_generate.parse_args()

    found = current_user.find_exist_dataset_by_id(dataset_id)
    if found is None:
        return {"message": "Invalid dataset id"}, 400

    AnnotationModel.objects(dataset_id=found.id).update(
        metadata=found.default_annotation_metadata)
    ImageModel.objects(dataset_id=found.id).update(metadata={})

    return {'success': True}
def get(self, dataset_id):
    """Reset annotation metadata to the dataset default and clear image metadata."""
    dataset_generate.parse_args()

    target = current_user.datasets.filter(id=dataset_id,
                                          deleted=False).first()
    if target is None:
        return {"message": "Invalid dataset id"}, 400

    AnnotationModel.objects(dataset_id=target.id).update(
        metadata=target.default_annotation_metadata)
    ImageModel.objects(dataset_id=target.id).update(metadata={})

    return {'success': True}
def scan_dataset(task_id, dataset_id):
    """Walk a dataset directory and register image files not yet in the DB.

    Progress is reported through the TaskModel and a socket; thumbnail
    regeneration is queued for images flagged with regenerate_thumbnail.
    """
    task = TaskModel.objects.get(id=task_id)
    dataset = DatasetModel.objects.get(id=dataset_id)

    task.update(status="PROGRESS")
    socket = create_socket()

    directory = dataset.directory
    toplevel = list(os.listdir(directory))
    task.info(f"Scanning {directory}")

    count = 0
    for root, dirs, files in os.walk(directory):
        # os.path.basename is portable (the original split on '/').
        dirname = os.path.basename(root)
        try:
            position = toplevel.index(dirname)
            progress = int((position / len(toplevel)) * 100)
            task.set_progress(progress, socket=socket)
        except ValueError:
            # Directories below the top level aren't in `toplevel`;
            # skip the progress update for them (was a bare except).
            pass

        if dirname.startswith('.'):
            # Skip hidden directories.
            continue

        for file in files:
            path = os.path.join(root, file)
            if not path.endswith(ImageModel.PATTERN):
                continue
            if ImageModel.objects(path=path).first() is not None:
                continue
            try:
                ImageModel.create_from_path(path, dataset.id).save()
                count += 1
                task.info(f"New file found: {path}")
            except Exception:
                # Narrowed from a bare except: unreadable files are
                # logged and skipped without aborting the scan.
                task.warning(f"Could not read {path}")

    # Side-effect loop (was a throwaway list comprehension).
    for image in ImageModel.objects(regenerate_thumbnail=True).all():
        thumbnail_generate_single_image.delay(image.id)

    task.info(f"Created {count} new image(s)")
    task.set_progress(100, socket=socket)
def get(self):
    """ Endpoint called by dataset viewer client.

    Returns a paginated list of the current user's datasets, each with
    image counts, permissions, and (when present) a first-image id.
    """
    args = page_data.parse_args()
    limit = args['limit']
    page = args['page']
    folder = args['folder']

    datasets = current_user.datasets.filter(deleted=False)
    pagination = Pagination(datasets.count(), limit, page)
    datasets = datasets[pagination.start:pagination.end]

    datasets_json = []
    for dataset in datasets:
        dataset_json = query_util.fix_ids(dataset)
        images = ImageModel.objects(dataset_id=dataset.id, deleted=False)

        dataset_json['numberImages'] = images.count()
        dataset_json['numberAnnotated'] = images.filter(annotated=True).count()
        dataset_json['permissions'] = dataset.permissions(current_user)

        first = images.first()
        if first is not None:
            # Reuse `first` instead of issuing a second identical query.
            dataset_json['first_image_id'] = first.id
        datasets_json.append(dataset_json)

    return {
        "pagination": pagination.export(),
        "folder": folder,
        "datasets": datasets_json,
        "categories": query_util.fix_ids(
            current_user.categories.filter(deleted=False).all())
    }
def load_image(short_url):
    """Serve the image file behind *short_url*, caching its path on first hit.

    Returns a Flask file response, or an error ApiResponse when the image
    record or the file on disk is missing.
    """
    image_full_path = ImageUrlCacheManager.get_image_full_path(short_url)

    if not image_full_path:
        # Cache miss: resolve through the database and populate the cache.
        # Renamed from `id`, which shadowed the builtin.
        image_id = ImageModel.get_row_id_for_short_url(short_url)
        image = ImageModel.get_image_by_id(image_id)
        if image:
            ImageUrlCacheManager.cache_image_short_url(image)
            image_full_path = os.path.join(image.storage_full_dir,
                                           image.image_filename)
        else:
            return ApiResponse(message="Image not found", has_error=True).send()

    if os.path.isfile(image_full_path):
        return send_file(image_full_path)
    return ApiResponse(message="Image not found", has_error=True).send()
def get(self, category_id):
    """Endpoint called by category image list.

    Returns a paginated list of annotated images that carry at least one
    annotation in the given category; 400 for an unknown category id.
    """
    args = page_data.parse_args()
    limit = args['limit']
    page = args['page']

    category = current_user.categories.filter(id=category_id).first()
    # Check that the id exists (fixed typo; original docstring also had
    # a malformed four-quote opener).
    if category is None:
        return {"message": "Invalid category id"}, 400

    image_ids = AnnotationModel.objects(
        category_id=category_id).distinct('image_id')
    image_ids.sort()

    pagination = Pagination(len(image_ids), limit, page)
    image_ids = image_ids[pagination.start:pagination.end]

    images = []
    for image_id in image_ids:
        img = ImageModel.objects(id=image_id, annotated=True).first()
        if img:
            images.append(query_util.fix_ids(img))

    return {
        "category": query_util.fix_ids(category),
        "pagination": pagination.export(),
        "page": page,
        "images": images,
    }
def post(self):
    """ Creates an image.

    Uploads are allowed for logged-in users, or for anyone when the
    dataset is public. Returns the new image's id.
    """
    args = image_upload.parse_args()
    image = args['image']
    dataset_id = args['dataset_id']
    try:
        dataset = DatasetModel.objects.get(id=dataset_id)
    except Exception:
        # Narrowed from a bare `except:`; covers DoesNotExist / bad ids.
        return {'message': 'dataset does not exist'}, 400

    # Guard clause: check if current user exists or dataset is public.
    if not (current_user or dataset['is_public']):
        return {'message': 'Upload not permitted'}, 400

    directory = dataset.directory
    path = os.path.join(directory, image.filename)

    if os.path.exists(path):
        return {'message': 'file already exists'}, 400

    pil_image = Image.open(io.BytesIO(image.read()))
    pil_image.save(path)
    image.close()
    pil_image.close()

    # An anonymous user on a public dataset has no username; the original
    # unconditionally read current_user.username and crashed in that case.
    username = current_user.username if current_user else None
    db_image = ImageModel.create_from_path(path, dataset_id, username).save()
    # TODO(sriram): generate thumbnail immediately after uploading
    return db_image.id
def post(self):
    """ Creates an image.

    Saves the uploaded file into the dataset directory and returns the
    image id; if the DB record already exists, returns the existing id.
    """
    args = image_upload.parse_args()
    image = args['image']
    dataset_id = args['dataset_id']
    try:
        dataset = DatasetModel.objects.get(id=dataset_id)
    except Exception:
        # Narrowed from a bare `except:`; covers DoesNotExist / bad ids.
        return {'message': 'dataset does not exist'}, 400

    path = os.path.join(dataset.directory, image.filename)

    if os.path.exists(path):
        return {'message': 'file already exists'}, 400

    pil_image = Image.open(io.BytesIO(image.read()))
    pil_image.save(path)
    image.close()
    pil_image.close()

    try:
        db_image = ImageModel.create_from_path(path, dataset_id).save()
    except NotUniqueError:
        # Another request won the race; return the existing record.
        db_image = ImageModel.objects.get(path=path)
    return db_image.id
def get(self, dataset_id):
    """Return aggregate statistics for a dataset.

    Includes totals (users, images, annotations, categories, time spent),
    averages, and per-category annotation / annotated-image counts.
    Returns 400 for an unknown dataset id.
    """
    args = dataset_generate.parse_args()
    dataset = current_user.datasets.filter(id=dataset_id,
                                           deleted=False).first()
    if dataset is None:
        return {"message": "Invalid dataset id"}, 400

    images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
    annotated_images = images.filter(annotated=True)
    annotations = AnnotationModel.objects(dataset_id=dataset_id,
                                          deleted=False)

    # Calculate annotation counts by category in this dataset
    category_count = dict()
    image_category_count = dict()
    for category in dataset.categories:
        # Calculate the annotation count in the current category in this dataset
        cat_name = CategoryModel.objects(id=category).first()['name']
        cat_count = AnnotationModel.objects(dataset_id=dataset_id,
                                            category_id=category,
                                            deleted=False).count()
        category_count.update({str(cat_name): cat_count})

        # Calculate the annotated images count in the current category in
        # this dataset (distinct image ids carrying such an annotation).
        image_count = len(
            AnnotationModel.objects(dataset_id=dataset_id,
                                    category_id=category,
                                    deleted=False).distinct('image_id'))
        image_category_count.update({str(cat_name): image_count})

    stats = {
        'total': {
            'Users': dataset.get_users().count(),
            'Images': images.count(),
            'Annotated Images': annotated_images.count(),
            'Annotations': annotations.count(),
            'Categories': len(dataset.categories),
            # Milliseconds summed over images, converted to seconds.
            'Time Annotating (s)': (images.sum('milliseconds') or 0) / 1000
        },
        'average': {
            # NOTE(review): 'Image Size (px)' averages `width` — looks like
            # it was meant to be labeled 'Image Width (px)'; confirm before
            # changing the client-visible key.
            'Image Size (px)': images.average('width'),
            'Image Height (px)': images.average('height'),
            'Annotation Area (px)': annotations.average('area'),
            'Time (ms) per Image': images.average('milliseconds') or 0,
            'Time (ms) per Annotation':
                annotations.average('milliseconds') or 0
        },
        'categories': category_count,
        'images_per_category': image_category_count
    }
    return stats
def generate_thumbnails():
    """Regenerate thumbnails for every image flagged regenerate_thumbnail."""
    PREFIX = "[Thumbnails]"
    print(
        f'{PREFIX} Sending request for regenerating images with non actual thumbnails',
        flush=True)
    # Plain loop: the original built a throwaway list purely for side effects.
    for image in ImageModel.objects(regenerate_thumbnail=True).all():
        generate_thumbnail(image)
def post(self, image_id):
    """Reset superpixel state for an image and reload it from its original."""
    image_model = ImageModel.objects(id=image_id).first()
    # Guard against unknown ids (consistent with the sibling endpoints);
    # the original raised AttributeError on a None result.
    if not image_model:
        return {"message": "Invalid image ID"}, 400
    image_model.update(superpixel_generated=False)
    reload(image_model.path_original, image_model.path)
    return {"success": True}
def get_images(page=0):
    """Return one page of images as JSON, warming the short-URL cache.

    Caching each image's short_url -> full path mapping makes later image
    fetches skip the database.
    """
    images, next_page_num, total_page_nums = ImageModel.get_all_images(page)

    for img in images:
        img.image_url = _generate_image_url(img.image_short_url)
        ImageUrlCacheManager.cache_image_short_url(img)
        img.create_posted_on_property()

    serialized = ImageSchema(many=True).dump(images).data
    return ApiResponse(data=serialized,
                       total_page_nums=total_page_nums).send()
def post(self, image_id):
    """Run the metric computation for an image's mask/working/original paths."""
    # Removed leftover debug print ("in real metric lol prposal").
    image_model = ImageModel.objects(id=image_id).first()
    if not image_model:
        return {"message": "Invalid image ID"}, 400
    metric(image_model.path_mask, image_model.path,
           image_model.path_original, image_id)
    return {"success": True}
def get(self, dataset_id):
    """Return crowd-sourcing annotation statistics for a dataset."""
    dataset = current_user.datasets.filter(id=dataset_id,
                                           deleted=False).first()
    if dataset is None:
        return {"message": "Invalid dataset id"}, 400

    images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
    # count() instead of len(queryset): avoids materializing every document.
    num_images_cs_not_annotated = ImageModel.objects(
        dataset_id=dataset.id, cs_annotated=[], deleted=False).count()

    # NOTE(review): the query counts images whose cs_annotated list is
    # EMPTY, yet the key reads 'CS Annotated Images'. Key left unchanged
    # because clients may depend on it — confirm which meaning is intended.
    cs_stats = {
        'total': {
            'Images': images.count(),
            'CS Annotated Images': num_images_cs_not_annotated,
        }
    }
    return cs_stats
def handle_image_uploading():
    """Validate an uploaded image, store it under a secret name, and return its URL."""
    # Input validation guards.
    if not request.files or not request.files['file']:
        return ApiResponse(message="File is missing", has_error=True).send()
    if not request.form['title'] or len(request.form['title']) > constants.IMAGE_TITLE_MAX_LENGTH:
        return ApiResponse(message="Title is invalid", has_error=True).send()
    if request.form['description'] and len(request.form['description']) > constants.IMAGE_DESCRIPTION_MAX_LENGTH:
        return ApiResponse(message="Description is invalid", has_error=True).send()

    # Read the file and check its extension.
    file = request.files['file']
    if not is_allowed_file_extension(file.filename):
        return ApiResponse(message="File type is not supported", has_error=True).send()

    # Generate today's folder such as folder '12-2-2012'.
    storage_full_dir = generate_todays_date_folder()
    safe_name = secure_filename(file.filename)
    secret_filename = generate_secret_filename_for(safe_name)
    file.save(os.path.join(storage_full_dir, secret_filename))

    image_record = ImageModel(title=request.form['title'],
                              description=request.form['description'],
                              image_filename=secret_filename,
                              storage_full_dir=storage_full_dir)
    db.session.add(image_record)
    db.session.commit()
    # Refresh so image_record carries the ID assigned by the database.
    db.session.refresh(image_record)

    # The short URL derives from the ID, so it needs a second commit.
    short_url = image_record.self_assign_short_url()
    db.session.add(image_record)
    db.session.commit()

    image_full_url = _generate_image_url(short_url)
    return ApiResponse(message='Image uploaded: URL => {}'.format(image_full_url)).send()
def append_papers_object_to(self, collect_annotation_data, annotation_data, image_id):
    """Attach paper.js path data and derived COCO fields to the collector dict.

    Does nothing unless the compoundPath has exactly two elements.
    """
    paperjs_object = annotation_data.get('compoundPath', [])
    if len(paperjs_object) != 2:
        return

    image = ImageModel.find_by(image_id)
    segmentation, area, bbox = self.generate_coco_format_for_segment_data(
        image.width, image.height, paperjs_object)

    collect_annotation_data['paper_object'] = paperjs_object
    collect_annotation_data['segmentation'] = segmentation
    collect_annotation_data['area'] = area
    collect_annotation_data['bbox'] = bbox
def to_datasets_json(self, datasets, current_user):
    """Serialize datasets with image counts, permissions, and first-image id."""
    datasets_json = []
    for dataset in datasets:
        dataset_json = self.fix_ids(dataset)
        images = ImageModel.objects(dataset_id=dataset.id, deleted=False)

        dataset_json['numberImages'] = images.count()
        dataset_json['numberAnnotated'] = images.filter(
            annotated=True).count()
        dataset_json['permissions'] = dataset.permissions(current_user)

        first = images.first()
        if first is not None:
            # Reuse `first` rather than re-running the identical query.
            dataset_json['first_image_id'] = first.id
        datasets_json.append(dataset_json)
    return datasets_json
def post(self, image_id):
    """Save DEXTR mask output for an image.

    Returns 400 when the model is not loaded or the image id is unknown.
    """
    # Removed leftover debug prints ("WTF", "SSN not loaded").
    if not SUPER_LOADED:
        return {"disabled": True, "message": "DEXTR is disabled"}, 400

    image_model = ImageModel.objects(id=image_id).first()
    if not image_model:
        return {"message": "Invalid image ID"}, 400

    save(image_model.path_mask, image_model.path,
         image_model.path_original, image_id)
    return {"success": True}
def post(self, image_id):
    """Run superpixel generation for an image and record the resulting paths."""
    # Removed leftover debug print ("WTF").
    image_model = ImageModel.objects(id=image_id).first()
    if not image_model:
        return {"message": "Invalid image ID"}, 400

    path, path_mask, path_original, path_super_original = main(
        image_model.path, image_model.path_original)

    # One round-trip instead of five separate update() calls.
    image_model.update(path_original=path_original,
                       path_mask=path_mask,
                       path=path,
                       path_super_original=path_super_original,
                       superpixel_generated=True)
    return {"success": True}
def scanning_json_file_in_the_directory_of_dataset(dataset):
    """Find a labelme JSON in the dataset directory and import its annotations."""
    from .jsonFileFinder import JsonFileFinder
    import json

    labelme_json = JsonFileFinder().find_json_in_the(dataset.directory)
    # Truthiness check instead of `== ""` so None/empty results are also
    # treated as "nothing found"; removed the debug print of the payload.
    if not labelme_json:
        return

    from usecase.importLabelmeAnnotationsUsecase import ImportLabelmeAnnotationsUseCase
    from usecase.addCategoriesToDatasetUseCase import AddCategoriesToDatasetUseCase

    usecase = ImportLabelmeAnnotationsUseCase(AddCategoriesToDatasetUseCase())
    for image in ImageModel.find_images_by_dataset_id(dataset.id):
        usecase.execute(dataset.id, image.id, json.dumps(labelme_json))
def post(self, image_id):
    """Predict a superpixel mask for an image and return its polygon segmentation."""
    # Removed leftover debug prints ("WTF", "SSN not loaded", and a
    # non-English debug string).
    if not SUPER_LOADED:
        return {"disabled": True, "message": "DEXTR is disabled"}, 400

    image_model = ImageModel.objects(id=image_id).first()
    if not image_model:
        return {"message": "Invalid image ID"}, 400

    image = Image.open(image_model.path)
    result = superpixel.predict_mask(image)
    main()
    # NOTE(review): "segmentaiton" is misspelled but kept — clients may
    # already depend on this key; fix client and server together.
    return {"segmentaiton": Mask(result).polygons().segmentation}
def post(self, image_id):
    """Colorize the given points/class color onto the image's working copy."""
    args = dextr_args.parse_args()
    points = args.get('points')
    classcolor = args.get('classcolor')
    # Removed leftover debug prints and dead commented-out code.

    image_model = ImageModel.objects(id=image_id).first()
    if not image_model:
        return {"message": "Invalid image ID"}, 400

    colorize(classcolor, points, image_model.path_original, image_model.path)
    return {"success": True}
def addImage():
    """Streamlit form: upload an image, preview it, and persist it to the DB."""
    st.header("Upload image to continue")
    img_name = st.text_input("Enter image name")
    img_file = st.file_uploader("Insert Image Here")

    if img_file:
        img = Image.open(img_file)
        st.image(img)
        # f-string instead of manual `+` concatenation.
        img_path = f"./uploads/{img_name}.png"
        img.save(img_path)

    add_btn = st.button("Save image")
    # img_path is only defined when img_file is set; the `and img_file`
    # guard keeps this branch safe.
    if add_btn and img_name and img_file:
        with st.spinner("Saving your Image"):
            img_data = ImageModel(name=img_name, path=img_path)
            sess.add(img_data)
            sess.commit()
        st.success("Image successfully saved")