def save_new_files(indexed_files: list, file_type: str):
    """Persist newly indexed files of the given type to the DB.

    :param indexed_files: List of indexed-file dicts (see key usage below).
    :param file_type: Files type - `image` or `video`.
    :raises ValueError: If `file_type` is neither "image" nor "video".
    """
    # Validate the argument *before* the try block: previously the ValueError
    # was raised inside the same try/except that logs unexpected errors, so
    # callers never saw the bad-argument error — it was only logged.
    if file_type not in ("image", "video"):
        raise ValueError("Wrong `file_type` param set")
    try:
        if file_type == "image":
            for image_data in indexed_files:
                # skip images whose content hash is already stored
                if not Image.get(image_md5_hash=image_data["md5_hash"]):
                    Image(
                        image_name=image_data["namepath"][0],
                        image_path=image_data["namepath"][1],
                        image_height=image_data["height"],
                        image_width=image_data["width"],
                        image_descriptor=image_data["image_descriptor"],
                        image_md5_hash=image_data["md5_hash"],
                    )
        else:  # file_type == "video" (validated above)
            for video_data in indexed_files:
                # skip videos already registered under the same path
                if not Video.get(video_path=video_data["namepath"][1]):
                    Video(video_name=video_data["namepath"][0],
                          video_path=video_data["namepath"][1])
    except Exception:
        # best-effort persistence: log DB errors instead of killing the caller
        logger.error(traceback.format_exc())
def import_source():
    """Parse `images.toml` and update databases.

    Sections handled: ``images`` (URL -> keyword mappings, with duplicate-URL
    warnings), ``alias`` (keyword aliases), ``hidden`` and ``ignored`` flags.
    """
    with open('images.toml', 'rt', encoding='utf8') as infile:
        newdefinitions = toml.load(infile)

    # images section
    urls = set()
    for keyw, images in newdefinitions['images'].items():
        # A keyword may map to a single URL or a list of URLs; normalize to a
        # list so one loop handles both (the original duplicated this body).
        for image in (images if isinstance(images, list) else [images]):
            db.merge(Image(image, keyw))
            if image in urls:
                print('Duplicate: ', image)
            else:
                urls.add(image)
        db.merge(Keyword(keyw, keyw))

    # alias section
    for keyw, kalias in newdefinitions['alias'].items():
        db.merge(Keyword(keyw, kalias))

    # hidden section
    for keyw, hidden in newdefinitions['hidden'].items():
        db.query(Keyword).get(keyw).hidden = hidden

    # ignored
    for keyw, ignored in newdefinitions['ignored'].items():
        KeywordCandidate.get_or_create(keyw).ignored = ignored

    db.commit()
def group_image_files() -> dict:
    """Group image files into a dict keyed by directory path.

    :return: Dict mapping each path to a list of ``(filename, image_id)``
        tuples::

            {
                <path name>: [(<filename>, <image ID>), ...],
                ...
            }

        Returns whatever was accumulated (possibly ``{}``) if a DB error
        occurs; the error is logged as critical.
    """
    result = {}
    try:
        all_images_paths = Image.group_images_paths()
        for path in all_images_paths:
            path_files = select(
                (image.image_name, image.id)
                for image in Image
                if image.image_path == path)[:]
            result[path] = path_files
    except Exception:
        logger.critical(traceback.format_exc())
    # NOTE: return moved out of a `finally` block — `finally: return` also
    # swallowed exceptions the `except` above did not catch (KeyboardInterrupt,
    # SystemExit); normal behavior is unchanged.
    return result
def handle_image(filename: str, image_stream: BinaryIO, owner_id: int) -> bool:
    """Store an uploaded image on disk and register it in the DB.

    :param filename: Original client filename (used for name and extension).
    :param image_stream: Binary stream with the image payload.
    :param owner_id: Id of the owning user; their ``last_update`` is touched.
    :return: ``False`` if the extension is not allowed, ``True`` on success.
    """
    filename, extension = os.path.splitext(filename)
    extension = extension[1:].lower()
    # `extension` is already lower-cased above; the original called .lower()
    # a second time here.
    if extension not in ALLOWED_EXTENSIONS:
        return False

    image = Image(owner=owner_id,
                  original_filename=filename,
                  extension=extension,
                  uuid_access_token=str(uuid.uuid4()))
    db.session.add(image)
    user = User.query.get(owner_id)
    if user:
        user.last_update = datetime.now()
    # commit first so image.id is assigned for the on-disk filename below
    db.session.commit()

    original_dir = safe_join(current_app.config.get('IMAGE_DIRECTORY'),
                             'original')
    # exist_ok avoids the check-then-create race of the original
    # isdir()/makedirs() pair
    os.makedirs(original_dir, exist_ok=True)
    with open(safe_join(original_dir, f"{image.id}.{extension}"), 'wb') as f:
        f.write(image_stream.read())

    # Add image tasks
    get_image_metadata.send(image.id, image.uuid_access_token)
    return True
def add_new_images():
    """Interactively register image files under ./static in the database.

    Walks every non-ignored directory in ``static/``, shows each file with
    ``feh``, prompts for metadata on the console, and saves an ``Image`` row
    after a y/n confirmation. Existing entries can be erased and re-entered;
    accidental duplicates are trimmed down to one row.
    """
    def commit_or_not(image):
        # Confirmation prompt; recurses until the answer starts with y or n.
        print(image)
        answer = input("Save? (y/n) ")
        if len(answer) == 0:
            # empty answer -> "0", which falls through to the else branch
            # and re-prompts
            answer="0"
        is_good = answer[0].lower()
        if is_good == "y":
            s.add(image)
            s.commit()
            print("Image info saved")
        elif is_good == "n":
            print("Image not saved to database. Run again to add.")
        else:
            # unrecognized answer: ask again
            commit_or_not(image)
    # every directory under static/ except the ignored ones
    dir_list = [x for x in listdir('static') if isdir(f'static/{x}') and not x in IGNORED_DIRS]
    print("directoyr list:", dir_list)
    for d in dir_list:
        for i in listdir(f"static/{d}"):
            # files only; nested directories are skipped
            if not isdir(f'static/{d}/{i}'):
                try:
                    #TODO recall/create object first, then edit
                    # exactly-one lookup: raises NoResultFound for new files,
                    # MultipleResultsFound for accidental duplicates
                    img = s.query(Image).filter(Image.file_name == i, Image.file_loc == f'static/{d}').one()
                    print(img)
                    answer = input(f"{i} already has entry. Erase and change later? (y/n) ")
                    if len(answer) == 0:
                        # default to "no" when the user just presses Enter
                        answer = "n"
                    else:
                        answer = answer[0].lower()
                    if answer == "y":
                        # delete now; the file will be re-prompted on the
                        # next run (no immediate re-entry here)
                        s.delete(img)
                        s.commit()
                except NoResultFound:
                    # new file: show it, collect metadata, then confirm save
                    print(i, "will have it's entry edited")
                    viewer = subprocess.run(['feh', f'static/{d}/{i}'])
                    image_name = input("Name of image: ")
                    file_loc = f'static/{d}'
                    description = input("Description (optional): ")
                    price = input("Price: ")
                    sold_raw = input("Already sold? (T/F; blank for F): ")
                    if len(sold_raw) == 0:
                        # blank answer means "not sold"
                        sold_raw = "f"
                    if (sold_raw[0].lower()) == "t":
                        sold = True
                    else:
                        sold = False
                    img = Image(file_name=i,file_loc=file_loc, image_name=image_name, description=description,price=price,sold=sold)
                    commit_or_not(img)
                except MultipleResultsFound:
                    # duplicate rows for the same file: keep only the last one
                    print(i, "Multiples found. Removing all but one")
                    imgs = s.query(Image).filter(Image.file_name == i, Image.file_loc == f'static/{d}').all()
                    for img in imgs[:-1]:
                        s.delete(img)
                        s.commit()
def save_photo_info(session, full_size, width, height, file_id):
    """Save photo info to the DB, linked to the last message id in the table.

    NOTE(review): the full Message.id query is walked just to keep the final
    row, and each row is a 1-tuple result — presumably the Image constructor
    coerces it; confirm against the model definition.
    """
    last_row = None
    for row in session.query(Message.id):
        last_row = row
    photo = Image(message_id=last_row,
                  full_size=full_size,
                  width=width,
                  height=height,
                  file_id=file_id)
    session.add(photo)
    session.commit()
def upload_image():
    """Admin view: accept an image upload, store it under ``static/img``,
    and record its name in the DB.

    :return: Redirect to the admin homepage on success, otherwise the
        upload form template.
    """
    form = UploadImageForm()
    if form.validate_on_submit():
        image = form.image.data
        current_app.logger.debug(image)
        image_name = save_file(
            image,
            Path(current_app.instance_path).parent.joinpath('static/img'),
        )
        img = Image()
        img.name = image_name
        db.session.add(img)
        db.session.commit()
        # Log the stored filename and the real destination. The original
        # logged `image.name` (the form-field name, not the saved file) and
        # claimed "/static/images/", which does not match the 'static/img'
        # directory actually used above.
        current_app.logger.info(f'{image_name} saved to /static/img/')
        flash('image successfully uploaded', category='alert alert-success')
        return redirect(url_for('admin.homepage'))
    return render_template('admin/upload_image.html', form=form)
def post(self):
    """Handle an image upload: store the file, register it for the current
    user, and start background processing.

    :return: ``(payload, 400)`` when no file was sent, otherwise
        ``(payload with img_id, 201)``.
    """
    file = request.files.get('file')
    if not file:
        return {'message': 'Не найден файл для загрузки'}, 400
    user_id = get_jwt_identity()
    user = User.get(id=user_id)
    # basename() strips client-supplied directory components so the upload
    # cannot be written outside UPLOAD_FOLDER (path-traversal hardening);
    # the same sanitized name is stored in the DB so lookups stay consistent.
    filename = os.path.basename(file.filename)
    file.save(os.path.join(UPLOAD_FOLDER, filename))
    img_id = Image.store_image(filename, user)
    # processing runs in a daemon-less worker thread; the client polls by id
    threading.Thread(target=process_image, args=(img_id,)).start()
    return {
        'message': 'Файл успешно загружен',
        'img_id': img_id,
    }, 201
def download_image(url, filename, file_id=None, image_type=None):
    """Download an image from `url` and save it to the profile-image folder.

    :param url: Remote image URL.
    :param filename: Target filename for the saved image.
    :param file_id: Optional id to record; required to get an Image back.
    :param image_type: Stored on the resulting Image record.
    :return: An ``Image`` object when both `file_id` and a valid extension
        were determined, otherwise ``None``.
    """
    # TODO consider resizing before saving
    # A timeout is mandatory: requests has none by default, so a stalled
    # remote host would hang this call forever.
    r = requests.get(url, timeout=30)
    image = FileStorage(stream=BytesIO(r.content))
    ext = validate_image(image)
    result = save_image(PROFILE_IMAGE_FOLDER, image, ext, filename=filename)
    if not (file_id and ext):
        return None
    # uploader_id=-1 marks system-downloaded (not user-uploaded) images
    return Image(image_id=file_id,
                 uploader_id=-1,
                 image_type=image_type,
                 image_height=result.get("image_height"),
                 image_width=result.get("image_width"),
                 image_size=result.get("image_size"))
def get(self, img_id):
    """Return the processed uploaded file to its owner and delete its DB record.

    :param img_id: Id of the uploaded image to fetch.
    :return: 404 if unknown, 403 if requested by a non-owner, a "processing
        not finished" payload while incomplete, otherwise the file itself.
    """
    file = Image.get(id=img_id)
    user_id = get_jwt_identity()
    user = User.get(id=user_id)
    if not file:
        return {'message': 'Файл не найден'}, 404
    elif file.uploaded_by != user:
        return {'message': 'Информация о загруженных файлах доступно только загрузившим их пользователям'}, 403
    elif not file.complete:
        # NOTE(review): no explicit status code here, so the framework will
        # return 200 even though processing is unfinished — confirm intended.
        return {'message': 'Обработка файла не завершена'}
    else:
        path = os.path.join(UPLOAD_FOLDER, file.filename)
        # NOTE(review): the DB record is deleted *before* send_file(); if
        # sending fails the record is already gone and the file becomes
        # unreachable — confirm this one-shot-download behavior is intended.
        file.delete()
        return send_file(path)
def refresh():
    """Delete all predictor networks, build new and save it in database."""
    log.info('Refreshing predictors.')
    # drop every previously built network before rebuilding
    for old_network in PredictorNetwork.all():
        old_network.delete()
    for cat in core.CATEGORIES:
        # general predictor covering the whole category
        if _enough_samples(cat):
            _create_predictor(cat)
        # one specific predictor per image within the category
        for img_name in Image.get_images_names(cat):
            if _enough_samples(cat, img_name):
                _create_predictor(cat, img_name)
def find_duplicates(self):
    """Search for duplicate image pairs among processed files and store
    the results in the DB, updating the status bar and buttons around it.

    :return: ``None`` immediately (with a status-bar warning) if no media
        files have been processed yet.
    """
    # require a prior "process" run that filled IMAGE_PATH_DICT
    # (truthiness check instead of the original `== {}` comparison)
    if not IMAGE_PATH_DICT:
        self.statusBar.setStyleSheet("color: red")
        self.statusBar.showMessage("Please process your media files first")
        return None
    # disable the action buttons while the potentially long search runs
    self.duplicateButton.setDisabled(True)
    self.processButton.setDisabled(True)
    self.reindexButton.setDisabled(True)
    self.statusBar.setStyleSheet("color: black")
    self.statusBar.showMessage("Finding duplicates...")
    with db_session():
        # get all images descriptors
        image_files_query = Image.get_descriptors()
        # number of unordered pairs: n*(n-1)//2 — integer arithmetic avoids
        # the float round-trip of the original int(len * (len - 1) / 2)
        n_images = len(image_files_query)
        pairs_amount = n_images * (n_images - 1) // 2
        QMessageBox.information(
            self,
            "Find duplicates",
            f"""
            Similar images search start. Please wait!\n
            You have ~{pairs_amount} images pairs;
            Work will get ~{round(pairs_amount*0.00006, 2)} sec.
            """,
            QMessageBox.Ok,
            QMessageBox.Ok,
        )
        # run function to find duplicates
        result = feature_description(images_list=image_files_query)
    with db_session():
        # save duplicates to DB
        save_images_duplicates(result)
    QMessageBox.information(self, "Find duplicates", "Success!",
                            QMessageBox.Ok, QMessageBox.Ok)
    # set all buttons able
    self.duplicateButton.setEnabled(True)
    self.reindexButton.setEnabled(True)
    self.processButton.setEnabled(True)
def table_data_init(self):
    """Fill the image table (and the IMAGE_PATH_DICT cache) from every
    image stored in the database."""
    # get available images from DB
    with db_session():
        stored_images = Image.all()
        for row, img in enumerate(stored_images):
            # append a fresh row at the bottom of the table
            current_rows = self.imageListTable.rowCount()
            self.imageListTable.insertRow(current_rows)
            key = str(row)
            IMAGE_PATH_DICT[key] = {
                "id": img.id,
                "name": img.image_name,
                "additional_attrs": {
                    "height": img.image_height,
                    "width": img.image_width
                },
                "folder": img.image_path,
                "type": (img.image_name.split(".")[-1]).lower(),
                "full_path": img.full_path(),
            }
            self.imageListTable.setItem(
                row,
                self.COLUMNS_DICT["ID"]["index"],
                self.center_widget_item(key),
            )
            self.imageListTable.setItem(
                row,
                self.COLUMNS_DICT["File name"]["index"],
                self.center_widget_item(img.image_name),
            )
            self.imageListTable.setItem(
                row,
                self.COLUMNS_DICT["Format"]["index"],
                self.center_widget_item(IMAGE_PATH_DICT[key]["type"]),
            )
            # last column shows a clickable "view duplicates" icon
            dup_item = QTableWidgetItem()
            dup_item.setIcon(QIcon("gui/static/icon_view_duplicates.png"))
            self.imageListTable.setItem(
                row,
                self.COLUMNS_DICT["Dup"]["index"],
                dup_item,
            )
def process_image(img_id):
    """Run (simulated) processing for the image, then mark it complete in the DB."""
    processing_delay_seconds = 5  # fixed delay stands in for the real work
    time.sleep(processing_delay_seconds)
    Image.process_complete(img_id)
def import_metrics_from_swan_experiment(experiment_id):
    """Insert metrics from a SWAN experiment into the database.

    Keyword arguments:
    ------------------
    experiment_id : string
        Experiment id.

    Raises NotFoundError when the first metric is missing a required tag.
    Logs and returns early when there are not enough metrics.
    """
    # Get metric query.
    metrics = Metrics.get_by_experiment_id(experiment_id)

    # Check metrics.
    metric_count = metrics.count()
    metric_batch_size = len(core.METRICS)
    try:
        _check_metrics(metric_count, metric_batch_size)
    except NotEnoughMetricsError as e:
        # `e` instead of `e.message`: exceptions have no .message on Python 3
        log.error('For experiment "{0}": {1}'.format(experiment_id, e))
        return

    # Count batch number. Floor division: plain `/` yields a float on
    # Python 3 and would break range(0, batch_count) below.
    batch_count = int(metric_count) // metric_batch_size

    # Gather information from first metric.
    metric = metrics.first()
    try:
        category = metric.tags['category']
        name = metric.tags['name']
        configuration_id = metric.tags['host_aggregate_configuration_id']
        parameters = _get_parameters(metric, category)
        host_aggregate = Host(
            name=metric.tags['host_aggregate_name'],
            configuration_id=metric.tags['host_aggregate_configuration_id'],
            disk={
                'iops': int(metric.tags['host_aggregate_disk_iops']),
                'size': int(metric.tags['host_aggregate_disk_size'])
            },
            ram={
                'bandwidth': int(metric.tags['host_aggregate_ram_bandwidth']),
                'size': int(metric.tags['host_aggregate_ram_size'])
            },
            cpu={
                'performance':
                int(metric.tags['host_aggregate_cpu_performance']),
                'threads': int(metric.tags['host_aggregate_cpu_threads'])
            })
        flavor = Flavor(vcpus=int(metric.tags['flavor_vcpus']),
                        disk=int(metric.tags['flavor_disk']),
                        ram=int(metric.tags['flavor_ram']),
                        name=metric.tags['flavor_name'])
        image = metric.tags['image']
        host = metric.host
        instance_id = metric.tags['instance_id']
    except KeyError as e:
        # `e` instead of `e.message` (Python 3 compatibility)
        raise NotFoundError('No basic parameters found: {}'.format(e))

    # Save host aggregate information.
    HostAggregate(name=host_aggregate.name,
                  configuration_id=host_aggregate.configuration_id,
                  cpu=host_aggregate.cpu,
                  ram=host_aggregate.ram,
                  disk=host_aggregate.disk).save()

    # Get metrics.
    resource_usage = _prepare_resource_usage(metric_batch_size, batch_count,
                                             metrics)

    # Change metrics names from SNAP to KRICO standards.
    _remap_metrics_names(resource_usage)

    # Transform metrics.
    resource_usage = _transform_resource_usage(resource_usage, batch_count)

    # Skip one metric row because of transformation.
    batch_count = batch_count - 1

    # Fill database.
    for i in range(0, batch_count):
        # Calculate resources usage.
        usage = {}
        # Dedicated loop variable: the original reused `name` here, which
        # clobbered the instance name read from the first metric's tags
        # before ClassifierInstance(name=name, ...) consumed it.
        for metric_name in METRIC_NAMES_MAP.values():
            usage[metric_name] = resource_usage[metric_name][i]

        ClassifierInstance(id=uuid4(),
                           category=category,
                           name=name,
                           configuration_id=configuration_id,
                           parameters=parameters,
                           host_aggregate=host_aggregate,
                           flavor=flavor,
                           image=image,
                           host=host,
                           instance_id=instance_id,
                           resource_usage=usage).save()

        PredictorInstance(id=uuid4(),
                          image=image,
                          category=category,
                          parameters=parameters,
                          requirements=_get_requirements(usage),
                          instance_id=instance_id).save()

        Image(image=image, category=category).save()
print(time.time() - start_time) # get photo and video files lists image_files_list, video_files_list = index_folder_files(path=path, max_depth=4, indexing_type="all") # print(image_files_list) # print(video_files_list) print("Files indexed") # video_processing(video_files_list) print(len(image_files_list)) image_processing(image_files_list) print((time.time() - start_time) / len(image_files_list)) with db_session(): image_files_query = Image.get_descriptors() feature_description(images_list=image_files_query) print((time.time() - start_time) / len(image_files_query)) raise Exception print(len(image_files_list)) print(len(image_files_query)) # get certain image all duplicates result = get_image_duplicates(image_id=8, similarity_threshold=150) print(result)
def run(self):
    """Index the selected folder, persist new images to the DB, and fill
    the image table with every stored image."""
    # start indexing folder; depth comes from settings only when the
    # "scan subfolders" checkbox is ticked
    images, videos = index_folder_files(
        path=self.folderField.text(),
        max_depth=json_settings.user_json_read("folderDepth")
        if self.folderTreeCheckbox.isChecked() else 0,
    )
    # processing new files
    processed_files = image_processing(images)
    # save new files
    with db_session():
        save_new_files(indexed_files=processed_files, file_type="image")
        db_flush()
        # get available images from DB
        images = Image.all()
        for idx, image in enumerate(images):
            str_image_idx = str(idx)
            IMAGE_PATH_DICT[str_image_idx] = {
                "id": image.id,
                "name": image.image_name,
                "additional_attrs": {
                    "height": image.image_height,
                    "width": image.image_width
                },
                "folder": image.image_path,
                "type": (image.image_name.split(".")[-1]).lower(),
                "full_path": image.full_path(),
            }
            # Row `idx` must exist before writing into it. The original
            # called setRowCount(idx) and setItem(idx - 1, ...), which made
            # setItem(-1, ...) a no-op for the first image and left the
            # table one row short overall (cf. table_data_init).
            self.imageListTable.setRowCount(idx + 1)
            self.imageListTable.setItem(idx, 0,
                                        QTableWidgetItem(str_image_idx))
            self.imageListTable.setItem(idx, 1,
                                        QTableWidgetItem(image.image_name))
            self.imageListTable.setItem(
                idx, 2,
                QTableWidgetItem(IMAGE_PATH_DICT[str_image_idx]["type"]))
            duplicateIcon = QTableWidgetItem()
            duplicateIcon.setIcon(QIcon("gui/static/icon_view_duplicates.png"))
            self.imageListTable.setItem(idx, 3, duplicateIcon)
    # TODO add video to DB and processing logic
    """
    for video in videos:
        rowVideos += 1
        videoId = str(rowVideos)
        VIDEO_PATH_DICT[videoId] = [
            video[0],
            (video[0].split(".")[-1]).lower(),
            os.path.join(video[1], video[0]),
        ]
        self.videoListTable.setRowCount(rowVideos)
        self.videoListTable.setItem(rowVideos - 1, 0, QTableWidgetItem(videoId))
        self.videoListTable.setItem(rowVideos - 1, 1, QTableWidgetItem(video[0]))
        self.videoListTable.setItem(
            rowVideos - 1, 2, QTableWidgetItem(VIDEO_PATH_DICT[videoId][1])
        )
        duplicateIcon = QTableWidgetItem()
        duplicateIcon.setIcon(
            QWidget().style().standardIcon(QStyle.SP_FileDialogContentsView)
        )
        self.videoListTable.setItem(rowVideos, 3, duplicateIcon)
    """
    self.finishedTrigger.emit()