def post(self):
    context = self.get_context_data()
    image = self.request.files.get('image', [None])[0]

    # form validation
    if image is None:
        message = self.add_message(
            self._("Image field is required."), 'error')
    elif not image.content_type.startswith('image/'):
        message = self.add_message(
            self._("File should be image format."), 'error')
    else:
        message = self.add_message(
            self._("Your color palette is ready."), 'success')

    # add flash notification message to context
    context['messages'].append(message)

    if message['type'] == 'success':
        image = Image(image)
        context.update({
            'palette': image.get_palette(),
            'image_data': image.get_image_data()
        })

    return self.render(self.template_name, **context)
def field_conversion_method(field_image, image=None, get_position_field=True):
    data = np.zeros_like(field_image.data, dtype=np.float32)

    # Matrix to go from voxel to world space
    if image is not None:
        voxel_2_xyz = image.voxel_2_mm
        vol_ext = image.vol_ext
        field = Image.from_data(data, image.get_header())
    else:
        voxel_2_xyz = field_image.voxel_2_mm
        vol_ext = field_image.vol_ext
        field = Image.from_data(data, field_image.get_header())

    voxels = np.mgrid[[slice(i) for i in vol_ext]]
    voxels = [d.reshape(vol_ext, order='F') for d in voxels]
    mms = [voxel_2_xyz[i][3] + sum(voxel_2_xyz[i][k] * voxels[k]
                                   for k in range(len(voxels)))
           for i in range(len(voxel_2_xyz) - (4 - len(vol_ext)))]

    input_data = np.squeeze(field_image.data)
    field_data = np.squeeze(data)
    mms = np.squeeze(mms)

    if get_position_field:
        for i in range(data.shape[-1]):
            # Output is the deformation/position field
            field_data[..., i] = input_data[..., i] + mms[i]
    else:
        for i in range(data.shape[-1]):
            # Output is the displacement field
            field_data[..., i] = input_data[..., i] - mms[i]

    return field
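# A minimal, self-contained sketch of the voxel-to-millimetre mapping the
# function above relies on; the 4x4 affine and grid size are illustrative
# assumptions, not values from the project.
import numpy as np

# Hypothetical voxel-to-mm affine: 2 mm isotropic spacing, origin at -10 mm.
voxel_2_mm = np.array([[2.0, 0.0, 0.0, -10.0],
                       [0.0, 2.0, 0.0, -10.0],
                       [0.0, 0.0, 2.0, -10.0],
                       [0.0, 0.0, 0.0,   1.0]])
vol_ext = (3, 3, 3)  # tiny 3x3x3 grid for illustration

# Same construction as in the function: one index grid per axis, then an
# affine combination per spatial row of the matrix.
voxels = np.mgrid[[slice(i) for i in vol_ext]]
mms = [voxel_2_mm[i][3] + sum(voxel_2_mm[i][k] * voxels[k]
                              for k in range(len(voxels)))
       for i in range(3)]

# Voxel (1, 2, 0) maps to (2*1-10, 2*2-10, 2*0-10) = (-8, -6, -10) mm.
print(mms[0][1, 2, 0], mms[1][1, 2, 0], mms[2][1, 2, 0])  # -8.0 -6.0 -10.0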
def _invert(image: WandImage):
    """
    Invert an image. Made for use with _invert_command.

    :param image:
    :return discord.File:
    """
    with image:
        image.negate()
        ret = image.to_discord_file(filename="inverted.png")
        b_io = image.to_bytes_io()
    return ret, b_io
def parse_json(file):
    _dirname = os.path.dirname(os.path.dirname(file))
    basename = os.path.basename(file)
    image_filename = os.path.splitext(basename)[0]
    image_path = os.path.join(_dirname, IMAGE_FOLDER, image_filename)
    image = cv2.imread(image_path)
    image_object = Image(image, filename=image_filename)
    with open(file) as f:
        info = json.load(f)
    image_object.regions = find_regions(info)
    return image_object
def _wasted(image: WandImage):
    """
    Add the wasted image on top of the provided image.

    :param image:
    :return:
    """
    with WandImage(filename="assets/wasted.png") as wasted:
        with image:
            wasted.resize(image.width, image.height)
            image.composite(wasted, 0, 0)
            ret = image.to_discord_file("wasted.png")
            b_io = image.to_bytes_io()
    return ret, b_io
def _magic(image: WandImage):
    """
    Content-aware-scale an image. Made for use with _magic_command.

    :param image:
    :return discord.File:
    """
    # overly content-aware-scale it
    with image:
        image.liquid_rescale(
            width=int(image.width * 0.5),
            height=int(image.height * 0.5),
            delta_x=1,
            rigidity=0
        )
        image.liquid_rescale(
            width=int(image.width * 1.5),
            height=int(image.height * 1.5),
            delta_x=2,
            rigidity=0
        )
        image.resize(256, 256)
        ret = image.to_discord_file("magik.png")
        b_io = image.to_bytes_io()
    return ret, b_io
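# A standalone sketch of the same seam-carving round trip using plain Wand
# (liquid_rescale requires ImageMagick built with liblqr); the file names
# here are placeholders.
from wand.image import Image as WandImage

with WandImage(filename="input.png") as img:
    w, h = img.width, img.height
    # Squash, then over-stretch: the round trip through seam carving is
    # what produces the characteristic "magik" distortion.
    img.liquid_rescale(width=w // 2, height=h // 2, delta_x=1, rigidity=0)
    img.liquid_rescale(width=int(w * 1.5), height=int(h * 1.5),
                       delta_x=2, rigidity=0)
    img.save(filename="magik.png")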
def _thonk(image: WandImage):
    """
    Add the thonk hand image on top of the provided image.

    :param image:
    :return:
    """
    with WandImage(filename="assets/thonk_hand.png") as thonk:
        with image:
            thonk.resize(image.width, image.height)
            image.composite(thonk, 0, 0)
            ret = image.to_discord_file("thonk.png")
            b_io = image.to_bytes_io()
    return ret, b_io
def run_classification(image_path: str, mode: str, user_id: str,
                       model_checkpoint_file_path: str):
    image = Image(image_path=image_path)
    try:
        image.find_iris_and_pupil()
    except ImageProcessingException:
        return RunResults.PROCESSING_FAILURE

    iris = normalize_iris(image)
    iris.pupil = image.pupil
    iris.iris = image.iris

    # Save the normalized image to a temporary file for easier use with
    # the trained network
    create_empty_dir("tmp")
    iris_hash = hashlib.sha1(image_path.encode()).hexdigest()
    iris_path = f"tmp/{iris_hash}.jpg"
    iris.save(iris_path)

    # Load trained classifier
    classifier = IrisClassifier(load_from_checkpoint=True,
                                checkpoint_file=model_checkpoint_file_path)

    # Get the classifier's prediction
    predicted_user, probability = classifier.classify_single_image(iris_path)

    if mode == Mode.IDENTIFY:
        if predicted_user == User.UNKNOWN:
            run_result = RunResults.IDENTIFICATION_FAILURE
        else:
            print(f"This image portrays user {predicted_user} "
                  f"(Prediction probability: {probability:.2%})")
            run_result = RunResults.IDENTIFICATION_SUCCESS
    else:
        if predicted_user == User.UNKNOWN:
            run_result = RunResults.VERIFICATION_FAILURE_USER_UNKNOWN
        elif predicted_user == user_id:
            print(f"Successfully verified user {user_id} "
                  f"(Prediction probability: {probability:.2%})")
            run_result = RunResults.VERIFICATION_SUCCESS
        else:
            run_result = RunResults.VERIFICATION_FAILURE_USER_MISMATCH

    # Remove temporary files
    remove(iris_path)
    return run_result
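# A minimal usage sketch; Mode and RunResults follow the names used above,
# while the paths and user id are placeholder values.
result = run_classification(
    image_path="data/eyes/042_03.jpg",                        # hypothetical sample
    mode=Mode.IDENTIFY,
    user_id="042",                                            # only checked in verification mode
    model_checkpoint_file_path="checkpoints/iris_classifier.pt",
)
if result == RunResults.IDENTIFICATION_SUCCESS:
    print("User identified.")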
def circle_available_images(image_paths: List[str], target_dir: str):
    create_empty_dir(target_dir)
    failed_count = 0
    for image_path in image_paths:
        eye_image = Image(image_path=image_path)
        user_id, sample_id = extract_user_sample_ids(image_path)
        try:
            eye_image.circle_iris_and_pupil().save(
                f"{target_dir}/{user_id}_{sample_id}.jpg"
            )
        except ImageProcessingException:
            failed_count += 1
    print(f"{failed_count} failed preprocessings")
def initialise_field(im, affine=None):
    """
    Create a field image from the specified target image.
    Sets the data to 0.

    Parameters:
    -----------
    :param im: The target image. Mandatory.
    :param affine: The initial affine transformation
    :return: The created field object
    """
    vol_ext = im.vol_ext
    dims = list(vol_ext)
    while len(dims) < 4:
        dims.append(1)
    dims.append(len(vol_ext))

    # Initialise with zeros
    data = np.zeros(dims, dtype=np.float32)
    field = Image.from_data(data, im.get_header())

    # An affine transformation has been supplied
    if affine is not None:
        if affine.shape != (4, 4):
            raise RegError('Input affine transformation '
                           'should be a 4x4 matrix.')
        # The updated transformation
        transform = affine * im.voxel_2_mm
        field.update_transformation(transform)

    return field
def initialise_jacobian_field(im, affine=None):
    """
    Create a Jacobian field image from the specified target image/field.
    Sets the data to 0.

    Parameters:
    -----------
    :param im: The target image/field. Mandatory.
    :param affine: The initial affine transformation
    :return: The created Jacobian field object. Each Jacobian is stored
        as a flat vector of num_dims ** 2 entries (e.g. 9 in 3D) in
        row-major order
    """
    vol_ext = np.array(im.vol_ext)
    dims = list(vol_ext)
    while len(dims) < 4:
        dims.append(1)
    num_dims = len(vol_ext[vol_ext > 1])
    dims.append(num_dims ** 2)

    # Initialise with zeros
    data = np.zeros(dims, dtype=np.float32)
    jacfield = Image.from_data(data, im.get_header())
    jacfield.set_matrix_data_attributes(num_dims, num_dims)

    # An affine transformation has been supplied
    if affine is not None:
        if affine.shape != (4, 4):
            raise RegError('Input affine transformation '
                           'should be a 4x4 matrix.')
        # The updated transformation
        transform = affine * im.voxel_2_mm
        jacfield.update_transformation(transform)

    return jacfield
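# Row-major storage means element (i, j) of each d x d Jacobian lives at
# flat index i*d + j. A small sketch of the round trip, independent of the
# project's Image class:
import numpy as np

d = 3                                      # 3D case: 9 entries per voxel
jac = np.arange(d * d, dtype=np.float32)   # flat row-major vector at one voxel

J = jac.reshape(d, d)                      # back to a 3x3 matrix
assert J[1, 2] == jac[1 * d + 2]           # (i, j) sits at index i*d + j

det = np.linalg.det(J)                     # the local volume change factor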
def __init__(self):
    super().__init__()
    self.mainUI = QUiLoader().load(mainWindowPath)
    self.subUI = QUiLoader().load(subWindowPath)
    self.mainUI.setWindowTitle('EDAutopilot v2')

    # self.logger = Logger(self.mainUI.logText, toFile=True)  # start logger
    self.thread_log = LogThread()
    self.logger = self.thread_log.getLogger()
    self.thread_log._logSignal.connect(self.onReceiveLog)
    self.thread_log.start()

    self.config = Config(logger=self.logger)
    if self.config.get('Main', 'log_to_file'):
        self.thread_log.initFile(
            self.config.get('Main', 'override_previous_logs'))

    self.keysDict = init_keybinds(self.logger)
    self.mainUI.actionScriptName.setDisabled(True)

    self.thread_io = IOThread(self.usingWatchdog, logger=self.logger)
    self.thread_io._ioSignal.connect(self.updateStatus)
    self.thread_io.start()

    self.image_templates = Image(logger=self.logger, config=self.config)
    self.thread_image = ImageThread(logger=self.logger)
    self.thread_image._imageSignal.connect(self.updateImage)
    self.thread_image.start()

    self.thread_script = None  # initialize in loadScript()

    # connect to ContextMenu in logging area (Copy, debug view,
    # clear screen...)
    self.mainUI.logText.customContextMenuRequested.connect(
        self._onRightClickLog)
    self.mainUI.actionLoadScript.triggered.connect(
        lambda: self.loadScript(None))
    self.mainUI.actionStopScript.triggered.connect(self.stopScript)
    self.mainUI.actionSettings.triggered.connect(self.onSettings)
    self.mainUI.actionAbout.triggered.connect(self.onAbout)
    self.mainUI.actionExit.triggered.connect(self.onExit)
    QApplication.instance().aboutToQuit.connect(self.onExit)
    self.mainUI.actionScriptName.setText('(Empty)')

    self.locationLabel = QLabel('Loc: None'.ljust(49))
    self.targetLabel = QLabel('Target: None'.ljust(49))
    self.alignedLabel = QLabel('Align: 0'.ljust(9))
    self.fpsLabel = QLabel('FPS: 0'.ljust(8))
    self.scriptStatusLabel = QLabel('Idle'.ljust(20))
    self.mainUI.statusBar().addWidget(self.locationLabel)
    self.mainUI.statusBar().addWidget(self.targetLabel)
    self.mainUI.statusBar().addWidget(self.alignedLabel)
    self.mainUI.statusBar().addWidget(self.fpsLabel)
    self.mainUI.statusBar().addWidget(self.scriptStatusLabel)

    self._setScriptActionsState(True)
    if self.config.get('GUI', 'load_default_on_startup'):
        defaultPath = self.config.get('GUI', 'default_script')
        if defaultPath is not None:
            self.loadScript(path=defaultPath)
def on_website_crawled(response):
    global image_list
    decoded = response.body.decode('utf-8')

    # luckily re.findall is always in the same order
    links = re.findall(r'data-src="([^"]+)"', decoded)
    users = re.findall(r'\/u[^>]+>([^<]+)<\/a', decoded)

    print("Done crawling! Found {} new images.".format(len(links)))
    for index, link in enumerate(links):
        image_list.append(Image(link, "image/jpeg", users[index]))
    get_next_image(image_callback)
def _ascii(image: WandImage, inverted: bool = False,
           brightness: int = 100, size: int = 62):
    """
    Convert an image into an ascii-art string.

    :param image: The :class WandImage: to convert to ascii.
    :param inverted: A :type bool: determining whether or not to invert.
    :param brightness: A :type int: determining the brightness.
    :param size: A :type int: determining the size.
    :return: A :type str: containing the art.
    """
    with image:
        if inverted:
            image.negate()
        if brightness != 100:
            image.modulate(brightness=brightness)
        if size > 62:
            size = 62
        if size < 0:
            size = 2
        size = int(math.ceil(size / 2.) * 2)
        image.sample(size, int(size / 2))

        ascii_art = "```"
        for row in image:
            ascii_art += "\n"
            for col in row:
                with Color(str(col)) as c:
                    ascii_art += c.ascii_character
        ascii_art += "```"
    return ascii_art, ascii_art
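# `ascii_character` above is a project-specific attribute on the Color
# object; the underlying idea is mapping pixel intensity onto a ramp of
# increasingly dense glyphs. A standalone sketch with an illustrative ramp:
RAMP = " .:-=+*#%@"  # light to dark

def ascii_character(intensity: float) -> str:
    """Map an intensity in [0, 1] to one glyph on the ramp."""
    idx = min(int(intensity * len(RAMP)), len(RAMP) - 1)
    return RAMP[idx]

print("".join(ascii_character(v) for v in (0.0, 0.4, 0.9)))  # " =@"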
def exponentiate(self):
    """
    Compute the exponential of this velocity field using the scaling
    and squaring approach.

    The velocity field lives in the tangent space of the manifold, the
    displacement field on the manifold itself, and the mapping between
    the two is given by the exponential chart.

    :return: Image holding the exponentiated (integrated) field.
    """
    self.__do_init_check()
    data = self.field.data
    result_data = np.zeros(self.field.data.shape)
    result = Image.from_data(result_data, self.field.get_header())

    # Important: need to specify which axis to take the norm over
    norm = np.linalg.norm(data, axis=data.ndim - 1)
    max_norm = np.max(norm[:])

    if max_norm < 0:
        raise ValueError('Maximum norm is invalid.')
    if max_norm == 0:
        return result

    pix_dims = np.asarray(self.field.zooms)
    # ignore NULL dimensions
    min_size = np.min(pix_dims[pix_dims > 0])
    num_steps = max(0, np.ceil(np.log2(max_norm /
                                       (min_size / 2))).astype('int'))

    # Approximate the initial exponential
    init = 1 << num_steps
    result.data = data / init

    dfc = DisplacementFieldComposer()
    # Do the squaring step to perform the integration.
    # The exponential is num_steps recursive compositions of the field
    # with itself, which is equivalent to integration over the unit
    # interval.
    for _ in range(0, num_steps):
        result = dfc.compose(result, result)
    return result
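# The same scaling-and-squaring idea is easiest to see on the matrix
# exponential: exp(A) = (exp(A / 2**k))**(2**k), with the scaled factor
# approximated first-order by I + A / 2**k (mirroring
# `result.data = data / init` above). A self-contained NumPy sketch; the
# matrix and step count are illustrative.
import numpy as np

def expm_scaling_squaring(A: np.ndarray, num_steps: int = 10) -> np.ndarray:
    """Approximate exp(A) by squaring a first-order scaled exponential."""
    result = np.eye(A.shape[0]) + A / (1 << num_steps)  # initial exponential
    for _ in range(num_steps):
        result = result @ result  # squaring = composing the map with itself
    return result

A = np.array([[0.0, 1.0], [-1.0, 0.0]])  # generator of a plane rotation
R = expm_scaling_squaring(A)
# exp(A) is the rotation by 1 radian
assert np.allclose(R, [[np.cos(1), np.sin(1)],
                       [-np.sin(1), np.cos(1)]], atol=1e-3)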
def _expand(image: WandImage):
    """
    Expand an image using a bit of seam carving.

    :param image:
    :return discord.File:
    """
    with image:
        image.liquid_rescale(int(image.width * 0.5), image.height)
        image.liquid_rescale(int(image.width * 3.5), image.height, delta_x=1)
        ret = image.to_discord_file("expand_dong.png")
        b_io = image.to_bytes_io()
    return ret, b_io
def _deepfry(image: WandImage):
    """
    Deep-fry an image.

    :param image:
    :return:
    """
    with image:
        if image.format != "jpeg":
            image.format = "jpeg"
        image.compression_quality = 2
        image.modulate(saturation=700)
        ret = image.to_discord_file("deep-fry.png")
        b_io = image.to_bytes_io()
    return ret, b_io
class Worker(threading.Thread):
    def __init__(self, location, job, queue):
        self.location = location
        self.queue = queue
        self.job = job
        threading.Thread.__init__(self)
        self.log = logging.getLogger(__name__)
        self.log.info("log initialized")
        self.config = Config()
        self.log.info("config initialized")
        self.database = Database(self.config)
        self.log.info("database initialized")

    def setup_meta(self):
        self.log.debug("setup meta")
        os.makedirs(self.location, exist_ok=True)
        if not os.path.exists(self.location + "/meta"):
            cmdline = "git clone https://github.com/aparcar/meta-imagebuilder.git ."
            proc = subprocess.Popen(
                cmdline.split(" "),
                cwd=self.location,
                stdout=subprocess.PIPE,
                shell=False,
            )
            _, errors = proc.communicate()
            return_code = proc.returncode
            if return_code != 0:
                self.log.error("failed to setup meta ImageBuilder")
                exit()
            self.log.info("meta ImageBuilder successfully setup")

    def write_log(self, path, stdout=None, stderr=None):
        with open(path, "a") as log_file:
            log_file.write("### BUILD COMMAND:\n\n")
            for key, value in self.params.items():
                log_file.write("{}={}\n".format(key.upper(), str(value)))
            log_file.write("sh meta\n")
            if stdout:
                log_file.write("\n\n### STDOUT:\n\n" + stdout)
            if stderr:
                log_file.write("\n\n### STDERR:\n\n" + stderr)

    # build image
    def build(self):
        self.log.debug("create and parse manifest")

        # fail path in case of errors
        fail_log_path = self.config.get_folder(
            "download_folder") + "/faillogs/faillog-{}.txt".format(
                self.params["request_hash"])

        self.image = Image(self.params)

        # first determine the resulting manifest hash
        return_code, manifest_content, errors = self.run_meta("manifest")

        if return_code == 0:
            self.image.params["manifest_hash"] = get_hash(manifest_content,
                                                          15)

            manifest_pattern = r"(.+) - (.+)\n"
            manifest_packages = dict(
                re.findall(manifest_pattern, manifest_content))
            self.database.add_manifest_packages(
                self.image.params["manifest_hash"], manifest_packages)
            self.log.info("successfully parsed manifest")
        else:
            self.log.error("couldn't determine manifest")
            self.write_log(fail_log_path, stderr=errors)
            self.database.set_image_requests_status(
                self.params["request_hash"], "manifest_fail")
            return False

        # set directory where image is stored on server
        self.image.set_image_dir()
        self.log.debug("dir %s", self.image.params["dir"])

        # calculate hash based on the resulting manifest
        self.image.params["image_hash"] = get_hash(
            " ".join(self.image.as_array("manifest_hash")), 15)

        # set log path in case of success
        success_log_path = self.image.params[
            "dir"] + "/buildlog-{}.txt".format(self.params["image_hash"])

        # set build_status ahead; if stuff goes wrong it will be changed
        self.build_status = "created"

        # check if image already exists
        if not self.image.created():
            self.log.info("build image")
            with tempfile.TemporaryDirectory(
                    dir=self.config.get_folder("tempdir")) as build_dir:
                # now actually build the image with manifest hash as
                # EXTRA_IMAGE_NAME
                self.params["worker"] = self.location
                self.params["BIN_DIR"] = build_dir
                self.params["j"] = str(os.cpu_count())
                self.params["EXTRA_IMAGE_NAME"] = self.params["manifest_hash"]
                # if uci defaults are added, append at least part of their
                # hash to the image name
                if self.params["defaults_hash"]:
                    defaults_dir = build_dir + "/files/etc/uci-defaults/"
                    # create folder to store uci defaults
                    os.makedirs(defaults_dir)
                    # request defaults content from database
                    defaults_content = self.database.get_defaults(
                        self.params["defaults_hash"])
                    with open(defaults_dir + "99-server-defaults",
                              "w") as defaults_file:
                        # TODO check if special encoding is required
                        defaults_file.write(defaults_content)
                    # tell ImageBuilder to integrate files
                    self.params["FILES"] = build_dir + "/files/"
                    self.params["EXTRA_IMAGE_NAME"] += "-" + self.params[
                        "defaults_hash"][:6]

                # download is already performed for manifest creation
                self.params["NO_DOWNLOAD"] = "1"

                build_start = time.time()
                return_code, buildlog, errors = self.run_meta("image")
                self.image.params["build_seconds"] = int(time.time() -
                                                         build_start)

                if return_code == 0:
                    # create folder in advance
                    os.makedirs(self.image.params["dir"], exist_ok=True)
                    self.log.debug(os.listdir(build_dir))
                    for filename in os.listdir(build_dir):
                        if os.path.exists(self.image.params["dir"] + "/" +
                                          filename):
                            break
                        shutil.move(build_dir + "/" + filename,
                                    self.image.params["dir"])

                    # possible sysupgrade names, ordered by likeliness
                    possible_sysupgrade_files = [
                        "*-squashfs-sysupgrade.bin",
                        "*-squashfs-sysupgrade.tar", "*-squashfs.trx",
                        "*-squashfs.chk", "*-squashfs.bin",
                        "*-squashfs-sdcard.img.gz", "*-combined-squashfs*",
                        "*.img.gz"
                    ]

                    sysupgrade = None
                    for sysupgrade_file in possible_sysupgrade_files:
                        sysupgrade = glob.glob(self.image.params["dir"] +
                                               "/" + sysupgrade_file)
                        if sysupgrade:
                            break

                    if not sysupgrade:
                        self.log.debug("sysupgrade not found")
                        if buildlog.find("too big") != -1:
                            self.log.warning("created image was too big")
                            self.database.set_image_requests_status(
                                self.params["request_hash"],
                                "imagesize_fail")
                            self.write_log(fail_log_path, buildlog, errors)
                            return False
                        else:
                            self.build_status = "no_sysupgrade"
                            self.image.params["sysupgrade"] = ""
                    else:
                        self.image.params["sysupgrade"] = os.path.basename(
                            sysupgrade[0])

                    self.write_log(success_log_path, buildlog)
                    self.database.add_image(self.image.get_params())
                    self.log.info("build successful")
                else:
                    self.log.info("build failed")
                    self.database.set_image_requests_status(
                        self.params["request_hash"], 'build_fail')
                    self.write_log(fail_log_path, buildlog, errors)
                    return False

        self.log.info("link request %s to image %s",
                      self.params["request_hash"],
                      self.params["image_hash"])
        self.database.done_build_job(self.params["request_hash"],
                                     self.image.params["image_hash"],
                                     self.build_status)
        return True

    def run(self):
        self.setup_meta()
        while True:
            self.params = self.queue.get()
            self.version_config = self.config.version(
                self.params["distro"], self.params["version"])
            if self.job == "image":
                self.build()
            elif self.job == "update":
                self.info()
                self.parse_packages()

    def info(self):
        self.parse_info()
        if os.path.exists(
                os.path.join(self.location, "imagebuilder",
                             self.params["distro"], self.params["version"],
                             self.params["target"], self.params["subtarget"],
                             "target/linux", self.params["target"],
                             "base-files/lib/upgrade/platform.sh")):
            self.log.info("%s target is supported", self.params["target"])
            self.database.insert_supported(self.params)

    def run_meta(self, cmd):
        cmdline = ["sh", "meta", cmd]
        env = os.environ.copy()

        if "parent_version" in self.version_config:
            self.params["IB_VERSION"] = self.version_config["parent_version"]
        if "repos" in self.version_config:
            self.params["REPOS"] = self.version_config["repos"]

        for key, value in self.params.items():
            env[key.upper()] = str(value)  # TODO convert meta script to Makefile

        proc = subprocess.Popen(
            cmdline,
            cwd=self.location,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            env=env)

        output, errors = proc.communicate()
        return_code = proc.returncode
        output = output.decode('utf-8')
        errors = errors.decode('utf-8')

        return (return_code, output, errors)

    def parse_info(self):
        self.log.debug("parse info")

        return_code, output, errors = self.run_meta("info")

        if return_code == 0:
            default_packages_pattern = r"(.*\n)*Default Packages: (.+)\n"
            default_packages = re.match(default_packages_pattern, output,
                                        re.M).group(2)
            logging.debug("default packages: %s", default_packages)

            profiles_pattern = r"(.+):\n (.+)\n Packages: (.*)\n"
            profiles = re.findall(profiles_pattern, output)
            if not profiles:
                profiles = []
            self.database.insert_profiles(
                {
                    "distro": self.params["distro"],
                    "version": self.params["version"],
                    "target": self.params["target"],
                    "subtarget": self.params["subtarget"]
                }, default_packages, profiles)
        else:
            logging.error("could not receive profiles")
            return False

    def parse_packages(self):
        self.log.info("receive packages")

        return_code, output, errors = self.run_meta("package_list")

        if return_code == 0:
            packages = re.findall(r"(.+?) - (.+?) - .*\n", output)
            self.log.info("found {} packages".format(len(packages)))
            self.database.insert_packages_available(
                {
                    "distro": self.params["distro"],
                    "version": self.params["version"],
                    "target": self.params["target"],
                    "subtarget": self.params["subtarget"]
                }, packages)
        else:
            self.log.warning("could not receive packages")
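# A minimal sketch of how such a worker might be wired up; the locations
# and queue payload are illustrative, with keys taken from what build()
# and run_meta() read above.
import queue

build_queue = queue.Queue()
for location in ("/srv/worker1", "/srv/worker2"):  # hypothetical paths
    Worker(location, job="image", queue=build_queue).start()

# Each queued dict becomes self.params inside Worker.run()
build_queue.put({
    "distro": "openwrt",
    "version": "18.06.1",
    "target": "ath79",
    "subtarget": "generic",
    "request_hash": "abc123",  # placeholder
    "defaults_hash": "",       # no uci-defaults attached
})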
        img.save(image_path)

    if save_reference:
        image_path = image_path[:-4] + '_reference' + extension
        if not os.path.exists(image_path):
            img_float = img_as_float(img)
            if image_source == 'google':
                # remove alpha channel
                img_new = np.zeros((len(img_float), len(img_float[0]), 3))
                for r in range(0, len(img_float)):
                    for c in range(0, len(img_float[0])):
                        img_new[r][c] = img_float[r][c][:3]
                img_float = img_new
            image = Image(img_float)
            for pixel in road.pixels(size, zoom_level):
                image.mark_pixel(pixel, radius=2)
            img = im.fromarray((image.get() * 255).astype(np.uint8))
            img.save(image_path)

i = 0
for segment in segments:
    i += 1
    image_path = (path_images + name + '_' + str(i) + '_z' + str(zoom_level)
                  + '_s' + str(size) + extension)
    if not os.path.exists(image_path):
        mpp = meter_per_pixel(segment.center().lat, zoom_level)
        reference = segment.pixels(size, zoom_level=zoom_level)
def split_system_users(circled_images_dir: str = CIRCLED_DATA_DIR,
                       original_images_dir: str = ORIGINAL_DATA_DIR,
                       system_database_dir: str = SYSTEM_DATABASE_DIR,
                       normalized_images_dir: str = NORMALIZED_DATA_DIR,
                       number_of_users_wanted: int = 100,
                       normalized_image_width: int = NORMALIZED_IMAGE_WIDTH,
                       normalized_image_height: int = NORMALIZED_IMAGE_HEIGHT,
                       random_state: int = 1):
    """
    Divide selected users into "registered", which will be known to the
    system, and "unknown", which should be rejected upon identification.
    Move their input images into their respective data/system_database
    folders. Additionally, split "registered" user photos into train and
    val subsets and save them into data/tmp/normalized.
    """
    circled_images_paths = sorted(glob(f"{circled_images_dir}/*"))
    top_users_ids = _find_users_with_most_photos(circled_images_paths,
                                                 number_of_users_wanted)

    # Filter out unnecessary images
    top_users_image_paths = [
        path for path in circled_images_paths
        if extract_user_sample_ids(path)[0] in top_users_ids
    ]

    # Shuffle IDs
    random.seed(random_state)
    random.shuffle(top_users_ids)

    # Split users into "registered" and "unknown"
    split_idx = number_of_users_wanted // 2
    registered_ids = top_users_ids[:split_idx]

    registered_dir = f"{system_database_dir}/registered_users"
    unknown_dir = f"{system_database_dir}/unknown_users"
    create_empty_dir(normalized_images_dir)
    create_empty_dir(registered_dir)
    create_empty_dir(unknown_dir)

    for image_path in tqdm(top_users_image_paths,
                           desc="Copying iris images"):
        user_id, _ = extract_user_sample_ids(image_path)
        is_registered = user_id in registered_ids
        target_dir = registered_dir if is_registered else unknown_dir
        file_name = get_file_name(image_path)
        original_image_path = f"{original_images_dir}/{file_name}"

        # Save original image to data/system_database
        copyfile(original_image_path, f"{target_dir}/{file_name}")

        if is_registered:
            eye_image = Image(image_path=original_image_path)
            eye_image.find_iris_and_pupil()
            normalized = normalize_iris(
                eye_image,
                output_height=normalized_image_height,
                output_width=normalized_image_width
            )
            normalized.save(f"{normalized_images_dir}/{file_name}")
def normalize_iris(image: Image,
                   output_height: int = NORMALIZED_IMAGE_HEIGHT,
                   output_width: int = NORMALIZED_IMAGE_WIDTH) -> Image:
    """
    Normalize the iris region by unwrapping the circular region into a
    rectangular block of constant dimensions.
    Source: https://github.com/thuyngch/Iris-Recognition/

    :param image: Input iris image
    :param output_height: Radial resolution (vertical dimension)
    :param output_width: Angular resolution (horizontal dimension)
    :return image: Normalized form of the iris region
    """
    radius_pixels = output_height + 2
    angle_divisions = output_width - 1

    theta = np.linspace(0, 2 * np.pi, angle_divisions + 1)

    # Calculate displacement of pupil center from the iris center
    ox = image.pupil.center_x - image.iris.center_x
    oy = image.pupil.center_y - image.iris.center_y

    sgn = -1 if ox <= 0 else 1
    if ox == 0 and oy > 0:
        sgn = 1

    a = np.ones(angle_divisions + 1) * (ox ** 2 + oy ** 2)

    # Need to do something for ox = 0
    if ox == 0:
        phi = np.pi / 2
    else:
        phi = np.arctan(oy / ox)

    b = sgn * np.cos(np.pi - phi - theta)

    # Calculate radius around the iris as a function of the angle
    r = np.sqrt(a) * b + np.sqrt(a * b ** 2 - (a - image.iris.radius ** 2))
    r = np.array([r - image.pupil.radius])

    r_mat = np.dot(np.ones([radius_pixels, 1]), r)
    r_mat = r_mat * np.dot(np.ones([angle_divisions + 1, 1]), np.array([
        np.linspace(0, 1, radius_pixels)
    ])).transpose()
    r_mat = r_mat + image.pupil.radius

    # Exclude values at the boundary of the pupil-iris border and the
    # iris-sclera border, as these may not correspond to areas in the
    # iris region and would introduce noise, i.e. don't take the
    # outermost rings as iris data.
    r_mat = r_mat[1: radius_pixels - 1, :]

    # Calculate cartesian location of each data point around the
    # circular iris region
    x_cos_mat = np.dot(np.ones([radius_pixels - 2, 1]),
                       np.array([np.cos(theta)]))
    x_sin_mat = np.dot(np.ones([radius_pixels - 2, 1]),
                       np.array([np.sin(theta)]))

    xo = r_mat * x_cos_mat
    yo = r_mat * x_sin_mat

    xo = image.pupil.center_x + xo
    xo = np.round(xo).astype(int)
    coords = np.where(xo >= image.shape[1])
    xo[coords] = image.shape[1] - 1
    coords = np.where(xo < 0)
    xo[coords] = 0

    yo = image.pupil.center_y - yo
    yo = np.round(yo).astype(int)
    coords = np.where(yo >= image.shape[0])
    yo[coords] = image.shape[0] - 1
    coords = np.where(yo < 0)
    yo[coords] = 0

    polar_array = image.img[yo, xo]
    polar_array = polar_array / 255

    # Get rid of outlying points in order to write out the circular pattern
    image.img[yo, xo] = 255

    # Get pixel coords for circle around iris
    x, y = image.iris.find_circle_coordinates(image.shape)
    image.img[y, x] = 255

    # Get pixel coords for circle around pupil
    xp, yp = image.pupil.find_circle_coordinates(image.shape)
    image.img[yp, xp] = 255

    # Replace NaNs before performing feature encoding
    coords = np.where(np.isnan(polar_array))
    polar_array2 = polar_array
    polar_array2[coords] = 0.5
    avg = np.sum(polar_array2) / (polar_array.shape[0] * polar_array.shape[1])
    polar_array[coords] = avg

    return Image((polar_array * 255).astype(np.uint8))
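# When pupil and iris are concentric (ox = oy = 0) the remapping above
# reduces to plain polar unwrapping: sample along rays between the two
# radii. A standalone sketch of that special case; shapes and values are
# illustrative.
import numpy as np

def unwrap_concentric(img, cx, cy, r_pupil, r_iris, height=64, width=256):
    """Polar unwrap assuming pupil and iris share a center."""
    theta = np.linspace(0, 2 * np.pi, width, endpoint=False)
    # radial fractions strictly inside (0, 1): skip both boundary rings,
    # just as the function above drops the first and last rows of r_mat
    frac = np.linspace(0, 1, height + 2)[1:-1]
    r = r_pupil + np.outer(frac, np.ones(width)) * (r_iris - r_pupil)
    x = np.clip(np.round(cx + r * np.cos(theta)).astype(int),
                0, img.shape[1] - 1)
    y = np.clip(np.round(cy - r * np.sin(theta)).astype(int),
                0, img.shape[0] - 1)
    return img[y, x]

eye = np.random.randint(0, 256, (280, 320), dtype=np.uint8)  # fake eye image
strip = unwrap_concentric(eye, cx=160, cy=140, r_pupil=30, r_iris=90)
print(strip.shape)  # (64, 256)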
def _process_request(self):
    self.log.debug("request_json: %s", self.request_json)

    # if request_hash is available, check the database directly
    if "request_hash" in self.request_json:
        self.request = self.database.check_build_request_hash(
            self.request_json["request_hash"])

        if not self.request:
            self.response_status = HTTPStatus.NOT_FOUND
            return self.respond()
        else:
            return self.return_status()
    else:
        # required params for a build request
        missing_params = self.check_missing_params(
            ["distro", "version", "target", "subtarget", "board"])
        if missing_params:
            return self.respond()

    self.request_json["profile"] = self.request_json[
        "board"]  # TODO fix this workaround

    if "defaults" in self.request_json:
        # check if the uci file exceeds the max file size. this should be
        # done as the uci-defaults are at least temporarily stored in the
        # database to be passed to a worker
        if getsizeof(self.request_json["defaults"]) > self.config.get(
                "max_defaults_size"):
            self.response_json[
                "error"] = "attached defaults exceed max size"
            self.response_status = 420  # this error code is the best I could find
            self.respond()

    # create image object to get the request_hash
    image = Image(self.request_json)
    image.set_packages_hash()
    request_hash = get_hash(" ".join(image.as_array("packages_hash")), 12)
    request_database = self.database.check_build_request_hash(request_hash)

    # if found, instantly return the status
    if request_database:
        self.log.debug("found image in database: %s",
                       request_database["status"])
        self.request = request_database
        return self.return_status()
    else:
        self.request["request_hash"] = request_hash

    self.request["packages_hash"] = image.params[
        "packages_hash"]  # TODO make this better

    # if not, perform various checks to see if the request is actually valid

    # check for valid distro and version
    bad_request = self.check_bad_request()
    if bad_request:
        return bad_request

    # check for valid target and subtarget
    bad_target = self.check_bad_target()
    if bad_target:
        return bad_target

    # check for existing packages
    bad_packages = self.check_bad_packages()
    if bad_packages:
        return bad_packages

    # add package_hash to database
    self.database.insert_packages_hash(self.request["packages_hash"],
                                       self.request["packages"])

    # now some heavy guesswork is done to figure out the profile;
    # eventually this could be simplified if upstream unifies the
    # profiles/boards
    if "board" in self.request_json:
        self.log.debug("board in request, search for %s",
                       self.request_json["board"])
        self.request["profile"] = self.database.check_profile(
            self.request["distro"], self.request["version"],
            self.request["target"], self.request["subtarget"],
            self.request_json["board"])

    if not self.request["profile"]:
        if "model" in self.request_json:
            self.log.debug("model in request, search for %s",
                           self.request_json["model"])
            self.request["profile"] = self.database.check_model(
                self.request["distro"], self.request["version"],
                self.request["target"], self.request["subtarget"],
                self.request_json["model"])
            self.log.debug("model search found profile %s",
                           self.request["profile"])

    if not self.request["profile"]:
        if self.database.check_profile(self.request["distro"],
                                       self.request["version"],
                                       self.request["target"],
                                       self.request["subtarget"],
                                       "Generic"):
            self.request["profile"] = "Generic"
        elif self.database.check_profile(self.request["distro"],
                                         self.request["version"],
                                         self.request["target"],
                                         self.request["subtarget"],
                                         "generic"):
            self.request["profile"] = "generic"
        else:
            self.response_json[
                "error"] = "unknown device, please check model and board params"
            self.response_status = HTTPStatus.PRECONDITION_FAILED  # 412
            return self.respond()

    self.request["defaults_hash"] = image.params["defaults_hash"]
    # check if a default uci config is attached to the request
    if image.params["defaults_hash"] != "":
        self.database.insert_defaults(image.params["defaults_hash"],
                                      self.request_json["defaults"])

    # all checks passed, eventually add to queue!
    self.request.pop("packages")
    self.log.debug("add build job %s", self.request)
    self.database.add_build_job(self.request)
    return self.return_queued()
def generate_random_smooth_deformation(volume_size, max_deformation=3,
                                       sigma=1):
    """
    Generate a random smooth deformation field.

    :param volume_size: Size of the volume the deformation is defined on.
    :param max_deformation: Maximum amount of deformation in voxels.
    :param sigma: Width of the Gaussian used to smooth the displacement.
    The method ensures that the jacobian determinant of the deformation
    is positive.
    """
    if sigma <= 0:
        sigma = max_deformation / 3

    if len(volume_size) > 3:
        volume_size = volume_size[0:3]

    dims = list(volume_size)
    while len(dims) < 4:
        dims.append(1)
    dims.append(len(volume_size))

    # Initialise with zeros
    data = np.zeros(dims, dtype=np.float32)
    def_field = Image.generate_default_image_from_data(data)
    generate_identity_deformation(def_field)

    # Generate a random displacement field
    displacement = max_deformation * 2 * \
        (np.random.random_sample(def_field.data.shape) - 0.5)

    # Smooth the displacement field
    for i in range(0, displacement.shape[4]):
        displacement[:, :, :, 0, i] = ndimage.filters.gaussian_filter(
            displacement[:, :, :, 0, i], sigma=sigma)

    disp_s = def_field.data.squeeze() + displacement.squeeze()

    # Keep smoothing until the Jacobian determinant is safely positive
    done = False
    while not done:
        if len(volume_size) == 2:
            grad = np.gradient(disp_s[..., 0])
            x_x, x_y = grad[0], grad[1]
            grad = np.gradient(disp_s[..., 1])
            y_x, y_y = grad[0], grad[1]
            jac_det = x_x * y_y - x_y * y_x
        else:
            grad = np.gradient(disp_s[..., 0])
            x_x, x_y, x_z = grad[0], grad[1], grad[2]
            grad = np.gradient(disp_s[..., 1])
            y_x, y_y, y_z = grad[0], grad[1], grad[2]
            grad = np.gradient(disp_s[..., 2])
            z_x, z_y, z_z = grad[0], grad[1], grad[2]
            jac_det = x_x * (y_y * z_z - y_z * z_y) - \
                x_y * (y_x * z_z - y_z * z_x) + \
                x_z * (y_x * z_y - y_y * z_x)

        if np.min(jac_det) < 0.1:
            for i in range(0, displacement.shape[4]):
                displacement[:, :, :, 0, i] = ndimage.filters.gaussian_filter(
                    displacement[:, :, :, 0, i], sigma=sigma)
            disp_s = def_field.data.squeeze() + displacement.squeeze()
        else:
            done = True

    def_field.data += displacement
    return def_field
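# The positivity check above is the standard local-invertibility test: for
# a deformation phi the Jacobian determinant det(d(phi)/dx) must stay
# positive. A self-contained sketch of the same finite-difference test on a
# known smooth 2D deformation; the sine amplitude is illustrative.
import numpy as np

# Identity grid plus a small sine displacement; component i follows axis i.
n0, n1 = 64, 64
g0, g1 = np.mgrid[0:n0, 0:n1].astype(float)
amp = 1.5  # illustrative amplitude in voxels
phi0 = g0 + amp * np.sin(2 * np.pi * g1 / n1)
phi1 = g1 + amp * np.sin(2 * np.pi * g0 / n0)

# Same finite-difference Jacobian determinant as the 2D branch above
x_x, x_y = np.gradient(phi0)
y_x, y_y = np.gradient(phi1)
jac_det = x_x * y_y - x_y * y_x

print(jac_det.min())  # close to 1 and well above 0.1: locally invertible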
            # os.remove(image_path)
            # print('Test sample removed')
            continue

        node_tuples = []
        for n in range(index_start, index_goal + 1):
            node_tuples.append((nodes[n].lat, nodes[n].lon))
        node_tuples_set = set(node_tuples)
        if len(node_tuples_set) < len(node_tuples):
            print_abort('road segment contains a loop')
            # os.remove(image_path)
            # print('Test sample removed')
            continue

        image = Image(img)
        img_ref = np.matrix.copy(img)

        if save_init:
            image.mark_pixel(start, radius=5.5)
            image.mark_pixel(goal, radius=5.5)
            result = PILImage.fromarray((image.get() * 255).astype(np.uint8))
            result.save(image_path[:-4] + '_init' + extension)
            continue

        if straight_line_baseline:
            start_time = time.time()
            extraction = fill([], start, goal)
            smoothed_extraction = extraction
            cost_map = np.zeros((len(img), len(img[0])))
            total_extraction_time += time.time() - start_time
plt.subplots_adjust(left=0, right=1, top=0.99, bottom=0.01)
ax = plt.gca()
implot = ax.imshow(img_marked, interpolation='kaiser')
fig = plt.gcf()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
fm = plt.get_current_fig_manager()
fm.window.state('zoomed')  # only works for the TkAgg backend
plt.show()

start = input[0][0], input[0][1]
goal = input[1][0], input[1][1]

smoothed_extraction, points, extraction, cost_map = extract_road(
    img, start, goal)

image = Image(img)

costs_extraction = []
for pixel in extraction:
    costs_extraction.append((pixel, cost_map[pixel[1]][pixel[0]]))
    # cost = cost_map[pixel[1]][pixel[0]]
    # color = map_to_color(cost, 0, 1)
    # image.mark_pixel(pixel, color, radius=1.5)
    # image.mark_pixel(pixel, (1, 0, 0), radius=1.5)

xe = [item[0][0] for item in costs_extraction]
ye = [item[0][1] for item in costs_extraction]
ce = [item[1] for item in costs_extraction]

costs_smoothed_extraction = []
for pixel in smoothed_extraction: