def draw(dot_file_path, jupyter=True):
    """
    This method uses graphviz to draw the dot file located at
    dot_file_path. It creates a temporary file called tempo.png with a
    png of the dot file. If jupyter=True, it embeds the png in a jupyter
    notebook. If jupyter=False, it opens a window showing the png.

    Parameters
    ----------
    dot_file_path : str
    jupyter : bool

    Returns
    -------
    None

    """
    s = gv.Source.from_file(dot_file_path)
    # using display(s) will draw the graph but will not embed it
    # permanently in the notebook. To embed it permanently,
    # must generate temporary image file and use Image().
    # display(s)
    # render() returns the path of the generated png ("tempo.png").
    x = s.render("tempo", format='png', view=False)
    if jupyter:
        display(Image(x))
    else:
        # Outside a notebook: open the png in the system image viewer.
        open_image("tempo.png").show()
def baixarimg(pasta, i, n, url_img):
    """Download the image at *url_img* and save it as ``pasta/i.<ext>``.

    Parameters
    ----------
    pasta : str
        Destination directory.
    i : int or str
        Sequential number used as the file name stem.
    n : sequence of str
        Name parts; the last element is used as the file extension.
    url_img : str
        URL of the image to download.

    Returns
    -------
    None
    """
    # Build the destination path once instead of repeating the
    # concatenation three times; n[-1] replaces n[n.__len__() - 1].
    destino = pasta + "/" + str(i) + "." + n[-1]
    rr = get(str(url_img))
    with open(destino, 'wb') as code:
        code.write(rr.content)
    # Re-save through PIL, re-encoding the downloaded file in place
    # (matches the original behaviour of open + save to the same path).
    open_image(destino).save(destino)
def __init__(self, N, width=600, height=600, decay_rate=1.0, hormone_secretion=None): from PIL.Image import open as open_image # setup simulation self._N = N self._INITIAL_FIELD = np.array(open_image(path.join(ENV_MAP_PATH, 'envmap01.png'))).astype(np.float32) / 255. #self._INITIAL_FIELD = np.zeros(self._INITIAL_FIELD.shape) self._FIELD_WIDTH = self._INITIAL_FIELD.shape[1] self._FIELD_HEIGHT = self._INITIAL_FIELD.shape[0] self._FIELD_DECAY_RATE = decay_rate self._SECRATION = hormone_secretion sensor_th = np.linspace(0, 2*np.pi, self.SENSOR_NUM, endpoint=False) self._SENSOR_POSITION = self.AGENT_RADIUS * np.array([np.cos(sensor_th), np.sin(sensor_th)]).T self.reset() # initialize all variables, position, velocity and field status # setup display self._canvas = SceneCanvas(size=(width, height), position=(0,0), keys='interactive', title="ALife book "+self.__class__.__name__) self._canvas.events.mouse_double_click.connect(self._on_mouse_double_click) self._view = self._canvas.central_widget.add_view() self._view.camera = PanZoomCamera((0, 0, self._FIELD_WIDTH, self._FIELD_HEIGHT), aspect=1) self._field_image = Image(self._field, interpolation='nearest', parent=self._view.scene, method='subdivide', clim=(0,1)) self._agent_polygon = [] for i in range(self._N): p = AntSimulator._generate_agent_visual_polygon(self.AGENT_RADIUS) p.parent = self._field_image self._agent_polygon.append(p) self._canvas.show()
def gen_glyph_image(self, codepoint):
    """Return the glyph image for *codepoint*.

    Loads the pre-rendered file from the size-specific ``glyphs``
    directory when it exists; otherwise draws the glyph on the fly.
    """
    size_dir = 'x'.join(str(dim) for dim in self.size)
    glyph_path = os.path.join(
        os.path.dirname(__file__), 'glyphs', size_dir, '%08x' % codepoint)
    if not os.path.isfile(glyph_path):
        return self.draw_glyph(self.gen_image(), codepoint)
    return open_image(glyph_path)
def __init__(
    self,
    image_path=None,
    loop=True,
    align="left",
    vertical_align="top",
    resize=False,
    resize_resampling=None,
    initial_state=None,
    **kwargs,
):
    """Image component: loads *image_path* (if given) and starts the
    animation loop for animated images.

    Parameters
    ----------
    image_path : str or None
        Path of the image to load; None means no image yet.
    loop : bool
        Whether an animated image should loop.
    align, vertical_align : str
        Placement of the image within the component.
    resize : bool
        Whether the image should be resized when rendered.
    resize_resampling
        Resampling filter used when resizing.
    initial_state : dict or None
        Extra state merged over the defaults below.
    """
    # Fix the mutable-default-argument pitfall: a fresh dict per call.
    initial_state = initial_state if initial_state is not None else {}
    self._image = open_image(image_path) if image_path else None
    self.stop_animating_event = Event()
    super().__init__(
        **kwargs,
        initial_state={
            "image_path": image_path,
            "frame": 0,
            "loop": loop,
            "align": align,
            "vertical_align": vertical_align,
            "resize": resize,
            "resize_resampling": resize_resampling,
            **initial_state,
        },
    )
    if self._image and self._image.is_animated:
        self._start_animating()
def on_state_change(self, previous_state):
    """React to state updates: toggle animation when ``loop`` changes,
    and reload/reset when ``image_path`` changes."""
    # on loop change
    loop = self.state["loop"]
    if self.state["loop"] != previous_state["loop"]:
        # NOTE(review): assumes self._image is not None when loop is
        # switched on — confirm loop cannot change while image_path is None.
        if loop and self._image.is_animated:
            self._start_animating()
        if not loop and self.stop_animating_event:
            self.stop_animating_event.set()
    # on image_path change
    image_path = self.state["image_path"]
    if image_path != previous_state["image_path"]:
        # stop any running animation before switching images
        if self.stop_animating_event:
            self.stop_animating_event.set()
        # bail if image_path is now None
        if image_path is None:
            self._image = None
            return
        # update self.image
        self._image = open_image(image_path)
        # reset frame state if needed
        if self.state["frame"] != 0:
            self.state.update({"frame": 0})
        # start animating image if it is animated
        if self._image.is_animated:
            self._start_animating()
def get_template(path: str = "../resources/images/template.png") -> PILImage:
    """
    Get the template image.

    :param path: The path to the location of the template image.
    :return: The template image.
    """
    template = open_image(path)
    return template
def _load_image_from_url_or_local_path(file_path):
    """Open *file_path* with PIL, returning None when it cannot be read."""
    try:
        image = open_image(file_path)
    except (IOError, ValueError):
        return None
    return image
def load(name: Text) -> Image:
    """Return the template image *name*, loading and caching it on first use."""
    try:
        return _LOADED_TEMPLATES[name]
    except KeyError:
        pass
    LOGGER.debug("load: %s", name)
    # rp = mathtools.ResizeProxy(_g.screenshot_width)
    img = open_image(pathlib.Path(__file__).parent / "templates" / name)
    # img = imagetools.resize(img, width=rp.vector(img.width, TARGET_WIDTH))
    _LOADED_TEMPLATES[name] = img
    return img
def baixarimg(self):
    """Drain the download queue: fetch each image, save it to disk and
    update the progress widgets (label and progress bar)."""
    while not self.q.empty():
        pasta, i, n, url_img = self.q.get()
        rr = self._req_link(str(url_img))
        # Target path is pasta/i.<ext>, where the extension is the last
        # element of n.
        with open(pasta + "/" + str(i) + "." + n[n.__len__() - 1], 'wb') as code:
            code.write(rr.content)
        # Re-save through PIL, re-encoding the file in place.
        open_image(pasta + "/" + str(i) + "." + n[n.__len__() - 1]).save(pasta + "/" + str(i) + "." + n[n.__len__() - 1])
        # Progress label zero-pads single-digit counters ("03/12").
        self.ids.baixado.text = str('0'+str(self.i) if len(str(self.i)) < 2 else self.i) + \
            "/" + self.total
        self.ids.barra.value = self.i / int(self.total) * 100
        self.i += 1
def load(input_path):
    """Read a single-channel image file and wrap its pixels in a PNG object."""
    image = open_image(input_path)
    pixels = np.array(image.getdata(), dtype=np.uint8)
    # PIL reports size as (width, height) while getdata() is row-major,
    # so the array is reshaped to (height, width).
    width, height = image.size
    return PNG(pixels.reshape(height, width))
def convert_palmer_resource_to_image(resource):
    """Decode a raw image resource into a float array scaled to [0, 1).

    Parameters
    ----------
    resource : bytes
        Encoded image data.

    Returns
    -------
    numpy.ndarray
        2-D float64 array of shape (height, width), values divided by 256.
    """
    data = io.BytesIO(resource)
    image = open_image(data)
    pixels = np.array(image.getdata(), dtype=np.uint8)
    # BUG FIX: image.size is (width, height) but getdata() is row-major,
    # so reshape must use (height, width) — matching the other loaders
    # in this file; reshape(image.size) scrambled non-square images.
    width, height = image.size
    pixels = pixels.reshape(height, width)
    # np.float was removed in NumPy 1.24; float64 keeps the old behaviour.
    pixels = pixels.astype(np.float64)
    return pixels / (2.0 ** 8)
def _get_handle(self):
    """Return a PIL image handle for ``self.data``, or None when PIL is
    unavailable or the data cannot be parsed as an image."""
    if PIL is False:
        return None
    # Open image
    # NOTE(review): StringIO over raw image bytes is Python-2-era code;
    # under Python 3 this would need io.BytesIO — confirm target version.
    f = StringIO(self.data)
    try:
        im = open_image(f)
    except (IOError, OverflowError):
        return None
    # Ok
    return im
def get_img_classification(self, img_path):
    """Classify the image at *img_path* and return an ordered mapping of
    the top-5 class labels to their softmax probabilities (rounded to 3
    decimal places, best first)."""
    image = open_image(img_path).convert('RGB')
    batch = V(self._centre_crop(image).unsqueeze(0))
    logits = self.model.forward(batch)
    scores = F.softmax(logits, 1).data.squeeze()
    probs, idx = scores.sort(0, True)
    out = OrderedDict()
    for rank in range(5):
        label = self.classes[idx[rank]]
        out[label] = np.round(probs[rank].detach().item(), 3)
    return out
def get_logits(self, img_path):
    """Return the softmax scores for *img_path* as a 1-D numpy array,
    or an empty list when the image cannot be processed.

    KeyboardInterrupt is re-raised so batch runs can be aborted.
    """
    try:
        img = open_image(img_path).convert('RGB')
        input_img = V(self._centre_crop(img).unsqueeze(0)).to(self._device)
        logit = self.model.forward(input_img)
        h_x = F.softmax(logit, 1).data.squeeze()
        return h_x.detach().cpu().numpy().squeeze()
    except KeyboardInterrupt:
        raise
    except Exception:
        # A bare ``except:`` would also swallow SystemExit/GeneratorExit;
        # catch only ordinary errors here.
        logging.error(f'Cannot create logits for {img_path}')
        return []
def _load_image_or_none(self, file_path):
    """Load *file_path* (local path or http(s) URL), convert to RGB,
    thumbnail it, and return the JPEG-encoded bytes — or None on failure."""
    parsed_url = urlparse(file_path)
    if parsed_url.scheme == 'http' or parsed_url.scheme == 'https':
        try:
            response = requests.get(file_path, stream=True)
        except requests.exceptions.RequestException as e:  # base requests exception
            self.Error.request_failed(' '.join(e.args))
            # BUG FIX: the previous log.error(...) logged a literal Ellipsis.
            log.error("Image request failed", exc_info=True)
            return None
        response.raw.decode_content = True
        try:
            # BUG FIX: open_image() needs a file-like object, not raw
            # bytes — wrap the downloaded payload in BytesIO.
            image = open_image(BytesIO(response.content))
        except IOError:
            log.warning("Image skipped (invalid file path)", exc_info=True)
            return None
    else:
        try:
            image = open_image(file_path)
        except IOError:
            log.warning("Image skipped (invalid file path)", exc_info=True)
            return None
    if not image.mode == 'RGB':
        image = image.convert('RGB')
    image.thumbnail(self._target_image_size, LANCZOS)
    image_bytes_io = BytesIO()
    image.save(image_bytes_io, format="JPEG")
    image.close()
    image_bytes_io.seek(0)
    image_bytes = image_bytes_io.read()
    image_bytes_io.close()
    return image_bytes
def get_img_embedding(self, image_path):
    """Detect faces in *image_path* and return one embedding per detected
    face (empty list on failure); KeyboardInterrupt propagates."""
    try:
        frame = np.array(open_image(image_path).convert("RGB"))
        faces = self._FaceDetector.find_faces(frame)
        return [self._Encoder.generate_embedding(face) for face in faces]
    except KeyboardInterrupt:
        raise
    except Exception as e:
        logging.error(f"Cannot create embedding for {image_path}! {e}")
        return []
def frenomar(self, caminho, col=False, r=True, mensagem=1, coloca_hifen=1):
    """Walk *caminho* and either re-save images (hyphen-prefixed copies)
    or rename them sequentially / via self.nomei().

    Parameters
    ----------
    caminho : str
        Root directory to walk.
    col : bool
        When renaming, use self.nomei() on the stem instead of a counter.
    r : bool
        False: re-save each image; True: rename files.
    mensagem : int
        When != 1, show an info dialog at the end.
    coloca_hifen : int
        1: save under a "-"-prefixed name; otherwise strip "-" from the name.
    """
    for _, __, arquivo in walk(caminho):
        # Only leaf directories under caminho, skipping __pycache__.
        if str(_).find(caminho) != -1 and str(_).find("pycache") == -1:
            tam = __.__len__()
            if tam == 0:
                i = 1
                arquivo.sort()
                for arq in arquivo:
                    if not r:
                        # im = Image.open(_ + "\\" + arq)
                        # ima = im.copy()
                        # remove(_ + "\\" + arq)
                        # ima.save(_ + "\\-" + arq)
                        # C:/Users/Guilherme/Desktop/download mangas/img\BlackClover138\15.jpg
                        # C:/Users/Guilherme/Desktop/download mangas/img\BlackClover144\04.jpg
                        try:
                            if 'Thumbs' in arq:
                                continue
                            im = open_image(_ + "/" + arq)
                            x, y = im.size
                            # NOTE(review): Image.ANTIALIAS was removed in
                            # Pillow 10 — presumably this targets an older
                            # Pillow; confirm before upgrading.
                            if coloca_hifen == 1:
                                im.resize((x, y), ANTIALIAS).save(_ + "/-" + arq)
                                remove(_ + "/" + arq)
                            else:
                                im.resize((x, y), ANTIALIAS).save(_ + "/" + arq.replace("-", ""))
                                remove(_ + "/" + arq)
                        except (ValueError, IOError):
                            from tkinter import messagebox
                            messagebox.showerror("Mangás Downloader",
                                                 "Erro no Arquivo:\n" + _ + "/" + arq)
                    else:
                        if not col:
                            # Rename using the stem produced by self.nomei().
                            aa = arq.split(".")
                            rename(_ + "/" + arq,
                                   _ + "/" + self.nomei(aa[aa.__len__() - 2], col=True) + '.' + aa[aa.__len__() - 1])
                        else:
                            # Rename sequentially: 1.<ext>, 2.<ext>, ...
                            aa = arq.split(".")
                            rename(_ + "/" + arq,
                                   _ + "/" + str(i) + '.' + aa[aa.__len__() - 1])
                            i += 1
    if mensagem != 1:
        from tkinter import messagebox
        messagebox.showinfo("Mangás Downloader", "Arquivos Renomeados!")
def load_perturbation_pattern(index, path):
    """Load checkerboard perturbation pattern *index* from *path* and
    return it as a (height, width) float array scaled to [0, 1]."""
    url = get_resource_locator(index, dataset='checkerboard', scheme='file', path=path)
    url = urlparse(url)
    # Drop the leading '/' of the URL path component.
    path = url.path[1:]
    image = open_image(path)
    data = image.getdata()
    data = np.array(data, dtype=np.uint8)
    # getdata() is row-major, so reshape to (height, width).
    width, height = image.size
    data = data.reshape(height, width)
    # np.float was removed in NumPy 1.24; float64 keeps the old behaviour.
    data = data.astype(np.float64)
    data = data / np.iinfo(np.uint8).max
    return data
def get_img_embedding(self, img_path):
    """Extract a feature embedding for *img_path* by running the image
    through the first 9 sub-modules of ``self.model``.

    Returns the squeezed numpy feature array, or None on any failure.
    NOTE(review): sibling implementations return [] on failure — confirm
    which contract callers of this variant expect.
    """
    try:
        img = open_image(img_path).convert('RGB')
        input_img = V(self._centre_crop(img).unsqueeze(0)).to(device)
        # forward pass for feature extraction
        x = input_img
        i = 0
        for module in self.model._modules.values():
            # stop before module 9 — presumably the classifier head starts
            # there; TODO confirm against the model definition.
            if i == 9:
                break
            x = module(x)
            i += 1
        return x.detach().cpu().numpy().squeeze()
    except Exception as e:
        # print(e)
        # logging.error(f'Cannot create embedding for {img_path}')
        return None
def load_reference_image(index, path):
    """Load reference image *index* from *path* and return it as a
    (height, width) float array normalized to [0, 1)."""
    url = get_resource_locator(index, dataset='reference', scheme='file', path=path)
    url = urlparse(url)
    # Drop the leading '/' of the URL path component.
    path = url.path[1:]
    image = open_image(path)
    data = image.getdata()
    data = np.array(data, dtype=np.uint8)
    # getdata() is row-major, so reshape to (height, width).
    width, height = image.size
    data = data.reshape(height, width)
    # np.float was removed in NumPy 1.24; float64 keeps the old behaviour.
    data = data.astype(np.float64)
    info = np.iinfo(np.uint8)
    data = (data - float(info.min)) / float(info.max - info.min + 1)
    return data
def get_snap(self, timeout: float = 3, proxies: Any = None) -> Optional[Image]:
    """
    Gets a "snap" of the current camera video data and returns a Pillow
    Image or None

    :param timeout: Request timeout to camera in seconds
    :param proxies: http/https proxies to pass to the request object.
    :return: Image or None
    """
    data = {
        'cmd': 'Snap',
        'channel': 0,
        # Random cache-buster so the camera serves a fresh frame.
        'rs': ''.join(
            random.choices(string.ascii_uppercase + string.digits, k=10)),
        # NOTE(review): credentials are sent as plain query parameters and
        # may end up in proxy/server logs — confirm this is acceptable.
        'user': self.username,
        'password': self.password,
    }
    parms = parse.urlencode(data).encode("utf-8")
    try:
        response = requests.get(self.url, proxies=proxies, params=parms,
                                timeout=timeout)
        if response.status_code == 200:
            return open_image(BytesIO(response.content))
        print(
            "Could not retrieve data from camera successfully. Status:",
            response.status_code)
        return None
    except Exception as e:
        print("Could not get Image data\n", e)
        raise
def get_img_embedding(self, img_path):
    """Extract a feature embedding for *img_path* by running the image
    through the first 9 sub-modules of ``self.model``.

    Returns a single-element list (for compatibility with the face
    verification API) or an empty list on failure; KeyboardInterrupt
    is re-raised.
    """
    try:
        img = open_image(img_path).convert('RGB')
        input_img = V(self._centre_crop(img).unsqueeze(0)).to(self._device)
        # forward pass for feature extraction
        x = input_img
        i = 0
        for module in self.model._modules.values():
            # stop before module 9 — presumably the classifier head starts
            # there; TODO confirm against the model definition.
            if i == 9:
                break
            x = module(x)
            i += 1
        return [x.detach().cpu().numpy().squeeze()
                ]  # return as list for compatability to face verification
    except KeyboardInterrupt:
        raise
    except Exception as e:
        print(e)
        logging.error(f'Cannot create embedding for {img_path}')
        return []
def _load_image_from_url_or_local_path(self, file_path):
    """Open *file_path* as a PIL image.

    http(s) URLs go through the shared session, ftp/data URLs through
    urlopen, anything else is treated as a local path. Returns None (and
    logs a warning) on any retrieval or decode failure.
    """
    scheme = urlparse(file_path).scheme
    if scheme in ('http', 'https'):
        try:
            file = self._session.get(file_path, stream=True).raw
        except RequestException:
            log.warning("Image skipped", exc_info=True)
            return None
    elif scheme in ("ftp", "data"):
        try:
            file = urlopen(file_path)
        except (URLError, ) + ftplib.all_errors:
            log.warning("Image skipped", exc_info=True)
            return None
    else:
        file = file_path
    try:
        return open_image(file)
    except (IOError, ValueError):
        log.warning("Image skipped", exc_info=True)
        return None
def _load_image_or_none(self, file_path):
    """Load *file_path*, thumbnail it and return the JPEG-encoded bytes,
    or None when the file cannot be read (or hits the size workaround
    below)."""
    try:
        image = open_image(file_path)
    except IOError:
        log.warning("Image skipped (invalid file path)", exc_info=True)
        return None
    image.thumbnail(self._target_image_size, LANCZOS)
    image_bytes_io = BytesIO()
    image.save(image_bytes_io, format="JPEG")
    image.close()
    image_bytes_io.seek(0)
    image_bytes = image_bytes_io.read()
    image_bytes_io.close()
    # todo: temporary here because of a backend bug: when body
    # of exactly 19456 bytes in size is sent in the http2 post
    # request the request doesn't reach the upstream servers
    if len(image_bytes) == 19456:
        return None
    return image_bytes
def frenomar(col=False, r=True, t=1, m=1):
    """Walk ./img and either re-save images (hyphen-prefixed copies) or
    rename them sequentially / via nomei().

    Parameters
    ----------
    col : bool
        When renaming, use nomei() on the stem instead of a counter.
    r : bool
        False: re-save each image; True: rename files.
    t : int
        When != 1, print a completion message.
    m : int
        1: save under a "-"-prefixed name; otherwise strip "-" from the name.
    """
    for _, __, arquivo in walk('./'):
        if str(_).find("./img") != -1:
            # Only process leaf directories (no subdirectories).
            tam = __.__len__()
            if tam == 0:
                i = 1
                for arq in arquivo:
                    if not r:
                        # im = open_image(_ + "\\" + arq)
                        # ima = im.copy()
                        # remove(_ + "\\" + arq)
                        # ima.save(_ + "\\-" + arq)
                        im = open_image(_ + "\\" + arq)
                        x, y = im.size
                        # NOTE(review): Image.ANTIALIAS was removed in
                        # Pillow 10 — presumably this targets an older
                        # Pillow; confirm before upgrading.
                        if m == 1:
                            im.resize((x, y), ANTIALIAS).save(_ + "\\-" + arq)
                            remove(_ + "\\" + arq)
                        else:
                            im.resize((x, y), ANTIALIAS).save(_ + "\\" + arq.replace("-", ""))
                            remove(_ + "\\" + arq)
                    else:
                        if not col:
                            # Rename using the stem produced by nomei().
                            aa = arq.split(".")
                            rename(
                                _ + "\\" + arq,
                                _ + "\\" + nomei(aa[aa.__len__() - 2], col=True) + '.' + aa[aa.__len__() - 1])
                        else:
                            # Rename sequentially: 1.<ext>, 2.<ext>, ...
                            aa = arq.split(".")
                            rename(
                                _ + "\\" + arq,
                                _ + "\\" + str(i) + '.' + aa[aa.__len__() - 1])
                            i += 1
    if t != 1:
        print("Arquivos Renomeados")
def __init__(self, image: bytes) -> None:
    """Decode *image* (raw bytes) into a PIL Image stored on self.image.

    Raises
    ------
    MyImageError
        If the bytes are not a recognizable image format.
    """
    try:
        self.image: Image = open_image(BytesIO(image))
    except UnidentifiedImageError:
        raise MyImageError(Message.BYTES_ARE_NOT_A_IMAGE.value)
def image_crop(
    image: Union[bytes, Image],
    crop: Optional[Crop] = None,
    width_preview: Optional[int] = None,
    image_alt: Optional[str] = None,
    min_width: Optional[int] = None,
    min_height: Optional[int] = None,
    max_width: Optional[int] = None,
    max_height: Optional[int] = None,
    # FIXME: Changing these properties, the component is rerendered unfortunately.
    # ----
    # keep_selection: Optional[bool] = None,
    # disabled: Optional[bool] = None,
    # locked: Optional[bool] = None,
    rule_of_thirds: Optional[bool] = None,
    circular_crop: Optional[bool] = None,
    # ----
    key: Optional[str] = None,
) -> Optional[Image]:
    """Render the interactive crop component for *image* and return the
    cropped PIL image (None while the selection is empty).

    The component returns the crop rectangle as percentages of the full
    image; the crop itself is applied server-side with PIL. When
    *circular_crop* is set, the result is masked to an ellipse over a
    transparent RGBA background.
    """
    # Heavy imports kept local so the module imports cheaply.
    import dataclasses
    from io import BytesIO
    from os import path

    import streamlit as st
    from PIL.Image import composite as composite_image
    from PIL.Image import new as new_image
    from PIL.Image import open as open_image
    from PIL.ImageDraw import Draw
    from streamlit.components import v1 as components
    from streamlit.elements.image import image_to_url

    # Declare the frontend component once per process; in debug mode it is
    # served by the local dev server instead of the bundled build.
    global _impl
    if _impl is None:
        if _DEBUG:
            option_address = st.get_option("browser.serverAddress")
            option_port = st.get_option("browser.serverPort")
            _impl = (
                components.declare_component(
                    "image_crop",
                    url="http://localhost:3001",
                ),
                lambda s: f"http://{option_address}:{option_port}" + s,
            )
        else:
            _impl = (
                components.declare_component(
                    "image_crop",
                    path=path.join(path.dirname(path.abspath(__file__)), "frontend/build"),
                ),
                lambda s: s,
            )

    # Accept either a ready PIL image or raw encoded bytes.
    if isinstance(image, Image):
        image_ = image
    else:
        image_ = open_image(BytesIO(image))

    width, _ = image_.size

    src = image_to_url(
        image_,
        width=min(width, width_preview) if width_preview else width,
        clamp=False,
        channels="RGB",
        output_format="auto",
        image_id="foo",
    )

    crop_ = None if crop is None else dataclasses.asdict(crop)

    # Default (empty) selection returned before the user interacts.
    default = {
        "width": 0.0,
        "height": 0.0,
        "x": 0.0,
        "y": 0.0,
    }

    component, build_url = _impl
    result = component(
        src=build_url(src),
        image_alt=image_alt,
        minWidth=min_width,
        minHeight=min_height,
        maxWidth=max_width,
        maxHeight=max_height,
        # FIXME: Changing these properties, the component is rerendered unfortunately.
        # ----
        keepSelection=None,
        disabled=None,
        locked=None,
        ruleOfThirds=rule_of_thirds,
        circularCrop=circular_crop,
        # ----
        crop=crop_,
        key=key,
        default=default,
    )

    # The component reports the selection in percent of the full image.
    w, h = image_.size
    w_crop = int(w * float(result["width"]) / 100)
    h_crop = int(h * float(result["height"]) / 100)
    x0 = int(w * float(result["x"]) / 100)
    y0 = int(h * float(result["y"]) / 100)
    x1 = x0 + w_crop
    y1 = y0 + h_crop

    if w_crop <= 0 or h_crop <= 0:
        return None
    else:
        image_crop = image_.crop((x0, y0, x1, y1))
        if circular_crop:
            # Mask the crop to an ellipse over a transparent background.
            background = new_image("RGBA", (w_crop, h_crop), (0, 0, 0, 0))
            mask = new_image("L", (w_crop, h_crop), 0)
            draw = Draw(mask)
            draw.ellipse((0, 0, w_crop, h_crop), fill="white")
            image_crop = composite_image(image_crop, background, mask)
        return image_crop
def _getPathOfNewOrExistingThumbnail(self, uid):
    """
    If the thumbnail image is still up to date, return its path;
    otherwise create it first.
    """
    img_id = self.THUMBNAIL_FIELDID
    o = None
    if img_id:
        filename = self._getThumbnailPhysicalPath(uid)
        imgfield = self.getField(img_id)
        if imgfield:
            # has a scale method
            o = imgfield.getRaw(self)
    if o is None:
        # no image field present
        return None  # pep 20.2
    str_o = str(o)
    if not str_o:
        # the existing image field is empty
        # (this is quite common for UnitraccNews objects):
        logger.info('%(self)r.getThumbnailPath: Bilddaten sind leer',
                    locals())
        return None  # pep 20.2
    # o = self.getThumbnailImageObject()
    # unfortunately the truthiness of o cannot be relied upon!
    try:
        prefix = self.THUMBNAIL_PREFIX
        if not prefix:
            raise ValueError('Kein Praefix fuer Vorschaubild-Pfade!'
                             ' (%(self)r, %(uid)r, prefix=%(prefix)r)'
                             % locals())
        force = defaultdict(gimme_False)
        try:
            form = o.REQUEST.form
            form_force = form.get('force', {})
            # the checkbox may produce the value 'on' (or nothing); see (gf):
            # ../browser/unitraccsearch/templates/brain_maintenance_view.pt
            force.update(form_force)
        except Exception as e:
            logger.error('Error evaluating the force arguments!'
                         ' (proceeding anyway)')
            logger.exception(e)
        scale_to_size = list(map(int, self.THUMBNAIL_SCALING.split('x')))
        fs_mtime = get_mtime(filename)
        if fs_mtime is None:
            logger.info('%(self)r.getThumbnailPath: no image %(filename)r yet',
                        locals())
            # TODO: lock on file <filename>
        else:
            log_thumbnail(filename)
            logger.info('mtime: %s (%s)', fs_mtime, filename)
            logger.info('mtime: %s (%r data)', o._p_mtime, imgfield)
            if o._p_mtime <= fs_mtime:
                if o._p_mtime is None:
                    logger.warn('%(self)r.getThumbnailPath:'
                                ' image object has no modification date/time (ZODB)',
                                locals())
                else:
                    logger.info('%(self)r.getThumbnailPath: %(img_id)r not changed',
                                locals())
                if force['replace-thumbnail']:
                    logger.info('%(self)r.getThumbnailPath: recreating anyway!',
                                locals())
                else:
                    return prefix+uid
            else:
                logger.info('%(self)r.getThumbnailPath: %(img_id)r has changed',
                            locals())
        virtualfile = StringIO(str_o)
        try:
            img = open_image(virtualfile)
            target_width, target_height = scale_to_size
            # currently hard-coded scaling logic:
            # - if too wide, scale down proportionally
            # - if still too tall afterwards, crop at the bottom
            # TODO: selectable strategies ...
            #   - top or left
            #   - bottom or right
            #   - centered horizontally or vertically
            #   - do not crop, but scale and pad if necessary
            if o.width > target_width:
                fact = target_width * 1.0 / o.width
                new_height = int(o.height * fact)
                if new_height:
                    # an IOError may still occur here as well!
                    img = img.resize((target_width, new_height),
                                     resample=BILINEAR)
                    current_width, current_height = img.size
                    logger.info('%(self)r.getThumbnailPath:'
                                ' scaled image by factor %(fact)0.2f'
                                ' to WxH=%(current_width)rx%(current_height)r',
                                locals())
            else:
                current_width, current_height = o.width, o.height
                logger.info('%(self)r.getThumbnailPath:'
                            ' matches target width %(target_width)r, current'
                            ' WxH=%(current_width)rx%(current_height)r',
                            locals())
            if current_height > target_height:
                img = img.crop((0, 0, current_width, target_height))
                logger.info('%(self)r.getThumbnailPath:'
                            ' cropped image to target height, '
                            ' WxH=%(current_width)rx%(target_height)r',
                            locals())
            elif current_height < target_height:
                logger.warning('%(self)r.getThumbnailPath:'
                               ' insufficient height, '
                               ' WxH=%(current_width)rx%(current_height)r',
                               locals())
            mimetype = imgfield.getContentType(self)
            subtype = mimetype.split('/')[1]
        except IOError as e:
            logger.error('%(self)r.getThumbnailPath: %(e)r', locals())
        else:
            try:
                img.save(filename, subtype)
                img.close()
            except IOError as e:
                logger.error('%(self)r.getThumbnailPath:'
                             "error %(e)r while saving to '%(filename)s'",
                             locals())
            except Exception as e:
                logger.error('%(self)r.getThumbnailPath (uid=%(uid)r) :'
                             'PIL complains, %(e)r', locals())
            else:
                logger.info('%(self)r.getThumbnailPath:'
                            " saved thumbnail image to '%(filename)s'",
                            locals())
                return prefix+uid
        finally:
            log_thumbnail(filename)
    except AttributeError as e:
        logger.error('%(e)r', locals())
        logger.exception(e)
        logger.info('ist ok, oder?')
    except ValueError:
        raise
def create_image(path: str) -> Image:
    """Load the image file at *path* and return it converted to RGB."""
    image = open_image(path)
    return image.convert('RGB')
# NOTE(review): truncated Python-2 snippet (uses `except ImportError, err`,
# `print` statements and `xrange`). It starts mid-method and ends with a
# bodyless `for` loop, so it cannot be safely reformatted or modified
# without guessing at the missing code — kept verbatim.
self.total_time += time.clock() - self.start_time self.n_runs += 1 def average(self): return self.total_time/self.n_runs try: from PIL.Image import open as open_image except ImportError, err: from Image import open as open_image import Image if __name__ == "__main__": try: inital_texture = open_image(sys.argv[1]) except IndexError: print "Usage:",sys.argv[0],"INITAL_TEXTURE" sys.exit(-1) try: ix, iy, image_data = inital_texture.size[0], inital_texture.size[1], inital_texture.tostring("raw", "RGBA", 0, -1) except SystemError: ix, iy, image_data = inital_texture.size[0], inital_texture.size[1], inital_texture.tostring("raw", "RGBX", 0, -1) width,height = inital_texture.size temperature_field_a = numpy.ndarray((width*height,1), dtype=numpy.float32) temperature_field_b = numpy.ndarray((width*height,1), dtype=numpy.float32) init_conductivity = numpy.ndarray((width*height,1), dtype=numpy.float32) init_capacity = numpy.ndarray((width*height,1), dtype=numpy.float32) for x in xrange(ix):
def _load_content_from_file(self, local_artifact_path):
    """Open the artifact at *local_artifact_path* as a PIL image, cache it
    on ``self._content`` and return it."""
    # Imported lazily so PIL is only required when image artifacts are used.
    from PIL.Image import open as open_image
    content = open_image(local_artifact_path)
    self._content = content
    return content