Example #1
 def add_image(self, image):
     output = StringIO()
     im = Image.open(image)
     im.thumbnail((384, 384), Image.ANTIALIAS)
     ImageOps.flip(im).convert('1').save(output, 'BMP')
     content = 'P:%s' % b64encode(output.getvalue())
     self.contents.append(content)
def main():
	p = ArgumentParser()
	p.add_argument("--outdir", nargs="?", default="")
	p.add_argument("--skip-existing", action="store_true")
	p.add_argument("--only", type=str, nargs="?", help="Extract specific IDs")
	p.add_argument("files", nargs="+")
	args = p.parse_args(sys.argv[1:])

	cards, textures = extract_info(args.files)
	paths = [card["path"] for card in cards.values()]
	print("Found %i cards, %i textures including %i unique in use." % (
		len(cards), len(textures), len(set(paths))
	))

	orig_dir = "orig"
	thumb_sizes = (256, 512)
	tiles_dir = "tiles"
	filter_ids = args.only.split(",") if args.only else []

	for id, values in sorted(cards.items()):
		if filter_ids and id not in filter_ids:
			continue

		path = values["path"]
		print("Parsing %r (%r)" % (id, path))
		if not path:
			print("%r does not have a texture" % (id))
			continue

		if path not in textures:
			print("Path %r not found for %r" % (path, id))
			continue

		pptr = textures[path]
		texture = pptr.resolve()
		flipped = None

		filename, exists = get_filename(args.outdir, orig_dir, id)
		if not (args.skip_existing and exists):
			print("-> %r" % (filename))
			flipped = ImageOps.flip(texture.image).convert("RGB")
			flipped.save(filename)

		if values["tile"]:
			filename, exists = get_filename(args.outdir, tiles_dir, id)
			if not (args.skip_existing and exists):
				tile_texture = generate_tile_image(texture.image, values["tile"])
				print("-> %r" % (filename))
				tile_texture.save(filename)

		for sz in thumb_sizes:
			thumb_dir = "%ix" % (sz)
			filename, exists = get_filename(args.outdir, thumb_dir, id, ext=".jpg")
			if not (args.skip_existing and exists):
				if not flipped:
					flipped = ImageOps.flip(texture.image).convert("RGB")
				thumb_texture = flipped.resize((sz, sz))
				print("-> %r" % (filename))
				thumb_texture.save(filename)
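Example #1 builds the thumbnail in an in-memory buffer with Python 2's StringIO. A minimal Python 3 sketch of the same add_image pipeline, assuming only Pillow and the standard library (the input path is hypothetical, and LANCZOS stands in for the removed ANTIALIAS constant):

from base64 import b64encode
from io import BytesIO

from PIL import Image, ImageOps

def encode_thumbnail(path):
    # Downscale, flip vertically, convert to 1-bit, and serialize as BMP in memory.
    im = Image.open(path)
    im.thumbnail((384, 384), Image.LANCZOS)
    buf = BytesIO()
    ImageOps.flip(im).convert('1').save(buf, 'BMP')
    return 'P:%s' % b64encode(buf.getvalue()).decode('ascii')

content = encode_thumbnail('receipt.png')  # hypothetical input file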
Example #3
def invertingAndCompare(problem, figures, options):
    av_invert = ImageOps.flip(um.openAndInvert(figures[0]))
    bv_invert = ImageOps.flip(um.openAndInvert(figures[1]))
    ah_invert = ImageOps.mirror(um.openAndInvert(figures[0]))
    dh_invert = ImageOps.mirror(um.openAndInvert(figures[3]))
    aTog = um.measurePixelDifference(av_invert, um.openAndInvert(figures[6]))
    bToh = um.measurePixelDifference(bv_invert, um.openAndInvert(figures[7]))
    aToc = um.measurePixelDifference(ah_invert, um.openAndInvert(figures[2]))
    dTof = um.measurePixelDifference(dh_invert, um.openAndInvert(figures[5]))
    direction = um.determineDirection(aToc, dTof, aTog, bToh, figures)
    return direction[1], um.searchForSolution(problem, direction[0], options)
Example #4
def test_sanity():

    ImageOps.autocontrast(lena("L"))
    ImageOps.autocontrast(lena("RGB"))

    ImageOps.autocontrast(lena("L"), cutoff=10)
    ImageOps.autocontrast(lena("L"), ignore=[0, 255])

    ImageOps.colorize(lena("L"), (0, 0, 0), (255, 255, 255))
    ImageOps.colorize(lena("L"), "black", "white")

    ImageOps.crop(lena("L"), 1)
    ImageOps.crop(lena("RGB"), 1)

    ImageOps.deform(lena("L"), deformer)
    ImageOps.deform(lena("RGB"), deformer)

    ImageOps.equalize(lena("L"))
    ImageOps.equalize(lena("RGB"))

    ImageOps.expand(lena("L"), 1)
    ImageOps.expand(lena("RGB"), 1)
    ImageOps.expand(lena("L"), 2, "blue")
    ImageOps.expand(lena("RGB"), 2, "blue")

    ImageOps.fit(lena("L"), (128, 128))
    ImageOps.fit(lena("RGB"), (128, 128))
    ImageOps.fit(lena("RGB").resize((1, 1)), (35, 35))

    ImageOps.flip(lena("L"))
    ImageOps.flip(lena("RGB"))

    ImageOps.grayscale(lena("L"))
    ImageOps.grayscale(lena("RGB"))

    ImageOps.invert(lena("L"))
    ImageOps.invert(lena("RGB"))

    ImageOps.mirror(lena("L"))
    ImageOps.mirror(lena("RGB"))

    ImageOps.posterize(lena("L"), 4)
    ImageOps.posterize(lena("RGB"), 4)

    ImageOps.solarize(lena("L"))
    ImageOps.solarize(lena("RGB"))

    success()
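The deformer passed to ImageOps.deform above is defined elsewhere in the Pillow test suite. A minimal stand-in, assuming only the public Pillow API, is any object exposing getmesh(image) that returns (target box, source quad) pairs for a MESH transform:

from PIL import Image, ImageOps

class IdentityDeformer:
    # Trivial deformer: maps the whole image onto itself unchanged.
    def getmesh(self, image):
        w, h = image.size
        # One mesh entry: target bounding box plus the source quad
        # (upper-left, lower-left, lower-right, upper-right corners).
        return [((0, 0, w, h), (0, 0, 0, h, w, h, w, 0))]

im = Image.new('L', (128, 128))
out = ImageOps.deform(im, IdentityDeformer())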
Example #5
    def _get_image(overlay, x, y):
        """Superpose the picture of the timezone on the map"""
        def _get_x_offset():
            now = datetime.utcnow().timetuple()
            return - int((now.tm_hour * 60 + now.tm_min - 12 * 60) / (24 * 60) * MAP_SIZE[0])  # night is centered at UTC noon (12)

        im = BACK_IM.copy()
        if overlay:
            overlay_im = Image.open(TIMEZONE_RESOURCES + overlay)
            im.paste(BACK_ENHANCED_IM, overlay_im)
        night_im = ImageChops.offset(NIGHT_IM, _get_x_offset(), 0).crop(im.getbbox())
        if IS_WINTER:
            night_im = ImageOps.flip(night_im)

        # In Wheezy alpha_composite and tobytes are not implemented, yet
        try:
            im.paste(Image.alpha_composite(night_im, LIGHTS_IM), night_im)
        except:
            im.paste(Image.blend(night_im, LIGHTS_IM, 0.5), night_im)
        im.paste(DOT_IM, (int(x - DOT_IM.size[1] / 2), int(y - DOT_IM.size[0] / 2)), DOT_IM)
        try:
            data = im.tobytes()
            w, h = im.size
            data = GLib.Bytes.new(data)
            pb = GdkPixbuf.Pixbuf.new_from_bytes(data, GdkPixbuf.Colorspace.RGB,
                 False, 8, w, h, w * 3)
            return pb
        except:
            data = im.tostring()
            w, h = im.size
            pb = GdkPixbuf.Pixbuf.new_from_data(data, GdkPixbuf.Colorspace.RGB,
                 False, 8, w, h, w * 3)
            return pb
Example #6
 def get_render(self):
     frame = ImageOps.flip(
         self.env.render('rgb_array').convert('L').resize(
             (self.reduced_width, self.reduced_height), Image.BILINEAR))
     return np.asarray(frame)
Example #7
	def load_image(self):
		"""decode the image into a buffer"""
		self.image_data = Image.open(self.image_file)
		self.image_data = ImageOps.grayscale(self.image_data)

		if self.autocontrast:
			# may or may not improve the look of the transmitted spectrum
			self.image_data = ImageOps.autocontrast(self.image_data)

		if self.image_invert:
			# may or may not improve the look of the transmitted spectrum
			self.image_data = ImageOps.invert(self.image_data)

		if self.image_flip:
			# set to true for waterfalls that scroll from the top
			self.image_data = ImageOps.flip(self.image_data)

		(self.image_width, self.image_height) = self.image_data.size
		max_width = 4096.0
		if self.image_width > max_width:
			scaling = max_width / self.image_width
			newsize = (int(self.image_width * scaling), int(self.image_height * scaling))
			(self.image_width, self.image_height) = newsize
			self.image_data = self.image_data.resize(newsize)
		self.set_output_multiple(self.image_width)

		self.image_data = list(self.image_data.getdata())
		if self.bt709_map:
			# scale brightness according to ITU-R BT.709
			self.image_data = map( lambda x: x * 219 / 255 + 16,  self.image_data)
		self.image_len = len(self.image_data)
		if self.repeatmode != 2:
			print "paint.image_source: %d bytes, %dpx width" % (self.image_len, self.image_width)
		self.line_num = 0
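The bt709_map branch above rescales full-range 8-bit samples (0-255) into the limited 16-235 range used by ITU-R BT.709 video. A quick sketch of the mapping's endpoints (using // for the integer division the Python 2 snippet gets from /):

for x in (0, 128, 255):
    print(x, '->', x * 219 // 255 + 16)   # 0 -> 16, 128 -> 125, 255 -> 235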
Example #8
def wms_image(server, layers):
    """ Retrieve and plot a WMS image.
    Hard-coded for a single WMS server and PlateCarre() images.
    Can plot on any projection"""
    
    xlim = plt.gca().get_xlim()
    ylim = plt.gca().get_ylim()
    bbox = [xlim[0], ylim[0], xlim[1], ylim[1]]
    
    # Construct the query string
    request = server
    request += "&version=1.1.0"
    request += "&request=GetMap"
    request += "&layers={}".format(layers)
    request += "&bbox={}".format(",".join([str(i) for i in bbox]))
    request += "&styles="
    request += "&width={}&height={}".format(512, 512)
    request += "&srs=EPSG:4326"
    request += "&format=image/jpeg"

    # Get jpeg from server.
    jpeg_bytes = urllib2.urlopen(request).read()
    pil_img = Image.open(StringIO.StringIO(jpeg_bytes))
    pil_img = ImageOps.flip(pil_img)
    
    # Turn the pil image into rgb array, to workaround a cartopy bug.
    # getdata() is row-major, so reshape to (height, width, channels)
    img_array = np.array(list(pil_img.getdata())).reshape((pil_img.size[1], pil_img.size[0], -1)) / 256.0
    img_array = img_array.squeeze()

    # Plot the platecaree image in the current plot projection. 
    plt.gca().imshow(img_array, origin="lower",
                     extent=[bbox[0], bbox[2], bbox[1], bbox[3]],
                     transform=ccrs.PlateCarree())
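The getdata()/reshape round trip above is easy to get wrong because getdata() is row-major (height rows of width pixels). A sketch of an alternative that avoids it, assuming only NumPy and Pillow (the image here is a stand-in for the downloaded JPEG):

import numpy as np
from PIL import Image, ImageOps

pil_img = ImageOps.flip(Image.new('RGB', (512, 512)))       # stand-in for the WMS response
img_array = np.asarray(pil_img, dtype=np.float64) / 256.0   # shape (height, width, 3)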
Example #9
def apply_flip(pixbuf):
    '''
    Flip the image top to bottom.
    '''
    width, height = pixbuf.get_width(), pixbuf.get_height()
    y = ImageOps.flip(Image.frombytes(K.ImageConstants.RGB_SHORT_NAME, (width, height), pixbuf.get_pixels()))
    return I.fromImageToPixbuf(y)
def generate_tile_image(img, tile):
	# tile the image horizontally (x2 is enough),
	# some cards need to wrap around to create a bar (e.g. Muster for Battle),
	# also discard alpha channel (e.g. Soulfire, Mortal Coil)
	tiled = Image.new("RGB", (img.width * 2, img.height))
	tiled.paste(img, (0, 0))
	tiled.paste(img, (img.width, 0))

	x, y, width, height = get_rect(
		tile["m_TexEnvs"]["_MainTex"]["m_Offset"]["x"],
		tile["m_TexEnvs"]["_MainTex"]["m_Offset"]["y"],
		tile["m_TexEnvs"]["_MainTex"]["m_Scale"]["x"],
		tile["m_TexEnvs"]["_MainTex"]["m_Scale"]["y"],
		tile["m_Floats"].get("_OffsetX", 0.0),
		tile["m_Floats"].get("_OffsetY", 0.0),
		tile["m_Floats"].get("_Scale", 1.0),
		img.width
	)

	bar = tiled.crop((x, y, x + width, y + height))
	bar = ImageOps.flip(bar)
	# negative x scale means horizontal flip
	if tile["m_TexEnvs"]["_MainTex"]["m_Scale"]["x"] < 0:
		bar = ImageOps.mirror(bar)

	return bar.resize((OUT_WIDTH, OUT_HEIGHT), Image.LANCZOS)
def do_texture(path, id, textures, values, thumb_sizes, args):
	print("Parsing %r (%r)" % (id, path))
	if not path:
		print("%r does not have a texture" % (id))
		return

	if path not in textures:
		print("Path %r not found for %r" % (path, id))
		return

	pptr = textures[path]
	texture = pptr.resolve()
	flipped = None

	filename, exists = get_filename(args.outdir, args.orig_dir, id, ext=".png")
	if not (args.skip_existing and exists):
		print("-> %r" % (filename))
		flipped = ImageOps.flip(texture.image).convert("RGB")
		flipped.save(filename)

	for format in args.formats:
		ext = "." + format

		if not args.skip_tiles:
			filename, exists = get_filename(args.outdir, args.tiles_dir, id, ext=ext)
			if not (args.skip_existing and exists):
				tile_texture = generate_tile_image(texture.image, values["tile"])
				print("-> %r" % (filename))
				tile_texture.save(filename)

		if ext == ".png":
			# skip png generation for thumbnails
			continue

		if args.skip_thumbnails:
			# --skip-thumbnails was specified
			continue

		for sz in thumb_sizes:
			thumb_dir = "%ix" % (sz)
			filename, exists = get_filename(args.outdir, thumb_dir, id, ext=ext)
			if not (args.skip_existing and exists):
				if not flipped:
					flipped = ImageOps.flip(texture.image).convert("RGB")
				thumb_texture = flipped.resize((sz, sz))
				print("-> %r" % (filename))
				thumb_texture.save(filename)
Example #12
    def getframe(self):
        if self.select_env == 'InvertedPendulum-v1':  # for mujoco envs:
            numpy_im = self.env.render('rgb_array')  #this gives uint8 numpy array
            im = Image.fromarray(numpy_im, 'RGB')
            return np.asarray(im.convert('L').resize((self.dimO[0], self.dimO[1])), dtype= np.uint8)

        numpy_im = self.env.render('rgb_array')  # this gives uint8 numpy array
        im = Image.fromarray(numpy_im, 'RGB')  # wrap in a PIL image before convert/resize
        return np.asarray(ImageOps.flip(im.convert('L').resize((self.dimO[0], self.dimO[1]))))
Example #13
def determineDirection(a, b, c, d, figures):
    if min(a, b) > min(c, d):
        result = ImageOps.mirror(openAndInvert(figures[6]))
        optimal = min(a, b)
    else:
        result = ImageOps.flip(openAndInvert(figures[2]))
        optimal = min(c, d)
    return result, optimal
Example #14
def handle_asset(asset, handle_formats):
	for id, obj in asset.objects.items():
		if obj.type not in handle_formats:
			continue

		d = obj.read()

		if obj.type == "AudioClip":
			if not d.data:
				# eg. StreamedResource not available
				continue
			af = FSB5(d.data)
			for i, sample in enumerate(af.samples):
				if i > 0:
					filename = "%s-%i.%s" % (d.name, i, af.get_sample_extension())
				else:
					filename = "%s.%s" % (d.name, af.get_sample_extension())
				try:
					sample = af.rebuild_sample(sample)
				except ValueError as e:
					print("WARNING: Could not extract %r (%s)" % (d, e))
					continue
				write_to_file(filename, sample, mode="wb")

		elif obj.type == "MovieTexture":
			filename = d.name + ".ogv"
			write_to_file(filename, d.movie_data, mode="wb")

		elif obj.type == "Shader":
			write_to_file(d.name + ".cg", d.script)

		elif obj.type == "Mesh":
			try:
				mesh_data = OBJMesh(d).export()
				write_to_file(d.name + ".obj", mesh_data, mode="w")
			except NotImplementedError as e:
				print("WARNING: Could not extract %r (%s)" % (d, e))
				mesh_data = pickle.dumps(d._obj)
				write_to_file(d.name + ".Mesh.pickle", mesh_data, mode="wb")

		elif obj.type == "TextAsset":
			if isinstance(d.script, bytes):
				write_to_file(d.name + ".bin", d.script, mode="wb")
			else:
				write_to_file(d.name + ".txt", d.script)

		elif obj.type == "Texture2D":
			filename = d.name + ".png"
			image = d.image
			if image is None:
				print("WARNING: %s is an empty image" % (filename))
				write_to_file(filename, "")
			else:
				print("Decoding %r" % (d))
				img = ImageOps.flip(image)
				path = get_output_path(filename)
				img.save(path)
Example #15
  def process(self, image):
    """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
    BaseFilter.process(self, image)
    newImage = ImageOps.flip(image)
    return newImage
Example #16
def modify_photo(image_data, film_size):
    """
    Make the image grayscale and invert the colors
    All manipulations for the original photo go here
    """
    modified_image = ImageOps.grayscale(image_data)
    # Image.ANTIALIAS is best for down sizing
    modified_image = modified_image.resize(film_size, Image.ANTIALIAS)
    modified_image = ImageOps.invert(modified_image)
    modified_image = ImageOps.flip(modified_image)
    return modified_image
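A usage sketch for modify_photo, assuming a hypothetical input file, a film size in pixels, and a Pillow version that still provides Image.ANTIALIAS:

from PIL import Image

photo = Image.open('photo.jpg')               # hypothetical path
negative = modify_photo(photo, (320, 240))    # grayscale, resized, inverted, flipped
negative.save('negative.png')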
Example #17
    def operations(self):
        """Perform enhancements from the ImageOps class."""
        if self.effect == 'flip':
            to_save = ImageOps.flip(self.image)
        elif self.effect == 'mirror':
            to_save = ImageOps.mirror(self.image)
        elif self.effect == 'grayscale':
            to_save = ImageOps.grayscale(self.image)
        else:
            raise ValueError("Unsupported effect: %r" % self.effect)
        to_save.save(self.new_file)
        return self.return_path
Example #18
def draw_groups(groups, mcu, mem):
    for n, group in enumerate(groups):
        img = Image.new('RGBA', (XS, Y0), (0, 0, 0, 255))
        drw = ImageDraw.Draw(img)

        draw_boards(drw, mcu, mem)
        for conn in group:
            draw_conn(drw, conn)

        img = ImageOps.flip(img)
        img.save('%s.png' % n)
        del drw
def flip_vertical(images, uv_pixel_positions):
    """
    Flip the images and the pixel positions vertically (flip up/down)

    See random_image_and_indices_mutation() for documentation of args and return types.

    """
    mutated_images = [ImageOps.flip(image) for image in images]
    v_pixel_positions = uv_pixel_positions[1]
    # Use the (shared) image height; the comprehension variable does not leak in Python 3.
    mutated_v_pixel_positions = (images[0].height - 1) - v_pixel_positions
    mutated_uv_pixel_positions = (uv_pixel_positions[0], mutated_v_pixel_positions)
    return mutated_images, mutated_uv_pixel_positions
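A usage sketch for flip_vertical, assuming all images share one size and the pixel positions are NumPy arrays of equal length (u = column, v = row):

import numpy as np
from PIL import Image

images = [Image.new('RGB', (64, 48)), Image.new('RGB', (64, 48))]
uv = (np.array([10, 20]), np.array([5, 40]))      # (u_pixel_positions, v_pixel_positions)
flipped_images, flipped_uv = flip_vertical(images, uv)
# flipped_uv[1] is [42, 7] for 48-pixel-tall images, since v' = height - 1 - v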
Example #20
def save_image(image_name='image'):
  global n_image
  glPixelStorei(GL_PACK_ALIGNMENT, 1)
  width = width_GL
  if nTextures == 2 :width = 2*width_GL
  data = glReadPixels(0, 0, width, height_GL, GL_RGBA, GL_UNSIGNED_BYTE)
  image = Image.frombytes("RGBA", (width, height_GL), data)
  image = ImageOps.flip(image) # in my case image is flipped top-bottom for some reason
  image_file_name = '{0}_{1}.png'.format(image_name, n_image)
  image.save(image_file_name, 'PNG')
  n_image += 1
  print 'Image saved: {0}'.format(image_file_name)
Example #21
def prepare_side_img(img, a, b):
	(w, h) = img.size

	p1 = [(0, h), (3*a, h), (0, h-3*a)] # first triangle
	p2 = [(w, h), (w-3*a, h), (w, h-3*a)] # second one

	mask = Image.new('L', (w, h), 255) # generate mask
	ImageDraw.Draw(mask).polygon(p1, outline=1, fill=0)
	ImageDraw.Draw(mask).polygon(p2, outline=1, fill=0)

	output = ImageOps.fit(img, mask.size) # crop image
	output.putalpha(mask) # put alpha to cropped part
	return ImageOps.flip(output)
Example #22
		def applyflip(self, image):
			if self.rotationflip == 0:
				return image
			elif self.rotationflip == 1:
				return image.rotate(90)
			elif self.rotationflip == 2:
				return image.rotate(180)
			elif self.rotationflip == 3:
				return image.rotate(270)
			elif self.rotationflip == 4:
				return ImageOps.mirror(image)
			elif self.rotationflip == 5:
				return ImageOps.flip(image)
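An equivalent, more data-driven sketch of applyflip: the rotationflip codes from the example are kept in a lookup table, and the method body reduces to a single dispatch (the table name is hypothetical):

from PIL import ImageOps

ROTATION_FLIP_OPS = {
    0: lambda image: image,            # no change
    1: lambda image: image.rotate(90),
    2: lambda image: image.rotate(180),
    3: lambda image: image.rotate(270),
    4: ImageOps.mirror,                # horizontal flip
    5: ImageOps.flip,                  # vertical flip
}

def applyflip(self, image):
    return ROTATION_FLIP_OPS[self.rotationflip](image)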
Example #23
def upload_file():
    global index
    global cache_size

    if request.method == 'POST':
        err = False

        if 'width' not in request.form:
            print '%s%sDENY%s: The client did not specify a width.' % (timestamp(), FAIL, ENDC)
            abort(500)

        if 'height' not in request.form:
            print '%s%sDENY%s: The client did not specify a height.' % (timestamp(), FAIL, ENDC)
            abort(500)

        if 'img_data' not in request.form:
            print '%s%sDENY%s: The client did not specify any image data.' % (timestamp(), FAIL, ENDC)
            abort(500)

        if 'Origin' not in request.headers:
            print '%s%sDENY%s: The client did not specify a CORS origin header.' % (timestamp(), FAIL, ENDC)
            abort(500)

        w = int(request.form['width'])
        h = int(request.form['height'])
        d = numpy.fromstring(request.form['img_data'][1:], dtype=numpy.uint8, sep=',')
        fname = 'cache/%08d.png' % index

        img = Image.frombuffer('RGB', [w, h], d, 'raw', 'RGB', 0, 1)
        ImageOps.flip(img).save(fname)
        cache_size = cache_size + os.path.getsize(fname)
        index += 1

        print '%s%sALLOW%s from %s%s%s frame %d (%d x %d), cache size = %s' % (timestamp(), OK, ENDC, BLUE, request.remote_addr, ENDC, index, w, h, sizeof_fmt(cache_size))

        r = make_response('200')
        r.headers['Access-Control-Allow-Origin'] = '*'

        return r
Example #24
def handle_asset(asset, handle_formats, dir, flip, objMesh):
	for id, obj in asset.objects.items():
		try:
			otype = obj.type
		except Exception as e:
			error("[Error] %s" % (e))
			continue

		if otype not in handle_formats:
			continue

		d = obj.read()
		save_path = os.path.join(dir, obj.type, d.name)
		utils.make_dirs(save_path)

		if otype == "Mesh":
			try:
				mesh_data = None

				if not objMesh:
					mesh_data = BabylonMesh(d).export()
					utils.write_to_file(save_path + ".babylon", mesh_data, mode="w")

				mesh_data = OBJMesh(d).export()
				utils.write_to_file(save_path + ".obj", mesh_data, mode="w")
			except (NotImplementedError, RuntimeError) as e:
				error("WARNING: Could not extract %r (%s)" % (d, e))
				mesh_data = pickle.dumps(d._obj)
				utils.write_to_file(save_path + ".Mesh.pickle", mesh_data, mode="wb")

		elif otype == "TextAsset":
			if isinstance(d.script, bytes):
				utils.write_to_file(save_path + ".bin", d.script, mode="wb")
			else:
				utils.write_to_file(save_path + ".txt", d.script)

		elif otype == "Texture2D":
			filename = d.name + ".png"
			try:
				image = d.image
				if image is None:
					info("WARNING: %s is an empty image" % (filename))
					utils.write_to_file(save_path + ".empty", "")
				else:
					info("Decoding %r" % (d))
					img = image
					if flip:
						img = ImageOps.flip(image)
					img.save(save_path + ".png")
			except Exception as e:
				error("Failed to extract texture %s (%s)" % (d.name, e))
Example #25
 def _createMask(self, image, colorValue):
     # Create a mask of the provided image. This method operates on numpy
     # arrays, while a typical _createMask operates on a PIL image; that is
     # still fine, since the array is converted back to a PIL image below.
     imgtemp=np.copy(image)
     imgtemp[image==colorValue]=255
     imgtemp[image!=colorValue]=0
     
     # Convert numpy array into PIL image.
     image = Image.fromarray(imgtemp.astype(np.uint8)).convert("L")
     resizeTuple = self.renderingProperties['imageSize']
     image = ImageOps.flip(image)
     image = ImageChops.invert(image).resize(resizeTuple, Image.NEAREST)
     return image
Example #26
 def _get_image(overlay, x, y):
     """Superpose the picture of the timezone on the map"""
     def _get_x_offset():
         now = datetime.utcnow().timetuple()
         return - int((now.tm_hour*60 + now.tm_min - 12*60) / (24*60) * MAP_SIZE[0])  # night is centered at UTC noon (12)
     im = BACK_IM.copy()
     if overlay:
         overlay_im = Image.open(TIMEZONE_RESOURCES + overlay)
         im.paste(BACK_ENHANCED_IM, overlay_im)
     night_im = ImageChops.offset(NIGHT_IM, _get_x_offset(), 0).crop(im.getbbox())
     if IS_WINTER: night_im = ImageOps.flip(night_im)
     im.paste(Image.alpha_composite(night_im, LIGHTS_IM), night_im)
     im.paste(DOT_IM, (int(x - DOT_IM.size[1]/2), int(y - DOT_IM.size[0]/2)), DOT_IM)
     return gtk.gdk.pixbuf_new_from_data(im.tobytes(), gtk.gdk.COLORSPACE_RGB,
                                         False, 8, im.size[0], im.size[1], im.size[0] * 3)
Example #27
    def save_png(self, filename, data, scaling):
        data, boundingBox = self.reposition(data, scaling)
        
        im = Image.new('L', boundingBox, 255)

        draw = ImageDraw.Draw(im)
        
        for i in range(len(data) - 1):
            draw.line(data[i] + data[i + 1], fill=128, width=5)
        del draw
        
        #Image origin is top left, convert to CAD-style bottom left
        im = ImageOps.flip(im)

        im.save(filename, "PNG")
Example #28
    def verticalReflection(image1, image2):
        neg_image1 = convertImage(image1)
        neg_image2 = convertImage(image2)

        reflected_image1 = ImageOps.flip(neg_image1)

        diff = ImageChops.difference(reflected_image1, neg_image2)
        stat = ImageStat.Stat(diff)
        mean = stat.mean[0]

        if mean < 1.0:
            print ("Vertical Reflection Function: Reflected Vertically")
            return True
        else:
            print ("Vertical Reflection Function: Not reflected vertically")
            return False
Example #29
def test_snapshot_flip_with_ssim_check(cam, configer, snapshot):

    # Test that the camera actually flips the image after flip is enabled.
    # Check whether the image is flipped correctly by comparing SSIM.

    configer.set('video_c0_imprinttimestamp=0')
    configer.set('video_c0_rotate=0&videoin_c0_mirror=0&videoin_c0_flip=0')
    sleep(2)

    option = {'resolution': '640x480'}

    # take a snapshot
    status, image = snapshot.take(option)
    assert status, "Snapshot failed with resolution %s" % (option['resolution'])

    # flip image
    image_flip = image
    image_flip_io = StringIO.StringIO()
    image_flip = ImageOps.flip(image_flip)
    image_flip.save(image_flip_io, format='jpeg')
    image_flip_ref = scipy.misc.imread(image_flip_io, flatten=True).astype(numpy.float32)

    # rotate
    configer.set('videoin_c0_rotate=0&videoin_c0_mirror=0&videoin_c0_flip=1')
    sleep(2)

    # take another snapshot
    status, image = snapshot.take(option)
    assert status, "Snapshot failed with resolution %s" % (option['resolution'])

    image_flip_cam = image
    image_flip_cam_io = StringIO.StringIO()
    image_flip_cam.save(image_flip_cam_io, format='jpeg')
    image_flip_cam_ref = scipy.misc.imread(image_flip_cam_io, flatten=True).astype(numpy.float32)

    ssim_exact = ssim.ssim_exact(image_flip_ref / 255, image_flip_cam_ref / 255)
    print 'ssim (exact): %f' % ssim_exact

    lower_bound = 0.700
    if ssim_exact <= lower_bound:
        image_flip.save('image-flip.jpg')
        image_flip_cam.save('image-flip-cam.jpg')

    assert ssim_exact > lower_bound, "SSIM too low. Image may not have been flipped.\
        Exp %f, Act %f" % (lower_bound, ssim_exact)
Example #30
def voltearImagen(archivo):
    imgVolteada = ImageOps.flip(archivo)

    return imgVolteada
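Usage is direct; a sketch with a hypothetical file:

from PIL import Image

imagen = Image.open('foto.png')          # hypothetical input
volteada = voltearImagen(imagen)
volteada.save('foto_volteada.png')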
Example #31
def build_feature_index(is_horizontal_banner=False,
                        resolution=None,
                        apply_flip=False,
                        apply_mirror=False):
    pooling = "avg"
    feature_filename = get_label_database_filename(pooling)

    app_ids = list_app_ids(is_horizontal_banner=is_horizontal_banner)
    num_games = len(app_ids)

    target_model_size = get_target_model_size(resolution=resolution)
    model = load_model(target_model_size=target_model_size, pooling=pooling)
    preprocess = get_preprocessing_tool()

    try:
        Y_hat = np.load(feature_filename)
    except FileNotFoundError:
        num_features = get_num_features(model)
        Y_hat = np.zeros((num_games, num_features))

    start = time()

    app_ids = sorted(app_ids, key=int)
    freeze_app_ids(app_ids)

    for (counter, app_id) in enumerate(app_ids):

        # Avoid re-computing values of Y_hat which were previously computed and saved to disk, then recently loaded
        if any(Y_hat[counter, :] != 0):
            continue

        image_filename = app_id_to_image_filename(app_id, is_horizontal_banner)
        image = load_image(image_filename, target_size=target_model_size)
        if apply_flip:
            image = pil_imageops.flip(image)
        if apply_mirror:
            image = pil_imageops.mirror(image)
        features = convert_image_to_features(image,
                                             model,
                                             preprocess=preprocess)

        Y_hat[counter, :] = features

        if (counter % 1000) == 0:
            print("{}/{} in {:.2f} s".format(counter, num_games,
                                             time() - start))
            np.save(
                feature_filename,
                np.asarray(Y_hat, dtype=np.float16),
                allow_pickle=False,
                fix_imports=False,
            )

    np.save(
        feature_filename,
        np.asarray(Y_hat, dtype=np.float16),
        allow_pickle=False,
        fix_imports=False,
    )

    return
Example #32
startingX = int(startingX)
startingY = input("Input initial Y-coordinate: ")
startingY = int(startingY)
Width = input("Input width of cropped image: ")
Width = int(Width)
Height = input("Input height of cropped image: ")
Height = int(Height)
# PIL's crop box is (left, upper, right, lower) in absolute pixel coordinates
img2 = img2.crop(box=(startingX, startingY, Width, Height))
img2.show()

# horizontal flipping
img3 = ImageOps.mirror(img3)
img3.show()

# vertical flipping
img4 = ImageOps.flip(img4)
img4.show()

# reading pixels & applying negative transformation
for i in range(0, img5.size[0] - 1):
    for j in range(0, img5.size[1] - 1):
        # geting pixel value at (x,y) position of the image
        pixelColorVals = img5.getpixel((i, j))

        # inverting color
        redPixel = 255 - pixelColorVals[0]
        # negate red pixel

        greenPixel = 255 - pixelColorVals[1]
        # negate green pixel
Example #33
File: twms.py  Project: MVoz/twms
def twms_main(data):
    """
    Do main TWMS work. 
    data - dictionary of params. 
    returns (error_code, content_type, resp)
    """

    start_time = datetime.datetime.now()

    content_type = "text/html"
    resp = ""
    srs = data.get("srs", "EPSG:4326")
    gpx = data.get("gpx", "").split(",")
    if gpx == ['']:
        gpx = []
    wkt = data.get("wkt", "")
    trackblend = float(data.get("trackblend", "0.5"))
    color = data.get("color", data.get("colour", "")).split(",")
    track = False
    tracks = []
    if len(gpx) == 0:
        req_bbox = projections.from4326(
            (27.6518898, 53.8683186, 27.6581944, 53.8720359), srs)
    else:
        for g in gpx:
            local_gpx = config.gpx_cache + "%s.gpx" % g
            if not os.path.exists(config.gpx_cache):
                os.makedirs(config.gpx_cache)
            if not os.path.exists(local_gpx):
                urllib.urlretrieve(
                    "http://www.openstreetmap.org/trace/%s/data" % g,
                    local_gpx)
            if not track:
                track = GPXParser(local_gpx)
                req_bbox = projections.from4326(track.bbox, srs)
            else:
                track = GPXParser(local_gpx)
                req_bbox = bbox.add(req_bbox,
                                    projections.from4326(track.bbox, srs))
            tracks.append(track)

    req_type = data.get("request", "GetMap")
    version = data.get("version", "1.1.1")
    ref = data.get("ref", config.service_url)
    if req_type == "GetCapabilities":
        content_type, resp = capabilities.get(version, ref)
        return (OK, content_type, resp)

    layer = data.get("layers", config.default_layers).split(",")
    if ("layers" in data) and not layer[0]:
        layer = ["transparent"]

    if req_type == "GetCorrections":
        points = data.get("points", data.get("POINTS", "")).split("=")
        resp = ""
        points = [a.split(",") for a in points]
        points = [(float(a[0]), float(a[1])) for a in points]

        content_type = "text/plain"
        for lay in layer:
            for point in points:
                resp += "%s,%s;" % tuple(
                    correctify.rectify(config.layers[lay], point))
            resp += "\n"
        return (OK, content_type, resp)

    force = data.get("force", "")
    if force != "":
        force = force.split(",")
    force = tuple(force)

    filt = data.get("filt", "")
    if filt != "":
        filt = filt.split(",")
    filt = tuple(filt)

    if layer == [""]:
        content_type = "text/html"
        resp = overview.html(ref)
        return (OK, content_type, resp)

    format = data.get("format", config.default_format).lower()
    format = formats.get("image/" + format, format)
    format = formats.get(format, format)
    if format not in formats.values():
        return (ERROR, content_type, "Invalid format")
    content_type = mimetypes[format]

    width = 0
    height = 0
    resp_cache_path, resp_ext = "", ""
    if req_type == "GetTile":
        width = 256
        height = 256
        height = int(data.get("height", height))
        width = int(data.get("width", width))
        srs = data.get("srs", "EPSG:3857")
        x = int(data.get("x", 0))
        y = int(data.get("y", 0))
        z = int(data.get("z", 1)) + 1
        if "cache_tile_responses" in dir(config) and not wkt and (len(gpx)
                                                                  == 0):
            if (srs, tuple(layer), filt, width, height, force,
                    format) in config.cache_tile_responses:

                resp_cache_path, resp_ext = config.cache_tile_responses[(
                    srs, tuple(layer), filt, width, height, force, format)]
                resp_cache_path = resp_cache_path + "/%s/%s/%s.%s" % (
                    z - 1, x, y, resp_ext)
                if os.path.exists(resp_cache_path):
                    return (OK, content_type, open(resp_cache_path,
                                                   "r").read())
        if len(layer) == 1:
            if layer[0] in config.layers:
                if config.layers[layer[0]][
                        "proj"] == srs and width == 256 and height == 256 and not filt and not force and not correctify.has_corrections(
                            config.layers[layer[0]]):
                    local = config.tiles_cache + config.layers[
                        layer[0]]["prefix"] + "/z%s/%s/x%s/%s/y%s." % (
                            z, x / 1024, x, y / 1024, y)
                    ext = config.layers[layer]["ext"]
                    adds = ["", "ups."]
                    for add in adds:
                        if os.path.exists(local + add + ext):
                            tile_file = open(local + add + ext, "r")
                            resp = tile_file.read()
                            return (OK, content_type, resp)
        req_bbox = projections.from4326(projections.bbox_by_tile(z, x, y, srs),
                                        srs)

    if data.get("bbox", None):
        req_bbox = tuple(map(float, data.get("bbox", req_bbox).split(",")))

    req_bbox = projections.to4326(req_bbox, srs)

    req_bbox, flip_h = bbox.normalize(req_bbox)
    box = req_bbox
    #print >> sys.stderr, req_bbox
    #sys.stderr.flush()

    height = int(data.get("height", height))
    width = int(data.get("width", width))
    width = min(width, config.max_width)
    height = min(height, config.max_height)
    if (width == 0) and (height == 0):
        width = 350

# layer = layer.split(",")

    imgs = 1.
    ll = layer.pop(0)
    if ll[-2:] == "!c":
        ll = ll[:-2]
        if wkt:
            wkt = "," + wkt
        wkt = correctify.corr_wkt(config.layers[ll]) + wkt
        srs = config.layers[ll]["proj"]
    try:
        result_img = getimg(box, srs, (height, width), config.layers[ll],
                            start_time, force)
    except KeyError:
        result_img = Image.new("RGBA", (width, height))

    #width, height =  result_img.size
    for ll in layer:
        if ll[-2:] == "!c":
            ll = ll[:-2]
            if wkt:
                wkt = "," + wkt
            wkt = correctify.corr_wkt(config.layers[ll]) + wkt
            srs = config.layers[ll]["proj"]

        im2 = getimg(box, srs, (height, width), config.layers[ll], start_time,
                     force)

        if "empty_color" in config.layers[ll]:
            ec = ImageColor.getcolor(config.layers[ll]["empty_color"], "RGBA")
            sec = set(ec)
            if "empty_color_delta" in config.layers[ll]:
                delta = config.layers[ll]["empty_color_delta"]
                for tr in range(-delta, delta):
                    for tg in range(-delta, delta):
                        for tb in range(-delta, delta):
                            if (ec[0] + tr) >= 0 and (ec[0] + tr) < 256 and (
                                    ec[1] + tr
                            ) >= 0 and (ec[1] + tr) < 256 and (
                                    ec[2] + tr) >= 0 and (ec[2] + tr) < 256:
                                sec.add((ec[0] + tr, ec[1] + tg, ec[2] + tb,
                                         ec[3]))
            i2l = im2.load()
            for x in range(0, im2.size[0]):
                for y in range(0, im2.size[1]):
                    t = i2l[x, y]
                    if t in sec:
                        i2l[x, y] = (t[0], t[1], t[2], 0)
        if not im2.size == result_img.size:
            im2 = im2.resize(result_img.size, Image.ANTIALIAS)
        im2 = Image.composite(im2, result_img,
                              im2.split()[3])  # imgs/(imgs+1.))

        if "noblend" in force:
            result_img = im2
        else:
            result_img = Image.blend(im2, result_img, 0.5)
        imgs += 1.

    ##Applying filters
    result_img = filter.raster(result_img, filt, req_bbox, srs)

    #print >> sys.stderr, wkt
    #sys.stderr.flush()
    if wkt:
        result_img = drawing.wkt(wkt, result_img, req_bbox, srs,
                                 color if len(color) > 0 else None, trackblend)
    if len(gpx) > 0:
        last_color = None
        c = iter(color)
        for track in tracks:
            try:
                last_color = c.next()
            except StopIteration:
                pass
            result_img = drawing.gpx(track, result_img, req_bbox, srs,
                                     last_color, trackblend)

    if flip_h:
        result_img = ImageOps.flip(result_img)
    image_content = StringIO.StringIO()

    if format == "JPEG":
        try:
            result_img.save(image_content,
                            format,
                            quality=config.output_quality,
                            progressive=config.output_progressive)
        except IOError:
            result_img.save(image_content,
                            format,
                            quality=config.output_quality)
    elif format == "PNG":
        result_img.save(image_content,
                        format,
                        progressive=config.output_progressive,
                        optimize=config.output_optimize)
    elif format == "GIF":
        result_img.save(image_content,
                        format,
                        quality=config.output_quality,
                        progressive=config.output_progressive)
    else:  ## workaround for GIF
        result_img = result_img.convert("RGB")
        result_img.save(image_content,
                        format,
                        quality=config.output_quality,
                        progressive=config.output_progressive)
    resp = image_content.getvalue()
    if resp_cache_path:
        try:
            "trying to create local cache directory, if it doesn't exist"
            os.makedirs("/".join(resp_cache_path.split("/")[:-1]))
        except OSError:
            pass
        try:
            a = open(resp_cache_path, "w")
            a.write(resp)
            a.close()
        except (OSError, IOError):
            print >> sys.stderr, "error saving response answer to file %s." % (
                resp_cache_path)
            sys.stderr.flush()

    return (OK, content_type, resp)
Example #34
 def _image_callback(self, msg):
     self.pil_image = imgmsg_to_pil(msg)
     if hasattr(self, 'display_flip') and self.display_flip:
         self.pil_image = ImageOps.flip(self.pil_image)
         self.pil_image = ImageOps.mirror(self.pil_image)
     self.cv_image = cv2.resize(pil_to_cv(self.pil_image), dsize=(300, 300))
Example #35
def imageBorder(img, thickness, edgeFill="#ffffff00"):
    """
    Add a border of thickness pixels around the image

    :param img: the image to add a border to; can be a PIL image, numpy array, or similar
    :param thickness: the border thickness in pixels.  Can be:
        int - the same border all the way around
        (w_border,h_border) - add borders this big to the left/right and top/bottom
        (x,y,x2,y2) - add a border of each given size to the corresponding side
    :param edgeFill: defines how to extend.  It can be:
        mirror - reflect the pixels leading up to the border
        repeat - repeat the image over again (useful with repeating textures)
        clamp - streak last pixels out to edge
        [background color] - simply fill with the given color

    TODO: combine into extendImageCanvas function
    """
    if not isinstance(thickness, (tuple, list)):
        thickness = (thickness, thickness, thickness, thickness)
    elif len(thickness) == 2:
        thickness = (thickness[0], thickness[1], thickness[0], thickness[1])
    thickness = [int(t) for t in thickness]
    img = imageRepr.pilImage(img)
    newSize = (int(img.size[0] + thickness[0] + thickness[2]),
               int(img.size[1] + thickness[1] + thickness[3]))
    if edgeFill == 'mirror':
        newImage = Image.new(img.mode, newSize)
        # top
        fill = ImageOps.flip(img.crop((0, 0, img.width, thickness[1])))
        newImage.paste(fill, (thickness[1], 0))
        # bottom
        fill = ImageOps.flip(
            img.crop((0, img.height - thickness[2], img.width, img.height)))
        newImage.paste(fill, (thickness[2], img.height + thickness[2]))
        # left
        fill = ImageOps.mirror(img.crop((0, 0, thickness[0], img.height)))
        newImage.paste(fill, (0, thickness[0]))
        # right
        fill = ImageOps.mirror(
            img.crop((img.width - thickness[3], 0, img.width, img.height)))
        newImage.paste(fill, (img.width + thickness[3], thickness[3]))
        # top-left corner
        #fill=ImageOps.mirror(ImageOps.flip(img.crop((0,0,thickness,thickness))))
        #newImage.paste(fill,(0,0))
        # top-right corner
        #fill=ImageOps.mirror(ImageOps.flip(img.crop((img.width-thickness,0,img.width,thickness))))
        #newImage.paste(fill,(img.width+thickness,0))
        # bottom-left corner
        #fill=ImageOps.mirror(ImageOps.flip(img.crop((0,img.height-thickness,thickness,img.height))))
        #newImage.paste(fill,(0,img.height+thickness))
        # bottom-right corner
        #fill=ImageOps.mirror(ImageOps.flip(img.crop((img.width-thickness,img.height-thickness,img.width,img.height))))
        #newImage.paste(fill,(img.width+thickness,img.height+thickness))
    elif edgeFill == 'repeat':
        newImage = Image.new(img.mode, newSize)
        # top
        fill = img.crop((0, 0, img.width, thickness[1]))
        newImage.paste(fill, (thickness[1], img.height + thickness[1]))
        # bottom
        fill = img.crop((0, img.height - thickness[2], img.width, img.height))
        newImage.paste(fill, (thickness[2], 0))
        # left
        fill = img.crop((0, 0, thickness[0], img.height))
        newImage.paste(fill, (img.width + thickness[0], thickness[0]))
        # right
        fill = img.crop((img.width - thickness[3], 0, img.width, img.height))
        newImage.paste(fill, (0, thickness[3]))
        # top-left corner
        fill = img.crop((0, 0, thickness, thickness))
        newImage.paste(fill, (img.width + thickness, img.height + thickness))
        # top-right corner
        fill = img.crop((img.width - thickness, 0, img.width, thickness))
        newImage.paste(fill, (0, img.height + thickness))
        # bottom-left corner
        fill = img.crop((0, img.height - thickness, thickness, img.height))
        newImage.paste(fill, (img.width + thickness, 0))
        # bottom-right corner
        fill = img.crop((img.width - thickness, img.height - thickness,
                         img.width, img.height))
        newImage.paste(fill, (0, 0))
    elif edgeFill == 'clamp':
        newImage = Image.new(img.mode, newSize)
        # top
        fill = img.crop((0, 0, img.width, 1)).resize((img.width, thickness[1]),
                                                     resample=Image.NEAREST)
        newImage.paste(fill, (thickness[1], 0))
        # bottom
        fill = img.crop((0, img.height - 1, img.width, img.height)).resize(
            (img.width, thickness[2]), resample=Image.NEAREST)
        newImage.paste(fill, (thickness[2], img.height + thickness[2]))
        # left
        fill = img.crop((0, 0, 1, img.height)).resize(
            (thickness[0], img.height), resample=Image.NEAREST)
        newImage.paste(fill, (0, thickness[0]))
        # right
        fill = img.crop((img.width - 1, 0, img.width, img.height)).resize(
            (thickness[3], img.height), resample=Image.NEAREST)
        newImage.paste(fill, (img.width + thickness[3], thickness[3]))
        # TODO: corners
        # top-left corner
        fill = img.crop((0, 0, 1, 1)).resize((thickness, thickness),
                                             resample=Image.NEAREST)
        newImage.paste(fill, (0, 0))
        # top-right corner
        fill = img.crop((img.width - 1, 0, img.width, 1)).resize(
            (thickness, thickness), resample=Image.NEAREST)
        newImage.paste(fill, (img.width + thickness, 0))
        # bottom-left corner
        fill = img.crop((0, img.height - 1, 1, img.height)).resize(
            (thickness, thickness), resample=Image.NEAREST)
        newImage.paste(fill, (0, img.height + thickness))
        # bottom-right corner
        fill = img.crop(
            (img.width - 1, img.height - 1, img.width, img.height)).resize(
                (thickness, thickness), resample=Image.NEAREST)
        newImage.paste(fill, (img.width + thickness, img.height + thickness))
    else:
        newImage = Image.new(img.mode, newSize, edgeFill)
    # splat the original image in the middle
    if True:
        if newImage.mode.endswith('A'):
            newImage.alpha_composite(img, dest=(thickness[0], thickness[1]))
        else:
            newImage.paste(img, (thickness[0], thickness[1]))
    return newImage
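A usage sketch of the mirror fill, assuming imageRepr.pilImage accepts a plain PIL image:

from PIL import Image

img = Image.new('RGB', (64, 64), 'white')
bordered = imageBorder(img, (8, 8), edgeFill='mirror')   # 8 px mirrored border on every side
print(bordered.size)                                     # (80, 80)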
Example #36

if __name__ == '__main__':
    width = 699
    height = 699
    img = Image.new('RGB', (width+1, height+1), "black") # create a new black image
    pixels = img.load()

    model = Model('african_head.obj')
    light_dir = Point3d(0, 0, -1)
    for face in model.faces:
        len_face = len(face)
        triangle_coords = []
        world_coords = []
        for j in range(len_face):
            v0 = model.verts[face[j]]
            v1 = model.verts[face[(j+1) % len_face]]
            x0 = (v0.x+1.) * width/2
            y0 = (v0.y+1.) * height/2
            triangle_coords.append([int(x0), int(y0)])
            world_coords.append(v0)
        v1 = vect3d_minus(world_coords[2], world_coords[0])
        v2 = vect3d_minus(world_coords[1], world_coords[0])
        normal = normalize(cross_product(v1, v2))
        intencity = vect3d_mult(normal, light_dir)
        if intencity > 0:
            triangle(triangle_coords[0], triangle_coords[1], triangle_coords[2], pixels, gray_intencify(intencity))

    ImageOps.flip(img).show()

 def flip(self):
     return ImageOps.flip(self.image)
Example #38
    def makepilimage(self, scale="log", negative=False):
        """
        Makes a PIL image out of the array, respecting the z1 and z2 cutoffs.
        By default we use a log scaling identical to iraf's, and produce an image of mode "L", i.e. grayscale.
        But some drawings or colourscales will change the mode to "RGB" later, if you choose your own colours.
        If you choose scale = "clog" or "clin", you get hue values (aka rainbow colours).
        """

        calcarray = self.numpyarray.transpose()

        if scale == "log" or scale == "lin":
            self.negative = negative
            # numpyarrayshape = self.numpyarray.shape

            # calcarray.ravel() # does not change in place in fact !
            calcarray = calcarray.clip(min=self.z1, max=self.z2)

            if scale == "log":
                # calcarray = np.array(map(lambda x: loggray(x, self.z1, self.z2), calcarray))
                calcarray = loggray(calcarray, self.z1, self.z2)
            else:
                # calcarray = np.array(map(lambda x: lingray(x, self.z1, self.z2), calcarray))
                calcarray = lingray(calcarray, self.z1, self.z2)

            # calcarray.shape = numpyarrayshape
            bwarray = np.zeros(calcarray.shape)
            calcarray.round(out=bwarray)
            bwarray = bwarray.astype(
                np.uint8)  # and you get the dtype you want in the end
            if negative:
                if self.verbose:
                    print "Using negative scale"
                bwarray = 255 - bwarray

            if self.verbose:
                print "PIL range : [%i, %i]" % (np.min(bwarray),
                                                np.max(bwarray))

            # We flip it so that (0, 0) is back in the bottom left corner as in ds9
            # We do this here, so that you can write on the image from left to right :-)

            self.pilimage = imop.flip(im.fromarray(bwarray))
            if self.verbose:
                print "PIL image made with scale : %s" % scale
            return 0

        if scale == "clog" or scale == "clin":  # Rainbow !

            self.negative = False
            if scale == "clin":
                calcarray = (calcarray.clip(min=self.z1, max=self.z2) -
                             self.z1) / (self.z2 - self.z1)  # 0 to 1
            if scale == "clog":
                calcarray = 10.0 + 990.0 * (
                    calcarray.clip(min=self.z1, max=self.z2) - self.z1) / (
                        self.z2 - self.z1)  # 10 to 1000
                calcarray = (np.log10(calcarray) - 1.0) * 0.5  # 0 to 1

            (rarray, garray, barray) = rainbow(calcarray, autoscale=False)
            carray = np.dstack((rarray, garray, barray))

            self.pilimage = imop.flip(im.fromarray(carray, "RGB"))
            if self.verbose:
                print "PIL image made with scale : %s" % scale
            return 0

        raise RuntimeError, "I don't know your colourscale, choose lin log clin or clog !"
Example #39
  def transform(self, i, image):
    # (_x, _y): random translation position      
    _x = np.random.randint(0, self.x)
    _y = np.random.randint(0, self.y)
    
    # _angle : random rotation angle
    _angle  = np.random.randint(0, self.angle)

    minw = float(image.width) * float(self.shrink_rw)
    minh = float(image.height)* float(self.shrink_rh)
    
    # (_w, _h) : random image_width and image_height
    self._w = int( np.random.randint(int(minw), image.width)  )
    self._h = int( np.random.randint(int(minh), image.height) )

    print(" {} {} {} {}".format(_x, _y, self._w, self._h))
    
    if i % 3 == 0:
      _angle = _angle * (-1)

    # Resize the image to (_w, _h)
    image = image.resize(size=(self._w, self._h), resample=Image.LANCZOS)
      
    # Rotate the image by _angle 
    image = image.rotate(_angle, translate=(_x, _y), expand=True)
    

    if i % 3 == 0:
      print("CONTRAST {}".format(i))
      image = ImageOps.autocontrast(image, self.contrast)

    if i % 4 == 0 and self.vflip == True:
      print("VERTICAL FLIP {}".format(i))
      image = ImageOps.flip(image)

    if i % 5 == 0 and self.hflip == True:
      print("HORIZONTAL FLIP {}".format(i))
      image = ImageOps.mirror(image)

    if i % 5 == 0:
      # Apply a simple AFFINE transformation to the image.
      xshift = int( abs(self.xshift) * self.width )
      yshift = int( abs(self.yshift) * self.height )
      
      # New width and height (_nw, _nh) for AFFINE transformation
      _nw = self._w + xshift
      _nh = self._h + yshift
      
      # Very simple coefficients for the AFFINE transformation
      coeffs = (1, self.xshift, 0, 0, 1, 0, 0, 0)
      
      # Inverting _xs flag.
      self.xshift = self.xshift * (-1)
      
      print("AFFINE {}".format(i))
      image = image.transform((_nw, _nh), Image.AFFINE, coeffs, Image.BICUBIC)
      
    if i % 6 == 0 and self.sharpen == True:
      print("SHARPEN {}".format(i))
      image = image.filter(ImageFilter.SHARPEN)

    if i % 8 == 0 and self.smooth == True:
      print("SMOOTH_MORE {}".format(i))
      image = image.filter(ImageFilter.SMOOTH_MORE)
      
    if i % 9 == 0 and self.edge_enhance == True:
      print("EDGE_ENHANCE_MORE {}".format(i))
      image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)
      
    if i>0 and i % 11 == 0 and self.noise >0.0:
      print("NOISE {}".format(i))
      image = self.inject_saultpepper_noise(image)

    return image
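The AFFINE data above is an inverse mapping: each output pixel (x, y) samples the input at (a*x + b*y + c, d*x + e*y + f). A minimal sketch of the horizontal shear this produces, assuming only Pillow:

from PIL import Image

img = Image.new('L', (100, 100), 255)
# coeffs (a, b, c, d, e, f) = (1, 0.2, 0, 0, 1, 0): shear the image horizontally by factor 0.2
sheared = img.transform((120, 100), Image.AFFINE, (1, 0.2, 0, 0, 1, 0), Image.BICUBIC)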
Example #40
    def _draw_image(self, gcode, include_rapids):
        # size of the image (should be based on the max path point)
        scale = self._image_size()

        if self.transparency:
            img = Image.new('RGBA', self.image_size, (255, 0, 0, 0))
        else:
            img = Image.new('RGB', self.image_size, self.background)

        draw = ImageDraw.Draw(img)

        # draw centreline
        cl_y = (self.image_size[1] / 2 - self._min_y)
        start = (self.margin * 0.25, cl_y)
        end = (self.image_size[0] - self.margin * 0.5, cl_y)
        draw.line([start, end],
                  fill=(252, 226, 5),
                  width=self.line_thickness * 2)

        for idx, command in enumerate(gcode):

            if idx < len(gcode) - 1:

                movement = command.get_movement()
                if movement not in ["G0", "G1", "G2", "G3"]:
                    continue

                if movement == "G0" and not include_rapids:
                    continue

                params = command.get_params()
                prev_params = gcode[idx - 1].get_params()

                line_colour = self._get_line_colour(movement)

                x_start = (prev_params['Z'] -
                           self._min_x) * scale + self.margin / 2
                y_start = (prev_params['X'] -
                           self._min_y) * scale + self.margin / 2
                x_end = (params['Z'] - self._min_x) * scale + self.margin / 2
                y_end = (params['X'] - self._min_y) * scale + self.margin / 2

                if movement in ["G0", "G1"]:
                    draw.line([(x_start, y_start), (x_end, y_end)],
                              fill=line_colour,
                              width=self.line_thickness)

                if movement in ["G2", "G3"]:
                    x_centre = (prev_params['Z'] + params['K'] -
                                self._min_x) * scale + self.margin / 2
                    y_centre = (prev_params['X'] + params['I'] -
                                self._min_y) * scale + self.margin / 2

                    distance = self._get_distance(x_centre, y_centre, x_start,
                                                  y_start)

                    start_angle = self._get_angle(x_centre, y_centre, x_start,
                                                  y_start)
                    end_angle = self._get_angle(x_centre, y_centre, x_end,
                                                y_end)
                    boundbox = [(x_centre - distance, y_centre - distance),
                                (x_centre + distance, y_centre + distance)]
                    if movement == "G2":
                        draw.arc(boundbox,
                                 end_angle,
                                 start_angle,
                                 fill=line_colour,
                                 width=self.line_thickness)

                    if movement == "G3":
                        draw.arc(boundbox,
                                 start_angle,
                                 end_angle,
                                 fill=line_colour,
                                 width=self.line_thickness)

        # Mirror because it's drawn flipped.
        if self.mirror_image:
            img = ImageOps.mirror(img)
        elif self.flip_image:
            img = ImageOps.flip(img)
        else:
            img = ImageOps.flip(img)

        if self.transparency:
            img.save(self.file_location + self.image_name + '.png')
        else:
            img.save(self.file_location + self.image_name + self.image_type)
Example #41
    def __getitem__(self, index):
        # Arrange frames
        folder_id = int(str(self.input_list[index])[-9:-7])
        num_input = len(self.input_list)
        num_gt = len(self.gt_list)
        if num_input != num_gt:
            raise ValueError("wrong dataset")

        if index == (len(self.input_list) - 1):
            folder_id_next = None
            folder_id_next_next = None
        elif index == (len(self.input_list) - 2):
            folder_id_next = int(str(self.input_list[index + 1])[-9:-7])
            folder_id_next_next = None
        else:
            folder_id_next = int(str(self.input_list[index + 1])[-9:-7])
            folder_id_next_next = int(str(self.input_list[index + 2])[-9:-7])

        if index == 0:
            folder_id_previous = None
            folder_id_previous_previous = None
        elif index == 1:
            folder_id_previous = int(str(self.input_list[index - 1])[-9:-7])
            folder_id_previous_previous = None
        else:
            folder_id_previous = int(str(self.input_list[index - 1])[-9:-7])
            folder_id_previous_previous = int(
                str(self.input_list[index - 2])[-9:-7])

        # Get image ID
        interval = 1
        if self.frames == 5:
            if folder_id != folder_id_previous:
                self.input_frame_lists = [
                    index, index, index, index + interval, index + interval * 2
                ]  # [n, n, n, n+1, n+2]
                self.seg_frame_lists = [
                    index, index, index, index + interval, index + interval * 2
                ]
                self.gt_frame_lists = [index, index, index + interval]
                self.input1_frame_lists = self.input_frame_lists
            elif folder_id == folder_id_previous and folder_id != folder_id_previous_previous:
                self.input_frame_lists = [
                    index - interval, index - interval, index,
                    index + interval, index + interval * 2
                ]  # [n-1, n-1, n, n+1, n+2]
                self.seg_frame_lists = [
                    index - interval, index - interval, index,
                    index + interval, index + interval * 2
                ]
                self.gt_frame_lists = [
                    index - interval, index, index + interval
                ]
                self.input1_frame_lists = self.input_frame_lists
            elif folder_id != folder_id_next:
                self.input_frame_lists = [
                    index - interval * 2, index - interval, index, index, index
                ]  # [n-2, n-1, n, n, n]
                self.seg_frame_lists = [
                    index - interval * 2, index - interval, index, index, index
                ]
                self.gt_frame_lists = [index - interval, index, index]
                self.input1_frame_lists = self.input_frame_lists
            elif folder_id == folder_id_next and folder_id != folder_id_next_next:
                self.input_frame_lists = [
                    index - interval * 2, index - interval, index,
                    index + interval, index + interval
                ]  # [n-2, n-1, n, n+1, n+1]
                self.seg_frame_lists = [
                    index - interval * 2, index - interval, index,
                    index + interval, index + interval
                ]
                self.gt_frame_lists = [
                    index - interval, index, index + interval
                ]
                self.input1_frame_lists = self.input_frame_lists
            else:
                self.input_frame_lists = [
                    index - interval * 2, index - interval, index,
                    index + interval, index + interval * 2
                ]
                self.seg_frame_lists = [
                    index - interval * 2, index - interval, index,
                    index + interval, index + interval * 2
                ]  # [n-2, n-1, n, n+1, n+2]
                self.gt_frame_lists = [
                    index - interval, index, index + interval
                ]
                self.input1_frame_lists = self.input_frame_lists
        elif self.frames == 3:
            if folder_id != folder_id_previous:
                self.frame_lists = [index, index, index + interval]
            elif folder_id != folder_id_next:
                self.frame_lists = [index - interval, index, index]
            elif index == (len(self.input_list) - 1):
                self.frame_lists = [index - interval, index, index]
            else:
                self.frame_lists = [index - interval, index, index + interval]

        elif self.frames == 1:
            self.frame_lists = [index]

        else:
            raise ValueError("only support frames == 1 & 3 & 5")

        # Open images
        input_imgs = [
            Image.open((self.input_list[i])).convert('RGB')
            for i in self.input_frame_lists
        ]
        input1_imgs = [
            Image.open((self.input1_list[i])).convert('RGB')
            for i in self.input1_frame_lists
        ]
        gt_imgs = [
            Image.open((self.gt_list[i])).convert('RGB')
            for i in self.gt_frame_lists
        ]  #.convert('L')
        seg_imgs = [
            Image.open((self.seg_list[i])).convert('L')
            for i in self.seg_frame_lists
        ]

        # Set parameters
        left_top_w = random.randint(0, gt_imgs[0].size[0] - self.fineSize - 1)
        left_top_h = random.randint(0, gt_imgs[0].size[1] - self.fineSize - 1)
        random_flip_h = random.random()
        random_flip_v = random.random()
        random_rot = random.random()

        seg = []
        for seg_img in seg_imgs:
            input_patch = seg_img.crop(
                (left_top_w, left_top_h, left_top_w + self.fineSize,
                 left_top_h + self.fineSize))
            input_patch = input_patch.resize((self.cropSize, self.cropSize),
                                             Image.BICUBIC)
            if random_flip_h < 0.5:
                input_patch = ImageOps.flip(input_patch)
            if random_flip_v < 0.5:
                input_patch = ImageOps.mirror(input_patch)
            if random_rot < 0.5:
                input_patch = input_patch.rotate(180)
            input_patch = np.array(input_patch, dtype=np.float32) / 255
            # input_patch = input_patch.transpose((2, 0, 1))
            input_patch = torch.from_numpy(input_patch.copy()).float()
            input_patch = torch.unsqueeze(input_patch, 0)
            seg.append(input_patch)
        seg = torch.cat(seg, 0)
        seg = np.array(seg, dtype=np.float32)

        gts = []
        for gt_img in gt_imgs:
            input_patch = gt_img.crop(
                (left_top_w, left_top_h, left_top_w + self.fineSize,
                 left_top_h + self.fineSize))
            input_patch = input_patch.resize((self.cropSize, self.cropSize),
                                             Image.BICUBIC)
            if random_flip_h < 0.5:
                input_patch = ImageOps.flip(input_patch)
            if random_flip_v < 0.5:
                input_patch = ImageOps.mirror(input_patch)
            if random_rot < 0.5:
                input_patch = input_patch.rotate(180)
            input_patch = np.array(input_patch, dtype=np.float32) / 255
            input_patch = input_patch.transpose((2, 0, 1))
            input_patch = torch.from_numpy(input_patch.copy()).float()
            #nomalize = transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
            #input_patch = nomalize(input_patch)
            #input_patch = torch.unsqueeze(input_patch, 0)
            gts.append(input_patch)

        gts = torch.cat(gts, 0)
        gts = np.array(gts, dtype=np.float32)
        # Processing input images
        inputs = []
        for input_img in input_imgs:
            input_patch = input_img.crop(
                (left_top_w, left_top_h, left_top_w + self.fineSize,
                 left_top_h + self.fineSize))
            input_patch = input_patch.resize((self.cropSize, self.cropSize),
                                             Image.BICUBIC)
            if random_flip_h < 0.5:
                input_patch = ImageOps.flip(input_patch)
            if random_flip_v < 0.5:
                input_patch = ImageOps.mirror(input_patch)
            if random_rot < 0.5:
                input_patch = input_patch.rotate(180)
            input_patch = np.array(input_patch, dtype=np.float32) / 255
            input_patch = input_patch.transpose((2, 0, 1))
            input_patch = torch.from_numpy(input_patch.copy()).float()
            # nomalize = transforms.Normalize(mean=(0.5,0.5,0.5), std=(0.5,0.5,0.5))
            # input_patch = nomalize(input_patch)
            inputs.append(input_patch)
        inputs = torch.cat(inputs, 0)
        inputs = np.array(inputs, dtype=np.float32)

        inputs1 = []
        for input1_img in input1_imgs:
            input_patch = input1_img.crop(
                (left_top_w, left_top_h, left_top_w + self.fineSize,
                 left_top_h + self.fineSize))
            input_patch = input_patch.resize((self.cropSize, self.cropSize),
                                             Image.BICUBIC)
            if random_flip_h < 0.5:
                input_patch = ImageOps.flip(input_patch)
            if random_flip_v < 0.5:
                input_patch = ImageOps.mirror(input_patch)
            if random_rot < 0.5:
                input_patch = input_patch.rotate(180)
            input_patch = np.array(input_patch, dtype=np.float32) / 255
            input_patch = input_patch.transpose((2, 0, 1))
            input_patch = torch.from_numpy(input_patch.copy()).float()
            inputs1.append(input_patch)
        inputs1 = torch.cat(inputs1, 0)
        inputs1 = np.array(inputs1, dtype=np.float32)

        # Processing gt image
        # gt_patch = gt_img.crop(
        #     (left_top_w, left_top_h, left_top_w + self.fineSize, left_top_h + self.fineSize))
        # gt_patch = gt_patch.resize((self.cropSize, self.cropSize), Image.BICUBIC)
        # if random_flip_h < 0.5:
        #     gt_patch = ImageOps.flip(gt_patch)
        # if random_flip_v < 0.5:
        #     gt_patch = ImageOps.mirror(gt_patch)
        # if random_rot < 0.5:
        #     gt_patch = gt_patch.rotate(180)
        # gt_patch = np.array(gt_patch, dtype=np.float32) / 255
        # gt_patch = gt_patch.transpose((2, 0, 1))

        return inputs.copy(), \
               gts.copy(), \
               seg.copy(), \
               inputs1.copy()
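The same crop / flip / rotate / to-tensor sequence is repeated above for the seg, gt, input and input1 patches. A hedged sketch of a helper (the name augment_patch is hypothetical, not part of the dataset class) that factors it out while reusing the shared random draws, so all patches stay spatially aligned:
import numpy as np
import torch
from PIL import Image, ImageOps

def augment_patch(img, box, crop_size, flip_h, flip_v, rot, channels_first=True):
    patch = img.crop(box).resize((crop_size, crop_size), Image.BICUBIC)
    if flip_h < 0.5:
        patch = ImageOps.flip(patch)    # vertical flip
    if flip_v < 0.5:
        patch = ImageOps.mirror(patch)  # horizontal flip
    if rot < 0.5:
        patch = patch.rotate(180)
    arr = np.array(patch, dtype=np.float32) / 255
    if channels_first and arr.ndim == 3:
        arr = arr.transpose((2, 0, 1))
    return torch.from_numpy(arr.copy()).float()
The single-channel seg patches would still need the extra unsqueeze(0) before concatenation, as in the loop above.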
Ejemplo n.º 42
0
        def got_clipboard_lock():
            if COMPRESSED_IMAGES:
                fmt_name = LPCSTR(img_format.upper().encode("latin1") +
                                  b"\0")  #ie: "PNG"
                fmt = RegisterClipboardFormatA(fmt_name)
                if fmt:
                    data_handle = GetClipboardData(fmt)
                    if data_handle:
                        size = GlobalSize(data_handle)
                        data = GlobalLock(data_handle)
                        log("GetClipboardData(%s)=%#x size=%s, data=%#x",
                            img_format.upper(), data_handle, size, data)
                        if data and size:
                            try:
                                cdata = (c_char * size).from_address(data)
                            finally:
                                GlobalUnlock(data)
                            got_image(bytes(cdata), False)
                            return True

            data_handle = GetClipboardData(win32con.CF_DIBV5)
            log("CF_BITMAP=%s", data_handle)
            data = GlobalLock(data_handle)
            if not data:
                log("failed to lock data handle %#x (may try again)",
                    data_handle)
                return False
            try:
                header = cast(data, PBITMAPV5HEADER).contents
                offset = header.bV5Size + header.bV5ClrUsed * 4
                w, h = header.bV5Width, abs(header.bV5Height)
                bits = header.bV5BitCount
                log(
                    "offset=%s, width=%i, height=%i, compression=%s", offset,
                    w, h,
                    BI_FORMATS.get(header.bV5Compression,
                                   header.bV5Compression))
                log("planes=%i, bitcount=%i", header.bV5Planes, bits)
                log("colorspace=%s",
                    COLOR_PROFILES.get(header.bV5CSType, header.bV5CSType))
                #if header.bV5Compression in (BI_JPEG, BI_PNG):
                #    pass
                if header.bV5Compression != BI_RGB:
                    errback(
                        "cannot handle %s compression yet" % BI_FORMATS.get(
                            header.bV5Compression, header.bV5Compression))
                    return True
                if bits == 24:
                    save_format = "RGB"
                    rgb_format = "BGR"
                    stride = roundup(w * 3, 4)
                elif bits == 32:
                    save_format = "RGBA"
                    rgb_format = "BGRA"
                    stride = w * 4
                else:
                    errback(
                        "cannot handle image data with %i bits per pixel yet" %
                        bits)
                    return True
                img_size = stride * h
                rgb_data = (c_char * img_size).from_address(data + offset)
                from PIL import Image, ImageOps
                img = Image.frombytes(save_format, (w, h), rgb_data, "raw",
                                      rgb_format, stride, 1)
                if header.bV5Height > 0:
                    img = ImageOps.flip(img)
                buf = BytesIO()
                #"RGB"/"RGBA" are PIL modes, not file formats, so save in the requested format:
                img.save(buf, format=img_format)
                data = buf.getvalue()
                buf.close()
                got_image(data, True)
                return True
            finally:
                GlobalUnlock(data)
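The roundup helper comes from the surrounding xpra module and is not shown here; the 24-bit branch needs it because DIB scanlines are padded to 4-byte boundaries. A minimal sketch of the assumed behaviour:
def roundup(n, m):
    # Round n up to the next multiple of m.
    return (n + m - 1) // m * m

# e.g. a 10-pixel-wide 24-bit row: roundup(10 * 3, 4) == 32 bytes per scanline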
Ejemplo n.º 43
0
def kaleidoscope(triangle_width, infile, outfile):
    triangle_height = int(triangle_width * numpy.sqrt(3) / 2)

    img = Image.open(infile).convert('RGBA')
    width, height = img.size
    print(width, height)
    centre_point = (width / 2, height / 2)
    print("Centre:", centre_point)
    # int so that the later crop/paste boxes get integer coordinates
    top_of_triangle = int((height / 2) - (triangle_height / 2))
    print(top_of_triangle)
    bottom_of_triangle = top_of_triangle + triangle_height
    left_of_triangle = (width / 2) - (triangle_width / 2)
    right_of_triangle = left_of_triangle + triangle_width
    print("Top:", top_of_triangle)
    print("Bottom:", bottom_of_triangle)
    print("Left:", left_of_triangle)
    print("Right:", right_of_triangle)

    # Triangle zero
    a = (centre_point[0], top_of_triangle)
    b = (right_of_triangle, bottom_of_triangle)
    c = (left_of_triangle, bottom_of_triangle)
    print(a, b, c)

    # x is leftmost point of next triangle
    x = centre_point[0]
    i = 0
    tri1 = [a, b, c]
    new_a = a
    new_b = b
    new_c = c

    # (Every third image is a vertical flip, so an optimisation would be to
    # calculate just the three rotated triangles and paste flips as needed.
    # These three flips could also be cached.)

    # Fill to the right
    print("Fill to the right")
    while (x < width):
        i += 1
        print(i, x, width)

        if is_odd(i):
            new_y = top_of_triangle
        else:
            new_y = bottom_of_triangle

        if i % 3 == 1:
            new_c = (new_c[0] + (1.5 * triangle_width), new_y)
        elif i % 3 == 2:
            new_a = (new_a[0] + (1.5 * triangle_width), new_y)
        elif i % 3 == 0:
            new_b = (new_b[0] + (1.5 * triangle_width), new_y)
        tri2 = [new_a, new_b, new_c]
        transformblit(tri1, tri2, img, img)

        x += triangle_width / 2

    # x is rightmost point of next triangle
    x = centre_point[0]
    i = 0
    new_a = a
    new_b = b
    new_c = c

    # Fill to the left
    print("Fill to the left")
    while (x > 0):
        i += 1
        print(i, x, width)

        if is_odd(i):
            new_y = top_of_triangle
        else:
            new_y = bottom_of_triangle

        if i % 3 == 1:
            new_b = (new_b[0] - (1.5 * triangle_width), new_y)
        elif i % 3 == 2:
            new_a = (new_a[0] - (1.5 * triangle_width), new_y)
        elif i % 3 == 0:
            new_c = (new_c[0] - (1.5 * triangle_width), new_y)
        tri2 = [new_a, new_b, new_c]
        transformblit(tri1, tri2, img, img)

        x -= triangle_width / 2

    # Flip strip
    strip = img.crop((0, top_of_triangle, width, bottom_of_triangle))
    flip = ImageOps.flip(strip)

    # Fill down
    print("Fill down")
    y = bottom_of_triangle
    i = 0
    while (y < height):
        print(y, height)
        i += 1
        if is_odd(i):
            img.paste(flip, (0, y))
        else:
            img.paste(strip, (0, y))
        # img.show()
        y += triangle_height

    # Fill up
    print("Fill up")
    y = top_of_triangle
    i = 0
    while (y > 0):
        print(y, 0)
        i += 1
        if is_odd(i):
            img.paste(flip, (0, y - triangle_height))
        else:
            img.paste(strip, (0, y - triangle_height))
        # img.show()
        y -= triangle_height

    img.show()
    print("Saving to", outfile)
    img.save(outfile, quality=100)
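is_odd and transformblit are helpers defined elsewhere in this script. Only the parity check is simple enough to sketch here; this is an assumption, matching how it is used above to alternate the flipped and unflipped strips:
def is_odd(n):
    return n % 2 == 1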
Ejemplo n.º 44
0
async def transform(event):
    if not event.reply_to_msg_id:
        await event.edit("`Reply to Any media..`")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.media:
        await event.edit("`reply to a image/sticker`")
        return
    await event.edit("`Downloading Media..`")
    if reply_message.photo:
        transform = await bot.download_media(
            reply_message,
            "transform.png",
        )
    elif (DocumentAttributeFilename(file_name="AnimatedSticker.tgs")
          in reply_message.media.document.attributes):
        await bot.download_media(
            reply_message,
            "transform.tgs",
        )
        os.system("lottie_convert.py transform.tgs transform.png")
        transform = "transform.png"
    elif reply_message.video:
        video = await bot.download_media(
            reply_message,
            "transform.mp4",
        )
        extractMetadata(createParser(video))
        os.system(
            "ffmpeg -i transform.mp4 -vframes 1 -an -s 480x360 -ss 1 transform.png"
        )
        transform = "transform.png"
    else:
        transform = await bot.download_media(
            reply_message,
            "transform.png",
        )
    try:
        await event.edit("`Transforming this media..`")
        cmd = event.pattern_match.group(1)
        im = Image.open(transform).convert("RGB")
        if cmd == "mirror":
            IMG = ImageOps.mirror(im)
        elif cmd == "flip":
            IMG = ImageOps.flip(im)
        elif cmd == "ghost":
            IMG = ImageOps.invert(im)
        elif cmd == "bw":
            IMG = ImageOps.grayscale(im)
        elif cmd == "poster":
            IMG = ImageOps.posterize(im, 2)
        IMG.save(Converted, quality=95)
        await event.client.send_file(event.chat_id,
                                     Converted,
                                     reply_to=event.reply_to_msg_id)
        await event.delete()
        os.system("rm -rf *.mp4")
        os.system("rm -rf *.tgs")
        os.remove(transform)
        os.remove(Converted)
    except BaseException:
        return
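Converted is defined elsewhere in this plugin, and IMG is only bound when the command matches one of the five branches above. A hedged sketch, not the plugin's actual code, of a table-driven variant that cannot leave IMG unbound:
from PIL import Image, ImageOps

TRANSFORMS = {
    "mirror": ImageOps.mirror,
    "flip": ImageOps.flip,
    "ghost": ImageOps.invert,
    "bw": ImageOps.grayscale,
    "poster": lambda im: ImageOps.posterize(im, 2),
}

def apply_transform(im: Image.Image, cmd: str) -> Image.Image:
    # Unknown commands fall back to the untouched image.
    return TRANSFORMS.get(cmd, lambda x: x)(im)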
Ejemplo n.º 45
0
    def Solve(self, problem):

        answer = -1

        # create and init dict for problems.
        figures = problem.figures

        if (problem.problemType == '2x2'):

            #load training and answer images
            imgA = Image.open(figures['A'].visualFilename)
            imgB = Image.open(figures['B'].visualFilename)
            imgC = Image.open(figures['C'].visualFilename)
            img1 = Image.open(figures['1'].visualFilename)
            img2 = Image.open(figures['2'].visualFilename)
            img3 = Image.open(figures['3'].visualFilename)
            img4 = Image.open(figures['4'].visualFilename)
            img5 = Image.open(figures['5'].visualFilename)
            img6 = Image.open(figures['6'].visualFilename)

            # create pixel counts for training images using loadImages
            pixelCountImgA = loadImages(imgA)
            pixelCountImgB = loadImages(imgB)
            pixelCountImgC = loadImages(imgC)

            # create pixel counts for answer images using loadImages
            Img1Pixels = loadImages(img1)
            Img2Pixels = loadImages(img2)
            Img3Pixels = loadImages(img3)
            Img4Pixels = loadImages(img4)
            Img5Pixels = loadImages(img5)
            Img6Pixels = loadImages(img6)

            #array of pixel counts for the answer images
            answerImagePixels = [
                Img1Pixels, Img2Pixels, Img3Pixels, Img4Pixels, Img5Pixels,
                Img6Pixels
            ]

            # Cross-compare the pixel counts of the training images.
            # This finds trivial matches where an answer image's pixels equal a training image's pixels
            if np.array_equal(pixelCountImgA,
                              pixelCountImgB):  #if imgA, imgB are the same...
                answerImage = pixelCountImgC  #then answerImage var equals imgC pixels
                for index, element in enumerate(
                        answerImagePixels
                ):  #in which case we loop over answerImagePixels
                    if np.array_equal(
                            answerImage, element
                    ):  # if imgC pixels (via answerImage) equal answer image...
                        print('array_equal TRUE for imgA imgB')
                        answer = index + 1  #then answer var equals the corresponding answer image
                        break

            #repeat the above for ImgA and imgC
            elif np.array_equal(
                    pixelCountImgA,
                    pixelCountImgC):  # otherwise, compare imgA and imgC
                answerImage = pixelCountImgB  # init answerImage to imgB
                for index, element in enumerate(answerImagePixels):
                    if np.array_equal(answerImage, element):
                        print('array_equal TRUE for imgA imgC')
                        answer = index + 1
                        break

            else:

                #check for answer image as mirror or flip of training images.
                # create var for mirroring; init to L-R mirror of imgA
                mirrorpixelCountImgA = loadImages(ImageOps.mirror(imgA))
                #same as above but for T-B flip
                flippixelCountImgA = loadImages(ImageOps.flip(imgA))

                if answer == -1 and similarity(mirrorpixelCountImgA,
                                               pixelCountImgB)[1] > .95:
                    answer = similarityThrshld(
                        loadImages(ImageOps.mirror(imgC)), answerImagePixels)
                    print('answer = imgA, imgB mirrored')
                if answer == -1 and similarity(mirrorpixelCountImgA,
                                               pixelCountImgC)[1] > .95:
                    answer = similarityThrshld(
                        loadImages(ImageOps.mirror(imgB)), answerImagePixels)
                    print('answer = imgA, imgC mirrored')
                if answer == -1 and similarity(flippixelCountImgA,
                                               pixelCountImgB)[1] > .98:
                    answer = similarityThrshld(loadImages(ImageOps.flip(imgC)),
                                               answerImagePixels)
                    print('answer = imgA, imgB flip')
                if answer == -1 and similarity(flippixelCountImgA,
                                               pixelCountImgC)[1] > .98:
                    answer = similarityThrshld(loadImages(ImageOps.flip(imgB)),
                                               answerImagePixels)
                    print('answer = imgA, imgC flip')

                # use array_equal to compare pixel count for ImgA and ImgB to ImgC and ImgA and ImgC to ImgB

            for index, element in enumerate(answerImagePixels):
                print('\n  Now comparing answer: ' + str(answer) + ' to ' +
                      problem.name + ':\n')
                #print('\n  Similarity threshold for: ' + str(similarityThrshld()))

                break

        pass

        return answer
Ejemplo n.º 46
0
    elif FLAGS.camera:
        while True:
            key = input('Scanning the product. Please press "Enter".')

            if key == 'q':
                break

            photo_filename = '/tmp/data.jpg'

            with picamera.PiCamera() as camera:
                camera.resolution = (300, 400)
                camera.start_preview()
                camera.capture(photo_filename)
            #try:
            image = Image.open(photo_filename)
            image = ImageOps.flip(image)
            image = ImageOps.mirror(image)
            #except:
            #print('Read error, please try again.')

            #else:
            output_dir = 'output/'

            time = datetime.now().strftime('%Y%m%d%H%M%S')

            pred, score, r_image = yolo.detect_image(image)

            image_path = output_dir + 'result_{}.jpg'.format(time)
            r_image.save(image_path)
            predict.show_image(image_path)
Ejemplo n.º 47
print(height)

Lbox = (0, 0, 426, 720)
Lframe1 = image.crop(Lbox)
#Lframe1.show()

#Cbox = (426,0,753.34,720)
#Cframe = image.crop(Cbox)
#Cframe.show()

Rbox = (852, 0, 1280, 720)
Rframe1 = image.crop(Rbox)
#Rframe1.show()

#run the HOG method on Lbox and Rbox
#send the frame back out, modified with people boxed
#merge the three together: ScannedL, Cframe and ScannedR
#(might have to be a new image)
SL = ImageOps.flip(Lframe1)
SR = ImageOps.flip(Rframe1)
image.paste(SL, Lbox)
image.paste(SR, Rbox)
image.show()

#pedest time stamps
#5.29 ----- 6.04
#9.19 ----- 9.39

#lane
#0.45 ----- 1.15
Ejemplo n.º 48
0
def voltearImagen(imagen):
    voltear = ImageOps.flip(imagen)
    return voltear
Ejemplo n.º 49
0
        draw.text((0, 32-8), "=" + config.LOCAL_CURRENCY_CHAR, fill='white', font=currencyFont)
        draw.text((12, 32-6), "{:,.2f} @ S{:,.9f}".format((lerp(previousBalance, currentBalance, timeDelta) * lerp(previousRate, currentRate, timeDelta)) * localExchange, lerp(previousRate, currentRate, timeDelta) * localExchange), fill='white', font=titleFont)   

    if screenToShow == 2:

        canvas = Image.new(imageEncoding, (frameSize))
        draw = ImageDraw.Draw(canvas)
        draw.text((0, -1), "SafeMoon Price (USDT)", fill='white', font=titleFont)
        draw.text((1, 10), "$", fill='white', font=safemoonFont_large)
        draw.text((17, 7), "{:,.9f}".format(lerp(previousRate, currentRate, timeDelta)), fill="white", font=balanceFont)
        
        sign = currentRate - previousRate

        if sign > 0:
            arrow2 = ImageOps.flip(arrow)
            canvas.paste(arrow2, (105, 12))
        if sign < 0:
            canvas.paste(arrow, (105, 12))

        perc24 = lerp(previousPerc, currentPerc, timeDelta)
        includeSign = ""
        if perc24 > 0:
            includeSign = "+"
        
        draw.text((2, 32-6), "24h {}{:,.2f}%".format(includeSign, perc24), fill='white', font=titleFont)    


    timeDelta = inverse_lerp(0, displayTime, time.time() - startTime)

    if config.RUN_EMULATOR:
Ejemplo n.º 50
0
    def gen_data(self, random_index):
        img_pil = Image.open(
            os.path.join(r'D:\datasets\LSLOGO\Logo-2K+',
                         self.pathlist[random_index])).convert('L')

        pad_top = int(abs(np.random.uniform(0, self.pad_param)))
        pad_bottom = int(abs(np.random.uniform(0, self.pad_param)))
        pad_left = int(abs(np.random.uniform(0, self.pad_param)))
        pad_right = int(abs(np.random.uniform(0, self.pad_param)))
        rotate_param = np.random.uniform(0, self.rotate_degree_param)

        # randint's upper bound is exclusive: (0, 2) yields 0 or 1, whereas
        # (0, 1) always returns 0 and the flips would never trigger.
        flip_flag = np.random.randint(0, 2)
        mirror_flag = np.random.randint(0, 2)

        if (flip_flag):
            img_pil = ImageOps.flip(img_pil)
        if (mirror_flag):
            img_pil = ImageOps.mirror(img_pil)

        blur_rad = np.random.normal(loc=0.0, scale=1, size=None)
        img_pil = img_pil.filter(ImageFilter.GaussianBlur(blur_rad))

        contrast_factor = np.random.normal(loc=1.0, scale=0.25, size=None)
        color_factor = np.max(
            [0, 1 - abs(np.random.normal(loc=0, scale=0.5, size=None))])

        translate_factor_hor = np.random.normal(loc=0, scale=5, size=None)
        translate_factor_ver = np.random.normal(loc=0, scale=5, size=None)
        brightness_factor = np.random.normal(loc=1.0, scale=0.5, size=None)

        # Build each enhancer from the current image so the adjustments stack;
        # constructing them all up front would make each enhance() start again
        # from the unmodified image.
        img_pil = ImageEnhance.Contrast(img_pil).enhance(contrast_factor)
        img_pil = ImageEnhance.Brightness(img_pil).enhance(brightness_factor)
        img_pil = ImageEnhance.Color(img_pil).enhance(color_factor)
        img_pil = ImageChops.offset(img_pil, int(translate_factor_hor),
                                    int(translate_factor_ver))

        img_pil = img_pil.rotate(rotate_param,
                                 resample=Image.BILINEAR,
                                 expand=True,
                                 fillcolor=(255))

        img = np.asarray(img_pil)
        img = cv2.copyMakeBorder(img,
                                 pad_top,
                                 pad_bottom,
                                 pad_left,
                                 pad_right,
                                 cv2.BORDER_CONSTANT,
                                 value=(255, 255, 255))
        img = cv2.resize(img, dsize=(self.input_shape))
        img = img / 127.5 - 1

        classT = self.pathlist[random_index]
        classT = classT.split('/')[1]
        targetT = self.classdf[self.classdf['class'] == classT]
        targetI = targetT['index'].values[0]
        target = np.zeros(self.numclass)
        target[targetI] = 1
        return img, target
Ejemplo n.º 51
0
async def imirror(event):  # sourcery no-metrics
    "imgae refelection fun."
    reply = await event.get_reply_message()
    mediatype = media_type(reply)
    if not reply or not mediatype or mediatype not in ["Photo", "Sticker"]:
        return await edit_delete(
            event, "__Reply to photo or sticker to make mirror.__")
    catevent = await event.edit("__Reflecting the image....__")
    args = event.pattern_match.group(1)
    if args:
        filename = "catuserbot.webp"
        f_format = "webp"
    else:
        filename = "catuserbot.jpg"
        f_format = "jpeg"
    try:
        imag = await _cattools.media_to_pic(catevent, reply, noedits=True)
        if imag[1] is None:
            return await edit_delete(
                imag[0],
                "__Unable to extract image from the replied message.__")
        image = Image.open(imag[1])
    except Exception as e:
        return await edit_delete(
            catevent, f"**Error in identifying image:**\n__{str(e)}__")
    flag = event.pattern_match.group(3) or "r"
    w, h = image.size
    if w % 2 != 0 and flag in ["r", "l"] or h % 2 != 0 and flag in ["u", "b"]:
        image = image.resize((w + 1, h + 1))
        w, h = image.size  # Image.size is (width, height)
    if flag == "l":
        left = 0
        upper = 0
        right = w // 2
        lower = h
        nw = right
        nh = left
    elif flag == "r":
        left = w // 2
        upper = 0
        right = w
        lower = h
        nw = upper
        nh = upper
    elif flag == "u":
        left = 0
        upper = 0
        right = w
        lower = h // 2
        nw = left
        nh = lower
    elif flag == "b":
        left = 0
        upper = h // 2
        right = w
        lower = h
        nw = left
        nh = left
    temp = image.crop((left, upper, right, lower))
    temp = ImageOps.mirror(temp) if flag in ["l", "r"] else ImageOps.flip(temp)
    image.paste(temp, (nw, nh))
    img = BytesIO()
    img.name = filename
    image.save(img, f_format)
    img.seek(0)
    await event.client.send_file(event.chat_id, img, reply_to=reply)
    await catevent.delete()
Ejemplo n.º 52
0
	else:
		stage = stages.Stage1()
	
	# Initial drawing
	root = tk.Tk()
	root.title(stage.name)
	root.geometry(f'{WINDOW_WIDTH}x{WINDOW_HEIGHT}+0+0')
	cv = tk.Canvas(root, width=WINDOW_WIDTH, height=WINDOW_HEIGHT, bg='white')
	cv.pack()
	cv.focus_set()

	# Load images
	# Character
	obake_img = Image.open('./img/obake.png')
	obake_img = obake_img.resize((IMG_SIZE, IMG_SIZE))
	obake_flip_img = ImageOps.flip(obake_img)  # flipped vertically
	obake_mirror_img = ImageOps.mirror(obake_img)  # mirrored horizontally (facing right)
	obake_fm_img = ImageOps.mirror(obake_flip_img)  # flipped vertically and horizontally
	obake_tkimg = ImageTk.PhotoImage(obake_img)
	obake_flip_tkimg = ImageTk.PhotoImage(obake_flip_img)
	obake_mirror_tkimg = ImageTk.PhotoImage(obake_mirror_img)
	obake_fm_tkimg = ImageTk.PhotoImage(obake_fm_img)
	# Gravity blocks
	udarrow_img = Image.open('./img/updownarrow.png')
	udarrow_img = udarrow_img.resize((BLOCK_SIZE, BLOCK_SIZE))
	udarrow2_img = udarrow_img.resize((BLOCK_SIZE*2, BLOCK_SIZE*2))
	udarrow_tkimg = ImageTk.PhotoImage(udarrow_img)
	udarrow2_tkimg = ImageTk.PhotoImage(udarrow2_img)
	# Signboards
	triple_size = (BLOCK_SIZE*3, BLOCK_SIZE*3)
	dsc_J_img = Image.open(f'./img/dsc_J.png').resize(triple_size)
Ejemplo n.º 53
0
    for chunk in r.iter_content(chunk_size=1024):
        raw_bytes += chunk
        a = raw_bytes.find(b'\xff\xd8\xff')
        b = raw_bytes.find(b'\xff\xd9', a)
        if a != -1 and b != -1:
            # Extract one complete JPEG frame from the MJPEG byte stream.
            jpg = raw_bytes[a:b + 2]
            raw_bytes = raw_bytes[b + 2:]

            imgIo = io.BytesIO(jpg)
            img = Image.open(imgIo)

            # cvImg = cv2.cvtColor(np.array(img.convert("RGB")), cv2.COLOR_RGB2BGR)

            # Process every fifth frame, rotated 180 degrees via flip + mirror.
            c += 1
            c = c % 5
            if c == 0:
                processing.useImg(np.array(ImageOps.mirror(ImageOps.flip(img))), mc)

else:
    print("INVALID CODE CAN'T PROCEED")
    
    
Ejemplo n.º 54
0
        if (last_col - first_col + 1 > width) or (last_row - first_row + 1 >
                                                  height):
            s -= 1
        else:
            # crop() returns a new image, so reassign it
            im = im.crop((first_col - 1, first_row - 1, last_col + 2, last_row + 2))
            return im


# Base image
text = "coil winder".upper()

im = fit_image(text, 300, 24)

im.save('label.png')

im = ImageOps.flip(im)
# Convert into a list of lists of numbers, where 0 is black and 255 is white.
im = im.convert(mode='1')
pixels = list(im.getdata())
width, height = im.size
pixels = [pixels[i * width:(i + 1) * width] for i in xrange(height)]

up_position = '105\n'
down_position = '90\n'
start_motor = '182\n'
stop_motor = '0\n'

# Send the pixel data as up and down commands.
ser = serial.Serial('/dev/ttyACM0')
sleep(1)
print ser.write(down_position)
Ejemplo n.º 55
0
        print file, round(total_count2 * 100.0 / total_count, 2)
        total_count2 += 1

        row = fish_label(label)
        lbl_text = str(row[0]) + "," + str(row[1]) + "," + str(
            row[2]) + "," + str(row[3]) + "," + str(row[4]) + "," + str(
                row[5]) + "," + str(row[6]) + "," + str(row[7])
        name, ext = file.split(".")

        for t in xrange(8):
            img = Image.open(path + file, 'r')
            # print t
            if t == 1:
                img = ImageOps.mirror(img)
            if t == 2:
                img = ImageOps.flip(img)
            if t == 3:
                img = ImageOps.mirror(img)
                img = ImageOps.flip(img)
            if t == 4:
                img = img.rotate(90)
            if t == 5:
                img = img.rotate(90)
                img = ImageOps.mirror(img)
            if t == 6:
                img = img.rotate(270)
            if t == 7:
                img = img.rotate(270)
                img = ImageOps.mirror(img)
            ## make the size match the required aspect ratio
            img_w, img_h = img.size
Ejemplo n.º 56
0
    def execute(self, image, query):
        return ImageOps.flip(image)
Ejemplo n.º 57
0
def export_images(data, path=".", only=None):
    if not os.path.exists(path):
        os.makedirs(path)

    # export chr
    outfile = "mm-chr.png"
    print("exporting", outfile)
    outfile = os.path.join(path, outfile)
    chr_image = produce_chr_sheet(data)
    chr_image.save(outfile)

    # export title
    outfile = "mm-title.png"
    print("exporting", outfile)
    outfile = os.path.join(path, outfile)
    produce_title_screen(data, 0).save(outfile)

    # export ending
    outfile = "mm-ending.png"
    print("exporting", outfile)
    outfile = os.path.join(path, outfile)
    produce_title_screen(data, 1).save(outfile)

    # export levels
    for level in data.levels:
        for hard in [False, True]:
            outfile = "mm-" + str(level.world_idx + 1) + "-" + str(
                level.world_sublevel + 1) + ("h" if hard else "") + ".png"
            print("exporting " + outfile + " ...")
            outfile = os.path.join(path, outfile)

            # create tiles per-palette per-level (could be optimized to per-world)
            minitile_images = produce_micro_tile_images(
                data, level.world, hard)

            # create object data images
            object_images = produce_object_images(data)

            w = 256
            h = 32 * constants.macro_rows_per_level
            img = Image.new('RGB', (w, h), color='black')
            draw = ImageDraw.Draw(img)

            tile_rows, macro_tile_idxs = level.produce_med_tiles(hard)

            y = h

            dangerous_tiles = [[
                False for y in range(constants.macro_rows_per_level * 4)
            ] for x in range(0x20)]

            for row in tile_rows:
                x = -16
                y -= 16
                for medtile_idx in row:
                    x += 16
                    offsets = [(0, 0), (8, 0), (0, 8), (8, 8)]
                    medtile = level.world.get_med_tile(medtile_idx)
                    palette_idx = level.world.get_med_tile_palette_idx(
                        medtile_idx, hard) % 4
                    if palette_idx is None:
                        continue
                    # draw subtiles
                    for i in range(4):
                        microtile_idx = level.world.get_micro_tile(
                            medtile[i], hard)

                        offx = offsets[i][0]
                        offy = offsets[i][1]
                        _x = x + offx
                        _y = y + offy

                        if (microtile_idx in constants.dangerous_micro_tiles):
                            dangerous_tiles[_x // 8][_y // 8] = True
                        else:
                            img.paste(
                                minitile_images[palette_idx][microtile_idx],
                                (_x, _y))

            # objects
            for obj in level.objects:
                if obj.drop:
                    continue
                x = obj.x * 8 - 4
                y = obj.y * 8
                text = hb(obj.gid)
                objimg = object_images[
                    obj.gid] if obj.gid < len(object_images) else None

                if obj.flipx and obj.flipy:
                    text += "+"
                elif obj.flipx:
                    text += "-"
                elif obj.flipy:
                    text += "|"
                if objimg is None:
                    draw.text((x, y),
                              text,
                              fill="white" if
                              data.get_object_name(obj.gid)[0:4] != "unk-"
                              else "red")
                else:
                    x += 4 - objimg.width // 2 + objimg._mm_offset[0]
                    y += 8 - objimg.height + objimg._mm_offset[1]
                    if not objimg._mm_hard or dangerous_tiles[obj.x][obj.y]:
                        paste_image = objimg
                        if obj.flipx:
                            paste_image = ImageOps.mirror(paste_image)
                        if obj.flipy:
                            paste_image = ImageOps.flip(paste_image)
                        img.paste(paste_image, (x, y))

            img.save(outfile)
Ejemplo n.º 58
0
def flipImage(arch):
    img = ImageOps.flip(arch)
    return img
Ejemplo n.º 59
def label_objects(win, occluders, sposs, isFamiliar, isVertical, side, label_snd, tracker=None):

    hand_img = hand_img_obj
    hand = visual.ImageStim(win)

    for i in range(2):
        occluders[i].setPos(sposs[i])

    # randomise or select positions:
    sposLabeled = sposs[side] if side is not None else None
    occluderLabeled = occluders[side] if side is not None else None
    sndLabel = sound.Sound(value=label_snd)
    sndLabel.setVolume(0.8)

    if isVertical: # in vertical positioning of occluders
        handX = random.choice([-c.OCC_SIZE, c.OCC_SIZE])
        handY = sposLabeled[1]

        angle = 0
        if handX > 0:
            hand_img = ImageOps.flip(hand_img)
            angle = 180

    else:
        handX, handY, angle = calculations.calculate_handPos(sposLabeled)
        if sposLabeled[0] < 0:
            hand_img = ImageOps.flip(hand_img)

    # calculate handshake steps
    shakeX, shakeY = calculations.calculate_handShake(angle)
    # rotate hand image with angle
    hand_img = hand_img.rotate(angle, expand=True)

    hand = visual.ImageStim(win, hand_img)

    occluderLabeled.lineColor = "red"
    occluderLabeled.lineWidth = 3

    event.clearEvents()

    if tracker:
        tracker.log("Labeling_{0}_STARTS".format("fam_object" if isFamiliar else "test_object"))

    frames = 420 if isFamiliar else 480
    for frameN in range(frames):

        occluders[0].draw()
        occluders[1].draw()

        # play labeling voice
        if frameN == 75:
            sndLabel.play()

        # pointing hand
        if frameN > 50:
            m = frameN%50
            if m < 25:
                hand.setPos((handX + shakeX*m, handY + shakeY*m))
                hand.draw()
            else:
                hand.setPos((handX + shakeX*(50-m), handY + shakeY*(50-m)))
                hand.draw()

        win.flip()

        _getKeypress(win, tracker=tracker)
Ejemplo n.º 60
-1
    def test_sanity(self):

        ImageOps.autocontrast(hopper("L"))
        ImageOps.autocontrast(hopper("RGB"))

        ImageOps.autocontrast(hopper("L"), cutoff=10)
        ImageOps.autocontrast(hopper("L"), ignore=[0, 255])

        ImageOps.autocontrast_preserve(hopper("L"))
        ImageOps.autocontrast_preserve(hopper("RGB"))

        ImageOps.autocontrast_preserve(hopper("L"), cutoff=10)
        ImageOps.autocontrast_preserve(hopper("L"), ignore=[0, 255])

        ImageOps.colorize(hopper("L"), (0, 0, 0), (255, 255, 255))
        ImageOps.colorize(hopper("L"), "black", "white")

        ImageOps.crop(hopper("L"), 1)
        ImageOps.crop(hopper("RGB"), 1)

        ImageOps.deform(hopper("L"), self.deformer)
        ImageOps.deform(hopper("RGB"), self.deformer)

        ImageOps.equalize(hopper("L"))
        ImageOps.equalize(hopper("RGB"))

        ImageOps.expand(hopper("L"), 1)
        ImageOps.expand(hopper("RGB"), 1)
        ImageOps.expand(hopper("L"), 2, "blue")
        ImageOps.expand(hopper("RGB"), 2, "blue")

        ImageOps.fit(hopper("L"), (128, 128))
        ImageOps.fit(hopper("RGB"), (128, 128))

        ImageOps.flip(hopper("L"))
        ImageOps.flip(hopper("RGB"))

        ImageOps.grayscale(hopper("L"))
        ImageOps.grayscale(hopper("RGB"))

        ImageOps.invert(hopper("L"))
        ImageOps.invert(hopper("RGB"))

        ImageOps.mirror(hopper("L"))
        ImageOps.mirror(hopper("RGB"))

        ImageOps.posterize(hopper("L"), 4)
        ImageOps.posterize(hopper("RGB"), 4)

        ImageOps.solarize(hopper("L"))
        ImageOps.solarize(hopper("RGB"))