Example No. 1
    def hflip(self, img, lab):
        # Flip image and label if necessary.
        # 3 classes
        if self.is_hflip3:
            if np.random.binomial(1, 0.5) != 1:
                return img, lab
            # Flip left and right labels.
            if lab == 0:
                lab = 2
            elif lab == 2:
                lab = 0
            # Do horizontal flip of the image.
            img = ImageOps.mirror(img)
        elif self.is_hflip5:
            if np.random.binomial(1, 0.5) != 1:
                return img, lab
            # Flip L_c and R_c or C_l and C_r.
            label_remap = {0: 4, 1: 3, 2: 2, 3: 1, 4: 0}
            lab = label_remap[lab]
            # Do horizontal flip of the image.
            img = ImageOps.mirror(img)
        elif self.is_hflip:
            # Just flip the image.
            if np.random.binomial(1, 0.5) == 1:
                img = ImageOps.mirror(img)
        return img, lab
Example No. 2
def compare(ab, ac, image_b, image_c, problem, options):
    if max(ac, ab) < .5:
        # no answer
        return max(ac, ab), (max(ac, ab), -1)
    if ab >= ac:
        return max(ac, ab), (searchForSolution(problem, ImageOps.mirror(image_c), options))
    return max(ac, ab), (searchForSolution(problem, ImageOps.mirror(image_b), options))
Example No. 3
    def make_mirror(self, image):

        if image == 1:
            self.img1.image = ImageOps.mirror(self.img1.image)
        elif image == 2:
            self.img2.image = ImageOps.mirror(self.img2.image)
        elif image == 3:
            self.img3.image = ImageOps.mirror(self.img3.image)
Example No. 4
def invertingAndCompare(problem, figures, options):
    av_invert = ImageOps.flip(um.openAndInvert(figures[0]))
    bv_invert = ImageOps.flip(um.openAndInvert(figures[1]))
    ah_invert = ImageOps.mirror(um.openAndInvert(figures[0]))
    dh_invert = ImageOps.mirror(um.openAndInvert(figures[3]))
    aTog = um.measurePixelDifference(av_invert, um.openAndInvert(figures[6]))
    bToh = um.measurePixelDifference(bv_invert, um.openAndInvert(figures[7]))
    aToc = um.measurePixelDifference(ah_invert, um.openAndInvert(figures[2]))
    dTof = um.measurePixelDifference(dh_invert, um.openAndInvert(figures[5]))
    direction = um.determineDirection(aToc, dTof, aTog, bToh, figures)
    return direction[1], um.searchForSolution(problem, direction[0], options)
Example No. 5
def espejo():
    # espejo = "mirror": flips the current image horizontally and refreshes the preview.

    global imSerializable
    label.destroy()
    grande=Image.open("grande.jpeg")
    grande=grande.resize((640,353))
    largo, ancho =imSerializable.size
    if largo<800 or ancho <800:
        imSerializable= ImageOps.mirror(imSerializable)
        refresca(imSerializable)
    else:
        imSerializable= ImageOps.mirror(imSerializable)
        refresca(grande)   
        tkMessageBox.showinfo("Message", "Image edited successfully\n\n * You can keep editing\n\n * To see the results, go to the containing folder")
Example No. 6
def checkWhileMirrored(problem, figure_a, figure_b, figure_c, options):
    image_a = um.openAndInvert(figure_a)
    image_b = um.openAndInvert(figure_b)
    image_c = um.openAndInvert(figure_c)

    # check based on reflection using ImageOps
    # http://pillow.readthedocs.io/en/3.1.x/reference/ImageOps.html
    # http://stackoverflow.com/questions/14182642/fliping-image-mirror-image
    # http://effbot.org/imagingbook/imageops.htm#tag-ImageOps.mirror
    # first get the pixel difference, then do the a -> b and a -> c comparison
    ab = um.measurePixelDifference(ImageOps.mirror(image_a), image_b)
    ac = um.measurePixelDifference(ImageOps.mirror(image_a), image_c)

    # get optimal max
    return um.compare(ab, ac, image_b, image_c, problem, options)
Example No. 7
def checkWhileMirrored(problem, figure_a, figure_b, figure_c, options):
    image_a = Image.open(figure_a.visualFilename).convert('L')
    image_b = Image.open(figure_b.visualFilename).convert('L')
    image_c = Image.open(figure_c.visualFilename).convert('L')

    # check based on reflection using ImageOps
    # http://pillow.readthedocs.io/en/3.1.x/reference/ImageOps.html
    # http://stackoverflow.com/questions/14182642/fliping-image-mirror-image
    # http://effbot.org/imagingbook/imageops.htm#tag-ImageOps.mirror
    # first get the pixel difference, then do the a -> b and a -> c comparison
    ab = Util.getPixelRatio(ImageOps.mirror(image_a), image_b)
    ac = Util.getPixelRatio(ImageOps.mirror(image_a), image_c)

    # get optimal max
    return Util.compare(ab, ac, image_b, image_c, problem, options)
Example No. 8
def test_sanity():

    ImageOps.autocontrast(lena("L"))
    ImageOps.autocontrast(lena("RGB"))

    ImageOps.autocontrast(lena("L"), cutoff=10)
    ImageOps.autocontrast(lena("L"), ignore=[0, 255])

    ImageOps.colorize(lena("L"), (0, 0, 0), (255, 255, 255))
    ImageOps.colorize(lena("L"), "black", "white")

    ImageOps.crop(lena("L"), 1)
    ImageOps.crop(lena("RGB"), 1)

    ImageOps.deform(lena("L"), deformer)
    ImageOps.deform(lena("RGB"), deformer)

    ImageOps.equalize(lena("L"))
    ImageOps.equalize(lena("RGB"))

    ImageOps.expand(lena("L"), 1)
    ImageOps.expand(lena("RGB"), 1)
    ImageOps.expand(lena("L"), 2, "blue")
    ImageOps.expand(lena("RGB"), 2, "blue")

    ImageOps.fit(lena("L"), (128, 128))
    ImageOps.fit(lena("RGB"), (128, 128))
    ImageOps.fit(lena("RGB").resize((1, 1)), (35, 35))

    ImageOps.flip(lena("L"))
    ImageOps.flip(lena("RGB"))

    ImageOps.grayscale(lena("L"))
    ImageOps.grayscale(lena("RGB"))

    ImageOps.invert(lena("L"))
    ImageOps.invert(lena("RGB"))

    ImageOps.mirror(lena("L"))
    ImageOps.mirror(lena("RGB"))

    ImageOps.posterize(lena("L"), 4)
    ImageOps.posterize(lena("RGB"), 4)

    ImageOps.solarize(lena("L"))
    ImageOps.solarize(lena("RGB"))

    success()
Example No. 9
def imgprocforsingle(imgsize, imgid):
    """processing image into multiple [forced] resolution"""
    imgdata = db.image.find_one({'_id': ObjectId(imgid)})
    buf = StringIO()
    path = os.getcwd()
    path = path + "/app/walls/img/"
    im = Image.open(path + imgdata['filename'])
    # crop
    size = imgdata['dims']
    im = im.crop((11, 15, size[0]-8, size[1]-18))
    # mirror
    im = ImageOps.mirror(im)
    # rotate
    # detail
    im = im.filter(ImageFilter.DETAIL)
    # edge enhance
    #im = im.filter(ImageFilter.EDGE_ENHANCE_MORE)
    # unsharp mask
    im = im.filter(ImageFilter.UnsharpMask)
    # smooth
    im = im.filter(ImageFilter.SMOOTH_MORE)
    # sharper
    im = im.filter(ImageFilter.SHARPEN)
    # RGB
    im = ImageEnhance.Color(im).enhance(1.2)
    # resize here
    dims = [int(i) for i in imgsize.split('x')]
    im.thumbnail((dims), Image.ANTIALIAS)
    im.save(buf, format="JPEG")
    imgstring = buf.getvalue()
    response = make_response(imgstring)
    response.headers['Content-Type'] = 'image/jpeg'
    response.headers['Cache-Control'] = 'public, max-age=43200'
    response.headers['Content-Disposition'] = 'inline; filename=sedecor.jpg'
    return response
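On Python 3 the in-memory JPEG buffer above would use io.BytesIO rather than StringIO; a minimal sketch of just that part, with a placeholder file name, producing the bytes that would then go into make_response() as in the example:

import io
from PIL import Image, ImageOps

buf = io.BytesIO()
im = ImageOps.mirror(Image.open("wall.jpg"))  # placeholder path
im.save(buf, format="JPEG")
jpeg_bytes = buf.getvalue()                   # pass to make_response(...) as above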
Example No. 10
def apply_mirror(pixbuf):
    '''
    Mirror the pixbuf left to right.
    '''
    width,height = pixbuf.get_width(),pixbuf.get_height() 
    y = ImageOps.mirror(Image.frombytes(K.ImageConstants.RGB_SHORT_NAME,(width,height),pixbuf.get_pixels() ))
    return I.fromImageToPixbuf(y)
Example No. 11
    def processImage(self, fileName):
        imPath = os.path.join(self.picdir,fileName)
        print ('imPath', imPath)
        im = Image.open(imPath)
        w,h = im.size
        im = ImageOps.mirror(im)
        pix = im.load()
        output = []
        for i in range(64*32):
            output.append(0)
        if w != 32:
            print('invalid image width, should be 32 px')
        if h != 64:
            print('invalid image height, should be 64 px')
        for y in range(64):
            for x in range(32):
                pixelVal = pix[x,y]
                val = int(round(self.compute(pixelVal)))
                output[y*32+x] = val

        frameName = fileName.split('.')[0]
        self.outputText += 'static unsigned int '+frameName+'[2048] =  {'
        for val in output:
            self.outputText += str(val)+', '

        self.outputText = self.outputText[:-2]+'};\n'
Example No. 12
def mirror_image(image_path):
    img = Image.open(image_path)
    mirror_img = ImageOps.mirror(img)
    image_name = image_path.split('/')[-1]
    fh = 'data/' + split
    fh = os.path.join(fh, image_name)
    mirror_img.save(fh, "JPEG")
Example No. 13
def generate_tile_image(img, tile):
	# tile the image horizontally (x2 is enough),
	# some cards need to wrap around to create a bar (e.g. Muster for Battle),
	# also discard alpha channel (e.g. Soulfire, Mortal Coil)
	tiled = Image.new("RGB", (img.width * 2, img.height))
	tiled.paste(img, (0, 0))
	tiled.paste(img, (img.width, 0))

	x, y, width, height = get_rect(
		tile["m_TexEnvs"]["_MainTex"]["m_Offset"]["x"],
		tile["m_TexEnvs"]["_MainTex"]["m_Offset"]["y"],
		tile["m_TexEnvs"]["_MainTex"]["m_Scale"]["x"],
		tile["m_TexEnvs"]["_MainTex"]["m_Scale"]["y"],
		tile["m_Floats"].get("_OffsetX", 0.0),
		tile["m_Floats"].get("_OffsetY", 0.0),
		tile["m_Floats"].get("_Scale", 1.0),
		img.width
	)

	bar = tiled.crop((x, y, x + width, y + height))
	bar = ImageOps.flip(bar)
	# negative x scale means horizontal flip
	if tile["m_TexEnvs"]["_MainTex"]["m_Scale"]["x"] < 0:
		bar = ImageOps.mirror(bar)

	return bar.resize((OUT_WIDTH, OUT_HEIGHT), Image.LANCZOS)
Example No. 14
def determineDirection(a, b, c, d, figures):
    if min(a, b) > min(c, d):
        result = ImageOps.mirror(openAndInvert(figures[6]))
        optimal = min(a, b)
    else:
        result = ImageOps.flip(openAndInvert(figures[2]))
        optimal = min(c, d)
    return result, optimal
Example No. 15
def _mirror_images(process_me):
    """
    Mirrors the given images horizontally.
    """
    results = []
    for orig_im in process_me:
        results.append(orig_im)
        results.append(ImageOps.mirror(orig_im))
    return results
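A hypothetical usage sketch for the helper above, with placeholder file names: it doubles a small list of PIL images by appending a mirrored copy after each original.

from PIL import Image

originals = [Image.open(p) for p in ["a.png", "b.png"]]  # placeholder paths
augmented = _mirror_images(originals)
assert len(augmented) == 2 * len(originals)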
Example No. 16
def gen_trans(imgfile,trans,outfile):
    for trans in trans.split('*'):
        image = Image.open(imgfile)
        w,h = image.size
        if trans=="plain":
            image.save(outfile,"JPEG",quality=100)
        elif trans=="flip":
            ImageOps.mirror(image).save(outfile,"JPEG",quality=100)
        elif trans.startswith("crop"):
            c = int(trans[4:])
            image.crop(gen_crop(c,w,h)).save(outfile,"JPEG",quality=100)
        elif trans.startswith("h**o"):
            c = int(trans[4:])
            image.transform((w,h),Image.QUAD,
                            gen_homo(c,w,h),
                            Image.BILINEAR).save(outfile,"JPEG",quality=100)
        elif trans.startswith("jpg"):
            image.save(outfile,quality=int(trans[3:]))
        elif trans.startswith("scale"):
            scale = SCALES[int(trans.replace("scale",""))]
            image.resize((int(w/scale),int(h/scale)),Image.BILINEAR).save(outfile,"JPEG",quality=100)
        elif trans.startswith('color'):
            (dr,dg,db) = gen_colorimetry(int(trans[5]))
            table = numpy.tile(numpy.arange(256),(3))
            table[   :256]+= dr
            table[256:512]+= dg
            table[512:   ]+= db
            image.convert("RGB").point(table).save(outfile,"JPEG",quality=100)
        elif trans.startswith('rot-'):
            angle =int(trans[4:])
            for i in range(angle):
                image = rot(image,-1,outfile)
            image.save(outfile,"JPEG",quality=100)
        elif trans.startswith('rot'):
            angle =int(trans[3:])
            for i in range(angle):
                image = rot(image,1,outfile)
            image.save(outfile,"JPEG",quality=100)
        elif trans.startswith('corner'):
            i = int(trans[6:])
            image.crop(gen_corner(i,w,h)).save(outfile,"JPEG",quality=100)
        else:
            assert False, "Unrecognized transformation: "+trans
        imgfile = outfile # in case we iterate
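Transformations are chained with '*', and each pass re-reads the previous pass's output because imgfile is rebound to outfile; a hypothetical invocation with placeholder paths:

# Flip the image horizontally, then re-encode the flipped result at JPEG quality 75.
gen_trans("input.jpg", "flip*jpg75", "output.jpg")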
Example No. 17
    def MirrorImage_windows(self):
        #  print ("8")
        global img
        global PILimg
        global edit

        print (PILimg)
        PILimg=ImageOps.mirror(PILimg)# convert image to a Mirror Image
        PILimg.show()
        PILimg.save("/Users/DANADEVOST/Desktop/PYTHON_FOLDER/GIF_IMAGES/testMIRROR.gif", format=None)
Example No. 18
def mirror_and_export(im, r, c, w, h, out_file):
    # exploit mirror symmetry of the face

    # flip image
    im = np.asarray(ImageOps.mirror(Image.fromarray(im)))

    # flip column coordinate of the object
    c = im.shape[1] - c

    # export
    export(im, r, c, w, h, out_file)
Example No. 19
    def operations(self):
        """Perform enhancements from the ImageOps class. """
        if self.effect == 'flip':
            to_save = ImageOps.flip(self.image)

        if self.effect == 'mirror':
            to_save = ImageOps.mirror(self.image)
        if self.effect == 'grayscale':
            to_save = ImageOps.grayscale(self.image)
        to_save.save(self.new_file)
        return self.return_path
Example No. 20
  def process(self, image):
    """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
    BaseFilter.process(self, image)
    newImage = ImageOps.mirror(image)
    if not self.both:
      return newImage
    else:
      return [image, newImage]
Example No. 21
def crop_mirror(original, left, upper, right, lower, dir_path, prefix, frame_path):
  # crop
  cropped_frame = original.crop((left, upper, right, lower))
  crop_path = os.path.join(dir_path, prefix + frame_path)
  cropped_frame.save(crop_path, quality=95)
  assert cropped_frame.size == (224, 224)

  # mirror
  mirrored_frame = ImageOps.mirror(cropped_frame)
  mirror_path = os.path.join(dir_path, prefix + "m_" + frame_path)
  mirrored_frame.save(mirror_path, quality=95)
  assert mirrored_frame.size == (224, 224)
Example No. 22
    def resize_image(self, src, dst, max_size, bigger_panoramas=True, preserve_exif_data=False, exif_whitelist={}):
        """Make a copy of the image in the requested size."""
        if not Image or os.path.splitext(src)[1] in ['.svg', '.svgz']:
            self.resize_svg(src, dst, max_size, bigger_panoramas)
            return
        im = Image.open(src)
        size = w, h = im.size
        if w > max_size or h > max_size:
            size = max_size, max_size

            # Panoramas get larger thumbnails because they look *awful*
            if bigger_panoramas and w > 2 * h:
                size = min(w, max_size * 4), min(w, max_size * 4)

        try:
            exif = piexif.load(im.info["exif"])
        except KeyError:
            exif = None
        # Inside this if, we can manipulate exif as much as
        # we want/need and it will be preserved if required
        if exif is not None:
            # Rotate according to EXIF
            value = exif['0th'].get(piexif.ImageIFD.Orientation, 1)
            if value in (3, 4):
                im = im.rotate(180)
            elif value in (5, 6):
                im = im.rotate(270)
            elif value in (7, 8):
                im = im.rotate(90)
            if value in (2, 4, 5, 7):
                im = ImageOps.mirror(im)
            exif['0th'][piexif.ImageIFD.Orientation] = 1

        try:
            im.thumbnail(size, Image.ANTIALIAS)
            if exif is not None and preserve_exif_data:
                # Put right size in EXIF data
                w, h = im.size
                if '0th' in exif:
                    exif["0th"][piexif.ImageIFD.ImageWidth] = w
                    exif["0th"][piexif.ImageIFD.ImageLength] = h
                if 'Exif' in exif:
                    exif["Exif"][piexif.ExifIFD.PixelXDimension] = w
                    exif["Exif"][piexif.ExifIFD.PixelYDimension] = h
                # Filter EXIF data as required
                exif = self.filter_exif(exif, exif_whitelist)
                im.save(dst, exif=piexif.dump(exif))
            else:
                im.save(dst)
        except Exception as e:
            self.logger.warn("Can't process {0}, using original "
                             "image! ({1})".format(src, e))
            utils.copy_file(src, dst)
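As an aside, recent Pillow releases (6.0 and later) ship a helper that applies the same EXIF Orientation normalisation in a single call; a minimal sketch with a placeholder path, not part of the original method:

from PIL import Image, ImageOps

im = Image.open("photo.jpg")          # placeholder path
im = ImageOps.exif_transpose(im)      # rotate/mirror according to the Orientation tag
im.thumbnail((1024, 1024))
im.save("thumb.jpg")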
Example No. 23
    def gen_key_img(mappings, size=0, color=None):
        base_img = Image.open(BytesIO(KEY_IMAGE_DATA))

        if isinstance(size, tuple):
            key_width, key_height = size
        else:
            key_width, key_height = KEY_SIZES[size]
        center_x = key_width // 2
        top = base_img.crop((16, 0, 32, 32))
        bottom = base_img.crop((16, 32, 32, 64))
        left = base_img.crop((0, 16, 16, 32))
        top_left = base_img.crop((0, 0, 32, 32))
        bottom_left = base_img.crop((0, 32, 32, 64))

        key_img = Image.new('RGBA', size=(key_width, key_height))
        for offset in range(32, center_x, 16):
            paste(key_img, top, (offset, 0))
            paste(key_img, bottom, (offset, -32))
        for offset in range(32, key_height - 16, 16):
            paste(key_img, left, (0, offset))

        paste(key_img, top_left, (0, 0))
        paste(key_img, bottom_left, (0, -32))
        mirrored = key_img.crop((0, 0, center_x, key_height))
        mirrored = ImageOps.mirror(mirrored)
        paste(key_img, mirrored, (center_x, 0))

        bg_img = Image.new('RGBA', size=(key_width, key_height))
        draw = ImageDraw.Draw(bg_img)
        if color:
            draw.rectangle([(8, 8), (key_width - 8, key_height - 8)], color)

        for (rel_x, rel_y, color), mapping in zip(KEY_TEXT_REL_POS, mappings):
            key_text = mapping.raw_code
            if key_text.isupper() and len(key_text) > 1 and key_text.lower() not in KEY_TEXT_SYMBOLS:
                print(key_text.lower())
            key_text = KEY_TEXT_SYMBOLS.get(key_text.lower(), key_text)
            text_width, text_height = draw.textsize(key_text, font=font)
            text_x_pos = key_width * rel_x - text_width // 2
            text_y_pos = key_height * rel_y - text_height // 2

            draw.text(
                (text_x_pos, text_y_pos),
                key_text,
                font=font,
                fill=color,
            )

        del draw
        paste(bg_img, key_img)

        return bg_img
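The mirroring step above builds a symmetric key by cropping the left half and pasting its mirror onto the right; a minimal standalone sketch of that trick, with a placeholder file name:

from PIL import Image, ImageOps

img = Image.open("key.png")                      # placeholder path
w, h = img.size
left = img.crop((0, 0, w // 2, h))
img.paste(ImageOps.mirror(left), (w - w // 2, 0))
img.save("key_symmetric.png")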
Example No. 24
		def applyflip(self, image):
			if self.rotationflip == 0:
				return image
			elif self.rotationflip == 1:
				return image.rotate(90)
			elif self.rotationflip == 2:
				return image.rotate(180)
			elif self.rotationflip == 3:
				return image.rotate(270)
			elif self.rotationflip == 4:
				return ImageOps.mirror(image)
			elif self.rotationflip == 5:
				return ImageOps.flip(image)
Example No. 25
def flip_horizontal(images, uv_pixel_positions):
    """
    Flip the images and the pixel positions horizontally (flip left/right).

    See random_image_and_indices_mutation() for documentation of args and return types.
    """

    mutated_images = [ImageOps.mirror(image) for image in images]
    u_pixel_positions = uv_pixel_positions[0]
    # Mirror the u (column) coordinates around the image width.
    mutated_u_pixel_positions = (images[0].width - 1) - u_pixel_positions
    mutated_uv_pixel_positions = (mutated_u_pixel_positions, uv_pixel_positions[1])
    return mutated_images, mutated_uv_pixel_positions
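A quick usage sketch of the coordinate remapping above, with hypothetical 640x480 frames and NumPy coordinate arrays; column 0 maps to width-1 and vice versa:

import numpy as np
from PIL import Image

images = [Image.new("RGB", (640, 480)) for _ in range(2)]
u = np.array([0, 100, 639])
v = np.array([10, 20, 30])

flipped_images, (u_flipped, v_unchanged) = flip_horizontal(images, (u, v))
print(u_flipped)  # [639 539   0]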
Example No. 26
def exportmirrored(im, r, c, s, folder, id, list):
    #
    # exploit mirror symmetry of the face
    #

    # flip image
    im = numpy.asarray(ImageOps.mirror(Image.fromarray(im)))

    # flip column coordinate of the object
    c = im.shape[1] - c

    # export
    export(im, r, c, s, folder, id, list)
Example No. 27
    def capturePreview(self):

        if self._is_preview:
            while self._comm.empty(Workers.CAMERA):
                picture = self._cap.getPreview()
                if self._rotation is not None:
                    picture = picture.transpose(self._rotation)
                picture = picture.resize(self._pic_dims.previewSize)
                picture = ImageOps.mirror(picture)
                byte_data = BytesIO()
                picture.save(byte_data, format='jpeg')
                self._comm.send(Workers.GUI,
                                StateMachine.CameraEvent('preview', byte_data))
Example No. 28
    def handle(self, header, data):
        # PERFORM Cognitive Assistant Processing
        sys.stdout.write("processing: ")
        sys.stdout.write("%s\n" % header)

        image = Image.open(io.BytesIO(data))
        mirror_image=ImageOps.mirror(image)
        
        mirror_output = StringIO.StringIO()
        mirror_image.save(mirror_output, 'JPEG')

        jpeg_image = mirror_output.getvalue()
        result = base64.b64encode(jpeg_image)
        mirror_output.close()
        return result
Example No. 29
    def horizontalReflection(image1, image2):
        neg_image1 = convertImage(image1)
        neg_image2 = convertImage(image2)

        reflected_image1 = ImageOps.mirror(neg_image1)

        diff = ImageChops.difference(reflected_image1, neg_image2)
        stat = ImageStat.Stat(diff)
        mean = stat.mean[0]

        if mean < 1.0:
            print ("Horizontal Reflection Function: Reflected Horizontally")
            return True
        else:
            print ("Horizontal Reflection Function: Not reflected horizontally")
            return False
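A standalone sketch of the same mirror test for two plain PIL images of equal size and mode (without the convertImage helper): mirror one image, take the pixel-wise difference, and threshold its mean.

from PIL import ImageChops, ImageOps, ImageStat

def is_horizontal_mirror(img1, img2, tol=1.0):
    # Mean absolute pixel difference between mirror(img1) and img2.
    diff = ImageChops.difference(ImageOps.mirror(img1), img2)
    return ImageStat.Stat(diff).mean[0] < tol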
Example No. 30
 def craft(self, blob, doc_id, *args, **kwargs):
     raw_img = self.load_image(blob)
     _img = ImageOps.mirror(raw_img)
     img = self.restore_channel_axis(np.asarray(_img))
     return [{'doc_id': doc_id, 'blob': img.astype('float32')}, ]
Example No. 31
def updateDisplay(config, pricestack, whichcoin):

    pricenow = pricestack[-1]
    currencythumbnail = 'currency/' + whichcoin + '.bmp'
    tokenimage = Image.open(os.path.join(picdir, currencythumbnail))
    sparkbitmap = Image.open(os.path.join(picdir, 'spark.bmp'))
    if config['ticker']['hidden'] == True:
        if config['display']['orientation'] == 0 or config['display'][
                'orientation'] == 180:
            epd = epd2in7.EPD()
            epd.Init_4Gray()
            image = Image.new('L', (epd.width, epd.height),
                              255)  # 255: clear the image with white
            image.paste(tokenimage, (10, 20))
            draw = ImageDraw.Draw(image)
            draw.text((5, 200), "1 " + whichcoin, font=fonthiddenprice, fill=0)
            draw.text((0, 10),
                      str(time.strftime("%c")),
                      font=font_date,
                      fill=0)
            if config['display']['orientation'] == 180:
                image = image.rotate(180, expand=True)

        if config['display']['orientation'] == 90 or config['display'][
                'orientation'] == 270:
            epd = epd2in7.EPD()
            epd.Init_4Gray()
            image = Image.new('L', (epd.height, epd.width),
                              255)  # 255: clear the image with white
            image.paste(tokenimage, (0, 0))
            draw = ImageDraw.Draw(image)
            draw.text((20, 120), "1 " + whichcoin, font=fontHorizontal, fill=0)
            draw.text((85, 5),
                      str(time.strftime("%c")),
                      font=font_date,
                      fill=0)
            if config['display']['orientation'] == 270:
                image = image.rotate(180, expand=True)
    #       This is a hack to deal with the mirroring that goes on in 4Gray Horizontal
            image = ImageOps.mirror(image)
    else:
        # Get the numbers
        pricechange = str("%+d" % round(
            (pricestack[-1] - pricestack[0]) / pricestack[-1] * 100, 2)) + "%"
        if pricenow > 1000:
            pricenowstring = format(int(pricenow), ",")
        else:
            pricenowstring = str(float('%.5g' % pricenow))

        if config['display']['orientation'] == 0 or config['display'][
                'orientation'] == 180:
            epd = epd2in7.EPD()
            epd.Init_4Gray()
            image = Image.new('L', (epd.width, epd.height),
                              255)  # 255: clear the image with white
            draw = ImageDraw.Draw(image)
            draw.text((110, 80), "7day :", font=font_date, fill=0)
            draw.text((110, 95), pricechange, font=font_date, fill=0)
            # Print price to 5 significant figures
            draw.text((5, 200), "$" + pricenowstring, font=font, fill=0)
            draw.text((0, 10),
                      str(time.strftime("%c")),
                      font=font_date,
                      fill=0)
            image.paste(tokenimage, (10, 25))
            image.paste(sparkbitmap, (10, 125))
            if config['display']['orientation'] == 180:
                image = image.rotate(180, expand=True)

        if config['display']['orientation'] == 90 or config['display'][
                'orientation'] == 270:
            epd = epd2in7.EPD()
            epd.Init_4Gray()
            image = Image.new('L', (epd.height, epd.width),
                              255)  # 255: clear the image with white
            draw = ImageDraw.Draw(image)
            draw.text((100, 100),
                      "7day : " + pricechange,
                      font=font_date,
                      fill=0)
            # Print price to 5 significant figures
            draw.text((20, 120),
                      "$" + pricenowstring,
                      font=fontHorizontal,
                      fill=0)
            image.paste(sparkbitmap, (80, 50))
            image.paste(tokenimage, (0, 0))
            draw.text((85, 5),
                      str(time.strftime("%c")),
                      font=font_date,
                      fill=0)
            if config['display']['orientation'] == 270:
                image = image.rotate(180, expand=True)
    #       This is a hack to deal with the mirroring that goes on in 4Gray Horizontal
            image = ImageOps.mirror(image)

#   If the display is inverted, invert the image using ImageOps
    if config['display']['inverted'] == True:
        image = ImageOps.invert(image)


#   Send the image to the screen
    epd.display_4Gray(epd.getbuffer_4Gray(image))
    epd.sleep()
Example No. 32
 def __call__(self, img, anno_class_img):
     if np.random.randint(2):
         img = ImageOps.mirror(img)
         anno_class_img = ImageOps.mirror(anno_class_img)
     return img, anno_class_img
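A hypothetical standalone version of the paired transform above: the image and its annotation are flipped together so the pixels stay aligned.

import numpy as np
from PIL import ImageOps

def random_hflip_pair(img, anno, p=0.5):
    # Flip both images with probability p, or return them unchanged.
    if np.random.rand() < p:
        img = ImageOps.mirror(img)
        anno = ImageOps.mirror(anno)
    return img, anno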
Example No. 33

i = 0
files2 = [ 'data/images/Covid/'+f for f in listdir('data/images/Covid') if isfile(join('data/images/Covid', f))]
files3 = [ 'data/images/NORMAL/'+f for f in listdir('data/images/NORMAL') if isfile(join('data/images/NORMAL', f))]
j = 0
k = 0


for f in files:
    try:
        pic = Image.open(f)
        im = ImageOps.fit(pic,(128,128),method=3, bleed=0.0, centering=(0.5, 0.5))
        imArray1 = np.array(im)

        im = ImageOps.mirror(im)
        imArray2 = np.array(im)

        im = ImageOps.fit(pic,(128,128),method=3, bleed=0.0, centering=(0.5, 0.5))
        im = ImageOps.solarize(im,threshold=128)

        imArray3 = np.array(im)

        im = ImageOps.mirror(im)
        imArray4 = np.array(im)

    except Exception:
        print('error image')
        pass
    else:
        if not(f in list):
Example No. 34
def mirrorImage(initialPath):
    im = Image.open(initialPath)
    return ImageOps.mirror(im)
Example No. 35
    def _draw_image(self, gcode, include_rapids):
        # size of the image (should be based on the max path point)
        scale = self._image_size()

        if self.transparency:
            img = Image.new('RGBA', self.image_size, (255, 0, 0, 0))
        else:
            img = Image.new('RGB', self.image_size, self.background)

        draw = ImageDraw.Draw(img)

        # draw centreline
        cl_y = (self.image_size[1] / 2 - self._min_y)
        start = (self.margin * 0.25, cl_y)
        end = (self.image_size[0] - self.margin * 0.5, cl_y)
        draw.line([start, end],
                  fill=(252, 226, 5),
                  width=self.line_thickness * 2)

        for idx, command in enumerate(gcode):

            if idx < len(gcode) - 1:

                movement = command.get_movement()
                if movement not in ["G0", "G1", "G2", "G3"]:
                    continue

                if movement == "G0" and not include_rapids:
                    continue

                params = command.get_params()
                prev_params = gcode[idx - 1].get_params()

                line_colour = self._get_line_colour(movement)

                x_start = (prev_params['Z'] -
                           self._min_x) * scale + self.margin / 2
                y_start = (prev_params['X'] -
                           self._min_y) * scale + self.margin / 2
                x_end = (params['Z'] - self._min_x) * scale + self.margin / 2
                y_end = (params['X'] - self._min_y) * scale + self.margin / 2

                if movement in ["G0", "G1"]:
                    draw.line([(x_start, y_start), (x_end, y_end)],
                              fill=line_colour,
                              width=self.line_thickness)

                if movement in ["G2", "G3"]:
                    x_centre = (prev_params['Z'] + params['K'] -
                                self._min_x) * scale + self.margin / 2
                    y_centre = (prev_params['X'] + params['I'] -
                                self._min_y) * scale + self.margin / 2

                    distance = self._get_distance(x_centre, y_centre, x_start,
                                                  y_start)

                    start_angle = self._get_angle(x_centre, y_centre, x_start,
                                                  y_start)
                    end_angle = self._get_angle(x_centre, y_centre, x_end,
                                                y_end)
                    boundbox = [(x_centre - distance, y_centre - distance),
                                (x_centre + distance, y_centre + distance)]
                    if movement == "G2":
                        draw.arc(boundbox,
                                 end_angle,
                                 start_angle,
                                 fill=line_colour,
                                 width=self.line_thickness)

                    if movement == "G3":
                        draw.arc(boundbox,
                                 start_angle,
                                 end_angle,
                                 fill=line_colour,
                                 width=self.line_thickness)

        # Mirror because it's drawn flipped.
        if self.mirror_image:
            img = ImageOps.mirror(img)
        elif self.flip_image:
            img = ImageOps.flip(img)
        else:
            img = ImageOps.flip(img)

        if self.transparency:
            img.save(self.file_location + self.image_name + '.png')
        else:
            img.save(self.file_location + self.image_name + self.image_type)
Example No. 36
        origin = Image.open('body_light.png')
    else:
        origin = Image.open('body.png')

    if flinz:
        print("Apply LINZ Correction")
        pantie = pantie.resize((629, 407))
        origin.paste(pantie, (1017, 828), pantie)
        origin_transparent = Image.new("RGBA", (origin.size))
        origin_transparent.paste(pantie, (1017, 828), pantie)
        origin_transparent.save('patched_transparent.png')
    elif fnbody:
        print("!nbody_mode!")
        cut = 7
        right_pantie = pantie.crop((cut, 0, pantie.size[0], pantie.size[1]))
        left_pantie = ImageOps.mirror(right_pantie)
        npantie = Image.new("RGBA",
                            (right_pantie.size[0] * 2, right_pantie.size[1]))
        npantie.paste(right_pantie, (right_pantie.size[0], 0))
        npantie.paste(left_pantie, (0, 0))
        origin.paste(npantie, (403, 836), npantie)

        origin_transparent = Image.new("RGBA", (origin.size))
        origin_transparent.paste(npantie, (403, 836), npantie)
        origin_transparent.save('patched_transparent.png')
    elif flight:
        print("Apply Quiche_Light Conversion")
        pantie = pantie.resize((236, 157))
        origin.paste(pantie, (532, 385), pantie)
        origin_transparent = Image.new("RGBA", (origin.size))
        origin_transparent.paste(pantie, (532, 385), pantie)
Example No. 37
def imageTransforms(image, type=None, param=0.1):
    """get transformed image
    
    Arguments:
        image {PIL.Image|str} -- input image or url
    
    Keyword Arguments:
        type {str} -- whether image is a url or PIL.Image (default: {None})
        param {float} -- parameter for controlling different transforms (default: {0.1})
    
    Returns:
        [type] -- [description]
    """
    from PIL import Image, ImageFilter, ImageOps, ImageChops
    from torchvision.transforms import Grayscale, ColorJitter
    from random import uniform
    # PIL can convert across formats
    # PIL is a good library brent 11/10
    # https://pillow.readthedocs.io/en/3.3.x/reference/Image.html

    # All ImageFilters: ['BLUR', 'CONTOUR', 'DETAIL', 'EDGE_ENHANCE', 'EDGE_ENHANCE', 'EDGE_ENHANCE_MORE', 'EMBOSS', 'FIND_EDGES', 'SMOOTH', 'SMOOTH_MORE', 'SHARPEN', 'GaussianBlur', 'UnsharpMask', 'RankFilter', 'MedianFilter', 'MinFilter', 'MaxFilter', 'ModeFilter']
    imagefilters = [
        'BLUR', 'CONTOUR', 'DETAIL', 'EDGE_ENHANCE', 'EDGE_ENHANCE',
        'EDGE_ENHANCE_MORE', 'EMBOSS', 'FIND_EDGES', 'SMOOTH', 'SMOOTH_MORE',
        'SHARPEN', 'GaussianBlur(radius=param)', 'UnsharpMask', 'RankFilter',
        'MedianFilter(size=int(param))', 'MinFilter(size=int(param))',
        'MaxFilter(size=int(param))', 'ModeFilter(size=int(param))'
    ]

    # imageops as preprocessing: autocontrast, colorize, equalize
    imageops = ['invert', 'mirror', 'posterize', 'solarize']

    # ImageEnhance?

    # torchvision.transforms : https://pytorch.org/docs/stable/torchvision/transforms.html
    # TODO: try other torchvision transforms
    torchtransforms = ['ColorJitter', 'Grayscale']

    if type == 'crop':
        bounds = list(image.getbbox())
        # sides = sample(range(2, 4), 2)
        bounds[2] = int(bounds[2] * (1 - (uniform(0, param))))
        bounds[3] = int(bounds[3] * (1 - (uniform(0, param))))
        image = image.crop(tuple(bounds))
        return image
    elif type == 'rotate':
        # angle = sample(range(30, 330, 30), 1)[0]
        angle = param
        image = image.rotate(angle)
        return image
    elif type in imagefilters:
        image = image.filter(eval(f'ImageFilter.{type}'))
        return image
    elif type == 'invert':
        image = ImageOps.invert(image)
        return image
    elif type == 'grayscale':
        image = Grayscale(num_output_channels=3)(image)
        return image
    elif type == 'mirror':
        image = ImageOps.mirror(image)
        return image
    elif type == 'color_jitter':
        # torchvision's ColorJitter already returns a PIL.Image, so no conversion is needed
        image = ColorJitter()(image)
        return image
    else:
        print(f'{type} transform not found')
        return image
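Hypothetical calls to the dispatcher above, assuming img is a PIL.Image loaded elsewhere; note that param means different things for different transforms:

mirrored = imageTransforms(img, type='mirror')
rotated = imageTransforms(img, type='rotate', param=30)  # here param is the angle in degrees
cropped = imageTransforms(img, type='crop', param=0.1)   # trims up to 10% off the right/bottom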
Example No. 38
def mirror_image(image):
    """
    Returns a copy of the given image flipped horizontally.
    """
    return ImageOps.mirror(image)
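A minimal usage sketch for the wrapper above, with placeholder file names, contrasting mirror (left/right) with flip (top/bottom):

from PIL import Image, ImageOps

img = Image.open("cat.jpg")                     # placeholder path
ImageOps.mirror(img).save("cat_mirrored.jpg")   # left/right flip
ImageOps.flip(img).save("cat_flipped.jpg")      # top/bottom flip, for comparison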
Example No. 39
    def solve2x2(self, problem):

        retVal = -1

        imageDictionary = {}
        figuresDictionary = {}
        for name in ['A', 'B', 'C', '1', '2', '3', '4', '5', '6']:
            imageDictionary[name] = Image.open(
                problem.figures[name].visualFilename)
            figuresDictionary[name] = toNumpyMatrix(imageDictionary[name])

        solutionsDictionary = {
            k: figuresDictionary[k]
            for k in figuresDictionary.keys() & {'1', '2', '3', '4', '5', '6'}
        }

        hMirrorA = toNumpyMatrix(ImageOps.mirror(imageDictionary['A']))
        hMirrorB = toNumpyMatrix(ImageOps.mirror(imageDictionary['B']))
        hMirrorC = toNumpyMatrix(ImageOps.mirror(imageDictionary['C']))
        vMirrorA = toNumpyMatrix(ImageOps.flip(imageDictionary['A']))
        vMirrorB = toNumpyMatrix(ImageOps.flip(imageDictionary['B']))
        vMirrorC = toNumpyMatrix(ImageOps.flip(imageDictionary['C']))

        if retVal == -1:  # A == B
            retVal = equate2x2(figuresDictionary, solutionsDictionary,
                               figuresDictionary['A'], figuresDictionary['B'],
                               figuresDictionary['C'])
        if retVal == -1:  # A == C
            retVal = equate2x2(figuresDictionary, solutionsDictionary,
                               figuresDictionary['A'], figuresDictionary['C'],
                               figuresDictionary['B'])
        if retVal == -1:  # A horizontal mirror B
            retVal = equate2x2(figuresDictionary, solutionsDictionary,
                               hMirrorA, figuresDictionary['B'], hMirrorC)
        if retVal == -1:  # A horizontal mirror C
            retVal = equate2x2(figuresDictionary, solutionsDictionary,
                               hMirrorA, figuresDictionary['C'], hMirrorB)
        if retVal == -1:  # A vertical mirror B
            retVal = equate2x2(figuresDictionary, solutionsDictionary,
                               vMirrorA, figuresDictionary['B'], vMirrorC)
        if retVal == -1:  # A vertical mirror C
            retVal = equate2x2(figuresDictionary, solutionsDictionary,
                               vMirrorA, figuresDictionary['C'], vMirrorB)
        if retVal == -1:  # A is outline of B solid
            retVal = equate2x2(figuresDictionary, solutionsDictionary,
                               figuresDictionary['A'], figuresDictionary['B'],
                               figuresDictionary['C'], 0.96, 0.90)
        if retVal == -1:  # A is outline of C solid
            retVal = equate2x2(figuresDictionary, solutionsDictionary,
                               figuresDictionary['A'], figuresDictionary['C'],
                               figuresDictionary['B'], 0.96, 0.90)
        if retVal == -1:  # Rotate image
            degrees = [-45, -90, -135, -180, -225, -270, -315]
            background = Image.new('RGBA', imageDictionary['A'].size, 'white')

            for angle in degrees:
                #print ("Rotate Image ", angle, " degrees")

                RotateA = imageDictionary['A'].rotate(angle)
                RotateB = imageDictionary['B'].rotate(angle)
                RotateC = imageDictionary['C'].rotate(angle)

                rotateA = toNumpyMatrix(
                    Image.composite(RotateA, background, RotateA))
                rotateB = toNumpyMatrix(
                    Image.composite(RotateB, background, RotateB))
                rotateC = toNumpyMatrix(
                    Image.composite(RotateC, background, RotateC))

                if retVal == -1:  # A horizontal mirror B
                    similarA, meanA, rmsA, bboxA = diffStatistics(
                        rotateA, figuresDictionary['B'])
                    #print ("A rotated ",angle," degrees to B mean=",meanA," rms=",rmsA," diff=",abs(meanA-rmsA))
                    retVal = equate2x2(figuresDictionary, solutionsDictionary,
                                       rotateA, figuresDictionary['B'],
                                       rotateC)
                if retVal == -1:  # A horizontal mirror C
                    similarA, meanA, rmsA, bboxA = diffStatistics(
                        rotateA, figuresDictionary['C'])
                    #print ("A rotated ",angle," degrees to C mean=",meanA," rms=",rmsA," diff=",abs(meanA-rmsA))
                    retVal = equate2x2(figuresDictionary, solutionsDictionary,
                                       rotateA, figuresDictionary['C'],
                                       rotateB)
                if retVal > 0:
                    break
        if retVal == -1:
            retVal = randint(1, 6)

        return retVal
Example No. 40
    def solve3x3(self, problem):
        retVal = -1

        imageDictionary = {}
        figuresDictionary = {}
        for name in [
                'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', '1', '2', '3', '4',
                '5', '6', '7', '8'
        ]:
            imageDictionary[name] = Image.open(
                problem.figures[name].visualFilename)
            figuresDictionary[name] = toNumpyMatrix(imageDictionary[name])

        solutionsDictionary = {
            k: figuresDictionary[k]
            for k in figuresDictionary.keys()
            & {'1', '2', '3', '4', '5', '6', '7', '8'}
        }

        hMirrorA = toNumpyMatrix(ImageOps.mirror(imageDictionary['A']))
        hMirrorB = toNumpyMatrix(ImageOps.mirror(imageDictionary['B']))
        hMirrorC = toNumpyMatrix(ImageOps.mirror(imageDictionary['C']))
        hMirrorD = toNumpyMatrix(ImageOps.mirror(imageDictionary['D']))
        hMirrorE = toNumpyMatrix(ImageOps.mirror(imageDictionary['E']))
        hMirrorF = toNumpyMatrix(ImageOps.mirror(imageDictionary['F']))
        hMirrorG = toNumpyMatrix(ImageOps.mirror(imageDictionary['G']))
        hMirrorH = toNumpyMatrix(ImageOps.mirror(imageDictionary['H']))
        vMirrorA = toNumpyMatrix(ImageOps.flip(imageDictionary['A']))
        vMirrorB = toNumpyMatrix(ImageOps.flip(imageDictionary['B']))
        vMirrorC = toNumpyMatrix(ImageOps.flip(imageDictionary['C']))
        vMirrorD = toNumpyMatrix(ImageOps.flip(imageDictionary['D']))
        vMirrorE = toNumpyMatrix(ImageOps.flip(imageDictionary['E']))
        vMirrorF = toNumpyMatrix(ImageOps.flip(imageDictionary['F']))
        vMirrorG = toNumpyMatrix(ImageOps.flip(imageDictionary['G']))
        vMirrorH = toNumpyMatrix(ImageOps.flip(imageDictionary['H']))

        if retVal == -1:  # A == B == C
            similar1, mean1, rms1, bbox1 = diffStatistics(
                figuresDictionary['A'], figuresDictionary['B'])
            similar2, mean2, rms2, bbox2 = diffStatistics(
                figuresDictionary['B'], figuresDictionary['C'])
            bestMeanDiff = 1
            if similar1 and similar2:
                for key, value in solutionsDictionary.items():
                    similar, mean, rms, bbox = diffStatistics(
                        figuresDictionary['G'], value)
                    if similar and abs(mean - mean1) < bestMeanDiff:
                        bestMeanDiff = abs(mean - mean1)
                        retVal = int(key)
        if retVal == -1:  # A == D == G
            similar1, mean1, rms1, bbox1 = diffStatistics(
                figuresDictionary['A'], figuresDictionary['D'])
            similar2, mean2, rms2, bbox2 = diffStatistics(
                figuresDictionary['D'], figuresDictionary['G'])
            bestMeanDiff = 1
            if similar1 and similar2:
                for key, value in solutionsDictionary.items():
                    similar, mean, rms, bbox = diffStatistics(
                        figuresDictionary['C'], value)
                    if similar and abs(mean - mean1) < bestMeanDiff:
                        bestMeanDiff = abs(mean - mean1)
                        retVal = int(key)
        if retVal == -1:  # percentage
            similar1, mean1, rms1, bbox1 = diffStatistics(
                figuresDictionary['A'], figuresDictionary['B'])
            similar2, mean2, rms2, bbox2 = diffStatistics(
                figuresDictionary['B'], figuresDictionary['C'])
            similar12, mean12, rms12, bbox12 = diffStatistics(
                figuresDictionary['A'], figuresDictionary['C'])
            similar3, mean3, rms3, bbox3 = diffStatistics(
                figuresDictionary['D'], figuresDictionary['E'])
            similar4, mean4, rms4, bbox4 = diffStatistics(
                figuresDictionary['E'], figuresDictionary['F'])
            similar34, mean34, rms34, bbox34 = diffStatistics(
                figuresDictionary['D'], figuresDictionary['F'])
            similar5, mean5, rms5, bbox5 = diffStatistics(
                figuresDictionary['G'], figuresDictionary['H'])

            similar1, mean6, rms6, bbox1 = diffStatistics(
                figuresDictionary['A'], figuresDictionary['D'])
            similar2, mean7, rms7, bbox2 = diffStatistics(
                figuresDictionary['D'], figuresDictionary['G'])
            similar3, mean8, rms8, bbox3 = diffStatistics(
                figuresDictionary['B'], figuresDictionary['E'])
            similar4, mean9, rms9, bbox4 = diffStatistics(
                figuresDictionary['E'], figuresDictionary['H'])
            similar5, mean0, rms0, bbox5 = diffStatistics(
                figuresDictionary['C'], figuresDictionary['F'])

            rms1 = round(rms1, 3)
            rms2 = round(rms2, 3)
            rms12 = round(rms12, 3)
            rms3 = round(rms3, 3)
            rms4 = round(rms4, 3)
            rms34 = round(rms34, 3)
            rms5 = round(rms5, 3)
            rms6 = round(rms6, 3)
            rms7 = round(rms7, 3)
            rms8 = round(rms8, 3)
            rms9 = round(rms9, 3)
            rms0 = round(rms0, 3)

            k = round(rms2 - rms0, 3)
            j = round(rms7 - rms5, 3)

            for key, value in solutionsDictionary.items():
                similar, meana, rmsa, bbox = diffStatistics(
                    figuresDictionary['F'], value)
                similar, meanb, rmsb, bbox = diffStatistics(
                    figuresDictionary['H'], value)
                rmsa = round(rmsa, 3)
                rmsb = round(rmsb, 3)
                if (meana == round(mean0 - k, 2)
                        and meanb == round(mean5 - j, 2)):
                    retVal = int(key)
        return retVal
Example No. 41
    for chunk in r.iter_content(chunk_size=1024):
        raw_bytes += chunk
        a = raw_bytes.find(b'\xff\xd8\xff')
        b = raw_bytes.find(b'\xff\xd9', a)
        if a != -1 and b != -1:
            jpg = raw_bytes[a:b+2]
            raw_bytes = raw_bytes[b+2:]

            imgIo = io.BytesIO(jpg)
            img = Image.open(imgIo)

            # cvImg=cv2.cvtColor(np.array(img.convert("RGB")),cv2.COLOR_RGB2BGR)

            c += 1
            c = c % 5
            if c == 0:
                processing.useImg(np.array(ImageOps.mirror(ImageOps.flip(img))), mc)

else:
    print("INVALID CODE CAN'T PROCEED")
Example No. 42
def imageInflated(_inImportPath, _inExportPath):
    files = os.listdir(_inImportPath)
    imgNum = 0
    for file in files:
        index = re.search('.png',file)
        if index:
            imgNum += 1

    print(imgNum)

    for i in range(imgNum):

        importImagePath = os.path.join(_inImportPath,str(i+1)+'.png')
        im_0 = Image.open(importImagePath)
        saveImage(im_0,_inExportPath)

        im_1 = ImageOps.mirror(im_0)
        saveImage(im_1,_inExportPath)

        im_2 = im_0.point(lambda x:x * 1.3)
        saveImage(im_2,_inExportPath)

        im_3 = im_1.point(lambda x:x * 1.3)
        saveImage(im_3,_inExportPath)

        im_4 = im_0.point(lambda x:x * 0.7)
        saveImage(im_4,_inExportPath)

        im_5 = im_1.point(lambda x:x * 0.7)
        saveImage(im_5,_inExportPath)

        im_6 = changeColorRate(im_0,1.2,1,1)
        saveImage(im_6,_inExportPath)

        im_8 = changeColorRate(im_1,1.2,1,1)
        saveImage(im_8,_inExportPath)

        im_7 = changeColorRate(im_0,0.8,1,1)
        saveImage(im_7,_inExportPath)

        im_9 = changeColorRate(im_1,0.8,1,1)
        saveImage(im_9,_inExportPath)

        im_10 = changeColorRate(im_0,1,1.2,1)
        saveImage(im_10,_inExportPath)

        im_12 = changeColorRate(im_1,1,1.2,1)
        saveImage(im_12,_inExportPath)

        im_11 = changeColorRate(im_0,1,0.8,1)
        saveImage(im_11,_inExportPath)

        im_13 = changeColorRate(im_1,1,0.8,1)
        saveImage(im_13,_inExportPath)

        im_14 = changeColorRate(im_0,1,1,1.2)
        saveImage(im_14,_inExportPath)

        im_16 = changeColorRate(im_1,1,1,1.2)
        saveImage(im_16,_inExportPath)

        im_15 = changeColorRate(im_0,1,1,0.8)
        saveImage(im_15,_inExportPath)

        im_17 = changeColorRate(im_1,1,1,0.8)
        saveImage(im_17,_inExportPath)

        iec_1 = ImageEnhance.Contrast(im_0)
        im_18 = iec_1.enhance(1.3)
        saveImage(im_18,_inExportPath)

        iec_2 = ImageEnhance.Contrast(im_1)
        im_19 = iec_2.enhance(1.3)
        saveImage(im_19,_inExportPath)

        iec_3 = ImageEnhance.Contrast(im_0)
        im_20 = iec_3.enhance(0.7)
        saveImage(im_20,_inExportPath)

        iec_4 = ImageEnhance.Contrast(im_1)
        im_21 = iec_4.enhance(0.7)
        saveImage(im_21,_inExportPath)

        iec_5 = ImageEnhance.Color(im_0)
        im_22 = iec_5.enhance(1.3)
        saveImage(im_22,_inExportPath)

        iec_6 = ImageEnhance.Color(im_1)
        im_23 = iec_6.enhance(1.3)
        saveImage(im_23,_inExportPath)

        iec_7 = ImageEnhance.Color(im_0)
        im_24 = iec_7.enhance(0.7)
        saveImage(im_24,_inExportPath)

        iec_8 = ImageEnhance.Color(im_1)
        im_25 = iec_8.enhance(0.7)
        saveImage(im_25,_inExportPath)

        iec_9 = ImageEnhance.Sharpness(im_0)
        im_26 = iec_9.enhance(2)
        saveImage(im_26,_inExportPath)

        iec_10 = ImageEnhance.Sharpness(im_1)
        im_27 = iec_10.enhance(2)
        saveImage(im_27,_inExportPath)

        iec_11 = ImageEnhance.Sharpness(im_0)
        im_28 = iec_11.enhance(0.3)
        saveImage(im_28,_inExportPath)

        iec_12 = ImageEnhance.Sharpness(im_1)
        im_29 = iec_12.enhance(0.3)
        saveImage(im_29,_inExportPath)
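Purely as an illustration (not part of the original script), the brightness/contrast/colour/sharpness portion of the fan-out above can be written more compactly as a generator over the original image and its mirror:

from PIL import ImageEnhance, ImageOps

def variants(im):
    # Yield the original, its mirror, and brightness/contrast/colour/sharpness tweaks of both.
    for base in (im, ImageOps.mirror(im)):
        yield base
        yield base.point(lambda x: x * 1.3)
        yield base.point(lambda x: x * 0.7)
        for enhancer, factor in ((ImageEnhance.Contrast, 1.3), (ImageEnhance.Contrast, 0.7),
                                 (ImageEnhance.Color, 1.3), (ImageEnhance.Color, 0.7),
                                 (ImageEnhance.Sharpness, 2), (ImageEnhance.Sharpness, 0.3)):
            yield enhancer(base).enhance(factor)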
Example No. 43
 def mirror(self, img_pil, acc_numpy):
     ## image
     img_pil = ImageOps.mirror(img_pil)  #invert image
     ## acc
     acc_numpy[1] = -acc_numpy[1]
     return img_pil, acc_numpy
Example No. 44
from PIL import Image, ImageOps

base_dir = "/home/workspace/data/IMG/"

csv_file = pd.read_csv("../../data/driving_log.processed.1.csv")

data_set = []

for i in csv_file.index:
    print("Number of data: \t{}\r".format(i + 1), end="")
    image_file_name = csv_file["image"][i]
    angle = csv_file["steering"][i]

    image = Image.open(base_dir + image_file_name.split("/")[-1])

    flippedimage = ImageOps.mirror(image)

    new_name = base_dir + image_file_name.replace(".jpg",
                                                  "_flip.jpg").split("/")[-1]

    flippedimage.save(new_name)

    new_angle = -1 * angle

    data_set.append([image_file_name, angle])
    data_set.append([new_name, new_angle])

new_csv = pd.DataFrame(data_set, columns=["image", "steering"])

new_csv.to_csv("../../data/driving_log.processed.2.csv")
Example No. 45
def updateDisplay(config, pricestack):
    BTC = pricestack[-1]
    bmp = Image.open(os.path.join(picdir, 'BTC.bmp'))
    bmp2 = Image.open(os.path.join(picdir, 'spark.bmp'))
    if config['ticker']['hidden'] == True:
        if config['display']['orientation'] == 0 or config['display'][
                'orientation'] == 180:
            epd = epd2in7.EPD()
            epd.Init_4Gray()
            image = Image.new('L', (epd.width, epd.height),
                              255)  # 255: clear the image with white
            image.paste(bmp, (10, 20))
            draw = ImageDraw.Draw(image)
            draw.text((5, 200), "1 BTC", font=font, fill=0)
            draw.text((0, 10),
                      str(time.strftime("%c")),
                      font=font_date,
                      fill=0)
            if config['display']['orientation'] == 180:
                image = image.rotate(180, expand=True)

        if config['display']['orientation'] == 90 or config['display'][
                'orientation'] == 270:
            epd = epd2in7.EPD()
            epd.Init_4Gray()
            image = Image.new('L', (epd.height, epd.width),
                              255)  # 255: clear the image with white
            image.paste(bmp, (0, 0))
            draw = ImageDraw.Draw(image)
            draw.text((20, 120), "1 BTC", font=fontHorizontal, fill=0)
            draw.text((85, 5),
                      str(time.strftime("%c")),
                      font=font_date,
                      fill=0)
            if config['display']['orientation'] == 270:
                image = image.rotate(180, expand=True)
    #       This is a hack to deal with the mirroring that goes on in 4Gray Horizontal
            image = ImageOps.mirror(image)
    else:
        if config['display']['orientation'] == 0 or config['display'][
                'orientation'] == 180:
            epd = epd2in7.EPD()
            epd.Init_4Gray()
            image = Image.new('L', (epd.width, epd.height),
                              255)  # 255: clear the image with white
            draw = ImageDraw.Draw(image)
            draw.text((110, 80), "7day :", font=font_date, fill=0)
            draw.text(
                (110, 95),
                str("%+d" % round(
                    (pricestack[-1] - pricestack[1]) / pricestack[-1] * 100,
                    2)) + "%",
                font=font_date,
                fill=0)
            draw.text((5, 200),
                      "$" + format(int(round(BTC)), ","),
                      font=font,
                      fill=0)
            draw.text((0, 10),
                      str(time.strftime("%c")),
                      font=font_date,
                      fill=0)
            image.paste(bmp, (10, 20))
            image.paste(bmp2, (10, 125))
            if config['display']['orientation'] == 180:
                image = image.rotate(180, expand=True)

        if config['display']['orientation'] == 90 or config['display'][
                'orientation'] == 270:
            epd = epd2in7.EPD()
            epd.Init_4Gray()
            image = Image.new('L', (epd.height, epd.width),
                              255)  # 255: clear the image with white
            draw = ImageDraw.Draw(image)
            draw.text(
                (100, 100),
                "7day : " + str("%+d" % round(
                    (pricestack[-1] - pricestack[1]) / pricestack[-1] * 100,
                    2)) + "%",
                font=font_date,
                fill=0)
            draw.text((20, 120),
                      "$" + format(int(round(BTC)), ","),
                      font=fontHorizontal,
                      fill=0)
            image.paste(bmp2, (80, 50))
            image.paste(bmp, (0, 0))
            draw.text((85, 5),
                      str(time.strftime("%c")),
                      font=font_date,
                      fill=0)
            if config['display']['orientation'] == 270:
                image = image.rotate(180, expand=True)
    #       This is a hack to deal with the mirroring that goes on in 4Gray Horizontal
            image = ImageOps.mirror(image)

#   If the display is inverted, invert the image using ImageOps
    if config['display']['inverted'] == True:
        image = ImageOps.invert(image)


#   Send the image to the screen
    epd.display_4Gray(epd.getbuffer_4Gray(image))
    epd.sleep()
Example No. 46
  def transform(self, i, image):
    # (_x, _y): random translation position      
    _x = np.random.randint(0, self.x)
    _y = np.random.randint(0, self.y)
    
    # _angle : random rotation angle
    _angle  = np.random.randint(0, self.angle)

    minw = float(image.width) * float(self.shrink_rw)
    minh = float(image.height)* float(self.shrink_rh)
    
    # (_w, _h) : random image_width and image_height
    self._w = int( np.random.randint(int(minw), image.width)  )
    self._h = int( np.random.randint(int(minh), image.height) )

    print(" {} {} {} {}".format(_x, _y, self._w, self._h))
    
    if i % 3 == 0:
      _angle = _angle * (-1)

    # Resize the image to (_w, _h)
    image = image.resize(size=(self._w, self._h), resample=Image.LANCZOS)
      
    # Rotate the image by _angle 
    image = image.rotate(_angle, translate=(_x, _y), expand=True)
    

    if i % 3 == 0:
      print("CONTRAST {}".format(i))
      image = ImageOps.autocontrast(image, self.contrast)

    if i % 4 == 0 and self.vflip == True:
      print("VERTICAL FLIP {}".format(i))
      image = ImageOps.flip(image)

    if i % 5 == 0 and self.hflip == True:
      print("HORIZONTAL FLIP {}".format(i))
      image = ImageOps.mirror(image)

    if i % 5 == 0:
      # Apply a simple AFFINE transformation to the image.
      xshift = int( abs(self.xshift) * self.width )
      yshift = int( abs(self.yshift) * self.height )
      
      # New width and height (_nw, _nh) for AFFINE transformation
      _nw = self._w + xshift
      _nh = self._h + yshift
      
      # Very simple coefficients for the AFFINE transformation
      coeffs = (1, self.xshift, 0, 0, 1, 0, 0, 0)
      
      # Invert the xshift sign for the next call.
      self.xshift = self.xshift * (-1)
      
      print("AFFINE {}".format(i))
      image = image.transform((_nw, _nh), Image.AFFINE, coeffs, Image.BICUBIC)
      
    if i % 6 == 0 and self.sharpen == True:
      print("SHARPEN {}".format(i))
      image = image.filter(ImageFilter.SHARPEN)

    if i % 8 == 0 and self.smooth == True:
      print("SMOOTH_MORE {}".format(i))
      image = image.filter(ImageFilter.SMOOTH_MORE)
      
    if i % 9 == 0 and self.edge_enhance == True:
      print("EDGE_ENHANCE_MORE {}".format(i))
      image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)
      
    if i>0 and i % 11 == 0 and self.noise >0.0:
      print("NOISE {}".format(i))
      image = self.inject_saultpepper_noise(image)

    return image
Ejemplo n.º 47
0
def cloudiness(InputFilePath):
    # -*- coding: utf-8 -*-
    from PIL import Image, ImageDraw, ImageOps, ImageFont
    import math
    from pvlib.location import Location
    import matplotlib.dates as mdate
    import pvlib
    import pandas as pd
    import datetime
    import glob
    import numpy as np
    from time import perf_counter as clock  # time.clock was removed in Python 3.8
    import copy
    from Converter import convert

    #Information:
    #
    #Code written by Marcus Klingebiel, Max-Planck-Institute for Meteorology
    #E-Mail: [email protected]
    #
    #PLEASE ASK BEFORE SHARING THIS CODE!
    #
    #
    #Preliminary version of the All-Sky Cloud Algorithms (ASCA)
    #The code is based on the analysis of every single pixel of a JPEG image.
    #The Sky Index and Brightness Index used are based on Letu et al. (2014), Applied Optics, Vol. 53, No. 31.
    #
    #
    #
    #Marcus Klingebiel, March 2016

    #Code edited by Tobias Machnitzki
    #Email: [email protected]
    print("Calculating Cloudcoverage")

    #--------------------Settings------------------------------------------------------------------------------------------

    debugger = False  #if true, the program will print a message after each step

    TXTFile = False  #if True, the program will generate a csv file with additional information. Delimiter = ','

    #    imagefont_size = 20 #Sets the font size of everything written into the picture

    Radius_synop = False  #If True: only the central 60 degrees of the sky are used instead of the whole image (as the DWD does for cloud cover)

    Save_image = False  #If True: an image is written to the output location with the recognized clouds colored.

    #    font = ImageFont.truetype("/home/tobias/anaconda3/lib/python3.5/site-packages/matplotlib/mpl-data/fonts/ttf/Vera.ttf",imagefont_size)    # Font

    set_scale_factor = 100  #this factor sets the accuracy of the program. By scaling down the image size the program gets faster, but its accuracy also decreases.
    #It needs to be between 1 and 100. If set to 100, the original size of the image will be used.
    #If set to 50, the image will be scaled down to half its size.
    #

    #---------------------Calculate the SI-parameter--------------------------------------------------------------------------------------
    #The parameter is calculated before the loop over all files starts, to save computing time.
    #To see how the function for the parameter was generated, see the documentation.

    size = 100
    parameter = np.zeros(size)
    for j in range(size):
        parameter[j] = (0 + j * 0.4424283716980435 -
                        pow(j, 2) * 0.06676211439554262 +
                        pow(j, 3) * 0.0026358061791573453 -
                        pow(j, 4) * 0.000029417130873311177 +
                        pow(j, 5) * 1.0292852149593944e-7) * 0.001

    #----------------------Read files------------------------------------------------------------------------------------------------------

    OutputPath = "/media/MPI/ASCA/images/s160521/out/"

    cloudiness_value = []
    ASCAtime = []
    cloudmasks = []
    #    print(InputFilePath)

    for InputFile in sorted(glob.glob(InputFilePath + '/*.jpg')):

        #---------------------------------------------------------------------------------------------------------
        #--------Get day and time------------
        if debugger == True:
            print("Getting day and time")

        date_str = InputFile[len(InputFile) - 19:len(InputFile) - 19 + 12]
        if debugger == True:
            print("Date_Str: " + date_str)
        Year_str = date_str[0:2]
        Month_str = date_str[2:4]
        Day_str = date_str[4:6]
        Hour_str = date_str[6:8]
        Minute_str = date_str[8:10]
        Second_str = date_str[10:12]

        Year = int(date_str[0:2])
        Month = int(date_str[2:4])
        Day = int(date_str[4:6])
        Hour = int(date_str[6:8])
        Minute = int(date_str[8:10])
        Second = int(date_str[10:12])

        #------------Calculate SZA--------------------------------------------------------------------------------------------------------------
        if debugger == True:
            print("Calculating SZA")

        tus = Location(
            13.164, -59.433, 'UTC', 70, 'BCO'
        )  #This is the location of the Cloud camera used for calculating the Position of the sun in the picture
        times = pd.date_range(start=datetime.datetime(Year + 2000, Month, Day,
                                                      Hour, Minute, Second),
                              end=datetime.datetime(Year + 2000, Month, Day,
                                                    Hour, Minute, Second),
                              freq='10s')
        times_loc = times.tz_localize(tus.pytz)
        pos = pvlib.solarposition.get_solarposition(times_loc,
                                                    tus.latitude,
                                                    tus.longitude,
                                                    method='nrel_numpy',
                                                    pressure=101325,
                                                    temperature=25)
        sza = float(pos.zenith[0])
        if debugger:
            print("sza=" + str(sza))

        if (84 < sza <=
                85):  #The program will only process images made at daylight

            time1 = clock()
            azimuth = float(pos.azimuth[0])
            sza_orig = sza
            azi_orig = azimuth
            azimuth = azimuth + 190  #197 good
            #            print(( str(sza) + '   '+Hour_str+':'+Minute_str))
            if azimuth > 360:
                azimuth = azimuth - 360

    #------------Open csv-File-------------------------------------------------------------------------------------------------------------
            if debugger == True:
                print("Open csv-File")

            if TXTFile == True:
                f = open(
                    OutputPath + Year_str + Month_str + Day_str + '_' +
                    Hour_str + Minute_str + Second_str + '_ASCA.csv', 'w')
                f.write(
                    'Seconds_since_1970, UTC_Time, SZA_in_degree, Azimuth_in_degree, Cloudiness_in_percent, Cloudiness_in_oktas'
                    + '\n')
                TXTFile = False

    #---Read image and set some parameters-------------------------------------------------------------------------------------------------
            if debugger == True:
                print("Reading image and setting parameters")

            #------------rescale picture-------------------------------------------
            image = Image.open(InputFile)

            x_size_raw = image.size[0]
            y_size_raw = image.size[1]
            scale_factor = (set_scale_factor / 100.)
            NEW_SIZE = (x_size_raw * scale_factor, y_size_raw * scale_factor)
            image.thumbnail(NEW_SIZE, Image.ANTIALIAS)

            image = ImageOps.mirror(image)  #Mirror picture

            x_size = image.size[0]
            y_size = image.size[1]
            x_mittel = x_size / 2  # Detect center of the true image
            y_mittel = y_size / 2
            Radius = 900  #pixel    #  Set area for the true allsky image

            scale = x_size / 2592.

            #-------------convert image to an array and remove the unnecessary part around the true allsky image-----------------------------------------------------------------
            if debugger == True:
                print("Drawing circle around image and removing the rest")

            r = Radius * scale
            y, x = np.ogrid[-y_mittel:y_size - y_mittel,
                            -x_mittel:x_size - x_mittel]
            x = x + (15 * scale)  #move centerpoint manually
            y = y - (40 * scale)
            mask = x**2 + y**2 <= r**2  #make a circular boolean array which is false in the area outside the true allsky image

            image_array = np.asarray(
                image, order='F'
            )  #converting the image to an array with array[x,y,color]; color: 0=red, 1=green, 2=blue
            image_array.setflags(
                write=True
            )  #make the array writable so its values can be modified
            image_array[:, :, :][~mask] = [
                0, 0, 0
            ]  #apply the mask created above: black out everything outside the true allsky circle

            if Radius_synop == True:
                mask = x**2 + y**2 <= (765 * scale)**2
                image_array[:, :, :][~mask] = [0, 0, 0]

            del x, y
            #
            #------------Calculate position of sun on picture---------------------------------------------------------------------------------------
            if debugger == True:
                print("Calculating position of the sun on picture")

            sza = sza - 90
            if sza < 0:
                sza = sza * (-1)

            AzimutWinkel = ((2 * math.pi) / 360) * (azimuth - 90)
            sza = ((2 * math.pi) / 360) * sza
            x_sol_cen = x_mittel - (15 * scale)
            y_sol_cen = y_mittel + (40 * scale)
            RadiusBild = r
            sza_dist = RadiusBild * math.cos(sza)

            x = x_sol_cen - sza_dist * math.cos(AzimutWinkel)
            y = y_sol_cen - sza_dist * math.sin(AzimutWinkel)

            ###-----------Draw circle around position of sun-------------------------------------------------------------------------------------------
            if debugger == True:
                print("Drawing circle around position of sun")

            x_sol_cen = int(x)
            y_sol_cen = int(y)
            Radius_sol = 300 * scale
            Radius_sol_center = 250 * scale

            y, x = np.ogrid[-y_sol_cen:y_size - y_sol_cen,
                            -x_sol_cen:x_size - x_sol_cen]
            sol_mask = x**2 + y**2 <= Radius_sol**2
            sol_mask_cen = x**2 + y**2 <= Radius_sol_center**2
            sol_mask_cen1 = sol_mask_cen
            image_array[:, :, :][sol_mask_cen] = [0, 0, 0]
            #        image_array[:,:,:][]

            ##-------Calculate Sky Index SI and Brightness Index BI------------Based on Letu et al. (2014)-------------------------------------------------
            if debugger == True:
                print("Calculating Sky Index SI and Brightness Index BI")

            image_array_f = image_array.astype(float)

            SI = ((image_array_f[:, :, 2]) -
                  (image_array_f[:, :, 0])) / (((image_array_f[:, :, 2]) +
                                                (image_array_f[:, :, 0])))
            where_are_NaNs = np.isnan(SI)
            SI[where_are_NaNs] = 1

            mask_sol1 = SI < 0.1
            Radius = 990 * scale
            sol_mask_double = x**2 + y**2 <= Radius**2
            mask_sol1 = np.logical_and(mask_sol1, ~sol_mask_double)
            image_array[:, :, :][mask_sol1] = [255, 0, 0]

            ###-------------Include area around the sun----------------------------------------------------------------------------------------------------
            if debugger == True:
                print("Including area around the sun")

            y, x = np.ogrid[-y_sol_cen:y_size - y_sol_cen,
                            -x_sol_cen:x_size - x_sol_cen]
            sol_mask = x**2 + y**2 <= Radius_sol**2
            sol_mask_cen = x**2 + y**2 <= Radius_sol_center**2
            sol_mask_cen = np.logical_and(sol_mask_cen, sol_mask)

            Radius_sol = size * 100 * 2
            sol_mask = x**2 + y**2 <= Radius_sol**2
            mask2 = np.logical_and(~sol_mask_cen, sol_mask)

            image_array_c = copy.deepcopy(
                image_array
            )  #duplicate the array: one copy for counting, one for the colored output image

            time3 = clock()

            for j in range(size):
                Radius_sol = j * 10 * scale
                sol_mask = (x * x) + (y * y) <= Radius_sol * Radius_sol
                mask2 = np.logical_and(~sol_mask_cen, sol_mask)
                sol_mask_cen = np.logical_or(sol_mask, sol_mask_cen)

                mask3 = SI < parameter[j]
                mask3 = np.logical_and(mask2, mask3)
                image_array_c[mask3] = [255, 0, 0]
                image_array[mask3] = [255, 300 - 3 * j, 0]

            time4 = clock()
            #        print('Loop time:', time4 - time3)
            ##---------Count red pixel(clouds) and blue-green pixel(sky)-------------------------------------------------------------------------------------------
            if debugger == True:
                print("Counting red pixel for sky and blue for clouds")

            c_mask = np.logical_and(~sol_mask_cen1, mask)
            c_array = (image_array_c[:, :, 0] + image_array_c[:, :, 1] +
                       image_array_c[:, :, 2])  #array just for the counting
            Count1 = np.shape(np.where(c_array == 255))[1]
            Count2 = np.shape(np.where(c_mask == True))[1]

            CloudinessPercent = (100 / float(Count2) * float(Count1))
            CloudinessSynop = int(round(8 * (float(Count1) / float(Count2))))

            image = Image.fromarray(image_array.astype(np.uint8))

            #----------Mirror Image-----------------------------
            image = ImageOps.mirror(image)  #Mirror Image back
            #---------Add Text-----------------------------------
            if debugger == True:
                print("Adding text")

            sza = "{:5.1f}".format(sza_orig)
            azimuth = "{:5.1f}".format(azi_orig)
            CloudinessPercent = "{:5.1f}".format(CloudinessPercent)

            #            draw = ImageDraw.Draw(image)
            #            draw.text((20*scale, 20*scale),"BCO All-Sky Camera",(255,255,255),font=font)
            #            draw.text((20*scale, 200*scale),Hour_str+":"+Minute_str+' UTC',(255,255,255),font=font)
            #
            #            draw.text((20*scale, 1700*scale),"SZA = "+str(sza)+u'\u00B0',(255,255,255),font=font)
            #            draw.text((20*scale, 1820*scale),"Azimuth = "+str(azimuth)+u'\u00B0',(255,255,255),font=font)
            #
            #            draw.text((1940*scale, 1700*scale),"Cloudiness: ",(255,255,255),font=font)
            #            draw.text((1930*scale, 1820*scale),str(CloudinessPercent)+'%   '+ str(CloudinessSynop)+'/8',(255,255,255),font=font)
            #
            #            draw.text((1990*scale, 20*scale),Day_str+'.'+Month_str+'.20'+Year_str,(255,255,255),font=font)

            #-------------Save values to csv-File---------------------------------------
            #            if debugger == True:
            #                print "Saving values to csv-File"

            #            EpochTime=(datetime.datetime(2000+Year,Month,Day,Hour,Minute,Second) - datetime.datetime(1970,1,1)).total_seconds()
            #            f.write(str(EpochTime)+', '+Hour_str+':'+Minute_str+', '+str(sza)+', '+str(azimuth)+', '+str(CloudinessPercent)+', '+str(CloudinessSynop)+'\n')
            #-------------Save picture--------------------------------------------------
            if Save_image == True:
                if debugger == True:
                    print("saving picture")

                image = convert(InputFile, image, OutputPath)
                image.save(OutputPath + Year_str + Month_str + Day_str + '_' +
                           Hour_str + Minute_str + Second_str + '_ASCA.jpg')

            #image.show()
            time2 = clock()
            time = time2 - time1
            cloudiness_value.append(CloudinessPercent)
            ASCAtime.append((datetime.datetime(Year + 2000, Month, Day, Hour,
                                               Minute, Second)))

            cloudmask = [c_array == 255]
            cloudmask = cloudmask[0] * 1
            cloudmask[np.where(c_mask == False)] = -1
            cloudmask = np.fliplr(cloudmask)  # mirror the mask back to match the original image orientation
            cloudmasks.append(cloudmask)

#               print "Berechnungszeit: ", time
    return cloudiness_value, ASCAtime, cloudmasks, set_scale_factor
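
# A minimal usage sketch (hypothetical path): the directory is expected to
# contain all-sky JPEGs whose file names embed a yyMMddHHmmss timestamp, as
# parsed above.
values, times, masks, scale = cloudiness("/media/MPI/ASCA/images/s160521")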
Ejemplo n.º 48
0
async def imirror(event):  # sourcery no-metrics
    "imgae refelection fun."
    reply = await event.get_reply_message()
    mediatype = media_type(reply)
    if not reply or not mediatype or mediatype not in ["Photo", "Sticker"]:
        return await edit_delete(
            event, "__Reply to photo or sticker to make mirror.__")
    catevent = await event.edit("__Reflecting the image....__")
    args = event.pattern_match.group(1)
    if args:
        filename = "catuserbot.webp"
        f_format = "webp"
    else:
        filename = "catuserbot.jpg"
        f_format = "jpeg"
    try:
        imag = await _cattools.media_to_pic(catevent, reply, noedits=True)
        if imag[1] is None:
            return await edit_delete(
                imag[0],
                "__Unable to extract image from the replied message.__")
        image = Image.open(imag[1])
    except Exception as e:
        return await edit_delete(
            catevent, f"**Error in identifying image:**\n__{str(e)}__")
    flag = event.pattern_match.group(3) or "r"
    w, h = image.size
    if w % 2 != 0 and flag in ["r", "l"] or h % 2 != 0 and flag in ["u", "b"]:
        image = image.resize((w + 1, h + 1))
        w, h = image.size
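    # Flag "l" mirrors the left half onto the right, "r" the right half onto
    # the left, "u" flips the top half onto the bottom, "b" the bottom half
    # onto the top.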
    if flag == "l":
        left = 0
        upper = 0
        right = w // 2
        lower = h
        nw = right
        nh = left
    elif flag == "r":
        left = w // 2
        upper = 0
        right = w
        lower = h
        nw = upper
        nh = upper
    elif flag == "u":
        left = 0
        upper = 0
        right = w
        lower = h // 2
        nw = left
        nh = lower
    elif flag == "b":
        left = 0
        upper = h // 2
        right = w
        lower = h
        nw = left
        nh = left
    temp = image.crop((left, upper, right, lower))
    temp = ImageOps.mirror(temp) if flag in ["l", "r"] else ImageOps.flip(temp)
    image.paste(temp, (nw, nh))
    img = BytesIO()
    img.name = filename
    image.save(img, f_format)
    img.seek(0)
    await event.client.send_file(event.chat_id, img, reply_to=reply)
    await catevent.delete()
Ejemplo n.º 49
0
def preprocess(observe):
    ret = Image.fromarray(observe)
    ret = ImageOps.mirror(ret.rotate(270)).convert('L').resize(
        (RESIZE, RESIZE))
    return np.asarray(ret)
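
# A minimal usage sketch (hypothetical values): assumes numpy is imported as np,
# RESIZE is defined (e.g. 84), and `observe` is an RGB frame such as one
# returned by a gym-style environment.
frame = np.zeros((210, 160, 3), dtype=np.uint8)
state = preprocess(frame)  # grayscale array of shape (RESIZE, RESIZE)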
Ejemplo n.º 50
0
		stage = stages.Stage1()
	
	# Initial drawing
	root = tk.Tk()
	root.title(stage.name)
	root.geometry(f'{WINDOW_WIDTH}x{WINDOW_HEIGHT}+0+0')
	cv = tk.Canvas(root, width=WINDOW_WIDTH, height=WINDOW_HEIGHT, bg='white')
	cv.pack()
	cv.focus_set()

	# Load images
	# Character
	obake_img = Image.open('./img/obake.png')
	obake_img = obake_img.resize((IMG_SIZE, IMG_SIZE))
	obake_flip_img = ImageOps.flip(obake_img)		# flipped vertically
	obake_mirror_img = ImageOps.mirror(obake_img)	# flipped horizontally (facing right)
	obake_fm_img = ImageOps.mirror(obake_flip_img)	# flipped vertically and horizontally
	obake_tkimg = ImageTk.PhotoImage(obake_img)
	obake_flip_tkimg = ImageTk.PhotoImage(obake_flip_img)
	obake_mirror_tkimg = ImageTk.PhotoImage(obake_mirror_img)
	obake_fm_tkimg = ImageTk.PhotoImage(obake_fm_img)
	# Gravity block
	udarrow_img = Image.open('./img/updownarrow.png')
	udarrow_img = udarrow_img.resize((BLOCK_SIZE, BLOCK_SIZE))
	udarrow2_img = udarrow_img.resize((BLOCK_SIZE*2, BLOCK_SIZE*2))
	udarrow_tkimg = ImageTk.PhotoImage(udarrow_img)
	udarrow2_tkimg = ImageTk.PhotoImage(udarrow2_img)
	# Signboard
	triple_size = (BLOCK_SIZE*3, BLOCK_SIZE*3)
	dsc_J_img = Image.open(f'./img/dsc_J.png').resize(triple_size)
	dsc_K_img = Image.open(f'./img/dsc_K.png').resize(triple_size)
Ejemplo n.º 51
0
def conv_image_to_module(name, scale_factor):

    module = header % {"name": name.upper()}

    front_image = Image.open("%s_front.png" % name).transpose(Image.FLIP_TOP_BOTTOM) 
    print("Reading image from \"%s_front.png\"" % name)

    front_image_red, front_image_green, front_image_blue, front_image_alpha = front_image.split()

    # Soldermask needs to be inverted
    front_image_red = ImageOps.invert(front_image_red)
    front_image_red = Image.composite(front_image_red, front_image_alpha, front_image_alpha)
    front_image_red = front_image_red.point(lambda i: 0 if i < 127 else 1)
    red_array = np.array(front_image_red)
    bmp_red = potrace.Bitmap(red_array)
    path_red = bmp_red.trace(alphamax = 0.0, opttolerance = 50)

    # Soldermask needs to be inverted
    front_image_green = ImageOps.invert(front_image_green)
    front_image_green = Image.composite(front_image_green, front_image_alpha, front_image_alpha)
    front_image_green = front_image_green.point(lambda i: 0 if i < 127 else 1)
    green_array = np.array(front_image_green)
    bmp_green = potrace.Bitmap(green_array)
    path_green = bmp_green.trace(alphamax = 0.0, opttolerance = 50)

    front_image_blue = front_image_blue.point(lambda i: 0 if i < 127 else 1)
    blue_array = np.array(front_image_blue)
    bmp_blue = potrace.Bitmap(blue_array)
    path_blue = bmp_blue.trace(alphamax = 0.0, opttolerance = 50)

    front_image_alpha = front_image_alpha.point(lambda i: 0 if i < 127 else 1)
    front_image_alpha_array = np.array(front_image_alpha)
    bmp_alpha = potrace.Bitmap(front_image_alpha_array)
    path_alpha = bmp_alpha.trace(alphamax = 0.0, opttolerance = 50)

    w, h = front_image.size

    # print("Generating Outline layer from front alpha channel")
    # module += render_path_to_layer(path_alpha, "line", "20", scale_factor)

    print("Generating tKeepout layer from front red channel")
    module += render_path_to_layer(path_red, "poly", "39", scale_factor)
    print("Generating tStop layer from front green channel")
    module += render_path_to_layer(path_green, "poly", "29", scale_factor)
    print("Generating tPlace layer from front blue channel")
    module += render_path_to_layer(path_blue, "poly", "21", scale_factor)

    try:
        back_image = Image.open("%s_back.png" % name).transpose(Image.FLIP_TOP_BOTTOM) 
        back_image = ImageOps.mirror(back_image)
        print("Reading image from \"%s_back.png\"" % name)

        back_image_red, back_image_green, back_image_blue, back_image_alpha = back_image.split()

        back_image_red = back_image_red.point(lambda i: 0 if i < 127 else 1)
        red_array = np.array(back_image_red)
        bmp_red = potrace.Bitmap(red_array)
        path_red = bmp_red.trace(alphamax = 0.0, opttolerance = 50)

        # Soldermask needs to be inverted
        back_image_green = ImageOps.invert(back_image_green)
        back_image_green = back_image_green.point(lambda i: 0 if i < 127 else 1)
        green_array = np.array(back_image_green)
        bmp_green = potrace.Bitmap(green_array)
        path_green = bmp_green.trace(alphamax = 0.0, opttolerance = 50)

        back_image_blue = back_image_blue.point(lambda i: 0 if i < 127 else 1)
        blue_array = np.array(back_image_blue)
        bmp_blue = potrace.Bitmap(blue_array)
        path_blue = bmp_blue.trace(alphamax = 0.0, opttolerance = 50)

        print("Generating bKeepout layer from back red channel")
        module += render_path_to_layer(path_red, "poly", "40", scale_factor)
        print("Generating bStop layer from back green channel")
        module += render_path_to_layer(path_green, "poly", "30", scale_factor)
        print("Generating bPlace layer from back blue channel")
        module += render_path_to_layer(path_blue, "poly", "22", scale_factor)
    except IOError:
        pass

    module += footer % {"name": name.upper()}
    return module, (w * 25.4 / scale_factor, h * 25.4 / scale_factor)
Ejemplo n.º 52
0
def detect_birds(model,
                 input_folder,
                 output_folder_crop,
                 generate_masks=False,
                 output_folder_mask="mask_dataset"):
    kernel = np.ones((25, 25), 'uint8')
    for data_folder in list(
            os.listdir(input_folder)):  # Iterate over train, val and test
        non_cropped = 0
        non_cropped_names = []
        num_imgs = 0
        directory = input_folder + '/' + data_folder
        print("\nDetecting birds on :", data_folder)
        for folder in list(
                os.listdir(directory)):  # Iterate over classes of birds
            size = len(list(os.listdir(directory + '/' + folder)))
            num_imgs += size
            os.makedirs(output_folder_crop, exist_ok=True)
            os.makedirs(output_folder_crop + '/' + data_folder + '/' + folder,
                        exist_ok=True)
            if generate_masks:
                os.makedirs(output_folder_mask, exist_ok=True)
                os.makedirs(output_folder_mask + '/' + data_folder + '/' +
                            folder,
                            exist_ok=True)

            img_paths = []
            img_detections = []

            # Reformat weird images
            for file in os.listdir(directory + '/' + folder):
                if '.jpg' not in file:
                    print(file)
                    continue
                i = plt.imread(directory + '/' + folder + '/' + file)
                if len(i.shape) == 2 or i.shape[2] != 3:
                    i = Image.fromarray(i)
                    i = i.convert('RGB')
                    i.save(directory + '/' + folder + '/' + file)
                del i

            # Get image paths and detections : not the most efficient way, but it avoids defining a proper detectron2-specific Dataloader
            for img_path in list(os.listdir(directory + '/' + folder)):
                if '.jpg' not in img_path:
                    print(img_path)
                    continue
                img = cv2.imread(directory + '/' + folder + '/' + img_path)
                with torch.no_grad():
                    detections = model(img)["instances"]
                img_paths.append(directory + '/' + folder + '/' + img_path)
                img_detections.append(detections)

            # Save cropped images
            for (path, detections) in (zip(img_paths, img_detections)):
                img = np.array(Image.open(path))

                # Bounding boxes and labels of detections
                if len(detections.scores) > 0:

                    # Get the most probable bird prediction bounding box
                    index_birds = np.where(detections.pred_classes.cpu().numpy(
                    ) == 14)[0]  # 14 is the default class number for bird
                    if len(index_birds) == 0:
                        # Flip the image if we are not able to detect the bird
                        non_cropped_names.append(path)
                        non_cropped += 1
                        path = path.split("/")[-1]
                        plt.imsave(output_folder_crop + '/' + data_folder +
                                   '/' + folder + '/' + path,
                                   np.array(
                                       ImageOps.mirror(Image.fromarray(img))),
                                   dpi=1000)
                        plt.close()
                        continue
                    bird = int(
                        torch.max(detections.scores[index_birds],
                                  0)[1].cpu().numpy())
                    [
                        x1, y1, x2, y2
                    ] = detections.pred_boxes[index_birds][bird].tensor[0].cpu(
                    ).numpy()
                    mask = detections.pred_masks.cpu().numpy().astype(
                        np.uint8).squeeze()
                    count = 1
                    invalid_mask = False

                    # If we are able to detect the bird, enlarge the bounding box and generate a new image
                    # x1, y1 = np.maximum(0,int(x1)-20), np.maximum(0,int(y1)-20)
                    # x2, y2 = np.minimum(x2+40,img.shape[1]), np.minimum(y2+40,img.shape[0])

                    # generate mask
                    if generate_masks:
                        if len(mask.shape) > 2:
                            invalid_mask = True
                        else:
                            imgcv = cv2.imread(path)
                            dilate_img = cv2.dilate(mask, kernel, iterations=1)
                            masked_img = cv2.bitwise_and(imgcv,
                                                         imgcv,
                                                         mask=dilate_img)

                    img = img[int(np.ceil(y1)):int(y2),
                              int(np.ceil(x1)):int(x2), :]
                    # crop the masked image
                    if not invalid_mask:
                        masked_img = masked_img[int(np.ceil(y1)):int(y2),
                                                int(np.ceil(x1)):int(x2), :]

                    # Save generated image with detections
                    path = path.split("/")[-1]
                    print(output_folder_crop + '/' + data_folder + '/' +
                          folder + '/' + path)
                    plt.imsave(output_folder_crop + '/' + data_folder + '/' +
                               folder + '/crop' + path,
                               img,
                               dpi=1000)
                    if generate_masks:
                        if not invalid_mask:
                            masked_img = cv2.cvtColor(masked_img,
                                                      cv2.COLOR_BGR2RGB)
                            plt.imsave(output_folder_mask + '/' + data_folder +
                                       '/' + folder + '/' + path,
                                       masked_img,
                                       dpi=1000)
                    plt.close()

                else:
                    # Flip the image if we are not able to detect the bird
                    non_cropped_names.append(path)
                    non_cropped += 1
                    path = path.split("/")[-1]
                    # Flip the image if we are not able to detect it
                    plt.imsave(output_folder_crop + '/' + data_folder + '/' +
                               folder + '/crop' + path,
                               np.array(ImageOps.mirror(Image.fromarray(img))),
                               dpi=1000)

                    plt.close()

        print("\t{}% of {} images non cropped".format(
            np.round(100 * non_cropped / num_imgs, 2), data_folder))
    return (non_cropped_names)
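
# A minimal usage sketch (paths and model choice are illustrative, not taken
# from the original project): the `model` argument is expected to behave like a
# detectron2 DefaultPredictor, returning an "instances" field with pred_classes,
# pred_boxes, pred_masks and scores as used above.
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)

missed = detect_birds(predictor, "bird_dataset", "crop_dataset",
                      generate_masks=True, output_folder_mask="mask_dataset")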
Ejemplo n.º 53
0
async def KZD(message, type):
    S = 'sticker'
    A = message
    N = await A.get_reply_message()
    Q, J = await CM(N)
    if not Q or not N:
        await A.edit('<b>Reply to a sticker or a photo!</b>')
        return
    O = 'KZD.' + J
    P = U.get_args_raw(A)
    if P:
        if P in [_A[:A] for A in range(1, len(_A) + 1)]:
            O = 'KZD.png'
            J = _C
        if P in [S[:A] for A in range(1, len(S) + 1)]:
            O = 'KZD.webp'
            J = 'webp'
    R = ist()
    await A.edit('<b>Sorry...</b>')
    await A.client.download_media(Q, R)
    E = Image.open(R)
    B, C = E.size
    if B % 2 != 0 and type in [1, 2] or C % 2 != 0 and type in [3, 4]:
        E = E.resize((B + 1, C + 1))
        B, C = E.size
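    # type 1 mirrors the left half onto the right, 2 the right half onto the
    # left, 3 flips the top half onto the bottom, 4 the bottom half onto the top.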
    if type == 1:
        D = 0
        F = 0
        G = B // 2
        H = C
        K = G
        L = D
    if type == 2:
        D = B // 2
        F = 0
        G = B
        H = C
        K = F
        L = F
    if type == 3:
        D = 0
        F = 0
        G = B
        H = C // 2
        K = D
        L = H
    if type == 4:
        D = 0
        F = C // 2
        G = B
        H = C
        K = D
        L = D
    I = E.crop((D, F, G, H))
    if type in [1, 2]:
        I = IO.mirror(I)
    else:
        I = IO.flip(I)
    E.paste(I, (K, L))
    M = ist()
    M.name = O
    E.save(M, J)
    M.seek(0)
    await A.client.send_file(A.to_id, M, reply_to=N)
    await A.delete()
Ejemplo n.º 54
0
def reflect_y_axis(im):
	return ImageOps.mirror(im).rotate(180,expand=True)
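# Note: mirroring and then rotating by 180 degrees is equivalent to a single
# top-bottom flip, i.e. ImageOps.flip(im).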
Ejemplo n.º 55
0
        if count == max_count:
            break
        print(file, round(total_count2 * 100.0 / total_count, 2))
        total_count2 += 1

        row = fish_label(label)
        lbl_text = str(row[0]) + "," + str(row[1]) + "," + str(
            row[2]) + "," + str(row[3]) + "," + str(row[4]) + "," + str(
                row[5]) + "," + str(row[6]) + "," + str(row[7])
        name, ext = file.split(".")

        for t in range(8):
            img = Image.open(path + file, 'r')
            # print t
            if t == 1:
                img = ImageOps.mirror(img)
            if t == 2:
                img = ImageOps.flip(img)
            if t == 3:
                img = ImageOps.mirror(img)
                img = ImageOps.flip(img)
            if t == 4:
                img = img.rotate(90)
            if t == 5:
                img = img.rotate(90)
                img = ImageOps.mirror(img)
            if t == 6:
                img = img.rotate(270)
            if t == 7:
                img = img.rotate(270)
                img = ImageOps.mirror(img)
Ejemplo n.º 56
0
 def mirror(self, section):
     src = self._get_option(section, 'src')
     dst = self._get_option(section, 'dst', src)
     self.buffers[dst] = ImageOps.mirror(self.buffers[src])
Ejemplo n.º 57
0
ctx.dump(path)
image = Image.open(path)
image.filter(ImageFilter.GaussianBlur(blur_radius)).save(path)

print(' end', flush=True)

#-------------------------------------------------------------
# generate bottom mask image
#-------------------------------------------------------------
ctx.clear()
print('\n### Bottom Mask Image ###')
print('loading... ', end='', flush=True)
mask = load_layer(prefix + '.GBS')
print(' end', flush=True)

print('drawing... ', end='', flush=True)
ctx.render_layer(mask, settings=mask_settings, bgsettings=maskbg_settings)
print(' end', flush=True)

print('dumping... ', end='', flush=True)
ctx.dump('outputs/pcb-bottom-mask.png')
print(' end', flush=True)

#print('flipping... ', end='', flush=True)
base_image = Image.open('outputs/pcb-bottom-base.png')
hmap_image = Image.open('outputs/pcb-bottom-hmap.png')
mask_image = Image.open('outputs/pcb-bottom-mask.png')
ImageOps.mirror(base_image).save('outputs/pcb-bottom-base.png')
ImageOps.mirror(hmap_image).save('outputs/pcb-bottom-hmap.png')
ImageOps.mirror(mask_image).save('outputs/pcb-bottom-mask.png')
Ejemplo n.º 58
0
def export_image(
    input,
    output,
    timeout=20,
    palette="white",
    resolution=150,
    layers=None,
    command=None,
    mirror=False,
    showgui=False,
):
    """
    Exporting eagle .sch or .brd file into image file.
    If export is blocked somehow (e.g. popup window is displayed) then after timeout operation is canceled with exception.
    Problem can be investigated by setting 'showgui' flag.
    Exporting generates an image file. Only PNG format is supported

    :param input: eagle .sch or .brd file name
    :param output: image file name (e.g. 'eagle.png')
    :param palette: background color [None,black,white,colored]
    :param resolution: image resolution in dpi (50..2400)
    :param timeout: operation is canceled after this timeout (sec)
    :param showgui: eagle GUI is displayed
    :param layers: list, layers to be displayed ['top','pads']
    :param command: string, direct eagle command
    :param mirror: Bool
    :rtype: None
    """
    input = norm_path(input)
    output = norm_path(output)
    if not output.endswith(".png"):
        raise ValueError("use .png extension!")

    if palette:
        palette = palette.lower()

    if palette == "none":
        palette = None

    cmds = []
    if palette is not None:
        cmds += ["SET PALETTE {palette}".format(palette=palette)]

    if layers is not None:
        cmds += ["DISPLAY NONE " + " ".join(layers)]

    if command is not None:
        cmds += [command]

    with tempfile.TemporaryDirectory() as temp_dir:
        if mirror:
            fout = join(temp_dir, "out.png")
        else:
            fout = output

        commands = export_command(output=fout,
                                  output_type="image",
                                  commands=cmds,
                                  resolution=resolution)
        command_eagle(input=input,
                      timeout=timeout,
                      commands=commands,
                      showgui=showgui)

        if mirror:
            im = Image.open(fout)
            # save dpi info
            info = im.info
            im = ImageOps.mirror(im)
            im.save(output, **info)
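
# A minimal usage sketch (file names and layer list are illustrative only):
# export a mirrored bottom view of a board at 300 dpi.
export_image(
    "project.brd",
    "project-bottom.png",
    palette="black",
    resolution=300,
    layers=["Bottom", "Pads", "Vias"],
    mirror=True,
)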
Ejemplo n.º 59
0
def imageBorder(img, thickness, edgeFill="#ffffff00"):
    """
    Add a border of thickness pixels around the image

    :param img: the image to add a border to; can be a PIL image, numpy array, or anything imageRepr.pilImage accepts
    :param thickness: the border thickness in pixels.  Can be:
        int - border all the way around
        (w_border,h_border) - add border this big to each side
        (x,y,x2,y2) - add border this big to each side
    :param edgeFill: defines how to extend.  It can be:
        mirror - reflect the pixels leading up to the border
        repeat - repeat the image over again (useful with repeating textures)
        clamp - streak last pixels out to edge
        [background color] - simply fill with the given color

    TODO: combine into extendImageCanvas function
    """
    if not isinstance(thickness, (tuple, list)):
        thickness = (thickness, thickness, thickness, thickness)
    elif len(thickness) == 2:
        thickness = (thickness[0], thickness[1], thickness[0], thickness[1])
    thickness = [int(t) for t in thickness]
    img = imageRepr.pilImage(img)
    newSize = (int(img.size[0] + thickness[0] + thickness[2]),
               int(img.size[1] + thickness[1] + thickness[3]))
    if edgeFill == 'mirror':
        newImage = Image.new(img.mode, newSize)
        # top
        fill = ImageOps.flip(img.crop((0, 0, img.width, thickness[1])))
        newImage.paste(fill, (thickness[1], 0))
        # bottom
        fill = ImageOps.flip(
            img.crop((0, img.height - thickness[2], img.width, img.height)))
        newImage.paste(fill, (thickness[2], img.height + thickness[2]))
        # left
        fill = ImageOps.mirror(img.crop((0, 0, thickness[0], img.height)))
        newImage.paste(fill, (0, thickness[0]))
        # right
        fill = ImageOps.mirror(
            img.crop((img.width - thickness[3], 0, img.width, img.height)))
        newImage.paste(fill, (img.width + thickness[3], thickness[3]))
        # top-left corner
        #fill=ImageOps.mirror(ImageOps.flip(img.crop((0,0,thickness,thickness))))
        #newImage.paste(fill,(0,0))
        # top-right corner
        #fill=ImageOps.mirror(ImageOps.flip(img.crop((img.width-thickness,0,img.width,thickness))))
        #newImage.paste(fill,(img.width+thickness,0))
        # bottom-left corner
        #fill=ImageOps.mirror(ImageOps.flip(img.crop((0,img.height-thickness,thickness,img.height))))
        #newImage.paste(fill,(0,img.height+thickness))
        # bottom-right corner
        #fill=ImageOps.mirror(ImageOps.flip(img.crop((img.width-thickness,img.height-thickness,img.width,img.height))))
        #newImage.paste(fill,(img.width+thickness,img.height+thickness))
    elif edgeFill == 'repeat':
        newImage = Image.new(img.mode, newSize)
        # top
        fill = img.crop((0, 0, img.width, thickness[1]))
        newImage.paste(fill, (thickness[1], img.height + thickness[1]))
        # bottom
        fill = img.crop((0, img.height - thickness[2], img.width, img.height))
        newImage.paste(fill, (thickness[2], 0))
        # left
        fill = img.crop((0, 0, thickness[0], img.height))
        newImage.paste(fill, (img.width + thickness[0], thickness[0]))
        # right
        fill = img.crop((img.width - thickness[3], 0, img.width, img.height))
        newImage.paste(fill, (0, thickness[3]))
        # top-left corner (wraps to the bottom-right of the new canvas)
        fill = img.crop((0, 0, thickness[2], thickness[3]))
        newImage.paste(fill, (img.width + thickness[0], img.height + thickness[1]))
        # top-right corner
        fill = img.crop((img.width - thickness[0], 0, img.width, thickness[3]))
        newImage.paste(fill, (0, img.height + thickness[1]))
        # bottom-left corner
        fill = img.crop((0, img.height - thickness[1], thickness[2], img.height))
        newImage.paste(fill, (img.width + thickness[0], 0))
        # bottom-right corner
        fill = img.crop((img.width - thickness[0], img.height - thickness[1],
                         img.width, img.height))
        newImage.paste(fill, (0, 0))
    elif edgeFill == 'clamp':
        newImage = Image.new(img.mode, newSize)
        # top
        fill = img.crop((0, 0, img.width, 1)).resize((img.width, thickness[1]),
                                                     resample=Image.NEAREST)
        newImage.paste(fill, (thickness[1], 0))
        # bottom
        fill = img.crop((0, img.height - 1, img.width, img.height)).resize(
            (img.width, thickness[2]), resample=Image.NEAREST)
        newImage.paste(fill, (thickness[2], img.height + thickness[2]))
        # left
        fill = img.crop((0, 0, 1, img.height)).resize(
            (thickness[0], img.height), resample=Image.NEAREST)
        newImage.paste(fill, (0, thickness[0]))
        # right
        fill = img.crop((img.width - 1, 0, img.width, img.height)).resize(
            (thickness[3], img.height), resample=Image.NEAREST)
        newImage.paste(fill, (img.width + thickness[3], thickness[3]))
        # TODO: corners
        # top-left corner
        fill = img.crop((0, 0, 1, 1)).resize((thickness[0], thickness[1]),
                                             resample=Image.NEAREST)
        newImage.paste(fill, (0, 0))
        # top-right corner
        fill = img.crop((img.width - 1, 0, img.width, 1)).resize(
            (thickness[2], thickness[1]), resample=Image.NEAREST)
        newImage.paste(fill, (img.width + thickness[0], 0))
        # bottom-left corner
        fill = img.crop((0, img.height - 1, 1, img.height)).resize(
            (thickness[0], thickness[3]), resample=Image.NEAREST)
        newImage.paste(fill, (0, img.height + thickness[1]))
        # bottom-right corner
        fill = img.crop(
            (img.width - 1, img.height - 1, img.width, img.height)).resize(
                (thickness[2], thickness[3]), resample=Image.NEAREST)
        newImage.paste(fill, (img.width + thickness[0], img.height + thickness[1]))
    else:
        newImage = Image.new(img.mode, newSize, edgeFill)
    # splat the original image in the middle
    if True:
        if newImage.mode.endswith('A'):
            newImage.alpha_composite(img, dest=(thickness[0], thickness[1]))
        else:
            newImage.paste(img, (thickness[0], thickness[1]))
    return newImage
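
# A minimal usage sketch (hypothetical file name): imageRepr.pilImage is this
# module's own wrapper, so a PIL image, numpy array, etc. should all be accepted.
photo = Image.open("photo.jpg")
mirrored_border = imageBorder(photo, (10, 20), edgeFill="mirror")
solid_border = imageBorder(photo, 5, edgeFill="#ff0000ff")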
Ejemplo n.º 60
-1
    def test_sanity(self):

        ImageOps.autocontrast(hopper("L"))
        ImageOps.autocontrast(hopper("RGB"))

        ImageOps.autocontrast(hopper("L"), cutoff=10)
        ImageOps.autocontrast(hopper("L"), ignore=[0, 255])

        ImageOps.autocontrast_preserve(hopper("L"))
        ImageOps.autocontrast_preserve(hopper("RGB"))

        ImageOps.autocontrast_preserve(hopper("L"), cutoff=10)
        ImageOps.autocontrast_preserve(hopper("L"), ignore=[0, 255])

        ImageOps.colorize(hopper("L"), (0, 0, 0), (255, 255, 255))
        ImageOps.colorize(hopper("L"), "black", "white")

        ImageOps.crop(hopper("L"), 1)
        ImageOps.crop(hopper("RGB"), 1)

        ImageOps.deform(hopper("L"), self.deformer)
        ImageOps.deform(hopper("RGB"), self.deformer)

        ImageOps.equalize(hopper("L"))
        ImageOps.equalize(hopper("RGB"))

        ImageOps.expand(hopper("L"), 1)
        ImageOps.expand(hopper("RGB"), 1)
        ImageOps.expand(hopper("L"), 2, "blue")
        ImageOps.expand(hopper("RGB"), 2, "blue")

        ImageOps.fit(hopper("L"), (128, 128))
        ImageOps.fit(hopper("RGB"), (128, 128))

        ImageOps.flip(hopper("L"))
        ImageOps.flip(hopper("RGB"))

        ImageOps.grayscale(hopper("L"))
        ImageOps.grayscale(hopper("RGB"))

        ImageOps.invert(hopper("L"))
        ImageOps.invert(hopper("RGB"))

        ImageOps.mirror(hopper("L"))
        ImageOps.mirror(hopper("RGB"))

        ImageOps.posterize(hopper("L"), 4)
        ImageOps.posterize(hopper("RGB"), 4)

        ImageOps.solarize(hopper("L"))
        ImageOps.solarize(hopper("RGB"))