Example #1
    def test_sanity(self):

        im = hopper("L")

        ImageChops.constant(im, 128)
        ImageChops.duplicate(im)
        ImageChops.invert(im)
        ImageChops.lighter(im, im)
        ImageChops.darker(im, im)
        ImageChops.difference(im, im)
        ImageChops.multiply(im, im)
        ImageChops.screen(im, im)

        ImageChops.add(im, im)
        ImageChops.add(im, im, 2.0)
        ImageChops.add(im, im, 2.0, 128)
        ImageChops.subtract(im, im)
        ImageChops.subtract(im, im, 2.0)
        ImageChops.subtract(im, im, 2.0, 128)

        ImageChops.add_modulo(im, im)
        ImageChops.subtract_modulo(im, im)

        ImageChops.blend(im, im, 0.5)
        ImageChops.composite(im, im, im)

        ImageChops.offset(im, 10)
        ImageChops.offset(im, 10, 20)
Example #2
def make(src_path, out_path):
    src = Image.open(src_path)
    src = src.copy()
    (srcr, srcg, srcb, srca) = src.split()
    white = ImageChops.constant(src, 255)

    outr = cast_gradation(srcr, 0, 90)
    outg = cast_gradation(srcg, 0, 90)
    outb = cast_gradation(srcb, 0, 90)
    outa = srca.copy()

    outr = ImageChops.composite(srcr, white, srca)
    outg = ImageChops.composite(srcg, white, srca)
    outb = ImageChops.composite(srcb, white, srca)

    (shadow_a, shadow) = make_inset_shadow(srca)
    outr = ImageChops.subtract(outr, shadow, 1, 0)
    outg = ImageChops.subtract(outg, shadow, 1, 0)
    outb = ImageChops.subtract(outb, shadow, 1, 0)
    outa = ImageChops.lighter(outa, shadow_a)

    (highlight_a, highlight) = make_highlight(srca)
    outa = ImageChops.lighter(outa, highlight)

    outa = ImageChops.subtract(outa, ImageChops.constant(outa, 25), 1, 0)

    out = Image.merge('RGBA', (outr, outg, outb, outa))
    out.save(out_path)
Example #5
File: lapse.py  Project: dflat/timelapse
def overlay_motion_trace(frames_dir, diffs_dir):
    OUT_TMP = os.path.join(OVERLAYS_DIR, "overlay_%04d.png")
    if not os.path.exists(OVERLAYS_DIR):
        os.makedirs(OVERLAYS_DIR)

    raw_frames = deque(
        os.path.join(frames_dir, file_name)
        for file_name in os.listdir(frames_dir))
    diff_frames = deque(
        os.path.join(diffs_dir, file_name)
        for file_name in os.listdir(diffs_dir))

    cur_frame = Image.open(raw_frames.popleft())
    cur_frame.save(OUT_TMP % 0)

    markup_key_color = Image.new("RGB", cur_frame.size,
                                 (255, 0, 255))  # Magenta
    count = 1
    while raw_frames:
        raw = Image.open(raw_frames.popleft())
        diff = Image.open(diff_frames.popleft())
        comp = ImageChops.composite(image1=markup_key_color,
                                    image2=raw,
                                    mask=diff)
        comp.save(OUT_TMP % count)
        count += 1
Example #6
def extract_sprite(filepath):
    im = load_gd_image(filepath)
    if not im:
        return None

    im = im.convert('RGBA')
    im = key(im, __party_bg_color__)
    if not im:
        return None
    im = key(im, __party_shadow_color__)
    if not im:
        return None

    im = trim(im, pad=2)

    sil = im.getchannel('A').convert('1')
    shadow = ImageChops.logical_or(sil, ImageChops.offset(sil, 2, 2))
    shadow = ImageChops.logical_or(shadow, ImageChops.offset(sil, 0, -1))
    shadow = ImageChops.logical_or(shadow, ImageChops.offset(sil, -1, -1))
    shadow = ImageChops.logical_or(shadow, ImageChops.offset(sil, -1, 0))
    shadow = ImageChops.logical_or(shadow, ImageChops.offset(sil, -1, 1))
    shadow = ImageChops.logical_or(shadow, ImageChops.offset(sil, 0, 1))
    shadow = ImageChops.logical_or(shadow, ImageChops.offset(sil, 1, 1))
    shadow = ImageChops.logical_or(shadow, ImageChops.offset(sil, 1, 0))
    shadow = ImageChops.logical_or(shadow, ImageChops.offset(sil, 1, -1))

    result = ImageChops.composite(Image.new('RGBA', im.size, (0, 0, 0, 210)), Image.new('RGBA', im.size, (0, 0, 0, 0)), shadow)
    result = ImageChops.add(result, im)
    return result
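
The eight offset/logical_or calls above dilate the 1-bit silhouette by one pixel in every direction. A minimal sketch of the same dilation written as a loop over the 3x3 neighbourhood, assuming `sil` is a mode "1" image as above (the extra (2, 2) offset from the first line would still be OR'd in separately):

from PIL import ImageChops

def dilate_silhouette(sil):
    # OR the silhouette with itself shifted to each of the eight neighbours
    shadow = sil.copy()
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            shadow = ImageChops.logical_or(shadow, ImageChops.offset(sil, dx, dy))
    return shadow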
Example #7
    def run(self):
        size_x, size_y = (256, 256)

        imgs = []

        data_dem = self.requires()[0].load_data()
        # Height Map
        imgs.append(generate_height_map(data_dem))

        # Slope
        with self.input()[1].open("r") as input_f:
            data_slope = np.load(input_f)
        imgs.append(generate_image_slope(data_slope))

        # Curvature
        with self.input()[2].open("r") as input_f:
            data_curvature = np.load(input_f)
        imgs.append(generate_image_curvature(data_curvature))

        output_img = Image.new('RGBA', (size_x, size_y), (255, 255, 255, 255))
        for img in imgs:
            output_img = ImageChops.multiply(output_img, img)

        # Sea Map
        sea_img = generate_sea_map(data_dem)
        output_img = ImageChops.composite(sea_img, output_img, sea_img)

        with self.output().open("wb") as output_f:
            output_img.save(output_f, 'PNG')
Example #8
def generate_cloud_nm_channel(srcImg):

    # channels
    r, g, b, a = srcImg.split()

    # helper images
    gray = Image.new('L', srcImg.size, (127))
    yellowRGB = Image.new('RGB', srcImg.size, (255, 255, 0))

    # discard 'too yellow' values
    oneMinusYellowness = ImageChops.difference(Image.merge('RGB', (r, g, b)), yellowRGB)
    yR, yG, yB = oneMinusYellowness.split()
    oneMinusYellowness = ImageChops.lighter(yR, yG)
    yellowness = ImageChops.invert(oneMinusYellowness)
    yellowness = ImageChops.lighter(yellowness, gray)
    yellowness = ImageChops.subtract(yellowness, gray)
    yellowness = ImageChops.add(yellowness, yellowness)
    #yellowness.save("Y:/art/source/particles/textures/clouds/yellowness.png")

    halfRed = ImageChops.multiply(r, gray) # 50% red
    halfGreen = ImageChops.multiply(g, gray) # 50% green

    # compose
    dstImg = ImageChops.subtract(ImageChops.add(gray, halfRed), halfGreen)
    dstImg = ImageChops.composite(gray, dstImg, yellowness)

    return dstImg
Example #9
def make_merch(image_to_print, templates_path, template, offset=(0, 0)):
    source = Image.open(templates_path + template["img"])
    source = source.convert("RGBA")
    width, height = source.size
    long_side = max(source.size)

    mask = Image.open(templates_path + template["mask"])
    mask = mask.convert("RGBA")

    decal = Image.open(image_to_print)
    decal = ImageOps.fit(decal, mask.size, Image.LANCZOS)
    decal = decal.convert("RGBA")

    bg = Image.open(resources_path + "background.jpg")
    bg = ImageOps.fit(bg, (long_side + bleed, long_side + bleed),
                      Image.LANCZOS)

    cutout = ImageChops.multiply(decal, mask)

    transparent_canvas = Image.new('RGBA', bg.size, color=(0, 0, 0, 0))
    transparent_canvas.paste(cutout, offset, cutout)
    offset_decal = transparent_canvas

    displaced = ImageChops.multiply(offset_decal, source)
    product = ImageChops.composite(displaced, source, displaced)
    bg.paste(product, ((bg.width - width) // 2, (bg.height - height) // 2),
             product)

    watermark = Image.open(resources_path + "watermark.png").convert("RGBA")
    bg.paste(watermark, (20, bg.height - watermark.height - 20), watermark)

    final = bg.convert("RGB")
    return final
Example #10
def mkbanner():
    im = gentext("dazzler", 1100)
    blank = Image.new("L", im.size, 0)
    # im.transpose(Image.FLIP_TOP_BOTTOM).save("dazzler.png")
    hard = im.point(lambda x: [0, 255][x != 0])
    a = hard.load()
    pos = []
    for y in range(im.size[1]):
        for x in range(im.size[0]):
            if a[x, y] == 255:
                ImageDraw.floodfill(hard, (x, y), 128)
                hard.save("out.png")
                matte = hard.point(lambda x: [0, 255][x == 128])
                ImageDraw.floodfill(hard, (x, y), 0)
                s = ImageChops.composite(im, blank, matte)
                (x0, y0, x1, y1) = s.getbbox()

                s = s.crop((x0, y0, x1, y1))
                pos.append((x0, y0) + s.size + (s, ))
    pos = sorted(pos)

    for i, pp in enumerate(pos):
        (x, y, w, h, s) = pp
        s.transpose(Image.FLIP_TOP_BOTTOM).save("tmp.png")
        os.system("astcenc -c tmp.png %d.astc 10x8 -thorough" % i)
    return [(x, y, w, h) for (x, y, w, h, s) in pos]
Example #11
def handle_image_pixel():
    img1 = Image.open('avatar.jpeg')
    img2 = Image.open('scenery.jpeg')
    # Add: add(image1, image2, scale=1.0, offset=0)[source] :: out = ((image1 + image2) / scale + offset)
    ops_img = ImageChops.add(img1, img2, scale=2, offset=100)
    ops_img.save('chops_add.jpg')
    # .add_modulo(image1, image2)[source] #:: out = ((image1 + image2) % MAX)
    ops_img = ImageChops.add_modulo(img1, img2)
    ops_img.save('chops_add_modulo.jpg')
    #  blend(image1, image2, alpha)[source] Alias for PIL.Image.Image.blend().
    ops_img = ImageChops.blend(img1, img2.resize(img1.size), 0.5)
    ops_img.save('chops_blend.jpg')

    # composite(image1, image2, mask)[source] : Alias for PIL.Image.Image.composite().
    # A mask image.  This image can have mode "1", "L", or "RGBA", and must have the same size as the other two images
    ops_img = ImageChops.composite(img1, img2, Image.new('L', img1.size))
    ops_img.save('chops_composite.jpg')

    # .constant(image, value)[source]  # Fill a channel with a given grey level.
    ops_img = ImageChops.constant(img1, 200)
    ops_img.save('chops_constant.jpg')
    # darker(image1, image2)[source] :: # out = min(image1, image2)
    ops_img = ImageChops.darker(img1, img2)
    ops_img.save('chops_darker.jpg')
    # difference(image1, image2)[source] # out = abs(image1 - image2)
    ops_img = ImageChops.difference(img1, img2)
    ops_img.save('chops_difference.jpg')

    # duplicate(image)[source] Alias for PIL.Image.Image.copy().
    # PIL.ImageChops.invert(image)[source] # out = MAX - image
    # PIL.ImageChops.lighter(image1, image2)[source] out = max(image1, image2)
    # PIL.ImageChops.logical_and(image1, image2)[source] # Logical AND between two images. # out = ((image1 and image2) % MAX)
    # PIL.ImageChops.logical_or(image1, image2)[source] # Logical OR between two images. # out = ((image1 or image2) % MAX)
    # PIL.ImageChops.multiply(image1, image2)[source] # Superimposes two images on top of each other. # out = image1 * image2 / MAX
    # If you multiply an image with a solid black image, the result is black. If you multiply with a solid white image, the image is unaffected.

    # PIL.ImageChops.offset(image, xoffset, yoffset=None)[source]
    # Returns a copy of the image where data has been offset by the given distances.
    # Data wraps around the edges. If yoffset is omitted, it is assumed to be equal to xoffset.
    # Parameters:
    # xoffset – The horizontal distance.
    # yoffset – The vertical distance. If omitted, both distances are set to the same value.

    # PIL.ImageChops.screen(image1, image2)[source] #:: out = MAX - ((MAX - image1) * (MAX - image2) / MAX)
    # Superimposes two inverted images on top of each other.
    ops_img = ImageChops.screen(img1, img2)
    ops_img.save('chops_screen.jpg')
    # PIL.ImageChops.subtract(image1, image2, scale=1.0, offset=0)[source] #:: out = ((image1 - image2) / scale + offset)
    # Subtracts two images, dividing the result by scale and adding the offset. If omitted, scale defaults to 1.0, and offset to 0.0.
    ops_img = ImageChops.subtract(img1, img2)
    ops_img.save('chops_subtract.jpg')

    # PIL.ImageChops.subtract_modulo(image1, image2)[source] # out:: = ((image1 - image2) % MAX)
    # Subtract two images, without clipping the result.
    ops_img = ImageChops.subtract_modulo(img1, img2)
    ops_img.save('chops_subtract_modulo.jpg')
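
A minimal sketch of the operations that are only described in the comments above (duplicate, invert, lighter, multiply, offset), reusing the same avatar.jpeg/scenery.jpeg inputs; the second image is resized so both have the same dimensions, as ImageChops requires. logical_and/logical_or are omitted because they need mode "1" inputs.

from PIL import Image, ImageChops

def handle_image_pixel_more():
    img1 = Image.open('avatar.jpeg')
    img2 = Image.open('scenery.jpeg').resize(img1.size)

    ImageChops.duplicate(img1).save('chops_duplicate.jpg')      # copy of img1
    ImageChops.invert(img1).save('chops_invert.jpg')            # out = MAX - image
    ImageChops.lighter(img1, img2).save('chops_lighter.jpg')    # out = max(image1, image2)
    ImageChops.multiply(img1, img2).save('chops_multiply.jpg')  # out = image1 * image2 / MAX
    ImageChops.offset(img1, 50, 20).save('chops_offset.jpg')    # data wraps around the edges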
Example #12
 def mix_lr(self):
     # Blend the two images left-to-right
     w = self.img1.size[0]
     h = self.img1.size[1]
     ratio = float(w / h)
     self.img2 = self.img2.resize((self.img1.size[0], self.img1.size[1]),
                                  IM.ANTIALIAS)
     val = int(self.s1_value.get() * 4)
     # Build the transparency mask from the scale value
     mask = IM.new(mode='L', size=(400, int(400 * h / w)), color=0)
     if val <= 100:
         # Scale value near the left edge of the image
         for i in range(val):
             for j in range(mask.size[1]):
                 col = int(255 - i * 127 / val)
                 mask.putpixel((i, j), col)
         for i in range(val, val + 100):
             for j in range(mask.size[1]):
                 col = int(128 - (i - val) * 128 / 100)
                 mask.putpixel((i, j), col)
     elif val >= 300:
         # Scale value near the right edge of the image
         for i in range(val - 100):
             for j in range(mask.size[1]):
                 col = 255
                 mask.putpixel((i, j), col)
         for i in range(val - 100, val):
             for j in range(mask.size[1]):
                 col = int(255 - (i - val + 100) * 127 / 100)
                 mask.putpixel((i, j), col)
         for i in range(val, 400):
             for j in range(mask.size[1]):
                 col = int(128 - (i - val) * 128 / (400 - val))
                 mask.putpixel((i, j), col)
     else:
         # Scale value in the middle of the image
         for i in range(val - 100):
             for j in range(mask.size[1]):
                 col = 255
                 mask.putpixel((i, j), col)
         for i in range(val - 100, val + 100):
             for j in range(mask.size[1]):
                 col = int(255 - (i - val + 100) * 255 / 200)
                 mask.putpixel((i, j), col)
     # Quickly stretch the mask up to img1's size with resize
     mask = mask.resize((w, h), IM.ANTIALIAS)
     self.img3 = ICs.composite(self.img1, self.img2, mask)
     if ratio >= 1.77:
         self.photo3 = ITk.PhotoImage(
             self.img3.resize((755, int(h * 755 / w)), IM.ANTIALIAS))
     else:
         self.photo3 = ITk.PhotoImage(
             self.img3.resize((int(w * 425 / h), 425), IM.ANTIALIAS))
     self.canvas_result.create_image(380, 220, image=self.photo3)
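
For a plain full-width left-to-right ramp (rather than the piecewise ramp built pixel by pixel above), Pillow's built-in linear gradient avoids the putpixel loops entirely. A minimal sketch, assuming a simple linear mask is acceptable:

from PIL import Image, ImageChops

def simple_lr_mask(size):
    # Image.linear_gradient("L") is a 256x256 top-to-bottom ramp; rotate it to
    # run left-to-right, then stretch it to the target size
    return Image.linear_gradient("L").rotate(90).resize(size)

# usage: mixed = ImageChops.composite(img1, img2, simple_lr_mask(img1.size))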
Example #13
    def _reconstruct_trace(self, cim, gr_mask, mim, grid_name):
        '''Reconstruct the trace portions covered by the grid'''
        
        # Isolate the horizontal lines in the grid
        # Shift the grid mask left and right
        sl_grm = ImageChops.offset(gr_mask, -1, 0)
        sr_grm = ImageChops.offset(gr_mask, 1, 0)
        
        h_grm = ImageChops.logical_and(ImageChops.add(sr_grm, gr_mask), ImageChops.add(sl_grm, gr_mask))

        # Isolate the vertical  lines in the grid
        # Shift the grid mask up and down
        su_grm = ImageChops.offset(gr_mask, 0, -1)
        sd_grm = ImageChops.offset(gr_mask, 0, 1)
        
        v_grm = ImageChops.logical_and(ImageChops.add(sd_grm, gr_mask), ImageChops.add(su_grm, gr_mask))

        # Find where a horizontal grid line is bounded by trace pixels above and below
        su_mim = ImageChops.offset(mim, 0, -1)
        sd_mim = ImageChops.offset(mim, 0, 1)
        h_mim = ImageChops.logical_or(su_mim, sd_mim)
        h_mim = ImageChops.logical_or(h_mim, ImageChops.logical_or(ImageChops.invert(v_grm), h_grm))

        # Find where a vertical grid line is bounded by trace pixels left and right
        sl_mim = ImageChops.offset(mim, -1, 0)
        sr_mim = ImageChops.offset(mim, 1, 0)
        v_mim = ImageChops.logical_or(sl_mim, sr_mim)
        v_mim = ImageChops.logical_or(v_mim, ImageChops.logical_or(ImageChops.invert(h_grm), v_grm))

        # Fill in cross points of horiz. and vert. lines if upper left and lower right corners have
        # pixels from a trace
        sul_mim = ImageChops.offset(mim, -1, -1)
        sdr_mim = ImageChops.offset(mim, 1, 1)
        d_mim = ImageChops.logical_or(sul_mim, sdr_mim)
        d_mim = ImageChops.logical_or(d_mim, ImageChops.logical_or(h_grm, v_grm))
        
        recon = ImageChops.logical_and(h_mim, v_mim)
        recon = ImageChops.logical_and(recon, d_mim)

        # Mask out the grid borders from the reconstruction
        m = Image.new('1', IMAGE_SIZE, 1)
        m_drawer = ImageDraw.Draw(m)
        for box in self.settings.grid_boxes[grid_name]:
            m_drawer.rectangle(box, 0)
        del m_drawer
        
        recon = ImageChops.logical_or(recon, m)
        
        # Composite the reconstructed trace segments onto the colorized image
        ol_color = Image.new('RGB', IMAGE_SIZE, self.settings.colors['trace-reconstruction'])
        new_cim = ImageChops.composite(cim, ol_color, recon)
        
        return new_cim
Example #14
 def mix_fill(self):
     # Fill-blend the two images
     w = self.img1.size[0]
     h = self.img1.size[1]
     ratio = float(w / h)
     im0 = self.img1.convert('L')
     im1 = IOs.autocontrast(im0, cutoff=self.s1_value.get() / 3)
     self.img2 = self.img2.resize((self.img1.size[0], self.img1.size[1]),
                                  IM.ANTIALIAS)
     self.img3 = ICs.composite(self.img1, self.img2, im1)
     if ratio >= 1.77:
         self.photo3 = ITk.PhotoImage(
             self.img3.resize((755, int(h * 755 / w)), IM.ANTIALIAS))
     else:
         self.photo3 = ITk.PhotoImage(
             self.img3.resize((int(w * 425 / h), 425), IM.ANTIALIAS))
     self.canvas_result.create_image(380, 220, image=self.photo3)
Example #15
            frame.load()
            framef.close()
        except Exception as e:
            print("Error decoding image: %s" % repr(e))
            return resp.headers.get("Content-Type"), resp.data

        if self.composite is None:
            self.composite_bg = frame
            self.composite = frame
            self.composite_start = time()
        else:
            diff = PILImageChops.difference(self.composite_bg, frame)
            diff = diff.convert("L")
            #diff = diff.point(self.threshold)
            #print diff.mode
            self.composite = PILImageChops.composite(self.composite, frame, diff)
            #self.composite = PILImage.blend(self.composite, frame, 0.5)
            #self.composite = PILImageChops.multiply(self.composite, frame)

        #print self.url
        return resp.headers.get("content-type"), resp.data

    def update_db(self):
        im = self.fetch_image()
        if im is None:
            return
        mimetype, data = im

        image = Image(sensor=self.sensor, mimetype=mimetype, data=data)
        self.session.add(image)
Example #16
def pairs(image1, image2, mode, **kwargs):
    """
    la.image.PairsMode.Add
        Adds two images, dividing the result by scale and adding the offset. If omitted, scale defaults to 1.0, and offset to 0.0.
        out = ((image1 + image2) / scale + offset)
        
        you should append param `scale` and `offset`
        scale: int or float, defaults to 1.0
        offset: int or float, defaults to 0
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Add, scale=1.0, offset=0)
    
    la.image.PairsMode.Add_modulo
        Add two images, without clipping the result.
        out = ((image1 + image2) % 255)
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Add_modulo)
        
    la.image.PairsMode.Blend
        Creates a new image by interpolating between two input images.

        using a constant alpha.
        out = image1 * (1.0 - alpha) + image2 * alpha

        If alpha is 0.0, a copy of the first image is returned. 
        If alpha is 1.0, a copy of the second image is returned. 
        There are no restrictions on the alpha value. 
        If necessary, the result is clipped to fit into the allowed output range.
        
        you should append param `alpha`
        alpha: The interpolation alpha factor.
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Blend, alpha=0.2)
        
    la.image.PairsMode.Composite
        Create composite image by blending images using a transparency mask.
        
        you should append param `mask`
        mask: A mask image. This image can have mode “1”, “L”, or “RGBA”, and must have the same size as the other two images.
        eg.
        la.image.pairs(image1, image2, la.image.PairsMode.Composite, mask)
        
    la.image.PairsMode.Darker
        Compares the two images, pixel by pixel, and returns a new image containing the darker values.
        out = min(image1, image2)
        
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Darker)
        
    la.image.PairsMode.Difference
        Returns the absolute value of the pixel-by-pixel difference between the two images.
        out = abs(image1 - image2)
        
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Difference)
    
    la.image.PairsMode.Lighter
        Compares the two images, pixel by pixel, and returns a new image containing the lighter values.
        out = max(image1, image2)
        
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Lighter)
        
    la.image.PairsMode.Logical_and
        Logical AND between two images.
        Both of the images must have mode “1”. 
        If you would like to perform a logical AND on an image with a mode other than “1”, 
        try multiply() instead, using a black-and-white mask as the second image.
        out = ((image1 and image2) % 255)
        
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Logical_and)
        
    la.image.PairsMode.Logical_or
        Logical OR between two images.
        Both of the images must have mode “1”.
        out = ((image1 or image2) % 255)
        
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Logical_or)
        
    la.image.PairsMode.Logical_xor
        Logical XOR between two images.
        Both of the images must have mode “1”.
        out = ((bool(image1) != bool(image2)) % 255)
        
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Logical_xor)
        
    la.image.PairsMode.multiply
        Superimposes two images on top of each other.
        If you multiply an image with a solid black image, the result is black. 
        If you multiply with a solid white image, the image is unaffected.
        out = image1 * image2 / 255
        
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.multiply)
        
    la.image.PairsMode.SoftLight
        Superimposes two images on top of each other using the Soft Light algorithm
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.SoftLight)
        
    la.image.PairsMode.HardLight
        Superimposes two images on top of each other using the Hard Light algorithm
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.HardLight)
        
    la.image.PairsMode.Overlay
        Superimposes two images on top of each other using the Overlay algorithm
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Overlay)
        
    la.image.PairsMode.Screen
        Superimposes two inverted images on top of each other.
        out = 255 - ((255 - image1) * (255 - image2) / 255)
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Screen)
        
    la.image.PairsMode.Subtract
        Subtracts two images, dividing the result by scale and adding the offset. If omitted, scale defaults to 1.0, and offset to 0.0.
        out = ((image1 - image2) / scale + offset)
        
        you should append param `scale` and `offset`
        scale: int or float, defaults to 1.0
        offset: int or float, defaults to 0
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Subtract, scale=1.0, offset=0)
        
    la.image.PairsMode.Subtract_modulo
        Subtract two images, without clipping the result.
        out = ((image1 - image2) % 255)
        eg.
        la.image.pairs(image1, image2, mode=la.image.PairsMode.Subtract_modulo)


    Args:
        image1: a PIL instance. The first image.
        image2: a PIL instance. The second image.  Must have the same mode and size as the first image.
        mode: la.image.PairsMode
    Return:
        a PIL instance.
    """
    if 'scale' not in kwargs:
        kwargs['scale'] = 1.
    if 'offset' not in kwargs:
        kwargs['offset'] = 0
    if mode == 'add':
        return ImageChops.add(image1,
                              image2,
                              scale=kwargs['scale'],
                              offset=kwargs['offset'])
    elif mode == 'add_modulo':
        return ImageChops.add_modulo(image1, image2)
    elif mode == 'blend':
        if 'alpha' not in kwargs:
            raise ValueError("Missing parameter `alpha`")
        return ImageChops.blend(image1, image2, alpha=kwargs['alpha'])
    elif mode == 'composite':
        if 'mask' not in kwargs:
            raise ValueError("Missing parameter `mask`")
        return ImageChops.composite(image1, image2, mask=kwargs['mask'])
    elif mode == 'darker':
        return ImageChops.darker(image1, image2)
    elif mode == 'difference':
        return ImageChops.difference(image1, image2)
    elif mode == 'lighter':
        return ImageChops.lighter(image1, image2)
    elif mode == 'logical_and':
        return ImageChops.logical_and(image1, image2)
    elif mode == 'logical_or':
        return ImageChops.logical_or(image1, image2)
    elif mode == 'logical_xor':
        return ImageChops.logical_xor(image1, image2)
    elif mode == 'multiply':
        return ImageChops.multiply(image1, image2)
    elif mode == 'soft_light':
        return ImageChops.soft_light(image1, image2)
    elif mode == 'hard_light':
        return ImageChops.hard_light(image1, image2)
    elif mode == 'overlay':
        return ImageChops.overlay(image1, image2)
    elif mode == 'screen':
        return ImageChops.screen(image1, image2)
    elif mode == 'subtract':
        return ImageChops.subtract(image1,
                                   image2,
                                   scale=kwargs['scale'],
                                   offset=kwargs['offset'])
    elif mode == 'subtract_modulo':
        return ImageChops.subtract_modulo(image1, image2)
    else:
        raise ValueError("mode must be la.image.PairsMode param")
Example #17
def main():
    # Create target image
    minimap = Image.new('RGBA', (TILES * RES, TILES * RES),
                        color=(255, 255, 255))

    # Stitch image
    for x in range(TILES):
        for y in range(TILES):
            file_name = f'{MAP_LAYER}_Minimap-{TILES-x-1}_{TILES-y-1}.TGA'
            print(f'Stitching {file_name}')
            path = f'{IMG_DIR }\\{MAP_LAYER}\\{file_name}'
            tile = Image.open(path)

            minimap.paste(tile, [RES * x, RES * y])

    if DRAW_FLAG or DRAW_CAPTURE_ZONE or DRAW_BORDER:
        # Load flag data json
        with open(f'{IMG_DIR}\\{MAP_LAYER}\\{MAP_LAYER}_flags.json',
                  'r') as fh:
            flag_data = json.load(fh)

        # Make world to picture coordinates interpolaters
        wti_itp_x = make_wti_interpolater(flag_data['corner_1']['x'],
                                          flag_data['corner_2']['x'], 0,
                                          RES * TILES)

        wti_itp_y = make_wti_interpolater(flag_data['corner_1']['y'],
                                          flag_data['corner_2']['y'], 0,
                                          RES * TILES)

    if DRAW_CAPTURE_ZONE:
        # Create drawing object
        zones_overlay = Image.new('RGBA', (TILES * RES, TILES * RES),
                                  color=(0, 0, 0, 0))
        zones_draw = ImageDraw.Draw(zones_overlay)

        for _, flag in flag_data['flags'].items():
            print(f'Draw zone for {flag["display_name"]}')
            for obj_name, bbox in flag['bounds'].items():
                # Draw a spherical bounding box (circle)
                if 'sphere' in obj_name.lower():
                    x = wti_itp_x(bbox['origin']['x'])
                    y = wti_itp_y(bbox['origin']['y'])
                    r = wti_itp_x(flag_data['corner_1']['x'] + bbox['radius'])
                    zones_draw.ellipse(
                        [x - r / 2, y - r / 2, x + r / 2, y + r / 2],
                        fill=(0, 0, 255, 255),
                    )
                # Draw a square bounding box (rectangle)
                elif 'box' in obj_name.lower():
                    x = wti_itp_x(bbox['origin']['x'])
                    y = wti_itp_y(bbox['origin']['y'])
                    b_x = wti_itp_x(flag_data['corner_1']['x'] +
                                    bbox['box_extent']['x'])
                    b_y = wti_itp_y(flag_data['corner_1']['y'] +
                                    bbox['box_extent']['y'])
                    r = 180 - bbox['rotation']

                    rectangle = Image.new('RGBA', (int(b_x) * 2, int(b_y) * 2),
                                          color=(0, 0, 255, 255))
                    rectangle = rectangle.rotate(r,
                                                 expand=True,
                                                 fillcolor=(0, 0, 0, 0))

                    zones_overlay.alpha_composite(
                        rectangle, (int(x - rectangle.size[0] / 2),
                                    int(y - rectangle.size[1] / 2)))

        print('Compositing zones to image')
        transparency = Image.new('RGBA', (TILES * RES, TILES * RES),
                                 color=(0, 0, 0, 196))
        zones_overlay = ImageChops.subtract(zones_overlay, transparency)
        minimap.alpha_composite(zones_overlay)

    if DRAW_FLAG:
        # Create font object
        font = ImageFont.truetype(f'{RESOURCES_DIR}\\Roboto-Regular.ttf',
                                  size=169)

        # Load team flags
        team_flags = [
            Image.open(f'{RESOURCES_DIR}\\neutral_flag.TGA'),
            Image.open(
                f'{RESOURCES_DIR}\\{ faction_to_image[flag_data["team_one"]] }_flag.TGA'
            ),
            Image.open(
                f'{RESOURCES_DIR}\\{ faction_to_image[flag_data["team_two"]] }_flag.TGA'
            )
        ]

        # Upscale flags and make them translucent
        for i in range(3):
            team_flags[i] = team_flags[i].resize(
                [s * 3 for s in team_flags[i].size])

        # Create drawing object
        objectives_overlay = Image.new('RGBA', (TILES * RES, TILES * RES),
                                       color=(0, 0, 0, 0))
        draw = ImageDraw.Draw(objectives_overlay)

        # Draw a line connecting objectives
        for _, flag in flag_data['flags'].items():
            if flag['next_flag']:
                print(f'Draw objective line for {flag["display_name"]}')
                x = wti_itp_x(flag['location']['x'])
                y = wti_itp_y(flag['location']['y'])
                next_flag_loc = flag_data['flags'][
                    flag['next_flag']]['location']
                n_x = wti_itp_x(next_flag_loc['x'])
                n_y = wti_itp_y(next_flag_loc['y'])
                draw.line([x, y, n_x, n_y],
                          fill=(255, 255, 255, 169),
                          width=15)

        # Draw flags
        for _, flag in flag_data['flags'].items():
            print(f'Drawing flag for {flag["display_name"]}')
            x = wti_itp_x(flag['location']['x'])
            y = wti_itp_y(flag['location']['y'])
            objectives_overlay.alpha_composite(
                team_flags[flag['initial_team']],
                (int(x - team_flags[int(flag['initial_team'])].size[0] / 2),
                 int(y - team_flags[int(flag['initial_team'])].size[1] / 2)))

        print('Compositing objectives to image')

        transparency = Image.new('RGBA', (TILES * RES, TILES * RES),
                                 color=(0, 0, 0, 55))
        objectives_overlay = ImageChops.subtract(objectives_overlay,
                                                 transparency)
        minimap.alpha_composite(objectives_overlay)

        draw = ImageDraw.Draw(minimap)
        # Draw name of flag
        for _, flag in flag_data['flags'].items():
            print(f'Drawing name for {flag["display_name"]}')
            x = wti_itp_x(flag['location']['x'])
            y = wti_itp_y(flag['location']['y'])
            centered_text(draw, font, flag['display_name'], x, y - 230)

    if DRAW_BORDER:
        print('Parsing border line')
        dict_border = {}
        start = None

        # Store unordered line segments as a hash table where key is start and value is end
        for segment in flag_data['border']:
            s_x = wti_itp_x(segment['start']['x'])
            s_y = wti_itp_y(segment['start']['y'])
            e_x = wti_itp_x(segment['end']['x'])
            e_y = wti_itp_y(segment['end']['y'])
            if not start:
                start = (s_x, s_y)
            dict_border[(s_x, s_y)] = (e_x, e_y)

        # Store line segments in order
        in_order_border = []
        current_point = start
        while True:
            next_point = dict_border[current_point]
            in_order_border += [p for p in current_point
                                ] + [p for p in next_point]

            if next_point == start:
                break
            else:
                current_point = next_point

        print('Creating border')
        # Create border mask similar to the one found in squad
        blend_mask = Image.new('RGBA', (TILES * RES, TILES * RES),
                               color=(255, 255, 255, 255))
        bm_draw = ImageDraw.Draw(blend_mask)
        bm_draw.polygon(in_order_border, fill=(0, 0, 0, 0))

        # Blend outside of the border
        border_outside = Image.new('RGBA', (TILES * RES, TILES * RES),
                                   color=(0, 0, 0, 128))
        bo_draw = ImageDraw.Draw(border_outside)
        bo_draw.line(in_order_border, fill=(0, 0, 0, 255), width=100)
        border_outside = border_outside.filter(ImageFilter.BoxBlur(100))

        # Blend inside of the border
        border_inside = Image.new('RGBA', (TILES * RES, TILES * RES),
                                  color=(0, 0, 0, 0))
        bi_draw = ImageDraw.Draw(border_inside)
        bi_draw.polygon(in_order_border, fill=(0, 0, 0, 0))
        bi_draw.line(in_order_border, fill=(0, 0, 0, 128), width=50)
        border_inside = border_inside.filter(ImageFilter.BoxBlur(50))

        print('Creating border composite')
        border = ImageChops.composite(border_outside, border_inside,
                                      blend_mask)
        print('Compositing border to image')
        minimap.alpha_composite(border)

    print('Saving minimap')
    minimap = minimap.convert('RGB')
    minimap.save(f'{IMG_DIR}\\{MAP_LAYER}_stitched.jpg', 'JPEG', quality=90)
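
A minimal sketch of the opacity trick used twice above: ImageChops.subtract with a solid (0, 0, 0, N) image leaves the colour channels untouched and lowers every alpha value by N (clipped at 0), dimming an overlay before it is alpha-composited.

from PIL import Image, ImageChops

overlay = Image.new('RGBA', (64, 64), (255, 0, 0, 255))   # fully opaque red
dimmer = Image.new('RGBA', (64, 64), (0, 0, 0, 196))
dimmed = ImageChops.subtract(overlay, dimmer)             # alpha drops from 255 to 59
base = Image.new('RGBA', (64, 64), (255, 255, 255, 255))
base.alpha_composite(dimmed)                              # faint red over white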
Example #18
def diff_visual(from_file, to_file, page=0, output=None):
    with tempfile.TemporaryDirectory() as workdir:
        pages = []
        first = last = 1
        preview = False

        if not output:
            output = os.path.join(
                workdir, "{from_file}-{to_file}.pdf".format(
                    from_file=os.path.basename(from_file).split('.')[0],
                    to_file=os.path.basename(to_file).split('.')[0]))
            preview = True

        if not page:
            try:
                with open(from_file) as _from:
                    with open(to_file) as _to:
                        _from = _from.read()
                        _to = _to.read()
                        # TODO use pyeagle!
                        if not '<schematic' in _from and not '<board' in _from:
                            raise SyntaxError(
                                'File {} is not an eagle/xml design file!'.
                                format(from_file))
                        if not '<schematic' in _to and not '<board' in _to:
                            raise SyntaxError(
                                'File {} is not an eagle/xml design file!'.
                                format(to_file))
                        if not _from.count('<sheet>') == _to.count('<sheet>'):
                            raise Exception(
                                'File {} does not have the same number of sheets as {}'
                                .format(from_file, to_file))
                        last = _from.count('<sheet>') or _from.count('<board')
            except Exception as err:
                log.warning(err)
                log.warning("Considering file as an Eagle v5 binary format.")
                first = last = page
        else:
            first = last = page

        for page in range(first, last + 1):
            log.info("Checking page {} of {}".format(page, last))

            a_im_l = to_png(from_file, page=page)
            b_im_l = to_png(to_file, page=page)

            bbox = None
            for k in sorted(a_im_l.keys()):
                a_im = a_im_l[k]
                b_im = b_im_l[k]

                # make the sizes equal
                # if a sheet contains the filename, it is updated with the temporary name
                # and may thus change the size of the image
                width = max((a_im.size[0], b_im.size[0]))
                height = max((a_im.size[1], b_im.size[1]))
                a_im2 = Image.new("L", (width, height))
                a_im2.paste(a_im, (0, 0))
                a_im = a_im2
                a_im2 = None
                b_im2 = Image.new("L", (width, height))
                b_im2.paste(b_im, (0, 0))
                b_im = b_im2
                b_im2 = None

                if bbox is None:
                    bb_a = a_im.getbbox()
                    bb_b = b_im.getbbox()
                    bbox = (min(bb_a[0], bb_b[0]), min(bb_a[1], bb_b[1]),
                            max(bb_a[2], bb_b[2]), max(bb_a[3], bb_b[3]))
                if bbox:
                    a_im = a_im.crop(bbox)
                    b_im = b_im.crop(bbox)

                added = ImageOps.autocontrast(ImageChops.subtract(b_im, a_im),
                                              0)
                deled = ImageOps.autocontrast(ImageChops.subtract(a_im, b_im),
                                              0)

                a_mask = added.point(lambda p: p == 0 and 255).convert("1")
                d_mask = deled.point(lambda p: p == 0 and 255).convert("1")

                deled = ImageOps.colorize(deled, "#000", "#33f")
                added = ImageOps.colorize(added, "#000", "#f33")
                same = ImageOps.colorize(a_im, "#000", "#aaa")

                c1 = ImageChops.composite(same, deled, d_mask)
                im = ImageChops.composite(c1, added, a_mask)
                font = ImageFont.truetype(
                    '/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf', 32)
                ImageDraw.Draw(im).text((20, 20),
                                        '%s' % k[5:-4], (0, 255, 0),
                                        font=font)
                fname = "{from_file}-{to_file}-page_{page}-{type}.pdf".format(
                    from_file=os.path.basename(from_file.split('.')[0]),
                    to_file=os.path.basename(to_file.split('.')[0]),
                    page=page,
                    type=k)
                im.save(os.path.join(workdir, fname))
                pages.append(os.path.join(workdir, fname))

        if len(pages) > 0:
            pdf_concatenate(output, pages)
            log.info("Diff output in file: {}".format(output))
            if preview:
                try:
                    subprocess.call([config.OPEN, output])
                except FileNotFoundError as err:
                    log.warning("Cannot find open utility: `{}`".format(
                        config.OPEN))
                    log.warning("Open your file manually to check it")
                #input("Press enter to flush all outputs")
        else:
            log.error("No diff output.")
Example #19
from PIL import Image, ImageChops, ImageDraw

import quad_coll

depths = [4, 8, 16]
sizes = [2, 4, 6, 8, 10]
ratios = [.01, .05, .1, .25]
opacities = [.1, .25, .5, .75, .9]
n = 0

img = Image.open('quarter.png')
for d in depths:
    quad_coll.MAX_DEPTH = d
    for s in sizes:
        quad_coll.MIN_SIZE = s
        for r in ratios:
            quad_coll.MIN_RATIO = r
            for o in opacities:
                quad_coll.MIN_OPACITY = o

                tree = quad_coll.makeTree(img)

                img2 = Image.new('RGBA', img.size)
                quad_coll.drawTree(img2, tree)
                draw = ImageDraw.Draw(img2)
                draw.text((0, 0), 'max tree depth: %d' % d)
                draw.text((0, 10), 'min node size: %d pixels' % s)
                draw.text((0, 20), 'min node fill ratio: %f pixels' % r)
                draw.text((0, 30), 'min filled pixel opacity: %f alpha' % o)

                ImageChops.composite(img2, img, img2).save('text%02d.png' % n)
                n = n + 1
Example #20
File: image.py  Project: retr0-cyber/p5
    def blend(self, other, mode):
        """Blend the specified image using the given blend mode.

        :param other: The image to be blended to the current image.
        :type other: p5.PImage

        :param mode: Blending mode to use. Should be one of { 'BLEND',
            'ADD', 'SUBTRACT', 'LIGHTEST', 'DARKEST', 'MULTIPLY',
            'SCREEN',}
        :type mode: str

        :raises AssertionError: When the dimensions of img do not
            match the dimensions of the current image.

        :raises KeyError: When the blend mode is invalid.

        """
        mode = mode.lower()
        assert self.size == other.size, "Images are of different sizes!"

        if self._img.mode != 'RGBA':
            self._img = self._img.convert('RGBA')
            self._reload = True

        if other._img.mode != 'RGBA':
            other_img = other._img.convert('RGBA')
        else:
            other_img = other._img

        # todo: implement missing filters -- abhikpal (2018-08-14)
        if mode == 'blend':
            self._img = ImageChops.composite(self._img, other_img, self._img)
        elif mode == 'add':
            self._img = ImageChops.add(self._img, other_img)
        elif mode == 'subtract':
            self._img = ImageChops.subtract(self._img, other_img)
        elif mode == 'lightest':
            self._img = ImageChops.lighter(self._img, other_img)
        elif mode == 'darkest':
            self._img = ImageChops.darker(self._img, other_img)
        elif mode == 'difference':
            raise NotImplementedError
        elif mode == 'exclusion':
            raise NotImplementedError
        elif mode == 'multiply':
            self._img = ImageChops.multiply(self._img, other_img)
        elif mode == 'screen':
            self._img = ImageChops.screen(self._img, other_img)
        elif mode == 'overlay':
            raise NotImplementedError
        elif mode == 'hard_light':
            raise NotImplementedError
        elif mode == 'soft_light':
            raise NotImplementedError
        elif mode == 'dodge':
            raise NotImplementedError
        elif mode == 'burn':
            raise NotImplementedError
        else:
            raise KeyError("'{}' blend mode not found".format(mode.upper()))

        self._reload = True
        return self
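
A minimal sketch of what the 'lightest' and 'darkest' branches above reduce to on plain RGBA PIL images: a per-channel max and min respectively.

from PIL import Image, ImageChops

a = Image.new('RGBA', (32, 32), (200, 50, 0, 255))
b = Image.new('RGBA', (32, 32), (100, 150, 0, 255))
lightest = ImageChops.lighter(a, b)   # (200, 150, 0, 255) everywhere
darkest = ImageChops.darker(a, b)     # (100, 50, 0, 255) everywhere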
Example #21
                                           transparency_sensitivity):
    palette_img = img.convert(PALETTE_MODE)
    for idx, val in enumerate(img.getdata()):
        alpha = val[3]
        width, height = palette_img.size
        x, y = divmod(idx, width)
        if alpha < sensitivity:
            palette_img.putpixel((y, x), trans_loc)
    return palette_img.convert(RGBA_MODE)


for hue in range(0, 360, args.hue_rate):
    hsv_string = "hsv({hue},100%,100%)".format(hue=hue)
    im = Image.new(RGBA_MODE, base_image.size, hsv_string)
    blended = ImageChops.blend(base_image, im, args.blend_amount)
    composited = ImageChops.composite(blended, base_image, base_image)
    images.append(composited)

if args.pdb:
    import pdb
    pdb.set_trace()

gif_encoder_args = {
    "duration": args.duration,
    "loop": 0,
    "optimize": args.optimize
}

transparency_loc = get_transparency_palette_loc(base_image)
if DEBUG:
    print(f"DEBUG - transparency_loc was {transparency_loc}")
Example #22
 def transformInner(self, imgs):
     mask = imgs[2].convert(self.args["mode"]).point(self.transformFunc).convert("L")
     return ImageChops.composite(imgs[0], imgs[1], mask)
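
A standalone sketch of the same pattern with a hypothetical fixed threshold standing in for self.transformFunc: derive an "L" mask from a third image and use it to choose between the first two.

from PIL import ImageChops

def composite_by_threshold(img_a, img_b, key_img, cutoff=128):
    # bright areas of key_img select img_a, dark areas select img_b
    mask = key_img.convert("L").point(lambda p: 255 if p >= cutoff else 0)
    return ImageChops.composite(img_a, img_b, mask)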
Example #24
                    quad_coll.MIN_RATIO = r
                    for o in opacities:
                        quad_coll.MIN_OPACITY = o

                        tree = quad_coll.makeTree(img)

                        img2 = Image.new('RGBA', img.size)
                        quad_coll.drawTree(img2, tree, True)
                        draw = ImageDraw.Draw(img2)
                        draw.text((0, 0), 'max tree depth: %d' % d)
                        draw.text((0, 10), 'min node size: %d pixels' % s)
                        draw.text((0, 20), 'min node fill ratio: %f pixels' % r)
                        draw.text((0, 30), 'min filled pixel opacity: %f alpha' % o)

                        outfilename = dirName + '/' + imgName + ('%04d' % n) + f[f.rfind('.'):]
                        ImageChops.composite(img2, img, img2).save(outfilename)
                        n = n + 1

                        error = quad_coll.calcError(img, tree)
                        csv.write('%s,%s,%d,%d,%f,%f,%d,%d,%d,%d,%d,%d,%d\n' % (\
                            f,\
                            outfilename,\
                            d,\
                            s,\
                            r,\
                            o,\
                            tree.getNumNodes(),\
                            tree.getNumLeafNodes(),\
                            error['imgPixels'],\
                            error['treePixels'],\
                            error['falsePositives'],\
Example #25
fade_to_background_indices = np.linspace(1,
                                         2001,
                                         fade_to_background_frames,
                                         dtype=np.int64)
fade_to_foreground_indices = np.linspace(1999,
                                         0,
                                         fade_to_foreground_frames,
                                         dtype=np.int64)

background_img = Image.open(Path("./perth_wires.png"))
foreground_img = Image.open(Path("./perth_street_map.png"))

mask_array = np.ones((1430, 2000), dtype=np.uint8) * 255
mask = Image.fromarray(mask_array)
composite_img = ImageChops.composite(foreground_img, background_img, mask)

figure = plt.figure(figsize=(20, 14.3))
file_writer = FFMpegWriter(fps=FRAME_RATE)
with file_writer.saving(figure, "perth_wires.mp4", dpi=100):
    render_axes = figure.add_axes([0.0, 0.0, 1.0, 1.0])
    render_axes.axis("off")
    render_axes.imshow(composite_img)
    for frame_number in range(static_street_frames):
        file_writer.grab_frame(facecolor=BACKGROUND_COLOUR)

    for frame_number in range(fade_to_background_frames):
        figure.clear()
        render_axes = figure.add_axes([0.0, 0.0, 1.0, 1.0])
        render_axes.axis("off")
        mask_array[:, :fade_to_background_indices[frame_number]] = 0