Example #1
0
 def generate(self, logfilename, delimiter, t, x, y, t_start, t_end, step, reset_dotlayer_every_step=False):
     """Generates the heatmap PNG image files.

     @param logfilename: name of csv-formatted log file
     @param delimiter: delimiter between columns in csv log file
     @param t: number of log file's column containing time
     @param x: number of log file's column containing x-value
     @param y: number of log file's column containing y-value
     @param t_start: start of output time interval
     @param t_end: end of output time interval
     @param step: size/length of a log accumulation step
     @param reset_dotlayer_every_step: draw second heatmap over first one etc. or clear for each image
     """
     dotlayer = Image.new('RGBA', self.mapsize, 'white')
     for timestep, timestepdata in accumulated_read(logfilename, delimiter, t, x, y, t_start, t_end, step=step):
         # Multiply each dot into the accumulation layer: overlapping
         # dots darken, which is what produces the heat effect.
         for xy in timestepdata:
             dot = Image.new('RGBA', self.mapsize, 'white')
             dot.paste(self.dot, self.__translate(xy))
             dotlayer = ImageChops.multiply(dotlayer, dot)

         heatmask_color = dotlayer.copy()
         self._colorize(heatmask_color, colorschemes.schemes['fire'], 200)
         # Stamp the timestep number into the top-left corner.
         # ('% 4d' already yields a str; the original wrapped it in a
         # redundant str() call.)
         draw = ImageDraw.Draw(heatmask_color)
         draw.text((10, 10), '% 4d' % timestep, font=self.sans18, fill=ImageColor.colormap['black'])
         del draw
         Image.composite(heatmask_color, self.map, heatmask_color).save('/tmp/demo-heatmap1-%05d.png' % timestep)
         if reset_dotlayer_every_step:
             dotlayer = Image.new('RGBA', self.mapsize, 'white')
Example #2
0
File: wim.py Project: pombreda/wim
def main(inplace, fontsize, text, filename):
    """Draw *text* centered at the bottom of *filename* and save the result.

    When *inplace* is true the original file is overwritten; otherwise the
    output name gets ``.wim`` inserted before the extension.
    (The unused local ``opacity`` and the commented-out alpha code that
    referenced it have been removed.)
    """
    ttf = 'arial.ttf'

    im = Image.open(filename)

    if inplace:
        outfile = filename
    else:
        # Insert .wim between file name and extension in the outfile name.
        parts = splitext(filename)
        outfile = parts[0] + '.wim' + parts[1]

    if im.mode != 'RGBA':
        im = im.convert('RGBA')

    watermark = Image.new('RGBA', im.size, (0, 0, 0, 0))

    font = ImageFont.truetype(ttf, fontsize)
    text_width, text_height = font.getsize(text)

    draw = ImageDraw.Draw(watermark, 'RGBA')
    # Center horizontally; sit flush against the bottom edge.
    draw.text(((watermark.size[0] - text_width) / 2,
              (watermark.size[1] - text_height)),
              text, font=font)

    try:
        Image.composite(watermark, im, watermark).save(outfile)
    except IOError:
        print('Cannot add watermark to image: ', filename)
Example #3
0
    def rendered(self):
        """Compose the display image.

        The screen rectangle from get_screencorners() stays sharp while
        the rest shows the blurred copy; when show_handles is set, a
        frame plus quarter-length corner handles is drawn in XOR style.
        Returns None when no image is loaded.
        """
        if self.image is None: return None

        top, left, right, bottom = self.get_screencorners()

        for value in (top, left, right, bottom):
            assert isinstance(value, int), value

        # Sharp interior, blurred exterior.
        sharp_mask = Image.new('1', self.image.size, 0)
        sharp_mask.paste(1, (left, top, right, bottom))
        composed = Image.composite(self.image, self.blurred, sharp_mask)

        if self.show_handles:
            quarter_x = (right - left) / 4
            quarter_y = (bottom - top) / 4

            handle_mask = Image.new('1', self.image.size, 1)
            pen = ImageDraw.Draw(handle_mask)

            segments = [
                # Outer frame.
                [left, top, right, top],
                [left, bottom, right, bottom],
                [left, top, left, bottom],
                [right, top, right, bottom],
                # Quarter-length corner handles.
                [left + quarter_x, top, left + quarter_x, top + quarter_y, left, top + quarter_y],
                [right - quarter_x, top, right - quarter_x, top + quarter_y, right, top + quarter_y],
                [left + quarter_x, bottom, left + quarter_x, bottom - quarter_y, left, bottom - quarter_y],
                [right - quarter_x, bottom, right - quarter_x, bottom - quarter_y, right, bottom - quarter_y],
            ]
            for segment in segments:
                pen.line(segment, fill=0)

            composed = Image.composite(composed, self.xor, handle_mask)
        return composed
Example #4
0
def watermark_img(file_name):
    """Draw the configured copyright text into *file_name* in place."""
    # CHANGE TO YOUR CONTENT
    font_name = 'fonts/HelveticaNeue.ttf'
    watermark_text = u"\u00a9 Js Lim"
    font_size = 2
    opacity = 0.5
    # margin, relative to total width of image
    watermark_scale = 0.03

    img = Image.open(file_name).convert('RGBA')

    # NOTE(review): both offsets use img.size[0] (the width); the second
    # one presumably should be img.size[1] — kept as-is to preserve
    # behavior, confirm against the caller.
    watermark_position = (int(img.size[0] * watermark_scale), int(img.size[0] * watermark_scale))

    # create watermark layer
    watermark_max_width = int(img.size[0] * 0.2)
    watermark = Image.new('RGBA', img.size, (0,0,0,0))
    draw = ImageDraw.Draw(watermark, 'RGBA')

    # Grow the font until the rendered text reaches ~20% of the image
    # width.  (The original loaded the size-2 font twice in a row; the
    # duplicate load is removed.)
    font = ImageFont.truetype(font_name, font_size)
    n_width, n_height = font.getsize(watermark_text)
    while (n_width < watermark_max_width):
        font_size += 2
        font = ImageFont.truetype(font_name, font_size)
        n_width, n_height = font.getsize(watermark_text)

    draw.text(((img.size[0] - n_width - watermark_position[0]), (img.size[1] - n_height - watermark_position[1])), watermark_text, font=font)

    watermark = reduce_opacity(watermark, opacity)

    Image.composite(watermark, img, watermark).save(file_name)
Example #5
0
def text_watermark(img, text, out_file="test4.jpg", angle=23, opacity=0.50):
    '''
    Add a text watermark styled as a translucent overlay —
    effectively a PNG layer merge.
    '''
    watermark = Image.new('RGBA', img.size, (255, 255, 255)) # NOTE: this gives an opaque white base layer; drop the (255,255,255) argument for a transparent one

    FONT = "msyh.ttf"
    size = 2

    n_font = ImageFont.truetype(FONT, size)                                      # load the font
    n_width, n_height = n_font.getsize(text)
    text_box = min(watermark.size[0], watermark.size[1])
    while (n_width+n_height < text_box):
        size += 2
        n_font = ImageFont.truetype(FONT, size=size)
        n_width, n_height = n_font.getsize(text)                                   # grow the text until it nears the smaller image dimension

    text_width = (watermark.size[0] - n_width) / 2
    text_height = (watermark.size[1] - n_height) / 2
    # watermark = watermark.resize((text_width,text_height), Image.ANTIALIAS)  
    draw = ImageDraw.Draw(watermark, 'RGBA')                                       # pen for the watermark layer
    draw.text((text_width, text_height),
            text, font=n_font, fill="#21ACDA")
    # Rotate the layer, then fade it via its alpha channel.
    watermark = watermark.rotate(angle, Image.BICUBIC)
    alpha = watermark.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    watermark.putalpha(alpha)
    Image.composite(watermark, img, watermark).save(out_file, 'JPEG')
    # Python 2 print statement ("text watermark succeeded") — this
    # function targets Python 2.
    print u"文字水印成功"
Example #6
0
    def addWatermark(self, inFile, outFile, textWatermark, font_file, qualityImg, wtm_type='fullscreen', angle=26, opacity=0.25):
        """Render textWatermark diagonally over inFile and save it as JPEG to outFile.

        wtm_type 'fullscreen' scales the text up to the image width,
        'stamp' to one fifth of it; any other value leaves the tiny
        initial font size unchanged.
        """
        original_img = Image.open(inFile).convert('RGB')
        empty_img = Image.new('RGBA', original_img.size, (0,0,0,0))

        font_size = 2
        wtm_text = ImageFont.truetype(font_file, font_size)
        wtm_width, wtm_height = wtm_text.getsize(textWatermark)

        # Position index passed to getPosition().  The original left idp
        # unassigned when wtm_type matched neither branch, which made the
        # getPosition() call below raise NameError.
        idp = 0
        if wtm_type == 'fullscreen':
            while wtm_width + wtm_height < empty_img.size[0]:
                font_size += 2
                wtm_text = ImageFont.truetype(font_file, font_size)
                wtm_width, wtm_height = wtm_text.getsize(textWatermark)
        elif wtm_type == 'stamp':
            idp = 4
            while wtm_width + wtm_height < empty_img.size[0] / 5:
                font_size += 2
                wtm_text = ImageFont.truetype(font_file, font_size)
                wtm_width, wtm_height = wtm_text.getsize(textWatermark)

        wtm_size = [wtm_width, wtm_height]

        wtm_draw = ImageDraw.Draw(empty_img, 'RGBA')
        wtm_draw.text(self.getPosition(empty_img.size, wtm_size, idp), textWatermark, font=wtm_text)

        # Rotate the text layer, then fade it via its alpha channel.
        empty_img = empty_img.rotate(angle, Image.BICUBIC)
        alpha = empty_img.split()[3]
        alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
        empty_img.putalpha(alpha)
        Image.composite(empty_img, original_img, empty_img).save(outFile, 'jpeg', quality=qualityImg)
Example #7
0
def text_watermark(img, text, out_file="test4.jpg", angle=23, opacity=0.50):
    '''
    Add a text watermark styled as a translucent overlay — effectively a
    PNG layer merge.
    http://www.pythoncentral.io/watermark-images-python-2x/
    This is where the well-known ImportError("The _imagingft C module is
    not installed") can occur; installing Pillow fixes it: pip install Pillow
    '''
    # NOTE: the base layer is opaque white, so the rotated layer's corners
    # will show as white over the photo.
    watermark = Image.new('RGBA', img.size, (255,255,255))
    FONT = "msyh.ttf"
    size = 2

    n_font = ImageFont.truetype(FONT, size)                                       # load the font
    n_width, n_height = n_font.getsize(text)
    text_box = min(watermark.size[0], watermark.size[1])
    while (n_width+n_height <  text_box):
        size += 2
        n_font = ImageFont.truetype(FONT, size=size)
        n_width, n_height = n_font.getsize(text)                                   # grow the text until it nears the smaller image dimension

    text_width = (watermark.size[0] - n_width) / 2
    text_height = (watermark.size[1] - n_height) / 2
    #watermark = watermark.resize((text_width,text_height), Image.ANTIALIAS)
    draw = ImageDraw.Draw(watermark, 'RGBA')                                       # pen for the watermark layer
    draw.text((text_width,text_height),
              text, font=n_font, fill="#21ACDA")
    # Rotate the layer, then fade it via its alpha channel.
    watermark = watermark.rotate(angle, Image.BICUBIC)
    alpha = watermark.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    watermark.putalpha(alpha)
    Image.composite(watermark, img, watermark).save(out_file, 'JPEG')
    # Python 2 print statement ("text watermark succeeded") — this
    # function targets Python 2.
    print u"文字水印成功"
Example #8
0
    def addImages2(self, args):
        """For each (one, two, three) combination, composite the two images
        named around the 'X' placeholder, then return the option image
        names whose difference from the final composite is below 1,
        sorted by that difference (ascending).

        Note: only the composite of the *last* matching combination is
        compared, mirroring the original behavior.
        """
        # If no combination contains an 'X', result stays None and the
        # difference computation below will fail loudly.
        result = None
        for combination in args:
            one, two, three = combination[0], combination[1], combination[2]
            if one == 'X':
                two = Image.open(self.getImagePathByName(two)).convert(mode='L', dither=Image.NONE)
                three = Image.open(self.getImagePathByName(three)).convert(mode='L', dither=Image.NONE)
                result = Image.composite(two, three, three)
            elif two == 'X':
                one = Image.open(self.getImagePathByName(one)).convert(mode='L', dither=Image.NONE)
                three = Image.open(self.getImagePathByName(three)).convert(mode='L', dither=Image.NONE)
                result = Image.composite(one, three, three)
            elif three == 'X':
                one = Image.open(self.getImagePathByName(one)).convert(mode='L', dither=Image.NONE)
                two = Image.open(self.getImagePathByName(two)).convert(mode='L', dither=Image.NONE)
                result = Image.composite(one, two, two)

        diff = {}
        # The original's [1:2][0] is simply index [1]: the second row.
        optionImageNames = self.get3x3Images()[1]
        for optionImageName in optionImageNames:
            optionImage = Image.open(self.getImagePathByName(optionImageName)).convert(mode='L', dither=Image.NONE)
            diff[optionImageName] = self.getImageDifference(result, optionImage)
        diff = {k: v for k, v in diff.items() if v < 1}
        return sorted(diff, key=lambda i: float(diff[i]))
Example #9
0
    def build_map(cls, sc):
        """Render a supplychain as a static map image.

        sc may be a supplychain instance or a key accepted by
        supplychain.factory().  Stops and hops are colored by their
        deepest position ("tier") along any path of the chain's graph.

        NOTE(review): `tilesetcls`, `graph`, `Color` and `maptic` are not
        defined in this block — presumably module-level names; confirm
        against the rest of the file.
        """
        # Accept either a ready-made supplychain or a factory key.
        if isinstance(sc, supplychain):
            pass
        elif type(sc) is str:
            sc = supplychain.factory(sc)
        bounds = sc.bounds()
        if bounds:
            # Convert projected bounds back to lat/lon, then fit tiles.
            tl, br = bounds
            tl = sc.project(tl, inverse=True)
            br = sc.project(br, inverse=True)
            bounds = (tl, br)
            tlt, brt = cls.fit_bounds(bounds)
        else:
            # for empty maps, just grab the world at zoom level 3 (8x8)
            tlt = tilesetcls.tileclass.factory(xtile=0,ytile=0,zoom=3,tileset=tilesetcls)
            brt = tilesetcls.tileclass.factory(xtile=7,ytile=7,zoom=3,tileset=tilesetcls)
        g = graph(sc)
        stids = g.nids[0:]
        # tier[stid] = deepest index at which the stop appears on any path.
        tiers = {} 
        for stid in stids:
            tiers[stid] = 0; 
        max_plen = 0; 
        for i in range(0,len(g.paths)):
            p = g.paths[i] 
            if len(p) > max_plen:
                max_plen = len(p)
            for j in range(0,len(p)):
                if j > tiers[p[j]]:
                    tiers[p[j]] = j
        # Build a color gradient with one entry per possible tier.
        dfc = cls.default_feature_colors[0:]
        for i in range(0,len(dfc)):
            dfc[i] = Color.fromHex(dfc[i]) 
        palette = Color.graduate(dfc, max_plen)

        staticmap = maptic(tlt, brt)
        dfc = cls.default_feature_colors[0]
        for st in sc.stops:
            st.geometry = st.to_latlon()
            # NOTE(review): dfc is a hex color string here but is used as
            # the *fallback index* into palette — looks suspicious; confirm.
            st.attributes["color"] = str(st.attributes.get("color", palette[tiers.get(st.id, dfc)]))
            staticmap.draw_stop(st)
        for h in sc.hops:
            h.geometry = h.to_latlon()
            hc = h.attributes.get("color", None)
            if not hc:
                # Hop color = midpoint of its two endpoint tier colors.
                fc = palette[tiers[h.from_stop_id]]
                tc = palette[tiers[h.to_stop_id]]
                hc = str(fc.midpoint(tc))
            h.attributes["color"] = hc
            staticmap.draw_hop(h)
        # Flatten the hop/stop surfaces (BGRA raw buffers — presumably
        # cairo surfaces; TODO confirm) onto the base image.
        if staticmap.hoplayer:
            hl = staticmap.hoplayer
            hlim = Image.frombuffer("RGBA", (hl.get_width(),hl.get_height()), hl.get_data(), "raw", "BGRA", 0, 1)
            staticmap.image = Image.composite(hlim, staticmap.image, hlim)
        if staticmap.stoplayer:
            stl = staticmap.stoplayer
            stlim = Image.frombuffer("RGBA", (stl.get_width(),stl.get_height()), stl.get_data(), "raw", "BGRA", 0, 1)
            staticmap.image = Image.composite(stlim, staticmap.image, stlim)
        return staticmap
Example #10
0
def save_labels_as_image(label_pred, f):
    """Save the predicted label mask for image *f*, plus a masked copy.

    Writes <stem>_pred_label.png (the 0/255 mask) into path_lbl and
    <stem>_pred_skin.png (the image with non-label pixels blanked) into
    path_img.
    """
    import os

    source = Image.open(f)
    width, height = source.size
    mask_array = np.reshape(label_pred, (height, width)).astype(np.uint8)
    mask = Image.fromarray(mask_array * 255)
    stem = os.path.splitext(os.path.basename(f))[0]
    mask.save(path_lbl + stem + "_pred_label.png")
    blank = Image.new(source.mode, source.size)
    Image.composite(source, blank, mask).save(path_img + stem + "_pred_skin.png")
Example #11
0
 def image_mark(self, source, target):
     """Watermark the image at *source* and write the result to *target*.

     Images smaller than 800x400 are skipped.  Mid-size images get one
     logo in the bottom-right corner; larger images get a grid of logos.
     """
     # Make sure the output directory exists.
     check_dir = target[:target.rfind('/')]
     # print check_dir
     if not os.path.exists(check_dir):
         os.makedirs(check_dir)
     fileName = source
     logoName = settings.MARK_LOGO_IMG
     logging.info(u'图片打水印:%s', fileName)
     im = Image.open(fileName)
     mark = Image.open(logoName)
     imWidth, imHeight = im.size
     # Too small to watermark.
     if imWidth < 800 or imHeight < 400:
         logging.info(u'图片:%s 过小,不打水印', source)
         return
     markWidth, markHeight = mark.size
     logging.info("图片大小:%s", im.size)
     # print mark.size
     if im.mode != 'RGBA':  
         im = im.convert('RGBA')
     if mark.mode != 'RGBA':  
         mark = mark.convert('RGBA')
     layer = Image.new('RGBA', im.size, (0, 0, 0, 0))  
     x = 0
     y = 0
     # Images scraped from nmr: a single logo in the bottom-right corner.
     if imWidth <= 1200 and imHeight <= 1000:
         x = imWidth - markWidth - 30
         y = imHeight - markHeight - 25
         mark = Image.open("/home/kulen/Documents/mark_logov3/logov3_100.png")
         layer.paste(mark, (x, y))
     # Images generated by ChemDraw: tile the logo over a grid of
     # roughly 950x600 cells.
     elif imWidth > 1200 and imHeight > 1000:
         ynum = imHeight / 600
         xnum = imWidth / 950
         # print "xnum:%s ynum:%s" % (xnum, ynum)
         yunit = imHeight / ynum
         xunit = imWidth / xnum
         # print "xunit:%s yunit:%s" % (xunit, yunit)
         i = 0;
         while i < ynum:
             j = 0
             # print '-----------'
             while j < xnum:
                 # Center one logo inside each grid cell.
                 # NOTE(review): `/` yields floats on Python 3, which
                 # Image.paste rejects — this code assumes Python 2.
                 y = i * yunit + (yunit / 2 - markHeight / 2)
                 x = j * xunit + (xunit / 2 - markWidth / 2)
                 # print "X:%s Y:%s" % (x, y)
                 layer.paste(mark, (x, y))
                 j += 1
             i += 1
     '''
     if imWidth > 1200:
         nHeight = (imHeight * 1200) / imWidth
         layer = layer.resize((1200, nHeight))
         im = im.resize((1200, nHeight))
     '''
     Image.composite(layer, im, layer).save(target, quality=80)
     logging.info(u'图片完成打水印:%s', fileName)
def draw_text(image, text, horizontal_offset, vertical_offset,
              horizontal_justification, vertical_justification, size, opacity, hallo,
              cache={}, color='#FFFFFF', orientation=None, font=None):
    """Draws text on an image.

    The text is rendered through a grayscale mask layer so *opacity*
    controls its transparency; with *hallo* set, a blurred halo in the
    inverted color is composited behind the text first.

    NOTE: `cache` is a deliberate mutable-default memo for blurred halo
    masks, keyed by image size.
    """
    image = convert_safe_mode(image)
    img_size = image.size

    mask_layer = Image.new('L', img_size, '#FFFFFF')
    color_layer = Image.new('RGB', img_size, color)

    draw = ImageDraw.Draw(mask_layer)

    if orientation:
        orientation = getattr(Image, orientation)

    # Fall back to the built-in bitmap font when no TrueType path is
    # given.  (The original called font.strip() unconditionally, which
    # crashed on the documented default of font=None.)
    if font and font.strip():
        font = ImageFont.truetype(font, size)
    else:
        font = ImageFont.load_default()
        text = text.encode('ascii', 'replace')

    if orientation:
        font = ImageFont.TransposedFont(font, orientation)

    location = calculate_location(
        horizontal_offset, vertical_offset,
        horizontal_justification, vertical_justification,
        image.size, draw.textsize(text, font=font))

    if hallo:
        x, y = img_size
        blurred_id = BLURED_ID % (x, y)
        # Indentation below is normalized to spaces; the original mixed
        # tabs and spaces here, which is a TabError on Python 3.
        if blurred_id in cache:
            hallo_mask_layer = cache[blurred_id]
        else:
            hallo_mask_layer = Image.new('L', img_size, '#FFFFFF')
            draw_hallo = ImageDraw.Draw(hallo_mask_layer)
            draw_hallo.text(location, text, font=font, fill=255 - opacity)
            # Repeatedly blur and redraw to thicken the halo.
            # NOTE(review): after filter() reassigns hallo_mask_layer,
            # draw_hallo still targets the pre-filter image — kept as in
            # the original; confirm intent.
            n = 0
            while n < size / 10:
                hallo_mask_layer = hallo_mask_layer.filter(ImageFilter.BLUR)
                draw_hallo.text(location, text, font=font, fill=255 - opacity)
                n += 1
            cache[blurred_id] = hallo_mask_layer

        hallo_color_layer = Image.new('RGB', img_size, color)
        hallo_color_layer = ImageChops.invert(hallo_color_layer)
        image = Image.composite(image, hallo_color_layer, hallo_mask_layer)

    draw.text(location, text, font=font, fill=255 - opacity)
    # composite the watermark with the layer
    return Image.composite(image, color_layer, mask_layer)
Example #13
0
def add_watermark(image, text=TEXT, opacity=OPACITY):
    """Stamp *text* near the top-left corner of *image* at the given
    opacity and save the result as 'watermarked_<image>' (JPEG)."""
    base = Image.open(image)
    if base.mode != 'RGBA':
        base = base.convert('RGBA')
    overlay = Image.new('RGBA', base.size, (0, 0, 0, 0))
    ImageDraw.Draw(overlay, 'RGBA').text((15, 15), text)
    # Fade the overlay by scaling its alpha channel.
    faded_alpha = ImageEnhance.Brightness(overlay.split()[3]).enhance(opacity)
    overlay.putalpha(faded_alpha)
    Image.composite(overlay, base, overlay).save('watermarked_' + image,
                                                 'JPEG')
def compose_map(outline, outline_mask, filledin, filledin_mask, overlayinfo, overlayinfo_mask):
    """Layer the outline, filled-in, and overlay-info images (each through
    its own mask) onto a fresh size x size canvas and return it."""
    canvas = Image.new('RGB', (size, size))
    ImageDraw.Draw(canvas)

    # A missing outline becomes a solid red placeholder.
    if outline is None:
        outline = Image.new('RGB', (size, size), (255, 0, 0))

    for layer, mask in ((outline, outline_mask),
                        (filledin, filledin_mask),
                        (overlayinfo, overlayinfo_mask)):
        canvas = Image.composite(layer, canvas, mask)

    return canvas
Example #15
0
def main():
    """Stamp watermark.png onto the bottom-right corner of every other
    PNG in the current directory, writing <name>.wm.<ext> copies."""
    stamp = Image.open('watermark.png')
    for infile in glob.glob("*.png"):
        stem, ext = os.path.splitext(infile)
        if stem == "watermark":
            continue
        base = Image.open(infile)
        if base.mode != "RGBA":
            base = base.convert("RGBA")
        overlay = Image.new("RGBA", base.size, (0, 0, 0, 0))
        corner = (base.size[0] - stamp.size[0], base.size[1] - stamp.size[1])
        overlay.paste(stamp, corner)
        Image.composite(overlay, base, overlay).save('%s.wm.%s' % (stem, ext))
Example #16
0
def img_from_text(image, text, fileout="output.jpg"):
    """Tile *text* across *image* as a faint watermark and save to *fileout*.

    The grid spacing is derived from FONT_SIZE and the text length.
    """
    im = Image.open(image)
    watermark = Image.new('RGBA', im.size, (0,0,0,0))
    draw = ImageDraw.Draw(watermark)
    font = ImageFont.truetype("FreeSans.ttf", FONT_SIZE)
    # Integer floor division: the original used `/`, whose float result
    # range() rejects on Python 3.
    x_step = len(text) * FONT_SIZE // 2
    for x in range(0, im.size[0], x_step):
        for y in range(0, im.size[1], FONT_SIZE):
            draw.text((x, y), text, font=font)
    # Fade the layer to 20% via its alpha channel.
    alpha = watermark.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(0.20)
    watermark.putalpha(alpha)
    Image.composite(watermark, im, watermark).save(fileout, 'JPEG')
Example #17
0
    def image_mark(self, source, target):
        """Watermark the image at *source* and write the result to *target*.

        Images smaller than 800x400 are skipped.  Large images
        (>1200x1000) get a grid of logos; any result wider than 880px is
        downscaled to 880px width.
        """
        # Make sure the output directory exists.
        check_dir = target[:target.rfind('/')]
        # print check_dir
        if not os.path.exists(check_dir):
            os.makedirs(check_dir)
        fileName = source
        logoName = dict_conf.MARK_LOGO_IMG
        logging.info(u'图片打水印:%s', fileName)
        im = Image.open(fileName)
        mark = Image.open(logoName)
        imWidth, imHeight = im.size
        # Too small to watermark.
        if imWidth < 800 or imHeight < 400:
            logging.info(u'图片:%s 过小,不打水印', source)
            return
        markWidth, markHeight = mark.size
        logging.info("图片大小:%s", im.size)
        # print mark.size
        if im.mode != 'RGBA':  
            im = im.convert('RGBA')
        if mark.mode != 'RGBA':  
            mark = mark.convert('RGBA')
        layer = Image.new('RGBA', im.size, (0, 0, 0, 0))  
        x = 0
        y = 0
        # Tile the logo over a grid of roughly 950x600 cells.
        if imWidth > 1200 and imHeight > 1000:
            ynum = imHeight / 600
            xnum = imWidth / 950
            # print "xnum:%s ynum:%s" % (xnum, ynum)
            yunit = imHeight / ynum
            xunit = imWidth / xnum
            # print "xunit:%s yunit:%s" % (xunit, yunit)
            i = 0;
            while i < ynum:
                j = 0
                # print '-----------'
                while j < xnum:
                    # Center one logo inside each grid cell.
                    # NOTE(review): `/` yields floats on Python 3, which
                    # Image.paste rejects — this code assumes Python 2.
                    y = i * yunit + (yunit / 2 - markHeight / 2)
                    x = j * xunit + (xunit / 2 - markWidth / 2)
                    # print "X:%s Y:%s" % (x, y)
                    layer.paste(mark, (x, y))
                    j += 1
                i += 1
        
        # Downscale overly wide results to 880px, keeping aspect ratio.
        if imWidth > 880:
            nHeight = (imHeight * 880) / imWidth
            layer = layer.resize((880, nHeight))
            im = im.resize((880, nHeight), Image.ANTIALIAS)

        Image.composite(layer, im, layer).save(target, quality=80)
        im.close()
        logging.info(u'图片完成打水印:%s', fileName)
def render_wallpaper(in_file, text, author, out_file, opacity=0.25):
    """Render a wrapped quote plus author line onto in_file, fade it to
    *opacity*, and save as JPEG to out_file.

    The quote font is grown until one line spans the image width; the
    author line uses the bold font at a fraction of that size.
    """
    img = Image.open(in_file).convert("RGB")
    quote_image = Image.new('RGBA', img.size, (0,0,0,0))
    size = 2
    n_font = ImageFont.truetype(FONT, size)
    
    lines = textwrap.wrap(text, width = 40)    
    quote_width, quote_height = n_font.getsize(lines[0])
    
    # Scale the font size based on the length of a line of text
    while (quote_width + quote_height < quote_image.size[0]):
       size += 2
       n_font = ImageFont.truetype(FONT, size)
       quote_width, quote_height = n_font.getsize(lines[0])
    
    # Draw the quote text
    draw = ImageDraw.Draw(quote_image, "RGBA")
    y_text = (quote_image.size[1] - (quote_height * len(lines))) / 2
    
    for line in lines:
        width, height = n_font.getsize(line)
        draw.text((40, y_text), line, font = n_font)
        y_text += height
    
    # Draw the author
    author_image = Image.new('RGBA', img.size, (0,0,0,0,))
    
    # Appropriately scale the author font
    # NOTE(review): on Python 3 these `/` divisions produce floats, and
    # ImageFont.truetype historically required an int size — this
    # function appears to assume Python 2; confirm.
    if (size > 160):
        size = size / 4
    else:
        size = size / 2
        
    # Round up to an even size.
    while (size % 2 != 0):
        size += 1
       
    n_bold = ImageFont.truetype(BOLD_FONT, size)
    author_width, author_height = n_bold.getsize(author)
    draw = ImageDraw.Draw(author_image, "RGBA")
    draw.text(((author_image.size[0] - author_width - size), (author_image.size[1] - author_height) / 2 + (quote_height * len(lines)) / 2 + 40), author, font = n_bold)
  
    # Adjust the opacity
    alpha = quote_image.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    quote_image.putalpha(alpha)
    
    # Render everything
    composite = Image.composite(quote_image, img, quote_image)
    Image.composite(author_image, composite, author_image).filter(ImageFilter.SMOOTH_MORE).save(out_file, "JPEG")
Example #19
0
 def convert(self):
     """Run the conversion pipeline and store the result in self.converted."""
     # Shrink the source image first.
     small_img = self._create_small_image(self.gray_img)
     # Quantize the original to three tones.
     img = small_img.point(self._create_3colors_image)
     # Diagonal-line mask step: make the gray areas transparent and
     # overlay them with the diagonal-stripe image.
     self._stretch_naname_image(img.size)
     naname_mask = img.point(self._create_naname_mask)
     img = Image.composite(img, self.naname_img, naname_mask)
     # Outline mask step: PIL contour filter -> grayscale -> threshold to
     # a binary mask, then overlay with a solid black image.
     rinkaku = Image.new(mode="L", size=img.size, color=0)
     rinkaku_mask = small_img.filter(ImageFilter.CONTOUR).convert("L").point(self._create_rinkaku_mask)
     img = Image.composite(img, rinkaku, rinkaku_mask)
     self.converted = img
Example #20
0
    def build(self):
        """composites an image"""
        # Solid-color backdrop the size of the overlay.
        backdrop = Image.new('RGBA', self.overlay_img.size, (0, 0, 0, 0))
        ImageDraw.Draw(backdrop).rectangle((0, 0) + self.overlay_img.size,
                                           fill=self.background_colour)

        # Place the user image on its own transparent layer, then flatten
        # it onto the backdrop through its own alpha.
        self.user_img_layer = Image.new('RGBA', self.overlay_img.size, (0, 0, 0, 0))
        self.user_img_layer.paste(self.user_img, self.user_img_offset)
        self.user_img_layer = Image.composite(self.user_img_layer, backdrop, self.user_img_layer)

        if self.overlay_img.mode != 'RGBA':
            self.overlay_img = self.overlay_img.convert('RGBA')

        # Finally put the overlay on top, through its own alpha.
        return Image.composite(self.overlay_img, self.user_img_layer, self.overlay_img)
Example #21
0
def waterMark(dir, settings):
    """Resize the image at *dir* and stamp the configured watermark onto
    it in place.

    Files whose extension is not in settings.WATER_TYPE are skipped.
    (The original's Python-2-only debug `print` statements and its
    tab-indented `return` — a TabError on Python 3 — are removed/fixed.)
    """
    if (re.search(r'(\.[^.]+)$', dir).group(1) not in settings.WATER_TYPE):
        return
    im = Image.open(dir)
    im = resize(im, settings.IMG_MAXX, settings.IMG_MAXY)
    mark = reduce_opacity(Image.open(settings.WATER_DIR), settings.WATER_OPACITY)
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    layer = Image.new('RGBA', im.size, (0,0,0,0))
    # Scale the mark relative to the image, preserving aspect ratio.
    scale = min(im.size[0] * settings.WATER_SCALE / mark.size[0], im.size[1] * settings.WATER_SCALE / mark.size[1])
    mark = mark.resize((int(mark.size[0] * scale), int(mark.size[1] * scale)))
    # Anchor the mark via the configured offsets from the far corner.
    layer.paste(mark, (im.size[0] + settings.WATER_POS_0 - mark.size[0], im.size[1] + settings.WATER_POS_1 - mark.size[1]))
    Image.composite(layer, im, layer).save(dir)
def watermark(im, mark, position, opacity=1):
    """Adds a watermark to an image."""
    if opacity < 1:
        mark = reduce_opacity(mark, opacity)
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    # Transparent layer matching the base image; the mark is drawn onto
    # it and then composited through its own alpha.
    stamp_layer = Image.new('RGBA', im.size, (0,0,0,0))
    if position == 'tile':
        # Repeat the mark across the whole image.
        offset_y = 0
        while offset_y < im.size[1]:
            offset_x = 0
            while offset_x < im.size[0]:
                stamp_layer.paste(mark, (offset_x, offset_y))
                offset_x += mark.size[0]
            offset_y += mark.size[1]
    elif position == 'scale':
        # scale, but preserve the aspect ratio
        ratio = min(
            float(im.size[0]) / mark.size[0], float(im.size[1]) / mark.size[1])
        scaled = mark.resize((int(mark.size[0] * ratio), int(mark.size[1] * ratio)))
        stamp_layer.paste(scaled, ((im.size[0] - scaled.size[0]) // 2,
                                   (im.size[1] - scaled.size[1]) // 2))
    else:
        stamp_layer.paste(mark, position)
    # composite the watermark with the layer
    return Image.composite(stamp_layer, im, stamp_layer)
Example #23
0
    def filter(self, im):
        """Apply a radial vignette: darken pixels toward the corners.

        The per-pixel alpha grows with distance from the center, shaped
        by self.falloff and scaled by self.extent; a black overlay is
        composited through that alpha mask.
        """
        falloff = self.falloff
        extent = self.extent

        def length(start, end):
            # Euclidean distance between two (x, y) points.
            start_x, start_y = start
            end_x, end_y = end
            dist_x = end_x - start_x
            dist_y = end_y - start_y
            return math.sqrt((dist_x ** 2) + (dist_y ** 2))

        def light_falloff(radius, outside):
            # Normalized radius raised to `falloff`, scaled by `extent`.
            return ((radius / outside) ** falloff) * extent

        im = im.convert('RGBA')

        w, h = im.size
        center = w / 2, h / 2
        outside = length(center, (0, 0))

        # range() instead of the Python-2-only xrange(); the mask values
        # are built in a single comprehension, row-major like putdata().
        data = [light_falloff(length(center, (x, y)), outside)
                for y in range(h) for x in range(w)]

        alpha_im = Image.new('L', im.size)
        alpha_im.putdata(data)
        overlay_im = Image.new('L', im.size, 'black')
        return Image.composite(overlay_im, im, alpha_im)
Example #24
0
    def _drawPoints(self, img, points, meta=None):
        """
        Draws points into image (view)

        img - 2D array
        points - 2D coordinates
        meta - optional dict overriding "color", "border", "size"
        """
        # Avoid the shared mutable-default pitfall; callers that omit
        # meta see identical behavior.
        if meta is None:
            meta = {}
        # Materialize once: the original called list(points) for the
        # length check and then iterated `points` again, which yields
        # nothing when points is a generator.
        points = list(points)
        if not points:
            return img
        color = (255,0,0,self.default_point_alpha) if ("color" not in meta) else meta["color"]
        color = self._validateColor(color, self.default_point_alpha)
        border = self.default_point_border if ("border" not in meta) else meta["border"]
        size = self.default_point_size if ("size" not in meta) else meta["size"]

        img_d = Image.new('RGBA', img.size)
        draw = ImageDraw.Draw(img_d)

        # draw border (a slightly larger rectangle under each point)
        if border is not None:
            bsize = size+2
            for p in points: # p = [x,y]
                xy = [p[0]-(bsize/2), p[1]-(bsize/2), p[0]+(bsize/2), p[1]+(bsize/2)]
                draw.rectangle(xy, fill=border)

        # draw points
        for p in points: # p = [x,y]
            xy = [p[0]-(size/2), p[1]-(size/2), p[0]+(size/2), p[1]+(size/2)]
            draw.rectangle(xy, fill=color)

        img = Image.composite(img_d, img, img_d)
        return img
Example #25
0
    def _drawVolume(self, img, mask, meta=None):
        """
        Draws volume (mask) into image (view)

        img - 2D Image
        mask - 2D array; bool or int32 <0,1;256>
        meta - optional dict overriding "color"
        """
        # Avoid the shared mutable-default pitfall; behavior is unchanged
        # for callers that omit meta.
        if meta is None:
            meta = {}
        color = (255,0,0,self.default_volume_alpha) if ("color" not in meta) else meta["color"]
        color = self._validateColor(color, self.default_volume_alpha)

        mask = mask.astype(np.uint8) # fixes future numpy warning
        img_mask = np.zeros((mask.shape[0],mask.shape[1], 4), dtype=np.uint8)
        # Clamp the start depth to at least 1.  The original called
        # np.max(1, mask.min()), which passes mask.min() as np.max's
        # *axis* argument instead of comparing the two values.
        for d in range(max(1, int(mask.min())), mask.max()+1): # range(1,256+1)
            # Darken the color progressively with depth.
            scale = 1.0 - (((d-1)/255.0) * self.mask_depth_scale )
            mask_d = (mask == d)
            img_mask[mask_d,0] = int(color[0]*scale)
            img_mask[mask_d,1] = int(color[1]*scale)
            img_mask[mask_d,2] = int(color[2]*scale)

        # Alpha channel: opaque (at color[3]) wherever the mask is set.
        img_mask[:,:,3] = (mask!=0).astype(np.uint8)*color[3]
        img_mask = Image.fromarray(img_mask, 'RGBA')
        img = Image.composite(img_mask, img, img_mask)

        return img
Example #26
0
    def water_mark(self, img_source, water_str):
        """Stamp the configured watermark image onto img_source (in place)
        and write water_str in the top-left corner.

        :param img_source: path of the image file, modified in place
        :param water_str: text drawn in the top-left corner
        :return: always None — note the ``finally: return`` below also
            swallows any exception escaping the handler
        """
        try:
            im = Image.open(img_source)

            # Write the text directly onto the image and save once.
            draw = ImageDraw.Draw(im)
            draw.text(self.water_str_pos, water_str, fill=self.water_str_color, font=self.water_str_font)
            im.save(img_source)

            # Paste the watermark image into the bottom-right corner and
            # save again.
            wm = Image.open(self.img_water_mark)
            layer = Image.new('RGBA', im.size, (0, 0, 0, 0))
            layer.paste(wm, (im.size[0] - wm.size[0], im.size[1] - wm.size[1]))
            new_im = Image.composite(layer, im, layer)
            new_im.save(img_source)
        except Exception as e:
            print(">>>> water_mark EXCEPTION >>>> " + str(e))
        finally:
            return
Example #27
0
def watermark(img, mark, position=(0, 0), opacity=1, scale=1.0, tile=False, **kwargs):
    """Return `img` (converted to RGBA) with `mark` composited onto it.

    position: (x, y) anchor, or a value understood by determine_position
    opacity: 0..1; values below 1 fade the mark via reduce_opacity
    scale: a (w, h) tuple used directly, or a factor fed to determine_scale
    tile: when True, repeat the mark over the whole image starting from
        `position`'s grid phase
    """
    if opacity < 1:
        mark = reduce_opacity(mark, opacity)

    # idiom fix: isinstance() instead of comparing type() to tuple
    if not isinstance(scale, tuple):
        scale = determine_scale(scale, img, mark)

    mark = mark.resize(scale)
    position = determine_position(position, img, mark)

    if img.mode != 'RGBA':
        img = img.convert('RGBA')

    # make sure we have a tuple for a position now
    assert isinstance(position, tuple), 'Invalid position "%s"!' % position

    # create a transparent layer the size of the image and draw the
    # watermark in that layer.
    layer = Image.new('RGBA', img.size, (0,0,0,0))
    if tile:
        # start one tile above/left of the first visible tile so the grid
        # fully covers the image
        first_y = position[1] % mark.size[1] - mark.size[1]
        first_x = position[0] % mark.size[0] - mark.size[0]

        for y in range(first_y, img.size[1], mark.size[1]):
            for x in range(first_x, img.size[0], mark.size[0]):
                layer.paste(mark, (x, y))
    else:
        layer.paste(mark, position)
    return Image.composite(layer, img, layer)
Example #28
0
  def SavePNG(self, matrix, filename, requested_width=None, requested_height=None, bounding_box=None):
    """Render the data points in `matrix` to a PNG image at `filename`.

    When no bounding_box is given, the matrix's own bounding box is used;
    it is then clipped to the requested dimensions. Points are colored
    through self.colormap scaled by the matrix maximum, optionally blended
    over a solid background or composited over a background image.
    """
    box = bounding_box or matrix.BoundingBox()
    box.ClipToSize(requested_width, requested_height)
    ((min_x, min_y), (max_x, max_y)) = box.Corners()
    img_w = max_x - min_x + 1
    img_h = max_y - min_y + 1
    logging.info('saving image (%d x %d)' % (img_w, img_h))

    from PIL import Image
    if self.background:
      img = Image.new('RGB', (img_w, img_h), self.background)
    else:
      img = Image.new('RGBA', (img_w, img_h))

    maxval = matrix.Max()
    pixels = img.load()

    # Iterating just over the non-zero data points is ideal when plotting
    # the whole image, but for tile sets it might make more sense for the
    # caller to partition the points and pass in the list for each image,
    # so the points are iterated once instead of once per tile — and empty
    # tiles could be handled specially.
    for ((px, py), val) in matrix.iteritems():
      if not box.IsInside((px, py)):
        continue
      shade = self.colormap[int(255 * val / maxval)]
      if self.background:
        shade = _blend_pixels(shade, self.background)
      pixels[px - min_x, py - min_y] = shade
    if self.background_image:
      # the alpha channel doubles as the compositing mask
      img = Image.composite(img, self.background_image, img.split()[3])
    img.save(filename)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
  """Draws mask on an image, in place.

  Args:
    image: uint8 numpy array with shape (img_height, img_height, 3);
      modified in place with the blended result.
    mask: a uint8 numpy array of shape (img_height, img_height) with
      values between either 0 or 1.
    color: color to draw the keypoints with. Default is red.
    alpha: transparency value between 0 and 1. (default: 0.4)

  Raises:
    ValueError: On incorrect data type for image or masks.
  """
  if image.dtype != np.uint8:
    raise ValueError('`image` not of type np.uint8')
  if mask.dtype != np.uint8:
    raise ValueError('`mask` not of type np.uint8')
  if np.any(np.logical_and(mask != 1, mask != 0)):
    raise ValueError('`mask` elements should be in [0, 1]')
  if image.shape[:2] != mask.shape:
    raise ValueError('The image has spatial dimensions %s but the mask has '
                     'dimensions %s' % (image.shape[:2], mask.shape))
  rgb = ImageColor.getrgb(color)
  pil_image = Image.fromarray(image)

  # Broadcast the color across the mask's footprint, then composite it
  # through the alpha-scaled mask.
  solid = np.ones_like(mask)[:, :, None] * np.array(list(rgb)).reshape(1, 1, 3)
  overlay = Image.fromarray(np.uint8(solid)).convert('RGBA')
  blend_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
  blended = Image.composite(overlay, pil_image, blend_mask)
  np.copyto(image, np.array(blended.convert('RGB')))
Example #30
0
    def make_image(self, matrix):
        """Render `matrix` into a PIL image using the configured extent,
        dimensions, colormap and optional background / background image."""
        extent = self.config.extent_out or matrix.extent()
        extent.resize((self.config.width or 1) - 1,
                      (self.config.height or 1) - 1)
        size = extent.size()
        size.x = int(size.x) + 1
        size.y = int(size.y) + 1
        logging.info('saving image (%d x %d)' % (size.x, size.y))
        if self.background:
            img = Image.new('RGB', (size.x, size.y), self.background)
        else:
            img = Image.new('RGBA', (size.x, size.y))

        # values are normalised against the largest one before colormapping
        maxval = max(matrix.values())
        pixels = img.load()
        for (coord, val) in matrix.items():
            if not extent.is_inside(coord):
                continue
            px = int(coord.x - extent.min.x)
            py = int(coord.y - extent.min.y)
            color = self.config.colormap.get(val / maxval)
            if self.background:
                pixels[px, py] = ImageMaker._blend_pixels(color,
                                                          self.background)
            else:
                pixels[px, py] = color
        if self.config.background_image:
            # the alpha channel doubles as the compositing mask
            img = Image.composite(img, self.config.background_image,
                                  img.split()[3])
        return img
Example #31
0
        "mohammad_blend.jpg".format(alpha))

# Reload and display the previously saved blend result.
mohammad_blend = Image.open("mohammad_blend.jpg")
mohammad_blend.show()

# Blurred copy of the base image (NOTE(review): `mohammad` is loaded
# earlier in the script, outside this excerpt — presumably "mohammad.jpg").
mohammad_blur = mohammad.filter(ImageFilter.BLUR)
mohammad_blur.show()


# Create, save and display a 128x128 thumbnail. thumbnail() preserves the
# aspect ratio, so the result fits within `size` rather than matching it.
size = (128,128)
saved = ("mohammad_thumbnail.jpg")
mohammad_thumbnail = Image.open("mohammad.jpg")
mohammad_thumbnail.thumbnail(size)
mohammad_thumbnail.save(saved)
mohammad_thumbnail.show()

# Rotate the base image 90 degrees counter-clockwise and display it.
mohammad_rot_90 = mohammad.rotate(90)
mohammad_rot_90.show()




# Composite `mohammad` over `hani` through a mask image (all resized to
# the same dimensions), save the result and show it.
hani = Image.open("hani.jpg").resize(mohammad.size)
mask = Image.open("mask.jpg")
mask = mask.resize(mohammad.size)

Image.composite(mohammad,hani,mask).save(
        "mohammad_mask.jpg")

mohammad_mask = Image.open("mohammad_mask.jpg")
mohammad_mask.show()
Example #32
0
def mosaic(logo_dir, tmp_file, mask_file, max_h, max_w, img_size, img_dir,metric):
    """Assemble a photo mosaic and save it to data/mosaic.png.

    Tiles of img_size x img_size pixels are matched (by Euclidean distance
    on precomputed per-cell feature vectors) to grid cells of a template,
    then a translucent tint overlay and a logo are pasted on top.

    logo_dir - directory holding the template/mask/logo assets
    tmp_file - template image file name inside logo_dir
    mask_file - tint/overlay image file name inside logo_dir
    max_h, max_w - output mosaic height/width in pixels
    img_size - tile edge length in pixels
    img_dir - root of the source-image directories
    metric - suffix selecting which precomputed data set to use
    """
    worked_dir = os.path.join(logo_dir, str(img_size)+metric)
    src_dir = os.path.join(img_dir, str(img_size)+metric)
    # grid dimensions measured in tiles
    h = int(max_h/img_size)
    w = int(max_w/img_size)
    # parts.bin marks which cells require matching (non-zero) vs. are free
    with open(os.path.join(worked_dir,'parts.bin'),'rb') as f:
        any_images = np.load(f)
    cond = any_images==0
    any_parts = np.column_stack((np.where(cond)))
    worked_parts = np.column_stack((np.where(~cond)))
    
    images = get_images_bydate(src_dir)

    im = Image.open(os.path.join(logo_dir,tmp_file))
    # canvas is at least as large as both the template and the request
    mosaic = Image.new('RGBA', (max([max_w,im.size[0]]),max([max_h,im.size[1]])),(0,0,0,255))
    del im
    done_parts = []
    dic_map = {}  # cell name "i_j" -> (source image name, match distance)

    dic_etalons = {}  # per-cell reference feature vectors
    dic_alpha = {}    # per-cell boolean masks selecting valid pixels
    bad_images = []   # sources displaced by better matches; reused below
    for i in range(w):
        for j in range(h):
            filename = str(i)+'_'+str(j)
            with open(os.path.join(worked_dir,filename+'a.bin'),'rb') as f:
                dic_alpha[filename] = np.load(f)
            with open(os.path.join(worked_dir,filename+'.bin'),'rb') as f:
                dic_etalons[filename] = np.load(f)[dic_alpha[filename],:]

    for i,data in enumerate(images):
        if i >= h*w:
            break
        path = data[1]
        src_name = os.path.basename(path)[:-4]
        with open(os.path.join(src_dir+'t',src_name+'.bin'),'rb') as f:
            im_src = np.load(f)
        min_dist = -1
        bst_part = ''
        # once every constrained cell is occupied, later sources may still
        # displace a worse-matching occupant
        is_fill = worked_parts.shape[0] <= len(done_parts)
        for part in worked_parts:
            filename = str(part[0])+'_'+str(part[1])
            if not is_fill:
                if filename in done_parts:
                    continue
            im_arr = im_src[dic_alpha[filename],:]
            # dist = distance.euclidean(np.array(dic_etalons[filename]).ravel(),np.array(im_arr).ravel())
            dist = euclidean(np.array(dic_etalons[filename]).ravel(),np.array(im_arr).ravel())
            if bst_part == '':
                min_dist = dist
                bst_part = filename
            else:
                if min_dist> dist:
                    min_dist = dist
                    bst_part = filename
        if is_fill:
            # replace the current occupant only if this source fits better
            if dic_map[bst_part][1]>min_dist:
                bad_images.append(dic_map[bst_part][0])
                dic_map[bst_part] = (src_name, min_dist)
            else:
                bad_images.append(src_name)
        else:
            dic_map[bst_part] = (src_name, min_dist)
            done_parts.append(bst_part)
    
    np.random.shuffle(any_parts)

    # scatter the displaced images over the unconstrained cells
    for part in any_parts:
        if len(bad_images) == 0:
            continue
        # image = bad_images.pop(np.random.choice(len(bad_images)))
        image = bad_images.pop()
        filename = str(part[0])+'_'+str(part[1])
        dic_map[filename] = (image, 0)

    # black_mask records which cells actually received a tile
    black_mask = Image.new('L',mosaic.size, (0))

    for key, value in dic_map.items():
        im = Image.open(os.path.join(src_dir,value[0]+'.png'))
        i, j = key.split('_')
        i = int(i)
        j = int(j)
        box = (i*img_size,j*img_size,img_size*(i+1),img_size*(j+1))
        alpha_im = Image.new('L',(img_size,img_size), (255))
        black_mask.paste(alpha_im, box)
        mosaic.paste(im,box)

    # tint the mosaic with a 60%-opaque (alpha 153) copy of the mask image,
    # padded to canvas size with the mask's own corner color
    mask_img = Image.open(os.path.join(logo_dir, mask_file)).convert('RGB')
    r, g, b = mask_img.getpixel((1, 1))
    mask_img_full = Image.new('RGBA',mosaic.size, (r,g,b,0))
    mask_img_full.paste(mask_img,(0,0))
    mask_img_full.putalpha(153)
    mosaic.paste(mask_img_full,(0,0),mask_img_full)
    # blank out cells that never received a tile
    mosaic = Image.composite(mosaic,Image.new('RGB',mosaic.size,(0,0,0)),black_mask)
    logo_img = Image.open(os.path.join(logo_dir, 'logo.png'))
    mosaic.paste(logo_img,(0,0),logo_img)
    mosaic.putalpha(255)
    mosaic.save('data/mosaic.png')
Example #33
0
        ]  # make the black background white

        if len(red_pixel_locations[0]) > 0 and len(
                red_pixel_locations[1]) > 0:  # if there is a red mask
            red_mask = np_raw_img_masks.copy()
            red_mask[green_pixel_locations] = [
                255, 255, 255, 255
            ]  # convert green pixels to background (white)
            red_mask[yellow_pixel_locations] = [
                255, 255, 255, 255
            ]  # convert yellow pixels to background (white)
            red_mask[red_pixel_locations] = [
                0, 0, 0, 255
            ]  # convert red pixels to mask (black)
            final_red_mask = Image.fromarray(red_mask).convert('L')
            red_masked_original_image = Image.composite(
                white_square, original_img, final_red_mask)
            red_masked_original_image.save(
                f'{final_segmented_img_output_location}prod_{prod_id_i}_UpperCloth.png'
            )

        if len(green_pixel_locations[0]) > 0 and len(
                green_pixel_locations[1]) > 0:  # if there is a green mask
            green_mask = np_raw_img_masks.copy()
            green_mask[red_pixel_locations] = [
                255, 255, 255, 255
            ]  # convert red pixels to background (white)
            green_mask[yellow_pixel_locations] = [
                255, 255, 255, 255
            ]  # convert yellow pixels to background (white)
            green_mask[green_pixel_locations] = [
                0, 0, 0, 255
def image_5_modify(guest_heritage, custom_font, rank_mark, settings):
    """Render page 5 of the report and return it as a PIL image.

    The left page lists items 23-42 (index, number, name, rank mark); the
    right page prints the rank counts and receives a matplotlib radar
    chart. Text is drawn on a transparent layer that is alpha-composited
    over the template.

    guest_heritage - object whose items_data table provides the
        'number', 'name' and 'rank' columns (pandas-style .loc access)
    custom_font - font bundle; .yh is used for all text on this page
    rank_mark - helper whose mark_print stamps rank marks onto a layer
    settings - configuration; .templates locates the template images
    """
    file = settings.templates + '5.jpg'
    image_template = Image.open(file).convert('RGBA')
    width, height = image_template.size
    # txt holds the drawn text; layer collects the rank marks
    txt = Image.new('RGBA', image_template.size, (255, 255, 255, 0))
    layer = Image.new('RGBA', image_template.size)
    draw = ImageDraw.Draw(txt)
    # left page (total result part two)
    for index in range(22, 42):
        # row positions are fractions of the template size
        start_x = 0.055 * width
        start_y = 0.132 * height + 0.0323 * height * (index - 22)
        # index
        draw.text((start_x, start_y),
                  f'{index + 1:02}',
                  font=custom_font.yh,
                  fill='black')
        # number
        draw.text(
            (start_x + 0.073 * width, start_y),
            guest_heritage.items_data.loc[index, 'number'],
            font=custom_font.yh,
            fill='black',
        )
        # name
        draw = center_draw(
            draw,
            start_x + 0.2235 * width,
            start_y,
            guest_heritage.items_data.loc[index, 'name'],
            custom_font.yh,
        )
        # rank
        layer = rank_mark.mark_print(
            layer,
            start_x + 0.316 * width,
            start_y - 0.005 * height,
            guest_heritage.items_data.loc[index, 'rank'],
        )
    # right page (radar map)
    # text part: counts of items per rank level (3=high, 2=middle, 1=low)
    start_x = 0.681 * width
    start_y = 0.805 * height
    high = len(
        guest_heritage.items_data[guest_heritage.items_data['rank'] == 3])
    middle = len(
        guest_heritage.items_data[guest_heritage.items_data['rank'] == 2])
    low = len(
        guest_heritage.items_data[guest_heritage.items_data['rank'] == 1])
    # high
    draw.text((start_x, start_y),
              f'{high:02}',
              font=custom_font.yh,
              fill='black')
    # middle
    draw.text((start_x + 0.18 * width, start_y),
              f'{middle:02}',
              font=custom_font.yh,
              fill='black')
    # # normal
    # draw.text((start_x, start_y + 0.047 * height), f'{normal:02}', font=custom_font.yh, fill='black')
    # low
    draw.text((start_x + 0.09 * width, start_y + 0.053 * height),
              f'{low:02}',
              font=custom_font.yh,
              fill='black')
    # image composition: text first, then rank marks (masked by themselves)
    image = Image.alpha_composite(image_template, txt)
    image = Image.composite(layer, image, layer)
    # radar part
    # Create custom colormaps
    custom_color_map = custom_color_map_set(settings)
    # draw radar figure by matplotlib
    figure = radar_get(guest_heritage, custom_color_map)
    image = radar_pic(figure, image)
    plt.close('all')
    return image
Example #35
0
def get_image_from_sprite(m_Sprite) -> Image.Image:
    """Extract a sprite's image from its (atlas) texture.

    Resolves the sprite's atlas — via the direct pointer or, failing that,
    by matching the atlas tag against the asset file's SpriteAtlas objects —
    then crops the sprite's rect from the texture, undoes any packing
    rotation/flip, applies the tight-packing polygon mask if present, and
    returns the image flipped back to top-down orientation.
    """
    atlas = None
    if m_Sprite.m_SpriteAtlas:
        atlas = m_Sprite.m_SpriteAtlas.read()
    elif m_Sprite.m_AtlasTags:
        # looks like the direct pointer is empty, let's try to find the Atlas via its name
        for obj in m_Sprite.assets_file.objects.values():
            if obj.type == "SpriteAtlas":
                atlas = obj.read()
                if atlas.name == m_Sprite.m_AtlasTags[0]:
                    break
                atlas = None

    if atlas:
        sprite_atlas_data = atlas.render_data_map[m_Sprite.m_RenderDataKey]
    else:
        sprite_atlas_data = m_Sprite.m_RD

    m_Texture2D = sprite_atlas_data.texture
    alpha_texture = sprite_atlas_data.alphaTexture
    texture_rect = sprite_atlas_data.textureRect
    texture_rect_offset = sprite_atlas_data.textureRectOffset
    settings_raw = sprite_atlas_data.settingsRaw

    original_image = get_image(m_Sprite, m_Texture2D, alpha_texture)

    # crop the sprite's rectangle out of the (atlas) texture
    sprite_image = original_image.crop(
        (texture_rect.x, texture_rect.y, texture_rect.x + texture_rect.width,
         texture_rect.y + texture_rect.height))

    if settings_raw.packed == 1:
        # undo the rotation/flip applied when the sprite was packed
        rotation = settings_raw.packingRotation
        if rotation == SpritePackingRotation.kSPRFlipHorizontal:
            sprite_image = sprite_image.transpose(Image.FLIP_TOP_BOTTOM)
        # spriteImage = RotateFlip(RotateFlipType.RotateNoneFlipX)
        elif rotation == SpritePackingRotation.kSPRFlipVertical:
            sprite_image = sprite_image.transpose(Image.FLIP_LEFT_RIGHT)
        # spriteImage.RotateFlip(RotateFlipType.RotateNoneFlipY)
        elif rotation == SpritePackingRotation.kSPRRotate180:
            sprite_image = sprite_image.transpose(Image.ROTATE_180)
        # spriteImage.RotateFlip(RotateFlipType.Rotate180FlipNone)
        elif rotation == SpritePackingRotation.kSPRRotate90:
            sprite_image = sprite_image.transpose(Image.ROTATE_270)
    # spriteImage.RotateFlip(RotateFlipType.Rotate270FlipNone)

    if settings_raw.packingMode == SpritePackingMode.kSPMTight:
        # Tight

        # create mask to keep only the polygon
        mask = Image.new("1", sprite_image.size, color=0)
        draw = ImageDraw.ImageDraw(mask)
        for triangle in get_triangles(m_Sprite):
            draw.polygon(triangle, fill=1)

        # apply the mask
        if sprite_image.mode == "RGBA":
            # the image already has an alpha channel,
            # so we have to use composite to keep it
            empty_img = Image.new(sprite_image.mode,
                                  sprite_image.size,
                                  color=0)
            sprite_image = Image.composite(sprite_image, empty_img, mask)
        else:
            # add mask as alpha-channel to keep the polygon clean
            sprite_image.putalpha(mask)

    # Unity textures are bottom-up; flip back to the usual top-down order
    return sprite_image.transpose(Image.FLIP_TOP_BOTTOM)
def visualize_panoptic_seg_on_image_array(image,
                                          panoptic_seg,
                                          segments_info,
                                          category_index,
                                          alpha=0.8):
    """Draws panoptic segmentation masks on an image with a color legend.

    Each segment is blended onto the image in a category-derived color,
    then a legend listing the category names is appended below the image.

    Args:
        image: uint8 numpy array with shape [img_height, img_height, 3]
        panoptic_seg: a uint8 numpy array of shape [img_height, img_height]
        with values ranging between [0, num_classes).
        segments_info: list of dicts, each with an `id` matching values in
            panoptic_seg and a `category_id` keying into category_index.
        category_index:  a dict containing category dictionaries (each holding
            category index `id` and category name `name`) keyed by category indices.
        alpha: transparency value between 0 and 1. (default: 0.8)

    Returns:
        uint8 numpy array of the image with masks and legend drawn.

    Raises:
        ValueError: On incorrect data type for image or panoptic_seg.
    """
    if image.dtype != np.uint8:
        raise ValueError('`image` not of type np.uint8')
    if image.shape[:2] != panoptic_seg.shape[:2]:
        raise ValueError(
            'The image has spatial dimensions %s but the panoptic_seg has '
            'dimensions %s' % (image.shape[:2], panoptic_seg.shape[:2]))
    pil_image = Image.fromarray(image)
    display_strs_and_colors = []
    for segment_info in segments_info:
        segment_id = segment_info["id"]
        mask = (panoptic_seg == segment_id).astype(np.uint8)
        mask_area = mask.sum().item()
        if mask_area == 0:
            continue

        # pick a stable color per category and remember the label for the legend
        category_id = segment_info["category_id"]
        color = STANDARD_COLORS[category_id % len(STANDARD_COLORS)]
        rgb = ImageColor.getrgb(color)
        name = category_index[category_id].get("display_name")
        if name is None:
            name = category_index[category_id]["name"]
        display_strs_and_colors.append((name if name else 'UnKnown', rgb))
        # blend a solid-color overlay through the alpha-scaled mask
        solid_color = np.expand_dims(np.ones_like(mask), axis=-1) * np.reshape(
            list(rgb), [1, 1, 3])
        pil_solid_color = Image.fromarray(
            np.uint8(solid_color)).convert('RGBA')
        pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
        pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)

    # extend the canvas downwards to make room for the legend
    width, height = pil_image.size
    str_heights = [font.getsize(s)[1] for s, c in display_strs_and_colors]
    total_str_height = int(
        sum([(1 + 2 * 0.25) * str_height
             for str_height in str_heights]) + _TOP_MARGIN * 2)
    image_with_legend = Image.new("RGB", (width, height + total_str_height),
                                  "white")
    image_with_legend.paste(pil_image, (0, 0))
    legend_draw = ImageDraw.Draw(image_with_legend)
    left = _LEFT_MARGIN
    top = height + _TOP_MARGIN
    # one colored swatch + label per segment category
    for display_str, rgb in display_strs_and_colors:
        _, text_height = font.getsize(display_str)
        margin = np.ceil(0.25 * text_height)
        legend_draw.rectangle([(left, top),
                               (left + _LEFT_MARGIN, top + text_height)],
                              fill=rgb)
        legend_draw.text((left + 2 * _LEFT_MARGIN, top),
                         display_str,
                         fill="black",
                         font=font)
        top += margin + text_height

    return np.array(image_with_legend)
Example #37
0
    def _compose_images(self, foregrounds, background_path, avoid_collisions=True, scale_by_img_size=True):
        """Compose foreground cut-outs over a background and build the
        matching color-coded segmentation mask.

        Validation should already be done by now.
        Args:
            foregrounds: a list of dicts with format:
              [{
                  'super_category':super_category,
                  'category':category,
                  'foreground_path':foreground_path,
                  'mask_rgb_color':mask_rgb_color
              },...]
            background_path: the path to a valid background image
            avoid_collisions: when True, nudge colliding foregrounds apart
                (a foreground that cannot be placed is skipped)
            scale_by_img_size: when True, pass the output dimensions to the
                foreground transform
        Returns:
            composite: the composed image
            composite_mask: the mask image
        """
        # Open background and convert to RGBA
        background = Image.open(background_path)
        background = background.convert('RGBA')

        # Crop background to desired size (self.width x self.height), randomly positioned
        bg_width, bg_height = background.size
        max_crop_x_pos = bg_width - self.width
        max_crop_y_pos = bg_height - self.height
        assert max_crop_x_pos >= 0, f'desired width, {self.width}, is greater than background width, {bg_width}, for {str(background_path)}'
        assert max_crop_y_pos >= 0, f'desired height, {self.height}, is greater than background height, {bg_height}, for {str(background_path)}'
        crop_x_pos = random.randint(0, max_crop_x_pos)
        crop_y_pos = random.randint(0, max_crop_y_pos)
        composite = background.crop((crop_x_pos, crop_y_pos, crop_x_pos + self.width, crop_y_pos + self.height))
        composite_mask = Image.new('RGB', composite.size, 0)
    
        fg_list = []  # rects of already-placed foregrounds, for collision tests

        for fg in foregrounds:
            fg_path = fg['foreground_path']

            # Perform transformations
            if scale_by_img_size:
                fg_image = self._transform_foreground(fg, fg_path, self.width, self.height)
            else:
                fg_image = self._transform_foreground(fg, fg_path)

            # Choose a random x,y position for the foreground
            max_x_position = composite.size[0] - fg_image.size[0]
            max_y_position = composite.size[1] - fg_image.size[1]
            assert max_x_position >= 0 and max_y_position >= 0, \
            f'foreground {fg_path} is too big ({fg_image.size[0]}x{fg_image.size[1]}) for the requested output size ({self.width}x{self.height}), check your input parameters'
            paste_position = (random.randint(0, max_x_position), random.randint(0, max_y_position))

            # Check if colliding, try move if it is
            if avoid_collisions:
                fg_rect = [paste_position[0], # x1
                            paste_position[1], # y1
                            paste_position[0] + fg_image.size[0], # x2
                            paste_position[1] + fg_image.size[1]] # y2

                visited_centroids = []
                colliding_point = self._is_colliding(fg_rect, fg_list)

                # keep stepping away from colliding points until placement
                # succeeds or a previously-visited centroid is revisited
                while colliding_point is not None:
                    # Move the fg away from the colliding point
                    step_size = 50
                    curr_centroid_x = int((fg_rect[0] + fg_rect[2]) / 2)
                    curr_centroid_y = int((fg_rect[1] + fg_rect[3]) / 2)
                    new_centroid_pos = self._get_new_centroid_pos(colliding_point, 
                        (curr_centroid_x, curr_centroid_y),
                        step_size) 

                    # print("Moving {} from {} to {}".format(fg_path, (curr_centroid_x, curr_centroid_y), new_centroid_pos))
                    
                    if self._visited_point_before(new_centroid_pos, visited_centroids):
                        print("Tried to re-visit point {}".format(new_centroid_pos))
                        fg_rect = None
                        break
                    visited_centroids.append(new_centroid_pos)

                    fg_rect = self._get_rect_position(new_centroid_pos, fg_image)
                    colliding_point = self._is_colliding(fg_rect, fg_list)

                if fg_rect is None or self._outside_img(composite, fg_rect):
                    # print("Outside image {}".format(fg_rect))
                    continue
                else:
                    paste_position = (int(fg_rect[0]), int(fg_rect[1]))
                    fg_list.append(fg_rect)

            # Create a new foreground image as large as the composite and paste it on top
            new_fg_image = Image.new('RGBA', composite.size, color = (0, 0, 0, 0))
            new_fg_image.paste(fg_image, paste_position)

            # Extract the alpha channel from the foreground and paste it into a new image the size of the composite
            alpha_mask = fg_image.getchannel(3)
            new_alpha_mask = Image.new('L', composite.size, color = 0)
            new_alpha_mask.paste(alpha_mask, paste_position)
            composite = Image.composite(new_fg_image, composite, new_alpha_mask)

            # Grab the alpha pixels above a specified threshold
            alpha_threshold = 200
            mask_arr = np.array(np.greater(np.array(new_alpha_mask), alpha_threshold), dtype=np.uint8)
            uint8_mask = np.uint8(mask_arr) # This is composed of 1s and 0s

            # Multiply the mask value (1 or 0) by the color in each RGB channel and combine to get the mask
            mask_rgb_color = fg['mask_rgb_color']
            red_channel = uint8_mask * mask_rgb_color[0]
            green_channel = uint8_mask * mask_rgb_color[1]
            blue_channel = uint8_mask * mask_rgb_color[2]
            rgb_mask_arr = np.dstack((red_channel, green_channel, blue_channel))
            isolated_mask = Image.fromarray(rgb_mask_arr, 'RGB')
            isolated_alpha = Image.fromarray(uint8_mask * 255, 'L')

            composite_mask = Image.composite(isolated_mask, composite_mask, isolated_alpha)

        return composite, composite_mask
Example #38
0
def mask_image(image, mask):
    """Black out the regions of `image` selected by `mask`.

    Where `mask` is opaque, pixels are replaced with black; elsewhere the
    original image shows through. Returns a new image.
    """
    blackout = Image.new('RGB', image.size, 0)
    return Image.composite(blackout, image, mask)
Example #39
0
    x for x in os.listdir(path)
    if os.path.isfile(x) and os.path.splitext(x)[1] == '.png'
]

listFile.sort()

# Build a tall composite by stacking a 7-pixel-high crop from each frame.
new_image = Image.new('RGBA', (720, 1280))
for file in listFile:

    # frame number is the last 4 digits of the name; 6.46 maps frame index
    # to a scan line (presumably 1280/total frames — TODO confirm)
    FileNum = file.split('.')[1][-4:]
    CropLine = int((int(FileNum) - 1000) * 6.46)

    im = Image.open(path + file)
    r, g, b, a = im.split()

    box = (0, CropLine, 720, CropLine + 7)

    im_crop = im.crop(box)
    # only the crop's alpha band `a` is used below
    r, g, b, a = im_crop.split()

    # NOTE(review): this counter loop only prints numbers and has no
    # effect on the output image.
    i = 1
    for y in range(len(listFile)):
        i += 1
        print(i)
    comp_image = Image.composite(im_crop, im_crop, a)
    new_image.paste(comp_image, box)

new_image.save(path + 'comp.tif')

new_image.show()
Example #40
0
def watermark(im, mark, position):
    """Return a copy of `im` with `mark` pasted at `position`, using the
    mark's own transparency as the compositing mask."""
    overlay = Image.new("RGBA", im.size, (0,0,0,0))
    overlay.paste(mark, position)
    return Image.composite(overlay, im, overlay)
def scaleTile(dsquery, dstile, resampling, tile=''):
    """Downsample the query dataset `dsquery` into the tile dataset
    `dstile` using the requested resampling method.

    'average' uses gdal.RegenerateOverview per band; 'antialias' scales
    through PIL (compositing over an existing tile file if one is on
    disk); every other name is mapped to a GDAL resampling constant and
    handled by gdal.ReprojectImage. `tile` is the output tile path, used
    by 'antialias' and for error messages.
    """
    querysize = dsquery.RasterXSize
    tile_size = dstile.RasterXSize
    tilebands = dstile.RasterCount

    if resampling == 'average':

        # Function: gdal.RegenerateOverview()
        for i in range(1, tilebands + 1):
            # Black border around NODATA
            res = gdal.RegenerateOverview(dsquery.GetRasterBand(i),
                                          dstile.GetRasterBand(i), 'average')
            if res != 0:
                QgsMessageLog.logMessage(
                    "RegenerateOverview() failed on %s, error %d" %
                    (tile, res), CATEGORY, Qgis.Info)

    elif resampling == 'antialias' and numpy_available:

        # Scaling by PIL (Python Imaging Library) - improved Lanczos
        array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
        for i in range(tilebands):
            array[:, :,
                  i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1),
                                                 0, 0, querysize, querysize)
        im = Image.fromarray(array, 'RGBA')  # Always four bands
        im1 = im.resize((tile_size, tile_size), Image.ANTIALIAS)
        if os.path.exists(tile):
            # composite onto the already-written tile so overlaps merge
            im0 = Image.open(tile)
            im1 = Image.composite(im1, im0, im1)
        im1.save(tile, 'PNG')

    else:

        if resampling == 'near':
            gdal_resampling = gdal.GRA_NearestNeighbour

        elif resampling == 'bilinear':
            gdal_resampling = gdal.GRA_Bilinear

        elif resampling == 'cubic':
            gdal_resampling = gdal.GRA_Cubic

        elif resampling == 'cubicspline':
            gdal_resampling = gdal.GRA_CubicSpline

        elif resampling == 'lanczos':
            gdal_resampling = gdal.GRA_Lanczos

        elif resampling == 'mode':
            gdal_resampling = gdal.GRA_Mode

        elif resampling == 'max':
            gdal_resampling = gdal.GRA_Max

        elif resampling == 'min':
            gdal_resampling = gdal.GRA_Min

        elif resampling == 'med':
            gdal_resampling = gdal.GRA_Med

        elif resampling == 'q1':
            gdal_resampling = gdal.GRA_Q1

        elif resampling == 'q3':
            gdal_resampling = gdal.GRA_Q3

        # NOTE(review): an unrecognised `resampling` value leaves
        # gdal_resampling unbound and raises NameError below.
        # Other algorithms are implemented by gdal.ReprojectImage().
        dsquery.SetGeoTransform((0.0, tile_size / float(querysize), 0.0, 0.0,
                                 0.0, tile_size / float(querysize)))
        dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))

        res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
        if res != 0:
            QgsMessageLog.logMessage(
                "ReprojectImage() failed on %s, error %d" % (tile, res),
                CATEGORY, Qgis.Info)
Example #42
0
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-


from PIL import Image

# Stamp mark.png into the bottom-right area of test.jpg, show the result
# and save it as a maximum-quality JPEG.
im = Image.open("test.jpg")
mark = Image.open("mark.png")
layer = Image.new('RGBA', im.size, (0, 0, 0, 0))
layer.paste(mark, (im.size[0] - 170, im.size[1] - 60))
out = Image.composite(layer, im, layer)
out.show()
# BUG FIX: Image.composite returns an RGBA image here (mode follows the
# first argument) and JPEG cannot store an alpha channel, so saving raised
# "cannot write mode RGBA as JPEG" — convert to RGB first.
out.convert('RGB').save('target.jpg', 'JPEG', quality=100)
def __rotate(img, degree):
    """Rotate `img` by `degree` degrees, fill the exposed corners with
    white, and return the result converted back to the original mode."""
    original_mode = img.mode
    rotated = img.convert('RGBA').rotate(degree)
    white_bg = Image.new('RGBA', rotated.size, (255,) * 4)
    composed = Image.composite(rotated, white_bg, rotated)
    return composed.convert(original_mode)
Example #44
0
 def rotate_with_fill(img, magnitude):
     """Rotate `img` by `magnitude` degrees, filling the exposed corners
     with mid-grey, and return it converted back to the original mode."""
     rotated = img.convert("RGBA").rotate(magnitude)
     grey_bg = Image.new("RGBA", rotated.size, (128,) * 4)
     return Image.composite(rotated, grey_bg, rotated).convert(img.mode)
Example #45
0
 def compose(x, y):
     """Blend two (image, value) pairs through a radial gradient mask.

     Presumably x[1] is a gradient length and y[1] a scale — the values
     are forwarded into the enclosing scope's kwargs. Returns the blended
     image paired with y's value.
     """
     blend_kwargs = dict(kwargs, length=x[1], scale=y[1])
     mask = radial_gradient_mask(size, **blend_kwargs)
     return (Image.composite(x[0], y[0], mask), y[1])
    os.path.join(os.getcwd(), 'textures', 'armour-masks', 'boots.png'))
boots_silhouette = Image.open(
    os.path.join(os.getcwd(), 'textures', 'armour-silhouettes', 'boots.png'))

# Mask/silhouette pairs for both armour overlay layers.
armour_layer_1_mask = Image.open(
    os.path.join(os.getcwd(), 'textures', 'armour-masks', 'layer1.png'))
armour_layer_1_silhouette = Image.open(
    os.path.join(os.getcwd(), 'textures', 'armour-silhouettes', 'layer1.png'))

armour_layer_2_mask = Image.open(
    os.path.join(os.getcwd(), 'textures', 'armour-masks', 'layer2.png'))
armour_layer_2_silhouette = Image.open(
    os.path.join(os.getcwd(), 'textures', 'armour-silhouettes', 'layer2.png'))

# Tint each silhouette with the solid colour through its mask to produce
# the final textures; the ore overlay is pasted onto the base block image.
ore_texture = ore_base_block_img
ore_overlay = Image.composite(solid_colour, ore_silhouette, ore_mask)
ore_texture.paste(ore_overlay, (0, 0), ore_overlay)

refined_item_texture = Image.composite(solid_colour, refined_item_silhouette,
                                       refined_item_mask)

refined_block_texture = Image.composite(solid_colour, refined_block_silhouette,
                                        refined_block_mask)

sword_texture = Image.composite(solid_colour, sword_silhouette, sword_mask)

pickaxe_texture = Image.composite(solid_colour, pickaxe_silhouette,
                                  pickaxe_mask)

axe_texture = Image.composite(solid_colour, axe_silhouette, axe_mask)
def main(unused_argv):
    """Replay a StarCraft II game and record per-episode unit-overlay videos.

    Reads settings from absl flags: prepares the output directory and log
    file, dumps the flag values to args.json, steps through the replay, and
    for every rendered frame highlights the screen border plus friendly and
    enemy units via a perspective-warped feature-layer mask, writing one MP4
    per episode of the replay.
    """
    args = flags.FLAGS

    # checks output dir and log file
    create_clear_dir(args.output, args.clear)
    change_log_handler(os.path.join(args.output, 'tracker.log'), args.verbosity)
    logging.info('===================================================================')

    # save args
    with open(os.path.join(args.output, 'args.json'), 'w') as fp:
        json.dump({k: args[k].value for k in args}, fp, indent=4)

    logging.info('___________________________________________________________________')
    logging.info('Tracking units in SC2 by replaying \'{}\'...'.format(args.replay_file))

    env = SC2Environment(
        args.replay_file, args.step_mul, 1., args.replay_sc2_version, 1, False, args.window_size, args.hide_hud, True,
        FEATURE_DIMENSIONS, CAMERA_WIDTH, True)
    env.start()

    # gets perspective transformation matrix
    # Maps a margin-trimmed region of the square feature layer onto the
    # (trapezoidal) ground area of the rendered screen.
    # NOTE(review): the 0.04/0.96/0.7 margins and the destination fractions
    # look hand-tuned for this renderer/camera setup — confirm before reuse.
    left = int(0.04 * FEATURE_DIMENSIONS)
    right = int(0.96 * FEATURE_DIMENSIONS)
    top = int(0.04 * FEATURE_DIMENSIONS)
    bottom = int(0.7 * FEATURE_DIMENSIONS)
    m = cv2.getPerspectiveTransform(
        np.float32([(left, top), (right, top), (right, bottom), (left, bottom)]),
        np.float32([(0.09, 0.05), (0.91, 0.05), (1.04, 0.76), (-0.04, 0.76)]) * np.float32(env.visual_observation.size))

    # creates "square matrix"
    # A 1-pixel frame used to draw the feature-layer border on the video.
    border_mat = np.zeros(tuple(env.agent_interface_format.feature_dimensions.screen))
    border_mat[0, :] = border_mat[-1, :] = border_mat[:, 0] = border_mat[:, -1] = 1

    ep = -1
    video_writer = None
    replay_file = os.path.basename(args.replay_file)
    while not env.finished:

        if env.t == 0 or env.new_episode:
            ep += 1

            if video_writer is not None:
                video_writer.close()

            # creates video writer
            ext_idx = replay_file.lower().find('.sc2replay')
            output_file = os.path.join(
                args.output, '{}-{}.mp4'.format(replay_file[:ext_idx], ep))
            video_writer = skvideo.io.FFmpegWriter(
                output_file, inputdict={'-r': str(args.fps)}, outputdict={'-crf': str(args.crf), '-pix_fmt': 'yuv420p'})

            logging.info('Recording episode {} of replay \'{}\' to \'{}\'...'.format(ep, replay_file, output_file))

        # capture units
        img = env.visual_observation
        if img is not None:
            # (mask, RGB colour): white border, red for own units
            # (player_relative == 1), blue for enemy units (== 4).
            masks_colors = [(border_mat, [255, 255, 255]),
                            (env.agent_obs.observation.feature_screen.player_relative == 1, [255, 0, 0]),
                            (env.agent_obs.observation.feature_screen.player_relative == 4, [0, 0, 255])]
            for mask, color in masks_colors:
                # Scale boolean mask to an alpha-like intensity, warp it into
                # screen space, then thicken and feather it for a soft glow.
                mask = np.asarray(mask * 100, dtype=np.uint8)
                mask = cv2.warpPerspective(mask, m, img.size)
                mask = cv2.dilate(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10)))
                mask = cv2.GaussianBlur(mask, (51, 51), 0)
                mask = Image.fromarray(mask)

                # Solid-colour overlay blended through the mask.
                overlay = np.zeros((img.size[1], img.size[0], 3), dtype=np.uint8)
                for i in range(len(color)):
                    overlay[:, :, i] = color[i]
                overlay = Image.fromarray(overlay)

                img = Image.composite(overlay, img, mask)
            video_writer.writeFrame(np.array(img))

        env.step()

    env.stop()

    logging.info('Finished after {} timesteps!'.format(env.t))
Example #48
0
def watermark(img,
              mark,
              position=(0, 0),
              opacity=1,
              scale=1.0,
              tile=False,
              greyscale=False,
              rotation=0,
              return_name=False,
              **kwargs):
    """Adds a watermark to an image"""

    # Fade the watermark before any geometry is applied.
    if opacity < 1:
        mark = reduce_opacity(mark, opacity)

    # Resolve a scalar scale spec into a concrete (width, height) pair.
    if not isinstance(scale, tuple):
        scale = determine_scale(scale, img, mark)
    mark = mark.resize(scale, resample=Image.ANTIALIAS)

    if greyscale and mark.mode != 'LA':
        mark = mark.convert('LA')

    rotation = determine_rotation(rotation, mark)
    if rotation != 0:
        # Rotate inside a canvas 1.5x the mark's size so the corners of the
        # rotated watermark are not clipped off.
        padded_w = int(mark.size[0] * 1.5)
        padded_h = int(mark.size[1] * 1.5)
        padded = Image.new('RGBA', (padded_w, padded_h), (0, 0, 0, 0))

        # Centre the watermark inside the enlarged canvas before rotating.
        offset = ((padded_w - mark.size[0]) // 2, (padded_h - mark.size[1]) // 2)
        padded.paste(mark, offset)

        mark = padded.rotate(rotation)

    position = determine_position(position, img, mark)

    if img.mode != 'RGBA':
        img = img.convert('RGBA')

    # make sure we have a tuple for a position now
    assert isinstance(position, tuple), 'Invalid position "%s"!' % position

    # Draw the watermark into a fully transparent layer the size of the
    # image; the layer's own alpha then drives the final composite.
    layer = Image.new('RGBA', img.size, (0, 0, 0, 0))
    if tile:
        # Start one tile above/left of the origin so the grid covers the
        # whole image regardless of the requested offset.
        start_y = int(position[1] % mark.size[1] - mark.size[1])
        start_x = int(position[0] % mark.size[0] - mark.size[0])
        for tile_y in range(start_y, img.size[1], mark.size[1]):
            for tile_x in range(start_x, img.size[0], mark.size[0]):
                layer.paste(mark, (tile_x, tile_y))
    else:
        layer.paste(mark, position)

    # composite the watermark with the layer
    return Image.composite(layer, img, layer)
Example #49
0
        # Low-confidence detections are not displayed
        break

    # Colour for this detection class (cycled through the palette)
    _color = _colors[(_output['detection_classes'][_index] - 1) % len(_colors)]

    # Apply the segmentation mask as a coloured overlay
    _mask = _output['detection_masks'][_index]
    _rgb = ImageColor.getrgb(_color)
    _image_array = load_image_into_numpy_array(_image)

    # Solid-colour image the same shape as the mask; the mask (scaled by
    # _alpha) controls how strongly it tints the original image.
    _solid_color = np.expand_dims(
        np.ones_like(_mask), axis=2) * np.reshape(list(_rgb), [1, 1, 3])
    _pil_solid_color = Image.fromarray(np.uint8(_solid_color)).convert('RGBA')
    _pil_mask = Image.fromarray(np.uint8(255.0 * _alpha * _mask)).convert('L')
    _image = Image.composite(_pil_solid_color, _image, _pil_mask)
    np.copyto(_image_array, np.array(_image.convert('RGB')))
    _image = Image.fromarray(_image_array)

    # Draw the bounding box (box coords are normalized ymin/xmin/ymax/xmax)
    _draw = ImageDraw.Draw(_image)
    _class_name = category_index[_output['detection_classes'][_index]]['name']
    _box = tuple(_output['detection_boxes'][_index].tolist())
    _ymin, _xmin, _ymax, _xmax = _box
    _xmin = round(_xmin * _image.size[0])
    _xmax = round(_xmax * _image.size[0])
    _ymin = round(_ymin * _image.size[1])
    _ymax = round(_ymax * _image.size[1])
    _draw.rectangle(((_xmin, _ymin), (_xmax, _ymax)), outline=_color, width=_line_width)

    # Add the label text
    def attach_char(self, char_img, x=0.5, y=0.5):
        """ Attach a character to the environment. This method will overwrite
        the internal image to include the character with the environment in
        the background. In order to reset the environment you may use the
        .load_image() method
        :param char_img: Image object to be attached to the environment
        :param x: Value between [0,1) to control the position of the character
                  on the environment. It can be thought of like a percentage
                  of the total x axis of the image where the centre of the new
                  image will be pasted
        :param y: Value between [0,1) to control the position of the character
                  on the environment. It can be thought of like a percentage
                  of the total y axis of the image where the centre of the new
                  image will be pasted
        :returns: False if the operation is not successful (with appropriate
                  logging)
        :rtype: Boolean
        """

        if not char_img:
            # logger.warn() is a deprecated alias; use warning() instead.
            logger.warning(
                "Char image to be attached to the env is None, will exit")
            return False

        if x < 0 or x >= 1:
            err_msg = "Argument x given to attach_char is out of bounds [0,1)"
            logger.error(err_msg)
            return False
        if y < 0 or y >= 1:
            # Bug fix: this branch previously logged the "Argument x" message.
            err_msg = "Argument y given to attach_char is out of bounds [0,1)"
            logger.error(err_msg)
            return False

        if not self._img:
            err_msg = "Internal envir Image hasn't been loaded. Will load now"
            logger.debug(err_msg)
            self.load_image()

        # Resize avatar if we can't fit it in (Normally shouldn't happen
        # but it makes testing without properly sized assets easier)
        char_szx = char_img.size[0]
        char_szy = char_img.size[1]

        if char_szx > self.get_img().size[0] or \
           char_szy > self.get_img().size[1]:
            # First calculate the reduction coefficient
            # so that no side is larger than 90% of the background canvas
            coeff = 0.9
            c_x = coeff * self.get_img().size[0] / char_szx
            c_y = coeff * self.get_img().size[1] / char_szy
            c = min(c_x, c_y)
            new_size = (int(c * char_szx), int(c * char_szy))
            resized_char = char_img.resize(new_size, Image.ANTIALIAS)
        else:
            resized_char = char_img

        # Centre the (possibly resized) character on the requested fraction
        # of the background's axes.
        top_left_x = int(self.get_img().size[0] * x - resized_char.size[0] / 2)
        top_left_y = int(self.get_img().size[1] * y - resized_char.size[1] / 2)

        # We need to create a temporary image to hold the avatar since
        # in order to composite images they need to have the same size
        temp_img = Image.new(self.get_img().mode, self.get_img().size)
        temp_img.paste(resized_char, (top_left_x, top_left_y))

        # The pasted avatar acts as its own mask, so only its pixels replace
        # the environment's pixels in the composite.
        self._img = Image.composite(temp_img, self.get_img(), temp_img)

        return True
Example #51
0
def display_topk(model,
                 dataset,
                 rows=3,
                 cols=3,
                 largest=True,
                 color=False,
                 figsize=(12, 6)):
    """Show the rows*cols samples the model is most (or least) confident on.

    For every sample the probability assigned to its true label is computed;
    the top-k (largest=True) or bottom-k samples are re-run through a
    ResnetVisualizer and displayed with their feature map overlaid in red.

    :param model: classifier whose predictions are inspected
    :param dataset: dataset yielding (image_tensor, label) pairs
    :param rows: grid rows in the output figure
    :param cols: grid columns in the output figure
    :param largest: pick highest-confidence samples if True, lowest if False
    :param color: keep colour channels if True, else average to greyscale
    :param figsize: matplotlib figure size
    :returns: the array of matplotlib axes
    """

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    model.eval()
    loader = DataLoader(dataset, batch_size=64)
    label_probs_lst = []

    with torch.set_grad_enabled(False):
        for images, labels in loader:
            images = images.to(device)
            labels = labels.to(device)

            logits = model(images)
            probs = F.softmax(logits, -1)
            _, preds = torch.max(probs, 1)
            # Probability assigned to the correct label
            label_probs_lst += [probs[range(len(probs)), labels].cpu()]

        label_probs = torch.cat(label_probs_lst, 0)

        # Indices of the k best/worst-classified samples.
        label_probs, args = torch.topk(label_probs,
                                       k=rows * cols,
                                       largest=largest)

        # Batch of best or worst images
        images = torch.stack([dataset[i][0] for i in args], 0)
        labels = [dataset[i][1] for i in args]
        # Run images through the feature visualizer
        model_vis = ResnetVisualizer(model.to("cpu")).eval()
        probs, vis = model_vis(images)
        _, preds = torch.max(probs, 1)

        fig, axs = plt.subplots(rows, cols, figsize=figsize)

        zipped = zip(preds, labels, label_probs, images, vis, axs.ravel())
        for pred, label, label_prob, image, feat, ax in zipped:
            if not color:
                image[...] = image.mean(0, keepdim=True)
            # Convert torch tensors to PIL images
            image = rescale_to(image, 0, 255).round().to(torch.uint8)
            image = ToPILImage()(image)

            # Invert the feature map so strong activations become dark.
            feat = rescale_to(feat, 0, 255).round().to(torch.uint8)
            feat = ToPILImage()(feat).point(lambda px: np.abs(px - 255))

            # Overlay feature map on image
            red = Image.new('RGB', image.size, (255, 0, 0))
            mask = Image.new('RGBA', image.size, (0, 0, 0, 123))
            mask.putalpha(feat)

            out = Image.composite(image, red, mask)

            #image, feat = to_img(image), to_img(feat)
            ax.imshow(out, interpolation='bicubic')
            ax.axis('off')
            # NOTE(review): reads the global 'validset' instead of the
            # 'dataset' argument for class names — confirm that is intended.
            _pred = validset.classes[pred]
            _label = validset.classes[label]
            ax.set_title(
                f'Pred: {_pred}, Actual: {_label}, \nProb: {label_prob:.2f}')

        return axs
Example #52
0
'''
personalize images
'''

from PIL import Image, ImageDraw, ImageFilter

im2 = Image.open('/Users/mehrdadalemzadeh/Pictures/Matrix.jpg')
# Bug fix: the original resized to im1.size before im1 existed (NameError);
# the Bull image must be resized to im2's size so the composite inputs match.
im1 = Image.open('/Users/mehrdadalemzadeh/Pictures/Bull.jpg').resize(im2.size)

# Elliptical mask: white (255) inside the ellipse selects im1, black keeps im2.
mask = Image.new("L", im1.size, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((120, 30, 600, 550), fill=255)
im = Image.composite(im1, im2, mask)

# Soften the ellipse edge with a Gaussian blur for a smooth blend.
mask_blur = mask.filter(ImageFilter.GaussianBlur(10))
im = Image.composite(im1, im2, mask_blur)

#mask = Image.open('/Users/mehrdadalemzadeh/Pictures/Bull.jpg').convert('L').resize(im1.size)
#im = Image.composite(im1, im2/2, mask)

im.save('/Users/mehrdadalemzadeh/Pictures/Blend.jpg')
    def __init__(self, canvas, root, motor, title):
        """Build one manual-control wheel widget on *canvas*.

        Draws two concentric guide circles with a turtle, places the arrow
        image between them, labels the wheel with *title*, and wires up the
        mouse handlers that drive *motor*.

        :param canvas: Tk canvas to draw on
        :param root: Tk root/master window
        :param motor: motor object controlled by this wheel
        :param title: label text shown above the wheel
        """
        self.canvas = canvas
        self.total_theta = 0
        self.master = root
        self.motor = motor

        # Throw-away turtle used only to draw the static guide circles.
        t = turtle.RawTurtle(canvas)
        t.hideturtle()
        t.speed(0)
        t.pencolor("#000000")

        # All radii are derived from the smaller canvas dimension so the
        # wheel stays circular regardless of the canvas aspect ratio.
        screen_dim_to_use = min(float(canvas['width']),
                                float(canvas['height']))
        screen_dims = float(canvas['width']), float(canvas['height'])
        self.img_dim = float(screen_dim_to_use / 5)
        outer_circle_rad = screen_dim_to_use / 3.15
        inner_circle_rad = screen_dim_to_use / 2.15
        label_additional_height = float(screen_dims[0] / 20)
        self.center = 0, 0

        #self.draw_axes_center(t)

        # Plot axes on graph using turtle
        #ManualControlWheel.draw_axes(t, screen_dims)
        # Turtle circles are drawn from their bottom point, so the start y
        # is offset by the radius; the "middle" circle is the arrow's track.
        outer_y = -outer_circle_rad + self.img_dim / 2
        inner_y = -inner_circle_rad + self.img_dim / 2
        self.middle_circle_y = (outer_y + inner_y) / 2

        outer_rad = outer_circle_rad - self.img_dim / 2
        inner_rad = inner_circle_rad - self.img_dim / 2
        self.middle_circle_rad = (outer_rad + inner_rad) / 2

        t.penup()
        t.setx(0)
        t.sety(outer_y)

        t.pendown()
        t.circle(outer_rad)
        t.penup()

        t.sety(inner_y)

        t.pendown()
        t.circle(inner_rad)
        t.penup()

        # Title label centred above the wheel.
        canvas.create_text(
            (0, (-screen_dims[1] / 2) + label_additional_height),
            text=title,
            width=100)

        # Class-level counter gives each wheel instance a unique number.
        ManualControlWheel.wheels_created += 1
        self.number = ManualControlWheel.wheels_created

        self.curr_image = Image.open(
            "ManualControlWheel/ColorWheelArrow.png").convert("RGBA")

        self.curr_image = self.curr_image.resize(
            (int(self.img_dim), int(self.img_dim)), Image.ANTIALIAS)
        self.imgTk = ImageTk.PhotoImage(self.curr_image)

        img_pos = 0, -outer_circle_rad - self.img_dim / 2

        # Flatten the arrow onto a transparent white background, using the
        # arrow's own alpha channel as the compositing mask.
        fff = Image.new("RGBA", self.curr_image.size, (255, 255, 255, 0))
        out = Image.composite(self.curr_image, fff, self.curr_image)
        self.imgTk = ImageTk.PhotoImage(out)

        # NOTE(review): math.sin takes radians, so math.sin(90) ≈ 0.894.
        # If a quarter-turn (sin of 90°) was intended this should be
        # math.sin(math.pi / 2) — confirm before changing the layout.
        canvas.create_image(
            (img_pos[0], img_pos[1] - math.sin(90) * self.img_dim / 4),
            image=self.imgTk,
            tags='image_tag',
            anchor="center")

        self.old_dim = img_pos
        self.assign_image(root, self.imgTk)
        self.prev_arrow_pos = 0
        # Second turtle used for dynamic drawing while dragging.
        self.t = turtle.RawTurtle(canvas)
        self.t.hideturtle()
        # NOTE(review): _tracer is a private turtle API; the public
        # equivalent is screen-level tracer(0).
        self.t._tracer(0)
        self.curr_arc = None
        #self.draw_middle_circle(t)
        #self.draw_circle_arc(t, 10, -300)
        """ Draws line at current arrow coordinate 
        t.penup()
        t.setx(canvas.coords('image_tag')[0])
        t.sety(-canvas.coords('image_tag')[1])
        t.pendown()
        t.forward(100)"""

        # Detect mouse clicked/dragged
        canvas.bind("<Button-1>", self.mouse_clicked)
        canvas.bind("<B1-Motion>", self.mouse_dragged)
        canvas.bind("<ButtonRelease-1>", self.mouse_released)
from PIL import Image

# Paste a watermark into the bottom-right corner of an image and save it.
watermark = Image.open(
    "D:/pythonProject/ImageRecognitionAndClassification/UnitTest/ImgReco/watermark.jpg"
)
imageFile = Image.open(
    "D:/pythonProject/ImageRecognitionAndClassification/UnitTest/ImgReco/small_tool_1.png"
)
layer = Image.new('RGBA', imageFile.size,
                  (0, 0, 0, 0))  # Create a new image with the given mode and size
print('layer:   ', layer)
layer.paste(
    watermark,
    (imageFile.size[0] - 500, imageFile.size[1] - 100))  # Paste the watermark onto the layer
out = Image.composite(
    layer, imageFile, layer
)  # Composite interpolates a new image from the two inputs, using the mask as per-pixel transparency. The mask mode may be "1", "L" or "RGBA"; all images must share the same size.
out.save(
    r"D:/pythonProject/ImageRecognitionAndClassification/UnitTest/ImgReco/watermark_res.png"
)
Example #55
0
def naive_cutout(img, mask):
    """Cut *img* out along *mask*: masked-in pixels are kept, the rest
    become fully transparent.
    """
    resized_mask = mask.resize(img.size, Image.LANCZOS)
    transparent = Image.new("RGBA", img.size, 0)
    return Image.composite(img, transparent, resized_mask)
        ]
        if predicted_prod_modanet_category_s is None:
            pass
        else:
            # Load the pickled Mask R-CNN output for this image.
            # NOTE(review): pickle.load is unsafe on untrusted files.
            with open(f"{args.input_dir}{file_i}", 'rb') as f:
                segmentation_masks = pickle.load(f)
            mask_rcnn_class_labels = segmentation_masks[0]["class_labels"]
            # Classes that are both predicted for the product and detected
            # in the image's segmentation masks.
            predicted_prod_category_img_mask_overlap = set(
                predicted_prod_modanet_category_s).intersection(
                    set(mask_rcnn_class_labels))
            if len(predicted_prod_category_img_mask_overlap) > 0:
                mask_of_interest_indices = [
                    i for i in range(len(mask_rcnn_class_labels))
                    if mask_rcnn_class_labels[i] in
                    predicted_prod_category_img_mask_overlap
                ]
                # Union of all matching masks: a pixel is kept if any mask
                # of interest covers it.
                mask_of_interest = np.sum(
                    segmentation_masks[0]["masks"][:, :,
                                                   mask_of_interest_indices],
                    axis=2) > 0
                # Keep only the masked region of the global image; all
                # other pixels are replaced with white.
                local_image = Image.composite(
                    image1=global_img_i,
                    image2=Image.new(
                        'RGBA', global_img_i.size,
                        "white")  # white square same size as global image
                    ,
                    mask=Image.fromarray(mask_of_interest).convert("L"))
                local_image.save(f"{args.output_dir}{product_ID}.png")
    # NOTE(review): bare except silently swallows every error (including
    # KeyboardInterrupt/SystemExit) — narrow it and log the exception.
    except:
        print(f"FAILED: {file_i}")
def mask_image(img):
    """Flatten *img* onto an opaque white background, using the image's own
    alpha as the blend mask.
    """
    white_backdrop = Image.new("RGBA", img.size, (255, 255, 255, 255))
    flattened = Image.composite(img, white_backdrop, img)
    return flattened
Example #58
0
def generate_images(
        # inputs
        tags,
        posts,
        humans,
        bgs,
        bg_metas,
        # params
        gen_type="full",            # full  : 720x1280
                                    # square: 28x28
        count=30,                   # images to generate
        seed=0,
        verbosity=1,                # 0: no verbosity,
                                    # 1: print filename and annotations to console
                                    # 2: previous + show images (avoid for count vars > 30)
        storage_setting="memory",   # where will generated images and annotations be stored?
                                    #  filesystem: store to filesystem at dir_dest
                                    #  memory:     store and output
                                    #  both:
        # dir params
        dir_images="images",
        dir_annotations="annotations",
        dir_dest="generated",
        # pixel parameters
        square_width=28,            # used if gen_type == "square"
                                    # if square_width <= 0: skip resize
        _720p_height=720,
        _720p_width=1280,
        _1440p_height=1440,
        _1440p_width=2560,
        post_height_max=144,        # 10% of _1440p_height
        tag_width_max=28,           # 20% of post_height_max
        human_height_max=288        # 20% of _1440p_height
):
    """Synthesize annotated training images by pasting objects onto backgrounds.

    For each of *count* images: pick a random background and one of its
    annotated depth sectors, choose a 720p crop and an insertion point inside
    the sector, build an object image (a human, a tagged post, or a two-tag
    gate), scale it according to the sector's distance model, composite it
    onto the background, crop (full frame or square patch), and store the
    image plus a text annotation according to *storage_setting*.

    :returns: (image arrays, annotation strings) when storage_setting is
              "memory" or "both", else None.

    Bug fixes relative to the original: string comparisons use == instead of
    'is'; the gate branch referenced an undefined 'tag_img'; the vertical
    crop bound compared against the background width instead of its height.
    """

    def ensure_dir(path):
        # Create *path* if it does not exist; tolerate concurrent creation.
        try:
            os.makedirs(path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

    ensure_dir(dir_dest)
    ensure_dir("%s/%s" % (dir_dest, dir_images))
    ensure_dir("%s/%s" % (dir_dest, dir_annotations))

    # init randomizer
    np.random.seed(seed)
    # initialize buckets based on storage_setting
    image_nps = list() if storage_setting != "filesystem" else None
    annotations = list() if storage_setting != "filesystem" else None
    # generate by looping
    # _TODO_ parallelize this
    t_prev = perf_counter()
    for idx in range(count):
        # select a background
        bg_idx = np.random.randint(0, len(bgs))
        bg, bg_meta = bgs[bg_idx], bg_metas[bg_idx]
        # select a background sector
        sector_idx = np.random.randint(0, len(bg_meta))
        sector_np = bg_meta[sector_idx]
        sector = dict()
        sector["x_right"], \
        sector["y_bottom"], \
        sector["x_left"], \
        sector["y_top"], \
        sector["dist_bottom"], \
        sector["dist_top"] = np.split(sector_np, sector_np.shape[0])
        # select a background crop, inclusive of sector
        #   select a pixel within sector to center the crop on
        #   break if error in annotations
        #   _TODO_ should happen much earlier, consider moving to load_data(), unless deprecated via reciprocal drop-off approach
        try:
            crop_x = int(np.random.randint(sector["x_left"],
                                           sector["x_right"]))
        except ValueError:
            print("ValueError with bg_idx: %d, x_left: %d, x_right: %d" %
                  (bg_idx, sector["x_left"], sector["x_right"]))
            break
        try:
            crop_y = int(np.random.randint(sector["y_top"],
                                           sector["y_bottom"]))
        except ValueError:
            print("ValueError with bg_idx: %d, y_top: %d, y_bottom: %d" %
                  (bg_idx, sector["y_top"], sector["y_bottom"]))
            break
        #   try to center crop around the current pixel
        crop = dict()
        #   determine crop_x boundaries
        if (crop_x - _720p_width // 2) < 0:
            crop["x_left"] = 0
            crop["x_right"] = _720p_width - 1
        elif (crop_x + _720p_width // 2) > bg.shape[1]:
            crop["x_left"] = bg.shape[1] - _720p_width
            crop["x_right"] = bg.shape[1] - 1
        else:
            crop["x_left"] = crop_x - _720p_width // 2
            crop["x_right"] = crop_x + _720p_width // 2 - 1
        #   determine crop_y boundaries
        if (crop_y - _720p_height // 2) < 0:
            crop["y_top"] = 0
            crop["y_bottom"] = _720p_height - 1
        # bug fix: the vertical bound must use the background height
        # (shape[0]); the original compared against shape[1] (width).
        elif (crop_y + _720p_height // 2) > bg.shape[0]:
            crop["y_top"] = bg.shape[0] - _720p_height
            crop["y_bottom"] = bg.shape[0] - 1
        else:
            crop["y_top"] = crop_y - _720p_height // 2
            crop["y_bottom"] = crop_y + _720p_height // 2 - 1
        # select a insertion point
        #   randomly select a pixel at which to insert the image
        #   this pixel corresponds to the bottom left pixel of the image to be inserted
        #   this differs from convention (using top left pixel) because y_bottom is used to compute distance
        #   determine x position, y position, which must lie within crop and within sector
        insertion_x = np.random.randint(
            crop["x_left"] if sector["x_left"] < crop["x_left"] else
            sector["x_left"], crop["x_right"]
            if crop["x_right"] < sector["x_right"] else sector["x_right"])
        insertion_y = np.random.randint(
            crop["y_top"] if sector["y_top"] < crop["y_top"] else
            sector["y_top"], crop["y_bottom"]
            if crop["y_bottom"] < sector["y_bottom"] else sector["y_bottom"])
        # determine what to insert, then pillowize into image
        insertion_img = None
        selection = ("human", "post", "gate")[np.random.randint(3)]
        # bug fix: compare strings with ==, not identity ('is'), which is
        # implementation-dependent for string literals.
        if selection == "human":
            insertion_img = Image.fromarray(humans[np.random.randint(
                len(humans))])
        elif selection == "post":
            tag_np = tags[np.random.randint(len(tags))]
            tag_img = prismatize(tag_np, np.random.uniform(0, 1))
            post_np = posts[np.random.randint(len(posts))]
            post_img = crop_and_pad_post(post_np, np.random.uniform(0.3, 1),
                                         tag_img.size[1])
            insertion_img = Image.composite(tag_img, post_img, tag_img)
        else:  # selection is "gate"
            tag0_idx = np.random.randint(3, 11)
            tag0_img = prismatize(tags[tag0_idx], np.random.uniform(0, 1))
            tag1_idx = tag0_idx + 1 if tag0_idx % 2 else tag0_idx - 1
            tag1_img = prismatize(tags[tag1_idx], np.random.uniform(0, 1))
            post_np = posts[np.random.randint(len(posts))]
            # bug fix: this branch referenced the undefined 'tag_img'
            # (a NameError whenever "gate" was selected); use tag0_img.
            post_img = crop_and_pad_post(post_np, np.random.uniform(0.3, 1),
                                         tag0_img.size[1])
            tag0_img = Image.composite(tag0_img, post_img, tag0_img)
            tag1_img = Image.composite(tag1_img, post_img, tag1_img)
            # pad tag0+post, then overlay tag1 onto tag0+post
            gate_width = np.random.randint(post_height_max * 2 +
                                           post_img.size[0])
            tag0_img = ImageOps.expand(tag0_img, (gate_width, 0, 0, 0))
            insertion_img = Image.composite(tag1_img, tag0_img, tag1_img)
        # scale the image
        dist  = ((sector["dist_top"] - sector["dist_bottom"]) \
                / (sector["y_top"] - sector["y_bottom"])) \
                * (insertion_y - sector["y_bottom"]) + sector["dist_bottom"]
        scale = (1/(2**(sector_idx+1)) - 1/(2**(sector_idx))) \
                / (sector["dist_top"] - sector["dist_bottom"]) \
                * (dist - sector["dist_bottom"]) + 1/(2**(sector_idx))
        # resize image based on y position and distance
        insertion_img = insertion_img.resize(
            (int(insertion_img.size[0] * scale),
             int(insertion_img.size[1] * scale)))
        # establish safety bounds
        # NOTE(review): size[0] is width and size[1] is height, so y_top /
        # x_right below appear to use swapped axes — preserved as-is;
        # confirm the original intent before changing.
        safety = {
            "x_left": crop["x_left"],
            "y_top": crop["y_top"] - insertion_img.size[0],
            "x_right": crop["x_right"] - insertion_img.size[1],
            "y_bottom": crop["y_bottom"]
        }
        #   if insertion point out of safety bounds, set them onto safety bounds
        insertion_x = int(insertion_x if insertion_x < safety["x_right"] else
                          safety["x_right"])
        insertion_y = int(
            insertion_y if insertion_y > safety["y_top"] else safety["y_top"])
        # insert the image
        #   place image onto desert_bg
        bg_img = Image.fromarray(bg)
        padding = (insertion_x, insertion_y - insertion_img.size[1], 0, 0)
        insertion_img_padded = ImageOps.expand(insertion_img, padding)
        overlay = Image.composite(insertion_img_padded, bg_img,
                                  insertion_img_padded)
        # apply cropping onto overlay
        if gen_type == "full":
            # update crop region within bounds of bg_img
            #   adjust horizontal crop
            if crop["x_left"] < 0:  # not likely
                crop["x_right"] += 0 - crop["x_left"]
                crop["x_left"] = 0
            elif crop["x_right"] > bg_img.size[0]:
                crop["x_left"] -= crop["x_right"] - bg_img.size[0]
                crop["x_right"] = bg_img.size[0]
            else:
                pass  # nothing wrong with horizontal crop
            #   adjust vertical crop
            if crop["y_top"] < 0:  # not likely
                crop["y_bottom"] += 0 - crop["y_top"]
                crop["y_top"] = 0
            elif crop["y_bottom"] > bg_img.size[1]:
                crop["y_top"] -= crop["y_bottom"] - bg_img.size[1]
                crop["y_bottom"] = bg_img.size[1]
            # update insertion point based on crop region
            insertion_x, insertion_y = insertion_x - crop[
                "x_left"], insertion_y - crop["y_top"]
            overlay = overlay.crop((crop["x_left"], crop["y_top"],
                                    crop["x_right"], crop["y_bottom"]))
        else:  # gen_type == "square"
            # grab square crop centered on insertion_img
            #   insertion_img is wider than it is tall, possible for gates
            if insertion_img.size[0] >= insertion_img.size[1]:
                # use the width
                width = insertion_img.size[0]
                # if we fall below the limit of crop["y_top"], just use crop["y_top"] instead
                top = crop["y_top"] if (
                    insertion_y - width < crop["y_top"]) else (insertion_y -
                                                               width)
                crop["x_left"], crop["y_top"], crop["x_right"], crop[
                    "y_bottom"] = insertion_x, top, insertion_x + width, top + width
            else:  # insertion_img is taller than it is wide
                # use the height
                height = insertion_img.size[1]
                # find the leftmost bounds for our crop, which would otherwise be x_insertion
                leftmost_bound = insertion_x + insertion_img.size[0] - height
                leftmost_bound = leftmost_bound if crop[
                    "x_left"] < leftmost_bound else crop["x_left"]
                try:
                    left = leftmost_bound if leftmost_bound == insertion_x else np.random.randint(
                        leftmost_bound, insertion_x)
                except ValueError:
                    print(
                        "ValueError with bg_idx: %d, leftmost_bound: %d, height: %d, insertion_x: %d, insertion_img.size: (%d, %d), crop[\"x_left\"]: %d" % \
                        (bg_idx, leftmost_bound, height, insertion_x, insertion_img.size[0], insertion_img.size[1], crop["x_left"])
                    )
                    break
                crop["x_left"], crop["y_top"], crop["x_right"], crop[
                    "y_bottom"] = left, insertion_y - height, left + height, insertion_y
            # update crop region within bounds of bg_img
            #   adjust horizontal crop
            if crop["x_left"] < 0:  # not likely
                crop["x_right"] += 0 - crop["x_left"]
                crop["x_left"] = 0
            elif crop["x_right"] > bg_img.size[0]:
                crop["x_left"] -= crop["x_right"] - bg_img.size[0]
                crop["x_right"] = bg_img.size[0]
            else:
                pass  # nothing wrong with horizontal crop
            #   adjust vertical crop
            if crop["y_top"] < 0:  # not likely
                crop["y_bottom"] += 0 - crop["y_top"]
                crop["y_top"] = 0
            elif crop["y_bottom"] > bg_img.size[1]:
                crop["y_top"] -= crop["y_bottom"] - bg_img.size[1]
                crop["y_bottom"] = bg_img.size[1]
            overlay = overlay.crop((crop["x_left"], crop["y_top"],
                                    crop["x_right"], crop["y_bottom"]))
            # resize based on width limitations, so long as square_width >= 1
            if square_width >= 1:
                overlay = overlay.resize((square_width, square_width))
        # prepare annotations
        fn_image, fn_meta = None, None
        meta = "%s, %d, %d, %d" % (selection, insertion_x, insertion_y, sector_idx) if gen_type == "full" else \
               "%s, %d" % (selection, sector_idx)
        # store images and annotations
        if storage_setting == "filesystem" or storage_setting == "both":
            # save image to dir_dest/dir_image
            fn_image = "%s/%s/%s%s.png" % (
                dir_dest, dir_images, dir_dest,
                str(idx).zfill(math.floor(math.log(count + 1, 10)) + 1))
            overlay.save(fn_image, "PNG")
            # save annotations to dir_dest/dir_annotations
            fn_meta = "%s/%s/%s%s.txt" % (
                dir_dest, dir_annotations, dir_dest,
                str(idx).zfill(math.floor(math.log(count + 1, 10)) + 1))
            f = open(fn_meta, "w")
            f.write(meta)
            f.close()
        if storage_setting == "memory" or storage_setting == "both":
            image_nps.append(np.asarray(overlay))
            annotations.append(meta)
        # handle logging verbosity
        if verbosity >= 1:
            print("index: %d, meta: " % idx +
                  meta if fn_image is None else "filename: " + fn_image +
                  ", meta: " + meta)
            print("bg_img.size: ", bg_img.size)
            print("crop: ", crop)
            # how long did this iteration take?
            t_curr = perf_counter()
            print("elapsed time: ", t_curr - t_prev, " s")
            t_prev = t_curr
            print('')
        if verbosity >= 2:
            overlay.show()
    # return data depending on storage_setting
    if storage_setting == "memory" or storage_setting == "both":
        return image_nps, annotations
Example #59
0
 def buildImage(self):
     """Render the final picture: paint composited over a generated
     background, masked by the canvas luminance, then saved as a JPEG.
     """
     # Solid background in whatever colour WrongString.buildBackground()
     # produces, sized to match the drawing canvas.
     background = im.new("RGB", self.canvas.size,
                         WrongString.buildBackground())
     # Grayscale canvas acts as the blend mask: where the mask is bright
     # the background wins, where it is dark the paint layer shows.
     mask = self.canvas.convert("L")
     composed = im.composite(background, self.paint, mask)
     composed.save("output/" + self.fileName + ".jpg")
     print("-- the image was saved!", end="\n")
Example #60
0
    def mixup(img1: np.ndarray = None,
              boxes1: list or np.ndarray = None,
              img2: np.ndarray = None,
              boxes2: list or np.ndarray = None,
              img_info_list: list = None,
              **kwargs):
        """
        Combine two pictures together with a constant-value (128) mask.

        The output has img1's shape; img2 is resized to match and its boxes
        are rescaled by the same ratios.

        :param img1: first image as an HxWxC ndarray (output keeps its size)
        :param boxes1: boxes for img1, each row like [x1, y1, x2, y2, class_id]
        :param img2: second image; if omitted, a deep copy of img1 is used
        :param boxes2: boxes for img2 (rescaled to img1's size)
        :param img_info_list: optional list of file descriptors to load
            images/boxes from via Augment.load_file_from_list
        :return: (mixed image as ndarray, concatenated integer boxes) or
            (mixed image, []) when boxes2 is empty
        :raises ValueError: when neither direct arrays nor img_info_list
            supply the required inputs

        examples:
            img_path1 = 'data/000030.jpg'
            boxes1 = [[36, 205, 180, 289, 1], [51, 160, 150, 292, 14], [295, 138, 450, 290, 14]]
            img_path2 = 'data/000003.jpg'
            boxes2 = [[123, 155, 215, 195, 7], [239, 156, 307, 205, 7]]
            img1 = cv.imread(img_path1)
            img2 = cv.imread(img_path2)
            new_image, new_boxes = Augment.mixup(img1, boxes1, img2, boxes2)
            for box in new_boxes:
                x1, y1, x2, y2, class_id = box
                new_image = cv.rectangle(new_image, (x1, y1), (x2, y2), (0, 250, 0))
            cv.imshow('', new_image)
            cv.waitKey()
            cv.destroyAllWindows()
        """
        img_info_list = img_info_list or kwargs.get('img_info_list')
        # None-safe presence flags: the original code called len(None) when an
        # argument was omitted, raising TypeError instead of a clear error.
        have_img1 = img1 is not None and len(img1)
        have_boxes1 = boxes1 is not None and len(boxes1)
        have_img2 = img2 is not None and len(img2)
        if img_info_list and not have_img1:
            # Load both images (and their boxes) from the descriptor list.
            imgs, boxes = Augment.load_file_from_list(img_info_list, 2)
            img1, img2 = imgs
            boxes1, boxes2 = boxes
        elif img_info_list and have_img1 and have_boxes1:
            # img1 supplied directly; load only the second image.
            img2, boxes2 = Augment.load_file_from_list(img_info_list, 1)
        elif have_img1 and have_boxes1 and not have_img2:
            # Mix the image with itself when no second image is given.
            img2 = copy.deepcopy(img1)
            boxes2 = copy.deepcopy(boxes1)
        elif not (have_img1 and have_boxes1 and have_img2):
            # The original used `assert 'lack of some params...'`, which is a
            # truthy string literal and therefore never fired; raise properly.
            raise ValueError('lack of some params, check it again!')

        h1, w1 = img1.shape[:2]
        h2, w2 = img2.shape[:2]
        # Resize img2 to img1's size; boxes2 must be rescaled by the same
        # width/height ratios below.
        img2 = cv.resize(img2, (w1, h1), interpolation=cv.INTER_CUBIC)
        ratio_x = np.asarray(w1, dtype=np.float64) / w2
        ratio_y = np.asarray(h1, dtype=np.float64) / h2
        img1 = Image.fromarray(img1)
        img2 = Image.fromarray(img2)
        # Constant 128 mask -> a 50/50 blend of the two images.
        mask = np.ones(shape=(h1, w1), dtype=np.uint8) * 128
        mask = Image.fromarray(mask)
        new_image = Image.composite(img1, img2, mask)
        new_image = np.asarray(new_image)
        # None-safe emptiness check (original crashed on len(None) when the
        # caller provided img2 but no boxes2).
        if (boxes2 is None or not len(boxes2)) and len(boxes1):
            return new_image, []
        else:
            boxes1 = np.asarray(boxes1, dtype=np.float64)
            boxes2 = np.asarray(boxes2, dtype=np.float64)
            # NOTE(review): concatenation requires boxes2 rows to have the
            # same column count as boxes1 (i.e. include a class id) — confirm
            # against callers.
            boxes2[:, 0] *= ratio_x
            boxes2[:, 2] *= ratio_x
            boxes2[:, 1] *= ratio_y
            boxes2[:, 3] *= ratio_y
            return new_image, np.concatenate([boxes1, boxes2]).astype(int)