def generateActivatedItemIcons():
    """Generate '-activated', '-activated-done' and '-activated-half'
    variants for every PNG found in icons_activated/."""
    src_dir = "icons_activated/"
    pngs = [name for name in listdir(src_dir)
            if isfile(join(src_dir, name)) and name.split(".")[-1] == "png"]
    for name in pngs:
        stem = ".".join(name.split(".")[0:-1])
        icon = Image.open(src_dir + name).convert("RGBA")
        w, h = icon.size
        shade = Image.new('RGBA', icon.size, (0, 0, 0))
        line = Image.new('RGBA', icon.size, (109, 109, 109))
        # Mask holding a vertical tick from the centre up to the top edge.
        tick_mask = Image.new('L', icon.size, 0)
        ImageDraw.Draw(tick_mask).line(
            [(w // 2, h // 2), (w // 2, 0)], fill=186, width=3)
        # Strongly darkened variant.
        out = Image.blend(icon, shade, 0.51)
        out = Image.composite(line, out, tick_mask)
        out.save(src_dir + stem + "-activated.png")
        # Lightly darkened "done" variant.
        out = Image.blend(icon, shade, 0.29)
        out = Image.composite(line, out, tick_mask)
        out.save(src_dir + stem + "-activated-done.png")
        # "Half" variant: left half darker than the right half.
        rect = Image.new('RGBA', icon.size, (0, 0, 0))
        rect_mask = Image.new("L", icon.size, 0)
        rect_draw = ImageDraw.Draw(rect_mask)
        rect_draw.rectangle((0, 0, w // 2, h), fill=131)
        rect_draw.rectangle((w // 2 + 1, 0, w, h), fill=75)
        out = Image.composite(rect, icon, rect_mask)
        out = Image.composite(line, out, tick_mask)
        out.save(src_dir + stem + "-activated-half.png")
def create_images(self, path):
    """Render every slideshow frame as numbered JPEGs under *path*.

    Also accumulates self.total_duration (seconds) and self.total_frames.
    """
    self.path = path
    blank = Image.new('RGBA', (self.w, self.h), color='#FFFFFF')
    blank.save('/tmp/blank.png')
    del(blank)
    blank_in = blank_out = Image.open('/tmp/blank.png')
    count = 1
    self.total_duration = 0
    for img in self.frames:
        # Default the fade targets to the shared white frame; a string
        # value is treated as a path to a custom fade image.
        if not 'blank_in' in img:
            img['blank_in'] = blank_in
        elif isinstance(img['blank_in'], str):
            img['blank_in'] = Image.open(img['blank_in']).convert('RGBA')
        if not 'blank_out' in img:
            img['blank_out'] = blank_out
        elif isinstance(img['blank_out'], str):
            img['blank_out'] = Image.open(img['blank_out']).convert('RGBA')
        image = Image.open(img['filename']).convert('RGBA')
        # Fade-in: blend from the blank frame towards the image.
        if img['fade_in'] > 0:
            n_in = int(ceil(img['fade_in'] * self.fps))
            for f in range(1, n_in + 1):
                frame = Image.blend(image, img['blank_in'],
                                    1 - (float(f) / n_in))
                frame.save('%s/%09d.jpg' % (path, count),
                           quality=DEFAULT_IMAGE_QUALITY)
                count += 1
        # Hold: copy the source file verbatim for each frame.
        n_hold = int(ceil(img['duration'] * self.fps))
        for f in range(1, n_hold + 1):
            copyfile(img['filename'], '%s/%09d.jpg' % (path, count))
            count += 1
        # Fade-out: blend from the image towards the blank frame.
        if img['fade_out'] > 0:
            n_out = int(ceil(img['fade_out'] * self.fps))
            for f in range(1, n_out + 1):
                frame = Image.blend(image, img['blank_out'],
                                    (float(f) / n_out))
                frame.save('%s/%09d.jpg' % (path, count),
                           quality=DEFAULT_IMAGE_QUALITY)
                count += 1
        self.total_duration += img['fade_in'] + img['duration'] + \
            img['fade_out']
    self.total_frames = count - 1
def broken_pilpaint():
    """Reproduce a PIL overlay/export problem.

    Demonstrates that compositing misbehaves when the background image
    itself uses transparency; kept as a failing test case.
    """
    FACE = Image.open(FP_FACE)
    LEDS = [Image.open(FP_LED_BASE + "{}.png".format(i)) for i in xrange(60)]
    LED0 = LEDS[0].copy()
    base = FACE.copy()
    blend0 = Image.blend(FACE, LED0, 0)
    #blend0.show()
    blend1 = Image.blend(FACE, LED0, 1)
    # NOTE(review): mask=0 is not a valid mask argument; this is the
    # deliberately broken call the function exists to demonstrate.
    base.paste(LED0, mask=0)
def texture(request, project_name):
    """Serve a texture blended over a background colour as a PNG.

    Query params: filename, bg_color, percent (0-100 opacity).
    Returns 404 when parameters are missing/invalid or the texture
    file cannot be opened.
    """
    try:
        get = request.GET
        filename = get['filename'].split('/')[-1]
        bg_color = get['bg_color'].split('/')[-1]
        percent = int(get['percent']) / 100.0
    except (KeyError, ValueError):
        # Missing or non-numeric query parameters.
        return HttpResponseNotFound()
    # check cache for previously computed image
    cache = "%s/cache/%s_%s_%s" % (settings.DATA_DIR, bg_color, percent, filename)
    if os.path.isfile(cache):
        response = HttpResponse(mimetype="image/png")
        # BUG FIX: the cached PNG is binary; open in 'rb', not 'r'
        # (text mode corrupts the bytes on some platforms).
        with open(cache, 'rb') as cached:
            response.write(cached.read())
        return response
    try:
        path = settings.TEXTURES_DIR + '/' + filename
        im = Image.open(path)
    except IOError:
        return HttpResponseNotFound()
    transparent = Image.new('RGBA', im.size, (0, 0, 0, 0))
    blended = Image.blend(transparent, im, percent)
    bg = Image.new('RGBA', im.size, bg_color)
    bg.paste(blended, None, blended)
    response = HttpResponse(mimetype="image/png")
    bg.save(cache)
    bg.save(response, 'PNG')
    return response
def render(self, total_frames):
    """Yield total_frames + 1 cross-fade frames from the initial frame
    (alpha 0.0) to the final frame (alpha 1.0), inclusive.

    BUG FIX: the original accumulated `alpha += 1.0/total_frames` in a
    float loop, so rounding error could skip the final alpha == 1.0
    frame.  Computing alpha from an integer counter guarantees the last
    frame is exactly the final image.
    """
    for step in range(total_frames + 1):
        alpha = step / float(total_frames)
        yield Image.blend(self.initial_frame, self.final_frame, alpha)
def __init__(self, page, scale=2, reduce=2, savedir='.', namefmt='img%s.png'):
    """Build a faint page-image backdrop for annotation drawing.

    Fetches the scanned page image, resizes it to the scaled page
    geometry and blends it (alpha 0.2) onto a blank RGB canvas.
    """
    import Image
    import ImageDraw
    self.page = page
    self.scale = scale
    self.reduce = reduce
    self.savedir = savedir
    self.namefmt = namefmt
    orig_width = int(page.page.get('width'))
    orig_height = int(page.page.get('height'))
    requested_size = (orig_width / scale, orig_height / scale)
    image = Image.new('RGB', requested_size)
    self.leafnum = page.scandata.get('leafNum')
    image_str = page.book.get_page_image(self.leafnum, requested_size,
                                         out_img_type='ppm',
                                         kdu_reduce=reduce)
    page_image = None
    if image_str is not None:
        page_image = Image.open(StringIO(image_str))
        if requested_size != page_image.size:
            page_image = page_image.resize(requested_size)
        try:
            image = Image.blend(image, page_image, 0.2)
        except ValueError:
            # BUG FIX: raising a string ("raise 'blending...'") is a
            # TypeError on Python 2.6+; raise a real exception instead.
            raise ValueError('blending - images didn\'t match')
    self.image = image
    self.draw = ImageDraw.Draw(image)
def space(image):
    """Blend a gradient derived from *image*'s palette over a random
    crop of the bundled space background."""
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 12)
    colours = util.order_colours_by_brightness(colours)
    indices = sorted(random.sample(range(len(colours)), 3))
    colours = [colours[i] for i in indices]
    light, bg, dark = map(tuple, colours)
    # NOTE(review): the sampled light/bg/dark are immediately shadowed
    # by these constants — the computed values above are dead code;
    # preserved verbatim to keep behaviour identical.
    light = (200, 200, 100)
    dark = (100, 200, 100)
    bg = (0, 0, 50, 255)
    layer = Image.open(
        os.path.dirname(os.path.abspath(__file__)) + '/' + 'assets/space.jpg')
    layer = util.random_crop(layer, util.WIDTH, util.HEIGHT)
    colours = util.get_dominant_colours(image, 10)
    colours = util.order_colours_by_saturation(colours)[:-3]
    colours = random.sample(colours, 5)
    colours = util.order_colours_by_hue(colours)
    layer = layer.convert('RGB')
    gradient = util.create_gradient(layer.size, colours)
    return Image.blend(layer, gradient, .4)
def blend_img_with_opacity(markImg, backgroundImg, opacity=1):
    """Blend a watermark onto a background at the given opacity.

    With opacity >= 1 the background is returned unchanged.
    """
    markImg, backgroundImg = make_img_to_same_size(markImg, backgroundImg)
    if opacity < 1:
        # NOTE(review): `mark` is computed but never used (the paste
        # that consumed it is disabled); kept to preserve behaviour.
        mark = reduce_opacity(markImg, opacity)
        #backgroundImg.paste(markImg,None,mark)
        backgroundImg = Image.blend(markImg, backgroundImg, opacity)
    return backgroundImg
def generate(srcfile1, srcfile2, destfile):
    """Write a 10x2 contact sheet of blends between two images.

    Row 1 steps alpha from 0.1 to 1.0; row 2 steps it back down.
    """
    srcimg1 = Image.open(srcfile1)
    srcimg2 = Image.open(srcfile2)
    (width1, height1) = srcimg1.size
    (width2, height2) = srcimg2.size
    maxwidth = max(width1, width2)
    maxheight = max(height1, height2)
    # BUG FIX: Image.resize returns a new image; the original discarded
    # the result, so mismatched sizes made Image.blend raise.
    if width1 != maxwidth or height1 != maxheight:
        srcimg1 = srcimg1.resize((maxwidth, maxheight))
    if width2 != maxwidth or height2 != maxheight:
        srcimg2 = srcimg2.resize((maxwidth, maxheight))
    destimg = Image.new("RGBA", (maxwidth * 10, maxheight * 2))
    for row in range(1, 3):
        for col in range(1, 11):
            if row == 1:
                alpha = col * row / 10.0
            else:
                alpha = 1.0 - (col * row / 20.0)
            img = Image.blend(srcimg1, srcimg2, alpha)
            x = maxwidth * col - maxwidth
            y = maxheight * row - maxheight
            destimg.paste(img, (x, y))
    destimg.save(destfile)
def interpolate(image1, image2, t):
    """Blend two same-sized images with factor t in [0, 1].

    Out-of-range t or mismatched sizes are handled gracefully by
    returning one of the inputs (with a debug message) rather than
    raising.
    """
    if image1.size != image2.size:
        # BUG FIX: corrected misspelled debug messages
        # ("interploate" -> "interpolate", "compatable" -> "compatible").
        print_debug("Error: interpolate function passed non compatible images")
        return image1
    if t < 0:
        print_debug("Error: interpolate function passed bad t value: %f" % t)
        return image2
    elif t > 1:
        print_debug("Error: interpolate function passed bad t value: %f" % t)
        return image1
    return Image.blend(image1, image2, t)
def add_blending(self, source, dest, count=9):
    """Emit *count* cross-fade frames between two image files,
    advancing self.frame for each saved frame."""
    src_img = Image.open(source)
    dst_img = Image.open(dest)
    for step in range(count):
        # Alpha runs from 1/(count+1) up to count/(count+1).
        frame = Image.blend(src_img, dst_img, (step + 1.) / (count + 1.))
        frame.save(self.frame_file(self.frame))
        self.frame += 1
def overlay_images_alpha(img1, img2):
    """Overlay two PhotoImages at 50% opacity and return a PhotoImage."""
    img1 = photoimage2image(img1)
    img2 = photoimage2image(img2)
    # BUG FIX: Image.convert returns a new image; the original discarded
    # the result, so the images were never actually converted to RGBA.
    img1 = img1.convert('RGBA')
    img2 = img2.convert('RGBA')
    new = Image.blend(img1, img2, 0.5)
    return image2photoimage(new)
def draw_and_compare(): global save_im, remaining, fail_count, last_saved_count save_im_copy = save_im.copy() poly_rect_area = get_rand_rect() save_im_crop = save_im.crop(poly_rect_area) save_im_crop.load() draw_rand_polys(save_im_crop) im_crop = im.crop(poly_rect_area) save_im_copy_crop = save_im_copy.crop(poly_rect_area) im_crop.load(), save_im_copy_crop.load() blended_crop = Image.blend(save_im_copy_crop, save_im_crop, p_alpha) blended_crop.load() save_im.paste(blended_crop, (poly_rect_area[0], poly_rect_area[1])) old_diff = abs_diff(save_im_copy_crop, im_crop) new_diff = abs_diff(blended_crop, im_crop) if new_diff < old_diff: remaining -= old_diff - new_diff print str(i) + ':\timproved\t' + str(old_diff-new_diff)\ + '\tremaining:\t' + str(remaining) #autosave if fail_count > max_fail_count\ or last_saved_count > max_last_saved_count: save_im.save(save_path) last_saved_count = 0 fail_count = 0 else: save_im = save_im_copy fail_count += 1
def __init__(self, l, fname): self.frameno = 0 self.total_frames = transition_frames for i in l: self.total_frames += i.slide_frames+transition_frames prevSlide = BlackSlide() prevSlide.load() l.append(BlackSlide()) #self.dir = tempfile.mkdtemp('slideshow') #print self.dir #cmd = 'ffmpeg -v -1 -y -f image2pipe -vcodec ppm -i pipe: -r 30 -target ntsc-dvd "%s"'%fname cmd = 'ffmpeg -v -1 -y -f image2pipe -vcodec ppm -i pipe: -r 30 -b 5000k "%s"'%fname self.ffmpeg = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) self.pipe = self.ffmpeg.stdin self.ffmpeg.stdout.close() for slideno, slide in enumerate(l): self.message("Loading slide %i"%(slideno+1)) slide.load() for i in xrange(transition_frames): self.message("Rendering transition (frame %i of %i)"%(i+1, transition_frames)) self.output_frame(Image.blend(prevSlide.frame(i+transition_frames+prevSlide.slide_frames), slide.frame(i), float(i)/transition_frames)) for i in xrange(slide.slide_frames): self.message("Rendering slide %i (frame %i of %i)"%(slideno+1, i+1, slide.slide_frames)) self.output_frame(slide.frame(i+transition_frames)) prevSlide.destroy() prevSlide = slide self.message("Finishing") print ""
def getCombinedImageInfo():
    """Combine two base64-posted images and return JPEG bytes + size.

    Same-sized images are blended 50/50; otherwise they are interleaved.
    """
    print("processing...")
    file_like = cStringIO.StringIO(request.form["image1"].decode("base64"))
    image1 = Image.open(file_like)
    file_like = cStringIO.StringIO(request.form["image2"].decode("base64"))
    image2 = Image.open(file_like)
    if image1.size == image2.size:
        combinedImg = Image.blend(image1, image2, .5)
    else:
        combinedStr = interleaveImages(image1, image2)
        size = getSize(image1, image2)
        combinedImg = Image.fromstring("RGB", size, combinedStr)
    output = StringIO.StringIO()
    combinedImg.save(output, "JPEG")
    imageInfo = {
        "imageSource": output.getvalue(),
        "height": combinedImg.size[1],
        "width": combinedImg.size[0]
    }
    file_like.close()
    output.close()
    return imageInfo
def generate_diff(img1, img2):
    """Return a composite of two image files using their 50/50 blend
    as the composite mask."""
    first = Image.open(img1)
    second = Image.open(img2)
    #return ImageChops.blend(first, second, 0.5)
    #return ImageChops.difference(first, second)
    mask = Image.blend(first, second, 0.5)
    return Image.composite(first, second, mask)
def update(self, control):
    """Advance the cross-fade: blend the two provider images at the
    tween-driven alpha and hand the result to the keeper."""
    Effect.update(self, control)
    frame_a = self.providers[0].getImage(self.currentFrame)
    frame_b = self.providers[1].getImage(self.currentFrame)
    self.alpha += self.tween.getStep(self.currentFrame)
    blended = Image.blend(frame_a, frame_b, self.alpha)
    self.keeper.setImage(blended)
def add_text(self, string_user, string_id, orig_file, save_file):
    """Stamp a user string and an id string onto the top of an image,
    over a half-translucent banner, and save the result."""
    img = Image.open(orig_file)
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype(self.cfg.get_font(), 13)
    # Measure the text and the image.
    text_width_user, text_height = font.getsize(string_user)
    text_width_id = font.getsize(string_id)[0]
    image_width, image_height = img.size
    # Make a translucent banner from the top strip of the image.
    cropper = img.crop((0, 0, image_width, text_height*4))
    banner = Image.new(img.mode, cropper.size, self.BGCOLOR)
    banner = Image.blend(banner, cropper, 0.5)
    img.paste(banner, (0,0))
    # Centre both strings horizontally.
    x_centered_user = image_width/2 - text_width_user/2
    x_centered_id = image_width/2 - text_width_id/2
    draw.text((x_centered_user, text_height/2), string_user,
              font = font, fill = self.FGCOLOR)
    draw.text((x_centered_id, text_height*2), string_id,
              font = font, fill = self.FGCOLOR)
    img.save(save_file)
def blend(im1, im2, amount, color=None):
    """Blend two images with each other. If the images differ in size
    the color will be used for undefined pixels.

    :param im1: first image
    :type im1: pil.Image
    :param im2: second image
    :type im2: pil.Image
    :param amount: amount of blending (0.0 .. 1.0)
    :type amount: float
    :param color: color of undefined pixels
    :type color: tuple
    :returns: blended image
    :rtype: pil.Image
    """
    im2 = convert_safe_mode(im2)
    if im1.size == im2.size:
        im1 = convert(im1, im2.mode)
    else:
        if color is None:
            expanded = Image.new(im2.mode, im2.size)
        elif im2.mode in ('1', 'L') and type(color) != int:
            # Single-band modes take a scalar fill, not a tuple.
            expanded = Image.new(im2.mode, im2.size, color[0])
        else:
            expanded = Image.new(im2.mode, im2.size, color)
        im1 = im1.convert(expanded.mode)
        we, he = expanded.size
        wi, hi = im1.size
        # BUG FIX: use floor division so the centring offsets stay
        # integers on Python 3 ('/' would produce floats and break paste).
        paste(expanded, im1, ((we - wi) // 2, (he - hi) // 2),
              im1.convert('RGBA'))
        im1 = expanded
    return Image.blend(im1, im2, amount)
def texture(request, project_name):
    """Serve a texture blended over a background colour as a PNG.

    Query params: filename, bg_color, percent (0-100 opacity).
    Returns 404 when parameters are missing/invalid or the texture
    file cannot be opened.
    """
    try:
        get = request.GET
        filename = get['filename'].split('/')[-1]
        bg_color = get['bg_color'].split('/')[-1]
        percent = int(get['percent']) / 100.0
    except (KeyError, ValueError):
        # Missing or non-numeric query parameters.
        return HttpResponseNotFound()
    # check cache for previously computed image
    cache = "%s/cache/%s_%s_%s" % (settings.DATA_DIR, bg_color, percent,
                                   filename)
    if os.path.isfile(cache):
        response = HttpResponse(mimetype="image/png")
        # BUG FIX: the cached PNG is binary; open in 'rb', not 'r'
        # (text mode corrupts the bytes on some platforms).
        with open(cache, 'rb') as cached:
            response.write(cached.read())
        return response
    try:
        path = settings.TEXTURES_DIR + '/' + filename
        im = Image.open(path)
    except IOError:
        return HttpResponseNotFound()
    transparent = Image.new('RGBA', im.size, (0, 0, 0, 0))
    blended = Image.blend(transparent, im, percent)
    bg = Image.new('RGBA', im.size, bg_color)
    bg.paste(blended, None, blended)
    response = HttpResponse(mimetype="image/png")
    bg.save(cache)
    bg.save(response, 'PNG')
    return response
def generate(srcfile1, srcfile2, destfile):
    """Write a 10x2 contact sheet of blends between two images.

    Row 1 steps alpha from 0.1 to 1.0; row 2 steps it back down.
    """
    srcimg1 = Image.open(srcfile1)
    srcimg2 = Image.open(srcfile2)
    (width1, height1) = srcimg1.size
    (width2, height2) = srcimg2.size
    maxwidth = max(width1, width2)
    maxheight = max(height1, height2)
    # BUG FIX: Image.resize returns a new image; the original discarded
    # the result, so mismatched sizes made Image.blend raise.
    if width1 != maxwidth or height1 != maxheight:
        srcimg1 = srcimg1.resize((maxwidth, maxheight))
    if width2 != maxwidth or height2 != maxheight:
        srcimg2 = srcimg2.resize((maxwidth, maxheight))
    destimg = Image.new("RGBA", (maxwidth * 10, maxheight * 2))
    for row in range(1, 3):
        for col in range(1, 11):
            if row == 1:
                alpha = col * row / 10.0
            else:
                alpha = 1.0 - (col * row / 20.0)
            img = Image.blend(srcimg1, srcimg2, alpha)
            x = maxwidth * col - maxwidth
            y = maxheight * row - maxheight
            destimg.paste(img, (x, y))
    destimg.save(destfile)
def taggedcopy(self, points, image, correction=False):
    """Draw distance-coloured tag labels (with translucent black
    containers) onto a copy of *image* and return it."""
    MIN_SIZE = 1
    draw = ImageDraw.Draw(image)
    def dist(p):
        return info.distance(p[0].lat, p[0].lon, self.lat, self.lon)
    points.sort(key=dist, reverse=True) # draw distant first
    if not correction:
        # Normalise to (tag, (dist, point), correction) triples.
        points = [(t,(d,p),1) for (t,(d,p)) in points]
    for tag, (dist, point), correction in points:
        color = self.colordist(dist, 30.0)
        size = int(300.0*correction/info.distance(tag.lat, tag.lon,
                                                  self.lat, self.lon))
        fontPath = "/usr/share/fonts/truetype/ttf-dejavu/DejaVuSans-Bold.ttf"
        font = ImageFont.truetype(fontPath, max(size, MIN_SIZE))
        off_x = -size*2
        off_y = -size*(len(tag)+1)
        # start black container
        top_left = (point[0] + off_x - 3, point[1] + off_y - 1)
        w = 10
        for line in tag:
            w = max(w, draw.textsize(line, font)[0])
        bottom_right = (point[0] + off_x + w + 3,
                        point[1] + off_y + max(size, MIN_SIZE)*len(tag) + 3)
        img = image.copy()
        draw2 = ImageDraw.Draw(img)
        draw2.rectangle([top_left, bottom_right], fill='#000')
        image = Image.blend(image, img, 0.75)
        draw = ImageDraw.Draw(image)
        # end black container
        draw.ellipse((point[0]-size/2, point[1]-size/2,
                      point[0]+size/2, point[1]+size/2), fill=color)
        for line in tag:
            draw.text((point[0] + off_x, point[1] + off_y), line,
                      fill=color, font=font)
            off_y += max(size, MIN_SIZE)
    return image
def main(data_file, outdir, image_prefix, cell_images, label): images_by_class = dict() image_name = image_prefix + "_y{:02d}_x{:02d}.bmp" # read csv file r = csv.DictReader(data_file) for row in r: # Create lists of images from each class cell_image_name = image_name.format(int(row["j"]), int(row["i"])) if row[label] in images_by_class: images_by_class[row[label]].append(cell_image_name) else: images_by_class[row[label]] = [ cell_image_name ] for key in images_by_class: #print key, images_by_class[key], "\n" dilution = 1.0/float(len(images_by_class[key])) print key, "at a dilution of:", dilution # Create an image for each list of images size = Image.open(images_by_class[key][0]).size mean_image = Image.new("L", size, "white") template = None #print image_by_class[key] for image_name in images_by_class[key]: with open(image_name, "rb") as f: cell = Image.open(f) if template: image = align_images.align_to(template, cell) else: template = cell mean_image = Image.blend(mean_image, cell, dilution) del cell mean_image.save(os.path.join(outdir,key+".bmp") if outdir else key+".bmp")
def _overlay_pictures(pic1, pic2, contour):
    """Overlay two pictures: multiply against a contour filter when
    *contour* is set, otherwise a plain 50/50 blend."""
    if contour:
        import ImageFilter, ImageChops
        return ImageChops.multiply(pic2.filter(ImageFilter.CONTOUR), pic1)
    import Image
    return Image.blend(pic2, pic1, 0.5)
def blend(input_file1, input_file2, output_file, surimp):
    """
    composite -blend $(SURIMP_PERCENT) base.jpg input output
    """
    base = Image.open(input_file1)
    overlay = Image.open(input_file2)
    Image.blend(overlay, base, surimp).save(output_file)
def blend_images(imgs):
    """Recursively average a non-empty list of PIL images into a
    single greyscale image.

    :raises ValueError: when *imgs* is empty.
    """
    if not imgs:
        raise ValueError('Not images given to blend')
    count = len(imgs)
    if count == 1:
        return imgs[0].convert('L')
    # BUG FIX: use floor division — on Python 3 `count/2` is a float
    # and slicing with it raises TypeError.
    mid = count // 2
    return Image.blend(blend_images(imgs[:mid]),
                       blend_images(imgs[mid:]), .5)
def Classic(self, Button, alpha = 0.3):
    """Apply a 'classic' tint: desaturate the image, blend in the
    user-entered colour, then show and save in place."""
    self.p = Image.open(self.Newpath)
    color = self.Entry.get_text()
    # Fully desaturated base layer.
    self.p1 = ImageEnhance.Color(self.p).enhance(0)
    self.p2 = Image.new(self.p.mode, (self.p.size[0], self.p.size[1]), color)
    self.s = Image.blend(self.p1, self.p2, alpha)
    self.s.show()
    self.s.save(self.Newpath)
def blend( input_file1, input_file2, output_file, surimp ):
    """
    composite -blend $(SURIMP_PERCENT) base.jpg input output
    """
    base_img = Image.open(input_file1)
    top_img = Image.open(input_file2)
    blended = Image.blend(top_img, base_img, surimp)
    blended.save(output_file)
def replay_detail(replay_url):
    """Parse a StarCraft II replay, build per-player event heatmaps and
    save a heatmap blended over the matching minimap."""
    # BUG FIX: the md5 accumulator was commented out along with the
    # download code, leaving `md5.update(chunk)` a NameError.
    md5 = hashlib.md5()
    f = open(replay_url + ".sc2replay", "rb")
    for chunk in iter(lambda: f.read(8192), ""):
        md5.update(chunk)
    replay_key = str(md5.hexdigest())
    replay = sc2reader.read_file(replay_url + ".sc2replay")
    # Determine which map we're on from the embedded map data checksum.
    chksum = ""
    for entry in replay.raw["initData"]["map_data"]:
        chksum = chksum + str(entry)[52 : len(str(entry)) - 2]
    maps = open("static/maps.json")
    all_maps = json.load(maps)
    for m in all_maps:
        if all_maps[m]["checksum"] == chksum:
            replay_map = m
    # Build per-player location lists.
    p1locations = list()
    p2locations = list()
    for event in replay.events:
        try:
            if str(event.player)[7] == str(1):
                p1locations.append(event.location)
            if str(event.player)[7] == "2":
                p2locations.append(event.location)
        except:
            pass
    mediapath = "./static/img"
    minimap = Image.open(mediapath + all_maps[replay_map]["filename"])
    minimap = minimap.convert("RGBA")
    # Run heatmap code.
    hm = heatmap.Heatmap()
    hm.heatmap(
        p1locations,
        mediapath + replay_key + ".tmp.png",
        # NOTE(review): replay_map is a string key into all_maps here, so
        # .sizeX/.sizeY look wrong — probably meant the map metadata;
        # confirm against the all_maps schema.
        range=(replay_map.sizeX, replay_map.sizeY),
        dotsize=50,
        size=minimap.size,
    )
    heat = Image.open(mediapath + replay_key + ".tmp.png")
    out = Image.blend(minimap, heat, 0.5)
    out.save(mediapath + replay_key + ".jpg")
def create_average(screen, photos):
    """Build a running average image of *photos* centred on a black
    screen-sized canvas, saving periodic progress snapshots."""
    debug("starting")
    (screen_width, screen_height) = screen
    center_x = screen_width / 2
    center_y = screen_height / 2
    # Target area for resizing: golden-ratio rectangle of screen width.
    phi = (1 + math.sqrt(5)) / 2
    standard_area = screen_width * (screen_width/phi)
    # Prototype black screen all images get pasted onto.
    black = Image.new("RGB", screen, "black")
    average = black.copy()
    for i in range(len(photos)):
        debug("doing photo id %s" % photos[i].id)
        try:
            im = load_image(photos[i])
        except FlickrError:
            debug("FlickrError")
            continue
        im.save("orig/%03d.jpg" % i)
        im = resize(im, screen, standard_area)
        # Paste the photo in the centre of a black frame.
        (im_width, im_height) = im.size
        offset_x = center_x - im_width / 2
        offset_y = center_y - im_height / 2
        im_frame = black.copy()
        im_frame.paste(im, (offset_x,offset_y))
        # Running average in constant memory: each new frame gets
        # weight 1/(i+1).  May quantise detail away over many frames;
        # a tree-combine would preserve more depth.
        alpha = 1.0/(i+1)
        average = Image.blend(average, im_frame, alpha)
        if not (i % 10):
            average.save('progress-%03d.jpg' % i)
        del im  # is this necessary? jclark had it.
        time.sleep(2) # be nice to their server
    return average
def Interpolate(file1,file2): a = Image.open(file1) b = Image.open(file2) a = a.convert("RGBA") b = b.convert("RGBA") for i in range(1,10): new = Image.blend(a,b,i/10.0) print "Writing {0}{1}.png".format(file1.split(".")[0],alpha[i]) new.save("{0}{1}.png".format(file1.split(".")[0],alpha[i]))
def Paint(self, Button, alpha = 0.3):
    """Apply a 'paint' effect: boost contrast by the user-entered
    amount, blend in the chosen colour, then show and save in place."""
    self.p = Image.open(self.Newpath)
    color = self.Entr.get_text()
    con = int(self.Entr1.get_text())
    self.p1 = ImageEnhance.Contrast(self.p).enhance(con)
    self.p2 = Image.new(self.p.mode, (self.p.size[0], self.p.size[1]), color)
    self.s = Image.blend(self.p1, self.p2, alpha)
    self.s.show()
    self.s.save(self.Newpath)
def disp_image(child, shape):
    """Render a genome of triangles (x/y pairs + colour) by blending
    each one at 50% onto a black canvas, then display it."""
    rshape = (shape[1], shape[0]) # images use yx, not xy coordinates
    canvas = Image.new('RGB', rshape)
    for tri in child:
        overlay = Image.new('RGB', rshape)
        ImageDraw.Draw(overlay).polygon(tri[:6], fill=tri[-1])
        canvas = Image.blend(canvas, overlay, .5)
    canvas.show()
def do_blend(self):
    """usage: blend <image:pic1> <image:pic2> <float:alpha>

    Replace two images and an alpha with the blended image.
    """
    first = self.do_pop()
    second = self.do_pop()
    weight = float(self.do_pop())
    self.push(Image.blend(first, second, weight))
def create_average(screen, photos):
    """Average *photos* into one screen-sized image.

    Each photo is resized, centred on a black canvas, and folded into a
    running average; progress is saved every tenth photo.
    """
    debug("starting")
    screen_width, screen_height = screen
    center_x = screen_width / 2
    center_y = screen_height / 2
    # Golden-ratio target area used by resize().
    phi = (1 + math.sqrt(5)) / 2
    standard_area = screen_width * (screen_width / phi)
    black = Image.new("RGB", screen, "black")
    average = black.copy()
    for i in range(len(photos)):
        debug("doing photo id %s" % photos[i].id)
        try:
            im = load_image(photos[i])
        except FlickrError:
            debug("FlickrError")
            continue
        im.save("orig/%03d.jpg" % i)
        im = resize(im, screen, standard_area)
        im_width, im_height = im.size
        frame = black.copy()
        frame.paste(im, (center_x - im_width / 2, center_y - im_height / 2))
        # Constant-memory running average: new frame weight 1/(i+1).
        # Detail may get squished into a couple of bits of depth over
        # many frames; a tree combine would be more accurate.
        average = Image.blend(average, frame, 1.0 / (i + 1))
        if not (i % 10):
            average.save('progress-%03d.jpg' % i)
        del im  # is this necessary? jclark had it.
        time.sleep(2)  # be nice to their server
    return average
def visualize_point_cloud(self, cloudfile, output):
    """Colour each cloud pixel by its distance from self's position,
    blend 50/50 with the base image, and save as PNG."""
    cloud = np.load(cloudfile).item()
    img = self.image.copy()
    pixels = img.load()
    for x, y in cloud:
        entry = cloud[x, y]
        lat, lon = entry['lat'], entry['lon']
        pixels[x, y] = self.colordist(
            info.distance(lat, lon, self.lat, self.lon))
    img = Image.blend(img, self.image, 0.5)
    img.save(output, 'png')
def blendImgs():
    """Demo: resize two fixed images to 300x300 and show their blend."""
    size = (300, 300)
    file_name1 = '1.jpg'
    file_name2 = '2.png'
    img_1 = Image.open(file_name1).resize(size)
    img_2 = Image.open(file_name2).resize(size)
    # blend method takes 2 images and an alpha channel value
    Image.blend(img_1, img_2, 0.4).show()
def gpx(track, img, bbox, srs):
    """
    Simple GPX renderer: draw each track as a linestring on a copy of
    the image and fold it back in at 50% opacity.
    """
    for track_id in track.tracks.keys():
        coords = track.getTrack(track_id)
        canvas = img.copy()
        canvas = render_vector("LINESTRING", canvas, bbox, coords, srs)
        img = Image.blend(img, canvas, 0.5)
    return img
def compare(self, images):
    """Write a 50/50 blend of every (img1, img2) pair to the output
    directory as slideNNNNN.png."""
    import Image
    try_mkdir(self.outputdir)
    for idx, (img1, img2) in enumerate(images):
        outfile = os.path.join(self.outputdir, 'slide%05d.png' % (idx+1))
        inform("Creating %s" % outfile)
        left = self.load(img1)
        right = self.load(img2)
        Image.blend(left, right, 0.5).save(outfile)
def write_diff(self, diff_image, fuzz = 0.05):
    """Save a comparison image in the style of ImageMagick's `compare`:
    differing pixels highlighted in red over a faded source."""
    # Amplify the difference image so anything above `fuzz` saturates.
    mask = ImageEnhance.Brightness(self.diff).enhance(1.0/fuzz)
    mask = mask.convert('L')
    lowlight = Image.new('RGB', self.src_im.size, (0xff, 0xff, 0xff))
    highlight = Image.new('RGB', self.src_im.size, (0xf1, 0x00, 0x1e))
    out = Image.composite(highlight, lowlight, mask)
    out = Image.blend(self.src_im, out, 0xcc/255.0)
    out.save(diff_image)
def addTransparency(self, img):
    """ Modify the transparency level of the image.
        @param img: The img whose transparency needs to be modified
    """
    img = img.convert('RGBA')
    # Blend against a fully transparent layer by self.t_factor.
    base = Image.new('RGBA', img.size, (0, 0, 0, 0))
    return Image.blend(base, img, self.t_factor)
def blend(img, mask, color, width, height, alpha=1):
    """Composite *img* over a solid-colour background through *mask*,
    optionally fading the result back towards the background."""
    assert width > 0 and height > 0
    bg = Image.new('RGBA', (width, height),
                   color=image.color_humanize(color))
    result = Image.composite(img, bg, mask)
    if alpha != 1:
        result = Image.blend(bg.copy(), result, alpha)
    return result
def overlay_mask(self, image, mask, blendRatio=0.1):
    """Blend a mask file over *image* at the given ratio.

    The mask is resized to the image and converted to the same mode,
    which Image.blend requires.
    """
    target_size = image.size
    target_mode = image.mode
    maskim = Image.open(mask).resize(target_size).convert(target_mode)
    # Use PIL Image blend with alpha value between 0.0 and 1.0
    return Image.blend(image, maskim, blendRatio)
def color_thermometer():
    """Produce temp-1..temp-5 icons by washing each thermometer icon
    with a temperature colour at 50% opacity."""
    palette = {1: "darkblue", 2: "blue", 3: "yellow", 4: "orange"}
    for i in range(1, 6):
        color = palette.get(i, "red")
        im = Image.open("temperature-%d-icon.png" % i)
        im_copy = im.copy()
        # Flood the icon area with the colour, then restore half of
        # the original by blending.
        draw = ImageDraw.Draw(im)
        draw.rectangle([0, 0, 24, 24], outline=color, fill=color)
        Image.blend(im, im_copy, 0.5).save("temp-%d.png" % i)
def process(self):
    """Compose a comparison sheet of two tracker maps: titles, the
    (binary or blended) difference and the second map, then save it."""
    srcRun = self.paths[0][3]
    dstRun = self.paths[1][3]
    quantity = self.paths[0][-1].split(".")[0]
    textToDraw = ("BINARY " if useSimpleColorDiff else "") + "DIFFERENCE BETWEEN " + srcRun + " AND " + dstRun + " FOR " + quantity
    textWidthHeight = self.font.getsize(textToDraw)
    pen = ImageDraw.Draw(self.finalImage)
    pen.text((self.currSize[0] * 1.5 - textWidthHeight[0] * 0.5, 100),
             textToDraw, fill = (0, 0, 0), font = self.font)
    del pen
    if useSimpleColorDiff:
        # ONE COLOR FOR ALL DIFFERENCES: mark every non-black diff pixel
        # in both the diff and the destination region.
        diffImg = ImageChops.subtract(self.regions[0], self.regions[1])
        for i in range(diffImg.size[0]):
            for j in range(diffImg.size[1]):
                px = diffImg.getpixel((i, j))
                if px[0] != 0 or px[1] != 0 or px[2] != 0:
                    diffImg.putpixel((i, j), binaryDifferenceColor)
                    self.regions[1].putpixel((i, j), binaryDifferenceColor)
        # create difference-masked image
        diffImg = ImageChops.add(diffImg, self.regions[2])
        self.finalImage.paste(self.regions[1],
                              (self.currSize[0], self.currSize[1] + yShift))
        textToDraw = "DIFFERENCE-MASKED TRACKER MAP FOR RUN " + dstRun
    else:
        # MANY COLORS INDICATE MANY POSSIBLE DIFFERENCES
        diffImg = ImageChops.subtract(self.regions[0], self.regions[1])
        diffImg = ImageChops.add(diffImg, self.regions[2]) #2 - refRegion
        blendedImage = Image.blend(self.regions[0], self.regions[1], op)
        self.finalImage.paste(blendedImage,
                              (self.currSize[0], self.currSize[1] + yShift))
        textToDraw = "SUPERIMPOSED TRACKER MAPS"
    textWidthHeight = self.font.getsize(textToDraw)
    pen = ImageDraw.Draw(self.finalImage)
    pen.text((self.currSize[0] * 1.5 - textWidthHeight[0] * 0.5,
              self.currSize[1] + 100),
             textToDraw, fill = (0, 0, 0), font = self.font)
    del pen
    self.finalImage.paste(diffImg, (self.currSize[0], yShift))
    outputFileName = self.savePath + "comparisonImage_" + srcRun + "vs" + dstRun + ".png"
    print(outputFileName)
    self.finalImage.save(outputFileName)
def DrawBlurredRectangle(image, bbox, alpha=0.6):
    """Blends a rectangle into the image with given alpha."""
    overlay = image.copy()
    # other = Image.new(image.mode, image.size)
    pen = ImageDraw.Draw(overlay)
    # Draw rectangle before text
    pen.rectangle([(bbox[0], bbox[1]), (bbox[2], bbox[3])],
                  fill=settings.QUOTE_BG_COLOR)
    # overlay = overlay.filter(ImageFilter.BLUR)
    return Image.blend(image, overlay, alpha)
def arrayForImage(uri):
    """Fetch a thumbnail for *uri* and return a small HSV float array
    with per-channel strength weighting."""
    res = 15
    hueStrength = 1.0
    satStrength = .5
    valStrength = .4
    au = absoluteSite(uri)
    jpg = restkit.Resource(au).get(size='thumb').body_string()
    img = Image.open(StringIO(jpg))
    # Mostly autocontrasted, slightly blended with the raw image.
    img = Image.blend(img, ImageOps.autocontrast(img, cutoff=5), .8)
    img = img.resize((res, int(res * 3 / 4)), Image.ANTIALIAS)
    ar = numpy.asarray(img, dtype='f') / 255
    ar.shape = img.size[1], img.size[0], 3
    ar = hsv_from_rgb(ar) * [hueStrength / 360, satStrength, valStrength]
    return ar
def vis_seg(img_names, cls_names, output_dir, gt_dir): """ This function plot segmentation results to specific directory Args: img_names: list """ assert os.path.exists(output_dir) # a list of dictionary inst_dir = os.path.join(output_dir, 'SegInst') cls_dir = os.path.join(output_dir, 'SegCls') res_dir = os.path.join(output_dir, 'SegRes') if not os.path.isdir(inst_dir): os.mkdir(inst_dir) if not os.path.isdir(cls_dir): os.mkdir(cls_dir) if not os.path.isdir(res_dir): os.mkdir(res_dir) res_list = _prepare_dict(img_names, cls_names, output_dir) for img_ind, image_name in enumerate(img_names): target_inst_file = os.path.join(inst_dir, image_name + '.jpg') target_cls_file = os.path.join(cls_dir, image_name + '.jpg') print image_name gt_image = gt_dir + '/img/' + image_name + '.jpg' img_data = cv2.imread(gt_image) img_width = img_data.shape[1] img_height = img_data.shape[0] pred_dict = res_list[img_ind] inst_img, cls_img = _convert_pred_to_image(img_width, img_height, pred_dict) color_map = _get_voc_color_map() inst_out_img = np.zeros((img_height, img_width, 3)) cls_out_img = np.zeros((img_height, img_width, 3)) for i in xrange(img_height): for j in xrange(img_width): inst_out_img[i][j] = color_map[inst_img[i][j]][::-1] cls_out_img[i][j] = color_map[cls_img[i][j]][::-1] cv2.imwrite(target_inst_file, inst_out_img) cv2.imwrite(target_cls_file, cls_out_img) background = Image.open(gt_image) mask = Image.open(target_cls_file) background = background.convert('RGBA') mask = mask.convert('RGBA') superimpose_image = Image.blend(background, mask, 0.8) name = os.path.join(res_dir, image_name + '.png') superimpose_image.save(name, 'PNG')
def combine(fnum):
    '''
    Take fnum, the PNG's file name sans the extension, and combine the
    particle and field frames into CombinedPNGs/<fnum>.png.
    Acts on single file only.
    '''
    # 1) Open the two source images, which should be of the same size.
    particle_img = Image.open(
        "ParticleData/pngmovie/%d.png" %fnum).convert("RGBA")
    cfield_img = Image.open(
        "FieldData/pngmovie/%d.png" %fnum).convert("RGBA")
    # 2) Crop both images to remove the axis labels.
    data_w = 484; data_h = 484
    particle_w_offset = 170; particle_h_offset = 58
    cfield_w_offset = 113; cfield_h_offset = 58
    particle_cropbox = (particle_w_offset, particle_h_offset,
                        particle_w_offset + data_w,
                        particle_h_offset + data_h)
    particle_temp_img = particle_img.crop(particle_cropbox)
    cfield_cropbox = (cfield_w_offset, cfield_h_offset,
                      cfield_w_offset + data_w, cfield_h_offset + data_h)
    cfield_temp_img = cfield_img.crop(cfield_cropbox)
    # 3) Combine the source images with the cfield image dominant:
    #    Image.blend(image1, image2, alpha) -> image
    #    out = image1 * (1.0 - alpha) + image2 * alpha
    data_img = Image.blend(cfield_temp_img, particle_temp_img, 0.2)
    # 4) Paste over the original particle image (keeps its axis labels).
    particle_img.paste(data_img, particle_cropbox)
    print("Combined image saved as %d.png" %fnum)
    # 5) Finally, add in the colour bar from the cfield plot.
    cbar_cropbox = (data_w + cfield_w_offset + 40, cfield_h_offset,
                    cfield_img.size[0]-5, cfield_h_offset + data_h)
    colourbar = cfield_img.crop(cbar_cropbox)
    particle_img.paste(colourbar, cbar_cropbox)
    particle_img.save("CombinedPNGs/%d.png" %fnum, "PNG")
    cfield_img.close(); particle_img.close(); data_img.close()
def paint_lines(picture, coords, direction): width, height = picture.size outimg = Image.new('L', (width, height)) outpixels = outimg.load() v="vertical" h="horizontal" print "current coords: ", coords for coord in coords: if direction==v: max_line = height elif direction==h: max_line = width for i in xrange(max_line): if direction==v: outpixels[coord,i] = 255 elif direction==h: #print "fail i: {0}\tfail coord: {1}".format(i, coord) outpixels[i,coord] = 255 print "coords: ", coords return Image.blend(ImageOps.invert(outimg).convert("RGB"),picture.convert("RGB"),0.2)
def resize_image(img, height, width, opacity, dest):
    """
    Resize image, set opacity and save to disk.

    Opacity < 100 blends the image over white; JPEGs are saved with
    quality 95.  Returns the destination path.
    """
    size = int(width), int(height)
    imagetype = imghdr.what(img)
    im = Image.open(img)
    im = im.resize(size, Image.ANTIALIAS)
    # Apply overlay if opacity is set
    opacity = float(opacity)
    if (opacity < 100):
        enhance = opacity / 100
        # Create white overlay image
        overlay = Image.new('RGB', size, '#FFFFFF')
        # apply overlay to resized image
        im = Image.blend(overlay, im, enhance)
    if imagetype == 'jpeg':
        im.save(dest, 'JPEG', quality=95)
    else:
        # BUG FIX: this save was unconditional, so the quality=95 JPEG
        # save above was immediately overwritten by a default-quality
        # save; only save here for non-JPEG types.
        im.save(dest, imagetype)
    return dest
def interpolateFrames(file1, file2, baseFormat, nframes, startindex):
    """Write *nframes* blended frames between two images, named via
    baseFormat % index starting at startindex.

    Returns 0 on success, 1 if any frame failed to save.
    """
    retval = 0
    try:
        im1 = Image.open(file1)
        im2 = Image.open(file2)
        saveType = getTypeFromExtension(baseFormat)
        index = startindex
        # BUG FIX: with nframes == 1 the original divided by zero;
        # clamp the denominator so a single frame is just im1.
        denom = max(nframes - 1, 1)
        for i in range(0, nframes):
            t = float(i) / float(denom)
            outname = baseFormat % index
            im3 = Image.blend(im1, im2, t)
            try:
                im3.save(outname, saveType)
                index = index + 1
            except IOError:
                print("Can't save", outname)
                retval = 1
    except IOError:
        print("Can't open one of the input files.")
    return retval