Example #1
def art_florp(bot,track_art):
	image = Image.open(track_art)
	image = image.filter(ImageFilter.FIND_EDGES)
	image = Image.blend(Image.blend(image, image.rotate(90), 0.5), Image.blend(image.rotate(180), image.rotate(270), 0.5), 0.5)
	new_track_art = track_art[:-3] + "rmx.florp." + track_art[-3:]
	image.save(new_track_art) 
	return new_track_art
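PIL's Image.blend(im1, im2, alpha) computes the pixel-wise interpolation im1*(1-alpha) + im2*alpha, so the nested 0.5 blends above give each of the four rotations equal weight. A minimal sketch (not from the original source) checking that on a synthetic image:

from PIL import Image

# A solid red square is rotation-invariant, so the four-way
# 0.5/0.5 blend must reproduce the input exactly.
im = Image.new("RGB", (64, 64), (255, 0, 0))
left = Image.blend(im, im.rotate(90), 0.5)
right = Image.blend(im.rotate(180), im.rotate(270), 0.5)
out = Image.blend(left, right, 0.5)
assert out.getpixel((0, 0)) == (255, 0, 0)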
Example #2
def process_ffx_loop():
    frameSize = (256, 256)
    gridSize = (8, 4)

    totalFrames = gridSize[0] * gridSize[1]
    offset = 4

    outImageSize = (gridSize[0] * frameSize[0], gridSize[1] * frameSize[1])
    outImage = Image.new('RGBA', outImageSize)
    #alphaMultImage = Image.new('L', outImageSize)

    x = 0
    y = 0
    for i in xrange(1, totalFrames + 1):

        frameIdx = i + offset

        frame = Image.open("C:/Projects/ffx/images/test2.{}.tga".format(str(frameIdx).zfill(4)))
        frame.thumbnail(frameSize)

        if i <= offset and False:  # 'and False' keeps this cross-fade branch disabled
            frameIdx2 = totalFrames + i
            frame2 = Image.open("C:/Projects/ffx/images/test2.{}.tga".format(str(frameIdx2).zfill(4)))
            frame2.thumbnail(frameSize)

            _x = i
            opacity = 0.5 + 0.5 * _x / float(offset + 0.5)

            frame = Image.blend(frame2, frame, opacity)

        elif i >= totalFrames - offset and False:  # disabled like the branch above
            frameIdx2 = offset + 1 - (totalFrames - i)
            frame2 = Image.open("C:/Projects/ffx/images/test2.{}.tga".format(str(frameIdx2).zfill(4)))
            frame2.thumbnail(frameSize)

            _x = totalFrames - i
            opacity = 0.5 * (offset + 0.5 - _x) / float(offset + 0.5)

            frame = Image.blend(frame, frame2, opacity)

        #alphaMult = Image.open("C:/Projects/vfxutils/textures/alpha_mask.tga")
        #alphaMult.thumbnail(frameSize)

        location = (x * frameSize[0], y * frameSize[1])

        outImage.paste(frame, location)
        #alphaMultImage.paste(alphaMult, location)

        x += 1
        if x >= gridSize[0]:
            x = 0
            y += 1

    # Make smooth borders
    #r, g, b, a = outImage.split()
    #a = ImageChops.multiply(a, alphaMultImage)
    #outImage = Image.merge("RGBA", (r, g, b, a))

    outImage.save("y:/art/source/particles/textures/special/ffx_loop_test.tga")
Example #3
def octoflip(bot,art):
	image = Image.open(art)
	image = Image.blend(Image.blend(image, image.rotate(90), 0.5), Image.blend(image.rotate(180), image.rotate(270), 0.5), 0.5)
	image = Image.blend(image, image.rotate(45), 0.5)
	image = image.filter(ImageFilter.SHARPEN)
	image = image.filter(ImageFilter.SHARPEN)
	new_art = art[:-3] + "rmx.octo." + art[-3:]
	image.save(new_art)
	return new_art
Example #4
def Calculate(image,coord):
	# image = image.rotate(180)
	# mean = Calculate_Color(image)
	base_image = MatchImage(image,'cartoon')
	'''
		Cheating! Merge the result with the original image.
	'''
	if coord in test_matrix2:
		result = Image.blend(base_image,image,0.85)
	else:
		result = Image.blend(base_image,image,0.75)
	return result
Example #5
def cyan_glow(img):
	glow = img.filter(ImageFilter.BLUR).filter(ImageFilter.BLUR)
	glow = brightness(glow, 1.25)
	result = Image.blend(overlay_blend(glow, img), img, 0.3)
	
	# Tint Cyan
	r = ImageChops.constant(img, 64)
	g = ImageChops.constant(img, 255)
	b = ImageChops.constant(img, 255)
	cyan = Image.merge("RGB",(r,g,b))
	result = Image.blend(overlay_blend(cyan, result), result, 0.8)
	result = apply_vignette(result, 0.5)
	return result
Example #6
def to_png(in_path, page):
    with tempfile.TemporaryDirectory() as workdir:
        extension = in_path.split('.')[-1].lower()
        if extension == 'brd':
            layers = config.LAYERS.values()
            out_paths = [os.path.join(workdir, layer + '.png')
                         for layer in config.LAYERS.keys()]
        elif extension == 'sch':
            layers = [{'layers': ['ALL']}]
            out_paths = [os.path.join(workdir, 'all.png')]
        else:
            # the context manager removes workdir on exit; no manual cleanup
            raise BadExtension

        export = EaglePNGExport(workdir=workdir)
        export.set_page(page)
        export.export(in_path, layers, out_paths)

        oim = None
        for i, out_path in enumerate(out_paths):
            im = Image.open(out_path).convert("L")
            if oim is None:
                oim = im
            else:
                # weight each successive layer by 1/(i+1) so all layers
                # end up with roughly equal contribution
                oim = Image.blend(oim, im, 1.0 / (1.0 + i))

            os.unlink(out_path)

        return oim
Example #7
    def export_mask_blend(self, fn_img, rgb_img,alpha=0.6,plot_method="PIL",quality=60):

        if alpha > 0.0:
            mask_rgb = self.mask_rgb_array(dtype=np.uint8)
            zoom_fac = np.array([s1 / s2 for s1,s2 in zip(rgb_img.shape,mask_rgb.shape)])

            if (zoom_fac != [1.0, 1.0, 1.0]).any():  # resize if any axis differs
                mask_rgb = zoom(input=mask_rgb,order=0,zoom=zoom_fac)

        if plot_method == "PIL":
            if alpha > 0.0:
                img_rgb = Image.fromarray(rgb_img)
                img_msk = Image.fromarray(mask_rgb)
                img = Image.blend(img_rgb,img_msk,alpha)
                img.save(fn_img,quality=quality)
            else:
                img_rgb = Image.fromarray(rgb_img)
                img_rgb.save(fn_img,quality=quality)

        elif plot_method == "mpl":
            dpi = 100.0
            fig = plt.figure(figsize=np.array(rgb_img.shape[:2]) / dpi)
            plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
            ax = plt.subplot()
            # RGB image of scene as background
            ax.imshow(rgb_img, interpolation="none")
            # mask colors above, but transparent
            ax.imshow(mask_rgb, interpolation="none", alpha=alpha)
            ax.set_axis_off()
            plt.savefig(fn_img, dpi=dpi)
            fig.clear()
            plt.close(fig)
        else:
            raise ValueError("Plot method: %s not implemented." % str(plot_method))
Example #8
def diff_images(imagefile1, imagefile2, return_blocked1=False, tolerance=100):
	'''

	Parameters
	----------
	imagefile1, imagefile2 : images
		Two images to compare.  They should be the same size.  Give them by filename or as
		raw data.
	return_blocked1 : bool, default False
		If true, also return an image `Elem` based on imagefile1 showing where the mismatch blocks are.
	tolerance : int
		Sensitivity factor: the larger it is, the less sensitive the comparison.

	Returns
	-------
	int
		The number of mismatched blocks
	Elem
		optional, see return_blocked1
	'''
	screenshot_staging = Image.open(imagefile1)
	screenshot_production = Image.open(imagefile2)
	columns = 60
	rows = 80
	screen_width, screen_height = screenshot_staging.size

	block_width = ((screen_width - 1) // columns) + 1  # this is just a division ceiling
	block_height = ((screen_height - 1) // rows) + 1

	mismatch_blocks = 0

	if return_blocked1:
		degenerate = Image.new(screenshot_staging.mode, screenshot_staging.size, '#FFFFFF')

		if 'A' in screenshot_staging.getbands():
			degenerate.putalpha(screenshot_staging.getchannel('A'))

		screenshot_staging_1 = Image.blend(degenerate, screenshot_staging, 0.25)



	for y in range(0, screen_height, block_height + 1):
		for x in range(0, screen_width, block_width + 1):
			region_staging = _process_region(screenshot_staging, x, y, block_width, block_height, tolerance=tolerance)
			region_production = _process_region(screenshot_production, x, y, block_width, block_height, tolerance=tolerance)

			if region_staging is not None and region_production is not None and region_production != region_staging:
				mismatch_blocks += 1
				if return_blocked1:
					draw = ImageDraw.Draw(screenshot_staging_1)
					draw.rectangle((x, y, x + block_width, y + block_height), outline="red")

	if return_blocked1:
		buffered = BytesIO()
		screenshot_staging_1.save(buffered, format="PNG")
		from xmle import Elem
		blocked1 = Elem.from_png_raw(buffered.getvalue())
		return mismatch_blocks, blocked1

	return mismatch_blocks
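The block sizes above use the ceiling-division identity noted in the comment: ((n - 1) // d) + 1 equals ceil(n / d) for positive integers. A quick self-check over arbitrary ranges:

import math

for n in range(1, 500):
    for d in (60, 80):
        assert ((n - 1) // d) + 1 == math.ceil(n / d)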
Example #9
    def createImage(self):
        self.setProgress(0)
        # filter file names (keep only valid files)
        self.filenames = list(filter(self.isValidFile, self.filenames))

        self.row = (int)((len(self.filenames) + self.column - 1) / (self.column))
        self.width = (int)(self.cellWidth * self.column + self.paddingX * 2)
        self.height = (int)(self.cellHeight * self.row + self.paddingY * 2)
        self.cw = (int)(self.width / self.nIter)

        self.mergeImages()

        interpolation = getattr(Image, self.interpolation)

        mesh = self.createMesh(self.upperFunc, self.lowerFunc)
        mirrorMesh = self.createMirrorMesh(self.upperFunc, self.lowerFunc)

        img = self.mergedImage
        result = img.transform(img.size, Image.MESH, mesh, interpolation)
        self.setProgress(61)

        blank = Image.new("RGBA", (self.width, self.height))
        reflect = img.copy()
        reflect = img.transpose(Image.FLIP_TOP_BOTTOM)
        self.setProgress(81)
        reflect = reflect.transform(img.size, Image.MESH, mirrorMesh, interpolation)
        self.setProgress(92)
        reflect = Image.blend(reflect, blank, 0.5)
        y = int(self.paddingY * 2.9)
        result.paste(reflect, (0, self.height - y, 
            self.width, self.height * 2 - y), reflect)

        self.setProgress(100)

        self.result = result
Example #10
    def process_imgs(self, f0, f1, ctr=0):
        im0 = f0
        im1 = f1
        im_ctr = ctr
        out_file = self.o_path + "img"
        # for level in range(20):
        #     out_file_curr = out_file+utils.zero_str(4,im_ctr)+self.o_type
        #     im0.save(out_file_curr)
        #     im_ctr +=1
        transition = self.warp_frames / 2
        transition_start = self.warp_frames / 4
        transition_end = self.warp_frames - self.warp_frames / 4

        for level in range(int(self.warp_frames) + 1):
            out_file_curr = out_file + utils.zero_str(4, im_ctr) + self.o_type
            # if level< 10:
            #    im0.save(out_file_curr)
            # elif level>self.warp_frames-10:
            #    im1.save(out_file_curr)
            # else:

            if level < transition_start:
                alpha = 0
            elif level > transition_end:
                alpha = 1
            else:
                alpha = float(level - transition_start) / float(self.warp_frames + 1 - transition)

            im_new = Image.blend(im0, im1, alpha)
            ###### TODO HACK FOR 2000x1333 image
            im_new = im_new.crop((40, 120, 1960, 1200))
            # im_new=im_new.resize((1920,1080),Image.BILINEAR)
            im_new.save(out_file_curr)
            im_ctr += 1
Example #11
	def update(self):
		if self.health!=self.healthmax:
			if self.health<0:
				self.health=0
			if self.health<self.healthmax:
				t=time.time()
				if globalconst.DEBUG:
					print t-self.restTime
				if (t-self.restTime)>60:
					self.health+=self.spirit+self.strength
					if self.health>self.healthmax:
						self.health=self.healthmax
					self.restTime=t
			
			alpha=(float)(self.healthmax-self.health)/self.healthmax
			image=Image.blend(self.PILimage, self.PILdyingimg,alpha )
			
			#drawing the health bar ... could be something like a function or an ineherit attribut of abstract class for all objet with life ...
			beginPixelHealth=alpha*image.size[0]
			beginPixelHealth=(int) (beginPixelHealth)
			boxhealth=(0,0,image.size[0]-beginPixelHealth,2)
			healthbar=Image.new('RGB',(image.size[0]-beginPixelHealth,2),(0,255,0))
			image.paste(healthbar,boxhealth)
			image=image.convert('RGBA')
			
			imgstr=image.tostring()
			self.image=pygame.image.fromstring(imgstr,image.size,'RGBA')
			if self.health<=0:
				for killer in self.killer:
					for obj in self.objectofthescene_group:
						if hasattr(obj,"name"):
							if obj.name==killer:
								obj.xpup(self.lvl)
				self.kill()
Example #12
 def __init__(self, master, func):
     Tkinter.Toplevel.__init__(self, master, relief=Tkinter.SOLID, highlightthickness=1, highlightcolor=fg)
     self.root = master
     self.root.withdraw()
     self.overrideredirect(Tkinter.TRUE)
     self.progress = Progressbar(self)
     if not config.python3:
         self.image1 = Image.open(config.relinuxdir + "/splash.png")
         self.image2 = Image.open(config.relinuxdir + "/splash_glowy.png")
         self.images = []
         for i in range(0, 11):
             percent = float(float(i) / 10)
             self.images.append(ImageTk.PhotoImage(Image.blend(self.image1, self.image2, percent)))
         # self.image = ImageTk.PhotoImage(Image.blend(self.image1, self.image2, 0.0))
         self.image = self.images[0]
         self.imgw = self.image.width()
         self.imgh = self.image.height()
     else:
         self.image = Tkinter.PhotoImage(file=config.relinuxdir + "/splash.ppm")
         self.imgw = self.image.width()
         self.imgh = self.image.height()
     self.textvar = Tkinter.StringVar()
     self.progresstext = Label(self, textvariable=self.textvar, height=15, width=480, anchor=Tkinter.W)
     self.w = self.imgw
     self.h = self.imgh + 32
     self.x = self.root.winfo_screenwidth() / 2 - self.w / 2
     self.y = self.root.winfo_screenheight() / 2 - self.h / 2
     self.geometry("%dx%d+%d+%d" % (self.w, self.h, self.x, self.y))
     self.panel = Label(self, image=self.image)
     self.panel.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, expand=True)
     self.progress.pack(side=Tkinter.BOTTOM, fill=Tkinter.X, expand=True)
     self.progresstext.pack(side=Tkinter.BOTTOM, fill=Tkinter.X, expand=True)
     self.update()
     self.thread = FuncThread(func, self.endSplash, self)
     self.thread.start()
Example #13
    def set_background(self):
        img_file = "/tmp/root_window.jpg"
        w = gtk.gdk.get_default_root_window()
        sz = w.get_size()
        pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,False,8,sz[0],sz[1])
        pb = pb.get_from_drawable(w,w.get_colormap(),0,0,0,0,sz[0],sz[1])
        if pb is not None:
            pb.save(img_file,"jpeg")
            image = Image.open(img_file)
            color = 'black'
            alpha = 0.5
            mask = Image.new("RGB", image.size, color)
            image = Image.blend(image, mask, alpha)
            image.save(img_file,"jpeg")

        pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(img_file, self.screen_x, self.screen_y)
        pixmap, mask = pixbuf.render_pixmap_and_mask()
        # width, height = pixmap.get_size()
        self.window.set_app_paintable(True)
        self.window.resize(self.screen_x, self.screen_y)
        self.window.realize()
        self.window.window.set_back_pixmap(pixmap, False)
        self.window.move(0,0)
        del pixbuf
        del pixmap
Example #14
def blend_images(data_folder1, data_folder2, out_folder, alpha=.5):
    filename_queue = tf.placeholder(dtype=tf.string)
    label = tf.placeholder(dtype=tf.int32)
    tensor_image = tf.read_file(filename_queue)

    image = tf.image.decode_jpeg(tensor_image, channels=3)

    multiplier = tf.div(tf.constant(224, tf.float32),
                        tf.cast(tf.maximum(tf.shape(image)[0], tf.shape(image)[1]), tf.float32))
    x = tf.cast(tf.round(tf.mul(tf.cast(tf.shape(image)[0], tf.float32), multiplier)), tf.int32)
    y = tf.cast(tf.round(tf.mul(tf.cast(tf.shape(image)[1], tf.float32), multiplier)), tf.int32)
    image = tf.image.resize_images(image, [x, y])

    image = tf.image.rot90(image, k=label)

    image = tf.image.resize_image_with_crop_or_pad(image, 224, 224)
    sess = tf.Session()
    sess.run(tf.local_variables_initializer())
    for root, folders, files in os.walk(data_folder1):
        for each in files:
            if each.find('.jpg') >= 0:
                img1 = Image.open(os.path.join(root, each))
                img2_path = os.path.join(root.replace(data_folder1, data_folder2), each.split("-")[-1])
                rotation = int(each.split("-")[1])
                img2 = sess.run(image, feed_dict={filename_queue: img2_path, label: rotation})
                imsave(os.path.join(os.getcwd(), "temp", "temp.jpg"), img2)
                img2 = Image.open(os.path.join(os.getcwd(), "temp", "temp.jpg"))
                out_image = Image.blend(img1, img2, alpha)
                outfile = os.path.join(root.replace(data_folder1, out_folder), each)
                if not os.path.exists(os.path.split(outfile)[0]):
                    os.makedirs(os.path.split(outfile)[0])
                out_image.save(outfile)
            else:
                print(each)
    sess.close()
Example #15
    def WorkBook_plotPrisons(self, coords):
        lons = coords[0]
        lats = coords[1]

        print("Graphing image...")
        m = Basemap(llcrnrlon=-119, llcrnrlat=22, urcrnrlon=-64,
            urcrnrlat=49, projection='lcc', lat_1=33, lat_2=45,
            lon_0=-95, resolution='h', area_thresh=10000)
        x, y = m(lons, lats)

        m.drawmapboundary(fill_color='white')
        m.fillcontinents(color='white',lake_color='white')
        m.scatter(x,y,10,marker='D',color='m')

        plt.figure(frameon=False)
        plt.title('US Prison Locations',fontsize=12)
        plt.savefig("USPrisons.png")

        background = Image.open("SocioEconomicBackground.png")
        overlay = Image.open("USPrisons.png")

        background = background.convert("RGBA")
        overlay = overlay.convert("RGBA")

        new_img = Image.blend(background, overlay, 0.5)
        new_img.save("SocioEconomic_Prison.png","PNG")
Example #16
    def get_hdr(self, images, strength=0.0, naturalness=1.0):
        """
        process the hdr image(s)
        strength - a float that defines how strong the hdr
                   effect should be
                 - a value of zero will combine images by using a
                   greyscale image average
                 - a value greater than zero will use higher contrast
                   versions of those greyscale images
                 - suggested values are between 0.0 and 2.0
        naturalness - values between zero and one
                 - zero will be a very high-contrast image
                 - 1.0 will be a very flat image
                 - 0.7 to 0.9 tend to give the best results
        """
        imgs = copy([Image.fromarray(img) for img in images])

        sat_img = self.merge_all(imgs, strength)
        if self.debug_show.get_pos_list() == 1:
            return np.array(sat_img)
        imgs.reverse()

        con_img = self.merge_all(imgs, strength)
        if self.debug_show.get_pos_list() == 2:
            return np.array(con_img)

        """
        combines a saturated image with a contrast image
        and puts them in a dictionary of completed images
        """
        images = Image.blend(con_img, sat_img, naturalness)
        images = np.array(images)
        return images
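The final step mixes a contrast-heavy merge with a saturated merge, using naturalness as the blend weight. A standalone sketch of just that step, with ImageEnhance stand-ins for the two merges (factors are illustrative, not from the original):

from PIL import Image, ImageEnhance

base = Image.new("RGB", (64, 64), (120, 80, 40))
con_img = ImageEnhance.Contrast(base).enhance(1.8)  # high-contrast variant
sat_img = ImageEnhance.Color(base).enhance(1.6)     # saturated variant
naturalness = 0.8  # closer to 1.0 keeps the flatter, saturated look
result = Image.blend(con_img, sat_img, naturalness)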
Example #17
def changeImage(slice_num):
    
    global PreViewImage, PreviewName, stlfilename
    global image_tk

    PreviewName.set("Preview Images - "+stlfilename[:-4]+str(slice_num)+".png")

    OperationValue = OperationVar.get()
    
    imageBlank  = Image.new("RGB", (768,480),0)
    
    image_im_m1        = imageBlank
        
    if (OperationValue == 1):      
        imageFile   = FileputPath+stlfilename[:-4]+str(int(slice_num))  +".png"
        try:
            image_im    = Image.open(imageFile)
        except:
            print imageFile+" error"
            showinfo("Error:", imageFile+" Open Error!")
            #checkslice_ui.destroy()
            return            
      
    if (OperationValue == 2):
        imageFile   = FileputPath+stlfilename[:-4]+str(int(slice_num))  +".png"
        try:
            image_im    = Image.open(imageFile)
        except:
            print imageFile+" error"
            showinfo("Error:", imageFile+" Open Error!")
            #checkslice_ui.destroy()
            return
                
        imageFilem1 = FileputPath+stlfilename[:-4]+str(int(slice_num)-1)+".png"
        try:
            image_im_m1 = Image.open(imageFilem1)
        except:
            image_im_m1 = imageBlank
    
        image_im    = image_im.convert("L")    
        image_im    = ImageOps.colorize(image_im, (0,0,0), (255,0,0)) 
        image_im    = image_im.convert("RGB") 
                                  
        image_im_m1 = image_im_m1.convert("L")    
        image_im_m1 = ImageOps.colorize(image_im_m1, (0,0,0), (255,255,255))
        image_im_m1 = image_im_m1.convert("RGB") 
        
        try:          
            image_im = Image.blend(image_im, image_im_m1, 0.3)
        except:
            null()
                
        image_im_enhance = ImageEnhance.Brightness(image_im)
        image_im = image_im_enhance.enhance(2.0)                       
                                        
    image_tk = ImageTk.PhotoImage(image_im)
        
    PreViewImage.configure(image = image_tk)
            
    return
Example #18
def blend(im1, im2, amount, color=None):
    """Blend two images with each other. If the images differ in size
    the color will be used for undefined pixels.

    :param im1: first image
    :type im1: pil.Image
    :param im2: second image
    :type im2: pil.Image
    :param amount: amount of blending
    :type amount: float
    :param color: color of undefined pixels
    :type color: tuple
    :returns: blended image
    :rtype: pil.Image
    """
    im2 = convert_safe_mode(im2)
    if im1.size == im2.size:
        im1 = convert(im1, im2.mode)
    else:
        if color is None:
            expanded = Image.new(im2.mode, im2.size)
        elif im2.mode in ('1', 'L') and type(color) != int:
            expanded = Image.new(im2.mode, im2.size, color[0])
        else:
            expanded = Image.new(im2.mode, im2.size, color)
        im1 = im1.convert(expanded.mode)
        we, he = expanded.size
        wi, hi = im1.size
        paste(expanded, im1, ((we - wi) // 2, (he - hi) // 2),
              im1.convert('RGBA'))
        im1 = expanded
    return Image.blend(im1, im2, amount)
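When the sizes differ, the smaller image is centered on a canvas of im2's size before blending. A reduced sketch of that padding step without the helper functions (modes assumed to be plain RGB):

from PIL import Image

im1 = Image.new("RGB", (40, 40), "red")
im2 = Image.new("RGB", (100, 60), "blue")
canvas = Image.new("RGB", im2.size)  # undefined pixels default to black
canvas.paste(im1, ((im2.width - im1.width) // 2,
                   (im2.height - im1.height) // 2))
out = Image.blend(canvas, im2, 0.5)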
Example #19
 def do_I_have_to_draw_you_a_picture(self):
     """ Return a little thumbs-up / thumbs-down image with text in it.
     """
     if self.success:
         bytes, color = _thumbs_up_bytes, _thumbs_up_color
     else:
         bytes, color = _thumbs_down_bytes, _thumbs_down_color
     
     thumb = Image.open(StringIO(bytes))
     image = Image.new('RGB', (256, 256), color)
     image.paste(thumb.resize((128, 128)), (64, 80))
     
     mapnik_url = 'http://tile.openstreetmap.org/%(zoom)d/%(column)d/%(row)d.png' % self.coord.__dict__
     mapnik_img = Image.open(StringIO(urlopen(mapnik_url).read()))
     mapnik_img = mapnik_img.convert('L').convert('RGB')
     image = Image.blend(image, mapnik_img, .15)
     
     draw = ImageDraw(image)
     margin, leading = 8, 12
     x, y = margin, margin
     
     for word in self.content.split():
         w, h = draw.textsize(word)
         
         if x > margin and x + w > 250:
             x, y = margin, y + leading
         
         draw.text((x, y), word, fill=(0x33, 0x33, 0x33))
         x += draw.textsize(word + ' ')[0]
     
     return image
Example #20
    def _get_image(overlay, x, y):
        """Superpose the picture of the timezone on the map"""
        def _get_x_offset():
            now = datetime.utcnow().timetuple()
            return - int((now.tm_hour * 60 + now.tm_min - 12 * 60) / (24 * 60) * MAP_SIZE[0])  # night is centered at UTC noon (12)

        im = BACK_IM.copy()
        if overlay:
            overlay_im = Image.open(TIMEZONE_RESOURCES + overlay)
            im.paste(BACK_ENHANCED_IM, overlay_im)
        night_im = ImageChops.offset(NIGHT_IM, _get_x_offset(), 0).crop(im.getbbox())
        if IS_WINTER:
            night_im = ImageOps.flip(night_im)

        # In Wheezy alpha_composite and tobytes are not implemented, yet
        try:
            im.paste(Image.alpha_composite(night_im, LIGHTS_IM), night_im)
        except:
            im.paste(Image.blend(night_im, LIGHTS_IM, 0.5), night_im)
        im.paste(DOT_IM, (int(x - DOT_IM.size[1] / 2), int(y - DOT_IM.size[0] / 2)), DOT_IM)
        try:
            data = im.tobytes()
            w, h = im.size
            data = GLib.Bytes.new(data)
            pb = GdkPixbuf.Pixbuf.new_from_bytes(data, GdkPixbuf.Colorspace.RGB,
                 False, 8, w, h, w * 3)
            return pb
        except:
            data = im.tostring()
            w, h = im.size
            pb = GdkPixbuf.Pixbuf.new_from_data(data, GdkPixbuf.Colorspace.RGB,
                 False, 8, w, h, w * 3)
            return pb
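_get_x_offset scrolls the night texture so night stays centered at UTC noon, as the inline comment says. A quick check of that arithmetic (MAP_SIZE here is an assumed stand-in for the module constant):

MAP_SIZE = (800, 400)  # assumed map dimensions in pixels

def x_offset(hour, minute):
    return -int((hour * 60 + minute - 12 * 60) / (24 * 60) * MAP_SIZE[0])

assert x_offset(12, 0) == 0    # centered at UTC noon
assert x_offset(0, 0) == 400   # half a map width away at midnight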
Example #21
def space(image):
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 12)
    colours = util.order_colours_by_brightness(colours)
    indices = sorted(random.sample(range(len(colours)), 3))
    colours = [colours[i] for i in indices]
    light, bg, dark = map(tuple, colours)
    light = (200, 200, 100)
    dark = (100, 200, 100)
    bg = (0, 0, 50, 255)

    layer = Image.open(os.path.dirname(os.path.abspath(__file__)) + '/' +
                       'assets/space.jpg')
    layer = util.random_crop(layer, util.WIDTH, util.HEIGHT)

    colours = util.get_dominant_colours(image, 10)
    colours = util.order_colours_by_saturation(colours)[:-3]
    colours = random.sample(colours, 5)
    colours = util.order_colours_by_hue(colours)

    layer = layer.convert('RGB')
    gradient = util.create_gradient(layer.size, colours)
    im = Image.blend(layer, gradient, .4)

    return im
Example #22
def blending(im, alpha):
	dir_from = '..\\blending'
	blending_im_list = os.listdir(dir_from)
	while(True):
		blending_im_name = blending_im_list[random.randint(0, len(blending_im_list)-1)]
		if blending_im_name.endswith('.db'):  # skip cache files
			continue
		blending_im = Image.open(dir_from + '\\' + blending_im_name, mode = 'r')
		break
	
	if(min(blending_im.size) < IM_SIZE):
		blending_im = blending_im.crop((0, 0, min(blending_im.size), min(blending_im.size)))
		blending_im = blending_im.resize((IM_SIZE, IM_SIZE))
	else:
		horizontal_pos = random.randint(0, blending_im.size[0] - IM_SIZE - 1)
		vertical_pos = random.randint(0, blending_im.size[1] - IM_SIZE - 1)
		blending_im = blending_im.crop((horizontal_pos, vertical_pos, horizontal_pos + IM_SIZE, vertical_pos + IM_SIZE))

	im.save('tmp.png', "PNG")
	im2 = Image.open('tmp.png', mode = 'r')
	im2 = im2.convert("RGBA")
	blending_im = blending_im.convert("RGBA")
	im3 = Image.blend(im2, blending_im, alpha)

	return im3
Example #23
def transition(path_wall, wallpapers, size, step, trans_step):
    """
        overlay consecutive images from the wallpapers list, save the
        intermediate blends and set each one as wallpaper
    """
    from PIL import Image
    key = checkRun(path_wall)
    if key is True:

        ## for a clean picture folder, all invisible images will be stored in here
        path_trans = path_wall + ".PyNit_trash/"
        if not os.path.isdir(path_trans):
            os.mkdir(path_trans)

        ## shuffle the wallpaper list in place
        random.shuffle(wallpapers)

        for i in range(len(wallpapers)-1):

            old_pic = wallpapers[i]
            new_pic = wallpapers[i+1]
            background = Image.open(path_wall + old_pic)
            overlay = Image.open(path_wall + new_pic)

            ## check if both pictures have the desired size;
            ## a dummy is produced so no harm is done to your pictures
            if background.size != size:
                background = resize(size, background)
                background.save(path_trans + "dummy_background.jpg","JPEG")
                background = Image.open(path_trans + "dummy_background.jpg")

            if overlay.size != size:
                overlay = resize(size, overlay)
                overlay.save(path_trans + "dummy_overlay.jpg","JPEG")
                overlay = Image.open(path_trans + "dummy_overlay.jpg")

            background = background.convert("RGBA")
            overlay = overlay.convert("RGBA")

            new_img_lst = []

            for k in range(0, trans_step, 1):
                new_img = Image.blend(background, overlay, k / float(trans_step))
                new_img.save(path_trans + "new" + str(k) + ".jpg","JPEG")
                new_img_lst.append("new" + str(k) + ".jpg")

            for transit_pic in new_img_lst:
                ## just a reminder: "-a" uses the wallpaper path
                os.system("PyNit.py -a " + ".PyNit_trash/" + transit_pic)

            ## end loop with 100% of the new pic
            os.system("PyNit.py -a " + new_pic)
            ## rest STEP seconds
            time.sleep(step)

            if i == len(wallpapers) - 2:
                ## if the end of the shuffled list is reached, shuffle again
                random.shuffle(wallpapers)
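Each transition renders trans_step intermediate frames whose blend alpha rises linearly from 0; the 100% frame is applied separately after the loop. A sketch of the alpha schedule (trans_step value is illustrative):

trans_step = 10
alphas = [k / float(trans_step) for k in range(trans_step)]
# -> [0.0, 0.1, 0.2, ..., 0.9]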
Example #24
def renderCollage(solution, grid, sampleGrid, imageLibrary, outputFile, cheatFactor=0):
  """Post-optimizes the solution and renders the output."""
  logger.info("Post-optimizing ...")
  optimalParameters = {}
  for i in range(grid.imageCountX):
    logger.progress(i, grid.imageCountX)
    for j in range(grid.imageCountY):
      imageIndex = solution[i, j]
      image = imageLibrary.images[imageIndex]
      sampleImage = image.get(sampleGrid.imageWidth, sampleGrid.imageHeight).get()
      optimalParameters[i, j] = postOptimize(sampleImage, sampleGrid[i, j].get())

  logger.info("Rendering collage ...")
  background = Image.new("RGB", grid.size, "white")
  collage = Image.new("RGB", grid.size, "white")
  for i in range(grid.imageCountX):
    logger.progress(i, grid.imageCountX)
    for j in range(grid.imageCountY):
      offset = (i * grid.imageWidth, j * grid.imageHeight)
      imageIndex = solution[i, j]
      image = imageLibrary.images[imageIndex]
      subImage = image.get(grid.imageWidth, grid.imageHeight).get()
      image = adjustImage(subImage, optimalParameters[i, j])
      background.paste(grid[i, j].get(), offset)
      collage.paste(image, offset)

  logger.info("Saving ...")
  output = Image.blend(collage, background, cheatFactor)
  output.save(outputFile)
Example #25
def draw_ellipse_to_file(jpgfile, imgarray, major, minor, angle, center=None, 
		numpoints=64, color="#3d3df2", width=4):
	"""
	major - major axis radius (in pixels)
	minor - minor axis radius (in pixels)
	angle - angle (in degrees)
	center - position of centre of ellipse
	numpoints - # of points used that make an ellipse

	angle is positive toward y-axis
	"""
	if center is None:
		center = numpy.array(imgarray.shape, dtype=numpy.float)/2.0

	points = ellipse.generate_ellipse(major, minor, angle, center, numpoints, None, "step", True)
	x = points[:,0]
	y = points[:,1]

	## wrap around to end
	x = numpy.hstack((x, [x[0],]))
	y = numpy.hstack((y, [y[0],]))
	## convert image
	originalimage = imagefile.arrayToImage(imgarray)
	originalimage = originalimage.convert("RGB")
	pilimage = originalimage.copy()
	draw = ImageDraw.Draw(pilimage)
	for i in range(len(x)-1):
		xy = (x[i], y[i], x[i+1], y[i+1])
		draw.line(xy, fill=color, width=width)

	## create an alpha blend effect
	originalimage = Image.blend(originalimage, pilimage, 0.9)
	originalimage.save(jpgfile, "JPEG", quality=85)
	return
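The final blend softens the drawn ellipse by mixing the annotated copy back with the untouched original. The same draw-then-blend pattern in isolation, without the ellipse helpers (sizes and colors are illustrative):

from PIL import Image, ImageDraw

original = Image.new("RGB", (128, 128), "black")
annotated = original.copy()
ImageDraw.Draw(annotated).line((0, 0, 127, 127), fill="#3d3df2", width=4)
# alpha 0.9 keeps the line strong but lets the background bleed through
softened = Image.blend(original, annotated, 0.9)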
Example #26
 def _overlay(self, im0, im1, alpha):
     try:
         arr = Image.blend(im1, im0, alpha)
         self.source_frame = asarray(arr)
         self.refresh_needed = True
     except ValueError:
         pass
Example #27
def marksimilar(image, clust, size):
    """
    Draw discovered similar image regions.
    """
    global opt
    blocks = []
    if clust:
        draw = ImageDraw.Draw(image)
        mask = Image.new('RGB', (size, size), 'cyan')
        for cl in clust:
            for x, y in cl:
                im = image.crop((x, y, x + size, y + size))
                im = Image.blend(im, mask, 0.5)
                blocks.append((x, y, im))
        for bl in blocks:
            x, y, im = bl
            image.paste(im, (x, y, x + size, y + size))
        if int(opt.imauto):
            for cl in clust:
                cx1 = min([cx for cx, cy in cl])
                cy1 = min([cy for cx, cy in cl])
                cx2 = max([cx for cx, cy in cl]) + block_len
                cy2 = max([cy for cx, cy in cl]) + block_len
                draw.rectangle([cx1, cy1, cx2, cy2], outline="magenta")
    return image
Example #28
def filter_blend(image, baseDir, *args):
  if len(args) == 2:
    filename, opacity = args

    overlay = load_image(os.path.join(
      baseDir,
      *filename.split('/')
    ))
  else:
    red, green, blue, opacity = args

    overlay = Image.new('RGB', image.size, (
      int(red),
      int(green),
      int(blue),
    ))

    # if the background image has an alpha channel copy it to
    # the overlay, so that transparent areas stay transparent.
    alpha = get_alpha(image)
    if alpha:
      overlay.putalpha(alpha)

  image, overlay = ensure_same_mode(image, overlay)
  return Image.blend(image, overlay, float(opacity))
Example #29
    def handle_btn(self):
        if self.current_image_index + 1 < len(self.sortedoriginal):
            self.current_image_index = self.current_image_index + 1

            #Set new original image
            self.current_original_image = QPixmap(os.path.join(self.original_path, self.sortedoriginal[self.current_image_index]))
            self.origim = Image.open(os.path.join(self.original_path, self.sortedoriginal[self.current_image_index]))
            self.origimresized = resizeimage.resize_cover(self.origim, [1003, 752])
            self.b.setPixmap(self.current_original_image)
            self.b.setPixmap(self.current_original_image.scaledToWidth(1003))
            self.w.setWindowTitle("Original image: " + self.sortedoriginal[self.current_image_index])

            #set new mask image
            self.current_mask = QPixmap(os.path.join(self.full_masks_path, self.sortedmasks[self.current_image_index]))
            self.maskim = Image.open(os.path.join(self.full_masks_path, self.sortedmasks[self.current_image_index]))
            self.b2.setPixmap(self.current_mask)
            self.w2.setWindowTitle("Full mask: " + self.sortedmasks[self.current_image_index])

            #set new overlay image
            self.overlayed_image = Image.blend(self.origimresized, self.maskim, 0.3)
            self.qim = ImageQt(self.overlayed_image)
            self.pix = QPixmap.fromImage(self.qim)
            self.b3.setPixmap(self.pix)
            self.w3.setWindowTitle('Overlayed Image: ' + self.sortedoriginal[self.current_image_index])
            self.w3.show()
        else:
            print("No more images in directory!")
Example #30
def compare_graphs(img1, img2, alpha = 0.3):
	# img1 = 'fig_sales_price.png'
	background = Image.open(img1)
	foreground = Image.open(img2)

	out = Image.blend(background, foreground, alpha)
	out.show()
Example #31
test_images = image.img_to_array(img).astype('float32') / 255
test_images = np.reshape(test_images, (1, 224, 224, 3))

# network
def build_network():
    base_model = inception_v3.InceptionV3(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    base_model2 = models.Model(inputs=base_model.input, outputs=base_model.get_layer('mixed10').output)
    x = layers.GlobalAveragePooling2D()(base_model2.output)
    x = layers.Dense(128, activation='relu')(x)
    output_amdState = layers.Dense(2, activation='sigmoid', name='output_amdState')(x)
    x = layers.Dense(4, activation='relu')(output_amdState)
    output_time = layers.Dense(1, activation='sigmoid', name='output_time')(x)
    model = models.Model(inputs=base_model2.input, outputs=[output_amdState, output_time])
    opt = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
    return model

model = build_network()
model.load_weights(options.weights + "/analysis_cat" + options.cutoff_yr + "/model_finetune_img2amdState2time.h5")

mapp = visualize_saliency(model, -1, 0, test_images[0])
background = test_images[0]
background = Image.fromarray((background*255).astype('uint8'), 'RGB')
overlay = Image.fromarray((mapp*255).astype('uint8'), 'RGB')
background = background.convert("RGBA")
overlay = overlay.convert("RGBA")
new_img = Image.blend(background, overlay, 0.5)
fig = plt.figure(1)
plt.imshow(new_img)
fig.savefig(options.out_image) 
Example #32
 def wipeexec(self, canvasdata, type, speed, option):
     speed = float(speed)
     if type == "randomfade":
         listarray = ["crossfade", "tile"]
         type = listarray[random.randrange(len(listarray))]
     elif type == "random":
         listarray = ["crossfade", "tile", "tinytile", "slide", "slideh"]
         type = listarray[random.randrange(len(listarray))]
     if type == "crossfade":
         speed = speed / 50
         for i in range(50):
             j = i + 1
             j = float(1) / float(50) * float(j)
             self.crosscanvas = Image.blend(self.ledcanvasprev, canvasdata,
                                            j)
             self.ledoutput(self.crosscanvas)
             time.sleep(speed)
     elif type == "tile":
         speed = speed / 32
         masktilebase = Image.new("L", (8, 40), 0)
         draw = ImageDraw.Draw(masktilebase)
         draw.rectangle((0, 32, 7, 39), fill=(255), outline=(255))
         for i in range(24):
             j = i + 8
             k = int(float(255) / float(24) * float(j))
             draw.rectangle((0, j, 7, j), fill=k, outline=k)
         for i in range(32):
             mask = Image.new("L", (canvasdata.width, canvasdata.height), 0)
             masktile = Image.new("L", (16, 32), 0)
             j = i + 8
             masktilebase_crop = masktilebase.crop((0, i, 8, j))
             masktile.paste(masktilebase_crop, (0, 0))
             masktilebase_crop = ImageOps.flip(masktilebase_crop)
             masktile.paste(masktilebase_crop, (8, 0))
             for k in range(4):
                 l = k * 8
                 masktile.paste(masktile, (0, l))
             for k in range(8):
                 l = k * 16
                 mask.paste(masktile, (l, 0))
             self.crosscanvas = Image.composite(canvasdata,
                                                self.ledcanvasprev, mask)
             self.ledoutput(self.crosscanvas)
             time.sleep(speed)
     elif type == "tinytile":
         speed = speed / 8
         for i in range(8):
             j = i + 1
             mask = Image.new("L", canvasdata.size, 0)
             masktile = Image.new("L", (16, 32), 0)
             draw = ImageDraw.Draw(masktile)
             draw.rectangle((0, 0, 7, j), fill=(255), outline=(255))
             j = 8 - j
             draw.rectangle((8, 7, 15, j), fill=(255), outline=(255))
             for k in range(4):
                 l = k * 8
                 masktile.paste(masktile, (0, l))
             for k in range(8):
                 l = k * 16
                 mask.paste(masktile, (l, 0))
             self.crosscanvas = Image.composite(canvasdata,
                                                self.ledcanvasprev, mask)
             self.ledoutput(self.crosscanvas)
             time.sleep(speed)
     elif type == "slide":
         speed = speed / self.ledoptions_cols
         canvasmaskbase = Image.new(
             'RGB', ((int(self.ledoptions_cols) * 2), self.ledoptions_rows),
             (0, 0, 0))
         canvasmaskbase.paste(self.ledcanvasprev, (0, 0))
         canvasmaskbase.paste(canvasdata, (self.ledoptions_cols, 0))
         for i in range(self.ledoptions_cols):
             cropx = i + self.ledoptions_cols
             self.crosscanvas = canvasmaskbase.crop(
                 (i, 0, cropx, (int(self.ledoptions_rows))))
             self.ledoutput(self.crosscanvas)
             time.sleep(speed)
     elif type == "slideh":
         speed = speed / self.ledoptions_rows
         canvasmaskbase = Image.new('RGB',
                                    (self.ledoptions_cols,
                                     (int(self.ledoptions_rows) * 2)),
                                    (0, 0, 0))
         canvasmaskbase.paste(self.ledcanvasprev, (0, 0))
         canvasmaskbase.paste(canvasdata, (0, self.ledoptions_rows))
         for i in range(self.ledoptions_rows):
             cropy = i + self.ledoptions_rows
             self.crosscanvas = canvasmaskbase.crop(
                 (0, i, (int(self.ledoptions_cols)), cropy))
             self.ledoutput(self.crosscanvas)
             time.sleep(speed)
     else:
         self.ledoutput(canvasdata)
     self.ledoutput(canvasdata)
     self.ledcanvasprev = copy.copy(canvasdata)
Example #33
imgs = os.listdir("img")
for jpg in imgs:

    img = Image.open("img/" + jpg)
    old_img = copy.deepcopy(img)
    original_h = np.array(img).shape[0]
    original_w = np.array(img).shape[1]

    img = img.resize((WIDTH, HEIGHT))
    img = np.array(img)
    img = img / 255
    img = img.reshape(-1, HEIGHT, WIDTH, 3)
    pr = model.predict(img)[0]

    pr = pr.reshape(
        (int(HEIGHT / 2), int(WIDTH / 2), NCLASSES)).argmax(axis=-1)

    seg_img = np.zeros((int(HEIGHT / 2), int(WIDTH / 2), 3))
    colors = class_colors

    for c in range(NCLASSES):
        seg_img[:, :, 0] += ((pr[:, :] == c) * (colors[c][0])).astype('uint8')
        seg_img[:, :, 1] += ((pr[:, :] == c) * (colors[c][1])).astype('uint8')
        seg_img[:, :, 2] += ((pr[:, :] == c) * (colors[c][2])).astype('uint8')

    seg_img = Image.fromarray(np.uint8(seg_img)).resize(
        (original_w, original_h))

    image = Image.blend(old_img, seg_img, 0.3)
    image.save("img_out" + jpg)
Example #34
def segmentChamber(videofile, dicomdir, view):
    mean = 24
    weight_decay = 1e-12
    learning_rate = 1e-4
    maxout = False
    sesses = []
    models = []
    if view == "a4c":
        g_1 = tf.Graph()
        with g_1.as_default():
            label_dim = 6  #a4c
            sess1 = tf.Session()
            model1 = Unet(mean,
                          weight_decay,
                          learning_rate,
                          label_dim,
                          maxout=maxout)
            sess1.run(tf.local_variables_initializer())
            sess = sess1
            model = model1
        with g_1.as_default():
            saver = tf.train.Saver()
            saver.restore(sess1, './models/a4c_45_20_all_model.ckpt-9000')
    elif view == "a2c":
        g_2 = tf.Graph()
        with g_2.as_default():
            label_dim = 4
            sess2 = tf.Session()
            model2 = Unet(mean,
                          weight_decay,
                          learning_rate,
                          label_dim,
                          maxout=maxout)
            sess2.run(tf.local_variables_initializer())
            sess = sess2
            model = model2
        with g_2.as_default():
            saver = tf.train.Saver()
            saver.restore(sess2, './models/a2c_45_20_all_model.ckpt-10600')
    elif view == "a3c":
        g_3 = tf.Graph()
        with g_3.as_default():
            label_dim = 4
            sess3 = tf.Session()
            model3 = Unet(mean,
                          weight_decay,
                          learning_rate,
                          label_dim,
                          maxout=maxout)
            sess3.run(tf.local_variables_initializer())
            sess = sess3
            model = model3
        with g_3.as_default():
            saver = tf.train.Saver()
            saver.restore(sess3, './models/a3c_45_20_all_model.ckpt-10500')
    elif view == "psax":
        g_4 = tf.Graph()
        with g_4.as_default():
            label_dim = 4
            sess4 = tf.Session()
            model4 = Unet(mean,
                          weight_decay,
                          learning_rate,
                          label_dim,
                          maxout=maxout)
            sess4.run(tf.local_variables_initializer())
            sess = sess4
            model = model4
        with g_4.as_default():
            saver = tf.train.Saver()
            saver.restore(sess4, './models/psax_45_20_all_model.ckpt-9300')
    elif view == "plax":
        g_5 = tf.Graph()
        with g_5.as_default():
            label_dim = 7
            sess5 = tf.Session()
            model5 = Unet(mean,
                          weight_decay,
                          learning_rate,
                          label_dim,
                          maxout=maxout)
            sess5.run(tf.local_variables_initializer())
            sess = sess5
            model = model5
        with g_5.as_default():
            saver = tf.train.Saver()
            saver.restore(sess5, './models/plax_45_20_all_model.ckpt-9600')
    outpath = "./segment/" + view + "/"
    if not os.path.exists(outpath):
        os.makedirs(outpath)
    framedict = create_imgdict_from_dicom(dicomdir, videofile)
    images, orig_images = extract_images(framedict)
    if view == "a4c":
        a4c_lv_segs, a4c_la_segs, a4c_lvo_segs, preds = extract_segs(
            images, orig_images, model, sess, 2, 4, 1)
        np.save(outpath + '/' + videofile + '_lv',
                np.array(a4c_lv_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_la',
                np.array(a4c_la_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_lvo',
                np.array(a4c_lvo_segs).astype('uint8'))
    elif view == "a2c":
        a2c_lv_segs, a2c_la_segs, a2c_lvo_segs, preds = extract_segs(
            images, orig_images, model, sess, 2, 3, 1)
        np.save(outpath + '/' + videofile + '_lv',
                np.array(a2c_lv_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_la',
                np.array(a2c_la_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_lvo',
                np.array(a2c_lvo_segs).astype('uint8'))
    elif view == "psax":
        psax_lv_segs, psax_lvo_segs, psax_rv_segs, preds = extract_segs(
            images, orig_images, model, sess, 2, 1, 3)
        np.save(outpath + '/' + videofile + '_lv',
                np.array(psax_lv_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_lvo',
                np.array(psax_lvo_segs).astype('uint8'))
    elif view == "a3c":
        a3c_lv_segs, a3c_la_segs, a3c_lvo_segs, preds = extract_segs(
            images, orig_images, model, sess, 2, 3, 1)
        np.save(outpath + '/' + videofile + '_lvo',
                np.array(a3c_lvo_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_lv',
                np.array(a3c_lv_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_la',
                np.array(a3c_la_segs).astype('uint8'))
    elif view == "plax":
        plax_lv_inter_ventricular_segs, preds = extract_segs(
            images, orig_images, model, sess, 2)
        np.save(outpath + '/' + videofile + '_lv_inter_ventricular_G2',
                np.array(plax_lv_inter_ventricular_segs).astype('uint8'))
    j = 0
    nrow = orig_images[0].shape[0]
    ncol = orig_images[0].shape[1]
    print(nrow, ncol)
    plt.figure(figsize=(25, 25))
    plt.axis('off')
    plt.imshow(imresize(preds, (nrow, ncol)))
    plt.savefig(outpath + '/' + videofile + '_' + str(j) + '_' +
                'CHOICE_FULL_segmentation_IVS.png')
    plt.close()
    plt.figure(figsize=(25, 25))
    plt.axis('off')
    plt.imshow(orig_images[0])
    plt.savefig(outpath + '/' + videofile + '_' + str(j) + '_' +
                'CHOICE_FULL_original_IVS.png')
    plt.close()
    background = Image.open(outpath + '/' + videofile + '_' + str(j) + '_' +
                            'CHOICE_FULL_original_IVS.png')
    overlay = Image.open(outpath + '/' + videofile + '_' + str(j) + '_' +
                         'CHOICE_FULL_segmentation_IVS.png')
    background = background.convert("RGBA")
    overlay = overlay.convert("RGBA")
    outImage = Image.blend(background, overlay, 0.5)
    outImage.save(
        outpath + '/' + videofile + '_' + str(j) + '_' +
        'CHOICE_FULL_overlay_IVS.png', "PNG")
    return 1
Example #35
                                hspace=0,
                                wspace=0)
            plt.margins(0, 0)
            plt.savefig(path + 'tmp.png',
                        transparent=False,
                        bbox_inches='tight',
                        pad_inches=0)

            # overlay the two images
            img1 = Image.open(path + filename_1)
            img1 = img1.convert('RGBA')

            img2 = Image.open(path + 'tmp.png')
            img2 = img2.convert('RGBA')

            img = Image.blend(img1, img2, 0.3)
            # img.show()
            filename_1 = filename_1.split('.')[0] + '_Gradient.png'
            img.save(path + filename_1)
            new_name = filename_1.split('.')[0] + '.txt'
            newfile_path = os.path.join(path, new_name)
            shutil.copyfile(file_path, newfile_path)
            os.remove(path + 'tmp.png')

    # print(np.array(Image.open(path + filename)).shape)

# Defokus, Blur

        random_2 = np.random.uniform(0, 10)
        if random_2 <= 6:
            # filename_defokus = filename_1.split('.')[0] + '_Gradient.png' + '_Defokus.png'
Example #36
def greenTint(im):
    '''green tinted image'''
    layer = Image.new('RGB', im.size, 'green')
    return Image.blend(im, layer, 0.5)
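This helper and the identically shaped purple/red/blue tints in later examples differ only in the layer color; a generic variant covering all of them might look like this (function name is hypothetical):

def tint(im, color, alpha=0.5):
    '''Blend a solid color layer over the image.'''
    layer = Image.new('RGB', im.size, color)
    return Image.blend(im, layer, alpha)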
Example #37

# Take two images for blending them together
path = '../../data/geoPose3K/eth_ch1_2011-10-04_14_25_54_01024'
image1 = Image.open(join(path, "photo.jpg"))
image2 = Image.open(join(path, "depth.png"))

# Make the images of uniform size
image3 = changeImageSize(1500, 1000, image1)
image4 = changeImageSize(1500, 1000, image2)

# Make sure images got an alpha channel
image5 = image3.convert("RGBA")
image6 = image4.convert("RGBA")

# Display the images
# image6.show()
# image5.show()

# alpha-blend the images with varying values of alpha
alphaBlended1 = Image.blend(image5, image6, alpha=.2)
alphaBlended2 = Image.blend(image5, image6, alpha=.4)

# Display the alpha-blended images
# alphaBlended1.show()
alphaBlended2.save(join(path, 'blend.png'))
# plt.imshow(alphaBlended2)
# # plt.show()
#
# plt.imsave(join(path, 'blend.png'), alphaBlended2)
Example #38
    den = den.astype(np.float32, copy=False)
    gt = np.sum(Image.fromarray(den))
    print('gt:', gt)
    den = den / np.max(den + 1e-20)
    colored_density_map = cm(den)
    density_map = Image.fromarray(
        (colored_density_map[:, :, :3] * 255).astype(np.uint8))

    img = Image.open(imagename)
    img = img.resize((960, 544))
    if img.mode == 'L':
        img = img.convert('RGB')

    img_RGBA = img.convert("RGBA")
    density_map = density_map.convert("RGBA")
    new_img = Image.blend(img_RGBA, density_map, 0.15)

    input_img = img_transform(img)
    d = ImageDraw.Draw(img)
    d.text((10, 10),
           "Ground Truth:{:.1f}".format(gt),
           font=font,
           fill=(255, 0, 0))

    with torch.no_grad():
        start_time = time.time()
        pred_map = net.test_forward(Variable(input_img[None, :, :, :]).cuda())
        elapsed_time = time.time() - start_time
    print('inference time:{}'.format(elapsed_time))
    fps += (1 / elapsed_time)
    pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
Example #39
 def blend_image(self, image_obj, opacity, color="salmon"):
     img1 = image_obj.convert("RGB")
     img2 = Image.new("RGB", img1.size, color)
     return Image.blend(img1, img2, opacity)
Example #40
def texture_overlay(image, texture_file, alpha=0.5):
    texture = util.load_pil_image_from_resource(TEXTURES_RESOURCE_PREFIX +
                                                texture_file)
    texture = texture.resize(image.size)
    return Image.blend(image, texture, alpha)
Example #41
def purpleTint(im):
    '''purple tinted image'''
    layer = Image.new('RGB', im.size, 'purple')
    return Image.blend(im, layer, 0.5)
Example #42
def redTint(im):
    '''red tinted image'''
    layer = Image.new('RGB', im.size, 'red')
    return Image.blend(im, layer, 0.5)
Example #43
genres = ['base', 'back', 'person', 'logo', 'deco', 'deco']
for i, url in enumerate(urls):
    imgs.append(Image.open(url))
    
if button:
    for trials in range(num_trials):
        # color the background
        draw = ImageDraw.Draw(imgs[0])
        full_size = np.array(imgs[0].size)
        base_color = np.random.randint(0, 255, 3)
        draw.rectangle((0, 0, tuple(full_size)), fill=(
            base_color[0], base_color[1], base_color[2]))

        # set the frame blended with the background (note: modes such as 'RGBA' and 'RGB' must match)
        imgs[1] = imgs[1].resize(tuple(full_size))
        imgs[0] = Image.blend(imgs[0], imgs[1].convert('RGBA'), 0.5)

        # set the background image
        imgs[1] = imgs[1].resize(tuple(full_size-[20, 20]))
        imgs[0].paste(imgs[1], (15, 10))

        # generate random positions to scatter images inside the background (usually 1280x720)
        number = len(imgs)
        widths = []
        heights = []
        for k in range(number-2):
            img_ratio = (imgs[k+2].width / imgs[k+2].height) ** 0.5
            if genres[k+2] == 'logo':
                size_base = 400
            elif genres[k+2] == 'deco':
                size_base = 100
Example #44
def blueTint(im):
    '''blue tinted image'''
    layer = Image.new('RGB', im.size, 'blue')
    return Image.blend(im, layer, 0.5)
Example #45
# The second image - 'September.' Uses Mandelbrot, Color Method 2.
xmin, xmax, ymin, ymax = -1.786400592936561140507, -1.786395803871461587019, -0.000002098513044253179, 0.000001493285780411937
ImgC3 = Image.new("RGBA",(imgx,imgy))
ImgC3.paste((0,0,0), (0,0,imgx,imgy))
ImgC3.save("ImgC3.png","PNG")
VALCOL = "M2"
mandelbrot(ImgC3,"M2")
ImgC3.save("ImgC3.png","PNG")
ImgC3.show()

# The third image - 'Archipelago.' Uses Julia, Method 1, plus some Image Filtering from PIL to enhance color borders.
xmin, xmax, ymin, ymax, imgx, imgy, maxIt = 0.2, 0.25, -0.60, -0.55, 1500, 1500, 200
ImgC4 = Image.new("RGBA",(imgx,imgy))
ImgC4.paste((0,0,0), (0,0,imgx,imgy))
julia(ImgC4,complex(0.5,-0.3),"J1")
ImgC41 = ImgC4.filter(ImageFilter.EDGE_ENHANCE_MORE)
ImgC42 = ImgC4.filter(ImageFilter.CONTOUR)
ImgC4F = Image.blend(ImgC41, ImgC42, 0.05)
ImgC4F.save("ImgC4.png","PNG")
ImgC4F.show()

# The fourth image - 'Quartz.' Uses Julia, Method 2.
xmin, xmax, ymin, ymax, imgx, imgy, maxIt = 0.65, 0.75, -0.1, 0, 1000, 1000, 200
ImgC5 = Image.new("RGBA",(imgx,imgy))
julia(ImgC5,complex(-0.5,0.55),"J2")
ImgC5 = ImgC5.filter(ImageFilter.SHARPEN)
ImgC5.save("ImgC5.png","PNG")
ImgC5.show()


Example #46
    def inf(self, imgs, img_names, gt, inference, net, scales, pbar, base_img,
            pos):

        ######################################################################
        # Run inference
        ######################################################################

        self.img_name = img_names[0]
        col_img_name = '{}/{}_color.png'.format(self.rgb_path, self.img_name)
        pred_img_name = '{}/{}.png'.format(self.pred_path, self.img_name)
        diff_img_name = '{}/{}_diff.png'.format(self.diff_path, self.img_name)
        compose_img_name = '{}/{}_compose.png'.format(self.compose_path,
                                                      self.img_name)
        to_pil = transforms.ToPILImage()
        if self.inference_mode == 'pooling':
            img = imgs
            pool_base_img = to_pil(base_img[0])
        else:
            img = to_pil(imgs[0])
        prediction_pre_argmax_collection = inference(net, img, scales, pos)
        # print(len(prediction_pre_argmax_collection))
        # print(prediction_pre_argmax_collection[0].shape)

        if self.inference_mode == 'pooling':
            prediction = prediction_pre_argmax_collection
            prediction = np.concatenate(prediction, axis=0)
        else:
            prediction_pre_argmax = np.mean(prediction_pre_argmax_collection,
                                            axis=0)
            prediction = np.argmax(prediction_pre_argmax, axis=0)

        if self.metrics:
            self.hist += fast_hist(prediction.flatten(),
                                   gt.cpu().numpy().flatten(),
                                   self.dataset_cls.num_classes)
            iou_w = round(np.nanmean(per_class_iu(self.hist)) * 100, 2)
            # acc_w = np.diag(self.hist).sum() / self.hist.sum()

            H, W = prediction.shape
            pred_split = np.split(prediction,
                                  [H // 4, (H // 4) * 2, (H // 4) * 3],
                                  axis=0)
            gt_split = np.split(gt.cpu().numpy(),
                                [H // 4, (H // 4) * 2, (H // 4) * 3],
                                axis=1)
            self.hist_up += fast_hist(pred_split[0].flatten(),
                                      gt_split[0].flatten(),
                                      self.dataset_cls.num_classes)
            iou_u = round(np.nanmean(per_class_iu(self.hist_up)) * 100, 2)
            # acc_u = np.diag(self.hist_up).sum() / self.hist_up.sum()

            self.hist_mid1 += fast_hist(pred_split[1].flatten(),
                                        gt_split[1].flatten(),
                                        self.dataset_cls.num_classes)
            iou_m1 = round(np.nanmean(per_class_iu(self.hist_mid1)) * 100, 2)
            # acc_m1 = np.diag(self.hist_mid1).sum() / self.hist_mid1.sum()

            self.hist_mid2 += fast_hist(pred_split[2].flatten(),
                                        gt_split[2].flatten(),
                                        self.dataset_cls.num_classes)
            iou_m2 = round(np.nanmean(per_class_iu(self.hist_mid2)) * 100, 2)
            # acc_m2 = np.diag(self.hist_mid2).sum() / self.hist_mid2.sum()

            self.hist_down += fast_hist(pred_split[3].flatten(),
                                        gt_split[3].flatten(),
                                        self.dataset_cls.num_classes)
            iou_d = round(np.nanmean(per_class_iu(self.hist_down)) * 100, 2)
            # acc_d = np.diag(self.hist_down).sum() / self.hist_down.sum()

            pbar.set_description(
                "Mean IOU (Whole, Up, Mid1, Mid2, Down): %s %s %s %s %s" %
                (str(iou_w), str(iou_u), str(iou_m1), str(iou_m2), str(iou_d)))
            # pbar.set_description("ACC (Whole,Up,Mid,DOWN): %s %s %s %s" % (str(acc_w), str(acc_u), str(acc_m), str(acc_d)))

        ######################################################################
        # Dump Images
        ######################################################################
        if self.write_image:

            if self.inference_mode == 'pooling':
                img = pool_base_img

            colorized = self.dataset_cls.colorize_mask(prediction)
            colorized.save(col_img_name)
            blend = Image.blend(img.convert("RGBA"), colorized.convert("RGBA"),
                                0.5)
            blend.save(compose_img_name)

            if gt is not None:
                gt = gt[0].cpu().numpy()
                # only write diff image if gt is valid
                diff = (prediction != gt)
                diff[gt == 255] = 0
                diffimg = Image.fromarray(diff.astype('uint8') * 255)
                PIL.ImageChops.lighter(
                    blend,
                    PIL.ImageOps.invert(diffimg).convert("RGBA")).save(
                        diff_img_name)

            label_out = np.zeros_like(prediction)
            for label_id, train_id in self.dataset_cls.id_to_trainid.items():
                label_out[np.where(prediction == train_id)] = label_id
            cv2.imwrite(pred_img_name, label_out)
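
    # Note: the id-remapping loop above can also be vectorized. A sketch,
    # assuming train ids fit in 0..255 (Cityscapes-style label sets):
    def remap_train_ids(self, prediction, id_to_trainid):
        # Invert the mapping into a 256-entry lookup table, then index per pixel.
        lut = np.zeros(256, dtype=np.uint8)
        for label_id, train_id in id_to_trainid.items():
            lut[train_id] = label_id
        return lut[prediction.astype(np.uint8)]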
Example #47
    def inf(self, imgs, img_names, gt, inference, net, scales, pbar, base_img):

        ######################################################################
        # Run inference
        ######################################################################

        self.img_name = img_names[0]
        col_img_name = '{}/{}_color.png'.format(self.rgb_path, self.img_name)
        pred_img_name = '{}/{}.png'.format(self.pred_path, self.img_name)
        diff_img_name = '{}/{}_diff.png'.format(self.diff_path, self.img_name)
        compose_img_name = '{}/{}_compose.png'.format(self.compose_path,
                                                      self.img_name)
        to_pil = transforms.ToPILImage()
        if self.inference_mode == 'pooling':
            img = imgs
            pool_base_img = to_pil(base_img[0])
        else:
            img = to_pil(imgs[0])
        prediction_pre_argmax_collection = inference(net, img, scales)

        if self.inference_mode == 'pooling':
            prediction = prediction_pre_argmax_collection
            prediction = np.concatenate(prediction, axis=0)[0]
        else:
            prediction_pre_argmax = np.mean(prediction_pre_argmax_collection,
                                            axis=0)
            prediction = np.argmax(prediction_pre_argmax, axis=0)

        if self.metrics:
            self.hist += fast_hist(prediction.flatten(),
                                   gt.cpu().numpy().flatten(),
                                   self.dataset_cls.num_classes)
            iou = round(np.nanmean(per_class_iu(self.hist)) * 100, 2)
            pbar.set_description("Mean IOU: %s" % (str(iou)))

        ######################################################################
        # Dump Images
        ######################################################################
        if self.write_image:

            if self.inference_mode == 'pooling':
                img = pool_base_img

            colorized = self.dataset_cls.colorize_mask(prediction)
            colorized.save(col_img_name)
            blend = Image.blend(img.convert("RGBA"), colorized.convert("RGBA"),
                                0.5)
            blend.save(compose_img_name)

            if gt is not None and args.split != 'test':
                gt = gt[0].cpu().numpy()
                # only write diff image if gt is valid
                diff = (prediction != gt)
                diff[gt == 255] = 0
                diffimg = Image.fromarray(diff.astype('uint8') * 255)
                PIL.ImageChops.lighter(
                    blend,
                    PIL.ImageOps.invert(diffimg).convert("RGBA")).save(
                        diff_img_name)

            label_out = np.zeros_like(prediction)
            for label_id, train_id in self.dataset_cls.label2trainid.items():
                label_out[np.where(prediction == train_id)] = label_id
            cv2.imwrite(pred_img_name, label_out)
Example #48
 def __getitem__(self, item):
     im_name = os.path.basename(self.image_lists[item])
     # print(self.image_lists[item])
     img = Image.open(self.image_lists[item]).convert("RGB")
     width, height = img.size
     if self.gts_dir is not None:
         gt_path = os.path.join(self.gts_dir, im_name + ".txt")
         words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt(
             gt_path, height, width)
         if words[0] == "":
             use_char_ann = False
         else:
             use_char_ann = True
         if not self.use_charann:
             use_char_ann = False
         target = BoxList(boxes[:, :4],
                          img.size,
                          mode="xyxy",
                          use_char_ann=use_char_ann)
         if self.ignore_difficult:
             labels = torch.from_numpy(np.array(labels))
         else:
             labels = torch.ones(len(boxes))
         target.add_field("labels", labels)
         masks = SegmentationMask(segmentations, img.size)
         target.add_field("masks", masks)
         char_masks = SegmentationCharMask(charsbbs,
                                           words=words,
                                           use_char_ann=use_char_ann,
                                           size=img.size,
                                           char_num_classes=len(
                                               self.char_classes))
         target.add_field("char_masks", char_masks)
     else:
         target = None
     if self.transforms is not None:
         img, target = self.transforms(img, target)
     if self.vis:
         new_im = img.numpy().copy().transpose([1, 2, 0]) + [
             102.9801,
             115.9465,
             122.7717,
         ]
         new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
         mask = target.extra_fields["masks"].polygons[0].convert("mask")
         mask = Image.fromarray(
             (mask.numpy() * 255).astype(np.uint8)).convert("RGB")
         if self.use_charann:
             m, _ = (target.extra_fields["char_masks"].chars_boxes[0].
                     convert("char_mask"))
             color = self.creat_color_map(37, 255)
             color_map = color[m.numpy().astype(np.uint8)]
             char = Image.fromarray(color_map.astype(
                 np.uint8)).convert("RGB")
             char = Image.blend(char, new_im, 0.5)
         else:
             char = new_im
         new = Image.blend(char, mask, 0.5)
         img_draw = ImageDraw.Draw(new)
         for box in target.bbox.numpy():
             box = list(box)
          # Expand the xyxy box into a closed rectangle polyline:
          # (x0, y0) -> (x1, y0) -> (x1, y1) -> (x0, y1) -> (x0, y0)
          box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
             img_draw.line(box, fill=(255, 0, 0), width=2)
         new.save("./vis/char_" + im_name)
     return img, target, self.image_lists[item]
Example #49
def segmentChamber(videofile, dicomdir, view, model_path):
    """
    
    """
    mean = 24
    weight_decay = 1e-12
    learning_rate = 1e-4
    maxout = False
    modeldir = model_path

    if view == "a4c":
        g_1 = tf.Graph()
        with g_1.as_default():
            label_dim = 6  # a4c
            sess1 = tf.Session()
            model1 = Unet(mean,
                          weight_decay,
                          learning_rate,
                          label_dim,
                          maxout=maxout)
            sess1.run(tf.local_variables_initializer())
            sess = sess1
            model = model1
        with g_1.as_default():
            saver = tf.train.Saver()
            saver.restore(
                sess1, os.path.join(modeldir, "a4c_45_20_all_model.ckpt-9000"))
    elif view == "a2c":
        g_2 = tf.Graph()
        with g_2.as_default():
            label_dim = 4
            sess2 = tf.Session()
            model2 = Unet(mean,
                          weight_decay,
                          learning_rate,
                          label_dim,
                          maxout=maxout)
            sess2.run(tf.local_variables_initializer())
            sess = sess2
            model = model2
        with g_2.as_default():
            saver = tf.train.Saver()
            saver.restore(
                sess2, os.path.join(modeldir,
                                    "a2c_45_20_all_model.ckpt-10600"))

    outpath = "/home/ubuntu/data/04_segmentation/" + view + "/"
    if not os.path.exists(outpath):
        os.makedirs(outpath)

    images, orig_images = dcm_to_segmentation_arrays(dicomdir, videofile)
    np_arrays_x3 = []
    images_uuid_x3 = []

    if view == "a4c":
        a4c_lv_segs, a4c_la_segs, a4c_lvo_segs, preds = extract_segs(
            images, orig_images, model, sess, 2, 4, 1)
        np_arrays_x3.append(np.array(a4c_lv_segs).astype("uint8"))
        np_arrays_x3.append(np.array(a4c_la_segs).astype("uint8"))
        np_arrays_x3.append(np.array(a4c_lvo_segs).astype("uint8"))
        number_frames = (np.array(a4c_lvo_segs).astype("uint8").shape)[0]
        model_name = "a4c_45_20_all_model.ckpt-9000"
    elif view == "a2c":
        a2c_lv_segs, a2c_la_segs, a2c_lvo_segs, preds = extract_segs(
            images, orig_images, model, sess, 2, 3, 1)
        np_arrays_x3.append(np.array(a2c_lv_segs).astype("uint8"))
        np_arrays_x3.append(np.array(a2c_la_segs).astype("uint8"))
        np_arrays_x3.append(np.array(a2c_lvo_segs).astype("uint8"))
        number_frames = (np.array(a2c_lvo_segs).astype("uint8").shape)[0]
        model_name = "a2c_45_20_all_model.ckpt-10600"

    j = 0
    nrow = orig_images[0].shape[0]
    ncol = orig_images[0].shape[1]
    plt.figure(figsize=(5, 5))
    plt.axis("off")
    plt.imshow(imresize(preds, (nrow, ncol)))
    plt.savefig(outpath + "/" + videofile + "_" + str(j) + "_" +
                "segmentation.png")
    images_uuid_x3.append(
        hashlib.md5((outpath + "/" + videofile + "_" + str(j) + "_" +
                     "segmentation.png").encode()).hexdigest())
    plt.close()
    plt.figure(figsize=(5, 5))
    plt.axis("off")
    plt.imshow(orig_images[0])
    plt.savefig(outpath + "/" + videofile + "_" + str(j) + "_" +
                "originalimage.png")
    images_uuid_x3.append(
        hashlib.md5((outpath + "/" + videofile + "_" + str(j) + "_" +
                     "originalimage.png").encode()).hexdigest())
    plt.close()
    background = Image.open(outpath + "/" + videofile + "_" + str(j) + "_" +
                            "originalimage.png")
    overlay = Image.open(outpath + "/" + videofile + "_" + str(j) + "_" +
                         "segmentation.png")
    background = background.convert("RGBA")
    overlay = overlay.convert("RGBA")
    outImage = Image.blend(background, overlay, 0.5)
    outImage.save(
        outpath + "/" + videofile + "_" + str(j) + "_" + "overlay.png", "PNG")
    images_uuid_x3.append(
        hashlib.md5((outpath + "/" + videofile + "_" + str(j) + "_" +
                     "overlay.png").encode()).hexdigest())

    return [number_frames, model_name, np_arrays_x3, images_uuid_x3]
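
# The overlay above is produced by saving two matplotlib figures and reopening
# them. A hedged in-memory sketch of the same Image.blend overlay (assuming
# the frame and the colorized prediction are already uint8 RGB arrays of the
# same size) avoids the disk round trip:
def overlay_arrays(frame, pred_rgb, alpha=0.5):
    background = Image.fromarray(frame).convert("RGBA")
    overlay = Image.fromarray(pred_rgb).convert("RGBA")
    return Image.blend(background, overlay, alpha)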
Example #50
 def show_image(self):
     plt.imshow(Image.blend(self.body.raw, self.veins.raw, 0.5),
                cmap='gray')
     plt.show()
Example #51
 def addTransparency(self, img, factor=0.0):
     # Blend the image with a solid blue layer; factor=1.0 keeps the original
     # image, factor=0.0 yields the solid layer (channel values max out at 255).
     img = img.convert('RGBA')
     img_blender = Image.new('RGBA', img.size, (0, 0, 255, 255))
     img = Image.blend(img_blender, img, factor)
     return img
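
# Hypothetical usage ("photo.png" and obj are stand-ins for a real file and
# the instance owning the method): factor=1.0 returns the original image,
# factor=0.0 the solid blue layer, since the original is the second argument
# to Image.blend.
img = Image.open("photo.png")
faded = obj.addTransparency(img, 0.7)
faded.save("photo_faded.png")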
Example #52
    def __and__(self, other):
        """ Suporta a "conjunção bitwise" de 2 Imagem's ou de 1 Imagem e 1 Filtro

    Suporta a sintaxe de uso do operador & em duas variantes:
      objectoImagem & objectoImagem
      objectoImagem & objectoFiltro
    Se other for Imagem, é criada e devolvida uma nova Imagem cuja imagem
    armazenada mistura as imagens dos dois operandos, usando a função
    PIL.Image.blend com 50% de peso para cada imagem.
    Se other for Filtro, é criada e devolvida uma nova Imagem cuja imagem
    armazenada resulta de filtrar a imagem do self com o método PIL.Image.filter
    usando o filtro armazenado no other.
    Requires: other é instância de Imagem ou de Filtro.
    Ensures:
      uma nova Imagem com self.pathFicheiro == None, i.e., obtida no modo ii)
        de __init__;
     Caso em que other é Imagem:
      novaImagem.nome é a concatenação dos nomes (sem sufixo) das duas Imagem's
        "conjugadas" (misturadas), com a string "_mix_" de permeio;
      novaImagem.etiquetas contém a união das etiquetas das duas Imagem's
        "conjugadas", à qual é acrescentada, no fim da lista, a etiqueta
        "mistura" (eliminando-se repetições de etiquetas);
      novaImagem.descricao começa com a concatenação das descrições das duas
        Imagem's "conjugadas", com mudança de linha entre uma e outra descrição;
        seguem-se nova mudança de linha, e a string "A imagem descrita acima
        resulta de uma mistura."
     Caso em que other é Filtro:
      novaImagem.nome é a concatenação do nome (sem sufixo) desta Imagem
        com a string "_comFiltro_" + str(other)
      novaImagem.etiquetas é uma cópia das etiquetas do self, à qual é
        acrescentada, no fim da lista, a etiqueta "filtrada" (eliminando-se
        repetições de etiquetas);
      novaImagem.descricao é obtida repetindo a descrição da Imagem self,
        fazendo uma mudança de linha, e acrescentando a string
        "A imagem descrita acima foi filtrada."
    """
        if isinstance(other, Imagem):
            otherResize = other.imagem.resize(self.getTamanho())
            blend = Image.blend(self.imagem, otherResize, 0.5)
            novoNome = self.getNomeSemSufixo(
            ) + "_mix_" + other.getNomeSemSufixo()
            novasEtiquetas = self.etiquetas[:]  # avoid aliasing!
            for etiqueta in other.etiquetas:
                if etiqueta not in novasEtiquetas:
                    novasEtiquetas.append(etiqueta)
            if "mistura" not in novasEtiquetas:
                novasEtiquetas.append("mistura")
            novaDescricao = self.descricao + "\n" + other.descricao + "\n" + \
                            "A imagem descrita acima resulta de uma mistura."
            novaImagem = Imagem(novoNome, imagem=blend)
        else:  # other is an instance of the Filtro class
            filtrada = self.imagem.filter(other.getFiltro())
            novoNome = self.getNomeSemSufixo() + "_comFiltro_" + str(other)
            novasEtiquetas = self.etiquetas[:]  # avoid aliasing!
            if "filtrada" not in novasEtiquetas:
                novasEtiquetas.append("filtrada")
            novaDescricao = self.descricao + "\n" + \
                            "A imagem descrita acima foi filtrada."
            novaImagem = Imagem(novoNome, imagem=filtrada)
        novaImagem.etiquetas = novasEtiquetas  # direct attribute access
        novaImagem.setDescricao(novaDescricao)
        return novaImagem
Example #53
                       a.HEIGHT,
                       bgColor=a.BG_COLOR,
                       overwrite=a.OVERWRITE,
                       verbose=False)
    # otherwise, draw image
    else:
        nfade = 1.0
        # fading in or out
        if fadeInStart <= ms <= fadeInEnd:
            nfade = norm(ms, (fadeInStart, fadeInEnd), limit=True)
        elif fadeOutStart <= ms <= fadeOutEnd:
            nfade = 1.0 - norm(ms, (fadeOutStart, fadeOutEnd), limit=True)
        if 0.0 < nfade < 1.0:
            nfade = ease(nfade, a.FADE_EASE)
        if nfade >= 1.0:
            im.save(filename)
        else:
            fadedImg = Image.blend(blankImage, im, nfade)
            fadedImg.save(filename)
    printProgress(frame, totalFrames)

# Create a blank audio track
audioFile = a.OUTPUT_FILE.replace(".mp4", ".mp3")
makeBlankAudio(totalMs, audioFile)

compileFrames(a.OUTPUT_FRAME,
              a.FPS,
              a.OUTPUT_FILE,
              getZeroPadding(totalFrames),
              audioFile=audioFile)
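
# norm() and ease() above come from this project's helper module. A plausible
# sketch of norm(), assumed here to be a clamped linear normalization:
def norm(value, bounds, limit=False):
    # Linearly map value into [0, 1] over (start, end); clamp when limit=True.
    start, end = bounds
    n = (value - start) / float(end - start)
    if limit:
        n = max(0.0, min(1.0, n))
    return n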
Example #54
# Image.blend() blends two images together. It requires both images to have the same size.

from PIL import Image

image1 = Image.open('flower_01.jpg')
# Create image2 with image1's size, filled with green. The color may also be an RGB tuple.
image2 = Image.new('RGB', image1.size, 'green')
image2.show()
# Blend the two images. The last argument is the alpha (0-1); the larger it is,
# the higher image2's weight: image1*(1-alpha) + image2*alpha. Alpha 0 gives
# image1 unchanged; alpha 1 gives image2 unchanged.
image3 = Image.blend(image1, image2, 0.6)
# Show the blended image
image3.show()
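
# The formula in the comment can be checked numerically. This sketch reuses
# image1 and image2 from above and compares Image.blend with the manual
# computation; any difference should come from rounding only.
import numpy as np

alpha = 0.6
manual = (np.asarray(image1).astype(float) * (1 - alpha) +
          np.asarray(image2).astype(float) * alpha)
blended = np.asarray(Image.blend(image1, image2, alpha)).astype(float)
print(np.abs(manual - blended).max())  # expected: < 1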
Example #55
 def f(img1, v):
     i = np.random.choice(len(imgs))
     img2 = Image.fromarray(imgs[i])
     return Image.blend(img1, img2, v)
Example #56
import math

def overlap_image(img1, img2):
    # Blend with the reciprocal golden ratio (~0.618) as the alpha weight.
    img = Image.blend(img1, img2, (math.sqrt(5) - 1) / 2)
    return img
Example #57
# filter: filtering
# BLUR, CONTOUR, DETAIL, EDGE_ENHANCE, EDGE_ENHANCE_MORE, EMBOSS, FIND_EDGES, SMOOTH, SMOOTH_MORE, SHARPEN
# Blur (mean filter)
image_blur = image.filter(ImageFilter.BLUR)
# Contour
image_contour = image.filter(ImageFilter.CONTOUR)
# Edge detection
image_edge = image.filter(ImageFilter.FIND_EDGES)
# image_blur.show()
# image_contour.show()
# image_edge.show()

# blend: merge two images by alpha
# out = image1 * (1 - alpha) + image2 * alpha
image_background = Image.open('images/background.jpg')
image_blend = Image.blend(image, image_background, 0.2)
# image_blend.show()

# split: separate the image's channels
image_r, image_g, image_b = image.split()
print(image_r.mode)
print(image_r.size)
'''
L
(768, 576)
'''

# composite: merge two images using a mask
image_new = Image.composite(image, image_background, image_b)
# image_new.show()
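
# Where blend() weights every pixel identically, composite() takes a per-pixel
# mask. A small sketch (reusing image and image_background from above, which
# must share a size; the ellipse coordinates are arbitrary): white mask areas
# take pixels from the first image, black areas from the second.
from PIL import ImageDraw

mask = Image.new('L', image.size, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((100, 100, 400, 400), fill=255)
image_circle = Image.composite(image, image_background, mask)
# image_circle.show()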
Example #58
from PIL import Image

# Blend images 1-9 into a single composite, folding each new image in at 50%.
blended = Image.open("images/1.png")
for i in range(2, 10):
    im = Image.open("images/{}.png".format(i))
    blended = Image.blend(blended, im, .5)

blended.save("blended1.png")
Example #59
import operator
from PIL import Image
from PIL import ImageDraw
import cv2
cam = cv2.VideoCapture(0)
ret_val, original1 = cam.read()
img2 = cv2.flip(original1, 1)
ret_val, original = cam.read()
img1 = cv2.flip(original, 1)

img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img1 = Image.fromarray(img1)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
img2 = Image.fromarray(img2)
# suppose img2 is to be shifted by `shift` amount
shift = (50, 60)
print(img2.size)
# compute the size of the panorama
nw, nh = map(max, map(operator.add, img2.size, shift), img1.size)

# paste img1 on top of img2
newimg1 = Image.new('RGBA', size=(nw, nh), color=(0, 0, 0, 0))
newimg1.paste(img2, shift)
newimg1.paste(img1, (0, 0))

# paste img2 on top of img1
newimg2 = Image.new('RGBA', size=(nw, nh), color=(0, 0, 0, 0))
newimg2.paste(img1, (0, 0))
newimg2.paste(img2, shift)

# blend with alpha=0.5
result = Image.blend(newimg1, newimg2, alpha=0.5)
Example #60
import math
from PIL import Image

# Open images (inputImage is assumed to be defined earlier in the script)
img = Image.open(inputImage)
triggImg = Image.open("triggered.jpg")

# Ensure the image is in RGB mode
rgbimg = img.convert('RGB')
img = rgbimg

# Add a red tint to the image
redimg = Image.open("red.jpg")
p, q = img.size
redimg = redimg.resize((p, q), Image.ANTIALIAS)
h, k = redimg.size
print(p, q, h, k, img.mode, redimg.mode)
blendedimg = Image.blend(img, redimg, 0.4)
img = blendedimg

#Adding the "triggered" to an image
newimg = img
p, q = newimg.size
newimg = newimg.crop((0, 0, p, q + math.ceil(q / 6)))
p, q = newimg.size
newTrigg = triggImg.resize((p, math.ceil(q / 6)), Image.ANTIALIAS)
newimg.paste(newTrigg, (0, math.ceil(q - (q / 6))))
newimg.save('test.png')
img = newimg

#Counter for the loop
i = 1