Example #1
    def loadImage(self, path):
        global zoomAmount, currentImage
        pixmap = QtGui.QPixmap(path)
        self.ui.image.setPixmap(pixmap)
        self.ui.image.setFixedSize(pixmap.size())

        self.zoomedPixmap = pixmap.scaled(self.ui.image.width() * zoomAmount, self.ui.image.height() * zoomAmount, QtCore.Qt.KeepAspectRatio)
        myPixmap = self.zoomedPixmap.copy(0,0, self.ui.zoomImage.width(), self.ui.zoomImage.height())
        self.ui.zoomImage.setPixmap(myPixmap)
        self.ui.zoomImage.setFixedSize(myPixmap.size())

        currentImage = Image.open(path)
        # convert to grayscale
        if currentImage.mode != "L":
            currentImage = currentImage.convert("L")
            
        # Sobel operator
        # edge1 = currentImage.filter(ImageFilter.Kernel((3,3), [1, 0, -1, 2, 0, -2, 1, 0, -1], scale=4))
        # edge2 = currentImage.filter(ImageFilter.Kernel((3,3), [1, 2, 1, 0, 0, 0, -1, -2, -1], scale=4))
        # edge3 = currentImage.filter(ImageFilter.Kernel((3,3), [-1, 0, 1, -2, 0, 2, -1, 0, 1], scale=4))
        # edge4 = currentImage.filter(ImageFilter.Kernel((3,3), [-1, -2, -1, 0, 0, 0, 1, 2, 1], scale=4))
        
        # Scharr operator
        edge1 = currentImage.filter(ImageFilter.Kernel((3,3), [3, 0, -3, 10, 0, -10, 3, 0, -3], scale=16))
        edge2 = currentImage.filter(ImageFilter.Kernel((3,3), [3, 10, 3, 0, 0, 0, -3, -10, -3], scale=16))
        edge3 = currentImage.filter(ImageFilter.Kernel((3,3), [-3, 0, 3, -10, 0, 10, -3, 0, 3], scale=16))
        edge4 = currentImage.filter(ImageFilter.Kernel((3,3), [-3, -10, -3, 0, 0, 0, 3, 10, 3], scale=16))
        
        currentImage = ImageChops.add(ImageChops.add(ImageChops.add(edge1, edge2), edge3), edge4)
Example #2
    def process(self):

        srcRun = self.paths[0][3]
        dstRun = self.paths[1][3]
        quantity = self.paths[0][-1].split(".")[0]
        textToDraw = ("BINARY " if useSimpleColorDiff else "") + "DIFFERENCE BETWEEN " + srcRun + " AND " + dstRun + " FOR " + quantity
        textWidthHeight = self.font.getsize(textToDraw)
        myDraw = ImageDraw.Draw(self.finalImage)
        myDraw.text((self.currSize[0] * 1.5 - textWidthHeight[0] * 0.5, 100), textToDraw, fill=(0, 0, 0), font=self.font)
        del myDraw

        if useSimpleColorDiff:
            #ONE COLOR FOR ALL DIFFERENCES

            myDiff = ImageChops.subtract(self.regions[0], self.regions[1])
            for i in range(myDiff.size[0]):
                for j in range(myDiff.size[1]):
                    # print(i, j)
                    px = myDiff.getpixel((i, j))
                    if px[0] != 0 or px[1] != 0 or px[2] != 0:
                        myDiff.putpixel((i, j), binaryDifferenceColor)

                        self.regions[1].putpixel((i, j), binaryDifferenceColor) # create difference-masked image

            myDiff = ImageChops.add(myDiff, self.regions[2])

            self.finalImage.paste(self.regions[1], (self.currSize[0], self.currSize[1] + yShift))

            textToDraw = "DIFFERENCE-MASKED TRACKER MAP FOR RUN " + dstRun   

        else:
            ###MANY COLORS INDICATE MANY POSSIBLE DIFFERENCES
            myDiff = ImageChops.subtract(self.regions[0], self.regions[1])
            myDiff = ImageChops.add(myDiff, self.regions[2]) #2 - refRegion

            blendedImage = Image.blend(self.regions[0], self.regions[1], op)
            self.finalImage.paste(blendedImage, (self.currSize[0], self.currSize[1] + yShift))
    
            textToDraw = "SUPERIMPOSED TRACKER MAPS"

        textWidthHeight = self.font.getsize(textToDraw)
        myDraw = ImageDraw.Draw(self.finalImage)
        myDraw.text((self.currSize[0] * 1.5 - textWidthHeight[0] * 0.5, self.currSize[1] + 100), textToDraw, fill=(0, 0, 0), font=self.font)
        del myDraw


        self.finalImage.paste(myDiff, (self.currSize[0],  yShift))

        outputFileName = self.savePath + "comparisonImage_" + srcRun + "vs" + dstRun + ".png"
        print(outputFileName)
        self.finalImage.save(outputFileName)
Example #3
def noisify(im):
    random.seed(0)
    noise = Image.fromstring(
        "L", im.size, "".join([
            chr(random.randrange(0, 8)) for i in range(im.size[0] * im.size[1])
        ]))
    return ImageChops.add(im, noise)
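Image.fromstring is the original PIL spelling; current Pillow releases removed it in favour of Image.frombytes. A minimal sketch of the same helper against the Pillow API (noisify_pillow is a name of mine, not from the original project):

import random
from PIL import Image, ImageChops

def noisify_pillow(im):
    random.seed(0)
    # build one byte of low-amplitude noise per pixel
    data = bytes(bytearray(random.randrange(0, 8)
                           for _ in range(im.size[0] * im.size[1])))
    noise = Image.frombytes("L", im.size, data)
    return ImageChops.add(im, noise)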
Example #4
    def launchCPU(self):
        frames_PIL = [Image.fromstring("RGB", self.pre_video.getSize(), i.tostring()) for i in self.pre_video.getAllFrames()]
        for i in xrange(len(frames_PIL) - 1):
            # Difference between consecutive frames
            diferencia = DifferenceFilter(frames_PIL[i], frames_PIL[i + 1])
            diferencia.Apply(Filter.CPU)
            tmp = diferencia.fetchResult()

            # Threshold
            threshold = ThresholdFilter(tmp, level=20)
            threshold.Apply(Filter.CPU)
            tmp2 = threshold.fetchResult()

            # Erosion
            erosion = ErosionFilter(tmp2)
            erosion.Apply(Filter.CPU)
            post = erosion.fetchResult()

            # TODO: move the merging into a separate class
            r, g, b = frames_PIL[i + 1].split()
            tmp = ImageChops.add(r, post)
            merged = Image.merge("RGB", (tmp, g, b))

            self.post_video.appendFrame(merged)
            print "#",

        # Finished, save the result
        self.post_video.Save("./out.mpg")
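The project's DifferenceFilter/ThresholdFilter/ErosionFilter classes are not shown here; a hedged approximation of the same difference-threshold-merge idea with plain PIL calls (highlight_motion and its level parameter are placeholders of mine, and the erosion step is omitted):

import Image, ImageChops

def highlight_motion(prev_frame, next_frame, level=20):
    # binary motion mask from the per-pixel difference of two RGB frames
    diff = ImageChops.difference(prev_frame, next_frame).convert("L")
    mask = diff.point(lambda px: 255 if px > level else 0)
    # burn the mask into the red channel of the newer frame
    r, g, b = next_frame.split()
    return Image.merge("RGB", (ImageChops.add(r, mask), g, b))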
Example #5
    def test_r(self):
        """ Line of circles (varying r) across image each produces a strong response """

        im = Image.new("L", (1000, 200), (0))
        npoints = 18
        xs = [ (100 + 50 * t) for t in range(npoints) ]
        for t in range(npoints):
            r = (2.0+0.5*t)
            circle(im, xs[t], 100, r, 248)

        # Add noise into the image.  If the image does not contain noise,
        # then the non maximum suppression can - like Buridan's ass - be
        # presented with two adjacent responses that are equal, and reject
        # both because neither is a maximum.  The chance of this happening
        # with real-world images is very remote indeed.

        noise = Image.fromstring("L", (1000,200), "".join([ chr(random.randrange(0, 8)) for i in range(1000 * 200)]))
        im = ImageChops.add(im, noise)

        result = sorted([(x,y,s,response) for (x,y,s,response) in simple(im, 7, 1.0, 999999.0, 999999.0)][-npoints:])

        # Must have npoints
        self.assertEqual(len(result), npoints)

        # X coordinates must be within 1 point of expected
        for i,(x,y,s,r) in enumerate(result):
            self.assert_(abs(x - xs[i]) <= 1)

        # Already ordered by x, so scale should be increasing
        ss = [s for (x,y,s,r) in result]
        self.assertEqual(ss, sorted(ss))
Example #7
    def dilate(self, image):
        paddedImage = self.createPaddedImage(image, 1)
        thresholdImg = paddedImage.point(lambda i, v=128: i > v and 255)
        thresholdImg = ImageChops.invert(thresholdImg)
        filteredImg = thresholdImg.filter(ImageFilter.FIND_EDGES)
        thresholdImg = filteredImg.point(lambda i, v=128: i > v and 255)
        arithImg = ImageChops.add(paddedImage, thresholdImg)
        box = (1, 1, arithImg.size[0] - 1, arithImg.size[1] - 1)
        outImage = arithImg.crop(box)
        return outImage
Example #8
File: code.py Project: mdonahoe/personal
def addup(ims,offset):
	##must be of len 2**m
	n=1
	while len(ims)>1:
		newims=[]
		for i in range(0,len(ims),2):
			#print 'offset = %d'%(-offset*n)
			newims.append(ImageChops.add(ims[i],ImageChops.offset(ims[i+1],offset*n,0),2,0))
		ims = newims
		n*=2
	return ims[0]
Example #9
    def do_add(self):
        """usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>

        Pop the two top images, produce the scaled sum with offset.
        """
        import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        scale = float(self.do_pop())
        offset = int(self.do_pop())
        self.push(ImageChops.add(image1, image2, scale, offset))
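For reference, ImageChops.add(image1, image2, scale, offset) computes ((image1 + image2) / scale) + offset per pixel, clipped to the valid range. A quick check with solid images (the values are chosen only for illustration):

import Image, ImageChops

a = Image.new("L", (4, 4), 100)
b = Image.new("L", (4, 4), 200)
print ImageChops.add(a, b, 2.0, 10).getpixel((0, 0))  # (100 + 200) / 2 + 10 = 160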
Example #11
File: annotator.py Project: msingh172/MIP
    def loadImage(self, path):
        global zoomAmount, currentImage
        pixmap = QtGui.QPixmap(path)
        self.ui.image.setPixmap(pixmap)
        self.ui.image.setFixedSize(pixmap.size())

        self.zoomedPixmap = pixmap.scaled(self.ui.image.width() * zoomAmount,
                                          self.ui.image.height() * zoomAmount,
                                          QtCore.Qt.KeepAspectRatio)
        myPixmap = self.zoomedPixmap.copy(0, 0, self.ui.zoomImage.width(),
                                          self.ui.zoomImage.height())
        self.ui.zoomImage.setPixmap(myPixmap)
        self.ui.zoomImage.setFixedSize(myPixmap.size())

        currentImage = Image.open(path)
        # convert to grayscale
        if currentImage.mode != "L":
            currentImage = currentImage.convert("L")

        # Sobel operator
        # edge1 = currentImage.filter(ImageFilter.Kernel((3,3), [1, 0, -1, 2, 0, -2, 1, 0, -1], scale=4))
        # edge2 = currentImage.filter(ImageFilter.Kernel((3,3), [1, 2, 1, 0, 0, 0, -1, -2, -1], scale=4))
        # edge3 = currentImage.filter(ImageFilter.Kernel((3,3), [-1, 0, 1, -2, 0, 2, -1, 0, 1], scale=4))
        # edge4 = currentImage.filter(ImageFilter.Kernel((3,3), [-1, -2, -1, 0, 0, 0, 1, 2, 1], scale=4))

        # Scharr operator
        edge1 = currentImage.filter(
            ImageFilter.Kernel((3, 3), [3, 0, -3, 10, 0, -10, 3, 0, -3],
                               scale=16))
        edge2 = currentImage.filter(
            ImageFilter.Kernel((3, 3), [3, 10, 3, 0, 0, 0, -3, -10, -3],
                               scale=16))
        edge3 = currentImage.filter(
            ImageFilter.Kernel((3, 3), [-3, 0, 3, -10, 0, 10, -3, 0, 3],
                               scale=16))
        edge4 = currentImage.filter(
            ImageFilter.Kernel((3, 3), [-3, -10, -3, 0, 0, 0, 3, 10, 3],
                               scale=16))

        currentImage = ImageChops.add(
            ImageChops.add(ImageChops.add(edge1, edge2), edge3), edge4)
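Because "L"-mode filtering clips negative responses to zero, each Scharr kernel above is applied in both polarities and the four results are summed with ImageChops.add to approximate the gradient magnitude. A self-contained sketch of that idea (scharr_magnitude is a name of mine, not from the annotator project):

import Image, ImageChops, ImageFilter

def scharr_magnitude(img):
    # img is a grayscale ("L") image; returns an approximate gradient magnitude
    kernels = [
        [3, 0, -3, 10, 0, -10, 3, 0, -3],      # +x
        [-3, 0, 3, -10, 0, 10, -3, 0, 3],      # -x
        [3, 10, 3, 0, 0, 0, -3, -10, -3],      # +y
        [-3, -10, -3, 0, 0, 0, 3, 10, 3],      # -y
    ]
    out = None
    for k in kernels:
        edge = img.filter(ImageFilter.Kernel((3, 3), k, scale=16))
        out = edge if out is None else ImageChops.add(out, edge)
    return out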
Example #12
	def process(self, image):
		width, height = image.size
		yield 'Glow...', image
		highlights = self._draw_glow(image)
		yield 'Contrast, Brightness...', highlights
		highlights = highlights.filter(ImageFilter.GaussianBlur(20))
		yield 'Blur...', highlights
		highlights = highlights.filter(ImageFilter.BLUR)
		highlights = highlights.filter(ImageFilter.GaussianBlur(20))
		yield 'Merge..', highlights
		image = ImageChops.add(image, highlights)
		yield 'Done...', image
Example #13
File: code.py Project: mdonahoe/personal
def addup2(ims,offset):
	##must be of len 2**m
	
	n=len(ims)+1
	#do all the offsets
	ims = [ImageChops.offset(im,-offset*(n/2-i),0) for i,im in enumerate(ims)]
	
	#add all the images, two at a time to avoid overflow
	while len(ims)>1:		
		ims = [ImageChops.add(ims[i],ims[i+1],2,0) for i in range(0,len(ims),2)]

	return ims[0]
Example #14
File: myfilter.py Project: Nuos/lab
    def filter(self, image):
        if image.mode != "L":
            raise ValueError("image mode must be L")
        # horizontal / vertical Sobel kernels
        dx = ImageFilter.Kernel((3, 3), [-1, 0, 1, -2, 0, 2, -1, 0, 1], 1, 0)
        dy = ImageFilter.Kernel((3, 3), [1, 2, 1, 0, 0, 0, -1, -2, -1], 1, 0)
        imx = image.filter(dx)
        imy = image.filter(dy)

        imx = imx.point(lambda i: abs(i))
        imy = imy.point(lambda i: abs(i))
        im = ImageChops.add(imx, imy)
        return im.im
Example #15
def combine(ims,offset):
	
	n=len(ims)-1
	
	#do all the offsets
	ims = [ImageChops.offset(im,-offset*(n/2-i),0) for i,im in enumerate(ims)]
	
	#add all the images, two at a time to avoid overflow
	while len(ims)>1:		
		ims = [ImageChops.add(ims[i],ims[i+1],2,0) for i in range(0,len(ims),2)]
	
	#return the final result image
	return ims[0]
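ImageChops.add(a, b, 2, 0) averages two 8-bit images rather than summing them, which is why the pairwise reduction above never saturates. The same arithmetic on plain numbers (stand-in pixel values, not real data):

vals = [40, 80, 120, 160]   # stand-ins for one pixel across four images
while len(vals) > 1:
    # each ImageChops.add(a, b, 2, 0) is per-pixel (a + b) / 2
    vals = [(vals[i] + vals[i + 1]) / 2 for i in range(0, len(vals), 2)]
print vals[0]               # 100, the mean of the four inputs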
Example #16
File: myfilter.py Project: Nuos/lab
def canny_edge_detection(image, nonmaxsize=3):
    #gaussian smooth
    ga = np.array([2, 4, 5, 4, 2,
                   4, 9, 12, 9, 4,
                   5, 12, 15, 12, 5,
                   4, 9, 12, 9, 4,
                   2, 4, 5, 4, 2])
    ga = 1.0 / 159 * ga
    dga = (5, 5), ga, 1, 0
    image = image.filter(ImageFilter.Kernel(*dga))

    #sobel edge
    dx = (3, 3), [-1, 0, 1, -2, 0, 2, -1, 0, 1], 1, 0
    dy = (3, 3), [1, 2, 1, 0, 0, 0, -1, -2, -1], 1, 0
    imx = image.filter(ImageFilter.Kernel(*dx))
    imy = image.filter(ImageFilter.Kernel(*dy))

    imx = imx.point(lambda i: abs(i))
    imy = imy.point(lambda i: abs(i))
    im = ImageChops.add(imx, imy, 2)

    sizex, sizey = im.size
    
    mx = imx.load()
    my = imy.load()
    #edge direction
    theta = np.zeros((sizex, sizey))
    
    for i in xrange(sizex):
        for j in xrange(sizey):
            # arctan2 handles mx == 0 and avoids Python 2 integer division
            v = np.degrees(np.arctan2(my[i, j], mx[i, j]))

            if 22.5 >= v >= 0 or  180 >= v >=157.5:
                v = 0.0
            elif 67.5 >= v >= 22.5:
                v = 45.0
            elif 112.5 >= v >= 67.5:
                v = 90.0
            else:
                v = 135.0
            theta[i, j] = v
    # non-maximum suppression
    out = nonmax_supression(im, nonmaxsize)
    return out
Example #17
File: photo.py Project: kif/imagizer
    def contrastMask(self, outfile):
        """Ceci est un filtre de debouchage de photographies, aussi appelé masque de contraste,
        il permet de rattrapper une photo trop contrasté, un contre jour, ...
        Écrit par Jérôme Kieffer, avec l'aide de la liste python@aful,
        en particulier A. Fayolles et F. Mantegazza avril 2006
        necessite numpy et PIL.

        @param: the name of the output file (JPEG)
        @return: filtered Photo instance
        """

        try:
            import numpy
#            import scipy.signal as signal
        except ImportError:
            logger.error("This filter needs the numpy library available on https://sourceforge.net/projects/numpy/files/")
            return

        t0 = time.time()
        dimX, dimY = self.pil.size

        ImageFile.MAXBLOCK = dimX * dimY
        img_array = numpy.fromstring(self.pil.tostring(), dtype="UInt8").astype("float32")
        img_array.shape = (dimY, dimX, 3)
        red, green, blue = img_array[:, :, 0], img_array[:, :, 1], img_array[:, :, 2]
        # note: this is faster than desat2=(ar.max(axis=2)+ar.min(axis=2))/2
        desat_array = (numpy.minimum(numpy.minimum(red, green), blue) + numpy.maximum(numpy.maximum(red, green), blue)) / 2.0
        inv_desat = 255. - desat_array
        blured_inv_desat = self._gaussian.blur(inv_desat, config.ContrastMaskGaussianSize)
        bisi = numpy.round(blured_inv_desat).astype("uint8")
        k = Image.fromarray(bisi, "L").convert("RGB")
        S = ImageChops.screen(self.pil, k)
        M = ImageChops.multiply(self.pil, k)
        F = ImageChops.add(ImageChops.multiply(self.pil, S), ImageChops.multiply(ImageChops.invert(self.pil), M))
        dst_filename = op.join(config.DefaultRepository, outfile)
        F.save(dst_filename, quality=80, progressive=True, Optimize=True)
        try:
            os.chmod(dst_filename, config.DefaultFileMode)
        except IOError:
            logger.error("Unable to chmod %s" % outfile)
        exifJpeg = Exif(dst_filename)
        exifJpeg.read()
        self.exif.copy(exifJpeg)
        exifJpeg.comment = self.exif.comment
        logger.debug("Write metadata to %s", dst_filename)
        exifJpeg.write()
        logger.info("The whoole contrast mask took %.3f" % (time.time() - t0))
        res = Photo(outfile)
        res.autorotate()
        return res
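In normalized [0, 1] terms the last three ImageChops calls amount to a soft-light blend of the photo a with the blurred inverted-luminance mask k, F = a*screen(a, k) + (1 - a)*multiply(a, k). A rough per-pixel sketch of that formula (soft_light is an illustrative name, not part of the Photo class):

def soft_light(a, k):
    # a, k in [0.0, 1.0]; mirrors the multiply/screen/add chain above
    multiplied = a * k
    screened = 1.0 - (1.0 - a) * (1.0 - k)
    return a * screened + (1.0 - a) * multiplied

print soft_light(0.2, 0.8)  # a dark pixel lifted by a bright mask value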
Example #18
	def process(self, image):
		yield 'Loading...', image
		imgp = appconfig.AppConfig().get_data_file(self._IMAGE)
		img = Image.open(imgp)
		img = ImageEnhance.Contrast(img).enhance(0.7)
		img = ImageEnhance.Brightness(img).enhance(0.5)
		yield 'Curves...', image
		width, height = image.size
		yield 'Create fog layer...', img
		canvas = Image.new('RGB', image.size)
		for x in xrange(0, width, img.size[0]):
			for y in xrange(0, height, img.size[1]):
				canvas.paste(img, (x, y))
		yield 'Merge...', canvas
		image = ImageChops.add(image, canvas)
		yield 'Done', image
Example #19
	def process(self, image):
		yield 'Color...', image
		image = ImageEnhance.Color(image).enhance(1.3)
		yield 'Color balance...', image
		color = colors.fill_with_color(image.copy(), (50, 50, 0))
		yield 'Merge', color
		image = ImageChops.add(image, color, 1.3)
		yield 'Curves...', image
		rgcurv = list(curves.create_curve(
				[(0, 0), (32, 0), (64, 11), (128, 111), (192, 241), (224, 255),
				(240, 255), (255, 255)]
		))
		bcurv = curves.create_line([(0, 62), (36, 62), (218, 191), (255, 191)])
		image = curves.apply_curves(image, None, rgcurv, rgcurv, bcurv)
#		curves.draw_curve(image, rgcurv, 10, 10, (255, 255, 255))
#		curves.draw_curve(image, bcurv, 10, 300, (0, 0, 255))
		yield 'Done', image
Example #20
    def detectMotion(self, rec_time=0.5):
        "Accumulates a number of frame-to-frame differences."
        # Gather accumulated frame differences
        frames = self.frames.getFrames(rec_time=rec_time)
        sample_rate = 3
        sample_indices = [ind * sample_rate for ind in range(len(frames) / sample_rate)]
        frames = [frames[ind] for ind in sample_indices]
        # frame.point(lambda i: i*(i<250)+128*(i>250))  # turn bright white to mid-grey to reduce edge effects
        num_frames = len(frames)
        motion = Image.new('L', (640, 480))
        scale = num_frames / 50.
        for i in range(num_frames - 2):
            diff = ImageChops.difference(frames[i + 2], frames[i])  # abs of difference
            diff = diff.point(lambda i: int(i / scale))  # dividing by scale prevents saturation
            motion = ImageChops.add(motion, diff)
        motion = ImageChops.multiply(motion, self.curr_mask)  # only look at the middle of the fly
        self.window.displayEngine3(motion)
        return motion
Example #21
	def process(self, image):
		width, height = image.size
		yield 'Glow...', image
		result = []
		for src in image.split():
			maxv = int(src.getextrema()[1] * 0.98)
			result.append(src.point(lambda i: i > maxv and 255 or 0))
		highlights = Image.merge("RGB", tuple(result))
		for x in xrange(10):
			yield 'Contrast, Brightness...', highlights
			highlights = ImageEnhance.Brightness(highlights).enhance(1.5)
			yield 'Blur...', highlights
			highlights = highlights.filter(ImageFilter.BLUR)
			highlights = highlights.filter(ImageFilter.GaussianBlur(10))
		highlights = ImageEnhance.Brightness(highlights).enhance(0.3)
		yield 'Merge..', highlights
		image = ImageChops.add(image, highlights)
		yield 'Done...', image
Example #22
	def process(self, image):
		yield 'Blink...', image
		width, height = image.size
		pos = random.random() * 0.25
		size = random.random() * 0.3 + 1.0 + pos * 0.3
		print size, pos
		if random.randint(0, 1):
			pos = 1 - pos
		grad = gradients.create_circular_gradient(image.size, size, pos, 0.0,
				False)
		yield 'Gradient', grad
		grad = grad.convert("RGB")
		bred = list(curves.create_curve([(0, 0), (120, 150), (255, 255)]))
		bgre = list(curves.create_curve([(0, 0), (200, 150), (255, 255)]))
		bblu = list(curves.create_curve([(0, 0), (160, 10), (255, 255)]))
		grad_color = curves.apply_curves(grad, None, bred, bgre, bblu)
		yield 'Merge..', grad_color
		image = ImageChops.add(image, grad_color)
		yield 'Done', image
Example #23
def _im_trim(im_obj, border=0):
    'Trims an image object using the Python Imaging Library'
    if not isinstance(border, int):
        msg = 'Input border must be an int, but is %s, %s instead' % (
            border, type(border))
        raise ValueError(msg)
    # make a white background:
    backg = Image.new(im_obj.mode, im_obj.size, im_obj.getpixel((0, 0)))
    # do an image difference:
    diff = ImageChops.difference(im_obj, backg)
    # add it together
    diff = ImageChops.add(diff, diff, 1.0, -100)
    # and see what the bbox is of that...
    bbox = diff.getbbox()

    if border != 0 and bbox:
        border_bbox = [-border, -border, border, border]
        # now apply that trim:
        bbox_tr = [x + y for x, y in zip(bbox, border_bbox)]

        # bbox defines the first corner as top+left, then the second corner as bottom+right
        # (not the bottom left corner, and the width, height from there)
        if bbox_tr[0] < 0:
            bbox_tr[0] = 0
        if bbox_tr[1] < 0:
            bbox_tr[1] = 0
        if bbox_tr[2] > im_obj.size[0]:
            bbox_tr[2] = im_obj.size[0]
        if bbox_tr[3] > im_obj.size[1]:
            bbox_tr[3] = im_obj.size[1]
        # now check to see if that's actually going to do anything:
        if bbox_tr == [0, 0, im_obj.size[0], im_obj.size[1]]:
            bbox = None
        else:
            bbox = bbox_tr

    if bbox:
        # crop:
        return im_obj.crop(bbox)
    else:
        return im_obj
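The add(diff, diff, 1.0, -100) step doubles the difference image and subtracts 100, so pixels within roughly 50 levels of the background colour drop to zero and getbbox ignores them; that keeps compression noise from defeating the trim. A hypothetical call (the file names are placeholders):

import Image

im = Image.open("plot_with_whitespace.png")
trimmed = _im_trim(im, border=10)   # keep a 10-pixel margin around the content
trimmed.save("plot_trimmed.png")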
Example #24
	def process(self, image):
		yield 'Creating...', image
		width, height = image.size
		count = width * height / 500
		layer = Image.new("L", image.size, 0)
		draw = ImageDraw.Draw(layer)
		fill = 16
		randint = random.randint
		height4 = height / 16
		for idx in xrange(count):
			x1 = randint(0, width)
			x2 = x1 + randint(-width, width)
			y1 = randint(0, height)
			y2 = y1 + randint(-height4, height4) * 2
			draw.line((x1, y1, x2, y2), fill=fill)
		del draw
		yield 'Smooth...', layer
		layer = layer.filter(ImageFilter.SMOOTH)
		layer = layer.convert("RGB")
		yield 'Merge...', layer
		image = ImageChops.add(image, layer)
		yield 'Done', image
Example #25
	def process(self, image):
		yield 'Coffee...', image
		width, height = image.size
		burn_layer_size = (width / 6, height / 6)
		burn_layer = Image.new("RGB", burn_layer_size, (0, 0, 0))
		burn_width = burn_layer_size[0] / 3
		burn_height = burn_layer_size[1] / 3
		burn_l = burn_layer_size[0] / 3
		burn_t = burn_layer_size[1] / 3
		burn_r = burn_l + burn_width
		burn_b = burn_t + burn_height
		draw = ImageDraw.Draw(burn_layer)
		self._burn_x(burn_layer, draw, burn_l,
				burn_t + random.randint(0, 1) * burn_height)
		self._burn_x(burn_layer, draw, burn_r,
				burn_t + random.randint(0, 1) * burn_height)
		self._burn_y(burn_layer, draw,
				burn_l + random.randint(0, 1) * burn_width,
				burn_t)
		self._burn_y(burn_layer, draw,
				burn_l + random.randint(0, 1) * burn_width,
				burn_b)
		del draw
		burn_layer = burn_layer.filter(ImageFilter.SMOOTH_MORE)
		yield 'Coffee resize..', burn_layer
		burn_layer = burn_layer.crop((burn_l, burn_t, burn_r, burn_b))
		burn_layer = burn_layer.resize((width, height), 2)
		yield 'Coffee Colorize...', burn_layer
		bred = list(curves.create_curve([(0, 0), (120, 150), (255, 255)]))
		bgre = list(curves.create_curve([(0, 0), (200, 150), (255, 255)]))
		bblu = list(curves.create_curve([(0, 0), (160, 10), (255, 255)]))
		burn_layer = curves.apply_curves(burn_layer, None, bred, bgre, bblu)
		yield 'Coffee blur...', burn_layer
		burn_layer = burn_layer.filter(ImageFilter.BLUR)
		burn_layer = burn_layer.filter(ImageFilter.BLUR)
		burn_layer = burn_layer.filter(ImageFilter.BLUR)
		yield 'Merge...', burn_layer
		image = ImageChops.add(image, burn_layer)
		yield 'Done', image
Example #26
File: remember.py Project: cl4rke/HERS
def trim(im):
	bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
	diff = ImageChops.difference(im, bg)
	diff = ImageChops.add(diff, diff, 2.0, -100)
	bbox = diff.getbbox()
	
	s = max([bbox[2]-bbox[0], bbox[3]-bbox[1]])
	center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
	
	bbox = [center[0]-s/2, center[1]-s/2, center[0]+s/2, center[1]+s/2]
	
	if bbox:
		
		if im.mode == 'RGBA':
			r,g,b,a = im.split()
			im = Image.merge('RGB', (r,g,b))
			
		im = ImageOps.invert(im)
		crop = im.crop(bbox)
		
		return ImageOps.invert(crop)
	else: return im
Example #27
	def process(self, base):
		yield 'Colors...', base
		image = ImageEnhance.Color(base).enhance(1.15)
		yield 'Contrast...', image
		image = ImageEnhance.Contrast(image).enhance(1.1)
		yield 'Curves...', image
		cur_r = list(curves.create_curve([(0, 0), (88, 47), (170, 188), (221, 249),
				(255, 255)]))
		cur_g = list(curves.create_curve([(0, 0), (65, 57), (184, 208), (255, 255)]))
		cur_b = list(curves.create_curve([(0, 29), (255, 255)]))
		image = curves.apply_curves(image, None, cur_r, cur_g, cur_b)
		yield 'Sepia...', image
		lsepia = base.copy()
		lsepia = colors.colorize_hls(lsepia, (0.08, -0.4, 0.4))
		lsepia = ImageEnhance.Contrast(lsepia).enhance(1.3)
		yield 'Sepia merging...', lsepia
		image = ImageChops.add(image, lsepia)
		yield 'Magenta...', image
		lmagenta = base.copy()
		lmagenta = colors.fill_with_color(lmagenta, (255, 0, 220))
		image = layers.merge_layers_screen(image, lmagenta, 0.04)
		yield 'Done', image
Example #28
File: lightsim.py Project: shakaran/light9
    def _remix(self):
        """Mix all the images in self.im together according to the
        weights in self.amounts.  Each of those attributes is a dict
        with the light names for keys."""
        global fpslabel
        i = None
        amounts = self.amounts
        layers = 0
        start = time.time()
        for k in self.im.keys():
            scale = amounts.get(k, 0)
            if scale != 0:
                layers += 1
                acc = ImageEnhance.Brightness(self.im[k]).enhance(scale)  # scale the image before adding
                # acc = ImageChops.add(base, self.im[k], 1 / scale)  ## slower!
                if i is None:
                    i = acc  # use first layer directly
                else:
                    i = ImageChops.add(i, acc)  # add subsequent layers
        dur = time.time() - start
        fps = 1.0 / dur
        fpslabel.config(text="%.02f fps, %.02f layers/sec" % (fps, layers / dur))
        if i is not None:
            self.itk.paste(i)  # put image i in the PhotoImage
Example #29
def _im_logo(im_obj, logo_file, logo_width, logo_padding, logo_pos):
    'adds a logo to the required corner of an image object (usually after an im_trim)'

    # load in the logo file image:
    logo_obj = Image.open(logo_file)
    # rescale to the new width and height:
    if logo_width != logo_obj.size[0]:
        logo_height = int(logo_obj.size[1] * float(logo_width) /
                          logo_obj.size[0])
        res_logo_obj = _img_premult_resize(logo_obj,
                                           size=(logo_width, logo_height))
    else:
        res_logo_obj = logo_obj

    # TODO: this is written for putting a logo in the top-left corner, but could be extended:
    if logo_pos in [0, 2]:

        # now pull out a sub-image from the main image, just where the logo would go
        # if it were there; this is the area we want to leave blank for the logo, including padding:
        req_logo_size = [x + 2 * logo_padding for x in res_logo_obj.size]

        corner_obj = im_obj.crop((0, 0, req_logo_size[0], req_logo_size[1]))
        #
        # now get a bounding box as though we were trimming this image:
        backg = Image.new(corner_obj.mode, corner_obj.size,
                          corner_obj.getpixel((0, 0)))
        # do an image difference:
        diff = ImageChops.difference(corner_obj, backg)
        # add it together
        diff = ImageChops.add(diff, diff, 1.0, -100)
        # and see what the bbox is of that...
        bbox = diff.getbbox()

        #        # get the offset in x and y:
        #        if bbox is None:
        #            # the corner object is empty so no need to offset:
        #            offsets = (0,0)
        #        else:
        #            offsets = (req_logo_size[0] - bbox[0], req_logo_size[1] - bbox[1])
        #        # but you only ever need to offset in one direction (the shortest one):
        #        offset = min(offsets)
        #        offset_ind = offsets.index(offset)

        # as this is the top left corner of a plot, the logo should be offset only
        # in x (so the title is still at the top)
        if bbox is None:
            # the corner object is empty so no need to offset:
            offset = 0
        else:
            offset = req_logo_size[0] - bbox[0]
        offset_ind = 0

        # now put that together to make an image:

        # create the blank image:
        new_size = list(im_obj.size)
        new_size[offset_ind] += offset
        new_obj = Image.new(im_obj.mode, new_size, im_obj.getpixel((0, 0)))

        # now put the main image into it, offset:
        if offset_ind == 0:
            offsets = (offset, 0)
        else:
            offsets = (0, offset)
        new_obj.paste(im_obj, offsets)
        # and put the rescaled logo onto it too, in the right place:
        new_obj.paste(res_logo_obj, (logo_padding, logo_padding))

    else:
        msg = 'logo positions other than 0 and 2 (both top left) have not been implemented yet'
        raise NotImplementedError(msg)

    return new_obj
Example #30
def render(points,
           filename,
           width=3000,
           height=1800,
           fontfile=DEFAULT_FONT,
           fontsize=12,
           margin=0.05,
           transparency=0.5):
    """
    Render t-SNE text points to an image file.
    points is a list of tuples of the form (title, x, y).
    filename should be a .png, typically.
    margin is the amount of extra whitespace added at the edges.
    transparency is the amount of transparency in the text.
    @warning: Make sure you open the PNG in Gimp, or something that supports alpha channels. Otherwise, it will just look completely black.
    """

    points = filter_points(points, wanted_set)

    W = width
    H = height

    #im = Image.new("L", (W, H), 255)
    im = Image.new("RGBA", (W, H), (0, 0, 0))

    # use a bitmap font
    #font = ImageFont.load("/usr/share/fonts/liberation/LiberationSans-Italic.ttf")

    if fontfile is not None:
        assert os.path.exists(fontfile)
        font = ImageFont.truetype(fontfile, fontsize)

    #draw = ImageDraw.Draw(im)
    #draw.text((10, 10), "hello", font=font)

    minx = 0
    maxx = 0
    miny = 0
    maxy = 0
    for (title, x, y) in points:
        if minx > x: minx = x
        if maxx < x: maxx = x
        if miny > y: miny = y
        if maxy < y: maxy = y

    dx = maxx - minx
    dy = maxy - miny
    assert dx > 0
    assert dy > 0
    minx -= dx * margin
    miny -= dy * margin
    maxx += dx * margin
    maxy += dy * margin

    alpha = Image.new("L", im.size, "black")

    for (idx, pt) in enumerate(points):
        (title, x, y) = pt
        #    print x, minx
        #    print 1. * (x - minx) / (maxx - minx)
        #    print y, miny
        #    print 1. * (y - miny) / (maxy - miny)
        x = 1. * (x - minx) / (maxx - minx) * W
        y = 1. * (y - miny) / (maxy - miny) * H
        #    draw.text((x, y), w, fill=255, font=font)

        # Make a grayscale image of the font, white on black.
        pos = (x, y)
        imtext = Image.new("L", im.size, 0)
        drtext = ImageDraw.Draw(imtext)
        print >> sys.stderr, "Rendering title (#%d):" % idx, repr(title)
        if fontfile is not None:
            drtext.text(pos, title, font=font, fill=(256 - 256 * transparency))
        else:
            drtext.text(pos, title, fill=(256 - 256 * transparency))
#        drtext.text(pos, title, font=font, fill=128)

        # Add the white text to our collected alpha channel. Gray pixels around
        # the edge of the text will eventually become partially transparent
        # pixels in the alpha channel.
        #    alpha = ImageChops.lighter(alpha, imtext)
        alpha = ImageChops.add(alpha, imtext)

        # Make a solid color, and add it to the color layer on every pixel
        # that has even a little bit of alpha showing.
        #    solidcolor = Image.new("RGBA", im.size, "#ffffff")
        #    immask = Image.eval(imtext, lambda p: 120 * (int(p != 0)))
        #    im = Image.composite(solidcolor, im, immask)
        #    draw.text((x, y), w, fill=0, font=font)

        print >> sys.stderr, "Rendered word #%d" % idx
    #    if idx % 100 == 99:
    #        break

    # Add the alpha channel to the image, and save it out.
    im.putalpha(alpha)

    tmpf = tempfile.NamedTemporaryFile(suffix=".png")

    #im.save("transtext.png", "PNG")
    print >> sys.stderr, "Rendering alpha image to file", tmpf.name
    im.save(tmpf.name)

    cmd = "convert %s -background white -flatten %s" % (tmpf.name, filename)
    print >> sys.stderr, "Flattening image", tmpf.name, "to", filename, "using command:", cmd
    os.system(cmd)
Example #31
        print 'Usage:', sys.argv[0], '<image1> <image2>'
        sys.exit(1)

    im1 = Image.open(sys.argv[1])
    im2 = Image.open(sys.argv[2])

    # Difference
    diferencia = DifferenceFilter(im1, im2)
    diferencia.Apply(Filter.CPU)
    tmp = diferencia.fetchResult()
    tmp.save("diferencia.png", "PNG")

    # Threshold
    threshold = ThresholdFilter(tmp, level=20)
    threshold.Apply(Filter.CPU)
    tmp2 = threshold.fetchResult()
    tmp2.save("threshold.png", "PNG")

    # Erosion
    erosion = ErosionFilter(tmp2)
    erosion.Apply(Filter.CPU)
    post = erosion.fetchResult()
    post.save("erosion.png", "PNG")

    # TODO: move the merging into a separate class
    r, g, b = im2.split()
    tmp = ImageChops.add(r, post)
    merged = Image.merge("RGB", (tmp, g, b))
    merged.save("merged.png", "PNG")
  
Example #32
import Image, ImageChops
import numpy as np

hr_flist = 'flist/set5_predict.flist'
lr_flist = 'flist/set5_lrX2.flist'

with open(hr_flist) as f:
    hr_filename_list = f.read().splitlines()
with open(lr_flist) as f:
    lr_filename_list = f.read().splitlines()

for hr_filename, lr_filename in zip(hr_filename_list, lr_filename_list):
    hr_image = Image.open(hr_filename)
    lr_image = Image.open(lr_filename)
    lr_image = lr_image.resize(hr_image.size, Image.ANTIALIAS)
    hr_image = ImageChops.add(hr_image, lr_image, 1, -127)
    hr_image.save(hr_filename)
Example #33
File: App.py Project: jess010/PaintApp
    def r6(self):
        self.im3 = ImageChops.add(self.im, self.im0, 100, 1)
        self.tkimage.paste(self.im3)
Example #35
pygame.init()
screen = pygame.display.set_mode(res)
pygame.display.set_caption("Predator Vision: by Tech B")

fr=False
while 1:
    #check for quit
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()

    #grab two images
    if not fr:
        im1 = cam.getImage()
    #you will need to wait a sec before taking another image
    #if not, the screen will remain black and buggy
    time.sleep(.2)
    im2 = cam.getImage()

    # combine the two frames: (im1 + im2) / 1.3 per pixel
    addx = ImageChops.add(im1, im2, 1.3)
    #diff = ImageChops.difference(im1,im2)
    
    #mux = ImageChops.multiply(addx,diff)
    diff2 = ImageChops.blend(addx, im1,0.9)
    fr=True
    im1=diff2
    
    #convert image to pygame type and then display
    shot = pygame.image.frombuffer(diff2.tostring(), res, "RGB")
    screen.blit(shot,(0,0))
    pygame.display.flip()
Example #36
import Image, ImageChops,ImageFilter

#load all the shiftadded images from the previous step
ims = [Image.open('added%03d.jpg'%i) for i in range(20)]

#return the difference between an image and a shifted version of itself
def differ(im,off): return ImageChops.difference(im,ImageChops.offset(im,off,0))


#find the median contrast at each pixel in each of the 20 shiftadded photos
ds = [ImageChops.add(differ(im,1),differ(im,-1),2,0).filter(ImageFilter.MedianFilter(11)) for im in ims]
	
#create a depth image where the brighter the pixel, the closer to the camera it is
depth = ims[0].copy()
w,h = depth.size
for x in range(1,w-1):
	for y in range(1,h-1):
		c = max([(sum(d.getpixel((x,y))),i) for i,d in enumerate(ds)])[1]
		depth.putpixel((x,y),(c*10,c*10,c*10))
depth.save('depth.jpg')
Example #37
def add_solid(image, **kwargs):
    colour = kwargs.pop('colour')
    img2 = get_overlay_image(image.size, colour)
    return ImageChops.add(image, img2)
Example #38
    def contrastMask(self, outfile):
        """Ceci est un filtre de debouchage de photographies, aussi appelé masque de contraste, 
        il permet de rattrapper une photo trop contrasté, un contre jour, ...
        Écrit par Jérôme Kieffer, avec l'aide de la liste python@aful, 
        en particulier A. Fayolles et F. Mantegazza avril 2006
        necessite numpy et PIL.
        
        @param: the name of the output file (JPEG)
        @return: filtered Photo instance
        """

        try:
            import numpy
#            import scipy.signal as signal
        except ImportError:
            logger.error("This filter needs the numpy library available on https://sourceforge.net/projects/numpy/files/")
            return

        t0 = time.time()
        dimX, dimY = self.pil.size

        ImageFile.MAXBLOCK = dimX * dimY
        img_array = numpy.fromstring(self.pil.tostring(), dtype="UInt8").astype("float32")
        img_array.shape = (dimY, dimX, 3)
        red, green, blue = img_array[:, :, 0], img_array[:, :, 1], img_array[:, :, 2]
        # note: this is faster than desat2=(ar.max(axis=2)+ar.min(axis=2))/2
        desat_array = (numpy.minimum(numpy.minimum(red, green), blue) + numpy.maximum(numpy.maximum(red, green), blue)) / 2.0
        inv_desat = 255. - desat_array
        blured_inv_desat = self._gaussian.blur(inv_desat, config.ContrastMaskGaussianSize)
        bisi = numpy.round(blured_inv_desat).astype("uint8")
        k = Image.fromarray(bisi, "L").convert("RGB")
        S = ImageChops.screen(self.pil, k)
        M = ImageChops.multiply(self.pil, k)
        F = ImageChops.add(ImageChops.multiply(self.pil, S), ImageChops.multiply(ImageChops.invert(self.pil), M))
        exitJpeg = op.join(config.DefaultRepository, outfile)
        F.save(exitJpeg, quality=80, progressive=True, Optimize=True)
        try:
            os.chmod(exitJpeg, config.DefaultFileMode)
        except IOError:
            logger.error("Unable to chmod %s" % outfile)
        exifJpeg = Exif(exitJpeg)
        exifJpeg.read()
        self.exif.copy(exifJpeg)
        exifJpeg.comment = self.exif.comment
#
#        for metadata in [ 'Exif.Image.Make', 'Exif.Image.Model', 'Exif.Photo.DateTimeOriginal',
#                         'Exif.Photo.ExposureTime', 'Exif.Photo.FNumber', 'Exif.Photo.ExposureBiasValue',
#                         'Exif.Photo.Flash', 'Exif.Photo.FocalLength', 'Exif.Photo.ISOSpeedRatings',
#                         "Exif.Image.Orientation", "Exif.Photo.UserComment"
#                         ]:
#            if metadata in self.exif:
#                logger.debug("Copying metadata %s", metadata)
#                try:
#                    exifJpeg[metadata] = self.exif[metadata]
#                except KeyError:
#                    pass #'Tag not set'-> unable to copy it
#                except:
#                    logger.error("Unable to copying metadata %s in file %s, value: %s" % (metadata, self.filename, self.exif[metadata]))
        logger.debug("Write metadata to %s", exitJpeg)
        exifJpeg.write()
        logger.info("The whoole contrast mask took %.3f" % (time.time() - t0))
        return Photo(outfile)
Example #39
def render(points, filename, width=3000, height=1800, fontfile=DEFAULT_FONT, fontsize=12, margin=0.05, transparency=0.5):
    """
    Render t-SNE text points to an image file.
    points is a list of tuples of the form (title, x, y).
    filename should be a .png, typically.
    margin is the amount of extra whitespace added at the edges.
    transparency is the amount of transparency in the text.
    @warning: Make sure you open the PNG in Gimp, or something that supports alpha channels. Otherwise, it will just look completely black.
    """
    W = width
    H = height

    #im = Image.new("L", (W, H), 255)
    im = Image.new("RGBA", (W, H), (0,0,0))

    # use a bitmap font
    #font = ImageFont.load("/usr/share/fonts/liberation/LiberationSans-Italic.ttf")

    if fontfile is not None:
        assert os.path.exists(fontfile)
        font = ImageFont.truetype(fontfile, fontsize)
    
    #draw = ImageDraw.Draw(im)
    #draw.text((10, 10), "hello", font=font)
    
    minx = 0
    maxx = 0
    miny = 0
    maxy = 0
    for (title, x, y) in points:
        if minx > x: minx = x
        if maxx < x: maxx = x
        if miny > y: miny = y
        if maxy < y: maxy = y

    dx = maxx - minx
    dy = maxy - miny
    assert dx > 0
    assert dy > 0
    minx -= dx * margin
    miny -= dy * margin
    maxx += dx * margin
    maxy += dy * margin


    alpha = Image.new("L", im.size, "black")

    for (idx, pt) in enumerate(points):
        (title, x, y) = pt
    #    print x, minx
    #    print 1. * (x - minx) / (maxx - minx)
    #    print y, miny
    #    print 1. * (y - miny) / (maxy - miny)
        x = 1. * (x - minx) / (maxx - minx) * W
        y = 1. * (y - miny) / (maxy - miny) * H
    #    draw.text((x, y), w, fill=255, font=font)

    # Make a grayscale image of the font, white on black.
        pos = (x, y)
        imtext = Image.new("L", im.size, 0)
        drtext = ImageDraw.Draw(imtext)
        print >> sys.stderr, "Rendering title (#%d):" % idx, repr(title)
        if fontfile is not None:
            drtext.text(pos, title, font=font, fill=(256-256*transparency))
        else:
            drtext.text(pos, title, fill=(256-256*transparency))
#        drtext.text(pos, title, font=font, fill=128)

    # Add the white text to our collected alpha channel. Gray pixels around
    # the edge of the text will eventually become partially transparent
    # pixels in the alpha channel.
    #    alpha = ImageChops.lighter(alpha, imtext)
        alpha = ImageChops.add(alpha, imtext)
            
    # Make a solid color, and add it to the color layer on every pixel
    # that has even a little bit of alpha showing.
    #    solidcolor = Image.new("RGBA", im.size, "#ffffff")
    #    immask = Image.eval(imtext, lambda p: 120 * (int(p != 0)))
    #    im = Image.composite(solidcolor, im, immask)
    #    draw.text((x, y), w, fill=0, font=font)
    
        print >> sys.stderr, "Rendered word #%d" % idx
    #    if idx % 100 == 99:
    #        break
    
    # Add the alpha channel to the image, and save it out.
    im.putalpha(alpha)

    tmpf = tempfile.NamedTemporaryFile(suffix=".png")

    #im.save("transtext.png", "PNG")
    print >> sys.stderr, "Rendering alpha image to file", tmpf.name
    im.save(tmpf.name)

    cmd = "convert %s -background white -flatten %s" % (tmpf.name, filename)
    print >> sys.stderr, "Flattening image", tmpf.name, "to", filename, "using command:", cmd
    os.system(cmd)
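A hypothetical call, following the docstring's (title, x, y) tuples; the coordinates and file name below are made up purely for illustration:

points = [("alpha", 0.1, -2.3), ("beta", 4.0, 1.5), ("gamma", -1.2, 0.7)]
render(points, "tsne_words.png", width=1600, height=1000, fontsize=16)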