Example #1
    def autofocus(self,step=5000):
        if self.slide.pos[2] >= 0:  step = -step
        self.slide.moveZ(-step/2)
        z_start = self.slide.pos[2]
        self.frames.fillBuffer()
        self.slide.displaceZ(step)
        z_frames = self.frames.getBuffer()

        #sample every kth plus its lth neighbor:  for k=10,l=2 sample frame 0,10,20 and 2,12,22
        k = 10
        l = 2
        sample_ind = [ind*k for ind in range(len(z_frames)/k)]
        sample_ind2 = [ind*k+l for ind in range(len(z_frames)/k)]
        f = [z_frames[ind] for ind in sample_ind]
        f2 = [z_frames[ind] for ind in sample_ind2]
        n = len(f)
        diffs = []
        for i in range(n-2):
            diffs.append(ImageChops.difference(f[i],f2[i]))
        motion = []
        for f in diffs:
            f = ImageChops.multiply(f,self.curr_mask)
            motion.append(ImageStat.Stat(f).sum[0])
        #g = Gnuplot.Gnuplot()
        #g.plot(motion)

        max_frame = scipy.argmax(motion)
        max_focus = (max_frame/float(n))*step + z_start
        self.slide.moveZ(max_focus)
        return max_focus
Example #2
def matchTemplate(searchImage, templateImage):
    minScore = -1000
    matching_xs = 0
    matching_ys = 0
    # convert images to "L" to reduce computation by factor 3 "RGB"->"L"
    searchImage = searchImage.convert(mode="L")
    templateImage = templateImage.convert(mode="L")
    searchWidth, searchHeight = searchImage.size
    templateWidth, templateHeight = templateImage.size
    # make a mask image the same size as templateImage, filled with color=1
    templateMask = Image.new(mode="L", size=templateImage.size, color=1)
    # loop over each pixel in the search image
    for xs in range(searchWidth - templateWidth + 1):
        for ys in range(searchHeight - templateHeight + 1):
            # for ys in range(10):
            # set some kind of score variable to "All equal"
            score = templateWidth * templateHeight
            # crop the part from searchImage
            searchCrop = searchImage.crop((xs, ys, xs + templateWidth, ys + templateHeight))
            diff = ImageChops.difference(templateImage, searchCrop)
            notequal = ImageChops.darker(diff, templateMask)
            countnotequal = sum(notequal.getdata())
            score -= countnotequal

            if minScore < score:
                minScore = score
                matching_xs = xs
                matching_ys = ys

    if minScore > 100:
        print "Location=", (matching_xs, matching_ys), "Score=", minScore
        quit()
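A minimal usage sketch for the routine above; the file names are placeholders and the template is assumed to be smaller than the search image:

from PIL import Image

search = Image.open("screenshot.png")
template = Image.open("button.png")
matchTemplate(search, template)  # prints the best match location (and exits) once the score clears 100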
Example #3
def Problem1ImageSolver():

	[dirImagesQuestions, dirImagesAnswers] = get_images_for_directory("Representations/Frames/Problem 1/")
	print "Problem 1 Image Questions: \n=================="
	compare_k = dirImagesQuestions.keys()[0]
	compare_v = dirImagesQuestions[compare_k]
	compare_v = ImageChops.invert(compare_v)
	compare_v = findRelevantBoxEdges(compare_v)
	# print compare_k
	for k,v in dirImagesQuestions.iteritems():
		temp_v = ImageChops.invert(dirImagesQuestions[k])
		temp_v = findRelevantBoxEdges(temp_v)
		image_equality = check_shape_equality(compare_v, temp_v)
		equality_string = ""
		if not image_equality:
			equality_string = "different"
		else:
			equality_string = "equal with " + image_equality[1] + " transformation"
		print str(k) + " and " + str(compare_k) + " are " + equality_string

	print "\nProblem 1 Image Answers: \n=================="
	for k,v in dirImagesAnswers.iteritems():
		temp_v = ImageChops.invert(dirImagesAnswers[k])
		temp_v = findRelevantBoxEdges(temp_v)
		# if compare_v.size != temp_v.size:
		# 	print str(compare_k) + " is " + str(compare_v.size) + " but " + str(k) + " is " + str(temp_v.size)
		image_equality = check_shape_equality(compare_v, temp_v)
		equality_string = ""
		if not image_equality:
			equality_string = "different"
		else:
			equality_string = "equal with " + image_equality[1] + " transformation"
		print str(k) + " and " + str(compare_k) + " are " + equality_string
Example #4
def color_grad_magnitude(image):
    red, green, blue = image.split()
    red_grad_mag = grad_magnitude(red)
    green_grad_mag = grad_magnitude(green)
    blue_grad_mag = grad_magnitude(blue)
    tmp_image = ImageChops.lighter(red_grad_mag, green_grad_mag)
    return ImageChops.lighter(tmp_image, blue_grad_mag)
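grad_magnitude is not defined in this snippet; a rough stand-in, assuming each band is a single-channel PIL image, could lean on Pillow's built-in edge kernel:

from PIL import ImageFilter

def grad_magnitude(band):
    # FIND_EDGES applies a fixed 3x3 edge kernel, a cheap approximation of
    # per-band gradient strength
    return band.filter(ImageFilter.FIND_EDGES)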
Example #5
    def loadImage(self, path):
        global zoomAmount, currentImage
        pixmap = QtGui.QPixmap(path)
        self.ui.image.setPixmap(pixmap)
        self.ui.image.setFixedSize(pixmap.size())

        self.zoomedPixmap = pixmap.scaled (self.ui.image.width()*zoomAmount, self.ui.image.height()*zoomAmount, QtCore.Qt.KeepAspectRatio)
        myPixmap = self.zoomedPixmap.copy(0,0, self.ui.zoomImage.width(), self.ui.zoomImage.height())
        self.ui.zoomImage.setPixmap(myPixmap)
        self.ui.zoomImage.setFixedSize(myPixmap.size())

        currentImage = Image.open(path)
        # convert to grayscale
        if currentImage.mode != "L":
            currentImage= currentImage.convert("L")
            
        # Sobel operator
        # edge1 = currentImage.filter(ImageFilter.Kernel((3,3), [1, 0, -1, 2, 0, -2, 1, 0, -1], scale=4))
        # edge2 = currentImage.filter(ImageFilter.Kernel((3,3), [1, 2, 1, 0, 0, 0, -1, -2, -1], scale=4))
        # edge3 = currentImage.filter(ImageFilter.Kernel((3,3), [-1, 0, 1, -2, 0, 2, -1, 0, 1], scale=4))
        # edge4 = currentImage.filter(ImageFilter.Kernel((3,3), [-1, -2, -1, 0, 0, 0, 1, 2, 1], scale=4))
        
        # Scharr operator
        edge1 = currentImage.filter(ImageFilter.Kernel((3,3), [3, 0, -3, 10, 0, -10, 3, 0, -3], scale=16))
        edge2 = currentImage.filter(ImageFilter.Kernel((3,3), [3, 10, 3, 0, 0, 0, -3, -10, -3], scale=16))
        edge3 = currentImage.filter(ImageFilter.Kernel((3,3), [-3, 0, 3, -10, 0, 10, -3, 0, 3], scale=16))
        edge4 = currentImage.filter(ImageFilter.Kernel((3,3), [-3, -10, -3, 0, 0, 0, 3, 10, 3], scale=16))
        
        currentImage = ImageChops.add(ImageChops.add(ImageChops.add(edge1, edge2), edge3), edge4)
Example #6
    def contrast(self, cropped_img):
        """
        Provides a high contrast board image for input into Tesseract OCR
        """

        # Convert the board image into greyscale

        bwImg = cropped_img.convert("L")

        # Multiply board image with inverted image so that text is black

        bwImgM = ImageChops.multiply(ImageChops.invert(bwImg), bwImg)

        # Increase contrast

        enhancedImg = ImageEnhance.Contrast(bwImgM)
        bwImgM = enhancedImg.enhance(5.0)

        # Produce pixel image object (array) for operation (operates in place)

        bwMDat = bwImgM.load()

        # If the pixel value is not black, make it white
        # (No colours any more, I want them to turn black)

        for i in range(0, bwImgM.size[1]):
            for j in range(0, bwImgM.size[0]):
                if bwMDat[j, i] != 0:
                    bwMDat[j, i] = 255
        # Debugging
        # bwImgM.show()

        return bwImgM
Example #7
def draw_line(image, point, neighbor, size):
    width, height = size
    center_point = (width//2, height//2)
    offset = (width//2 - point[0], height//2 - point[1])
    image = ImageChops.offset(image, offset[0], offset[1])
    draw = ImageDraw.Draw(image)
    to_point = ((neighbor[0] + offset[0]) % width, (neighbor[1] + offset[1]) % height)
    draw.line((center_point, to_point))
    return ImageChops.offset(image, -offset[0], -offset[1])
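A self-contained sketch of calling the helper above; the coordinates are arbitrary, and the segment is allowed to wrap across the image border thanks to the offset trick:

from PIL import Image

world = Image.new("RGB", (100, 80), (0, 0, 0))
world = draw_line(world, (5, 5), (95, 75), world.size)  # wraps through the edge instead of crossing the map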
Example #8
 def dilate(self, image):
     paddedImage = self.createPaddedImage(image, 1)
     thresholdImg = paddedImage.point(lambda i, v=128: i > v and 255)
     thresholdImg = ImageChops.invert(thresholdImg)
     filteredImg = thresholdImg.filter(ImageFilter.FIND_EDGES)
     thresholdImg = filteredImg.point(lambda i, v=128: i > v and 255)
     arithImg = ImageChops.add(paddedImage, thresholdImg)
     box = (1, 1, arithImg.size[0]-1, arithImg.size[1]-1)
     outImage = arithImg.crop(box)
     return outImage
Example #9
def screen_mode(im, wm, wmbuffer):
    imsize = im.size
    wmsize = wm.size
    brightness = float(_OPACITY) / 100
    brightval = int(round(255 * brightness))
    wm_pos = _wm_pos(wmbuffer, imsize, wmsize)
    black_bg = Image.new('RGB', imsize, (0, 0, 0) )
    black_bg.paste(wm, wm_pos)
    darkener = Image.new('RGB', imsize, (brightval, brightval, brightval) )
    darkened_fit_wm = ImageChops.multiply(black_bg, darkener)
    return ImageChops.screen(darkened_fit_wm, im)
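_OPACITY and _wm_pos are module-level helpers not shown here; a self-contained sketch of the same screen-blend idea, with made-up sizes and colors, looks like this:

from PIL import Image, ImageChops

base = Image.new("RGB", (200, 200), (30, 60, 90))       # stand-in for the photo
wm_layer = Image.new("RGB", (200, 200), (0, 0, 0))      # black canvas, like black_bg above
wm_layer.paste(Image.new("RGB", (80, 40), (160, 160, 160)), (10, 10))  # pre-dimmed watermark
result = ImageChops.screen(wm_layer, base)              # screen() only lightens, so black areas leave base untouched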
Example #10
def addup(ims,offset):
	##must be of len 2**m
	n=1
	while len(ims)>1:
		newims=[]
		for i in range(0,len(ims),2):
			#print 'offset = %d'%(-offset*n)
			newims.append(ImageChops.add(ims[i],ImageChops.offset(ims[i+1],offset*n,0),2,0))
		ims = newims
		n*=2
	return ims[0]
Example #11
def vignette(image, off=0.2, stop=0.7, center_w=0.5, center_h=0.5):
	width, height = image.size
	vlayer = create_circular_gradient(image.size, 1.3, center_w, center_h, False)
	curv = list(curves.create_curve([(0, 0), (96, 200), (255, 255)]))
	vlayer = curves.apply_curves(vlayer, curv)
	vlayer = vlayer.filter(ImageFilter.BLUR).convert("RGB")
	clouds = create_clouds_bw(vlayer.size, 3)
	clouds = ImageEnhance.Brightness(clouds).enhance(3)
	clouds = ImageEnhance.Contrast(clouds).enhance(0.9)
	clouds = ImageChops.multiply(clouds, ImageChops.invert(vlayer))
	return ImageChops.multiply(image, ImageChops.invert(clouds))
Example #12
def addup2(ims,offset):
	##must be of len 2**m
	
	n=len(ims)+1
	#do all the offsets
	ims = [ImageChops.offset(im,-offset*(n/2-i),0) for i,im in enumerate(ims)]
	
	#add all the images, two at a time to avoid overflow
	while len(ims)>1:		
		ims = [ImageChops.add(ims[i],ims[i+1],2,0) for i in range(0,len(ims),2)]

	return ims[0]
Example #13
def seg_mask(iseries, sbinfilepath, segmaskfilepath, segsbfilepath,origfilepath,expfilepath,segexpfilepath,segorigfilepath):
	#iseries is a filename, without jpg on the end and with sb on the end
	# First, apply mask to sb image - mask is black (or grey) on white background
	filename = re.sub('_mask','',iseries) + '.jpg' #this is the sb image
	# print 'Initial', filename

	maskim = Image.open(segmaskfilepath+ re.sub('.jpg','_mask.jpg',filename)).convert("L")
	# Mask not always black so first make sure it is
	threshold = 141
	maskim = maskim.point(lambda p: p > threshold and 255)

	threshfilename = re.sub('_sb','_sbthres', filename)
	sbim = Image.open(sbinfilepath + threshfilename)
	try:
		# print 'Get thresh'
		seg_sb = ImageChops.lighter(sbim,maskim)
		seg_sb.save(segsbfilepath+ re.sub('.jpg','_seg.jpg',threshfilename) )
	except IOError:
		print 'error in file'

	#Now open the original image - get rid of sb from filename
	filename = re.sub('_sb','', filename)
	origim = Image.open(origfilepath + filename).convert("L")
	seg_orig = ImageChops.lighter(origim,maskim)
	seg_orig.save(segorigfilepath+ re.sub('.jpg','_seg_orig.jpg',filename))

	#Now open the exp image and apply mask
	# First make mask white on black
	maskim = ImageChops.invert(maskim)

	# Now extract all the pixels that are white and make this region a transparent region on the mask
	maskim = maskim.convert('LA')
	datas = maskim.getdata()
	newData = list()
	for item in datas:
		if item[0] == 255:
			newData.append((255, 0))
		else:
			newData.append(item)

	maskim.putdata(newData)
	#img.save("img2.png", "PNG")
	l,a = maskim.split()

	# Check that exp file exists
	if os.path.exists(expfilepath +  re.sub('ish','exp',filename)):
		#seg_exp = ImageChops.logical_and(expim,maskim)
		expim = Image.open(expfilepath +  re.sub('ish','exp',filename)).convert("LA") # should be a grayscale image
		expim.paste(maskim, mask = a)
		expim = expim.convert("L")
		expim.save(segexpfilepath+ re.sub('.jpg','_seg_exp.tif',filename))
	else: print 'N'
Example #14
def combine(ims,offset):
	
	n=len(ims)-1
	
	#do all the offsets
	ims = [ImageChops.offset(im,-offset*(n/2-i),0) for i,im in enumerate(ims)]
	
	#add all the images, two at a time to avoid overflow
	while len(ims)>1:		
		ims = [ImageChops.add(ims[i],ims[i+1],2,0) for i in range(0,len(ims),2)]
	
	#return the final result image
	return ims[0]
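A hypothetical call, assuming a power-of-two number of same-size grayscale frames that should sit 8 px apart before pairwise averaging:

from PIL import Image

frames = [Image.new("L", (64, 64), 40 * i) for i in range(4)]
stacked = combine(frames, 8)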
Example #15
 def _gen_pants(self):
     if self.command.has_key("pants"):
         self.base_image = self.image["pants"]
         self.pattern_image = self.image[self.command["pants"]]
     if self.command.has_key("color"):
         self.color_image = Image.new("RGBA", (240, 240), self.command["color"])
     if self.command.has_key("base_color"):
         self.base_color_image = Image.new("RGBA", (240, 240), self.command["base_color"])
     
     self.icon.paste(self.base_image, mask=self.base_image)
     self.icon = ImageChops.composite(self.icon, self.base_color_image, self.image["base_mask"])
     self.icon = ImageChops.composite(self.icon, self.color_image, self.pattern_image)
     self.icon.paste(self.image["frame"], mask=self.image["frame"])
Example #16
def voronoi(players=4):
    width = randrange(64, 256)
    height = randrange(64, 256)
    point_count = randrange(players*3, players*6)
    min_dist = width * height / point_count
    print('%s, %s  %s %s' % (width, height, min_dist, sqrt(min_dist)))
    px, py = 0, 0
    points = []
    while min_dist > 100 and len(points) < point_count:
        while min_dist > 100:
            px, py = randrange(width), randrange(height)
            for nx, ny in points:
                if distance(px, py, nx, ny, width, height) < min_dist:
                    break
            else:
                break
            min_dist -= 1
        points.append((px, py))
    #for px, py in points:
    #    for nx, ny in points:
    #        print('(%s)-(%s) = %s' % ((px,py),(nx,ny),distance(px, py, nx, ny, width, height)))
    path = {}
    closest = {}
    for p_x, p_y in points:
        nearest = {}
        for n_x, n_y in points:
            if (p_x, p_y) != (n_x, n_y):
                dist = distance(p_x, p_y, n_x, n_y, width, height)
                nearest[dist] = (n_x, n_y)
        sorted = nearest.keys()
        sorted.sort()
        path[(p_x, p_y)] = [nearest[key] for key in sorted[:3]]
        closest[(p_x, p_y)] = sorted[0]
    image = Image.new('RGB', (width, height), BARRIER_COLOR)
    draw = ImageDraw.Draw(image)
    for point in points:
        image.putpixel(point, (0,0,0))
        size = int(sqrt(closest[point]))//2 - 2
        draw.ellipse((point[0]-size, point[1]-size, point[0]+size, point[1]+size),
                fill=LAND_COLOR, outline=LAND_COLOR)
    from_point = (width//2, height//2)
    for point, path_neighbors in path.items():
        offset = (width//2 - point[0], height//2 - point[1])
        image = ImageChops.offset(image, offset[0], offset[1])
        draw = ImageDraw.Draw(image)
        for neighbor in path_neighbors:
            to_point = ((neighbor[0] + offset[0]) % width, (neighbor[1] + offset[1]) % height)
            draw.line((from_point, to_point), width=randrange(3,6), fill=LAND_COLOR)
        image = ImageChops.offset(image, -offset[0], -offset[1])
    image = image.resize((width*4, height*4))
    image.save('voronoi.png')
Example #17
	def process(self, base):
		yield 'Desaturate...', base
		lay1 = colors.convert_to_luminosity(base)
		yield 'Invert...', lay1
		lay1 = ImageChops.invert(lay1)
		yield 'Smooth...', lay1
		lay1 = lay1.filter(ImageFilter.BLUR)
		yield 'Merge...', lay1
		lay2 = base.copy()
		lay2 = ImageChops.blend(lay1, lay2, 0.75)
		yield 'Merge softlight...', lay2
		image = base.copy()
		image = layers.merge_layers_soft_light(image, lay2, 0.9)
		yield 'Done', image
Example #18
 def images_equal(image1, image2, acceptable_rms = 10):
     try:
         img1 = Image.open(image1)
         img2 = Image.open(image2)
     except:
         return False
     try:
         diff = ImageChops.difference(img1, img2)
     except ValueError:
         return False
     h = diff.histogram()
     sq = (value*((idx%256)**2) for idx, value in enumerate(h))
     sum_of_squares = sum(sq)
     rms = math.sqrt(sum_of_squares/float(img1.size[0] * img1.size[1]))
     return rms <= acceptable_rms
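A hypothetical call with placeholder file names and the default RMS tolerance of 10:

same = images_equal("before.png", "after.png", acceptable_rms=10)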
Example #19
def autocrop(im, bgcolor):
    bg = Image.new(im.mode, im.size, bgcolor)
    diff = ImageChops.difference(im, bg)
    bbox = diff.getbbox()
    print "bbox",bbox
    if bbox: return im.crop(bbox) # cropped
    else: return im # no contents
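A minimal sketch of trimming a solid white border with the function above, assuming an RGB input file (the names are placeholders):

from PIL import Image

im = Image.open("scan.png")
autocrop(im, (255, 255, 255)).save("scan_trimmed.png")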
Example #20
    def equal(self, img1, img2, skip_area=None):
        """Compares two screenshots using Root-Mean-Square Difference (RMS).
        @param img1: screenshot to compare.
        @param img2: screenshot to compare.
        @return: equal status.
        """
        if not HAVE_PIL:
            return None

        # Trick to avoid getting a lot of screen shots only because the time in the windows
        # clock is changed.
        # We draw a black rectangle on the coordinates where the clock is locates, and then
        # run the comparison.
        # NOTE: the coordinates are changing with VM screen resolution.
        if skip_area:
            # Copying objects to draw in another object.
            img1 = img1.copy()
            img2 = img2.copy()
            # Draw a rectangle to cover windows clock.
            for img in (img1, img2):
                self._draw_rectangle(img, skip_area)

        # To get a measure of how similar two images are, we use
        # root-mean-square (RMS). If the images are exactly identical,
        # this value is zero.
        diff = ImageChops.difference(img1, img2)
        h = diff.histogram()
        sq = (value * ((idx % 256)**2) for idx, value in enumerate(h))
        sum_of_squares = sum(sq)
        rms = math.sqrt(sum_of_squares/float(img1.size[0] * img1.size[1]))

        # Might need to tweak the threshold.
        return rms < 8
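The same histogram-based RMS can be pulled out into a standalone helper; the name and structure below are illustrative rather than part of the original class:

import math
from PIL import ImageChops

def rms_difference(img1, img2):
    h = ImageChops.difference(img1, img2).histogram()
    # the histogram holds one 256-bin block per band, so idx % 256 recovers the pixel level
    sq = (count * ((idx % 256) ** 2) for idx, count in enumerate(h))
    return math.sqrt(sum(sq) / float(img1.size[0] * img1.size[1]))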
Example #21
def savePicture(picture, filename):
    if type(picture) == type([]):
        import ImageChops
        from GifImagePlugin import getheader, getdata
        # open output file
        fp = open(filename, "wb")
        previous = None
        for im in picture:
            if type(im) == type(""): # filename
                im = Image.open(im)
                im.load()
                im = im.convert("P") # in case jpeg, etc
            else:
                im = im.image.convert("P")
            if not previous:
                for s in getheader(im) + getdata(im):
                    fp.write(s)
            else:
                delta = ImageChops.subtract_modulo(im, previous)
                bbox = delta.getbbox()
                if bbox:
                    for s in getdata(im.crop(bbox), offset = bbox[:2]):
                        fp.write(s)
            previous = im.copy()
        fp.write(";")
        fp.close()
    else:
        return picture.image.save(filename)
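A hypothetical call that assembles an animated GIF from frame files on disk (the names are placeholders). Frames after the first are reduced to the bounding box of their subtract_modulo delta against the previous frame, so only changed regions are re-encoded:

savePicture(["frame0.png", "frame1.png", "frame2.png"], "animation.gif")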
Example #22
 def _equal(self, img1, img2):
     """
     Checks if two screenshots are identical.
     @param img1: first screenshot to check
     @param img2: second screenshot to check
     """
     return ImageChops.difference(img1, img2).getbbox() is None
Example #23
def _getBounds(size, glDispID, filename, scale, rotation, partRotation):
    
    # Clear the drawing buffer with white
    glClearColor(1.0, 1.0, 1.0, 1.0)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    
    # Draw the piece in black
    glColor3f(0, 0, 0)
    adjustGLViewport(0, 0, size, size)
    rotateToView(rotation, scale)
    rotateView(*partRotation)

    glCallList(glDispID)

    # Use PIL to find the image's bounding box (sweet)
    pixels = glReadPixels(0, 0, size, size, GL_RGB,  GL_UNSIGNED_BYTE)
    img = Image.fromstring("RGB", (size, size), pixels)
    
    bg = bgCache.setdefault(size, Image.new("RGB", img.size, (255, 255, 255)))
    box = ImageChops.difference(img, bg).getbbox()

    if box is None:
        return (0, 0, 0, 0, 0, 0)  # Rendered entirely out of frame

#    if filename:
#        import os
#        rawFilename = os.path.splitext(os.path.basename(filename))[0]
#        img.save("C:\\lic\\tmp\\%s_%dx%d.png" % (rawFilename, box[2] - box[0], box[3] - box[1]))
#        print filename + "box: " + str(box if box else "No box = shit")

    # Find the bottom left corner inset, used for placing PLIItem quantity labels
    data = img.load()
    leftInset = _getLeftInset(data, size, box[1])
    bottomInset = _getBottomInset(data, size, box[0])
    return box + (leftInset - box[0], bottomInset - box[1])
Example #24
    def test_r(self):
        """ Line of circles (varying r) across image each produces a strong response """

        im = Image.new("L", (1000, 200), (0))
        npoints = 18
        xs = [ (100 + 50 * t) for t in range(npoints) ]
        for t in range(npoints):
            r = (2.0+0.5*t)
            circle(im, xs[t], 100, r, 248)

        # Add noise into the image.  If the image does not contain noise,
        # then the non maximum suppression can - like Buridan's ass - be
        # presented with two adjacent responses that are equal, and reject
        # both because neither is a maximum.  The chance of this happening
        # with real-world images is very remote indeed.

        noise = Image.fromstring("L", (1000,200), "".join([ chr(random.randrange(0, 8)) for i in range(1000 * 200)]))
        im = ImageChops.add(im, noise)

        result = sorted([(x,y,s,response) for (x,y,s,response) in simple(im, 7, 1.0, 999999.0, 999999.0)][-npoints:])

        # Must have npoints
        self.assertEqual(len(result), npoints)

        # X coordinates must be within 1 point of expected
        for i,(x,y,s,r) in enumerate(result):
            self.assert_(abs(x - xs[i]) <= 1)

        # Already ordered by x, so scale should be increasing
        ss = [s for (x,y,s,r) in result]
        self.assertEqual(ss, sorted(ss))
Example #25
def autocrop(im, bgcolor, borderWidth = 0):
    if im.mode != 'RGB':
        im = im.convert('RGB')
    bg = Image.new('RGB', im.size, bgcolor)
    diff = ImageChops.difference(im, bg)
    bbox = diff.getbbox()
    if bbox:
        if borderWidth > 0:
            (x0,y0,x2,y2) = bbox

            if x0 > borderWidth:
                x0 = x0 - borderWidth
            else:
                x0 = 0

            if y0 > borderWidth:
                y0 = y0 - borderWidth
            else:
                y0 = 0

            if x2 + borderWidth < im.size[0]:
                x2 = x2 + borderWidth
            else:
                x2 = im.size[0]

            if y2 + borderWidth < im.size[1]:
                y2 = y2 + borderWidth
            else:
                y2 = im.size[1]

            bbox = (x0,y0,x2,y2)

        return im.crop(bbox)
    return im
Example #26
    def difference(self, first, second):
        import ImageChops
        from hashlib import md5
        difference = ImageChops.difference(first, second)
        diff_box = difference.getbbox()
        diffs = []
        if diff_box is not None:
            # If there is any difference, just retrieve the box
            # that has changed in the new frame
            image = first.crop((diff_box[0],
                                     diff_box[1],
                                     diff_box[2],
                                     diff_box[3]))
            size = (diff_box[2] - diff_box[0], diff_box[3] - diff_box[1])
            position = (diff_box[0], diff_box[1])
            hash = str(md5(image.tostring()).hexdigest())
            diff = {
                "hash": hash,  # Hash of the diff for checking if portion is repeated (md5 string)
                "image": image,  # Portion of the image <Image>
                "position": position,  # Position of the portion in the frame (x, y)
                "size": size  # Size of the portion (x, y)
            }
            diffs.append(diff)

        return diffs
Example #27
def imagecompare(imgfile1, imgfile2):
    try:
        import ImageChops, Image
    except ImportError:
        raise Exception('Python-Imaging package not installed')
    try:
        diffcount = 0.0
        im1 = Image.open(imgfile1)
        im2 = Image.open(imgfile2)

        imgcompdiff = ImageChops.difference(im1, im2)
        diffboundrect = imgcompdiff.getbbox()
        imgdiffcrop = imgcompdiff.crop(diffboundrect)

        data = imgdiffcrop.getdata()

        seq = []
        for row in data:
            seq += list(row)

        for i in xrange(0, imgdiffcrop.size[0] * imgdiffcrop.size[1] * 3, 3):
            if seq[i] != 0 or seq[i+1] != 0 or seq[i+2] != 0:
                diffcount = diffcount + 1.0
        
        diffImgLen = imgcompdiff.size[0] * imgcompdiff.size[1] * 1.0
        diffpercent = (diffcount * 100) / diffImgLen
        return diffpercent
    except IOError:
        raise Exception('Input file does not exist')
Example #28
def draw_marker(width, height, offset, filename):
    im = Image.new('L', (128, 256), 255)
    draw = ImageDraw.Draw(im)

    cx = (im.size[0] / 2) + 1.5
    cy = (im.size[1] / 4) + 1.5
    radius = width * 4.0 / 2.0
    offset = int(offset * 4.0)
    for thickness, colour in ((0, 0x60), (4, 0xE0)):
        ol = im.copy()
        r = radius + 0.5 - thickness
        draw.ellipse((cx - r, cy - r, cx + r, cy + r), fill=colour)
        x1 = offset + 0.5 - (thickness * 1.5)
        y1 = (height * 4.0) + 0.5 - (width * 2.0) - (thickness * 2.0)
        draw.polygon((cx + 0.5, cy + y1, cx - 0.5, cy + y1,
                      cx - x1, cy + 16.5, cx + x1, cy + 16.5),
                     fill=colour)
    del draw

    im = im.resize((im.size[0] / 4, im.size[1] / 4), Image.ANTIALIAS)
    ol = ol.resize(im.size, Image.ANTIALIAS)
    mask = Image.eval(ol, lambda x: (0, 255)[x < 210])
    im = Image.composite(im, ImageChops.constant(im, 0), mask)
    im = im.crop(im.getbbox())
    im.save(filename, transparency=0)
Example #29
    def do_invert(self):
        """usage: invert <image:pic1>

        Invert the top image.
        """
        import ImageChops
        self.push(ImageChops.invert(self.do_pop()))
Example #30
 def __init__(self, pathDataFilename = "", worldImageFilename = "", resolution = 0.01):
     self.worldImageFilename = worldImageFilename
     self.pathDataFilename = pathDataFilename
     self.outfile = ""
     self.app = 0
     path = pyrobotdir()
     self.fontFilename = path + "/tools/pilfonts/courR08.pil"
     self.symbols = 1        # activates/deactivates symbol mode
     self.color = "0"          # activates/deactivates color
     self.length = 10     # the length of lines in non-symbol mode
     # the resolution given for the bitmap in the world file
     self.resolution = resolution
     self.interval = 2       # frequency datapoints should be displayed
     self.robotPathData = self.readDataFile()
     im = Image.open(self.worldImageFilename)
     if self.color == "0":
         im = ImageChops.invert(im)
     self.imageData = im.convert("RGB")
     self.convertXPositionData(self.imageData, self.robotPathData)
     self.drawObj = ImageDraw.Draw(self.imageData)
     self.textDict = {}
     self.symbolDict = {}
     self.symbolSet = SymbolSet()
     self.colorSet = ColorSet()
     self.quitWhenDone = 1
Example #31
    def do_add(self):
        """usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>

        Pop the two top images, produce the scaled sum with offset.
        """
        import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        scale = float(self.do_pop())
        offset = int(self.do_pop())
        self.push(ImageChops.add(image1, image2, scale, offset))
Example #32
def estimate(file, s=5):
    """Estimates the amount of focus of an image file.
    Returns a real number: higher values indicate better focus.
    """
    im = Image.open(file).convert("L")
    w, h = im.size
    box = (w / 2 - 50, h / 2 - 50, w / 2 + 50, h / 2 + 50)
    im = im.crop(box)
    imf = im.filter(ImageFilter.MedianFilter(s))
    d = ImageChops.subtract(im, imf, 1, 100)
    return ImageStat.Stat(d).stddev[0]
Example #33
    def do_subtract(self):
        """usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>

        Pop the two top images, produce the scaled difference with offset.
        """
        import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        scale = float(self.do_pop())
        offset = int(self.do_pop())
        self.push(ImageChops.subtract(image1, image2, scale, offset))
Example #34
def _im_trim(im_obj, border=0):
    'Trims an image object using Python Image Library'
    if not isinstance(border, int):
        msg = 'Input border must be an int, but is %s, %s instead' % (
            border, type(border))
        raise ValueError(msg)
    # make a white background:
    backg = Image.new(im_obj.mode, im_obj.size, im_obj.getpixel((0, 0)))
    # do an image difference:
    diff = ImageChops.difference(im_obj, backg)
    # add it together
    diff = ImageChops.add(diff, diff, 1.0, -100)
    # and see what the bbox is of that...
    bbox = diff.getbbox()

    if border != 0:
        border_bbox = [-border, -border, border, border]
        # now apply that trim:
        bbox_tr = [x + y for x, y in zip(bbox, border_bbox)]

        # bbox defines the first corner as top+left, then the second corner as bottom+right
        # (not the bottom left corner, and the width, height from there)
        if bbox_tr[0] < 0:
            bbox_tr[0] = 0
        if bbox_tr[1] < 0:
            bbox_tr[1] = 0
        if bbox_tr[2] > im_obj.size[0]:
            bbox_tr[2] = im_obj.size[0]
        if bbox_tr[3] > im_obj.size[1]:
            bbox_tr[3] = im_obj.size[1]
        # now check to see if that's actually going to do anything:
        if bbox_tr == [0, 0, im_obj.size[0], im_obj.size[1]]:
            bbox = None
        else:
            bbox = bbox_tr

    if bbox:
        # crop:
        return im_obj.crop(bbox)
    else:
        return im_obj
Example #35
    def loadImage(self, path):
        global zoomAmount, currentImage
        pixmap = QtGui.QPixmap(path)
        self.ui.image.setPixmap(pixmap)
        self.ui.image.setFixedSize(pixmap.size())

        self.zoomedPixmap = pixmap.scaled(self.ui.image.width() * zoomAmount,
                                          self.ui.image.height() * zoomAmount,
                                          QtCore.Qt.KeepAspectRatio)
        myPixmap = self.zoomedPixmap.copy(0, 0, self.ui.zoomImage.width(),
                                          self.ui.zoomImage.height())
        self.ui.zoomImage.setPixmap(myPixmap)
        self.ui.zoomImage.setFixedSize(myPixmap.size())

        currentImage = Image.open(path)
        # convert to grayscale
        if currentImage.mode != "L":
            currentImage = currentImage.convert("L")

        # Sobel operator
        # edge1 = currentImage.filter(ImageFilter.Kernel((3,3), [1, 0, -1, 2, 0, -2, 1, 0, -1], scale=4))
        # edge2 = currentImage.filter(ImageFilter.Kernel((3,3), [1, 2, 1, 0, 0, 0, -1, -2, -1], scale=4))
        # edge3 = currentImage.filter(ImageFilter.Kernel((3,3), [-1, 0, 1, -2, 0, 2, -1, 0, 1], scale=4))
        # edge4 = currentImage.filter(ImageFilter.Kernel((3,3), [-1, -2, -1, 0, 0, 0, 1, 2, 1], scale=4))

        # Scharr operator
        edge1 = currentImage.filter(
            ImageFilter.Kernel((3, 3), [3, 0, -3, 10, 0, -10, 3, 0, -3],
                               scale=16))
        edge2 = currentImage.filter(
            ImageFilter.Kernel((3, 3), [3, 10, 3, 0, 0, 0, -3, -10, -3],
                               scale=16))
        edge3 = currentImage.filter(
            ImageFilter.Kernel((3, 3), [-3, 0, 3, -10, 0, 10, -3, 0, 3],
                               scale=16))
        edge4 = currentImage.filter(
            ImageFilter.Kernel((3, 3), [-3, -10, -3, 0, 0, 0, 3, 10, 3],
                               scale=16))

        currentImage = ImageChops.add(
            ImageChops.add(ImageChops.add(edge1, edge2), edge3), edge4)
Example #36
def background_crop(image_name, new_name, bgcolor):
    """ removes frame of bgcolor from and image and saves the image under new_name """
    print('background_crop:' + image_name)
    image = Image.open(image_name)
    bg = Image.new("RGBA", image.size, bgcolor)
    diff = ImageChops.difference(image, bg)
    bbox = diff.getbbox()

    if bbox:
        new_image = image.crop(bbox)
        new_image.save(new_name)
    return bbox
Example #37
def PreProcess(im):
    im = NMode(im)
    im1 = ndimage.grey_erosion(im, size=(10, 10))
    scipy.misc.imsave("eroded.jpg", im1)
    im1 = Image.open("eroded.jpg")

    im = ImageOps.equalize(im, 0)
    im = ImageChops.difference(im1, im)
    #print ("image height %d and width %d\n"%(imh,imw))

    im = GBinarization(im)  #binarize the image
    return im
Example #38
    def compare(self, img1, img2):
        img = ImageChops.difference(img1, img2)
        xsize, ysize = img.size

        for s in range(0, xsize / 3):
            s = s * 3
            for m in range(0, ysize / 3):
                m = m * 3
                # difference() keeps the band count of its inputs, so getpixel
                # may return an int ("L") or a tuple ("RGB")
                g = img.getpixel((s, m))
                if isinstance(g, tuple):
                    g = max(g)
                if g > 150:
                    return True
        return False
Example #39
def main():
    '''Main Function'''

    url1 = raw_input('Enter the link to the first image: ')
    url2 = raw_input('Enter the link to the second image: ')

    ext1 = extension(url1)
    ext2 = extension(url2)

    if string.upper(ext1) != string.upper(ext2):
        print 'File-types dont match..!'
        sys.exit(1)

    file1 = 'im1' + ext1
    file2 = 'im2' + ext2

    urlretrieve(url1, file1)
    urlretrieve(url2, file2)

    diff_perc_seq = []

    im1 = Image.open(file1)
    im2 = Image.open(file2)

    im1, im2 = make_even(im1, im2)  #The image sizes are even now.

    diff_image = ImageChops.difference(
        im1,
        im2)  #diff_image contains the pixel level difference as an RGB tuple.

    #getbbox returns box containing the non-zero regions of the image. If its none, difference is none..!
    if diff_image.getbbox() is None:
        print 'Mirror Images..! The images are 100% similar. Similarity Scale value - 100'
    else:
        pixel_tuple_seq = diff_image.getdata(
        )  #pixel_tuple_seq contains the list of difference pixel tuples(R, G, B).

        pixel_rms_seq = map(
            rms, pixel_tuple_seq
        )  #pixel_rms_seq contains the rms list of difference pixel tuples.
        #The percentage of difference is found out using formula, perc = (value/255) * 100
        for item in pixel_rms_seq:
            diff_perc_seq.append(item / 255 * 100)
        avg_diff = sum(diff_perc_seq) / len(
            diff_perc_seq)  #The total average difference is found out.
        similarity = 100 - avg_diff

        if avg_diff == 100:
            print 'The images are completely dissimilar. Similarity scale value - 0'
        else:
            print 'The images are %.2f%% similar. Similarity scale value - %.2f ' % (
                similarity, similarity)
    os.system('rm ' + file1 + ' ' + file2)
Example #40
def matchTemplate(searchImage, templateImage):
    minScore = -1000
    matching_xs = 0
    matching_ys = 0
    # convert images to "L" to reduce computation by factor 3 "RGB"->"L"
    searchImage = searchImage.convert(mode="L")
    templateImage = templateImage.convert(mode="L")
    searchWidth, searchHeight = searchImage.size
    templateWidth, templateHeight = templateImage.size
    # make a mask image the same size as templateImage, filled with color=1
    templateMask = Image.new(mode="L", size=templateImage.size, color=1)
    #loop over each pixel in the search image
    xs = 0
    for ys in range(searchHeight - templateHeight + 1):
        #for ys in range(10):
        #set some kind of score variable to "All equal"
        score = templateWidth * templateHeight
        # crop the part from searchImage
        searchCrop = searchImage.crop(
            (xs, ys, xs + templateWidth, ys + templateHeight))
        diff = ImageChops.difference(templateImage, searchCrop)
        notequal = ImageChops.darker(diff, templateMask)
        countnotequal = sum(notequal.getdata())
        score -= countnotequal

        if minScore < score:
            minScore = score
            matching_ys = ys
        elif score == templateWidth * templateHeight:
            print "Conflicting score", score, (matching_xs, matching_ys), (xs,
                                                                           ys)
            return None

    #print "  - Location=",(matching_xs, matching_ys), "Score=",minScore
    im1 = Image.new('RGB', (searchWidth, searchHeight), (80, 147, 0))
    im1.paste(templateImage, ((matching_xs), (matching_ys)))
    #searchImage.show()
    #im1.show()
    #im1.save('template_matched_in_search.png')
    return matching_ys
Example #41
 def test_L_symmetry(self):
     """
     image and negated image have the same keypoints
     """
     ref = simple(self.im640)
     self.assert_(len(ref) != 0) # if no hits in the image, have a test bug
     result = simple(ImageChops.invert(self.im640))
     self.assertEqual(len(ref), len(result))
     for (a,e) in zip(sorted(result), sorted(ref)):
         self.assertAlmostEqual(a[0], e[0], 3)
         self.assertAlmostEqual(a[1], e[1], 3)
         self.assertAlmostEqual(a[2], e[2], 3)
         self.assertAlmostEqual(a[3], -e[3], 3)
Example #42
 def cropify(self):
     """Crops output images"""
     print "crop images"
     # Oh we are back!
     for fpath in self._outpaths():
         print "\t%s" % fpath
         im = Image.open(fpath)
         im = im.convert("RGBA")
         bg = Image.new("RGBA", im.size, (255, 255, 255, 255))
         diff = ImageChops.difference(im, bg)
         bbox = diff.getbbox()
         im2 = im.crop(bbox)
         im2.save(fpath)
Example #43
def estimate(file, s=5):
    """Estimates the amount of focus of an image file.
    Returns a real number: higher values indicate better focus.
    Bug: a high-contrast, blurry image can be considered with better focus
    than a low-contrast, perfectly focused image.
    """
    im = Image.open(file).convert("L")
    w, h = im.size
    box = (w / 2 - 50, h / 2 - 50, w / 2 + 50, h / 2 + 50)
    im = im.crop(box)
    imf = im.filter(ImageFilter.MedianFilter(s))
    d = ImageChops.subtract(im, imf, 1, 100)
    return ImageStat.Stat(d).stddev[0]
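A hypothetical call (the file name is a placeholder); the standard deviation of the median-filter residual grows with high-frequency detail, so sharper captures score higher:

score = estimate("capture.jpg")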
Example #44
def round_image(image, cache={}, round_all=True, rounding_type=None,
        radius=100, opacity=255, pos=ROUNDED_POS, back_color='#FFFFFF'):
    if image.mode != 'RGBA':
        image = image.convert('RGBA')

    if round_all:
        pos = 4 * (rounding_type, )

    mask = create_rounded_rectangle(image.size, cache, radius, opacity, pos)

    imtools.paste(image, Image.new('RGB', image.size, back_color), (0, 0),
        ImageChops.invert(mask))
    image.putalpha(mask)
    return image
Example #45
    def _open(self):

        s = self.fp.read(1)

        if ord(s[0]) != 255:
            raise SyntaxError("not a JPEG file")

        # Create attributes
        self.bits = self.layers = 0

        # JPEG specifics (internal)
        self.layer = []
        self.huffman_dc = {}
        self.huffman_ac = {}
        self.quantization = {}
        self.app = {}  # compatibility
        self.applist = []

        while 1:

            s = s + self.fp.read(1)

            i = i16(s)

            if MARKER.has_key(i):
                name, description, handler = MARKER[i]
                # print hex(i), name, description
                if handler is not None:
                    handler(self, i)
                if i == 0xFFDA:  # start of scan
                    rawmode = self.mode
                    # patch by Kevin Cazabon to comment this out - nobody should be using Photoshop 2.5 any more (and it breaks newer versions)
                    # CMYK images are still inverted, we'll fix that just before returning.
                    #if self.mode == "CMYK" and self.info.has_key("adobe"):
                    #    rawmode = "CMYK;I" # Photoshop 2.5 is broken!

                    self.tile = [("jpeg", (0, 0) + self.size, 0, (rawmode, ""))
                                 ]
                    # self.__offset = self.fp.tell()
                    break
                s = self.fp.read(1)
            elif i == 0 or i == 65535:
                # padded marker or junk; move on
                s = "\xff"
            else:
                raise SyntaxError("no marker found")

        # patch by Kevin Cazabon to re-invert CMYK JPEG files
        if self.mode == "CMYK":
            self.im = ImageChops.invert(self).im
Example #46
    def apply(self):
        keys = self._params.keys()
        if ('width' in keys or 'height' in keys) and not 'crop' in keys:
            if 'bgcolor' in keys:
                bgcolor = hex_to_color(self._params['bgcolor'])
            else:
                bgcolor = (255, 255, 255, 255)

            bg = Image.new(self._image.mode, self._image.size, bgcolor)
            diff = ImageChops.difference(self._image, bg)
            bbox = diff.getbbox()

            return self._image.crop(bbox)
        return self._image
Example #47
    def composite(self, destination, user_mask=None):
        """Composite ourselves into the destination image, generating
		all cached components as needed"""
        Logger.log.debug("Compositing '" + self.base_name() + "' at level " +
                         self.__class__.__name__)

        im = self.get_image()
        if destination.size != im.size:
            Logger.log.info("Rescaling image from " + str(im.size) + " to " +
                            str(destination.size))
            im = im.resize(destination.size, Image.BICUBIC)

        # Find any alpha layer in the image
        transp = None
        for band in zip(im.getbands(), im.split()):
            if band[0] == "A":
                Logger.log.debug("Found transparency layer")
                transp = band[1]

        # Decide what blending we will be doing
        if user_mask is None:
            if transp is None:
                # We have no concept of transparency at all -- use a
                # flat setting
                Logger.log.debug("Using flat mask")
                mask = Image.new("1", im.size, 1)
            else:
                # We have a transparency but no user mask
                Logger.log.debug("Using existing transparency")
                mask = transp
        else:
            if transp is None:
                # We have a mask but no transparency -- use the user's
                # mask
                Logger.log.debug("Using provided mask")
                mask = user_mask
            else:
                # If we have both a supplied mask and our own
                # transparency, use both -- where either is
                # transparent, set transparency (could use
                # ImageChops.multiply() instead?)
                Logger.log.debug("Using combination mask")
                mask = ImageChops.multiply(user_mask.convert("L"),
                                           transp.convert("L"))

        name = os.tmpnam() + ".png"
        Logger.log.debug("Saving mask as: " + name)
        mask.save(name)

        return Image.composite(im, destination, mask)
Example #48
def get_diff_feature(imagefile1, imagefile2):
    image1 = Image.open(imagefile1)
    image2 = Image.open(imagefile2)
    diff_pixel = 0
    diff = ImageChops.difference(image1, image2)
    if diff.getbbox():
        left, upper, right, lower = diff.getbbox()
    else:
        return 0
    pix = diff.load()  # load the pixel-access object once instead of per pixel
    for x in range(left, right):
        for y in range(upper, lower):
            if pix[x, y] != (0, 0, 0, 0):
                diff_pixel += 1
    return diff_pixel
Example #49
    def create(self):
        mask = ImageChops.subtract(self.white, self.black, -1, 255)
        maskgray = mask.convert("L")

        #out = self.black.convert("RGBA") # this doesn't premultiply alpha correctly?
        #this better?
        def divide(ch):
            env = {"val": ch, "mask": maskgray}
            return ImageMath.eval("val*255/mask", env).convert("L")

        out = channelMap(divide, self.black).convert("RGBA")

        out.putalpha(maskgray)
        return out
Example #50
 def cropImage(self, size):
     """ Cropping image method will crop image based on the provided new size
     @param size: the size of the new image
     @param type: tuple (width, height) 
     """
     if self.__im.mode == "P":
         self.__im = self.__im.convert("RGB")
     
     bgColor = (255, 255, 255)
     bg = Image.new("RGB", size, bgColor)
     diff = ImageChops.difference(self.__im, bg)
     bbox = diff.getbbox()
     if bbox:
         self.__im = self.__im.crop(bbox)
Example #51
def demo1():
    from PIL import Image
    import ImageChops

    path = "D:/code/image/image/"
    im1 = Image.open(path + "1.jpg")
    im2 = Image.open(path + "2.jpg")

    diff = ImageChops.difference(im2, im1)
    print diff

    im1.show()
    im2.show()
    diff.show()
Example #52
def root_mean_square_difference(im1, im2):
    try:

        diff = ImageChops.difference(
            im1, im2
        )  #Returns the absolute value of the difference between the two images
        h = diff.histogram()
        sq = (value * (idx**2) for idx, value in enumerate(h))
        sum_of_squares = sum(sq)
        rms = math.sqrt(sum_of_squares / float(im1.size[0] * im1.size[1]))
    except ValueError:
        return -1
    else:
        return rms
Example #53
    def runtools(self, filename, page, file1, file2, file12):

        badness = 0.0

        self.runtool(COMPARE[0], filename, page, file2)
        self.runtool(COMPARE[1], filename, page, file1)

        unlink(file12)

        pic1 = Image.open(file1)
        pic1.load()
        self.width1 = pic1.size[0]
        self.height1 = pic1.size[1]

        pic2 = Image.open(file2)
        pic2.load()
        self.width2 = pic2.size[0]
        self.height2 = pic2.size[1]

        if abs(self.width1 - self.width2) > 5 or abs(self.height1 - self.height2) > 5:
            badness += 65536 * abs(self.width2 - self.width1) * max(
                self.height1,
                self.height2) + 65536 * abs(self.height2 - self.height1) * max(
                    self.width1, self.width2)

        minx = min(self.width1, self.width2)
        miny = min(self.height1, self.height2)

        pic1 = pic1.crop((0, 0, minx, miny))
        pic1 = pic1.convert("RGB")
        pic1 = pic1.filter(ImageFilter.BLUR)
        pic2 = pic2.crop((0, 0, minx, miny))
        pic2 = pic2.convert("RGB")
        pic2 = pic2.filter(ImageFilter.BLUR)

        diffimage = ImageChops.difference(pic1, pic2)
        diffimage.save(file12, "PNG")

        # compute quadratical difference
        diff = diffimage.histogram()
        for i in range(1, 128):
            badness += (diff[i] + diff[256 - i]) * float(i * i)
            badness += (diff[256 + i] + diff[256 + 256 - i]) * float(i * i)
            badness += (diff[512 + i] + diff[512 + 256 - i]) * float(i * i)

        badness /= (minx * miny) * 3

        return badness
Example #54
def compare_images(path_one, path_two, diff_save_location):
    """
    Compares to images and saves a diff image, if there
    is a difference

    @param: path_one: The path to the first image
    @param: path_two: The path to the second image
    """
    image_one = Image.open(path_one)
    image_two = Image.open(path_two)

    diff = ImageChops.difference(image_one, image_two)

    if diff.getbbox():
        diff.save(diff_save_location)
Example #55
    def findClosestImageAndToneDiff(self, cmp_img):
        if len(self) == 1:
            return self[0].image, self._getToneDiff(get_tone(cmp_img), self[0].tone)

        record_fill = self[0]
        record_avg = sys.maxint

        for fill in self:
            diff_image = ImageChops.difference(cmp_img, fill.image)
            image_avg = sum(Stat(diff_image).mean)

            if image_avg < record_avg:
                record_fill = fill
                record_avg = image_avg

        return record_fill.image, self._getToneDiff(get_tone(cmp_img), record_fill.tone)
Example #56
 def cropImage(image, bgColor=(255, 255, 255)):
     """ cropping the give Image
     @param image: image file need to be cropped
     @type image: String
     @param bgColor: optional background color for the image
     @type bgColor: tuple of three
     @rtype: Image or None 
     @return: image data if successfully cropped or None if not
     """
     if image.mode != "RGB":
         image = image.convert("RGB")
     bg = Image.new("RGB", image.size, bgColor)
     diff = ImageChops.difference(image, bg)
     bbox = diff.getbbox()
     if bbox:
         return image.crop(bbox)
     return None
Example #57
    def refresh(self, image, bbox=None):

        if bbox == None:
            # find the difference from the current framebuffer
            bbox = ImageChops.difference(image, self.image).getbbox()

        if bbox != None:

            (left, top, right, bottom) = bbox

            # send the updated rows
            update = image.crop((0, 0, self.display.W, bottom))
            update = update.tostring()
            self.write(update, 0)

            # update the framebuffer
            self.image = image
Example #58
def neglaplacian(img):
    if img.mode != "RGB":
        img = img.convert("RGB")
    newimg = Image.new(img.mode, img.size, None)
    width, height = img.size

    mask = {}
    mask[(0, 0)] = 0
    mask[(0, 1)] = 1
    mask[(0, 2)] = 0
    mask[(1, 0)] = 1
    mask[(1, 1)] = -4
    mask[(1, 2)] = 1
    mask[(2, 0)] = 0
    mask[(2, 1)] = 1
    mask[(2, 2)] = 0

    for row in range(1, width - 1, 1):
        for col in range(1, height - 1, 1):
            pixelmask = ct = 0
            for i in range(0, 3, 1):
                for j in range(0, 3, 1):
                    r, g, b = img.getpixel((row + i - 1, col + j - 1))
                    value = (r + g + b) / 3
                    c = value * mask[(i, j)]
                    ct = ct + c

            newimg.putpixel((row + 1, col + 1), (ct, ct, ct))
    img.show()
    newimg.show()
    newimg.save("faceneg6.jpg")
    #newimg = ImageChops.invert(newimg)
    newim = ImageChops.difference(img, newimg)
    newim.show()
    width, height = newim.size
    for i in range(width):
        for j in range(height):
            r, g, b = newim.getpixel((i, j))
            c = (r + g + b) / 3
            if c > 70.6:
                c = 255
            else:
                c = 0
            newim.putpixel((i, j), (c, c, c))
    newim.show()
    newim.save("fce2logg.jpg")
Example #59
def draw_bins(filename, sourceImage, nx, ny, bgaArray):
    """
    Draw an image containing the bins and detected ball positions, for
    user verification.
    
    Parameters:
    * filename: filename to use for saving processed image
    * sourceImage: source image that was processed to extract the bga array
    * nx: number of bins on X axis in image
    * ny: number of bins on Y axis in image
    * bgaArray: boolean array of occupied ball positions
    """
    # Get a new drawing context
    newImage = sourceImage.copy()
    newImage = ImageChops.invert(newImage).convert("RGB")
    gc = ImageDraw.Draw(newImage)
    sx, sy = newImage.size

    # Draw a circle in every detected pad bin
    for py in range(ny):
        for px in range(nx):
            xmin, ymin, xmax, ymax = get_bin_bounds(sourceImage, nx, ny, px,
                                                    py)
            gc.line([(0, ymax), (sx, ymax)], fill="blue")
            gc.line([(xmax, 0), (xmax, sy)], fill="blue")

            width = (xmax - xmin) + 1
            height = (ymax - ymin) + 1

            xmin2 = xmin + round(float(width) * 0.4)
            xmax2 = xmin + round(float(width) * 0.6)
            ymin2 = ymin + round(float(height) * 0.4)
            ymax2 = ymin + round(float(height) * 0.6)

            if bgaArray[py, px]:
                gc.ellipse((xmin2, ymin2, xmax2, ymax2),
                           outline="red",
                           fill="red")
    del gc

    try:
        newImage.save(filename)
    except IOError:
        return None

    return filename
Example #60
    def run(self):
        while 1:
            screen_img = ssw.ScreenCapture((0, 0), (640, 480))
            _img = imagetopil(screen_img.ConvertToImage())
            time.sleep(0.1)

            if CONNECTION and CONNECTION.next_state == 'command_dispatcher':

                screen_img = ssw.ScreenCapture((0, 0), (640, 480))
                img = imagetopil(screen_img.ConvertToImage())

                diff = ImageChops.difference(img, _img)
                _b = diff.getbbox()

                if _b:
                    x_pos1, y_pos1, x_pos2, y_pos2 = diff.getbbox()
                    _diff = diff.crop((x_pos1, y_pos1, x_pos2, y_pos2))
                    CONNECTION.update_frame_buffer(_diff, x_pos1, y_pos1)