Example #1
    def getPygImage(self, th = False, color = 0):
        img = self.getImage()
        w = self.width
        h = self.height
        if False == th:
            m = self.mode
            d = img.tostring()
            p = self.pitch
        else:
            th = 255.0 * th
            r, g, b = img.split()
            if 0 == color:
                r = Image.eval(r, lambda i: 255 if i < th else 0)
                g = Image.eval(g, lambda i: 255 if i > th else 0)
                b = Image.eval(b, lambda i: 255 if i > th else 0)
            elif 1 == color:
                r = Image.eval(r, lambda i: 255 if i > th else 0)
                g = Image.eval(g, lambda i: 255 if i < th else 0)
                b = Image.eval(b, lambda i: 255 if i > th else 0)
            else:
                r = Image.eval(r, lambda i: 255 if i > th else 0)
                g = Image.eval(g, lambda i: 255 if i > th else 0)
                b = Image.eval(b, lambda i: 255 if i < th else 0)

            m = img.mode
            i = Image.merge(m, (r, g, b))
            d = i.tostring()
            p = -1 * w * len(m)
        return pyglet.image.ImageData(w, h, m, d, p)
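For reference, a minimal standalone sketch of the per-channel threshold pattern used above, written against modern Pillow (the snippet itself targets classic PIL plus pyglet); the file names are placeholders.

from PIL import Image

# Keep red where the pixel is below the threshold, green/blue where it is above.
img = Image.open("sample.png").convert("RGB")      # hypothetical input
th = 255.0 * 0.5
r, g, b = img.split()
r = Image.eval(r, lambda i: 255 if i < th else 0)
g = Image.eval(g, lambda i: 255 if i > th else 0)
b = Image.eval(b, lambda i: 255 if i > th else 0)
Image.merge("RGB", (r, g, b)).save("highlighted.png")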
Example #2
    def draw_page(self):
        im = self.get_background()

        page = self.book.pages[self.current_page]

        for line in page.lines:
            line_image = line.get_image(color=0x91)
            line_image_shadow = line.get_image(color=0x9C)
            if line_image is None:
                continue
            x, y = line.get_pos()
            x += self.left_margin
            y += self.top_margin
            mask = line_image.copy()
            mask = Image.eval(mask, lambda a: 255 if a != 0 else 0)
            im.paste(line_image_shadow, (x + 1, y + 1), mask=mask)
            im.paste(line_image, (x, y), mask=mask)

        im = im.resize((640, 400), Image.NEAREST)

        # Keep the old label and destroy it afterwards to reduce flickering
        oldlabel = self.tklabel

        self.tkimage = ImageTk.PhotoImage(im)
        self.tklabel = Label(self.root, image=self.tkimage)

        self.tklabel.pack()
        if oldlabel is not None:
            oldlabel.destroy()
Example #3
File: views.py Project: koval/ocr
    def post(self, request):
        # get image data in PNG format from request post body
        data = request.POST['i']
        data = base64.decodestring(data.split(',', 1)[-1])

        # resize, convert and invert image
        img = Image.open(StringIO(data))
        img = img.resize((SIZE, SIZE))
        img = img.convert('L')
        img = Image.eval(img, lambda p: 255-p)

        # store image data in a global variable to see it in a browser on GET request
        global IMG
        out = StringIO()
        img.save(out, 'JPEG')
        IMG = out.getvalue()

        global NN
        if NN is None and os.path.exists(PICKLE_FILE):
            NN = pickle.load(file(PICKLE_FILE, 'rb'))

        if NN is not None:
            # get array of image pixels
            pixels = list(img.getdata())

            X = numpy.zeros([32,32,1], dtype='f4', order='F')
            X[2:30,2:30,0] = numpy.array(pixels).reshape(SIZE, SIZE)
            X /= 100.0

            NN.forward(X)
            num = numpy.argmax(NN.Y)
            return HttpResponse(str(num))
        else:
            return HttpResponse('?')
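A possible client for this view, assuming the requests library and a hypothetical URL; since the view splits on the first comma, a data-URI prefix is accepted.

import base64
import requests  # assumption: any HTTP client would do

with open("digit.png", "rb") as f:                     # hypothetical input image
    payload = "data:image/png;base64," + base64.b64encode(f.read()).decode()
print(requests.post("http://localhost:8000/ocr/", data={"i": payload}).text)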
Example #4
def generate_image(text, font_file, size, colour, image_destination):
    """
    Text transform based on:
    http://nedbatchelder.com/blog/200801/truly_transparent_text_with_pil.html
    http://phark.typepad.com/phark/2003/08/accessible_imag.html
    """
    pos = (0,0)
    image = Image.new("RGB", (1, 1), (0,0,0))
    font = ImageFont.truetype(font_file, size)
    image = image.resize(font.getsize(text))
    alpha = Image.new("L", image.size, "black")
    # Make a grayscale image of the font, white on black.
    imtext = Image.new("L", image.size, 0)
    drtext = ImageDraw.Draw(imtext)
    drtext.text(pos, text, font=font, fill="white")
    # Add the white text to our collected alpha channel. Gray pixels around
    # the edge of the text will eventually become partially transparent
    # pixels in the alpha channel.
    alpha = ImageChops.lighter(alpha, imtext)
    # Make a solid colour, and add it to the colour layer on every pixel
    # that has even a little bit of alpha showing.
    solidcolour = Image.new("RGBA", image.size, colour)
    immask = Image.eval(imtext, lambda p: 255 * (int(p != 0)))
    image = Image.composite(solidcolour, image, immask)
    # These two save()s are just to get demo images of the process.
    #image.save("transcolour.png", "PNG")
    #alpha.save("transalpha.png", "PNG")
    # Add the alpha channel to the image, and save it out.
    image.putalpha(alpha)
    image.save(image_destination, 'PNG')
    return image
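A hypothetical invocation; the font path, colour, and destination are placeholders. Note that font.getsize() used above is the classic PIL / older-Pillow API.

generate_image("Sample text",
               "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",  # placeholder font
               24, (200, 30, 30), "sample_text.png")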
Example #5
 def Inverse(self,event=None): 
         if(self.filename==""): 
                 return 
         self.image=Image.eval(self.image,lambda x:256-x) 
         self.label.image=ImageTk.PhotoImage(self.image) 
         Label.__init__(self.label,self.root,image=self.label.image,bd=0) 
         self.label.pack()
Example #6
    def reduce (self, quality=30, ) :
        _fn = StringIO.StringIO()

        # reset the quality for JPEG.
        if self._im.format in ("JPG", "JPEG", ) :
            _fn.name = "1.jpg"
            self._im.save(_fn, quality=quality, )
        elif self._im.format in ("PNG", ) :
            if "A" not in self._im.mode :
                _fn.name = "1.jpg"
                self._im.save(_fn, quality=quality, )
            else :
                _fn.name = "1.gif"
                _mask = Image.eval(self._im.split()[3], lambda a: 255 if a <=128 else 0)
                self._im.convert("RGB").convert(
                    "P", palette=Image.ADAPTIVE, colors=255,
                ).paste(255, _mask, )

                self._im.save(_fn, transparency=255, )
        else :
            _fn.name = self._fn.name
            self._im.save(_fn, )

        if len(_fn.getvalue()) > len(self._fn.getvalue()) :
            _fn = StringIO.StringIO(self._fn.getvalue(), )
            _fn.name = self._fn.name
            
        _fn.seek(0, 0, )
        return (
            _fn,
            self._get_mimetype(_fn.name, ),
        )
Example #7
def draw_marker(width, height, offset, filename):
    im = Image.new('L', (128, 256), 255)
    draw = ImageDraw.Draw(im)

    cx = (im.size[0] / 2) + 1.5
    cy = (im.size[1] / 4) + 1.5
    radius = width * 4.0 / 2.0
    offset = int(offset * 4.0)
    for thickness, colour in ((0, 0x60), (4, 0xE0)):
        ol = im.copy()
        r = radius + 0.5 - thickness
        draw.ellipse((cx - r, cy - r, cx + r, cy + r), fill=colour)
        x1 = offset + 0.5 - (thickness * 1.5)
        y1 = (height * 4.0) + 0.5 - (width * 2.0) - (thickness * 2.0)
        draw.polygon((cx + 0.5, cy + y1, cx - 0.5, cy + y1,
                      cx - x1, cy + 16.5, cx + x1, cy + 16.5),
                     fill=colour)
    del draw

    im = im.resize((im.size[0] / 4, im.size[1] / 4), Image.ANTIALIAS)
    ol = ol.resize(im.size, Image.ANTIALIAS)
    mask = Image.eval(ol, lambda x: (0, 255)[x < 210])
    im = Image.composite(im, ImageChops.constant(im, 0), mask)
    im = im.crop(im.getbbox())
    im.save(filename, transparency=0)
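A hypothetical call, drawing a marker 10 units wide and 20 tall with a 3-unit point offset; as written, the snippet assumes Python 2 division semantics and the classic Image.ANTIALIAS constant.

draw_marker(10, 20, 3, "marker.png")   # output file name is a placeholder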
Example #8
    def __init__(self, inputFileName):

        self.inputImages = []
        self.paths = []
        self.savePath = savePath

        with open(inputFileName, "r") as file:
            for i, line in enumerate(file):
                if i < 2:
                    self.inputImages.append(Image.open(basePath + line.strip()))
                    self.paths.append(line.strip().split("/"))
                elif len(line.strip()):
                    if i == 2:
                        self.savePath = line.strip()

        cleanRef = Image.open("DATA/white.png")
        self.inputImages.append( Image.eval(cleanRef, lambda x: 0 if x==255 else x) )

        # im.show()
        self.currSize = self.inputImages[0].size
       # print("%d x %d" % self.currSize)

        self.bbox = (2, yShift, self.currSize[0] - xShift, self.currSize[1] - 2)
        self.regions = [self.inputImages[i].crop(self.bbox) for i in range(len(self.inputImages))]
       # self.refRegion = cleanRef.crop(bbox)

        self.finalImage = Image.new("RGB", size = (self.currSize[0] * 2, self.currSize[1] * 2), color = mapBackgroundColor)

        for i in range(2):
            self.finalImage.paste(self.inputImages[i], (0, self.currSize[1] * i))

        self.font = ImageFont.truetype("DATA/barial.ttf",115)
Example #9
def draw_marker(width, height, offset, filename):
    im = Image.new('L', (128, 256), 255)
    draw = ImageDraw.Draw(im)

    cx = (im.size[0] / 2) + 1.5
    cy = (im.size[1] / 4) + 1.5
    radius = width * 4.0 / 2.0
    offset = int(offset * 4.0)
    for thickness, colour in ((0, 0x60), (4, 0xE0)):
        ol = im.copy()
        r = radius + 0.5 - thickness
        draw.ellipse((cx - r, cy - r, cx + r, cy + r), fill=colour)
        x1 = offset + 0.5 - (thickness * 1.5)
        y1 = (height * 4.0) + 0.5 - (width * 2.0) - (thickness * 2.0)
        draw.polygon((cx + 0.5, cy + y1, cx - 0.5, cy + y1, cx - x1, cy + 16.5,
                      cx + x1, cy + 16.5),
                     fill=colour)
    del draw

    im = im.resize((im.size[0] / 4, im.size[1] / 4), Image.ANTIALIAS)
    ol = ol.resize(im.size, Image.ANTIALIAS)
    mask = Image.eval(ol, lambda x: (0, 255)[x < 210])
    im = Image.composite(im, ImageChops.constant(im, 0), mask)
    im = im.crop(im.getbbox())
    im.save(filename, transparency=0)
Example #10
    def draw_page(self):
        im = self.get_background()

        page = self.book.pages[self.current_page]

        for line in page.lines:
            line_image = line.get_image(color=0x91)
            line_image_shadow = line.get_image(color=0x9C)
            if line_image is None:
                continue
            x, y = line.get_pos()
            x += self.left_margin
            y += self.top_margin
            mask = line_image.copy()
            mask = Image.eval(mask, lambda a: 255 if a != 0 else 0)
            im.paste(line_image_shadow, (x+1, y+1), mask=mask)
            im.paste(line_image, (x, y), mask=mask)

        im = im.resize((640, 400), Image.NEAREST)

        # Keep the old label and destroy it afterwards to reduce flickering
        oldlabel = self.tklabel

        self.tkimage = ImageTk.PhotoImage(im)
        self.tklabel = Label(self.root, image=self.tkimage)

        self.tklabel.pack()
        if oldlabel is not None:
            oldlabel.destroy()
Example #11
def WritePic(img, FrameNum):
	imgsolid = Image.new("RGB", (1024, 768), "#9a7e2a")
	bg = Image.new("RGB", (1024, 768), 0)
	imgmask = Image.eval(img, lambda p: 255*(int(p > 60)))
	#imgmask.save("mask.png", "PNG")
	#img.save("orig.png", "PNG")
	imgcomp = Image.composite(imgsolid, bg, imgmask)
	imgcomp.save("images/frame%03d.png" % (FrameNum), "PNG")
Example #12
def invertImg(image):

	im = Image.fromarray(np.uint8(image))
	#plotImg(np.array(im))
	maxval = np.max(im)
	inverted = Image.eval(im, lambda x: maxval - x)
	inverted = np.array(inverted)
	#plotImg(inverted)
	return inverted
Example #13
def movementDetector(prev, current) :
	# measure the difference
	step = ImageChops.difference(prev, current)
	# join channels
	step = step.convert("L")
	# thresholding
	step = Image.eval(step, lambda p: 100 if p>10 else 0)
	# compositing with current
	step = Image.composite(red, current, step)
	return step
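movementDetector reads a module-level red overlay; a possible driver, with hypothetical frame names:

from PIL import Image, ImageChops

prev = Image.open("frame_000.png").convert("RGB")    # hypothetical frames
current = Image.open("frame_001.png").convert("RGB")
red = Image.new("RGB", current.size, (255, 0, 0))    # the overlay the function expects
movementDetector(prev, current).save("motion.png")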
Example #14
def collage():  
  shootphoto()
  #make blur of base 
  base = setbase()
  blurbase = base.filter(ImageFilter.BLUR)
  print 'base is blurred'
  collage = base
  print 'collage set to base'

  mainloop = 0
  while mainloop < 50:
    print 'mainloop is', mainloop
    try:  
      checkbuttons()
      shootphoto()
      checkbuttons()
      newphoto = openphoto()
      print 'new photo shot'
      #blur the new photo
      blurphoto = newphoto.filter(ImageFilter.BLUR)
      checkbuttons()
      print 'new photo blurred'
      #make alphachannel
      alphachannel = ImageChops.difference(blurbase, blurphoto)
      checkbuttons()
      #alphachannel.save('alphadif%s.jpg' %numgen.imgno)
      alphachannel = ImageOps.grayscale(alphachannel)
      checkbuttons()
      #alphachannel.save('alphagre%s.jpg' %numgen.imgno)
      alphachannel = Image.eval(alphachannel, lambda px:0 if px <15 else 255)
      checkbuttons()
      print 'alphachannel generated'
      alphachannel.save('alphaeval%s.jpg' %numgen.imgno)
      #optional alphachannel blur
      alphachannel = alphachannel.filter(ImageFilter.BLUR)
      checkbuttons()
      alphachannel.convert('1')
    
      #use alphachannel to mask newphoto over oldcollage
      collage.paste(newphoto, None, alphachannel)
      collage.save('collage%s.jpg' %numgen.imgno)
      checkbuttons()
      
      #-----  done making new collage   

      mainloop = mainloop + 1      

      # --- update display
      showimageandtext(numgen.imgno, ' Most recent collage')
      
      time.sleep(3)
      
    except Exception, e:
      print 'program problem in collage loop', e
Example #15
    def save(self, filename):
        # draw a "zero" line
        a = 25
        for x in range(self.image_width):
            self.pix[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pix[x, self.image_height/2]))

        alpha = self.image.split()[0]
        self.image = self.image.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors=255)
        mask = Image.eval(alpha, lambda a: 255 if a <=128 else 0)
        self.image.paste(255, mask)

        self.image.save(filename,transparency=255)
Example #16
    def generate_transition_list_from_zones(self,image,regionlist,column_bounds,left,middle):
        """ given the pair of zone lists, generate a comprehensive list

        We should then be able to merge these sets of split information:
        anything where we find solid black or halftone is a definite break
        which may be followed either by another black or halftone area, by
        a description area, or by a vote area.
        """
        ccontest_default = "No current contest"
        ccontest = ccontest_default
        cjurisdiction_default = "No current jurisdiction"
        cjurisdiction = cjurisdiction_default
        contest_instance = None
        for n in range(len(left)):
            this_y = left[n][0]
            try:
                next_zone = left[n+1]
            except IndexError:
                next_zone = [0,'X']
            next_y = next_zone[0]
            rel_end = next_y - (const.dpi/10)
            if left[n][1]=='B' or left[n][1]=='G':
                self.log.debug("%s zone at %d to %d %s" % (left[n][1],
                                                           this_y,
                                                           next_y,
                                                           next_zone))
                # if it's a legitimate gray zone and the next zone is white,
                # that white zone is a voting area (or empty)
                if (next_y - this_y) > (const.dpi/4):
                    crop = image.crop((column_bounds[0],
                                       this_y,
                                       column_bounds[1],
                                       next_y))
                    crop = Image.eval(crop,elim_halftone)
                    cjurisdiction = ocr.tesseract(crop)
                    cjurisdiction = cjurisdiction.replace("\n","//").strip()
                    self.log.debug( "Jurisdiction %s" % (cjurisdiction,))
                    cjurisdiction = ocr.clean_ocr_text(cjurisdiction)
                    self.log.debug( "Cleaned Jurisdiction %s" % (cjurisdiction,))
            if left[n][1]=='W':
                self.get_title_and_votes_from(image,regionlist,
                                         (column_bounds[0],
                                          this_y,
                                          column_bounds[1],
                                          next_y))
                self.log.debug( "White zone at %d to %d %s" % (this_y,next_y,next_zone))
        # filter regionlist to contain only contests with choices
        regionlist = [x for x in regionlist if len(x.choices)>0]
        return regionlist
Example #17
    def generate_transition_list_from_zones(self,image,regionlist,column_bounds,left,middle):
        """ given the pair of zone lists, generate a comprehensive list

        We should then be able to merge these sets of split information:
        anything where we find solid black or halftone is a definite break
        which may be followed either by another black or halftone area, by
        a description area, or by a vote area.
        """
        ccontest_default = "No current contest"
        ccontest = ccontest_default
        cjurisdiction_default = "No current jurisdiction"
        cjurisdiction = cjurisdiction_default
        contest_instance = None
        for n in range(len(left)):
            this_y = left[n][0]
            try:
                next_zone = left[n+1]
            except IndexError:
                next_zone = [0,'X']
            next_y = next_zone[0]
            rel_end = next_y - (const.dpi/10)
            if left[n][1]=='B' or left[n][1]=='G':
                self.log.debug("%s zone at %d to %d %s" % (left[n][1],
                                                           this_y,
                                                           next_y,
                                                           next_zone))
                # if it's a legitimate gray zone and the next zone is white,
                # that white zone is a voting area (or empty)
                if (next_y - this_y) > (const.dpi/4):
                    crop = image.crop((column_bounds[0],
                                       this_y,
                                       column_bounds[1],
                                       next_y))
                    crop = Image.eval(crop,elim_halftone)
                    cjurisdiction = ocr.tesseract(crop)
                    cjurisdiction = cjurisdiction.replace("\n","//").strip()
                    self.log.debug( "Jurisdiction %s" % (cjurisdiction,))
                    cjurisdiction = ocr.clean_ocr_text(cjurisdiction)
                    self.log.debug( "Cleaned Jurisdiction %s" % (cjurisdiction,))
            if left[n][1]=='W':
                self.get_title_and_votes_from(image,regionlist,
                                         (column_bounds[0],
                                          this_y,
                                          column_bounds[1],
                                          next_y))
                self.log.debug( "White zone at %d to %d %s" % (this_y,next_y,next_zone))
        # filter regionlist to contain only contests with choices
        regionlist = [x for x in regionlist if len(x.choices)>0]
        return regionlist
Example #18
    def save(self, filename):
        # draw a "zero" line
        a = 25
        for x in range(self.image_width):
            self.pix[x, self.image_height / 2] = tuple(
                map(lambda p: p + a, self.pix[x, self.image_height / 2]))

        alpha = self.image.split()[0]
        self.image = self.image.convert('RGB').convert('P',
                                                       palette=Image.ADAPTIVE,
                                                       colors=255)
        mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
        self.image.paste(255, mask)

        self.image.save(filename, transparency=255)
Example #19
  def draw_text(self, text, position=(0, 0), color='black', font=None,
                font_size=12, rotation=0, **kwargs):
    """Draws a text on the base image."""
    font = self.font(font_size)

    text_image = Image.new('L', self.dimensions, 'black')
    draw_text_image = ImageDraw.Draw(text_image)
    draw_text_image.text(position, text, font=font, fill='white')

    alpha = Image.new('L', self.dimensions)
    alpha = ImageChops.lighter(alpha, text_image)

    solidcolor = Image.new('RGBA', self.dimensions, color)
    image_mask = Image.eval(text_image, lambda p: 255 * (int(p != 0)))
    self.base_image = Image.composite(solidcolor, self.base_image, image_mask)
    self.base_image.putalpha(alpha)
Example #20
def palettize(im, ncol):
    """ Given an input image or list of images, convert to a palettized version using at most ``ncol`` colors.
    This function preserves transparency: if the input(s) have transparency then the returned
    image(s) have ``.info['transparency']`` set to the transparent color.

    If ``im`` is a single image, returns a single image.  If ``im`` is a list of images, returns a list of images.
    """

    assert ncol in (4, 16, 256)
    if isinstance(im, list):
        # For a list of images, paste them all into a single image,
        # palettize the single image, then return cropped subimages

        for i in im:
            i.load()
        (ms, lpos) = glom([i.size for i in im])
        master = Image.new(im[0].mode, ms)
        for i, ps in zip(im, lpos):
            master.paste(i, ps)
        master = palettize(master, ncol)
        ims = [master.crop(ps) for ps in lpos]
        for i in ims:
            i.info = master.info
        return ims
    else:
        im.load()

    if im.mode == 'P':
        if ord(max(im.tostring())) < ncol:
            return im  # already done
        if 'transparency' in im.info:
            im = im.convert("RGBA")
        else:
            im = im.convert("RGB")
    assert im.mode in ("RGBA", "RGB")
    if im.mode == "RGB":
        return im.convert('P', palette=Image.ADAPTIVE, colors=ncol)
    else:
        alpha = im.split()[3]
        mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
        im.paste((0, 0, 0), mask)
        im = im.convert('RGB').convert('P',
                                       palette=Image.ADAPTIVE,
                                       colors=(ncol - 1))
        im.paste(ncol - 1, mask)
        im.info['transparency'] = ncol - 1
        return im
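A possible use, assuming an RGBA source image (the snippet's 'P'-mode branch relies on the classic tostring() API) and that glom() is provided by the surrounding module; file names are placeholders.

from PIL import Image

sprite = Image.open("sprite.png").convert("RGBA")
pal = palettize(sprite, 16)
pal.save("sprite_p.png")   # PNG save should pick up pal.info['transparency']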
Example #21
def iconify(im):
    """
    sometimes there can be hundreds of subtly different pixel
    representations of the same characters, thanks to anti-aliasing.
    This is an attempt to reduce the complexity by converting
    each character to a 5x5 2-bit icon.
    
    ( Not an entirely awful idea, but only a first step. 
      What you really want to do is map this to a vector 
      search engine. )
      
    """

    def cutoff(pixel):
        return 255 if pixel > 240 else 0

    return Image.eval(im.resize((5, 5)), cutoff).convert("1")
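A possible use with a hypothetical glyph crop:

from PIL import Image

glyph = Image.open("glyph.png").convert("L")   # hypothetical single-character crop
icon = iconify(glyph)                          # 5x5, mode "1"
print(list(icon.getdata()))                    # 25 values, each 0 or 255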
Example #22
def fill_transparent(image, color, threshold=0):
	"""Fill transparent image parts with the specified color
	"""
	def quantize_and_invert(alpha):
		if alpha <= threshold:
			return 255
		return 0
	# Get the alpha band from the image
	if image.mode == 'RGBA':
		red, green, blue, alpha = image.split()
	elif image.mode == 'LA':
		gray, alpha = image.split()
	# Set all pixel values below the given threshold to 255,
	# and the rest to 0
	alpha = Image.eval(alpha, quantize_and_invert)
	# Paste the color into the image using alpha as a mask
	image.paste(color, alpha)
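A possible use, flattening an RGBA image onto white; the file names are placeholders.

from PIL import Image

logo = Image.open("logo.png").convert("RGBA")
fill_transparent(logo, (255, 255, 255))       # paint fully transparent pixels white
logo.convert("RGB").save("logo_on_white.jpg")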
Example #23
def fill_transparent(image, color, threshold=0): 
	"""Fill transparent image parts with the specified color 
	"""
	def quantize_and_invert(alpha):
		if alpha <= threshold:
			return 255
		return 0
	# Get the alpha band from the image
	if image.mode == 'RGBA':
		red, green, blue, alpha = image.split()
	elif image.mode == 'LA':
		gray, alpha = image.split()
	# Set all pixel values below the given threshold to 255,
	# and the rest to 0
	alpha = Image.eval(alpha, quantize_and_invert)
	# Paste the color into the image using alpha as a mask
	image.paste(color, alpha)
Example #24
def palettize(im, ncol):
    """ Given an input image or list of images, convert to a palettized version using at most ``ncol`` colors.
    This function preserves transparency: if the input(s) have transparency then the returned
    image(s) have ``.info['transparency']`` set to the transparent color.

    If ``im`` is a single image, returns a single image.  If ``im`` is a list of images, returns a list of images.
    """
    
    assert ncol in (4, 16, 256)
    if isinstance(im, list):
        # For a list of images, paste them all into a single image,
        # palettize the single image, then return cropped subimages

        for i in im:
            i.load()
        (ms, lpos) = glom([i.size for i in im])
        master = Image.new(im[0].mode, ms)
        for i,ps in zip(im, lpos):
            master.paste(i, ps)
        master = palettize(master, ncol)
        ims = [master.crop(ps) for ps in lpos]
        for i in ims:
            i.info = master.info
        return ims
    else:
        im.load()
        
    if im.mode == 'P':
        if ord(max(im.tostring())) < ncol:
            return im # already done
        if 'transparency' in im.info:
            im = im.convert("RGBA")
        else:
            im = im.convert("RGB")
    assert im.mode in ("RGBA", "RGB")
    if im.mode == "RGB":
        return im.convert('P', palette=Image.ADAPTIVE, colors=ncol)
    else:
        alpha = im.split()[3]
        mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
        im.paste((0,0,0), mask)
        im = im.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors = (ncol - 1))
        im.paste(ncol - 1, mask)
        im.info['transparency'] = ncol - 1
        return im
Example #25
    # Sort the colours used on this line, with the most common first
    imline = im128.crop(line)
    lcols = map(lambda x: x[1], sorted(imline.getcolors(), reverse=True))

    # Build the line palette from the static colours and as many extra
    # line colours, taking care to avoid duplicates
    cols = uniquified(scols + lcols)[:16]
    cols += cols[:1] * (16 - len(cols))

    # Create a map to preserve matched colours, with others set to 255
    colset = set(cols)
    newpal = map(lambda x: (x if x in colset else 255), range(256))

    # Create a mask from the map, used to select non-matched line pixels
    immask = imline.point(newpal)
    immask = Image.eval(immask, lambda x: 127 if x==255 else 0).convert("1")
    matched += immask.getcolors()[0][0]

    # Re-convert the line image using the line-specific palette
    # This provides nearest alternatives for non-matched pixels
    imline = imline.convert("RGB")
    impal.putpalette(make_palette(cols))
    imline16 = imline.quantize(palette=impal).crop((0,0,w,1))

    # Paste the alternative pixels using the mask
    imline.paste(imline16, immask)
    imham.paste(imline, line)

    # Re-convert the new line (lossless) to get a palettised image
    imline = imline.quantize(palette=impal)
Example #26
    Time = End - Start
    if Time <0:
        # Adds a day (in MS) if the end time falls in the next day.
        Time = End + (3600000*24) - Start

    # Converts to greyscale and crops to remove timestamp. Timestamp must not be visible.
    im1 = (Image.open(filelist[a]).convert("L")).crop(box)
    im2 = (Image.open(filelist[b]).convert("L")).crop(box)
    im3 = (Image.open(filelist[c]).convert("L")).crop(box)

    # compares the photos and makes new pictures where changed pixels are white and
    # unchanged are black.
    diff2 = ImageChops.difference(im1, im2)
    diff3 = ImageChops.difference(im1, im3)
    EvalPic2 = ImageChops.invert(Image.eval(diff2, lambda px: px <= Sensitivity and 255 or 0))
    EvalPic3 = ImageChops.invert(Image.eval(diff3, lambda px: px <= Sensitivity and 255 or 0))

    # Saves copies of the above photos if needed for testing.
    if TestSensitivity == 1:
        EvalPic2.save("Test2_" + filelist[b], quality=100)
        EvalPic3.save("Test3_" + filelist[b], quality=100)

    # Finds the difference in x-axis coordinates of the leading edge of each photo.
    # If the object is moving left, the difference in left leading edges will be greater.
    # If the object is moving right, the difference in right leading edges will be greater.
    # This is because the trailing side of the photo is always the same in each photo,
    # it is where the object was in picture 1 (or picture a).
    L = EvalPic2.getbbox()[0] - EvalPic3.getbbox()[0]
    R = EvalPic3.getbbox()[2] - EvalPic2.getbbox()[2]
    Speed = max(L,R)
Example #27
    def generate_transition_list_from_zones(self, image, regionlist,
                                            column_bounds, left, middle):
        """ given the pair of zone lists, generate a comprehensive list

        We should then be able to merge these sets of split information:
        anything where we find solid black or halftone is a definite break
        which may be followed either by another black or halftone area, by
        a description area, or by a vote area.
        """
        ccontest_default = "No current contest"
        ccontest = ccontest_default
        cjurisdiction_default = "No current jurisdiction"
        cjurisdiction = cjurisdiction_default
        contest_instance = None
        next_white_is_votearea = False
        this_white_is_votearea = False
        next_white_is_yesno = False
        this_white_is_yesno = False
        for n in range(len(left)):
            this_white_is_votearea = False
            if next_white_is_votearea == True:
                this_white_is_votearea = True
                next_white_is_votearea = False
            this_white_is_yesno = False
            if next_white_is_yesno == True:
                this_white_is_yesno = True
                next_white_is_yesno = False
            this_y = left[n][0]
            try:
                next_zone = left[n + 1]
            except IndexError:
                next_zone = [0, 'X']
            next_y = next_zone[0]
            rel_end = next_y - (const.dpi / 10)
            if left[n][1] == 'B':
                self.log.debug("Black zone at %d to %d %s" %
                               (this_y, next_y, next_zone))
                # if it's a legitimate black zone and the next zone is white,
                # that white zone is a Yes/No Vote Area (or empty)
                if (next_y - this_y) > (const.dpi / 4):
                    next_white_is_yesno = True
                    # this zone becomes the current Jurisdiction
                    crop = image.crop(
                        (column_bounds[0], this_y, column_bounds[1], next_y))
                    cjurisdiction = self.extensions.ocr_engine(crop)
                    self.log.debug("Jurisdiction %s" % (cjurisdiction, ))
                    cjurisdiction = self.extensions.ocr_cleaner(cjurisdiction)
                    cjurisdiction = cjurisdiction.replace("\n", "//").strip()
                    self.log.debug("Cleaned Jurisdiction %s" %
                                   (cjurisdiction, ))
                    # and the current contest is set
                    # from the descriptive text
                    # at the start of the Yes No Vote area
            if left[n][1] == 'G':
                self.log.debug("Gray zone at %d to %d %s" %
                               (this_y, next_y, next_zone))
                # if it's a legitimate gray zone and the next zone is white,
                # that white zone is a voting area (or empty)
                if (next_y - this_y) > (const.dpi / 2):
                    next_white_is_votearea = True
                    crop = image.crop(
                        (column_bounds[0], this_y, column_bounds[1], next_y))
                    crop = Image.eval(crop, elim_halftone)
                    ccontest = self.extensions.ocr_engine(crop)
                    ccontest = ccontest.replace("\n", "//").strip()
                    self.log.debug("Contest %s" % (ccontest, ))
                    ccontest = self.extensions.ocr_cleaner(ccontest)
                    self.log.debug("Cleaned Contest %s" % (ccontest, ))
                    contest_instance = Ballot.Contest(column_bounds[0], this_y,
                                                      column_bounds[1],
                                                      this_y + next_y, 0,
                                                      ccontest)
                    regionlist.append(contest_instance)
            if left[n][1] == 'W':
                if this_white_is_votearea:
                    # no descriptive text anticipated
                    self.get_only_votes_from(
                        image, contest_instance,
                        (column_bounds[0], this_y, column_bounds[1], next_y))
                if this_white_is_yesno:
                    # descriptive text sets current contest,
                    # votes are in stretches where the middle is white
                    self.get_contests_and_votes_from(
                        image, regionlist,
                        (column_bounds[0], this_y, column_bounds[1], next_y))
                self.log.debug("White zone at %d to %d %s" %
                               (this_y, next_y, next_zone))
        return regionlist
Example #28
    #im = im.convert('1')

    #im = im.filter(ImageFilter.ModeFilter(3))
    #im = im.filter(ImageFilter.MinFilter(3))
    #im = im.filter(ImageFilter.MaxFilter(3))

    #im = im.filter(ImageFilter.RankFilter(3, 8))
    #im = im.filter(ImageFilter.RankFilter(3, 7))

    #im = ImageOps.equalize(im)

    im = im.convert('L')

    #                              x <= 127 ? 0 : 255

    im = Image.eval(im, (lambda x: 0 if x <= 127 else 255))
    #im = Image.eval(im, (lambda x: x-50))

    #borderwalker.walk(im, (0,0))

    border = borderwalker.nemo(im)

    im = Image.merge('RGB', (im, im, im))
    #im = im.filter(ImageFilter.SMOOTH)
    
    pg_img = pygame.image.frombuffer(im.tostring(), im.size, im.mode)
    screen.fill((0,0,0))
    screen.blit(pg_img, (0,0))

    #print len(border) #, border
Example #29
            cell = grid[y][x]
            template = (1, 0), (0, 1), (-1, 1), (1, 1)
            cell = sweep(grid, cell, x, y, template)
            grid[y][x] = cell
        for x in xrange(0, width):
            cell = grid[y][x]
            template = (-1, 0),
            cell = sweep(grid, cell, x, y, template)
            grid[y][x] = cell

print "Allocating the destination image..."
inside, outside = (0,0), (9999, 9999)
sourceImage = Image.open(sourceFile)
sourceImage.load()
alphaChannel = sourceImage.split()[3]
alphaChannel = Image.eval(alphaChannel, invert)
w = alphaChannel.size[0] + spreadFactor * 2
h = alphaChannel.size[1] + spreadFactor * 2
srcImage = Image.new("L", (w, h), 0)
srcImage.paste(alphaChannel, (spreadFactor, spreadFactor))
width, height = srcImage.size

print "Creating the two grids..."
pixels = srcImage.load()
grid0 = [[initCell(pixels[x, y]) for x in xrange(width)] for y in xrange(height)] 
grid1 = [[initCell(invert(pixels[x, y])) for x in xrange(width)] for y in xrange(height)] 

print "Propagating grid 0..."
propagate(grid0)

print "Propagating grid 1..."
Example #30
import Image

#opens an image:
im = Image.open("l.jpg")
#creates a new empty image, RGB mode, size 4400 by 510.
new_im = Image.new('RGB', (4400,510))

#Here I resize my opened image, so it is no bigger than 200 by 150
im.thumbnail((200,150))
#Iterate through the grid with 220 by 170 spacing, to place my image
for i in xrange(20,4400,220):
    for j in xrange(20,510,170):
        #I change brightness of the images, just to emphasise they are unique copies.
        im=Image.eval(im,lambda x: x+(i+j)/300)
        #paste the image at location i,j:
        new_im.paste(im, (i,j))

new_im.save('gallery.jpg')
Example #31
for i in range(1,60,1):
    text = str(i)
    # A fully transparent image to work on, and a separate alpha channel.
    im = Image.new("RGB", (30, 30), (0,0,0))
    alpha = Image.new("L", im.size, "black")

    # Make a grayscale image of the font, white on black.
    imtext = Image.new("L", im.size, 0)
    drtext = ImageDraw.Draw(imtext)
    font = ImageFont.truetype(fontfile, size)
    drtext.text(position, text, font=font, fill="white")
        
    # Add the white text to our collected alpha channel. Gray pixels around
    # the edge of the text will eventually become partially transparent
    # pixels in the alpha channel.
    alpha = ImageChops.lighter(alpha, imtext)

    # Make a solid color, and add it to the color layer on every pixel
    # that has even a little bit of alpha showing.
    solidcolor = Image.new("RGBA", im.size, color)
    immask = Image.eval(imtext, lambda p: 255 * (int(p != 0)))
    im = Image.composite(solidcolor, im, immask)

    # These two save()s are just to get demo images of the process.
    # im.save("transcolor.png", "PNG")
    # alpha.save("transalpha.png", "PNG")

    # Add the alpha channel to the image, and save it out.
    im.putalpha(alpha)
    im.save(text+".png", "PNG")
Example #32
def as_black_and_white(image, split_point=128, contrast=False):
    image = as_greyscale(image)
    if contrast:
        image = ImageOps.autocontrast(image)
    image = Image.eval(image, lambda px: 0 if px < split_point else 255)
    return image.convert('1', dither=Image.NONE)
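as_greyscale is not shown here; assuming it is essentially convert('L'), a standalone sketch of the same thresholding (file names are placeholders):

from PIL import Image, ImageOps

page = Image.open("scan.png").convert("L")
page = ImageOps.autocontrast(page)
bw = Image.eval(page, lambda px: 0 if px < 128 else 255)
bw.save("scan_bw.png")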
Example #33
    def get(self, format):
        t = time.time()

        rendr_desc, _, format = self.request.path[1:].rpartition('.')

        if self.request.host.partition('.')[0] in self.static_subdomains:
            # Invalid hostname -- don't serve rendrs on static domains
            raise tornado.web.HTTPError(404)
        elif self.request.host.startswith('l') and \
                len(self.request.host.split('.')) > 2:
            # Library ID is in hostname
            library_id = self.request.host.partition('.')[0][1:]
            rendr_id, _, param = rendr_desc.partition('/')
        else:
            # Library ID is first parameter
            library_id, _, rendr_desc = rendr_desc.partition('/')
            rendr_id, _, param = rendr_desc.partition('/')

        library_id = urllib.unquote(library_id)
        rendr_id = urllib.unquote(rendr_id)

        log.debug("%s %s" % (library_id, rendr_id))

        # Assemble parameter set
        data = dict((k, v if len(v) != 1 else v[0])
                for k, v in self.request.arguments.iteritems())
        data["params"] = [urllib.unquote(p) for p in param.split('/')]

        format = format.lower()

        if format in ("jpg", "gif", "png"):
            # Check recency of last total failure -- if less than timeout * 10,
            # throw an error immediately. This prevents infinite rendr loops,
            # provided there are fewer than 8 or so rendrs in the loop.
            if t - Renderer._rendr_failure_times.get((library_id, rendr_id), 0) \
                    < self.timeout * 10:
                raise tornado.web.HTTPError(503)

            query_uri = "http://127.0.0.1:%s/%s/%s.html%s" % (self.port,
                library_id, rendr_desc,
                "?" + self.request.query if self.request.query else "")
            fd, output_path = tempfile.mkstemp(suffix=".png")
            os.close(fd) # not using it yet

            result = yield gen.Task(asyncprocess.run_cmd,
                [self.phantomjs, "--disk-cache=yes",
                "--max-disk-cache-size=524288", self.rasterize, query_uri,
                output_path],
                self.timeout)

            # Track the last failure
            if not result[0][0].startswith("success"):
                log.error("Renderer.get failure (%s): %s" % (self.request.uri,
                    result[0][0]))
                # Any rendr failures caused by timeouts will trigger a lockout
                # for 10 times the timeout duration
                if time.time() - t > self.timeout - 1.0:
                    Renderer._rendr_failure_times[(library_id, rendr_id)] = t
                self.send_error(504, message='\n'.join(
                    l for l in result[0][0].split('\n')
                        if not l.startswith('success: written')))
                return

            # Serve output path
            self.set_header("Date", datetime.datetime.utcnow())
            self.set_header("Expires", datetime.datetime.utcnow() +
                datetime.timedelta(seconds=3600))
            self.set_header("Cache-Control", "public, max-age=" +
                str(3600))
            self.set_header("Content-Type",
                "image/" + ("jpeg" if format == "jpg" else format))
            # Use PIL to convert image to the desired output format, if it's
            # not PNG
            if format == "png":
                with open(output_path, "rb") as f:
                    self.write(f.read())
            else:
                img = Image.open(output_path, "r")
                buf = cStringIO.StringIO()
                if format == "jpg":
                    try:
                        quality = int(data["q"])
                    except Exception:
                        quality = 70
                    img = img.convert("RGB")
                    img.save(buf, "jpeg", quality=quality)
                elif format == "gif":
                    # Convert to GIF while maintaining transparency
                    img.load()
                    alpha = img.split()[3]
                    img = img.convert("RGB").convert("P",
                        palette=Image.ADAPTIVE, colors=255)
                    # Set all pixel values below 128 to 255, and the rest to 0
                    mask = Image.eval(alpha, lambda a: 255 if a <=128 else 0)
                    img.paste(255, mask)
                    # The transparency index is 255
                    img.save(buf, "png", transparency=255)

                self.write(buf.getvalue())

            self.finish()

            # Delete up to 2 files older than 60 seconds
            delete_files(os.path.dirname(output_path), 2, 60)
        elif format in ("html", "json"):
            # Retrieve the rendr file
            rendr = yield gen.Task(self.db.read_rendr, library_id,
                rendr_id)
            if not rendr or "error" in rendr:
                raise tornado.web.HTTPError(404)

            # Render the rendr
            if format == "json":
                self.set_header("Content-Type", "application/json")
                self.write(rendr)
            elif format == "html":
                self.set_header("Content-Type", "text/html")
                self.write(pystache.render("""
                    <!DOCTYPE html>
                    <html>
                        <head>
                            <style>{{{css}}}</style>
                            <script>
                                window.query = {{{data}}};
                                window.decodeBase64UrlSafe = function (s) {
                                    s = s.replace(/-/g, '+').replace(/_/g, '/');
                                    return decodeURIComponent(escape(atob(s)));
                                };
                            </script>
                        </head>
                        <body style="margin:0;padding:0;overflow:hidden">
                            {{{html}}}
                        </body>
                    </html>
                """, {
                    "css": pystache.render(rendr["css"], data),
                    "html": pystache.render(rendr["body"], data),
                    "data": json.dumps(data),
                }))

            self.finish()
        else:
            raise tornado.web.HTTPError(400)
Example #34
#!/usr/bin/env python

import Image
import ImageFilter

if __name__ == '__main__':
    in_im = Image.open('westbrook.jpg')
    out_im = Image.eval(in_im, lambda x: x * 0.5)
    out_im.save('Q2.jpg')
Example #35
def _img_from_text(text, font, size=12, color='#000', decoration={}):
    """
        Draws text with font, size, color and decoration parameters.
        Caches the image and returns (image path, width, height) of the
        new or existing image.
    """
    
    image_path = path.join(settings.MEDIA_ROOT, HEADLINE_CACHE_DIR)
    font_path = path.join(settings.MEDIA_ROOT, HEADLINE_FONTS_DIR)

    id = "headline-%s" % md5(smart_str(''.join((text, font, size.__str__(), color, decoration.__str__())))).hexdigest()
    image_file = path.join(image_path, "%s.png" % id)
    

    if not path.isfile(image_file) or HEADLINE_NO_CACHE:
        
        size = int(size)
        font = ImageFont.truetype(path.join(font_path, font), size)
        width, height = font.getsize(text)
        
        ### Init surfaces
        image = Image.new("RGB", (width, height), (0,0,0))
        alpha = Image.new("L", image.size, "black")
        imtext = Image.new("L", image.size, 0)
        draw = ImageDraw.Draw(imtext)
        
        ### Real Drawings on alpha with white color
        if decoration.has_key('opacity'):
            opacity = float(decoration['opacity']) * 255
        else:
            opacity = 255
            
        ### Draws text
        draw.text((0, 0), text, font=font, fill=opacity)
        
        ### Draws an underline
        if decoration.has_key('underline'):
            val = int(decoration['underline'])
            draw.line((0 + size/20, height * 4 / 5 + val,
                       width - size/20, height * 4 / 5 + val),
                       fill=opacity, width=size / 20)
        
        ### Draws an strikeout line
        if decoration.has_key('strikeout'):
            val = int(decoration['strikeout'])
            draw.line((0 + size/20, height / 2  + val,
                       width - size/20, height / 2  + val),
                       fill=opacity, width=size / 20)
            
        ### Alpha color black-magic
        alpha = ImageChops.lighter(alpha, imtext)
        solidcolor = Image.new("RGBA", image.size, color)
        immask = Image.eval(imtext, lambda p: 255 * (int(p != 0)))
        image = Image.composite(solidcolor, image, immask)
        image.putalpha(alpha)
        
        ### Rotation
        if decoration.has_key('rotate') and decoration['rotate']:
            angle = float(decoration['rotate'])
            if angle == 90:
                image = image.transpose(Image.ROTATE_90)
            elif angle == 180:
                image = image.transpose(Image.ROTATE_180)
            elif angle == 270:
                image = image.transpose(Image.ROTATE_270)
            else:
                # XXX: bad rotation
                # The bicubic transformation really only works
                # when the canvas is not resized: last param is False
                image = image.rotate(angle, Image.BICUBIC, True)
            width, height = image.size
        
        ### Save image
        image.save(image_file, "PNG")
        
        ### Optimize png with external tool
        if HEADLINE_PNG_OPTIMIZER:
            from os import system
            system(HEADLINE_PNG_OPTIMIZER % {"file": image_file})
        
    else:
        ### We only need the dimensions
        width, height = Image.open(image_file).size
        
    return "%s%s/%s.png" % \
           (settings.MEDIA_URL, HEADLINE_CACHE_DIR, id), width, height
Example #36
 def loadFile(self,fileName):
     self.image = Image.eval(ImageOps.grayscale(Image.open(fileName)), lambda a: 1 if a <128 else 0)
     x,y = self.image.size
     self.x = x
     self.y = y
Example #37
for i in range(1, 60, 1):
    text = str(i)
    # A fully transparent image to work on, and a separate alpha channel.
    im = Image.new("RGB", (30, 30), (0, 0, 0))
    alpha = Image.new("L", im.size, "black")

    # Make a grayscale image of the font, white on black.
    imtext = Image.new("L", im.size, 0)
    drtext = ImageDraw.Draw(imtext)
    font = ImageFont.truetype(fontfile, size)
    drtext.text(position, text, font=font, fill="white")

    # Add the white text to our collected alpha channel. Gray pixels around
    # the edge of the text will eventually become partially transparent
    # pixels in the alpha channel.
    alpha = ImageChops.lighter(alpha, imtext)

    # Make a solid color, and add it to the color layer on every pixel
    # that has even a little bit of alpha showing.
    solidcolor = Image.new("RGBA", im.size, color)
    immask = Image.eval(imtext, lambda p: 255 * (int(p != 0)))
    im = Image.composite(solidcolor, im, immask)

    # These two save()s are just to get demo images of the process.
    # im.save("transcolor.png", "PNG")
    # alpha.save("transalpha.png", "PNG")

    # Add the alpha channel to the image, and save it out.
    im.putalpha(alpha)
    im.save(text + ".png", "PNG")
Example #38
    def commitToDatabase(self, imgdata):
        sessiondata = imgdata['session']
        rundir = self.params['rundir']
        maskname = self.params['runname']
        assessname = self.params['runname']
        bin = self.params['bin']
        maskdir = os.path.join(rundir, "masks")

        maskrundata, maskparamsdata = apMask.getMaskParamsByRunName(
            maskname, sessiondata)

        if not maskrundata:
            apMask.insertManualMaskRun(sessiondata, rundir, maskname, bin)
            maskrundata, maskparamsdata = apMask.getMaskParamsByRunName(
                maskname, sessiondata)
            try:
                apParam.createDirectory(maskdir)
            except:
                apDisplay.printWarning('can not create mask directory')

        massessrundata, exist = apMask.insertMaskAssessmentRun(
            sessiondata, maskrundata, assessname)

        # The mask file should only exist if the em_hole_finder found a region to mask.
        # If it does not exist, do not insert anything to the DB.
        if (os.path.exists(self.outfile)):
            apDisplay.printMsg("Writing results to database: " +
                               time.asctime())

            # Set black pixels to white and anything else to 0
            img1 = Image.open(self.outfile)
            img2 = Image.eval(img1, lambda px: 255 if px == 0 else 0)

            # make sure the images have the same shape
            imgshape = numpy.asarray(imgdata['image'].shape)
            apDisplay.printMsg("MRC Image Shape:")
            print imgshape
            imgsize = imgshape[0] * imgshape[1]
            apDisplay.printMsg("MRC Image Size:")
            print imgsize

            maskshape = numpy.shape(img2)
            apDisplay.printMsg("Mask Image Shape:")
            print maskshape

            apDisplay.printMsg("resizing mask image with scale:")
            scaleFactorx = float(imgshape[0]) / float(maskshape[0])
            scaleFactory = float(imgshape[1]) / float(maskshape[1])
            scale = scaleFactorx, scaleFactory
            print scale

            img3 = imagefun.scale(img2, scale)
            maskshape = numpy.shape(img3)
            apDisplay.printMsg("Mask Image Shape:")
            print maskshape
            #img3 = numpy.resize(img2, imgshape) # not working
            img3path = self.outfile + "_tmp.jpg"
            scipy.misc.imsave(img3path, img3)

            labeled_regions, clabels = ndimage.label(img3)

            testlog = [False, 0, ""]
            infos = {}

            apDisplay.printMsg("getting mask region info.")
            infos, testlog = apCrud.getLabeledInfo(imgdata['image'], img3,
                                                   labeled_regions,
                                                   range(1, clabels + 1),
                                                   False, infos, testlog)

            apDisplay.printMsg("inserting mask regions to DB.")
            print len(infos)

            area_max = imgsize * .9
            offset = 1
            for l1 in range(0, len(infos)):
                l = l1 + offset
                info = infos[l]
                area = info[0]
                print area
                if (area > 400 and area < area_max):
                    apDisplay.printMsg("saving a region of size:")
                    print area
                    info.append(l)
                    regiondata = apMask.insertMaskRegion(
                        maskrundata, imgdata, info)

        # Insert mask assessment regions. This keeps track of the mask regions that the user wants to reject.
        allregiondata = apMask.getMaskRegions(maskrundata, imgdata)

        for regiondata in allregiondata:
            apMask.insertMaskAssessment(massessrundata, regiondata, True)

#               if self.assess != self.assessold and self.assess is not None:
#                       #imageaccessor run is always named run1
#                       apDatabase.insertImgAssessmentStatus(imgdata, 'run1', self.assess)
        return
Example #39
labels.seek(8)

images = file('train-images-idx3-ubyte', 'rb')
images.seek(16)

finished = 0
idx = {}

i = 0
while finished < 10:
    d = struct.unpack('B', labels.read(1))[0]
    l = idx.setdefault(d, [])
    if len(l) < 10:
        l.append(i)
        if len(l) == 10:
            finished += 1
    i += 1
from pprint import pprint
pprint(idx)

size = 28
img = Image.new('L', (size*10, size*10))
fragment = Image.new('L', (size, size))
for i in range(10):
    for j in range(10):
        images.seek(16+idx[j][i]*size*size)
        fragment.putdata(struct.unpack('B'*size*size, images.read(size*size)))
        img.paste(fragment, (j*size, i*size))
img = Image.eval(img, lambda p: 255-p)
img.save('digits.png', 'PNG')
Example #40
def threshold_image(image, value):
    return Image.eval(image, cutoff(value)).convert('1')
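cutoff is not defined in this snippet; presumably it is a small factory returning a per-pixel threshold function, roughly:

def cutoff(value):
    # Map pixels at or above `value` to white, everything else to black.
    return lambda px: 255 if px >= value else 0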
Example #41
def crop_edges_get_sum(im, save_as, edge=25):
    c = im.crop((edge, edge, im.size[0] - edge, im.size[1] - edge))
    c.save(save_as)
    s = ImageStat.Stat(c)
    return int(s.sum[0] / 1000.)


i1 = Image.open("/home/mitch/data/hart/unproc/000/000001.jpg")
marked = Image.open("/home/mitch/data/hart/unproc/000/000001m.jpg")

# shrink as much as possible, but make sure lines still register as pixels
scaling_factor = 2
small = i1.resize((i1.size[0] / scaling_factor, i1.size[1] / scaling_factor))
marked = marked.resize(
    (marked.size[0] / scaling_factor, marked.size[1] / scaling_factor))
i1 = Image.eval(small, bw)
i1.save("/tmp/i1.jpg")
print "I1, orig, saved."
i2 = ImageChops.duplicate(i1)
marked = Image.eval(marked, bw)

merge = i2

# calculate spread that works well with your scaling factor
spread = int(round(i1.size[0] / (scaling_factor * 75.)))

# wherever there's a dark pixel, darken all pixels within spread pixels
for x in range(-spread, spread, 1):
    for y in range(-spread, spread, 1):
        print spread, x, y
        newi = ImageChops.offset(i2, x, y)
Example #42
 def fade(self, factor = 0.9):
     "Fade the display to black by the given factor"
     faded = Image.eval(self._img, lambda x : x * factor)
     self._img.paste(faded)
Example #43
    def generate_transition_list_from_zones(self,image,regionlist,column_bounds,left,middle):
        """ given the pair of zone lists, generate a comprehensive list

        We should then be able to merge these sets of split information:
        anything where we find solid black or halftone is a definite break
        which may be followed either by another black or halftone area, by
        a description area, or by a vote area.
        """
        ccontest_default = "No current contest"
        ccontest = ccontest_default
        cjurisdiction_default = "No current jurisdiction"
        cjurisdiction = cjurisdiction_default
        contest_instance = None
        next_white_is_votearea = False
        this_white_is_votearea = False
        next_white_is_yesno = False
        this_white_is_yesno = False
        for n in range(len(left)):
            this_white_is_votearea = False
            if next_white_is_votearea == True:
                this_white_is_votearea = True
                next_white_is_votearea = False
            this_white_is_yesno = False
            if next_white_is_yesno == True:
                this_white_is_yesno = True
                next_white_is_yesno = False
            this_y = left[n][0]
            try:
                next_zone = left[n+1]
            except IndexError:
                next_zone = [0,'X']
            next_y = next_zone[0]
            rel_end = next_y - (const.dpi/10)
            if left[n][1]=='B':
                self.log.debug("Black zone at %d to %d %s" % (this_y,
                                                              next_y,
                                                              next_zone))
                # if it's a legitimate black zone and the next zone is white,
                # that white zone is a Yes/No Vote Area (or empty)
                if (next_y - this_y) > (const.dpi/4):
                    next_white_is_yesno = True
                    # this zone becomes the current Jurisdiction
                    crop = image.crop((column_bounds[0],
                                       this_y,
                                       column_bounds[1],
                                       next_y))
                    cjurisdiction = self.extensions.ocr_engine(crop)
                    self.log.debug( "Jurisdiction %s" % (cjurisdiction,))
                    cjurisdiction = self.extensions.ocr_cleaner(cjurisdiction)
                    cjurisdiction = cjurisdiction.replace("\n","//").strip()
                    self.log.debug( "Cleaned Jurisdiction %s" % (cjurisdiction,))
                    # and the current contest is set 
                    # from the descriptive text
                    # at the start of the Yes No Vote area
            if left[n][1]=='G':
                self.log.debug("Gray zone at %d to %d %s" % (this_y,
                                                             next_y,
                                                             next_zone))
                # if it's a legitimate gray zone and the next zone is white,
                # that white zone is a voting area (or empty)
                if (next_y - this_y) > (const.dpi/2):
                    next_white_is_votearea = True
                    crop = image.crop((column_bounds[0],
                                       this_y,
                                       column_bounds[1],
                                       next_y))
                    crop = Image.eval(crop,elim_halftone)
                    ccontest = self.extensions.ocr_engine(crop)
                    ccontest = ccontest.replace("\n","//").strip()
                    self.log.debug( "Contest %s" % (ccontest,))
                    ccontest = self.extensions.ocr_cleaner(ccontest)
                    self.log.debug( "Cleaned Contest %s" % (ccontest,))
                    contest_instance = Ballot.Contest(column_bounds[0],
                                                      this_y,
                                                      column_bounds[1],
                                                      this_y+next_y,
                                                      0,
                                                      ccontest)
                    regionlist.append(contest_instance)
            if left[n][1]=='W':
                if this_white_is_votearea:
                    # no descriptive text anticipated
                    self.get_only_votes_from(image,contest_instance,
                                             (column_bounds[0],
                                              this_y,
                                              column_bounds[1],
                                              next_y))
                if this_white_is_yesno:
                    # descriptive text sets current contest,
                    # votes are in stretches where the middle is white
                    self.get_contests_and_votes_from(image,
                    regionlist,
                                                     (column_bounds[0],
                                                      this_y,
                                                      column_bounds[1],
                                                      next_y))
                self.log.debug( "White zone at %d to %d %s" % (this_y,next_y,next_zone))
        return regionlist