Example #1
	def process(self, image):
		red = 1.0  # (0.1 - 1.0)
		green = 0.5  # (0.1 - 0.5)
		red_r = red
		red_g = (1. - red) / 2.
		red_b = (1. - red) / 2.
		cyan_r = 0
		cyan_g = green
		cyan_b = 1. - green
		yield 'Red layer', image
		redlayer = image.copy()
		redlayer = colors.color_mixer_monochrome(redlayer, red_r, red_g, red_b)
		redlayer = ImageChops.invert(redlayer)
		redlayer = colors.apply_color(redlayer, (255, 0, 0))
		redlayer = ImageChops.invert(redlayer)
		yield 'Cyan layer', redlayer
		cyanlayer = image.copy()
		cyanlayer = colors.color_mixer_monochrome(cyanlayer, cyan_r, cyan_g, cyan_b)
		cyanlayer = ImageChops.invert(cyanlayer)
		cyanlayer = colors.apply_color(cyanlayer, (0, 255, 255))
		cyanlayer = ImageChops.invert(cyanlayer)
		yield 'Cyan + Red...', cyanlayer
		image = ImageChops.multiply(cyanlayer, redlayer)
		yield 'Yellow layer', image
		yellowlayer = image.copy()
		colors.fill_with_color(yellowlayer, (255, 255, 240))
		image = ImageChops.multiply(image, yellowlayer)
		yield 'Contrast...', image
		image = ImageEnhance.Contrast(image).enhance(1.1)
		yield 'Sharpness...', image
		image = ImageEnhance.Sharpness(image).enhance(1.1)
		yield 'Done', image
Example #2
def vignette(image, off=0.2, stop=0.7, center_w=0.5, center_h=0.5):
	width, height = image.size
	vlayer = create_circular_gradient(image.size, 1.3, center_w, center_h, False)
	curv = list(curves.create_curve([(0, 0), (96, 200), (255, 255)]))
	vlayer = curves.apply_curves(vlayer, curv)
	vlayer = vlayer.filter(ImageFilter.BLUR).convert("RGB")
	clouds = create_clouds_bw(vlayer.size, 3)
	clouds = ImageEnhance.Brightness(clouds).enhance(3)
	clouds = ImageEnhance.Contrast(clouds).enhance(0.9)
	clouds = ImageChops.multiply(clouds, ImageChops.invert(vlayer))
	return ImageChops.multiply(image, ImageChops.invert(clouds))
Example #3
    def contrastMask(self, outfile):
        """Ceci est un filtre de debouchage de photographies, aussi appelé masque de contraste,
        il permet de rattrapper une photo trop contrasté, un contre jour, ...
        Écrit par Jérôme Kieffer, avec l'aide de la liste python@aful,
        en particulier A. Fayolles et F. Mantegazza avril 2006
        necessite numpy et PIL.

        @param: the name of the output file (JPEG)
        @return: filtered Photo instance
        """

        try:
            import numpy
#            import scipy.signal as signal
        except ImportError:
            logger.error("This filter needs the numpy library available on https://sourceforge.net/projects/numpy/files/")
            return

        t0 = time.time()
        dimX, dimY = self.pil.size

        ImageFile.MAXBLOCK = dimX * dimY
        img_array = numpy.fromstring(self.pil.tostring(), dtype="UInt8").astype("float32")
        img_array.shape = (dimY, dimX, 3)
        red, green, blue = img_array[:, :, 0], img_array[:, :, 1], img_array[:, :, 2]
        # nota: this is faster than desat2=(ar.max(axis=2)+ar.min(axis=2))/2
        desat_array = (numpy.minimum(numpy.minimum(red, green), blue) + numpy.maximum(numpy.maximum(red, green), blue)) / 2.0
        inv_desat = 255. - desat_array
        blured_inv_desat = self._gaussian.blur(inv_desat, config.ContrastMaskGaussianSize)
        bisi = numpy.round(blured_inv_desat).astype("uint8")
        k = Image.fromarray(bisi, "L").convert("RGB")
        S = ImageChops.screen(self.pil, k)
        M = ImageChops.multiply(self.pil, k)
        F = ImageChops.add(ImageChops.multiply(self.pil, S), ImageChops.multiply(ImageChops.invert(self.pil), M))
        dst_filename = op.join(config.DefaultRepository, outfile)
        F.save(dst_filename, quality=80, progressive=True, optimize=True)
        try:
            os.chmod(dst_filename, config.DefaultFileMode)
        except OSError:
            logger.error("Unable to chmod %s" % outfile)
        exifJpeg = Exif(dst_filename)
        exifJpeg.read()
        self.exif.copy(exifJpeg)
        exifJpeg.comment = self.exif.comment
        logger.debug("Write metadata to %s", dst_filename)
        exifJpeg.write()
        logger.info("The whoole contrast mask took %.3f" % (time.time() - t0))
        res = Photo(outfile)
        res.autorotate()
        return res
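
The composite at the end of contrastMask is a mask-driven blend: k is a blurred, inverted desaturation of the photo, S and M are the screened and multiplied versions of it, and the final image is F = img*S + (1 - img)*M, so the darker a region is, the more of the lightened (screen) version it receives. Below is a minimal Pillow-only sketch of the same idea; contrast_mask_sketch, its blur radius, and the use of GaussianBlur in place of the project's _gaussian helper are illustrative assumptions, not part of the original code.

# Sketch only: same screen/multiply composite, using plain Pillow calls.
from PIL import Image, ImageChops, ImageFilter, ImageOps

def contrast_mask_sketch(img, blur_radius=25):
    # img is assumed to be an RGB image.
    # k: blurred, inverted luminance -- bright where the photo is dark.
    k = ImageOps.invert(ImageOps.grayscale(img))
    k = k.filter(ImageFilter.GaussianBlur(blur_radius)).convert("RGB")
    s = ImageChops.screen(img, k)    # lightened copy
    m = ImageChops.multiply(img, k)  # darkened copy
    # Per-pixel blend weighted by the original brightness: F = img*S + (1 - img)*M.
    return ImageChops.add(ImageChops.multiply(img, s),
                          ImageChops.multiply(ImageChops.invert(img), m))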
Example #4
def _set_opacity_layer(layer, opacity):
	if opacity >= 1:
		return layer
	opacity = max(min(opacity, 1), 0)
	color = int(opacity * 255)
	olayer = Image.new("RGB", layer.size, (color, color, color))
	return ImageChops.multiply(layer, olayer)
Example #5
    def autofocus(self,step=5000):
        if self.slide.pos[2] >= 0:  step = -step
        self.slide.moveZ(-step/2)
        z_start = self.slide.pos[2]
        self.frames.fillBuffer()
        self.slide.displaceZ(step)
        z_frames = self.frames.getBuffer()

        #sample every kth plus its lth neighbor:  for k=10,l=2 sample frame 0,10,20 and 2,12,22
        k = 10
        l = 2
        sample_ind = [ind*k for ind in range(len(z_frames)/k)]
        sample_ind2 = [ind*k+l for ind in range(len(z_frames)/k)]
        f = [z_frames[ind] for ind in sample_ind]
        f2 = [z_frames[ind] for ind in sample_ind2]
        n = len(f)
        diffs = []
        for i in range(n-2):
            diffs.append(ImageChops.difference(f[i],f2[i]))
        motion = []
        for f in diffs:
            f = ImageChops.multiply(f,self.curr_mask)
            motion.append(ImageStat.Stat(f).sum[0])
        #g = Gnuplot.Gnuplot()
        #g.plot(motion)

        max_frame = scipy.argmax(motion)
        max_focus = (max_frame/float(n))*step + z_start
        self.slide.moveZ(max_focus)
        return max_focus
Example #6
	def process(self, image):
		yield 'Color...', image
		image = ImageEnhance.Color(image).enhance(0.5)
		yield 'Curves...', image
		bcurv = list(curves.create_curve([(0, 0), (90, 20), (140, 80), (200, 206),
				(255, 255)]))
		image = curves.apply_curves(image, bcurv)
		yield 'Smooth...', image
		blur = image.filter(ImageFilter.BLUR)
		yield 'Merge smooth...', blur
		gradient = gradients.create_gradient(image, 1.2, 1.4)
		mask = gradient.convert("L")
		mask = ImageEnhance.Brightness(mask).enhance(2.0)
		image = Image.composite(image, blur, mask)
		yield 'vignette...', image
		image = vignette.vignette(image)
		yield 'Color...', image
		cyanlayer = image.copy()
		colors.fill_with_color(cyanlayer, (230, 255, 255))
		image = ImageChops.multiply(image, cyanlayer)
		yield 'Aspect...', image
		width, height = image.size
		if width > height:
			dheight = int(width / 16. * 9)
			margin = (height - dheight) / 2
			if margin > 0:
				draw = ImageDraw.Draw(image)
				draw.rectangle((0, 0, width, margin), fill=0)
				draw.rectangle((0, height - margin, width, height), fill=0)
				del draw
		#image = ImageEnhance.Sharpness(image).enhance(1.1)
		#image = ImageEnhance.Brightness(image).enhance(1.2)
		yield 'Done', image
Example #7
    def savePolygon(self):
        #print "saving polygon " + self.selectedPolygon
        if self.selectedPolygon == "":
            print "no polygon selected"
            return
        pobj = self.Polygons[self.selectedPolygon]
        screencoords = self.getPixelCoords(pobj.coords)
        polyname = self.selectedPolygon
       
        # get PIL image of datapoints, polygon
        Pmw.showbusycursor()
        im, imsk = self.mkDataMasks(polyname, pobj.coords)
        
        im = im.convert("1")       # binarize the images
        imsk = imsk.convert("1")

        res = ImageChops.multiply(im,imsk)  

        rp = self.getpixels(res)   # rp = list of pixels found in image
        fk = self.findPixelsInList(rp)  # fk = list of matched elements

        # add the found elements to the Polygon instance
        pobj.elements = fk
        self.Polygons[self.selectedPolygon] = pobj
        self.selectedPolyMarkers()  # colors the selected markers

        self.saveSelectedMarkers()

        del(im, imsk, res)  # does this speed up or slow down next call?
        Pmw.hidebusycursor()
Example #8
    def contrast(self, cropped_img):
        """
        Provides a high contrast board image for input into Tesseract OCR
        """

        # Convert the board image into greyscale

        bwImg = cropped_img.convert("L")

        # Multiply board image with inverted image so that text is black

        bwImgM = ImageChops.multiply(ImageChops.invert(bwImg), bwImg)

        # Increase contrast

        enhancedImg = ImageEnhance.Contrast(bwImgM)
        bwImgM = enhancedImg.enhance(5.0)

        # Produce pixel image object (array) for operation (operates in place)

        bwMDat = bwImgM.load()

        # If the pixel value is not black, make it white
        # (No colours any more, I want them to turn black)

        for i in range(0, bwImgM.size[1]):
            for j in range(0, bwImgM.size[0]):
                if bwMDat[j, i] != 0:
                    bwMDat[j, i] = 255
        # Debugging
        # bwImgM.show()

        return bwImgM
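
For reference, the per-pixel loop above (turn every non-black pixel white) can also be written with Pillow's Image.point, which maps every pixel value through a lookup table instead of iterating in Python. This is a sketch of an equivalent step, not part of the original class:

        # Sketch: same threshold as the nested loop, without Python-level iteration.
        bwImgM = bwImgM.point(lambda p: 255 if p != 0 else 0)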
Example #9
def myComposite(pic,word_2,position):
	im = Image.new("RGBA", (3366, 4961), (255,255,255,255))
	im.paste(pic,(0,0)) 
	im_2 = Image.new("RGBA", (3366, 4961), (255,255,255,255))
	im_2.paste(word_2,(int(position[0]),int(position[1]))) 
	out = ImageChops.multiply(im,im_2) 
	return out
Example #10
def myComposite(pic, word_2, position):
    im = Image.new("RGBA", (3366, 4961), (255, 255, 255, 255))
    im.paste(pic, (0, 0))
    im_2 = Image.new("RGBA", (3366, 4961), (255, 255, 255, 255))
    im_2.paste(word_2, (int(position[0]), int(position[1])))
    out = ImageChops.multiply(im, im_2)
    return out
Example #11
def get_wnd2_mask(request, item_name, l, t, w, h):
    mask_name = request.REQUEST['mask_name']

    image_filename = os.path.join(settings.DATASETS_ROOT, item_name)
    mask_filename = os.path.join(settings.DATASETS_ROOT, mask_name)
    #if not os.path.exists(image_filename):
    #	raise Http404();
    #if not os.path.exists(mask_filename):
    #	raise Http404();
    im = Image.open(image_filename)
    mask = Image.open(mask_filename)

    box = map(lambda v: int(round(v)),
              [float(l),
               float(t),
               float(l) + float(w),
               float(t) + float(h)])
    c = im.crop(box)
    m = mask.crop(box)

    m = m.convert("RGB")
    print m.size
    print c.size
    c_masked = ImageChops.multiply(c, m)
    response = HttpResponse(mimetype="image/jpeg")
    c_masked.save(response, "JPEG")

    return response
Example #12
def add_overlay( tileName, mapName ):
    tile = Image.open( tileName )
    tile = tile.convert( 'RGB' )
    map = Image.open( mapName )
    map = map.convert( 'RGB' )
    outimg = ImageChops.multiply( tile, map )
    outimg.save( tileName )
    return
Example #13
def _overlay_pictures(pic1, pic2, contour):

    if (contour):
        import ImageFilter, ImageChops
        return ImageChops.multiply(pic2.filter(ImageFilter.CONTOUR), pic1)
    else:
        import Image
        return Image.blend(pic2, pic1, 0.5)
Example #14
    def do_multiply(self):
        """usage: multiply <image:pic1> <image:pic2>

        Pop the two top images, push the multiplication image.
        """
        import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.multiply(image1, image2))
Example #15
    def do_multiply(self):
        """usage: multiply <image:pic1> <image:pic2>

        Pop the two top images, push the multiplication image.
        """
        import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.multiply(image1, image2))
Example #16
	def process(self, image):
		red = 1.2
		green = 1.2
		blue = 1.2
		red_r = red
		red_g = (1. - red) / 2.
		red_b = (1. - red) / 2.
		green_r = (1. - green) / 2.
		green_g = green
		green_b = 1. - green
		blue_r = (1. - blue) / 2.
		blue_g = (1. - blue) / 2.
		blue_b = blue
		yield 'Red layer', image
		redlayer = image.copy()
		redlayer = colors.color_mixer_monochrome(redlayer, red_r, red_g, red_b)
		redlayer = ImageChops.invert(redlayer)
		redlayer = colors.apply_color(redlayer, (255, 0, 0))
		redlayer = ImageChops.invert(redlayer)
		yield 'Green layer', redlayer
		greenlayer = image.copy()
		greenlayer = colors.color_mixer_monochrome(greenlayer, green_r, green_g,
				green_b)
		greenlayer = ImageChops.invert(greenlayer)
		greenlayer = colors.apply_color(greenlayer, (0, 255, 0))
		greenlayer = ImageChops.invert(greenlayer)
		yield 'Blue layer', greenlayer
		bluelayer = image.copy()
		bluelayer = colors.color_mixer_monochrome(bluelayer, blue_r, blue_g, blue_b)
		bluelayer = ImageChops.invert(bluelayer)
		bluelayer = colors.apply_color(bluelayer, (0, 0, 255))
		bluelayer = ImageChops.invert(bluelayer)
		yield 'Red + green...', bluelayer
		image = ImageChops.multiply(greenlayer, redlayer)
		yield '+ blue...', image
		image = ImageChops.multiply(image, bluelayer)
		yield 'Contrast...', image
		image = ImageEnhance.Contrast(image).enhance(1.1)
		yield 'Sharpness...', image
		image = ImageEnhance.Sharpness(image).enhance(1.1)
		yield 'Color...', image
		image = ImageEnhance.Color(image).enhance(1.2)
		yield 'Done', image
Example #17
def heatmap(logfile, background, dotsize, every, minvalue):
  parsed = parselog(logfile)

  image = Image.open(background)

  width, height = image.size

  xmin = parsed["xmin"]
  zmin = parsed["zmin"]
  xgrid = (parsed["xmax"] - xmin) / width
  zgrid = (parsed["zmax"] - zmin) / height

  heatimage = Image.new("RGBA", image.size, "white")
  dot = gendot(dotsize, minvalue)
  tmptmp = Image.new("RGBA", image.size, "white")
  #heatarr = asarray(heatimage).astype("locale.atof")

  i = 1

  # Parse the log and set heatmap
  for t in parsed["coords"]:
    if i % every == 0:
      x = int(round((t[0] - xmin) / xgrid))
      z = int(round((t[2] - zmin) / zgrid))
      x = int(round(x - dotsize / 2))
      z = int(round(z - dotsize / 2))
    
      tempimg = Image.new("RGBA", image.size, "white")
      tempimg.paste(dot, (x, z))
    
      tmptmp = ImageChops.multiply(tmptmp, tempimg)

    i = (i + 1) % every

    #temparr = asarray(tempimg).astype("locale.atof")
    
    #heatimage = ImageChops.multiply(heatimage, tempimg)
    #heatarr = heatarr + temparr;
    
 # heatimage = Image.fromarray(heatarr)
  heatimage = ImageChops.multiply(heatimage, tmptmp)
  heatimage.save(logfile + ".png")
Example #18
def screen_mode(im, wm, wmbuffer):
    imsize = im.size
    wmsize = wm.size
    brightness = float(_OPACITY) / 100
    brightval = int(round(255 * brightness))
    wm_pos = _wm_pos(wmbuffer, imsize, wmsize)
    black_bg = Image.new('RGB', imsize, (0, 0, 0) )
    black_bg.paste(wm, wm_pos)
    darkener = Image.new('RGB', imsize, (brightval, brightval, brightval) )
    darkened_fit_wm = ImageChops.multiply(black_bg, darkener)
    return ImageChops.screen(darkened_fit_wm, im)
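
screen_mode relies on two identities: multiplying by a flat gray scales the pasted watermark's brightness while leaving the black background black, and screening anything with black is a no-op, so only the watermark region brightens the photo. Below is a self-contained sketch of the same darken-then-screen idea, with the module-level _OPACITY and _wm_pos replaced by plain arguments (names and defaults are illustrative):

# Sketch: darken a watermark on a black canvas, then screen it onto the photo.
from PIL import Image, ImageChops

def screen_watermark_sketch(im, wm, pos=(0, 0), opacity=40):
    # im and wm are assumed to be RGB images; opacity is a percentage.
    canvas = Image.new('RGB', im.size, (0, 0, 0))
    canvas.paste(wm, pos)
    level = int(round(255 * opacity / 100.0))
    canvas = ImageChops.multiply(canvas, Image.new('RGB', im.size, (level, level, level)))
    # screen(x, 0) == x, so the photo is untouched outside the watermark.
    return ImageChops.screen(canvas, im)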
Example #19
	def process(self, image, maximum=1):
		yield 'Creating...', image
		width, height = image.size
		noise = colors.create_hls_noise(image.size, 0, 1, 0)
		yield 'Curves...', noise
		bcurv = list(curves.create_curve([(0, 255), (128, 220), (255, 255)]))
		noise = curves.apply_curves(noise, bcurv)
		if maximum < 1:
			noise = ImageEnhance.Brightness(noise).enhance(1.2 - maximum / 5.)
		yield 'merge...', noise
		image = ImageChops.multiply(image, noise)
		yield 'Done', image
Example #20
	def process(self, image):
		yield 'Loading...', image
		imgp = appconfig.AppConfig().get_data_file(self._IMAGE)
		img = Image.open(imgp)
		width, height = image.size
		yield 'Create fabric...', img
		canvas = Image.new('RGB', image.size)
		for x in xrange(0, width, img.size[0]):
			for y in xrange(0, height, img.size[1]):
				canvas.paste(img, (x, y))
		yield 'Merge...', canvas
		image = ImageChops.multiply(image, canvas)
		yield 'Done', image
Example #21
	def process(self, image):
		yield 'Blur...', image
		blur = image.filter(ImageFilter.SMOOTH)
		yield 'Saturation Blur...', image
		blur = ImageEnhance.Color(blur).enhance(0.2)
		yield 'Background...', image
		back = Image.new(image.mode, image.size, (193, 191, 170))
		yield 'Merge multiply...', image
		top = ImageChops.multiply(blur, back)
		yield 'Saturation...', top
		image = ImageEnhance.Color(image).enhance(0.2)
		yield 'Tint...', image
		image = colors.tint(image, 30, 45, 40, 110, 190, 110)
		yield 'Merge multiply 2...', image
		image = ImageChops.multiply(top, image)
		yield 'Brightness...', image
		image = ImageEnhance.Brightness(image).enhance(1.2)
		yield 'Sharpen...', image
		image = ImageEnhance.Sharpness(image).enhance(1.5)
		yield 'Contrast...', image
		image = ImageEnhance.Contrast(image).enhance(1.1)
		yield 'Done', image
Example #22
    def composite(self, destination, user_mask=None):
        """Composite ourselves into the destination image, generating
        all cached components as needed"""
        Logger.log.debug("Compositing '" + self.base_name() + "' at level " +
                         self.__class__.__name__)

        im = self.get_image()
        if destination.size != im.size:
            Logger.log.info("Rescaling image from " + str(im.size) + " to " +
                            str(destination.size))
            im = im.resize(destination.size, Image.BICUBIC)

        # Find any alpha layer in the image
        transp = None
        for band in zip(im.getbands(), im.split()):
            if band[0] == "A":
                Logger.log.debug("Found transparency layer")
                transp = band[1]

        # Decide what blending we will be doing
        if user_mask is None:
            if transp is None:
                # We have no concept of transparency at all -- use a
                # flat setting
                Logger.log.debug("Using flat mask")
                mask = Image.new("1", im.size, 1)
            else:
                # We have a transparency but no user mask
                Logger.log.debug("Using existing transparency")
                mask = transp
        else:
            if transp is None:
                # We have a mask but no transparency -- use the user's
                # mask
                Logger.log.debug("Using provided mask")
                mask = user_mask
            else:
                # If we have both a supplied mask and our own
                # transparency, use both -- where either is
                # transparent, set transparency (could use
                # ImageChops.multiply() instead?)
                Logger.log.debug("Using combination mask")
                mask = ImageChops.multiply(user_mask.convert("L"),
                                           transp.convert("L"))

        name = os.tmpnam() + ".png"
        Logger.log.debug("Saving mask as: " + name)
        mask.save(name)

        return Image.composite(im, destination, mask)
Example #23
    def populate(self, list):
        global iext
        global vext
        lis = list
        for filen in lis:
            e = ''
            ext = filen[-4:]
            if ext[0] == '.':
                e = ext[1:]
            else:
                e = ext

            #mib = MTImageButton(filename=filen)
            #print e
            if e in iext:
                #print filen
                im = Image.open(filen)
                #im.thumbnail(sizethumb, Image.ANTIALIAS)
                #print im.size
                im = im.resize((100, 75), Image.ANTIALIAS)
                #print im.size
                im.save("thumb.jpg")
                mib = MTImageButton(filename="thumb.jpg")
                mib.type = e
                mib.infos = filen
                self.coverflow.add_widget(mib)
            elif e in vext:
                tmp = current_dir + "video.png"
                film = Image.open("vtemp.jpg")
                capture = cv.CaptureFromFile(filen)
                vrand = random.randint(500, 1500)
                cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_POS_FRAMES, vrand)
                cvimg = cv.QueryFrame(capture)
                cv.SaveImage("thumb.jpg", cvimg)
                vt = Image.open("thumb.jpg")
                vt = vt.resize((100, 75), Image.ANTIALIAS)
                fin = ImageChops.multiply(vt, film)
                fin.save("vthumb.jpg")

                print current_dir
                mib = MTImageButton(filename="vthumb.jpg")
                mib.type = e
                mib.infos = filen
                self.coverflow.add_widget(mib)
Example #24
	def process(self, image):
		yield 'Tint...', image
		image2 = colors.tint(image, 20, 35, 10, 150, 160, 230)
		yield 'Saturation...', image2
		image2 = ImageEnhance.Color(image2).enhance(0.6)
		yield 'Mixer...', image
		image = colors.color_mixer(image, [0.1, 0, 0], [0, 0.7, 0], [0, 0, 0.4])
		yield 'Saturation 2...', image
		image = ImageEnhance.Color(image).enhance(0.6)
		yield 'Contrast...', image
		image = ImageEnhance.Contrast(image).enhance(0.6)
		yield 'Multiply...', image
		image = ImageChops.multiply(image, image2)
		yield 'Brightness...', image
		image = ImageEnhance.Brightness(image).enhance(1.3)
		yield 'Done', image
Example #25
 def detectMotion(self,rec_time=0.5):
     "Accumulates a number of frame-to-frame differences."
     # Gather accumulated frame differences
     frames = self.frames.getFrames(rec_time=rec_time)
     sample_rate = 3
     sample_indices = [ind*sample_rate for ind in range(len(frames)/sample_rate)]
     frames = [frames[ind] for ind in sample_indices]
     #frame.point(lambda i: i*(i<250)+128*(i>250)) #turn bright white to mid-grey to reduce edge effects
     num_frames = len(frames)
     motion = Image.new('L',(640,480))
     scale = num_frames/50.
     for i in range(num_frames-2):
         diff = ImageChops.difference(frames[i+2],frames[i])  # abs of difference
         diff = diff.point(lambda i: int(i/scale)) # dividing by scale prevents saturation
         motion = ImageChops.add(motion,diff)
     motion = ImageChops.multiply(motion,self.curr_mask)   #only look at the middle of the fly
     self.window.displayEngine3(motion)         
     return motion 
Example #26
def screenMode(im, wm, opacityVal, position, wmbuffer):

    imsize = im.size
    wmsize = wm.size

    brightnuss = float(opacityVal) / 100
    brightval = int(round(255 * brightnuss))

    wmPos = wmCalculatePos(position, wmbuffer, imsize, wmsize)

    blackTempBG = Image.new("RGB", imsize, (0, 0, 0))
    blackTempBG.paste(wm, wmPos)

    darkener = Image.new("RGB", imsize, (brightval, brightval, brightval))
    darkenedFitWm = ImageChops.multiply(blackTempBG, darkener)

    out = ImageChops.screen(darkenedFitWm, im)

    return out
Example #27
 def pre_process(self, im):
     if self.transpose_method != '':
         method = getattr(Image, self.transpose_method)
         im = im.transpose(method)
     if im.mode != 'RGB' and im.mode != 'RGBA':
         return im
     for name in ['Color', 'Brightness', 'Contrast', 'Sharpness']:
         factor = getattr(self, name.lower())
         if factor != 1.0:
             im = getattr(ImageEnhance, name)(im).enhance(factor)
     for name in self.filters.split('->'):
         image_filter = getattr(ImageFilter, name.upper(), None)
         if image_filter is not None:
             try:
                 im = im.filter(image_filter)
             except ValueError:
                 pass
     if self.tint not in ['',None]:
         im = ImageChops.multiply(im, Image.new('RGB', im.size, "%s" %self.tint))
     return im
Example #28
    def heatmap(self, points, fout, dotsize=150, opacity=128, size=(1024,1024), scheme="classic"):
        """
        points  -> an iterable list of tuples, where the contents are the 
                   x,y coordinates to plot. e.g., [(1, 1), (2, 2), (3, 3)]
        fout    -> output file for the PNG
        dotsize -> the size of a single coordinate in the output image in 
                   pixels, default is 150px.  Tweak this parameter to adjust 
                   the resulting heatmap.
        opacity -> the strength of a single coordinate in the output image.
                   Tweak this parameter to adjust the resulting heatmap.
        size    -> tuple with the width, height in pixels of the output PNG 
        scheme  -> Name of color scheme to use to color the output image.
                   Use schemes() to get list.  (images are in source distro)
        """
        
        self.dotsize = dotsize
        self.opacity = opacity
        self.size = size
        self.imageFile = fout
 
        if scheme not in self.schemes():
            tmp = "Unknown color scheme: %s.  Available schemes: %s"  % (scheme, self.schemes())                           
            raise Exception(tmp)

        self.minXY, self.maxXY = self._ranges(points)
        dot = self._buildDot(self.dotsize)

        img = Image.new('RGBA', self.size, 'white')
        for x,y in points:
            tmp = Image.new('RGBA', self.size, 'white')
            tmp.paste( dot, self._translate([x,y]) )
            img = ImageChops.multiply(img, tmp)


        colors = colorschemes.schemes[scheme]
        img.save("bw.png", "PNG")
        self._colorize(img, colors)

        img.save(fout, "PNG")
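
The docstring above maps directly onto a call like the following; the class name Heatmap, the sample points, and the output path are assumptions for illustration only:

# Hypothetical usage of the method above (Heatmap is an assumed class name).
points = [(1.0, 1.0), (2.0, 2.0), (2.1, 2.1), (3.0, 3.0)]
hm = Heatmap()
hm.heatmap(points, "out.png", dotsize=100, opacity=160,
           size=(512, 512), scheme="classic")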
Example #29
def get_wnd2_mask(request,item_name,l,t,w,h):
	mask_name=request.REQUEST['mask_name']

	image_filename=os.path.join(settings.DATASETS_ROOT,item_name);
	mask_filename=os.path.join(settings.DATASETS_ROOT,mask_name);
	#if not os.path.exists(image_filename):
	#	raise Http404();
	#if not os.path.exists(mask_filename):
	#	raise Http404();
	im = Image.open(image_filename);	
	mask = Image.open(mask_filename);	

	box=map(lambda v:int(round(v)),[float(l),float(t),float(l)+float(w),float(t)+float(h)]);
	c = im.crop(box);
	m = mask.crop(box);

	m = m.convert("RGB");
	print m.size
	print c.size
	c_masked = ImageChops.multiply(c, m)
	response = HttpResponse(mimetype="image/jpeg")
	c_masked.save(response, "JPEG")

	return response
Example #30
    def savePolygon(self):
        #print "saving polygon " + self.selectedPolygon
        if self.selectedPolygon == "":
            print "no polygon selected"
            return
        pobj = self.Polygons[self.selectedPolygon]
        screencoords = self.getPixelCoords(pobj.coords)
        polyname = self.selectedPolygon

        # get PIL image of datapoints, polygon
        Pmw.showbusycursor()
        im, imsk = self.mkDataMasks(polyname, pobj.coords)

        im = im.convert("1")  # binarize the images
        imsk = imsk.convert("1")

        res = ImageChops.multiply(im, imsk)

        rp = self.getpixels(res)  # rp = list of pixels found in image
        fk = self.findPixelsInList(rp)  # fk = list of matched elements
        #remove duplicates
        dup = []
        for f in fk:
            if f not in dup:
                dup.append(f)
        fk = dup

        # add the found elements to the Polygon instance
        pobj.elements = fk
        self.Polygons[self.selectedPolygon] = pobj
        self.selectedPolyMarkers()  # colors the selected markers

        self.saveSelectedMarkers()

        del (im, imsk, res)  # does this speed up or slow down next call?
        Pmw.hidebusycursor()
Example #31
def imageMask(img, mask):
    if mask is not None:
        return ImageChops.multiply(img, mask)
    else:
        return img
Example #32
def main():

    # main_img is used as screen buffer, all image composing/drawing is done in PIL,
    # the main_img is then copied to the display (drawing on the disp itself is no fun)
    # main_img = Image.new("1", (epd2in9.EPD_WIDTH, epd2in9.EPD_HEIGHT))
    # draw = ImageDraw.Draw(main_img)

    # fonts for drawing within PIL
    andale_ttf_small = ImageFont.truetype(
        "source/fonts/andale_mono/AndaleMono.ttf", 16)
    andale_ttf_large = ImageFont.truetype(
        "source/fonts/andale_mono/AndaleMono.ttf", 26)

    epd = epd2in9.EPD()
    epd.init(epd.lut_full_update)

    # For simplicity, the arguments are explicit numerical coordinates
    image = Image.new('1', (epd2in9.EPD_WIDTH, epd2in9.EPD_HEIGHT),
                      255)  # 255: clear the frame
    draw = ImageDraw.Draw(image)

    # perform initial setup of display and GPIO
    button_logic.setup_gpio(change_state_pin, trigger_pin, yellow_led,
                            blue_led, green_led)

    # announce that we're ready
    GPIO.output(green_led, True)

    # FIXME: draw to epaper display
    default.main()

    while True:
        starttime = time.time()

        state_button = GPIO.input(change_state_pin)
        trigger_button = GPIO.input(trigger_pin)

        if state_button == False:
            button_logic.change_state(yellow_led, blue_led, green_led)
            print "Button press!"
            time.sleep(0.2)

        elif trigger_button == False:
            text, pos_x, pos_y = generate_sentence(font=andale_ttf_small)

            # create binary image filled with white
            base_image = Image.new("1",
                                   size=(epd2in9.EPD_WIDTH,
                                         epd2in9.EPD_HEIGHT),
                                   color=255)

            # create the text image
            text_image = Image.new('1',
                                   (epd2in9.EPD_HEIGHT, epd2in9.EPD_WIDTH))
            # draw the text and rotate it -90 degrees so that it fits the portrait orientation
            text_draw_buffer = ImageDraw.Draw(text_image)
            text_draw_buffer.text((pos_x, pos_y),
                                  text,
                                  font=andale_ttf_small,
                                  fill=255)
            text_image = text_image.rotate(270, expand=1)

            result = ImageChops.multiply(text_image, base_image)
            result.save("result.png")

            epd.clear_frame_memory(0xFF)
            epd.set_frame_memory(result, 0, 0)
            epd.display_frame()

            epd.delay_ms(2000)
Example #33
 def r4(self):
     self.im3=ImageChops.multiply(self.im,self.im0)
     self.tkimage.paste(self.im3)
Example #34
def normalize(image):
    image = image.filter(ImageFilter.BLUR)
    picture = ImageChops.blend(ImageOps.equalize(image), image, .5)
    return ImageChops.multiply(picture, picture)
Example #35
def normalize(image):
    image = image.filter(ImageFilter.BLUR)
    picture = ImageChops.blend(ImageOps.equalize(image), image, .5)
    return ImageChops.multiply(picture, picture)
Example #36
def extract_ultrasound_pixels(image):
    """
    Returns a bitmask of the same size as the image with the pixels where there is ultrasound data white, all others black.
    The white region is guaranteed to be one connected block of pixels.
    
    :param image: The image on which to segment the ultrasound data
    :return: an image of mode "1" in the PIL library, aka a bit mask
    """
    
    # IMPORTANT: for various images, different variants of these two values are necessary.
    #            If you get an image, and cleaning doesn't work, change these values to the commented-out variant.
    # Options / tuning of this function:
    hole_close_radius = 11 # 37
    expand_mask = True # False
    
    # The ultrasound images have the following characteristics:
    # Due to the way ultrasound works, the image is usually a cropped version of an arc over 90 degrees whose center is at the top:
    #             **
    #            ****
    #          ********
    #        ************
    #           ******
    #     
    # In our dataset, there is sometimes a green line at the bottom as well.
    #
    # Additionally, the data contain jpeg artifacts.

    img_orig_data = image.load()
    size = image.size
    
    # Untint, so as to make most pixels grayscale
    # First, find the tint color. We do that by taking the average of a 20x20 pixels sample on the center of the image:
    istart = size[0] / 2 - 10
    jstart = size[1] / 2 - 10
    n = 0
    avg = np.zeros(3)
    for i in xrange(istart,istart+20):
        for j in xrange(jstart,jstart+20):
            px = img_orig_data[i,j]
            px_channels = np.asarray(list(px))
            avg = avg + px_channels
            n = n+1
    avg = avg / n
    avg_r = int(avg[0])
    avg_g = int(avg[1])
    avg_b = int(avg[2])
    gr = (avg_r + avg_g + avg_b) / 3
    # Remove tint:
    for i in xrange(size[0]):
        for j in xrange(size[1]):
            px = img_orig_data[i,j]
            mx = max(max(px[0],px[1]),px[2])
            mn = min(min(px[0],px[1]),px[2])
        
            if mx - mn < 5:
                av = (px[0] + px[1] + px[2]) / 3
                img_orig_data[i,j] = (av,av,av)
            else:
                img_orig_data[i,j] = ((px[0] - avg_r) + gr, (px[1] - avg_g) + gr, (px[2] - avg_b) + gr)
                
    # do some filtering / blurring. Median and minimum filters are good for getting rid of JPEG artifacts.
    img_data_min = image.filter(ImageFilter.MinFilter(size=1)).load()
    img_data_max = image.filter(ImageFilter.MaxFilter(size=1)).load()
    
    # The mask to return. Contains only 1 bit per pixel
    mask = Image.new("1",size,"white") # We don't use a Python array because we want PIL's image filtering functions
    mask_access = mask.load()
    
    # If the bottom two rows are mostly black, we want those in our mask, in order to get those ultrasound curves that are basically gray
    non_black_cnt = 0
    for i in xrange(size[0]):
        for j in xrange(size[1] - 2, size[1]):
            px = img_data_min[i,j]
            if px[0] >= 30 or px[1] >= 30 or px[2] >= 30:
                non_black_cnt = non_black_cnt + 1
                
    if not non_black_cnt < 30:
        non_black_cnt = 0
        for i in xrange(size[0]):
            for j in xrange(size[1] - 6, size[1] - 4):
                px = img_data_min[i,j]
                if px[0] >= 30 or px[1] >= 30 or px[2] >= 30:
                    non_black_cnt = non_black_cnt + 1
                    
    if not non_black_cnt < 30:
        non_black_cnt = 0
        for i in xrange(size[0]):
            for j in xrange(size[1] - 30, size[1] - 28):
                px = img_data_min[i,j]
                if px[0] >= 30 or px[1] >= 30 or px[2] >= 30:
                    non_black_cnt = non_black_cnt + 1
    
    if non_black_cnt < 30:
        for i in xrange(size[0]):
            for j in xrange(size[1] - 2, size[1]):
                mask_access[i,j] = 0
                

    # Find high-contrast places, as those are not typical of ultrasound either and therefore hint at artifacts
    for i in xrange(size[0]):
        for j in xrange(size[1]):
            px = img_orig_data[i,j] # do not use img_data here, since that might not contain smaller white patches any longer
            px_channels = np.asarray(list(px))
    
            mn = min(min(px[0],px[1]),px[2])
            if mn >= 250:
                neighbors = find_neighbors((i,j),size)
    
                add_px = False
                # find out if some neighbor pixel is almost black
                for i0,j0 in neighbors:
                    px_n = img_data_min[i0,j0] # yes, here we use img_data.
                    mx_n = max(max(px_n[0],px_n[1]),px_n[2])
                    if mx_n < 12:
                        mask_access[i0,j0] = 0 # also add the neighbors that lead to the px being added
                        add_px = True
                        
                if add_px:
                    mask_access[i,j] = 0
                    for i0,j0 in neighbors:
                        px_n = img_data_max[i0,j0]
                        mn_n = min(min(px_n[0],px_n[1]),px_n[2])
                        if mn_n >= 240:
                            mask_access[i0,j0] = 0


    # Find pixels that are not basically gray by looking for those with high relative variance between their three channels
    # do some filtering / blurring. Median filters are good for getting rid of JPEG artifacts.
    image = image.filter(ImageFilter.MedianFilter()).filter(ImageFilter.MedianFilter())
    img_data = image.load()
    # img_data = image.filter(ImageFilter.GaussianBlur(radius=3)).load()
    for i in xrange(size[0]):
        for j in xrange(size[1]/2,size[1]):
            px = img_data[i,j]
            px_channels = np.asarray(list(px))
    
            # find non-gray pixels
            mx = np.max(px_channels)
            if mx > 7:
                mx = min(mx,50)
                relative_variance = np.var(px_channels) / mx
                
                if relative_variance > 5:
                    mask_access[i,j] = 0
                    
    # Second, do some processing to remove artifacts from the result
    mask = mask.filter(ImageFilter.MedianFilter()) # denoise
    mask = mask.filter(ImageFilter.MinFilter(size=5)) # expand
    
    mask = mask.filter(ImageFilter.MinFilter(size=3)) # expand & contract to close holes
    mask = mask.filter(ImageFilter.MaxFilter(size=3))
    
    mask_access = mask.load()
    
    work_img = ImageOps.autocontrast(ImageChops.multiply(image,ImageChops.invert(Image.fromarray(ndi.gaussian_filter(np.asarray(image),3)))))
    work_img_data = work_img.load()
    # work_img.show()
    
    # Expand the area by almost-black and colored pixels using breadth-first search
    px_queue = []
    for i in xrange(size[0]):
        for j in xrange(size[1]):
            if mask_access[i,j] == 0:
                px_queue.append((i,j))
                
    while px_queue:
        pos = px_queue.pop(0)
        
        neighbors = find_neighbors(pos,size)
                  
        for i,j in neighbors:
            if mask_access[i,j] == 0:
                continue
            px = work_img_data[i,j]
            mx = max(max(px[0],px[1]),px[2])
            if mx < 5:
                mask_access[i,j] = 0
                px_queue.append((i,j))
            else:
                px_channels1 = np.asarray(list(px))
                
                px2 = img_data[i,j]
                px_channels2 = np.asarray(list(px2))
                
                mx2 = max(max(px2[0],px2[1]),px2[2])                
                
                if mx > 7 or mx2 > 7:
                    mx = min(mx,30)
                    mx2 = min(mx2,30)
                    relative_variance = np.var(np.asarray(list(px_channels1))) / mx
                    relative_variance2 = np.var(np.asarray(list(px_channels2))) / mx2
                    
                    if relative_variance > 5 or relative_variance2 > 2:
                        mask_access[i,j] = 0
                        px_queue.append((i,j))
                

    # Expand & contract to close holes. This step is needed because some ultrasound lines are actually gray
    # FIXME: these might in some cases lead to undesirable results (i.e removing too much)
    mask = mask.filter(ImageFilter.MinFilter(size=hole_close_radius)) 
    mask = mask.filter(ImageFilter.MaxFilter(size=hole_close_radius))
    del mask_access
    
    # IMPORTANT: this part is needed for some images, and not for others.
    if expand_mask:    
        # Expand the area in the mask to the corners, i.e we assume that the actual ultrasound data is in a connected region somewhere in the "middle" of the image
        inner_region = mask.copy()
        inner_region_access = inner_region.load()
    
        init_pos = (size[0]/2,size[1]/2)
        px_queue = [init_pos]
        inner_region_access[init_pos] = 0
        while px_queue:
            pos = px_queue.pop(0)
            neighbors = find_neighbors(pos,size)
            for pos_n in neighbors:
                if inner_region_access[pos_n] != 0:
                    inner_region_access[pos_n] = 0
                    px_queue.append(pos_n)
                
        inner_region = ImageChops.invert(inner_region)
    
        mask = ImageChops.multiply(mask, inner_region)
        
    mask_access = mask.load()
    
    for j in range(size[1] - 50,size[1]):
        masked_in_line = 0
        for i in range(size[0]):
            if mask_access[(i,j)] == 0:
                masked_in_line = masked_in_line + 1
                
        if masked_in_line > 0.6 * size[1]:
            for i in range(size[0]):
                px = img_data[(i,j)]
                mx = max(max(px[0],px[1]),px[2])
                if mask_access[(i,j)] == 0 or mx < 5:
                    masked_in_line = masked_in_line + 1
        if masked_in_line > 0.8 * size[1]:
            for i in range(size[0]):
                mask_access[(i,j)] = 0

    return mask
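
A sketch of how the returned bit mask might be applied: multiply the original frame by the mask (converted to RGB) so that everything outside the detected ultrasound region goes black. The file names are illustrative, and a copy is passed in because the function modifies the image it is given while removing the tint.

# Hypothetical usage of extract_ultrasound_pixels (file names are placeholders).
from PIL import Image, ImageChops

img = Image.open("ultrasound_frame.jpg").convert("RGB")
mask = extract_ultrasound_pixels(img.copy())   # mode "1": white = keep, black = drop
cleaned = ImageChops.multiply(img, mask.convert("RGB"))
cleaned.save("ultrasound_cleaned.png")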
Example #37
def pma(im):
    im = im.convert("RGBA")
    (r, g, b, a) = im.split()
    (r, g, b) = [ImageChops.multiply(a, c) for c in (r, g, b)]
    return Image.merge("RGBA", (r, g, b, a))
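
pma premultiplies alpha into each color band: ImageChops.multiply computes a * c / 255 per pixel, so fully transparent pixels become black while opaque pixels keep their color. A short hypothetical check (the file name is a placeholder):

# Hypothetical usage: after pma, every pixel with alpha 0 has RGB (0, 0, 0).
from PIL import Image

src = Image.open("sprite.png").convert("RGBA")
premultiplied = pma(src)
r, g, b, a = premultiplied.split()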
Example #38
	def _apply_color(self, img, color, mask):
		img = colors.apply_color(img, color)
		img = ImageChops.multiply(img, mask)
		return img
Example #39
	def _apply_color(self, img, color, mask):
		oimg = colors.apply_hue_lightness_saturation(img, color, 0, 1, True)
		oimg = ImageChops.multiply(oimg, mask)
		return oimg
Example #40
def vdiff(im0, im1):
    im0 = ImageOps.colorize( im0.convert('L'), (255, 0, 0), (255, 255, 255))
    im1 = ImageOps.colorize( im1.convert('L'), (0, 255, 0), (255, 255, 255))            
    ImageChops.multiply(im0, im1).show()
Example #41
    def contrastMask(self, outfile):
        """Ceci est un filtre de debouchage de photographies, aussi appelé masque de contraste, 
        il permet de rattrapper une photo trop contrasté, un contre jour, ...
        Écrit par Jérôme Kieffer, avec l'aide de la liste python@aful, 
        en particulier A. Fayolles et F. Mantegazza avril 2006
        necessite numpy et PIL.
        
        @param: the name of the output file (JPEG)
        @return: filtered Photo instance
        """

        try:
            import numpy
#            import scipy.signal as signal
        except ImportError:
            logger.error("This filter needs the numpy library available on https://sourceforge.net/projects/numpy/files/")
            return

        t0 = time.time()
        dimX, dimY = self.pil.size

        ImageFile.MAXBLOCK = dimX * dimY
        img_array = numpy.fromstring(self.pil.tostring(), dtype="UInt8").astype("float32")
        img_array.shape = (dimY, dimX, 3)
        red, green, blue = img_array[:, :, 0], img_array[:, :, 1], img_array[:, :, 2]
        #nota: this is faster than desat2=(ar.max(axis=2)+ar.min(axis=2))/2
        desat_array = (numpy.minimum(numpy.minimum(red, green), blue) + numpy.maximum(numpy.maximum(red, green), blue)) / 2.0
        inv_desat = 255. - desat_array
        blured_inv_desat = self._gaussian.blur(inv_desat, config.ContrastMaskGaussianSize)
        bisi = numpy.round(blured_inv_desat).astype("uint8")
        k = Image.fromarray(bisi, "L").convert("RGB")
        S = ImageChops.screen(self.pil, k)
        M = ImageChops.multiply(self.pil, k)
        F = ImageChops.add(ImageChops.multiply(self.pil, S), ImageChops.multiply(ImageChops.invert(self.pil), M))
        exitJpeg = op.join(config.DefaultRepository, outfile)
        F.save(exitJpeg, quality=80, progressive=True, optimize=True)
        try:
            os.chmod(exitJpeg, config.DefaultFileMode)
        except OSError:
            logger.error("Unable to chmod %s" % outfile)
        exifJpeg = Exif(exitJpeg)
        exifJpeg.read()
        self.exif.copy(exifJpeg)
        exifJpeg.comment = self.exif.comment
#
#        for metadata in [ 'Exif.Image.Make', 'Exif.Image.Model', 'Exif.Photo.DateTimeOriginal',
#                         'Exif.Photo.ExposureTime', 'Exif.Photo.FNumber', 'Exif.Photo.ExposureBiasValue',
#                         'Exif.Photo.Flash', 'Exif.Photo.FocalLength', 'Exif.Photo.ISOSpeedRatings',
#                         "Exif.Image.Orientation", "Exif.Photo.UserComment"
#                         ]:
#            if metadata in self.exif:
#                logger.debug("Copying metadata %s", metadata)
#                try:
#                    exifJpeg[metadata] = self.exif[metadata]
#                except KeyError:
#                    pass #'Tag not set'-> unable to copy it
#                except:
#                    logger.error("Unable to copying metadata %s in file %s, value: %s" % (metadata, self.filename, self.exif[metadata]))
        logger.debug("Write metadata to %s", exitJpeg)
        exifJpeg.write()
        logger.info("The whoole contrast mask took %.3f" % (time.time() - t0))
        return Photo(outfile)