def prepare(filename, bgcolor = background_default, chatty = chatty_default):
    """
    Prepare a large image for tiling.
    
    Load an image from a file. Resize the image so that it is square,
    with dimensions that are an even power of two in length (e.g. 512,
    1024, 2048, ...). Then, return it.
    """

    src = Image.open(filename)

    if chatty:
        print "original size: %s" % str(src.size)
    
    full_size = (1, 1)

    while full_size[0] < src.size[0] or full_size[1] < src.size[1]:
        full_size = (full_size[0] * 2, full_size[1] * 2)
    
    img = Image.new('RGBA', full_size)
    img.paste("#" + bgcolor)
    
    src.thumbnail(full_size, scaling_filter)
    img.paste(src, (int((full_size[0] - src.size[0]) / 2),
                    int((full_size[1] - src.size[1]) / 2)))
    
    if chatty:
        print "full size: %s" % str(full_size)
        
    return img
    def test_jpeg(self):
        """Ingesting a JPEG keeps its dimensions, quantization and sampling."""
        jpeg_path = os.path.join(TEST_DATA_PATH, "Sam_Hat1.jpg")
        image = Image.objects.create_from_path(jpeg_path)

        # Reload from the database so we see state written by the task.
        image = Image.objects.get(id=image.id)

        self.assertTrue(image.source.path.endswith("Sam_Hat1.jpg"))
        self.assertEqual(image.width, 3264)
        self.assertEqual(image.height, 2448)
        self.assertEqual(image.jpeg_quality, None)
        self.assertTrue(os.path.exists(image.optimized.path))
        self.assertTrue(os.path.exists(image.source.path))

        original = PILImage.open(image.source.path)
        optimized = PILImage.open(image.optimized.path)

        # Optimization must not alter the quantization tables...
        self.assertEqual(original.quantization, optimized.quantization)
        # ...nor the chroma subsampling.
        self.assertEqual(
            JpegImagePlugin.get_sampling(original),
            JpegImagePlugin.get_sampling(optimized),
        )
def query(query_term, folder_name, path):
    """Scrape Google Images (deprecated AJAX API) for *query_term*, saving
    each hit as a JPEG under path/folder_name.

    NOTE(review): this chunk is truncated — the ``finally:`` at the end has
    no body in view; presumably ``file.close()`` and the ``start`` increment
    follow in the full source. Confirm before relying on this code.
    """
    BASE_URL = 'https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q=' + query_term + '&start=%d'

    BASE_PATH = os.path.join(path, folder_name.replace(' ', '_'))

    if not os.path.exists(BASE_PATH):
        os.makedirs(BASE_PATH)
        print "made: " + BASE_PATH

    start = 0  # start query string parameter for pagination
    while start < 40:   # query 20 pages
        r = requests.get(BASE_URL % start)
        for image_info in json.loads(r.text)['responseData']['results']:
            url = image_info['unescapedUrl']
            try:
                image_r = requests.get(url)
            except ConnectionError, e:
                print 'could not download %s' % url
                continue

            #remove file-system path characters from name
            title = query_term.replace(' ', '_') + '_' + image_info['imageId']
            # Re-encode whatever was downloaded as a JPEG on disk.
            file = open(os.path.join(BASE_PATH, '%s.jpg') % title, 'w')
            try:
                Image.open(StringIO(image_r.content)).save(file, 'JPEG')
            except IOError, e:
                # throw away gifs and stuff
                print 'couldnt save %s' % url
                continue
            finally:
Exemple #4
0
    def test_save_screenshot_valid(self, tmpdir):
        """Check that 'save_screenshot' works"""

        def screenshot_is_blank(glob_pattern):
            # A screenshot is blank when every band is uniformly white.
            shot_path = glob.glob(glob_pattern)[0]
            shot = Image.open(shot_path)
            return all(band.getextrema() == (255, 255) for band in shot.split())

        # Run the test crawl
        manager_params, browser_params = self.get_config(str(tmpdir))
        manager = TaskManager.TaskManager(manager_params, browser_params)
        sequence = CommandSequence.CommandSequence(url_a)
        sequence.get(sleep=1)
        sequence.save_screenshot('test')
        sequence.screenshot_full_page('test_full')
        manager.execute_command_sequence(sequence)
        manager.close()

        # Neither the viewport capture nor the full-page capture may be blank.
        assert not screenshot_is_blank(
            os.path.join(str(tmpdir), 'screenshots', '1-*-test.png'))
        assert not screenshot_is_blank(
            os.path.join(str(tmpdir), 'screenshots', '1-*-test_full.png'))
    def draw_articulations(self, count, length, radius, line_thickness, ring_thickness):
        """Draw *count* radial tick marks arranged in a ring and return the
        result as a numpy array (grey canvas with the ticks as alpha)."""
        size = radius * 2 + length
        center = (size / 2.0, size / 2.0)
        half_tick = length / 2.0
        step = 360.0 / count

        canvas = Image.new("RGB", [size, size], tuple(LIGHTGREY[:3]))
        mask = Image.new('L', [size, size], 0)
        surf = aggdraw.Draw(mask)

        tick_pen = aggdraw.Pen(255, line_thickness)
        ring_pen = aggdraw.Pen(0, ring_thickness)
        clear_brush = aggdraw.Brush((255, 0, 0), 0)

        # One tick per articulation, straddling the ring radius.
        for idx in range(count):
            inner = util.point_pos(center, radius - half_tick, angle=step * idx)
            outer = util.point_pos(center, radius + half_tick, angle=step * idx)
            surf.line((inner[0], inner[1], outer[0], outer[1]), tick_pen)

        # Ring outline drawn onto the same mask (transparent-filled ellipse).
        lo = center[0] - radius
        hi = center[0] + radius
        surf.ellipse([lo, lo, hi, hi], ring_pen, clear_brush)
        surf.flush()

        # The mask becomes the alpha channel of the grey canvas.
        canvas.putalpha(mask)

        return np.asarray(canvas)
Exemple #6
0
def split_and_compress_layers(layers, tilewidth, tileheight):
    unique_tiles = []
    compressed_layers = []
    for l, (layer_file, depth) in enumerate(layers):
        print "Splitting and compressing %s" % layer_file
        tiles = []

        layer = Image.open(layer_file)
        layerwidth, layerheight = layer.size

        for tile_buffer in crop(layer, tilewidth, tileheight):
            tile = Image.new('RGB', [tilewidth, tileheight], 255)
            tile.paste(tile_buffer)
            tile_str = tile.tostring()

            if tile_str not in unique_tiles:
                unique_tiles.append(tile_str)
            tiles.append(unique_tiles.index(tile_str))

            del tile
        del layer
        compressed_layers.append((layerwidth, layerheight, depth, tiles))

        print "Layer %d has %d unique tiles out of %d" % (l, len(set(compressed_layers[l][3])), len(compressed_layers[l][3]))
    return unique_tiles, compressed_layers
Exemple #7
0
    def test_project_manager(self):
        """Project sprite manager emits both sprite images and stylesheets,
        with the expected corner pixels and CSS content."""
        manager = self.generate_manager(glue.ProjectSpriteManager, 'multiple')
        manager.process()

        rgb_img_path = os.path.join(self.output_path, 'rgb.png')
        rgb_css_path = os.path.join(self.output_path, 'rgb.css')
        mix_img_path = os.path.join(self.output_path, 'mix.png')
        mix_css_path = os.path.join(self.output_path, 'mix.css')
        self.assertTrue(os.path.isfile(rgb_img_path))
        self.assertTrue(os.path.isfile(rgb_css_path))
        self.assertTrue(os.path.isfile(mix_img_path))
        self.assertTrue(os.path.isfile(mix_css_path))

        image = Image.open(rgb_img_path)

        # One sample pixel per sprite quadrant.
        self.assertEqual(image.getpixel((0, 0)), RED)
        self.assertEqual(image.getpixel((25, 0)), GREEN)
        self.assertEqual(image.getpixel((0, 25)), BLUE)
        self.assertEqual(image.getpixel((25, 25)), TRANSPARENT)

        # FIX: the original opened the CSS files with plain open() and only
        # closed them after the assertions, leaking the handles whenever an
        # assertion failed; context managers close them unconditionally.
        with open(rgb_css_path) as css:
            self.assertEqualCSS(css.read(), EXPECTED_PROJECT_RGB_CSS)

        image = Image.open(mix_img_path)

        self.assertEqual(image.getpixel((0, 0)), YELLOW)
        self.assertEqual(image.getpixel((25, 0)), PINK)
        self.assertEqual(image.getpixel((0, 25)), CYAN)
        self.assertEqual(image.getpixel((25, 25)), TRANSPARENT)

        with open(mix_css_path) as css:
            self.assertEqualCSS(css.read(), EXPECTED_PROJECT_MIX_CSS)
Exemple #8
0
    def basehash(self, path=True):
        """Root-mean-square difference of the two images' RGB histograms.

        :param path: when True, open the images at ``self.image_a_path`` /
            ``self.image_b_path``; otherwise open the in-memory binaries
            ``self.image_a_binary`` / ``self.image_b_binary``.
        :return: float RMS value (also stored on ``self.value_of_phash``).
        """
        import math
        import operator
        # FIX: reduce() is not a builtin on Python 3; functools.reduce works
        # on both 2.6+ and 3.
        from functools import reduce
        from PIL import Image

        if path:
            image1 = Image.open(self.image_a_path)
            image2 = Image.open(self.image_b_path)
        else:
            image1 = Image.open(self.image_a_binary)
            image2 = Image.open(self.image_b_binary)

        # FIX: the original tested `not image1.size is image2.size`, an
        # identity comparison on tuples that is practically always True, so
        # even equal-sized images were needlessly resized. Compare by value.
        if image1.size != image2.size:
            image2 = image2.resize(image1.size)

        h1 = image1.convert('RGB').histogram()
        h2 = image2.convert('RGB').histogram()

        rms = math.sqrt(
            reduce(operator.add, list(map(lambda a, b: (a - b) ** 2, h1, h2)))
            /
            len(h1)
        )
        self.value_of_phash = rms
        return rms
def translate(name,text):
    """Render *text* as an image built from per-glyph bitmap tiles.

    Rows are delimited by "@"; each row is 40px tall. The result is cropped
    to the widest rendered row, saved as <name>.png next to the tiles, and
    shown. Glyph lookup goes through convert(); glyphs mapping to "null"
    are dropped.
    """
    path = sys.path[0]+"\TP\\"
    im = Image.open(path+"space.bmp")
    rows = text.split("@")
    # Canvas sized from the longest row (42px per character budget).
    widest = 0
    for row in rows:
        if len(row) > widest:
            widest = len(row)
    diagram = Image.new("RGBA",(widest * 42,len(rows) * 40),(255,255,255))
    longest = 0
    for row_idx in range(len(rows)):
        glyphs = [g for g in (convert(ch) for ch in rows[row_idx]) if g != "null"]
        pos = 0
        # NOTE: glyph tiles are pasted while walking the list backwards,
        # as in the original.
        for j in range(len(glyphs)):
            k = len(glyphs)-j-1
            im = Image.open(path+glyphs[k]+".bmp")
            (le,up,ri,bo) = im.getbbox()
            diagram.paste(im,(pos,row_idx*40,pos+ri,(row_idx+1)*40))
            pos += ri+1
        if pos > longest:
            longest = pos
    # Trim unused space on the right before saving.
    diagram = diagram.crop((0,0,longest-1,len(rows)*40))
    diagram.save(path+name+".png")
    diagram.show()
Exemple #10
0
def preprocess(file_name, variations, storage):
    """Crop the stored image to an anti-aliased disk, downscale it, replace
    the original in *storage*, then render the stdimage variations.

    Returns False to suppress the caller's default rendering.
    """
    with storage.open(file_name) as source:
        with Image.open(source) as img:
            target_format = 'PNG'

            # Cap at 1000x1000 while keeping the aspect ratio.
            img.thumbnail((1000, 1000), resample=Image.ANTIALIAS)

            # Build the circular mask at 3x resolution and downsample it,
            # which anti-aliases the disk edge.
            diameter = min(1000, img.size[1], img.size[0])
            oversized = (diameter * 3, diameter * 3)
            mask = Image.new('L', oversized, 0)
            ImageDraw.Draw(mask).ellipse((0, 0) + oversized, fill=255)
            mask = mask.resize((diameter, diameter), Image.ANTIALIAS)

            # Centre-crop to the mask size and punch the disk in as alpha.
            disk = ImageOps.fit(img, mask.size, centering=(0.5, 0.5))
            disk.putalpha(mask)

            with BytesIO() as buffer:
                disk.save(buffer, target_format)
                payload = ContentFile(buffer.getvalue())
                # Swap the original upload for the processed version under
                # the same name.
                storage.delete(file_name)
                storage.save(file_name, payload)

    # render stdimage variations
    render_variations(file_name, variations, replace=True, storage=storage)

    return False  # prevent default rendering
Exemple #11
0
    def test_12bit_rawmode(self):
        """ Are we generating the same interpretation
        of the image as Imagemagick is? """

        # Image.DEBUG = True
        actual = Image.open('Tests/images/12bit.cropped.tif')

        # Reference produced with imagemagick:
        #   convert 12bit.cropped.tif -depth 16 tmp.tif
        #   convert tmp.tif -evaluate RightShift 4 12in16bit2.tif
        # imagemagick auto-scales so a 12bit FFF becomes 16bit FFF0, so the
        # reference is unshifted to make the integer values comparable.
        expected = Image.open('Tests/images/12in16bit.tif')

        if Image.DEBUG:
            for img in (actual, expected):
                for row in range(3):
                    print (img.getpixel((0, row)))

        self.assert_image_equal(actual, expected)
Exemple #12
0
 def __init__(self, master, func):
     """Borderless splash window with a progress bar and status label.

     On Python 2 it pre-blends 11 frames between two splash PNGs for a glow
     animation; on Python 3 it falls back to a single static .ppm. Once the
     window is laid out, *func* is started on a background FuncThread with
     endSplash as its completion callback.
     """
     Tkinter.Toplevel.__init__(self, master, relief=Tkinter.SOLID, highlightthickness=1, highlightcolor=fg)
     self.root = master
     # Hide the main window while the splash is showing.
     self.root.withdraw()
     # No window-manager decorations on the splash.
     self.overrideredirect(Tkinter.TRUE)
     self.progress = Progressbar(self)
     if not config.python3:
         self.image1 = Image.open(config.relinuxdir + "/splash.png")
         self.image2 = Image.open(config.relinuxdir + "/splash_glowy.png")
         self.images = []
         # 11 blend steps from 0.0 to 1.0 between plain and glowy splash.
         for i in range(0, 11):
             percent = float(float(i) / 10)
             self.images.append(ImageTk.PhotoImage(Image.blend(self.image1, self.image2, percent)))
         # self.image = ImageTk.PhotoImage(Image.blend(self.image1, self.image2, 0.0))
         self.image = self.images[0]
         self.imgw = self.image.width()
         self.imgh = self.image.height()
     else:
         # PIL/ImageTk path unavailable here; use Tk's native .ppm loader.
         self.image = Tkinter.PhotoImage(file=config.relinuxdir + "/splash.ppm")
         self.imgw = self.image.width()
         self.imgh = self.image.height()
     self.textvar = Tkinter.StringVar()
     self.progresstext = Label(self, textvariable=self.textvar, height=15, width=480, anchor=Tkinter.W)
     # Window is the image plus 32px for the progress widgets, centred on
     # the screen.
     self.w = self.imgw
     self.h = self.imgh + 32
     self.x = self.root.winfo_screenwidth() / 2 - self.w / 2
     self.y = self.root.winfo_screenheight() / 2 - self.h / 2
     self.geometry("%dx%d+%d+%d" % (self.w, self.h, self.x, self.y))
     self.panel = Label(self, image=self.image)
     self.panel.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, expand=True)
     self.progress.pack(side=Tkinter.BOTTOM, fill=Tkinter.X, expand=True)
     self.progresstext.pack(side=Tkinter.BOTTOM, fill=Tkinter.X, expand=True)
     # Force an initial draw before the worker starts.
     self.update()
     self.thread = FuncThread(func, self.endSplash, self)
     self.thread.start()
def test_write_svg_to_png(filename):
    """Render the named SVG fixture to PNG and compare it against the
    checked-in reference rendering.

    To regenerate the fixtures (e.g. after a template change), patch
    write_svg_to_png to keep its temporary file (delete:False in temp_args)
    and copy the svg out of /tmp. Output png files live in
    user-media/version-previews/full and /thumbs.
    """
    # NOTE: mktemp is race-prone; kept because write_svg_to_png needs a
    # not-yet-existing path.
    out = tempfile.mktemp()
    svg_xml = os.path.join(
        settings.ROOT,
        'src/olympia/versions/tests/static_themes/%s.svg' % filename)
    svg_png = os.path.join(
        settings.ROOT,
        'src/olympia/versions/tests/static_themes/%s.png' % filename)
    with storage.open(svg_xml, 'rb') as svgfile:
        svg = svgfile.read()
    write_svg_to_png(svg, out)
    assert storage.exists(out)

    # Compare pixel content via RMS difference. It should be 0, but travis
    # renders slightly differently — 3 is the magic tolerance.
    reference = Image.open(svg_png)
    rendered = Image.open(out)
    delta_hist = ImageChops.difference(reference, rendered).histogram()
    sum_of_squares = sum(count * ((idx % 256) ** 2)
                         for idx, count in enumerate(delta_hist))
    pixel_count = float(reference.size[0] * reference.size[1])
    rms = math.sqrt(sum_of_squares / pixel_count)

    assert rms < 3
Exemple #14
0
def _is_image_file(path):
    """Whether the file is a readable image file via Pillow."""
    try:
        # FIX: use a context manager so the underlying file handle is
        # released immediately instead of lingering until GC.
        with pImage.open(path):
            return True
    except Exception:
        # FIX: narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as "not an image".
        return False
Exemple #15
0
 def _apply_watermark(self, datafile):
     """Stamp the parent's watermark text diagonally across *datafile*.

     Returns a rewound StringIO containing the watermarked image, re-encoded
     in the source image's original format at the configured photo quality.
     """
     text = self.aq_parent.watermark_text
     FONT = os.path.join(os.path.dirname(__file__), 'fonts', 'VeraSeBd.ttf')
     img = Image.open(datafile)
     newimg = StringIO()
     fmt = img.format
     # Transparent overlay, same size as the photo, to draw the text on.
     watermark = Image.new("RGBA", (img.size[0], img.size[1]))
     draw = ImageDraw.ImageDraw(watermark, "RGBA")
     # Grow the font one point at a time until the rendered text (plus a
     # third of its height as margin) would overflow the image width, then
     # keep the last size that fit.
     size = 0
     while True:
         size += 1
         nextfont = ImageFont.truetype(FONT, size)
         nexttextwidth, nexttextheight = nextfont.getsize(text)
         if nexttextwidth+nexttextheight/3 > watermark.size[0]:
             break
         font = nextfont
         textwidth, textheight = nexttextwidth, nexttextheight
     draw.setfont(font)
     # Centre the text, then rotate the overlay onto the image's diagonal.
     draw.text(((watermark.size[0]-textwidth)/2,
                (watermark.size[1]-textheight)/2), text)
     watermark = watermark.rotate(degrees(atan(float(img.size[1])/img.size[0])),
                              Image.BICUBIC)
     # Cap the alpha at 88 so the stamp stays translucent over the photo.
     mask = watermark.convert("L").point(lambda x: min(x, 88))
     watermark.putalpha(mask)
     img.paste(watermark, None, watermark)
     quality = self._photo_quality(datafile)
     img.save(newimg, fmt, quality=quality)
     newimg.seek(0)
     return newimg
def watermark(im, mark, position, opacity=1):
    """Adds a watermark to an image."""
    if opacity < 1:
        mark = reduce_opacity(mark, opacity)
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    # Draw the mark onto a fully transparent overlay the size of the target,
    # then composite the overlay over the image using its own alpha.
    overlay = Image.new('RGBA', im.size, (0,0,0,0))
    if position == 'tile':
        # Repeat the mark across the whole surface.
        for top in range(0, im.size[1], mark.size[1]):
            for left in range(0, im.size[0], mark.size[0]):
                overlay.paste(mark, (left, top))
    elif position == 'scale':
        # Grow the mark as large as fits while preserving its aspect
        # ratio, then centre it.
        ratio = min(float(im.size[0]) / mark.size[0],
                    float(im.size[1]) / mark.size[1])
        scaled = mark.resize((int(mark.size[0] * ratio),
                              int(mark.size[1] * ratio)))
        overlay.paste(scaled, ((im.size[0] - scaled.size[0]) // 2,
                               (im.size[1] - scaled.size[1]) // 2))
    else:
        overlay.paste(mark, position)
    return Image.composite(overlay, im, overlay)
Exemple #17
0
    def test_shipment_service(self):
        """Test the shipment service.

        End-to-end round trip against the configured shipping web service:
        build and process a shipment, optionally display the returned label,
        then delete the shipment by its tracking id.
        """
        service = ShipmentService(CONFIGURATION)
        shipment = service.create_shipment()
        shipment.ShipTimestamp = datetime.now()
        # Fill in label, package and shipment details on the request.
        set_label(shipment.LabelSpecification)
        package = service.create_package()
        set_package(package)
        set_shipment(shipment, package)
        result = service.process(shipment)
        print result
        details = result.CompletedShipmentDetail.CompletedPackageDetails[0]
        # The label image comes back base64-encoded; decode to raw bytes.
        image = details.Label.Parts[0].Image
        binary = a2b_base64(image)

        with NamedTemporaryFile() as png_file:
            png_file.write(binary)

            # PIL is optional here: only display the label when available.
            if Image:
                png_file.seek(0)
                Image.open(png_file.name).show()

        tracking_id = details.TrackingIds[0]
        result = service.delete(tracking_id)
        print result
 def getValicode(self):
     """Grab the page's captcha via a cropped screenshot and OCR it.

     Clicks the captcha-refresh element, screenshots the page, crops the
     captcha region, boosts contrast, and runs OCR; recurses until a
     4-character alphanumeric code is recognised.
     """
     element = self.driver.find_element_by_id("change_cas")
     element.click();
     # Give the refreshed captcha time to render before the screenshot.
     time.sleep(0.5)
     self.driver.get_screenshot_as_file("screenshot.png")
     img = IMG.open('screenshot.png')
     width = img.size[0]
     height =  img.size[1]
     # Fractional coordinates of the captcha box — tied to this page's
     # layout; presumably hand-calibrated. TODO confirm after layout changes.
     region = (int(width*0.50699677), int(height*0.52849162), int(width*0.593110872), int(height*0.57318436))
     cropImg = img.crop(region)
     cropImg.save('1.png')
     image = IMG.open('1.png')
     # Higher contrast improves OCR accuracy on the small crop.
     enhancer = ImageEnhance.Contrast(image)
     image_enhancer = enhancer.enhance(2)
     valicode = image_to_string(image_enhancer)
     if len(valicode)==0:
         # Nothing recognised: refresh and retry.
         return self.getValicode()
     else:
         # Accept only a 4-character alphanumeric prefix.
         pattern = re.compile(r'[0-9,a-z,A-Z]{4}')
         match = pattern.match(valicode)
         if match:
             print valicode
             return valicode
         else:
             return self.getValicode()
def imageStructToPIL(imageRow):
    """
    Convert an image-schema struct row into a PIL image.

    :param imageRow: Row, must have ImageSchema
    :return PIL image
    """
    imgType = imageTypeByOrdinal(imageRow.mode)
    if imgType.dtype != 'uint8':
        raise ValueError("Can not convert image of type " +
                         imgType.dtype + " to PIL, can only deal with 8U format")
    ary = imageStructToArray(imageRow)
    # PIL expects RGB order, image schema is BGR
    # => we need to flip the order unless there is only one channel
    if imgType.nChannels != 1:
        ary = _reverseChannels(ary)
    # Channel count determines the PIL mode.
    pil_mode = {1: 'L', 3: 'RGB', 4: 'RGBA'}.get(imgType.nChannels)
    if pil_mode is None:
        raise ValueError("don't know how to convert " +
                         imgType.name + " to PIL")
    return Image.fromarray(obj=ary, mode=pil_mode)
def leftpanel(frame):
    """Build the white left-hand panel: a top picture scaled to the panel
    height, with the logo below it scaled to the picture's width."""
    panel_height = 580  # image height

    left = wx.Panel(frame, wx.ID_ANY)
    left.SetBackgroundColour('#FFFFFF')
    sizer = wx.BoxSizer(wx.VERTICAL)

    # Top picture, shrunk to fit the panel height with 10px margins.
    picture_panel = wx.Panel(left, wx.ID_ANY)
    pil = Image.open('store/gui/top.jpg')
    scale = float(panel_height - 10 * 2) / float(pil.size[1])
    pil.thumbnail((int(pil.size[0] * scale), int(pil.size[1] * scale)), Image.ANTIALIAS)
    image = wx.EmptyImage(pil.size[0], pil.size[1])
    image.SetData(pil.convert('RGB').tostring())
    wx.StaticBitmap(picture_panel, wx.ID_ANY, image.ConvertToBitmap())
    sizer.Add(picture_panel, flag=wx.ALL, border=10)

    # Logo, scaled to the width of the picture above (minus margins).
    logo_panel = wx.Panel(left, wx.ID_ANY)
    pil = Image.open('store/gui/logo.png')
    scale = float(image.GetWidth() - 10 * 2) / float(pil.size[0])
    logo_size = (int(pil.size[0] * scale), int(pil.size[1] * scale))
    pil.thumbnail(logo_size, Image.ANTIALIAS)
    image = wx.EmptyImage(pil.size[0], pil.size[1])
    image.SetData(pil.convert('RGB').tostring())
    wx.StaticBitmap(logo_panel, wx.ID_ANY, image.ConvertToBitmap())
    logo_panel.SetSize(logo_size)
    sizer.Add(logo_panel, flag=wx.ALL, border=10)

    left.SetSizer(sizer)
    return left
def resizeImage(subdir, infile, output_dir=""):
     """Shrink a .JPG and stamp the JXX logo in its bottom-right corner,
     saving the result as <name>_min<ext> under subdir/output_dir."""
     outfile = os.path.splitext(infile)[0]+"_min"
     extension = os.path.splitext(infile)[1]
     w=400
     # Logo watermark, scaled down to a tenth of the target width.
     logo = Image.open('../Logo/JXX.png')
     logo.thumbnail((w/10, w/10))
     # cmp() (Python 2 only) is non-zero when the strings differ, so any
     # extension other than exactly ".JPG" (case-sensitive) is skipped.
     if (cmp(extension, ".JPG")):
        return

     if infile != outfile:
        try :
            im = Image.open(subdir+"/"+infile)
            width, height = im.size
            # NOTE(review): this bound looks dimensionally wrong (nh can be
            # far larger than the image), but thumbnail() preserves aspect
            # ratio and caps at w, so output width still tops out at 400.
            # Confirm the intended geometry before changing it.
            if(width>height):
                nh = width*height/w
            else:
                nh = w
                w = height*width/nh
            im.thumbnail((w, nh), Image.ANTIALIAS)

            image_copy = im.copy()
            # Bottom-right corner, 10px margin; logo used as its own mask.
            position = ((image_copy.width - logo.width - 10), (image_copy.height - logo.height - 10))
            image_copy.paste(logo, position, logo)
            image_copy.save(subdir+"/"+output_dir+outfile+extension,"JPEG")
        except IOError:
            print "cannot reduce image for ", infile
    def update_image_sizes( sender, **kwargs):
        # if main image is too big, resize it; make a thumbnail image
        # (post_save handler for ImageRecord — see the connect() call below).
        img_rec = kwargs.get('instance', None)
        if img_rec is None:
            return

        # (1) resize main image in place when either side exceeds the cap
        if img_rec.main_image.width > MAX_MAIN_IMAGE_WIDTH or img_rec.main_image.height > MAX_MAIN_IMAGE_WIDTH:
            im = Image.open(img_rec.main_image.file.name)   # open image
            im.thumbnail((MAX_MAIN_IMAGE_WIDTH, MAX_MAIN_IMAGE_WIDTH), Image.ANTIALIAS) # resize
            im.save(img_rec.main_image.file.name, quality=90)   #save
        
        # (2) make a thumbnail
        thumb = Image.open(img_rec.main_image.file.name)    # open the main image
        thumb.thumbnail((MAX_THUMB_IMAGE_WIDTH, MAX_THUMB_IMAGE_WIDTH), Image.ANTIALIAS)
        thumb_fullpath = os.path.join(settings.MEDIA_ROOT\
                        , img_rec.get_image_upload_directory_thumb(os.path.basename(img_rec.main_image.path)) )

        # if needed, make thumb directory
        if not os.path.isdir(os.path.dirname(thumb_fullpath)):
            os.makedirs(os.path.dirname(thumb_fullpath))
        # save file
        thumb.save(thumb_fullpath, quality=100)

        # disconnect save signal, save the ImageRecord, and reconnect signal
        # (otherwise img_rec.save() would recurse back into this handler)
        post_save.disconnect(ImageRecord.update_image_sizes, sender=ImageRecord)        
        # update/save django model
        img_rec.thumb_image.name = img_rec.get_image_upload_directory_thumb(os.path.basename(thumb_fullpath))
        img_rec.save()
        post_save.connect(ImageRecord.update_image_sizes, sender=ImageRecord)
Exemple #23
0
def downloader(opener, filename, s, jpg=None, png=None):
    """Fetch a JPG base and a PNG overlay, composite them, and save the
    result as *filename* (skipped when the file already exists).

    NOTE(review): this chunk appears truncated — the outer ``try:`` below
    has no matching except/finally in view, and the semaphore *s* acquired
    here is never released in the visible code (presumably a
    ``finally: s.release()`` follows in the full source). Confirm.
    """
    s.acquire()
    try:
        if not os.path.exists(filename):
            log("Download %s" % (filename))
            # Fetch the JPG; on HTTP failure leave an empty marker so the
            # composite step below is skipped.
            try:
                page = opener.open(jpg)
                dJPG = page.read()
                imageStringJPG = cStringIO.StringIO(dJPG)
                imageStringJPG.seek(0)
                page.close()
            except urllib2.HTTPError, e:
                imageStringJPG = ""
                log("Error %s" % (e))

            # Same for the PNG overlay.
            try:
                page = opener.open(png)
                dPNG = page.read()
                imageStringPNG = cStringIO.StringIO(dPNG)
                imageStringPNG.seek(0)
                page.close()
            except urllib2.HTTPError, e:
                imageStringPNG = ""
                log("Error %s" % (e))

            # Composite only when both downloads succeeded.
            if imageStringJPG and imageStringPNG:
                imageJPG = Image.open(imageStringJPG)
                imagePNG = Image.open(imageStringPNG)
                # Alpha channel of the PNG, used as the paste mask below
                # (paste(im, mask): PIL treats an Image second arg as mask).
                A = imagePNG.convert('RGBA').split()[-1]
                imageJPG.paste(imagePNG, A)
                imageJPG.save(filename, quality=100)
                imageStringJPG.close()
                imageStringPNG.close()
def fix_generated_thumbs(file, is_verbose, fix_thumb):
    """Pad (fix) or un-pad (reverse fix) a generated cover thumbnail.

    A 96-DPI thumb gets a 45px white strip appended and is re-tagged 72 DPI;
    a 72-DPI thumb gets the strip cropped back off and is re-tagged 96 DPI.
    Always returns False.
    """
    try:
        cover = Image.open(file)
    except IOError:
        return False
    # Images without DPI metadata are treated as 96 DPI.
    dpi = cover.info.get("dpi", (96, 96))
    if dpi == (96, 96) and fix_thumb:
        if is_verbose:
            print('* Fixing generated thumbnail "%s"...' % (file))
        padded = Image.new("L", (cover.size[0], cover.size[1] + 45),
                           "white")
        padded.paste(cover, (0, 0))
        padded.save(file, dpi=[72, 72])
    elif dpi == (72, 72) and not fix_thumb:
        if is_verbose:
            print('* Reverse fix for generated thumbnail "%s"...' % (file))
        trimmed = Image.new("L", (cover.size[0], cover.size[1] - 45),
                            "white")
        trimmed.paste(cover, (0, 0))
        trimmed.save(file, dpi=[96, 96])
    else:
        if is_verbose:
            print('* Generated thumbnail "%s" is OK. DPI: %s. Skipping...'
                  % (os.path.basename(file), dpi))
    return False
Exemple #25
0
def save_sample_pictures():
    """Tile 24 random samples into a comparison grid and save it as PNG.

    Pulls the first batch from ``test_stream``, runs it through the
    autoencoder, and pastes target / input / reconstruction images side by
    side (2 groups of 3 columns x 12 rows) into a timestamped file.
    """
    # Grab just the first batch from the epoch iterator.
    for te_train, te_target in test_stream.get_epoch_iterator():
        break
    te_out, te_ta = ae_encode(input_transform(te_train), target_transform(te_target))
    te_reshape = inverse(te_out)
    te_target_reshape = inverse(te_ta)

    # 6 columns of 128px (2 groups x 3 images) by 12 rows of 160px.
    new_size = (128 * 6, 160 * 12)
    new_im = Image.new('RGB', new_size)
    # 24 distinct random sample indices, arranged 2 x 12.
    r = np.random.choice(128, 24, replace=False).reshape(2,12)
    for i in range(2):
        for j in range(12):
            index =  r[i][j]
            # Concatenate along the channel axis, then convert YUV -> RGB;
            # presumably te_train holds the luma channel and the decoded
            # tensors the chroma — TODO confirm against the model code.
            a1 = np.concatenate((te_train[index],te_target_reshape[index]),axis=2)
            a1 = YUV2RGB(a1)
            a2 = np.concatenate((te_train[index],te_train[index]),axis=2)
            a2 = np.concatenate((a2,te_train[index]),axis=2)
            a3 = np.concatenate((te_train[index],te_reshape[index]),axis=2)
            a3 = YUV2RGB(a3)
            target_im = Image.fromarray(a1.astype(np.uint8))
            train_im = Image.fromarray(a2.astype(np.uint8))
            im = Image.fromarray(a3.astype(np.uint8))

            # Column layout per group: target | input | reconstruction.
            new_im.paste(target_im, (128 * i * 3, 160 * j))
            new_im.paste(train_im, (128 * (i * 3 + 1), 160 * j))
            new_im.paste(im, (128 * (i * 3 + 2), 160 * j))
    img_loc = "/data/chencj/Face/gen_images/%i.png" %int(time())
    print "saving images to %s" %img_loc
    new_im.save(img_loc)
Exemple #26
0
def load(filepath, rescale=True, dtype='float64'):
    """Load an image file into a numpy array of shape (rows, cols, channels).

    :param filepath: path of the image file to read.
    :param rescale: when true, divide pixel values by 255.
    :param dtype: dtype of the returned array; the combination
        rescale-off + 'uint8' takes a fast raw path.
    :raises Exception: if the file cannot be opened as an image.
    :raises AssertionError: if the decoded array is not 2- or 3-dimensional.
    """
    # isinstance instead of `type(...) == str` so str subclasses pass too.
    assert isinstance(filepath, str)

    # Fast path: raw uint8 pixels straight from PIL, no float conversion.
    # `== False` (not `is False` / `not rescale`) is kept deliberately to
    # preserve the historic treatment of 0/0.0 vs. None.
    if rescale == False and dtype == 'uint8':
        rval = np.asarray(Image.open(filepath))
        assert rval.dtype == 'uint8'
        return rval

    s = 255. if rescale else 1.0
    try:
        rval = Image.open(filepath)
    except Exception:
        # FIX: narrowed from a bare except so SystemExit/KeyboardInterrupt
        # propagate instead of being converted into this Exception.
        raise Exception("Could not open " + filepath)

    # FIX: np.cast was removed in NumPy 2.0; asarray with an explicit dtype
    # performs the same (unsafe) cast.
    rval = np.asarray(rval, dtype=dtype) / s

    # Promote greyscale (rows, cols) to (rows, cols, 1).
    if len(rval.shape) == 2:
        rval = rval.reshape(rval.shape[0], rval.shape[1], 1)

    if len(rval.shape) != 3:
        raise AssertionError("Something went wrong opening " +
                filepath + '. Resulting shape is ' + str(rval.shape) +
                " (it's meant to have 3 dimensions by now)")

    return rval
def test_sanity():
    """Pixel round trips: putpixel/getpixel, copy-on-write of a read-only
    image, and the load() pixel-access objects."""
    reference = lena()
    width, height = reference.size

    # Round trip 1: plain putpixel/getpixel copy.
    clone = Image.new(reference.mode, reference.size, 0)
    for yy in range(height):
        for xx in range(width):
            clone.putpixel((xx, yy), reference.getpixel((xx, yy)))
    assert_image_equal(reference, clone)

    # Round trip 2: writing through putpixel must clear the readonly flag.
    clone = Image.new(reference.mode, reference.size, 0)
    clone.readonly = 1
    for yy in range(height):
        for xx in range(width):
            clone.putpixel((xx, yy), reference.getpixel((xx, yy)))
    assert_false(clone.readonly)
    assert_image_equal(reference, clone)

    # Round trip 3: copy via the load() pixel-access objects.
    clone = Image.new(reference.mode, reference.size, 0)
    src_px = reference.load()
    dst_px = clone.load()
    for yy in range(height):
        for xx in range(width):
            dst_px[xx, yy] = src_px[xx, yy]
    assert_image_equal(reference, clone)
def run_me(run_time=None):
    """Run the un-crop pipeline over the sample inputs, retrying each image
    horizontally mirrored when the normal orientation fails; optionally
    print the elapsed wall time."""
    if run_time:
        start_time = time.time()

    base_dir = os.path.dirname(os.path.realpath(__file__))

    inp_paths = [os.path.join(base_dir, 'Input', 'Two Crop test.png'),
                 os.path.join(base_dir, 'Input', 'Two Crop test2.png')]
    orig_path = os.path.join(base_dir, 'Input', 'Two Infrared test.png')
    out_path = os.path.join(base_dir, 'Output', 'UnCropResult.png')

    orig = Image.open(orig_path)

    for img_path in inp_paths:
        try:
            main(Image.open(img_path), orig)
        except Exception as e:
            # Retry with both images mirrored left-to-right.
            flipped = Image.open(img_path).transpose(Image.FLIP_LEFT_RIGHT)
            main(flipped, orig.transpose(Image.FLIP_LEFT_RIGHT))

    if run_time:
        print("\n--- %s seconds ---" % (time.time() - start_time))
Exemple #29
0
    def check(self, mode, c=None):
        """Pixel round trips for *mode*: putpixel/getpixel, new() fill
        colour, and IndexError on zero-sized images."""
        if not c:
            c = self.color(mode)

        # Round trip through putpixel/getpixel on a 1x1 image.
        im = Image.new(mode, (1, 1), None)
        im.putpixel((0, 0), c)
        self.assertEqual(
            im.getpixel((0, 0)), c,
            "put/getpixel roundtrip failed for mode %s, color %s" % (mode, c))

        # Zero-sized image: both accessors must raise IndexError.
        empty = Image.new(mode, (0, 0), None)
        with self.assertRaises(IndexError):
            empty.putpixel((0, 0), c)
        with self.assertRaises(IndexError):
            empty.getpixel((0, 0))

        # The fill colour given to new() must read back unchanged.
        filled = Image.new(mode, (1, 1), c)
        self.assertEqual(
            filled.getpixel((0, 0)), c,
            "initial color failed for mode %s, color %s " % (mode, c))

        # Zero-sized image created with a fill colour behaves the same.
        empty = Image.new(mode, (0, 0), c)
        with self.assertRaises(IndexError):
            empty.getpixel((0, 0))
Exemple #30
0
def image(path):
  """Serve an image from the app's images directory, optionally scaled.

  Query params h / v give maximum horizontal / vertical sizes (each pass
  re-encodes as PNG). Sizes above 1000, or paths containing "..", are
  rejected with a 500.
  """
  if '..' in path:
    abort(500)
  # FIX: open in binary mode (image bytes are not text) and close the
  # handle deterministically — the original leaked the descriptor.
  with open(join(app.root_path, "images", path), "rb") as fd:
    data = fd.read()

  hsize = int(request.args.get("h", 0))
  vsize = int(request.args.get("v", 0))
  if hsize > 1000 or vsize > 1000:
    abort(500)

  if hsize:
    image = Image.open(StringIO(data))
    x, y = image.size

    # Cap the width at hsize, scaling the height proportionally.
    x1 = hsize
    y1 = int(1.0 * y * hsize / x)
    image.thumbnail((x1, y1), Image.ANTIALIAS)
    output = StringIO()
    image.save(output, "PNG")
    data = output.getvalue()
  if vsize:
    image = Image.open(StringIO(data))
    x, y = image.size

    # Cap the height at vsize, scaling the width proportionally.
    x1 = int(1.0 * x * vsize / y)
    y1 = vsize
    image.thumbnail((x1, y1), Image.ANTIALIAS)
    output = StringIO()
    image.save(output, "PNG")
    data = output.getvalue()

  response = make_response(data)
  # FIX: guess_type returns a (type, encoding) tuple; the header needs only
  # the MIME type string.
  response.headers['content-type'] = mimetypes.guess_type(path)[0]
  return response
Exemple #31
0
def oulu_casia_get_data_set(_images_root_path,
                            _image_condition='VL',
                            _lighting_condition='Strong',
                            _max_im_per_seq=float('inf'),
                            _image_resolution=(_image_width, _image_height),
                            _return_min_sequence=False):
    '''
    Input 1: Path of OriginalImg/Preprocess Img (Set B or C)
    Input 2: Image Condition Required (Default: VL)
    Input 3: Lighting Condition Required (Default: Strong)
    Input 4: Max Images Per Sequence (Default: All)
    Input 5: Image Resolution in (Width, Height) (Default: Original)
    Input 6: Flag to return Minimum Length of All Sequences (Default: False)
    Purpose: Get OULU CASIA Dataset from original directory structure into 2
             lists denoting image sequences and the corresponding emotion label
    Output: [[[Image Sequences]], [Emotion Labels], <Min Length>]
    '''
    # Validate Arguments
    contents = set(os.listdir(_images_root_path))
    if set.intersection(contents, _image_conditions) != _image_conditions:
        raise Exception('Invalid Path Passed')
    if _image_condition not in _image_conditions:
        raise Exception('Invalid Image Condition Passed')
    # BUG FIX: the original tested ``_lighting_condition not in
    # _lighting_condition`` (membership in itself), which never raised.
    # Assumes a module-level ``_lighting_conditions`` collection parallel
    # to ``_image_conditions``.
    if _lighting_condition not in _lighting_conditions:
        raise Exception('Invalid Lighting Condition Passed')

    # Crawl and retrieve data
    next_path = _images_root_path + '/' + _image_condition
    next_path += '/' + _lighting_condition
    sequence_id = 0
    sequences = {}
    min_sequence = float('inf')
    # Raw string avoids an invalid-escape warning for '\.'
    _file_extension = re.compile(r'.*\.(.*)')
    _emotion_labels = set(_emotion_label_to_idx.keys())
    for person in os.listdir(next_path):
        person_path = next_path + '/' + person
        emotion_dirs = set(os.listdir(person_path))
        # Handle missing emotion images
        if set.intersection(emotion_dirs, _emotion_labels) != _emotion_labels:
            missing_emotion = _emotion_labels - emotion_dirs
            raise Exception('Emotions {0} missing from {1}'.format(
                missing_emotion, person_path))
        # Construct image sequence per emotion
        for emotion in _emotion_labels:
            emotion_path = person_path + '/' + emotion
            images_list = sorted(os.listdir(emotion_path))
            image_sequence = []
            for image_name in images_list:
                image_path = emotion_path + '/' + image_name
                extension = _file_extension.findall(image_name)[0]
                if extension == _image_extension:
                    image = Image.open(image_path)
                    # Only resize when a non-default resolution is requested.
                    if _image_resolution != (_image_width, _image_height):
                        image = image.resize(_image_resolution)
                    image_arr = np.array(image)
                    image_sequence.append(image_arr)
            image_sequence = np.array(image_sequence)
            # Useful: Keep track of length of smallest sequence
            if len(image_sequence) < min_sequence:
                min_sequence = len(image_sequence)
            # Summarize sequence by smartly reducing effective sequence length
            image_sequence = _reduce_sequence_len(image_sequence,
                                                  _max_im_per_seq, True)
            # Pack image sequence and corresponding emotion
            sequences[sequence_id] = [image_sequence, emotion]
            sequence_id += 1
    # Unpack and form association
    image_sequences = np.array(
        [seq_data[0] for seq_id, seq_data in sequences.items()])
    emotions = np.array(
        [seq_data[1] for seq_id, seq_data in sequences.items()])
    return_list = [image_sequences, emotions]
    if _return_min_sequence:
        return_list.append(min_sequence)
    return return_list
Exemple #32
0
tclist = listup_files('/Users/kenjimatsumoto1983/ResNetProject/train/TC/*.tif')
nontclist = listup_files('/Users/kenjimatsumoto1983/ResNetProject/train/nonTC/*.tif')


# In[ ]:


# x: image pixel arrays, shape (N, 64, 64)
x = np.empty((0,64,64), np.float32)
# y: binary class labels, shape (N, 1)
y = np.empty((0,1), int)

# Positive samples (tropical cyclone images) -> label 1
for filepath in tclist:
    image = Image.open(filepath)
    item = np.array(image)
    x = np.append(x, np.array([item]), axis=0)
    y = np.append(y, np.array([[1]]), axis=0)

# Negative samples -> label 0
# BUG FIX: the original loop iterated over ``flipath`` but kept opening the
# stale ``filepath``/``item`` left over from the previous loop, so every
# negative row was a copy of the last positive image.
count = 0
for filepath in nontclist:
    image = Image.open(filepath)
    item = np.array(image)
    x = np.append(x, np.array([item]), axis=0)
    y = np.append(y, np.array([[0]]), axis=0)
    count += 1
    print(count)


# In[16]:
Exemple #33
0
from PIL import Image, ImageDraw

# Brightness adjustment: shift every RGB channel by a user-chosen offset.
image = Image.open(r'C:\Users\jekov\Desktop\lab3\test.jpg')
ch_image = Image.open(r'C:\Users\jekov\Desktop\lab3\test.jpg')  # untouched copy
draw = ImageDraw.Draw(image)
width = image.size[0]
height = image.size[1]
pix = image.load()

level = int(input("Введите уровень яркости (от -100 до 100)"))


def _clamp(value):
    # Keep a channel value inside the valid 0..255 range.
    return max(0, min(255, value))


for i in range(width):
    for j in range(height):
        # Shift each channel and clamp, then write the pixel back.
        r = _clamp(pix[i, j][0] + level)
        g = _clamp(pix[i, j][1] + level)
        b = _clamp(pix[i, j][2] + level)
        draw.point((i, j), (r, g, b))
def load_np_image(image_path):
    """Load the image at *image_path* and return it as an RGB numpy array."""
    rgb_image = Image.open(image_path).convert('RGB')
    return np.array(rgb_image)
# Pad the image on both left and right sides by a total of `discrepancy` columns, filling the new columns with white
def fill_blank(im_array, discrepancy):
    """Pad *im_array* horizontally with white (254) columns.

    A total of *discrepancy* columns is added, split as evenly as possible
    between the left and right edges (the right side receives the extra
    column when *discrepancy* is odd).

    :param im_array: 2-D grayscale image array.
    :param discrepancy: total number of columns to add.
    :return: the padded float array.
    """
    left = int(discrepancy / 2)
    right = discrepancy - left
    # Generalized: use the input's own row count instead of the original
    # hard-coded 60, so images of any height can be padded.
    rows = im_array.shape[0]
    left_pad = np.full((rows, left), 254.0)
    right_pad = np.full((rows, right), 254.0)
    return np.hstack((left_pad, im_array, right_pad))


if __name__ == '__main__':
    # Process 1000 captcha images: trim the text region, normalize the size,
    # and split each captcha into four single-character tiles.
    for i in range(1000):
        # Load as 8-bit grayscale ('L').
        pil_im = Image.open('./captcha/' + str(i) + '.png').convert('L')
        im = pylab.array(pil_im)
        # find_start_end presumably returns the horizontal extent of the
        # captcha text -- TODO confirm against its definition.
        start, end = find_start_end(im)
        pcrop = pil_im.crop((start, 0, end, 60))
        pcrop = pcrop.resize((120, 60))
        """
           切割方法1
        """
        # Splitting method 1: four fixed-width 30x60 tiles.
        for k in range(4):
            single = pcrop.crop((30 * k, 0, 30 * (k + 1), 60))
            single.save('./single_captcha/data/' + str(i) + '_' + str(k) +
                        '.png')
        print(i)
        """
           切割方法2
        """
Exemple #36
0
# Halve the image's dimensions in place and save the smaller copy.
from PIL import Image

im = Image.open('articuly.jpg')
w, h = im.size
print(w, h)
half = (w // 2, h // 2)
im.thumbnail(half)
print('Resize:{0}, {1}'.format(half[0], half[1]))
im.save('smallarticuly.jpg', 'jpeg')
def loadImage(filename):
    """Open *filename* and return its raw pixel data as a numpy array."""
    pixel_data = Image.open(filename).getdata()
    return np.asarray(pixel_data)
Exemple #38
0
def image_resize_image(base64_source,
                       size=(1024, 1024),
                       encoding='base64',
                       filetype=None,
                       avoid_if_small=False,
                       upper_limit=False):
    """ Function to resize an image. The image will be resized to the given
        size, while keeping the aspect ratios, and holes in the image will be
        filled with transparent background. The image will not be stretched if
        smaller than the expected size.
        Steps of the resizing:
        - Compute width and height if not specified.
        - if avoid_if_small: if both image sizes are smaller than the requested
          sizes, the original image is returned. This is used to avoid adding
          transparent content around images that we do not want to alter but
          just resize if too big. This is used for example when storing images
          in the 'image' field: we keep the original image, resized to a maximal
          size, without adding transparent content around it if smaller.
        - create a thumbnail of the source image through using the thumbnail
          function. Aspect ratios are preserved when using it. Note that if the
          source image is smaller than the expected size, it will not be
          extended, but filled to match the size.
        - create a transparent background that will hold the final image.
        - paste the thumbnail on the transparent background and center it.

        :param base64_source: base64-encoded version of the source
            image; if False, returns False
        :param size: 2-tuple(width, height). A None value for any of width or
            height mean an automatically computed value based respectively
            on height or width of the source image.
        :param encoding: the output encoding
        :param filetype: the output filetype, by default the source image's
        :type filetype: str, any PIL image format (supported for creation)
        :param avoid_if_small: do not resize if image height and width
            are smaller than the expected size.
        :param upper_limit: when True, never enlarge: the requested sizes are
            clamped to the source image's own dimensions and only the longer
            side is constrained (the other is recomputed from aspect ratio).
    """
    if not base64_source:
        return False
    # Return unmodified content if no resize or we detect first 6 bits of '<'
    # (0x3C) for SVG documents - This will bypass XML files as well, but it's
    # harmless for these purposes
    if size == (None, None) or base64_source[:1] == b'P':
        return base64_source
    image_stream = io.BytesIO(codecs.decode(base64_source, encoding))
    image = Image.open(image_stream)
    # store filetype here, as Image.new below will lose image.format
    filetype = (filetype or image.format).upper()

    # BMP output is remapped to PNG.
    filetype = {
        'BMP': 'PNG',
    }.get(filetype, filetype)

    asked_width, asked_height = size
    if upper_limit:
        # Clamp the requested dimensions to the source's actual size so the
        # image is never upscaled.
        if asked_width:
            if asked_width >= image.size[0]:
                asked_width = image.size[0]
        if asked_height:
            if asked_height >= image.size[1]:
                asked_height = image.size[1]

        # Constrain only the longer side; the other is derived below from
        # the aspect ratio.
        if image.size[0] >= image.size[1]:
            asked_height = None
        else:
            asked_width = None
        if asked_width is None and asked_height is None:
            return base64_source

    # Derive any missing dimension from the source's aspect ratio.
    if asked_width is None:
        asked_width = int(image.size[0] *
                          (float(asked_height) / image.size[1]))
    if asked_height is None:
        asked_height = int(image.size[1] *
                           (float(asked_width) / image.size[0]))
    size = asked_width, asked_height
    # check image size: do not create a thumbnail if avoiding smaller images
    if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
        return base64_source

    if image.size != size:
        image = image_resize_and_sharpen(image, size, upper_limit=upper_limit)
    # JPEG cannot store an alpha channel; convert unsupported modes to RGB.
    if image.mode not in ["1", "L", "P", "RGB", "RGBA"
                          ] or (filetype == 'JPEG' and image.mode == 'RGBA'):
        image = image.convert("RGB")

    background_stream = io.BytesIO()
    image.save(background_stream, filetype)
    return codecs.encode(background_stream.getvalue(), encoding)
def load_resized_img(path):
    """Load *path* as an RGB image scaled to a fixed 256x256."""
    img = Image.open(path)
    return img.convert('RGB').resize((256, 256))
Exemple #40
0
    def _image(self,
               cr,
               uid,
               model,
               id,
               field,
               response,
               max_width=maxint,
               max_height=maxint,
               context=None):
        """ Fetches the requested field and ensures it does not go above
        (max_width, max_height), resizing it if necessary.

        Resizing is bypassed if the object provides a $field_big, which will
        be interpreted as a pre-resized version of the base field.

        If the record is not found or does not have the requested field,
        returns a placeholder image via :meth:`~._image_placeholder`.

        Sets and checks conditional response parameters:
        * :mailheader:`ETag` is always set (and checked)
        * :mailheader:`Last-Modified` is set iff the record has a concurrency
          field (``__last_update``)

        The requested field is assumed to be base64-encoded image data in
        all cases.
        """
        Model = self.pool[model]
        id = int(id)

        ids = Model.search(cr, uid, [('id', '=', id)], context=context)
        # Fall back to a superuser search for published website records the
        # current user cannot read directly.
        if not ids and 'website_published' in Model._all_columns:
            ids = Model.search(cr,
                               openerp.SUPERUSER_ID,
                               [('id', '=', id),
                                ('website_published', '=', True)],
                               context=context)
        if not ids:
            return self._image_placeholder(response)

        concurrency = '__last_update'
        [record] = Model.read(cr,
                              openerp.SUPERUSER_ID, [id], [concurrency, field],
                              context=context)

        # Use the record's concurrency timestamp for Last-Modified.
        if concurrency in record:
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            try:
                response.last_modified = datetime.datetime.strptime(
                    record[concurrency], server_format + '.%f')
            except ValueError:
                # just in case we have a timestamp without microseconds
                response.last_modified = datetime.datetime.strptime(
                    record[concurrency], server_format)

        # Field does not exist on model or field set to False
        if not record.get(field):
            # FIXME: maybe a field which does not exist should be a 404?
            return self._image_placeholder(response)

        # ETag over the raw (still base64) field value.
        response.set_etag(hashlib.sha1(record[field]).hexdigest())
        response.make_conditional(request.httprequest)

        # conditional request match
        if response.status_code == 304:
            return response

        # Python 2 base64 codec decodes the stored field value to raw bytes.
        data = record[field].decode('base64')

        if (not max_width) and (not max_height):
            response.data = data
            return response

        image = Image.open(cStringIO.StringIO(data))
        response.mimetype = Image.MIME[image.format]

        w, h = image.size
        max_w = int(max_width) if max_width else maxint
        max_h = int(max_height) if max_height else maxint

        if w < max_w and h < max_h:
            # Already within bounds: serve the original bytes untouched.
            response.data = data
        else:
            # Downscale in place, preserving aspect ratio and source format.
            image.thumbnail((max_w, max_h), Image.ANTIALIAS)
            image.save(response.stream, image.format)
            # invalidate content-length computed by make_conditional as
            # writing to response.stream does not do it (as of werkzeug 0.9.3)
            del response.headers['Content-Length']

        return response
 def get_img_from_redis(self, key):
     """Fetch the base64 payload stored under *key* and decode it to a PIL Image."""
     encoded = self.redis_connection.get_image_by_key(key)
     raw_bytes = base64.b64decode(encoded)
     return Image.open(io.BytesIO(raw_bytes))
Exemple #42
0
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import codecs
import io

from PIL import Image
from PIL import ImageEnhance
from random import randrange

# Preload PIL with the minimal subset of image formats we need
from odoo.tools import pycompat

Image.preinit()
# Mark PIL as fully initialized so it will not scan for further plugins.
Image._initialized = 2

# Maps only the 6 first bits of the base64 data, accurate enough
# for our purpose and faster than decoding the full blob first
# (the first base64 character encodes the leading bits of the file's
# magic number).
FILETYPE_BASE64_MAGICWORD = {
    b'/': 'jpg',
    b'R': 'gif',
    b'i': 'png',
    b'P': 'svg+xml',
}

# ----------------------------------------
# Image resizing
# ----------------------------------------


def image_resize_image(base64_source,
# digital GPIO pin numbers for all the required display pins.  For example
# on a Raspberry Pi with the 128x32 display you might use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)

# Initialize library.
# NOTE(review): ``disp`` is created earlier in the original script (an
# Adafruit SSD1306 driver instance, see the commented example above) and
# is not visible in this excerpt.
disp.begin()

# Clear display.
disp.clear()
disp.display()

# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))

# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)

# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)

# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
# Negative padding lets drawing butt up against the very top/bottom rows.
padding = -2
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
 def get_img_from_url(image_url):
     """Download *image_url* and return its payload as a PIL Image."""
     resp = requests.get(image_url)
     payload = io.BytesIO(resp.content)
     return Image.open(payload)
 def __getitem__(self, index):
     """Return (transformed RGB image, integer label) for sample *index*.

     Each line of ``self.imgs`` holds a relative path and a label,
     whitespace-separated.
     """
     fields = self.imgs[index].strip().split()
     target = int(fields[1])
     image = Image.open(os.path.join(self.root, fields[0])).convert('RGB')
     return self.transform(image), target
from PIL import Image

png = Image.open("photo/instagram.png")
#print png.getdata()[200]


def getRed(pixel):
    """Return the red (first) channel of a pixel tuple."""
    red_value = pixel[0]
    return red_value


def getGreen(pixel):
    """Return the green (second) channel of a pixel tuple."""
    green_value = pixel[1]
    return green_value


def getBlue(pixel):
    """Return the blue (third) channel of a pixel tuple."""
    blue_value = pixel[2]
    return blue_value


def getAveragePixel(pixel):
    """Return the mean of the pixel's R, G and B channel values."""
    channel_sum = getRed(pixel) + getGreen(pixel) + getBlue(pixel)
    return channel_sum / 3


new_pixels = []
size = png.height * png.width
old_pixels = png.getdata()
for i in range(size):
    old_pixel = old_pixels[i]
    if (i % png.width > png.width / 2):
        new_pixel = getAveragePixel(old_pixel)
    else:
Exemple #47
0
def resize_image(image):
    """Re-encode the file at *image* in place as PNG.

    NOTE(review): despite its name, this only re-saves the image --
    no resizing is performed here.
    """
    Image.open(image).save(image, "PNG")
def generate_predictor_batches(data_folder, image_shape, sequence_length, batch_size):
    """Infinite Keras generator yielding (frame sequences, target positions).

    :param data_folder: root folder with an ``image`` subfolder of frames
        named ``<action>_<class>_<color>_<frame>.png``.
    :param image_shape: (width, height, ...) frames are resized to.
    :param sequence_length: number of consecutive frames per sample.
    :param batch_size: samples per yielded batch.
    """

    images = sorted(glob(os.path.join(data_folder, 'image', '*.png')))
    n_image = len(images)

    # Map each action id to its last seen frame id and its class/color label.
    action_dict = {}
    label_dict = {}

    for i in range(n_image):
        path, img_name = os.path.split(images[i])
        fn, ext = img_name.split(".")
        names = fn.split("_")
        action_id = int(names[0])
        class_id = names[1]
        color_id = names[2]
        frame_id = int(names[3])

        action_dict[action_id] = frame_id
        label_dict[action_id] = class_id + '_' + color_id

    # Build every sliding window of ``sequence_length`` frames per action.
    sequence_list = []
    for i in range(1, len(action_dict) + 1):
        if action_dict.get(i) > sequence_length:
            total_sequence_nbr = action_dict.get(i) - sequence_length + 1
            for j in range(1, total_sequence_nbr + 1):
                curr_list = []
                for k in range(j, j + sequence_length):
                    ac_id = '%06d' % i
                    fr_id = '%06d' % k
                    curr_name = ac_id + '_' + label_dict.get(i) + '_' + fr_id + '.png'
                    file_name = os.path.join(data_folder, 'image', curr_name)
                    curr_list.append(file_name)
                sequence_list.append(curr_list)

    n_sequence = len(sequence_list)

    # this line is just to make the generator infinite, keras needs that
    while True:

        # Randomize the indices to make an array
        indices_arr = np.random.permutation(n_sequence)
        for batch in range(0, len(indices_arr), batch_size):
            # slice out the current batch according to batch-size
            current_batch = indices_arr[batch:(batch + batch_size)]

            # initializing the arrays, x_train and y_train
            x_train = []
            y_train = []

            for i in current_batch:

                image_files = sequence_list[i]
                image_set = []
                label_set = []
                for k in range(sequence_length):
                    image = Image.open(image_files[k])
                    image = image.resize((image_shape[0], image_shape[1]))
                    image = np.asarray(image)
                    # BUG FIX: the loaded frame was computed but never kept,
                    # so every x_train entry was an empty array.
                    image_set.append(image)
                    target_pos = get_target_pos(image_files[k], image_shape)
                    label_set.append(target_pos)
                # Appending them to existing batch
                x_train.append(np.array(image_set))
                y_train.append(np.array(label_set))

            batch_images = np.array(x_train)
            batch_lables = np.array(y_train)
            # normalize image data (not the labels)
            batch_images = batch_images.astype('float32') / 255
            batch_lables = batch_lables.astype('float32')

            yield (batch_images, batch_lables)
from keras.models import load_model
import socket
import sys
import datetime
import csv
import cv2
from PIL import Image
from io import BytesIO
import numpy as np
# Load the trained Nvidia-architecture steering model.
model = load_model("../model/nvidiaModel.h5")
# Read one sample training frame as raw JPEG bytes.
with open("../training/data/images/1574517989418963.jpg", "rb") as f:
    data = f.read()

def image_preprocess(image):
    """Convert an RGB frame into the YUV, blurred, resized, normalized form
    expected by the Nvidia steering model."""
    yuv = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
    blurred = cv2.GaussianBlur(yuv, (3, 3), 0)
    resized = cv2.resize(blurred, (200, 66))  # Image input size of the Nvidia model architecture
    # Scale pixel values into [-1, 1].
    return (resized / 127.5) - 1


# Decode the JPEG bytes, run the preprocessing pipeline, and predict.
image = Image.open(BytesIO(bytearray(data)))
image = np.asarray(image)
image = image_preprocess(image)
# The model expects a leading batch dimension.
image = np.array([image])
# NOTE(review): predict_classes is deprecated/removed in newer Keras.
steering_angle = str(model.predict_classes(image))
print(steering_angle)
Exemple #50
0
def im2torchTransform(imdir, transform=default_transform):
    """Load the image at *imdir*, apply *transform*, and return an HWC numpy array."""
    tensor = transform(Image.open(imdir))
    return tensor.numpy().transpose(1, 2, 0)
    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter.

        Runs once per tick: picks the next frame (unless paused), handles
        mouse-driven bounding-box drawing, then re-schedules itself.
        """
        # On the very first tick, resume from the last frame recorded in
        # the annotation log file.
        if self.frame_no != 0 and self.start == True:
            try:
                self.index = self.img_list.index(
                    str(self.readLogFile[-1].split('\t')[0])) + 1
                self.start = False
            except:
                # NOTE(review): bare except silently ignores a missing log
                # entry; the branch below then takes over.
                pass

        if len(self.readLogFile) != 0 and self.start == True:
            self.index = self.img_list.index(
                str(self.readLogFile[-1].split('\t')[0])) + 1
            self.start = False

        if not self.is_paused:
            # Advance to the next image on disk and compute a scale factor
            # so large frames fit a 640x360 display area.
            self.frame_no = self.img_list[self.index].split('.')[0]
            self.frame = cv2.imread(self.filename + '/' + self.image_folder +
                                    '/' + self.img_list[self.index])
            if self.frame.size >= 640 * 360 * 3:
                self.mul = np.array([640, 360]) / np.array(
                    self.frame.shape[-2::-1])
            else:
                self.mul = [1, 1]

            self.global_image_frame = self.frame
            self.is_paused = True
            # Reset all click/drag coordinates for the new frame.
            self.x_topleft, self.y_topleft, self.x_bottomright, self.y_bottomright, self.x_live, self.y_live = 0, 0, 0, 0, 0, 0
        else:
            self.frame = self.global_image_frame

        cv2image = self.frame.copy()
        r, col, ch = cv2image.shape
        cv2resized = cv2.resize(cv2image,
                                fx=self.mul[0],
                                fy=self.mul[1],
                                dsize=(0, 0))

        if True:  # frame captured without any errors
            # Mouse bindings used to draw a bounding box on the panel.
            self.panel.bind("<Button-1>", self.top_left_click)
            self.panel.bind("<ButtonRelease-1>", self.bottom_right_release)
            self.panel.bind("<B1-Motion>", self.mouse_movement)

            # A new top-left click starts a fresh bounding box.
            if (self.x_topleft, self.y_topleft) != (self.prev_xtl,
                                                    self.prev_ytl):
                self.prev_xtl, self.prev_ytl = self.x_topleft, self.y_topleft
                self.points = [(self.prev_xtl, self.prev_ytl)]
                self.is_paused = True
            # A bottom-right release completes the box; hand it off to a
            # worker thread for annotation.
            if (self.x_bottomright, self.y_bottomright) != (
                    self.prev_xbr, self.prev_ybr) and self.is_paused:
                self.prev_xbr, self.prev_ybr = self.x_bottomright, self.y_bottomright
                self.points += [(self.prev_xbr, self.prev_ybr)]
                thread1 = threading.Thread(target=self.boundingbox,
                                           args=(cv2image, self.frame_no,
                                                 self.points))
                thread1.start()

            # Live drag: draw the in-progress rectangle.
            if (self.x_live, self.y_live) != (self.prev_xl, self.prev_yl):
                self.prev_xl, self.prev_yl = self.x_live, self.y_live
                cv2.rectangle(cv2resized, (self.x_topleft, self.y_topleft),
                              (self.x_live, self.y_live), (0, 255, 0), 1)

            if self.is_paused:
                cv2.rectangle(cv2resized, (self.x_topleft, self.y_topleft),
                              (self.x_live, self.y_live), (0, 255, 0), 1)

            self.current_image = Image.fromarray(
                cv2resized)  # convert image for PIL
            imgtk = ImageTk.PhotoImage(
                image=self.current_image)  # convert image for tkinter
            self.panel.imgtk = imgtk  # anchor imgtk so it does not be deleted by garbage-collector
            self.panel.config(image=imgtk)  # show the image
        self.root.after(
            40 // self.speed_ratio,
            self.video_loop)  # call the same function after 30 milliseconds
Exemple #52
0
class camClass(Image):
    """Simple structure-from-motion camera: holds images, estimates poses,
    and triangulates a sparse point cloud.

    NOTE(review): the original source did not parse (an IPython magic,
    mixed tabs/spaces, and one garbled line); this version restores a
    parseable structure while keeping the intended computations. The
    focal length ``self.f`` is read but never assigned anywhere -- it
    must be set by the caller before projecting.
    """

    def __init__(self, pose_guess=None, Kmat=None):
        self.c = np.array([sensor_x, sensor_y])  # Sensor dimensions (pixels)
        self.images = []
        self.pose_guess = pose_guess
        self.Kmat = Kmat  # camera matrix
        # Renamed from ``pointCloud`` so the attribute no longer shadows
        # the pointCloud() method defined below.
        self.point_cloud = []

    def add_images(self, image):
        """Register an image, seeding its pose with the camera's guess."""
        image.pose = self.pose_guess  # initialize image with guess
        self.images.append(image)

    def rotational_transform(self, pts, pose):
        """Translate and rotate world coordinates into generalized camera
        coordinates.

        ``pts`` holds Easting/Northing/Elevation feature positions; ``pose``
        is (x, y, z, roll, pitch, yaw), the vector being optimized.
        """
        cam_x = pose[0]
        cam_y = pose[1]
        cam_z = pose[2]
        roll = pose[3]
        pitch = pose[4]
        yaw = pose[5]

        r_axis = np.array([[1, 0, 0],
                           [0, 0, -1],
                           [0, 1, 0]])
        r_roll = np.array([[np.cos(roll), 0, -1 * np.sin(roll)],
                           [0, 1, 0],
                           [np.sin(roll), 0, np.cos(roll)]])
        r_pitch = np.array([[1, 0, 0],
                            [0, np.cos(pitch), np.sin(pitch)],
                            [0, -1 * np.sin(pitch), np.cos(pitch)]])
        # 3x4: drops the homogeneous row after the yaw rotation.
        r_yaw = np.array([[np.cos(yaw), -1 * np.sin(yaw), 0, 0],
                          [np.sin(yaw), np.cos(yaw), 0, 0],
                          [0, 0, 1, 0]])
        T = np.array([[1, 0, 0, -cam_x],
                      [0, 1, 0, -cam_y],
                      [0, 0, 1, -cam_z],
                      [0, 0, 0, 1]])
        C = r_axis @ r_roll @ r_pitch @ r_yaw @ T

        if pts.ndim <= 1:
            pts = pts[np.newaxis, :]
        # Homogenize and transpose to columns-of-points.
        pts = (np.c_[pts, np.ones(pts.shape[0])]).T

        return C @ pts

    def projective_transform(self, rot_pt):
        """Project generalized camera coordinates onto the sensor plane.

        Expects the output of :meth:`rotational_transform`.
        """
        focal = self.f  # NOTE(review): never assigned -- must be set by caller
        sensor = self.c
        rot_pt = rot_pt.T
        # General (normalized) coordinates.
        gcx = rot_pt[:, 0] / rot_pt[:, 2]
        gcy = rot_pt[:, 1] / rot_pt[:, 2]
        # Pixel locations, principal point at the sensor center.
        pu = gcx * focal + sensor[0] / 2.
        pv = gcy * focal + sensor[1] / 2.
        return np.array([pu, pv]).T

    def estimate_pose(self):
        """Refine each image's pose by least-squares GCP reprojection."""
        def residual_pose(pose, realgcp, imagegcp, self):
            pt = self.projective_transform(self.rotational_transform(realgcp, pose))
            return pt.flatten() - imagegcp.flatten()

        for i in range(len(self.images)):
            realgcp = self.images[i].realgcp
            imagegcp = self.images[i].imagegcp
            self.images[i].pose = so.least_squares(
                residual_pose, self.images[i].pose, method='lm',
                args=[realgcp, imagegcp, self]).x

    def estimate_RWC(self):
        """Refine real-world coordinates by reprojection into image pairs."""
        if len(self.images) < 2:
            print("There are not 2 images in this camera class")

        def residual_RWC(RWC, pose1, pose2, imcor1, imcor2, self):
            # Residuals of the candidate world point in both images.
            pt_1 = self.projective_transform(self.rotational_transform(RWC, pose1))
            pt_2 = self.projective_transform(self.rotational_transform(RWC, pose2))
            res_1 = pt_1.flatten() - imcor1.flatten()
            res_2 = pt_2.flatten() - imcor2.flatten()
            return np.hstack((res_1, res_2))

        for i in range(len(self.images)):
            for j in range(len(self.images)):
                if i != j:
                    self.images[i].realgcp = so.least_squares(
                        residual_RWC, self.images[i].realgcp, method='lm',
                        args=(self.images[i].pose, self.images[j].pose,
                              self.images[i].imagegcp,
                              self.images[j].imagegcp, self)).x

    def pointCloud(self):
        """Match SIFT features between the first two images, recover their
        relative pose, and triangulate a sparse cloud into self.point_cloud."""
        def triangulate(P0, P1, x1, x2):
            # Linear (DLT) triangulation of one correspondence.
            A = np.array([[P0[2, 0] * x1[0] - P0[0, 0], P0[2, 1] * x1[0] - P0[0, 1], P0[2, 2] * x1[0] - P0[0, 2], P0[2, 3] * x1[0] - P0[0, 3]],
                          [P0[2, 0] * x1[1] - P0[1, 0], P0[2, 1] * x1[1] - P0[1, 1], P0[2, 2] * x1[1] - P0[1, 2], P0[2, 3] * x1[1] - P0[1, 3]],
                          [P1[2, 0] * x2[0] - P1[0, 0], P1[2, 1] * x2[0] - P1[0, 1], P1[2, 2] * x2[0] - P1[0, 2], P1[2, 3] * x2[0] - P1[0, 3]],
                          [P1[2, 0] * x2[1] - P1[1, 0], P1[2, 1] * x2[1] - P1[1, 1], P1[2, 2] * x2[1] - P1[1, 2], P1[2, 3] * x2[1] - P1[1, 3]]])
            u, s, vt = np.linalg.svd(A)
            return vt[-1]

        I1 = self.images[0]
        I2 = self.images[1]
        h, w, d = I1.shape
        sift = cv2.xfeatures2d.SIFT_create()
        kp1, des1 = sift.detectAndCompute(I1, None)
        kp2, des2 = sift.detectAndCompute(I2, None)
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)

        # Apply Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        u1 = np.array([kp1[m.queryIdx].pt for m in good])  # general coords
        u2 = np.array([kp2[m.trainIdx].pt for m in good])

        # Make homogeneous. BUG FIX: the original line was garbled
        # (``...np.ones(u2.shape[0])cu = w//2``).
        u1 = np.c_[u1, np.ones(u1.shape[0])]
        u2 = np.c_[u2, np.ones(u2.shape[0])]

        # Principal point at the image center.
        cu = w / 2
        cv = h / 2
        f = self.f  # NOTE(review): never assigned -- must be set by caller
        K_cam = np.array([[f, 0, cu], [0, f, cv], [0, 0, 1]])
        K_inv = np.linalg.inv(K_cam)
        x1 = u1 @ K_inv.T  # camera coords
        x2 = u2 @ K_inv.T

        E, inliers = cv2.findEssentialMat(x1[:, :2], x2[:, :2], np.eye(3),
                                          method=cv2.RANSAC, threshold=1e-3)
        inliers = inliers.ravel().astype(bool)
        n_in, R, t, _ = cv2.recoverPose(E, x1[inliers, :2], x2[inliers, :2])

        P0 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
        P1 = np.hstack((R, t))

        for i in range(len(u2)):
            # Homogeneous world point for each correspondence.
            self.point_cloud.append(triangulate(P0, P1, x1[i], x2[i]))

        self.point_cloud = np.array(self.point_cloud)
        # NOTE(review): homogeneous normalization should arguably be done
        # per point; this keeps the original single-scalar division.
        self.point_cloud /= self.point_cloud[0][3]

    def plotPointCloud(self):
        """Scatter-plot the triangulated point cloud in 3-D."""
        fig = plt.figure()
        # 'projection' must be lowercase '3d' for matplotlib.
        ax = fig.add_subplot(111, projection='3d')
        ax.plot(*self.point_cloud.T, 'k.')
	
                   
                   
                   
if __name__ == "__main__":
  # NOTE(review): ``Image`` is used here as a callable taking a path --
  # presumably a custom image wrapper rather than PIL's Image module;
  # confirm before running.
  Image1 = Image(sys.argv[1])
  Image2 = Image(sys.argv[2])
  pointCloud = camClass()
  pointCloud.add_images(Image1)
  pointCloud.add_images(Image2)
  # Build and display the triangulated point cloud.
  pointCloud.pointCloud()
  pointCloud.plotPointCloud()
def highlight_faces(image, faces, output_filename, terminal_print=True):
    """Adapted from Google tutorial. Draw figure with API information and save it.

    Note: this is just for illustration, the graphics are not robust: hard coded
    fonts etc.
    """
    pil_image = Image.open(image)
    canvas = ImageDraw.Draw(pil_image)

    for face_ind, face in enumerate(faces):

        # Emotion names and their likelihood scores, kept index-aligned.
        emotion_names = ["SORROW",
                         "JOY",
                         "ANGER",
                         "SURPRISE"]
        emotion_scores = [face.sorrow_likelihood,
                          face.joy_likelihood,
                          face.anger_likelihood,
                          face.surprise_likelihood]

        string_label = generate_string_label(emotion_names, emotion_scores)

        if terminal_print:
            # Dump the per-face emotion report to the terminal.
            print("\n")
            print("-----------------------")
            print("Face {}".format(face_ind))

            for crrt_emotion, crrt_score in zip(emotion_names, emotion_scores):
                print("{}: {}".format(crrt_emotion, crrt_score))

            print(string_label)

            print("-----------------------")

        # Draw the bounding polygon around the face (closed by repeating
        # the first vertex).
        box = [(vertex.x, vertex.y) for vertex in face.bounding_poly.vertices]
        canvas.line(box + [box[0]], width=5, fill='#00ff00')

        # Legend rendered inside the top of the face box.
        label_font = ImageFont.truetype(
            "/usr/share/fonts/truetype/freefont/FreeMono.ttf", 35)

        offset = 5
        legend_height = 40
        legend_length = box[1][0] - box[0][0] - 2 * offset
        legend_origin = (box[0][0] + offset, box[0][1] + offset)
        canvas.rectangle((legend_origin,
                          (box[0][0] + legend_length + offset,
                           box[0][1] + legend_height + offset)),
                         fill="black")
        canvas.text(legend_origin, string_label, font=label_font,
                    fill=(255, 255, 255, 255))

        # Mark each facial landmark with a small red square.
        half_side = 2
        list_point_coords = []
        for landmark in face.landmarks:
            x = landmark.position.x
            y = landmark.position.y
            list_point_coords.append((x, y))
            canvas.rectangle(((x - half_side, y - half_side),
                              (x + half_side, y + half_side)),
                             fill="red")

        # Closed polylines connecting landmark indexes (eyes, mouth, ...).
        closed_polylines = [
                           [10, 11, 9],
                           [10, 12, 11],
                           [14, 7, 13, 15],
                           [7, 6],
                           [14, 6, 13, 7, 14],
                           [16, 17, 18, 19],
                           [21, 22, 23, 24],
                           [30, 6],
        ]
        for crrt_list_points in closed_polylines:
            draw_line_list_points(canvas, crrt_list_points, list_point_coords)

        # Open polylines (eyebrows and nose bridge).
        for open_polyline in ([2, 26, 3], [4, 27, 5], [10, 8, 11]):
            draw_line_list_points(canvas, open_polyline, list_point_coords,
                                  close=False)

    pil_image.save(output_filename)
# Resampling filter used when shrinking images.
# NOTE(review): Image.ANTIALIAS is deprecated and removed in Pillow >= 10
# (replaced by Image.LANCZOS / Image.Resampling.LANCZOS) -- confirm the
# installed Pillow version before upgrading.  Also not referenced anywhere
# in the visible code.
resize_method = Image.ANTIALIAS

# Maximum dimensions (pixels) allowed for the resized output.
max_height = 450
max_width = 450
# Only files whose upper-cased extension is listed here are processed.
extensions = ['JPG']

# Directory scanned for images; replace the placeholder with a real path.
path = os.path.abspath("your path here for the images")


def adjusted_size(width, height, limit_width=None, limit_height=None):
    """Return ``(width, height)`` scaled down to fit within the size limits.

    The aspect ratio is preserved; images that already fit are returned
    unchanged.  The previous implementation picked the scaling branch by
    comparing ``width > height``, which could return a size violating one
    limit whenever the two limits differ (e.g. 800x500 with limits 400x200
    yielded (400, 250)); this version always honours both limits.

    Parameters
    ----------
    width, height : int
        Original image dimensions in pixels.
    limit_width, limit_height : int, optional
        Maximum allowed dimensions.  Default to the module-level
        ``max_width`` / ``max_height`` constants, preserving the old
        call signature.
    """
    if limit_width is None:
        limit_width = max_width
    if limit_height is None:
        limit_height = max_height

    if width <= limit_width and height <= limit_height:
        return width, height

    # Choose the tighter constraint.  Cross-multiplied form of
    # limit_width / width <= limit_height / height to avoid float compare.
    if limit_width * height <= limit_height * width:
        return limit_width, int(limit_width * height / width)
    return int(limit_height * width / height), limit_height


if __name__ == "__main__":
    # Resize every matching image in `path` in place, preserving aspect ratio.
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if not os.path.isfile(full_path):
            continue
        extension = os.path.splitext(entry)[1][1:].upper()
        if extension not in extensions:
            continue
        print(entry)
        img = Image.open(full_path)
        img = img.resize(adjusted_size(*img.size))
        img.save(full_path)
Exemple #55
0
    def __getitem__(self, idx):
        """Load one sample: a stack of context frames and their cameras.

        Reads ``self.context_views`` RGB-D frames (``self.frame_interval``
        apart) from the ``.sens`` file of the scan at *idx*, resizes each
        color image to 64x64, and derives a 9-D camera vector (translation
        plus sin/cos of yaw, pitch and roll) per frame.

        Returns:
            tuple: ``(frames, cameras)`` -- ``frames`` stacked along axis 0
            (one 64x64 tensor per view) and ``cameras`` with shape
            (views, 9, 1, 1) for broadcasting.
        """
        start_load = time.time()
        scan, frame_offset = self._scan_pairs[idx]
        scan_path = os.path.join(self.root_dir, 'scans', scan, scan + '.sens')
        sensor_data = SensorData()
        # Frame indexes to load: evenly spaced starting at frame_offset.
        idxs = [
            frame_offset + i * self.frame_interval
            for i in range(self.context_views)
        ]
        start_load_frames = time.time()
        rgbd_frames = sensor_data.load_frames(scan_path,
                                              self._scan_indexes[scan], idxs)
        end_load_frames = time.time()
        frames = []
        cameras = []
        decompress_frames_time = 0
        convert_frames_time = 0
        process_camera_time = 0
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize((64, 64)),
            torchvision.transforms.ToTensor(),
        ])
        for rgbd_frame in rgbd_frames:
            # Decompress the frame's encoded color payload.
            decompress_frames_start = time.time()
            frame = Image.open(io.BytesIO(rgbd_frame.color_data))
            # Accumulate with += (was `=`, which overwrote the running total
            # and under-reported decompression time for multi-frame samples).
            decompress_frames_time += time.time() - decompress_frames_start

            convert_frames_start = time.time()
            image = transforms(frame)
            convert_frames_time += time.time() - convert_frames_start
            # Process camera: invert camera-to-world rotation.
            camera_time = time.time()
            R = np.linalg.inv(rgbd_frame.camera_to_world[0:3, 0:3])
            # NOTE(review): this takes the third *rotation* column, not the
            # translation column [0:3, 3]; a proper world-to-camera
            # translation would be -R @ camera_to_world[0:3, 3] -- confirm
            # intent before changing, as trained models may depend on it.
            t = -rgbd_frame.camera_to_world[0:3, 2]
            # Convert rotation matrix to yaw, pitch, and roll
            yaw = np.arctan2(R[1, 0], R[0, 0])
            pitch = np.arctan2(-R[2, 0], np.sqrt(R[2, 1]**2 + R[2, 2]**2))
            roll = np.arctan2(R[2, 1], R[2, 2])
            # NaN guard (NaN != NaN): invalid poses collapse to the identity.
            if yaw != yaw:
                t = [0, 0, 0]
                yaw = 0
                pitch = 0
                roll = 0
            cameras.append(
                np.array([
                    t[0], t[1], t[2],
                    np.sin(yaw),
                    np.cos(yaw),
                    np.sin(pitch),
                    np.cos(pitch),
                    np.sin(roll),
                    np.cos(roll)
                ],
                         dtype=np.float32))
            process_camera_time += time.time() - camera_time
            frames.append(image)

        new_frames = np.stack(frames, axis=0)
        # Expand cameras to (views, 9, 1, 1) so they broadcast over images.
        new_cameras = np.stack(cameras, axis=0)
        new_cameras = new_cameras[:, :, np.newaxis, np.newaxis]

        end_load = time.time()

        # Disabled timing diagnostics; flip to True when profiling loading.
        if False:
            print('Time to load idx {:d}: {:f}'.format(idx,
                                                       end_load - start_load))
            print('Time to read frames: {:f}'.format(end_load_frames -
                                                     start_load_frames))
            print('Time to decompress frames: {:f}'.format(
                decompress_frames_time))
            print('Time to convert frames: {:f}'.format(convert_frames_time))
            print('Time to process camera: {:f}'.format(process_camera_time))

        return new_frames, new_cameras
Exemple #56
0
from PIL import Image
import cv2
import pytesseract as image_to_string
import numpy as np

# Input image containing Arabic text (1005x630 per the inline note below).
source_image = 'baba2.jpg'
img = Image.open(source_image)  #1005 630
#print(img_binary.size)
#text_all=image_to_string.image_to_string(img,lang='ara')
# Output file for recognized text.  NOTE(review): only ever written by the
# commented-out lines, so it currently just gets created/truncated.
filew = open('output.txt', 'w+')

#text=image_to_string.image_to_string(img,lang='ara')
#filew.write(text)

# Binarize for OCR: grayscale, then a fixed threshold at 130.
im_gray = cv2.imread(source_image, cv2.IMREAD_GRAYSCALE)
ret, thresh_img = cv2.threshold(im_gray, 130, 255, cv2.THRESH_BINARY)
# NOTE(review): `kernel` is unused unless the erosion below is re-enabled.
kernel = np.ones((2, 2), np.uint8)
#img_binary = cv2.erode(thresh_img,kernel,iterations = 1)
#img_binary = cv2.medianBlur(img_binary, 1)
cv2.imwrite('threshold.jpg', thresh_img)

# Reload the thresholded image and run tesseract with the Arabic model.
img_binary = Image.open('threshold.jpg')

text_all = image_to_string.image_to_string(img_binary, lang='ara')
#filew.write(text_all+'\n')
#print(text_all)
#print('________________________________')

def crop(dim1, dim2, dim3, dim4, name):
    # Bounding box in PIL's (left, upper, right, lower) order -- presumably
    # intended for Image.crop.
    # NOTE(review): the body appears truncated -- `area` and `name` are never
    # used and nothing is returned; confirm against the original source.
    area = (dim1, dim2, dim3, dim4)
Exemple #57
0
    def do_open(self):
        """usage: open <string:filename>

        Open the indicated image, read it, push the image on the stack.
        """
        # Pop the filename argument first, then push the opened image.
        filename = self.do_pop()
        opened = Image.open(filename)
        self.push(opened)
def statistics(filename,data,threshold = 0.4):
    """
    Detect a collapsed prediction and regenerate it with the Rover model.

    data is dict 
    dict key :filename
    dict value: data-->(predpicdata,beforeinputframe(90min,120min))
    -------
    if predpicdata/beforedata <0.3 we think it predict false need use the rover to fillnan

    """
    # Input frames that were fed to the model.  80.0 is presumably the max
    # pixel value used for normalization -- TODO confirm.
    caculateoptflow_data = data[filename][1]
    caculateoptflow_data = caculateoptflow_data/80.0
    predict_data = data[filename][0]
    #print(predict_data.shape)
    # Zero out weak echoes below 20 before computing the ratios.
    predict_data[predict_data<20] = 0.0
    #predict_data = predict_data/80.0
    # decision which data need to cover
    #print(caculateoptflow_data.shape)
    #print(caculateoptflow_data[-1].sum())
    #print(predict_data.sum())
    # NOTE(review): the loop variable `data` shadows the `data` parameter
    # from here on; the parameter is not needed again, so this is harmless
    # but confusing.
    for ind,data in enumerate(predict_data):
        beginchange_number = ind
        # Ratio of each predicted frame's mean to the previous frame's mean
        # (to the last input frame for the first prediction); the epsilon
        # guards against division by zero.
        if ind ==0:
            judge = data.mean()/(caculateoptflow_data[-1].mean()+0.0000000000001) 
        else:
            judge = data.mean()/(predict_data[ind-1].mean()+0.0000000000001) 
        if judge <threshold:
            break
    """
    if judge<0.4:
        print(filename)
        return filename
    """
    #print(judge)
    # NOTE(review): `or True` / `if True` force the regeneration branch to
    # run unconditionally -- these look like deliberate debug overrides.
    if judge < threshold or True:
        if True:
            print('begin the changing---------')
            model = Rover()
            # Add the batch axis expected by the Rover model.
            caculateoptflow_data = caculateoptflow_data[:,np.newaxis,:,:,:]
            print(caculateoptflow_data[0].sum()-caculateoptflow_data[1].sum())
            if (caculateoptflow_data[0] == caculateoptflow_data[1]).all():
                print("that caculateoptflow data is break-------------------------")
            predict = model(caculateoptflow_data)
            # Undo the /80.0 normalization applied above.
            predict = predict*80.0
            print('predict diff------->',predict[0].sum() - predict[1].sum())
            if (predict[0] == predict[1]).all():
                print("that pred data is break-------------------------")
            #print(predict.shape)
            # One output picture per forecast horizon, in minutes.
            timefilename = [30,60,90,120]
            for ind,time in enumerate(timefilename) :
                picname = str(time)+'.png'
                pic_savepath = os.path.join('/media/workdir/hujh/hujh-new/huaweirader_baseline/data_eda/Predict/f**k',filename)
                #pic_savepath = os.path.join(savepath)
                if  not os.path.exists(pic_savepath) :
                    os.mkdir(pic_savepath)
                pic_savepath = os.path.join('/media/workdir/hujh/hujh-new/huaweirader_baseline/data_eda/Predict/f**k',filename,picname)
                #scipy.misc.toimage(predict_np, high=255, low=0, cmin=0, cmax=255).save(pic_savepath)
                #predict_np_save = predict_np[time,:,:]
                #print(predict_np_save.shape)
                #print(predict[ind][0].shape)
                # ind*2+2 presumably maps the horizon index to the model's
                # output step -- TODO confirm; the `or True` again forces
                # every horizon to be rewritten.
                if ind >= beginchange_number or True:
                    #os.remove(pic_savepath)
                    Image.fromarray(np.uint8(predict[ind*2+2][0][0])).save(pic_savepath)
            print('already change the data')

        return filename
def resize(in_img, size):
    """Resize a float image in [0, 1] to size[0] rows x size[1] cols.

    The array is scaled to 8-bit, resized with nearest-neighbour
    resampling (resample=0), and returned rescaled to [0, 1].
    """
    as_bytes = np.uint8(in_img*255)
    resized = Image.fromarray(as_bytes).resize((size[1], size[0]),
                                               resample=0, box=None)
    return np.asarray(resized)/255.0
Exemple #60
0
"""
OCR engine binding to tesseract engine.

tesseract engine: https://github.com/tesseract-ocr/tesseract
tesseract language data: https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016
tesserocr (python wrapper of tesseract): https://github.com/sirfz/tesserocr
"""


import tesserocr
from PIL import Image

# One-shot helper API: OCR a single PIL image and list installed languages.
image = Image.open('./pics/screen.png')
print(tesserocr.image_to_text(image))
print(tesserocr.get_languages())


# or ... use the lower-level API object, which can be reused across images.
from tesserocr import PyTessBaseAPI

images = ['./pics/screen.png']

# You can set the language here, but the matching tesseract language data
# must be installed first.
with PyTessBaseAPI(lang='eng') as api:
    for img in images:
        api.SetImageFile(img)
        print(api.GetUTF8Text())