    def test_adjust__width(self):
        im = Image.open(self._data_path('100x100.png'))
        crop = Crop(width=50)
        adjusted = crop.adjust(im)
        self.assertEqual(adjusted.size, (50, 100))
        expected = Image.open(self._data_path('50x100_crop.png'))
        self.assertImageEqual(adjusted, expected)
    def test_jpeg(self):
        path = os.path.join(TEST_DATA_PATH, "Sam_Hat1.jpg")
        image = Image.objects.create_from_path(path)

        # Re-load the image, now that the task is done
        image = Image.objects.get(id=image.id)

        self.assertTrue(image.source.path.endswith("Sam_Hat1.jpg"))
        self.assertEqual(image.width, 3264)
        self.assertEqual(image.height, 2448)
        self.assertEqual(image.jpeg_quality, None)
        self.assertTrue(os.path.exists(image.optimized.path))
        self.assertTrue(os.path.exists(image.source.path))

        source = PILImage.open(image.source.path)
        optimized = PILImage.open(image.optimized.path)

        self.assertEqual(
            source.quantization,
            optimized.quantization
        )

        self.assertEqual(
            JpegImagePlugin.get_sampling(source),
            JpegImagePlugin.get_sampling(optimized),
        )
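The properties asserted here — that optimizing a JPEG preserves its quantization tables and chroma subsampling — can be checked directly on any pair of files; a minimal sketch with hypothetical paths:

from PIL import Image as PILImage, JpegImagePlugin

src = PILImage.open('source.jpg')        # hypothetical original
opt = PILImage.open('source.opt.jpg')    # hypothetical optimized copy
assert src.quantization == opt.quantization  # identical quantization tables
assert JpegImagePlugin.get_sampling(src) == JpegImagePlugin.get_sampling(opt)  # identical subsampling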
Example #3
File: tests.py Project: agoya/glue
    def test_project_manager(self):
        manager = self.generate_manager(glue.ProjectSpriteManager, 'multiple')
        manager.process()

        rgb_img_path = os.path.join(self.output_path, 'rgb.png')
        rgb_css_path = os.path.join(self.output_path, 'rgb.css')
        mix_img_path = os.path.join(self.output_path, 'mix.png')
        mix_css_path = os.path.join(self.output_path, 'mix.css')
        self.assertTrue(os.path.isfile(rgb_img_path))
        self.assertTrue(os.path.isfile(rgb_css_path))
        self.assertTrue(os.path.isfile(mix_img_path))
        self.assertTrue(os.path.isfile(mix_css_path))

        image = Image.open(rgb_img_path)
        css = open(rgb_css_path)

        self.assertEqual(image.getpixel((0, 0)), RED)
        self.assertEqual(image.getpixel((25, 0)), GREEN)
        self.assertEqual(image.getpixel((0, 25)), BLUE)
        self.assertEqual(image.getpixel((25, 25)), TRANSPARENT)

        self.assertEqualCSS(css.read(), EXPECTED_PROJECT_RGB_CSS)
        css.close()

        image = Image.open(mix_img_path)
        css = open(mix_css_path)

        self.assertEqual(image.getpixel((0, 0)), YELLOW)
        self.assertEqual(image.getpixel((25, 0)), PINK)
        self.assertEqual(image.getpixel((0, 25)), CYAN)
        self.assertEqual(image.getpixel((25, 25)), TRANSPARENT)

        self.assertEqualCSS(css.read(), EXPECTED_PROJECT_MIX_CSS)
        css.close()
    def test_adjust__height(self):
        im = Image.open(self._data_path('100x100.png'))
        crop = Crop(height=50)
        adjusted = crop.adjust(im)
        self.assertEqual(adjusted.size, (100, 50))
        expected = Image.open(self._data_path('100x50_crop.png'))
        self.assertImageEqual(adjusted, expected)
def query(query_term, folder_name, path):

    BASE_URL = 'https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q=' + query_term + '&start=%d'

    BASE_PATH = os.path.join(path, folder_name.replace(' ', '_'))

    if not os.path.exists(BASE_PATH):
        os.makedirs(BASE_PATH)
        print("made: " + BASE_PATH)

    start = 0  # start query string parameter for pagination
    while start < 40:   # query 20 pages
        r = requests.get(BASE_URL % start)
        for image_info in json.loads(r.text)['responseData']['results']:
            url = image_info['unescapedUrl']
            try:
                image_r = requests.get(url)
            except ConnectionError as e:
                print('could not download %s' % url)
                continue

            # remove file-system path characters from name
            title = query_term.replace(' ', '_') + '_' + image_info['imageId']
            # open in binary mode: JPEG data is bytes, not text
            file = open(os.path.join(BASE_PATH, '%s.jpg' % title), 'wb')
            try:
                Image.open(StringIO(image_r.content)).save(file, 'JPEG')
            except IOError as e:
                # throw away gifs and stuff
                print("couldn't save %s" % url)
                continue
            finally:
                file.close()
Example #6
    def basehash(self, path=True):
        """Compare two images by the RMS difference of their RGB histograms.
        :return: float
        """
        import math
        import operator
        from functools import reduce
        from PIL import Image

        if path:
            image1 = Image.open(self.image_a_path)
            image2 = Image.open(self.image_b_path)
        else:
            image1 = Image.open(self.image_a_binary)
            image2 = Image.open(self.image_b_binary)

        # compare sizes for equality, not object identity
        if image1.size != image2.size:
            image2 = image2.resize(image1.size)
        h1 = image1.convert('RGB').histogram()
        h2 = image2.convert('RGB').histogram()

        rms = math.sqrt(
            reduce(operator.add, list(map(lambda a, b: (a - b) ** 2, h1, h2)))
            /
            len(h1)
        )
        self.value_of_phash = rms
        return rms
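An RMS of 0 means the histograms match exactly; since histograms ignore pixel positions, visually different images can still score low. The same comparison as a standalone helper, for reference (function name and arguments are hypothetical):

def histogram_rms(path_a, path_b):
    """RMS difference between the RGB histograms of two image files."""
    import math
    from PIL import Image
    im1 = Image.open(path_a).convert('RGB')
    im2 = Image.open(path_b).convert('RGB')
    if im1.size != im2.size:
        im2 = im2.resize(im1.size)
    h1, h2 = im1.histogram(), im2.histogram()
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(h1, h2)) / len(h1))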
Example #7
def translate(name,text):
    path = sys.path[0] + "\\TP\\"  # escape the backslashes; "\T" relied on Python passing unknown escapes through
    im = Image.open(path+"space.bmp")
    line = text.split("@")
    length = 0
    for i in line:
        if len(i) > length:
            length = len(i)
    height = len(line)
    length *= 42
    height *= 40
    diagram = Image.new("RGBA",(length,height),(255,255,255))
    longest = 0
    for i in range(0,len(line)):
        letters = []
        pos = 0
        for j in range(0,len(line[i])):
            temp = convert(line[i][j])
            if(temp != "null"):
                letters.append(temp)
        for j in range(0,len(letters)):
            k = len(letters)-j-1
            im = Image.open(path+letters[k]+".bmp")
            (le,up,ri,bo) = im.getbbox()
            diagram.paste(im,(pos,i*40,pos+ri,(i+1)*40))
            pos+=ri+1
        if(pos > longest):
            longest = pos
    diagram = diagram.crop((0,0,longest-1,len(line)*40))
    diagram.save(path+name+".png")
    diagram.show()
def test_write_svg_to_png(filename):
    # If you want to regenerate these, e.g. the svg template has significantly
    # changed, easiest way is to patch write_svg_to_png to not delete the
    # temporary file (delete:False in temp_args) and copy the svg out of /tmp.
    # Output png files are in user-media/version-previews/full and /thumbs.
    out = tempfile.mktemp()
    svg_xml = os.path.join(
        settings.ROOT,
        'src/olympia/versions/tests/static_themes/%s.svg' % filename)
    svg_png = os.path.join(
        settings.ROOT,
        'src/olympia/versions/tests/static_themes/%s.png' % filename)
    with storage.open(svg_xml, 'rb') as svgfile:
        svg = svgfile.read()
    write_svg_to_png(svg, out)
    assert storage.exists(out)
    # compare the image content. rms should be 0 but travis renders it
    # different... 3 is the magic difference.
    svg_png_img = Image.open(svg_png)
    svg_out_img = Image.open(out)
    image_diff = ImageChops.difference(svg_png_img, svg_out_img)
    sum_of_squares = sum(
        value * ((idx % 256) ** 2)
        for idx, value in enumerate(image_diff.histogram()))
    rms = math.sqrt(
        sum_of_squares / float(svg_png_img.size[0] * svg_png_img.size[1]))

    assert rms < 3
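The metric above generalizes; a sketch of it factored into a helper (histogram() on a multiband image concatenates 256 bins per band, so idx % 256 recovers the pixel value behind each bin):

def rms_difference(im1, im2):
    """Root-mean-square pixel difference between two same-size images."""
    diff = ImageChops.difference(im1, im2)
    sum_of_squares = sum(value * ((idx % 256) ** 2)
                         for idx, value in enumerate(diff.histogram()))
    return math.sqrt(sum_of_squares / float(im1.size[0] * im1.size[1]))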
Example #9
    def test_12bit_rawmode(self):
        """ Are we generating the same interpretation
        of the image as Imagemagick is? """

        # Image.DEBUG = True
        im = Image.open('Tests/images/12bit.cropped.tif')

        # to make the target --
        # convert 12bit.cropped.tif -depth 16 tmp.tif
        # convert tmp.tif -evaluate RightShift 4 12in16bit2.tif
        # imagemagick will auto scale so that a 12bit FFF is 16bit FFF0,
        # so we need to unshift so that the integer values are the same.

        im2 = Image.open('Tests/images/12in16bit.tif')

        if Image.DEBUG:
            print(im.getpixel((0, 0)))
            print(im.getpixel((0, 1)))
            print(im.getpixel((0, 2)))

            print(im2.getpixel((0, 0)))
            print(im2.getpixel((0, 1)))
            print(im2.getpixel((0, 2)))

        self.assert_image_equal(im, im2)
Example #10
def _is_image_file(path):
    """Whether the file is a readable image file via Pillow."""
    try:
        pImage.open(path)
        return True
    except Exception:
        return False
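Image.open is lazy — it parses only the header — so the check above can accept files whose body is truncated or corrupt. Pillow's verify() reads further; a stricter variant under that approach (function name hypothetical):

def _is_valid_image_file(path):
    """Like _is_image_file, but also runs Pillow's verify() on the file body."""
    try:
        with pImage.open(path) as im:
            im.verify()  # raises on a broken file
        return True
    except Exception:
        return False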
Example #11
 def __init__(self, master, func):
     Tkinter.Toplevel.__init__(self, master, relief=Tkinter.SOLID, highlightthickness=1, highlightcolor=fg)
     self.root = master
     self.root.withdraw()
     self.overrideredirect(Tkinter.TRUE)
     self.progress = Progressbar(self)
     if not config.python3:
         self.image1 = Image.open(config.relinuxdir + "/splash.png")
         self.image2 = Image.open(config.relinuxdir + "/splash_glowy.png")
         self.images = []
         for i in range(0, 11):
             percent = float(float(i) / 10)
             self.images.append(ImageTk.PhotoImage(Image.blend(self.image1, self.image2, percent)))
         # self.image = ImageTk.PhotoImage(Image.blend(self.image1, self.image2, 0.0))
         self.image = self.images[0]
         self.imgw = self.image.width()
         self.imgh = self.image.height()
     else:
         self.image = Tkinter.PhotoImage(file=config.relinuxdir + "/splash.ppm")
         self.imgw = self.image.width()
         self.imgh = self.image.height()
     self.textvar = Tkinter.StringVar()
     self.progresstext = Label(self, textvariable=self.textvar, height=15, width=480, anchor=Tkinter.W)
     self.w = self.imgw
     self.h = self.imgh + 32
     self.x = self.root.winfo_screenwidth() / 2 - self.w / 2
     self.y = self.root.winfo_screenheight() / 2 - self.h / 2
     self.geometry("%dx%d+%d+%d" % (self.w, self.h, self.x, self.y))
     self.panel = Label(self, image=self.image)
     self.panel.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, expand=True)
     self.progress.pack(side=Tkinter.BOTTOM, fill=Tkinter.X, expand=True)
     self.progresstext.pack(side=Tkinter.BOTTOM, fill=Tkinter.X, expand=True)
     self.update()
     self.thread = FuncThread(func, self.endSplash, self)
     self.thread.start()
Example #12
 def getValicode(self):
     element = self.driver.find_element_by_id("change_cas")
     element.click()
     time.sleep(0.5)
     self.driver.get_screenshot_as_file("screenshot.png")
     img = IMG.open('screenshot.png')
     width = img.size[0]
     height =  img.size[1]
     region = (int(width*0.50699677), int(height*0.52849162), int(width*0.593110872), int(height*0.57318436))
     cropImg = img.crop(region)
     cropImg.save('1.png')
     image = IMG.open('1.png')
     enhancer = ImageEnhance.Contrast(image)
     image_enhancer = enhancer.enhance(2)
     valicode = image_to_string(image_enhancer)
     if len(valicode)==0:
         return self.getValicode()
     else:
         pattern = re.compile(r'[0-9,a-z,A-Z]{4}')
         match = pattern.match(valicode)
         if match:
             print(valicode)
             return valicode
         else:
             return self.getValicode()
Example #13
File: tests.py Project: jmg/fedex
    def test_shipment_service(self):
        """Test the shipment service.
        """
        service = ShipmentService(CONFIGURATION)
        shipment = service.create_shipment()
        shipment.ShipTimestamp = datetime.now()
        set_label(shipment.LabelSpecification)
        package = service.create_package()
        set_package(package)
        set_shipment(shipment, package)
        result = service.process(shipment)
        print(result)
        details = result.CompletedShipmentDetail.CompletedPackageDetails[0]
        image = details.Label.Parts[0].Image
        binary = a2b_base64(image)

        with NamedTemporaryFile() as png_file:
            png_file.write(binary)

            if Image:
                png_file.seek(0)
                Image.open(png_file.name).show()

        tracking_id = details.TrackingIds[0]
        result = service.delete(tracking_id)
        print(result)
Example #14
def leftpanel(frame):
	height = 580 #image height

	LeftPanel = wx.Panel(frame,wx.ID_ANY)
	LeftPanel.SetBackgroundColour('#FFFFFF')
	layoutLeft = wx.BoxSizer(wx.VERTICAL)

	# for ImagePanel
	ImagePanel = wx.Panel(LeftPanel,wx.ID_ANY)
	pil = Image.open('store/gui/top.jpg')
	ratio = float(height-10*2)/float(pil.size[1])
	new_size = (int(pil.size[0]*ratio),int(pil.size[1]*ratio))
	pil.thumbnail(new_size,Image.ANTIALIAS)
	image = wx.EmptyImage(pil.size[0],pil.size[1])
	image.SetData(pil.convert('RGB').tostring())
	wx.StaticBitmap(ImagePanel, wx.ID_ANY, image.ConvertToBitmap())
	layoutLeft.Add(ImagePanel,flag=wx.ALL,border=10)

	# for LogoPanel
	LogoPanel = wx.Panel(LeftPanel,wx.ID_ANY)
	pil = Image.open('store/gui/logo.png')
	ratio = float(image.GetWidth()-10*2)/float(pil.size[0])
	new_size = (int(pil.size[0]*ratio),int(pil.size[1]*ratio))
	pil.thumbnail(new_size,Image.ANTIALIAS)
	image = wx.EmptyImage(pil.size[0],pil.size[1])
	image.SetData(pil.convert('RGB').tostring())
	wx.StaticBitmap(LogoPanel, wx.ID_ANY, image.ConvertToBitmap())
	LogoPanel.SetSize(new_size)
	layoutLeft.Add(LogoPanel,flag=wx.ALL,border=10)

	LeftPanel.SetSizer(layoutLeft)
	return LeftPanel
def resizeImage(subdir, infile, output_dir=""):
     outfile = os.path.splitext(infile)[0]+"_min"
     extension = os.path.splitext(infile)[1]
     w=400
     logo = Image.open('../Logo/JXX.png')
     logo.thumbnail((w/10, w/10))
     if (cmp(extension, ".JPG")):
        return

     if infile != outfile:
        try :
            im = Image.open(subdir+"/"+infile)
            width, height = im.size
            if(width>height):
                nh = width*height/w
            else:
                nh = w
                w = height*width/nh
            im.thumbnail((w, nh), Image.ANTIALIAS)

            image_copy = im.copy()
            position = ((image_copy.width - logo.width - 10), (image_copy.height - logo.height - 10))
            image_copy.paste(logo, position, logo)
            image_copy.save(subdir+"/"+output_dir+outfile+extension,"JPEG")
        except IOError:
            print "cannot reduce image for ", infile
    def update_image_sizes( sender, **kwargs):
        # if main image is too big, resize it; make a thumbnail image
        img_rec = kwargs.get('instance', None)
        if img_rec is None:
            return

        # (1) resize main image
        if img_rec.main_image.width > MAX_MAIN_IMAGE_WIDTH or img_rec.main_image.height > MAX_MAIN_IMAGE_WIDTH:
            im = Image.open(img_rec.main_image.file.name)   # open image
            im.thumbnail((MAX_MAIN_IMAGE_WIDTH, MAX_MAIN_IMAGE_WIDTH), Image.ANTIALIAS) # resize
            im.save(img_rec.main_image.file.name, quality=90)   #save
        
        # (2) make a thumbnail
        thumb = Image.open(img_rec.main_image.file.name)    # open the main image
        thumb.thumbnail((MAX_THUMB_IMAGE_WIDTH, MAX_THUMB_IMAGE_WIDTH), Image.ANTIALIAS)
        thumb_fullpath = os.path.join(
            settings.MEDIA_ROOT,
            img_rec.get_image_upload_directory_thumb(os.path.basename(img_rec.main_image.path)))

        # if needed, make thumb directory
        if not os.path.isdir(os.path.dirname(thumb_fullpath)):
            os.makedirs(os.path.dirname(thumb_fullpath))
        # save file
        thumb.save(thumb_fullpath, quality=100)

        # disconnect save signal, save the ImageRecord, and reconnect signal
        post_save.disconnect(ImageRecord.update_image_sizes, sender=ImageRecord)        
        # update/save django model
        img_rec.thumb_image.name = img_rec.get_image_upload_directory_thumb(os.path.basename(thumb_fullpath))
        img_rec.save()
        post_save.connect(ImageRecord.update_image_sizes, sender=ImageRecord)
Example #17
def image(path):
  if '..' in path:
    abort(500)
  fd = open(join(app.root_path, "images", path))
  data = fd.read()

  hsize = int(request.args.get("h", 0))
  vsize = int(request.args.get("v", 0))
  if hsize > 1000 or vsize > 1000:
    abort(500)

  if hsize:
    image = Image.open(StringIO(data))
    x, y = image.size

    x1 = hsize
    y1 = int(1.0 * y * hsize / x)
    image.thumbnail((x1, y1), Image.ANTIALIAS)
    output = StringIO()
    image.save(output, "PNG")
    data = output.getvalue()
  if vsize:
    image = Image.open(StringIO(data))
    x, y = image.size

    x1 = int(1.0 * x * vsize / y)
    y1 = vsize
    image.thumbnail((x1, y1), Image.ANTIALIAS)
    output = StringIO()
    image.save(output, "PNG")
    data = output.getvalue()

  response = make_response(data)
  response.headers['content-type'] = mimetypes.guess_type(path)[0]  # guess_type returns a (type, encoding) tuple
  return response
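Under Python 3 the StringIO calls would become io.BytesIO, since the image payload is bytes; a sketch of just the horizontal-resize branch under that assumption:

from io import BytesIO
from PIL import Image

def resize_to_width(data, hsize):
    """Return `data` re-encoded as PNG, scaled to `hsize` pixels wide."""
    image = Image.open(BytesIO(data))
    x, y = image.size
    image.thumbnail((hsize, int(1.0 * y * hsize / x)), Image.LANCZOS)  # ANTIALIAS in older Pillow
    output = BytesIO()
    image.save(output, "PNG")
    return output.getvalue()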
Example #18
def downloader(opener, filename, s, jpg=None, png=None):
    s.acquire()
    try:
        if not os.path.exists(filename):
            log("Download %s" % (filename))
            try:
                page = opener.open(jpg)
                dJPG = page.read()
                imageStringJPG = cStringIO.StringIO(dJPG)
                imageStringJPG.seek(0)
                page.close()
            except urllib2.HTTPError as e:
                imageStringJPG = ""
                log("Error %s" % (e))
            
            try:
                page = opener.open(png)
                dPNG = page.read()
                imageStringPNG = cStringIO.StringIO(dPNG)
                imageStringPNG.seek(0)
                page.close()
            except urllib2.HTTPError as e:
                imageStringPNG = ""
                log("Error %s" % (e))
                
            if imageStringJPG and imageStringPNG:
                imageJPG = Image.open(imageStringJPG)
                imagePNG = Image.open(imageStringPNG)
                A = imagePNG.convert('RGBA').split()[-1]
                imageJPG.paste(imagePNG, A)
                imageJPG.save(filename, quality=100)
                imageStringJPG.close()
                imageStringPNG.close()
def run_me(run_time=None):

    if run_time:
        start_time = time.time()

    # inp_paths = [os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Output', 'Two Crop test.png')]

    inp_paths = [os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Input', 'Two Crop test.png'),
                 os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Input', 'Two Crop test2.png')]
    orig_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Input', 'Two Infrared test.png')
    out_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Output', 'UnCropResult.png')

    orig = Image.open(orig_path)

    for img_path in inp_paths:
        try:
            img = Image.open(img_path)
            main(img, orig)
        except Exception as e:
            img = Image.open(img_path)
            main(img.transpose(Image.FLIP_LEFT_RIGHT), orig.transpose(Image.FLIP_LEFT_RIGHT))

    # img = Image.open('Input/Two Crop test3.png')
    #
    # ret = main(img, ret)
    # ret.show()

    if run_time:
        print("\n--- %s seconds ---" % (time.time() - start_time))
Example #20
    def test_save_screenshot_valid(self, tmpdir):
        """Check that 'save_screenshot' works"""
        # Run the test crawl
        manager_params, browser_params = self.get_config(str(tmpdir))
        manager = TaskManager.TaskManager(manager_params, browser_params)
        cs = CommandSequence.CommandSequence(url_a)
        cs.get(sleep=1)
        cs.save_screenshot('test')
        cs.screenshot_full_page('test_full')
        manager.execute_command_sequence(cs)
        manager.close()

        # Check that viewport image is not blank
        pattern = os.path.join(str(tmpdir), 'screenshots', '1-*-test.png')
        screenshot = glob.glob(pattern)[0]
        im = Image.open(screenshot)
        bands = im.split()
        is_blank = all(band.getextrema() == (255, 255) for band in bands)
        assert not is_blank

        # Check that full page screenshot is not blank
        pattern = os.path.join(str(tmpdir), 'screenshots', '1-*-test_full.png')
        screenshot = glob.glob(pattern)[0]
        im = Image.open(screenshot)
        bands = im.split()
        is_blank = all(band.getextrema() == (255, 255) for band in bands)
        assert not is_blank
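The blank-image check is repeated verbatim for both screenshots; a small helper makes the intent explicit (name hypothetical):

def image_is_blank(im):
    """True when every band is uniformly white, i.e. getextrema() == (255, 255)."""
    return all(band.getextrema() == (255, 255) for band in im.split())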
Example #21
def load(filepath, rescale=True, dtype='float64'):
    assert type(filepath) == str

    if rescale == False and dtype == 'uint8':
        rval = np.asarray(Image.open(filepath))
        # print 'image.load: ' + str((rval.min(), rval.max()))
        assert rval.dtype == 'uint8'
        return rval

    s = 1.0
    if rescale:
        s = 255.
    try:
        rval = Image.open(filepath)
    except Exception:
        raise Exception("Could not open " + filepath)

    rval = np.cast[dtype](np.asarray(rval)) / s

    if len(rval.shape) == 2:
        rval = rval.reshape(rval.shape[0], rval.shape[1], 1)

    if len(rval.shape) != 3:
        raise AssertionError("Something went wrong opening " +
                filepath + '. Resulting shape is ' + str(rval.shape) +
                " (it's meant to have 3 dimensions by now)")

    return rval
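A usage sketch for load (the file name is hypothetical):

arr = load('photo.png')  # float64 scaled to [0, 1], shape (H, W, channels)
raw = load('photo.png', rescale=False, dtype='uint8')  # raw uint8 pixels, returned untouched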
Example #22
def processImageTupleYCbCr_gmm(rootDir, mainImagename,  qLimit):
    maskImagename = getMaskImagename(mainImagename)
    assert(os.path.exists(os.path.join(rootDir, mainImagename)) and os.path.exists(os.path.join(rootDir, maskImagename)))
    mainImage = Image.open(os.path.join(rootDir, mainImagename))
    yCbCrMainImage = mainImage.convert('YCbCr')
    maskImage = Image.open(os.path.join(rootDir, maskImagename))
    binMaskImage = maskImage.convert("1")

    print('processing image %s, (%r), mask: %s, (%r),  mode: %s, qLimit: %d' % (mainImagename, mainImage.size, maskImagename, maskImage.size, "CbCr", qLimit))
    assert(mainImage.size == maskImage.size)
    yMainImage, cbMainImage, crMainImage = yCbCrMainImage.split()
    cBcR_skin = []
    cBcR_noSkin = []
    w, h = cbMainImage.size
    for cb, cr, mask in zip( enumerate(cbMainImage.getdata()), enumerate(crMainImage.getdata()), enumerate(binMaskImage.getdata())):
        assert(cb[1] < 256 and cr[1] < 256)
        cb_2 = cb[1] * qLimit/256
        cr_2 = cr[1] * qLimit/256


        if mask[1] > 0:
            cBcR_skin.append((cb_2, cr_2))
        else:
            cBcR_noSkin.append((cb_2,cr_2))
    return (np.array(cBcR_skin), np.array(cBcR_noSkin))
Example #23
def imageGinput(image1, image2):
	x1 = Image.open(image1)
	x2 = Image.open(image2)
	fig2 = lab.figure(1)
	fig1 = lab.figure(1)
	ax2 = fig2.add_subplot(141)
	ax1 = fig1.add_subplot(142)
	ax1.imshow(x1)
	ax2.imshow(x2)
	x = fig1.ginput(3)
	xArray = []
	for i in range(0,len(x)):
		xTuple = (int(x[i][0]), int(x[i][1]))
		xArray.append(xTuple)
	x22 = fig2.ginput(3)
	x22Array = []
	for i in range(0,len(x22)):
		x22Tuple = (int(x22[i][0]), int(x22[i][1]))
		x22Array.append(x22Tuple)
	fig1.show()
	fig2.show()
	newArray = [np.asarray(xArray), np.asarray(x22Array)]
	x1.save(image1)
	x2.save(image2)
	print(newArray)
	return newArray
    def changeImage(event):
##        print "clicked at", event.x, event.y, "typed ", event.char
        if event.char == "1":
            print(event.char)
            myImg = resizeImg(Image.open("bardejov.jpg"), root.winfo_screenwidth())
            myImage = ImageTk.PhotoImage(myImg)
            label = Label(root, image=myImage)
            label.image = myImage
            xLabel = (root.winfo_screenwidth() - myImg.size[0])/2
            yLabel = (root.winfo_screenheight() - myImg.size[1])/2
            label.place(x=xLabel, y=yLabel)

        if event.char == "2":
            print(event.char)
            myImg = resizeImg(Image.open("rotunda.jpg"), root.winfo_screenwidth())
            myImage = ImageTk.PhotoImage(myImg)
            label = Label(root, image=myImage)
            label.image = myImage
            xLabel = (root.winfo_screenwidth() - myImg.size[0])/2
            yLabel = (root.winfo_screenheight() - myImg.size[1])/2
            label.place(x=xLabel, y=yLabel)

        if event.char == "3":
            print(event.char)
            myImg = resizeImg(Image.open("mincol.jpg"), root.winfo_screenwidth())
            myImage = ImageTk.PhotoImage(myImg)
            label = Label(root, image=myImage)
            label.image = myImage
            xLabel = (root.winfo_screenwidth() - myImg.size[0])/2
            yLabel = (root.winfo_screenheight() - myImg.size[1])/2
            label.place(x=xLabel, y=yLabel)
Example #25
  def dp_init(self):
    files = glob.glob(self.data_dir + '/*/*')
    image_files = []
    for f in files:
      try:
        Image.open(f)
        image_files.append(f)
      except:
        pass

    assert len(image_files) > 0, 'No image files found in %s' % self.data_dir
    print('Found %d files' % len(image_files))
    
    label_file = self.data_dir + '/LABELS.json'
    try:
      self.label_dict = json.load(open(label_file))
    except:
      print('No label file found, will create one.')
      self.label_dict = {}

    for f in image_files:
      type = f.split('/')[-2]
      if not type in self.label_dict:
        self.label_dict[type] = len(self.label_dict)

    with open(label_file, 'w') as l:
      json.dump(self.label_dict, l)

    self.image_files = consistent_shuffle(image_files)
    self.num_inputs = len(self.image_files)
Example #26
def getIcons(filename=None):
    """Creates wxBitmaps ``self.icon`` and ``self.iconAdd`` based on the the image.
    The latter has a plus sign added over the top.

    png files work best, but anything that wx.Image can import should be fine
    """
    icons = {}
    if filename is None:
        filename = join(dirname(abspath(__file__)), 'base.png')
        
    # get the low-res version first
    im = Image.open(filename)
    icons['24'] = pilToBitmap(im, scaleFactor=0.5)
    icons['24add'] = pilToBitmap(im, scaleFactor=0.5)
    # try to find a 128x128 version
    filename128 = filename[:-4]+'128.png'
    if False: # TURN OFF FOR NOW os.path.isfile(filename128):
        im = Image.open(filename128)
    else:
        im = Image.open(filename)
    icons['48'] = pilToBitmap(im)
    # add the plus sign
    add = Image.open(join(dirname(abspath(__file__)), 'add.png'))
    im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)
    # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],
    #               im.size[0], im.size[1]], mask=add)
    icons['48add'] = pilToBitmap(im)

    return icons
Example #27
def verify(url, model, save=False):
    """
    :param url: 验证码地址
    :param model: 处理该验证码的模型
    :param save: 是否保存临时文件到cache
    :return:
    """
    if save:
        pic_file = 'cache/todo.png'
        urllib.urlretrieve(url, pic_file)
        image = Image.open(pic_file).convert("L")
    else:
        image = Image.open(StringIO(urllib2.urlopen(url).read()))
    x_size, y_size = image.size
    y_size -= 5

    # y from 1 to y_size-5
    # x from 4 to x_size-18
    piece = (x_size-22) / 8
    centers = [4+piece*(2*i+1) for i in range(4)]
    data = np.empty((4, 21 * 16), dtype="float32")
    for i, center in enumerate(centers):
        single_pic = image.crop((center-(piece+2), 1, center+(piece+2), y_size))
        data[i, :] = np.asarray(single_pic, dtype="float32").flatten() / 255.0
        if save:
            single_pic.save('cache/todo-%s.png' % i)
    clf = joblib.load(model)
    answers = clf.predict(data)
    # map class indices to characters: 0-9 -> '0'-'9', 10-23 -> 'a'-'n',
    # 24 onward -> 'p'-'z' (apparently skipping 'o')
    answers = map(chr, map(lambda x: x + 48 if x <= 9 else x + 87 if x <= 23 else x + 88, map(int, answers)))
    return answers
Example #28
def performAffineTransColor(image1, image2):
	correspondance = imageGinput(image1, image2)
	print(correspondance)
	trn = Affine_Fit(correspondance[0], correspondance[1])
	affineParamsX = trn.getParamsX()
	affineParamsY = trn.getParamsY()
	print(trn.To_Str())
	print(affineParamsX)
	print(affineParamsY)
	x1 = Image.open(image1)
	x2 = Image.open(image2)
	width1, height1 = x1.size
	oldImage = np_from_image(image1)
	matrixFiller = np.asarray([0, 0, 0, 255])
	newImage = []
	for i in range(0,height1):
		imagevector = []
		for j in range(0,width1):
			imagevector.append(matrixFiller)
		newImage.append(np.asarray(imagevector))
	for i in range(0,height1-2):
		for j in range(0,width1-2):
			newPoint = (int(i * affineParamsX[0] + (j * affineParamsX[1]) + affineParamsX[2]), int(i * affineParamsY[0] + (j * affineParamsY[1]) + affineParamsY[2]))
			if (((newPoint[0] > 0) and (newPoint[0] < height1-1)) and ((newPoint[1] > 0) and (newPoint[1] < width1-1))):
				newImage[newPoint[0]][newPoint[1]] = oldImage[i][j] 
	save_as_image(np.asarray(newImage), 'img/affine.png')
Example #29
  def __get_offline_image(self):

    def draw_text_center(im, draw, text, font, **kwargs):
      text_height = text_top = None
      linecounter = 0
      for line in text:
        text_size = draw.textsize(line, font)
        if text_height is None:
          text_height = len(text) * ( text_size[1])
          text_top = (im.size[1] - text_height) / 2

        draw.text(
          ((im.size[0] - text_size[0]) / 2, (text_top + (linecounter * text_height)) / 2),
          line, font=font, **kwargs)

        linecounter += 1

    self.raw_image = Image.open('static/images/webcam_offline.png')

    mask = Image.open('static/images/mask_offline.png')
    draw = ImageDraw.Draw(mask)
    font = ImageFont.truetype('fonts/DejaVuSans.ttf',40)
    text = ['Offline since:',datetime.now().strftime("%A %d %B %Y"),datetime.now().strftime("%H:%M:%S")]
    draw_text_center(mask,draw,text,font)

    mask_width, mask_height = mask.size
    source_width, source_height = self.raw_image.size

    self.raw_image.paste(mask, ((source_width//2)-(mask_width//2),(source_height//2)-(mask_height//2)), mask)  # integer division keeps paste() coordinates ints under Python 3
Example #30
def processDatas():
	allfiles=os.listdir(os.getcwd() + "/datas")
	imlist=[filename for filename in allfiles if filename[-4:] == ".png"]


	w,h=Image.open("datas/" + imlist[0]).size
	N=len(imlist)
	number = 1

	arr=numpy.zeros((h,w,3),numpy.float)
	first = numpy.array(Image.open("datas/" + imlist[0]),dtype=numpy.float)

	loop = 0

	for im in imlist:
	    loop = loop + 1
	    imarr=numpy.array(Image.open("datas/" + im),dtype=numpy.float)
	    diff = first - imarr
	    total = 0
	    for idx, value in numpy.ndenumerate(diff):  # module is imported as numpy, not np
	    	total = total + value
	    total = total / first.size

	    if total*total < 0.1:
	    	number +=1
	    	arr=arr+imarr
	arr = arr/number

	arr=numpy.array(numpy.round(arr),dtype=numpy.uint8)

	out=Image.fromarray(arr,mode="RGB")
	out.save("results/Average.png")
Example #31
if infile is not None and outfile is not None:
    print("Size:", size, "   Input:", infile, "   Output:", outfile)
    run = True

if run:
    f = open(outfile, "w+")

    L1 = random.choice(L1)
    L2 = random.choice(L2)
    L3 = random.choice(L3)
    L4 = random.choice(L4)
    L5 = random.choice(L5)
    L6 = random.choice(L6)
    L7 = random.choice(L7)

    im = Image.open(infile).convert('LA')
    w, h = im.size
    im = im.resize((int(w / size), int(h / size)), Image.ANTIALIAS)

    pixels = list(im.getdata())
    width, height = im.size
    pixels = [pixels[i * width:(i + 1) * width] for i in range(height)]

    for i in pixels:
        f.write("\n")
        for e in i:
            if e[1] == 255:
                if e[0] in range(0, 32):
                    f.write(L7)
                if e[0] in range(32, 64):
                    f.write(L6)
Example #32
    def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=True, image_weights=False,
                 cache_labels=False, cache_images=False):
        path = str(Path(path))  # os-agnostic
        with open(path, 'r') as f:
            self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines()  # os-agnostic
                              if os.path.splitext(x)[-1].lower() in img_formats]

        n = len(self.img_files)
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches
        assert n > 0, 'No images found in %s' % path

        self.n = n
        self.batch = bi  # batch index of image
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect

        # Define labels
        self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
                            for x in self.img_files]

        # Rectangular Training  https://github.com/ultralytics/yolov3/issues/232
        if self.rect:
            # Read image shapes
            sp = 'data' + os.sep + path.replace('.txt', '.shapes').split(os.sep)[-1]  # shapefile path
            try:
                with open(sp, 'r') as f:  # read existing shapefile
                    s = [x.split() for x in f.read().splitlines()]
                    assert len(s) == n, 'Shapefile out of sync'
            except:
                s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
                np.savetxt(sp, s, fmt='%g')  # overwrites existing (if any)

            # Sort by aspect ratio
            s = np.array(s, dtype=np.float64)
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            i = ar.argsort()
            self.img_files = [self.img_files[i] for i in i]
            self.label_files = [self.label_files[i] for i in i]
            self.shapes = s[i]
            ar = ar[i]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32.).astype(np.int) * 32

        # Preload labels (required for weighted CE training)
        self.imgs = [None] * n
        self.labels = [None] * n
        if cache_labels or image_weights:  # cache labels for faster training
            self.labels = [np.zeros((0, 5))] * n
            extract_bounding_boxes = False
            create_datasubset = False
            pbar = tqdm(self.label_files, desc='Reading labels')
            nm, nf, ne, ns = 0, 0, 0, 0  # number missing, number found, number empty, number datasubset
            for i, file in enumerate(pbar):
                try:
                    with open(file, 'r') as f:
                        l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
                except:
                    nm += 1  # print('missing labels for image %s' % self.img_files[i])  # file missing
                    continue

                if l.shape[0]:
                    assert l.shape[1] == 5, '> 5 label columns: %s' % file
                    assert (l >= 0).all(), 'negative labels: %s' % file
                    assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                    self.labels[i] = l
                    nf += 1  # file found

                    # Create subdataset (a smaller dataset)
                    if create_datasubset and ns < 1E4:
                        if ns == 0:
                            create_folder(path='./datasubset')
                            os.makedirs('./datasubset/images')
                        exclude_classes = 43
                        if exclude_classes not in l[:, 0]:
                            ns += 1
                            # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                            with open('./datasubset/images.txt', 'a') as f:
                                f.write(self.img_files[i] + '\n')

                    # Extract object detection boxes for a second stage classifier
                    if extract_bounding_boxes:
                        p = Path(self.img_files[i])
                        img = cv2.imread(str(p))
                        h, w, _ = img.shape
                        for j, x in enumerate(l):
                            f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                            if not os.path.exists(Path(f).parent):
                                os.makedirs(Path(f).parent)  # make new output folder

                            b = x[1:] * np.array([w, h, w, h])  # box
                            b[2:] = b[2:].max()  # rectangle to square
                            b[2:] = b[2:] * 1.3 + 30  # pad
                            b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)

                            b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                            b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                            assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
                else:
                    ne += 1  # file empty

                pbar.desc = 'Reading labels (%g found, %g missing, %g empty for %g images)' % (nf, nm, ne, n)
            assert nf > 0, 'No labels found. Recommend correcting image and label paths.'

        # Cache images into memory for faster training (~5GB)
        if cache_images and augment:  # if training
            for i in tqdm(range(min(len(self.img_files), 10000)), desc='Reading images'):  # max 10k images
                img_path = self.img_files[i]
                img = cv2.imread(img_path)  # BGR
                assert img is not None, 'Image Not Found ' + img_path
                r = self.img_size / max(img.shape)  # size ratio
                if self.augment and r < 1:  # if training (NOT testing), downsize to inference shape
                    h, w, _ = img.shape
                    img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR)  # or INTER_AREA
                self.imgs[i] = img

        # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
        detect_corrupted_images = False
        if detect_corrupted_images:
            from skimage import io  # conda install -c conda-forge scikit-image
            for file in tqdm(self.img_files, desc='Detecting corrupted images'):
                try:
                    _ = io.imread(file)
                except:
                    print('Corrupted image detected: %s' % file)
Example #33
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(X_train, y_train, epochs=epochs,
                        validation_data=(X_test, y_test))

    plt.plot(history.history['acc'], 'g-')
    plt.plot(history.history['val_acc'], 'b-')

    plt.plot(history.history['loss'], 'r-')
    plt.xlim(0, 4)
# %%
else:
    # testing simple neutral network
    model = load_model('classifier.h5')

    tr = Image.open('train1.png').convert('L')
    tr = tr.resize((28, 28))
    tr = np.array(tr.getdata())

    tr = tr.astype('float32')
    t = tr/255

    t = np.array([t])

    print(model.predict(t).argmax())


# %%

# (X_train, y_train, X_test, y_test) = preprocess()
cnn = Sequential()
Example #34
    def execute_code(self, event=None):
        if not getattr(self.shared, "src_image", None):
            self.execblink("No image", "#ffff00")
            self.status("No image given")
            return

        if event != "RUN":
            thr = Thread(target=self.execute_code, args=("RUN", ))
            thr.start()
            return

        self.status("Starting...")
        self.profiler_canvas.delete("top-text")
        self.profiler_canvas.delete("pieslice")
        self.update()

        self.shared.profiler_info = {}
        code = self.codeblock._text.get('1.0', 'end')
        tempcode = tempfile.mkstemp('.py', 'detcher-')[1]
        with open(tempcode, 'w') as fd:
            fd.write(code)
        self.status("Preparing...")
        tempimg = tempfile.mkstemp('.png', 'detcher-')[1]
        self.shared.src_image.save(tempimg)

        self.execbutton.config(text="Kill", fg="#ff0000", state="normal")
        self.status("Running...")

        proc = Popen([sys.executable, "-u", "-B", sys.argv[0], "--IS-HELPER"],
                     stdin=PIPE,
                     stdout=PIPE)

        self.shared.proc = proc
        proc.stdin.write(tempcode.encode() + b"\n")
        proc.stdin.write(tempimg.encode() + b"\n")
        proc.stdin.write(f"{self.shared.seed}\n".encode())
        proc.stdin.flush()

        err = [None, None]
        out, tmpdir = None, None
        line = b'-- -- --'

        while proc.poll() is None or len(line):
            line = proc.stdout.readline()
            if not line:
                continue
            if proc.poll():
                print("[INFO] Process finished but buffer is not empty")
            ln = line.decode().rstrip()
            if ln.startswith("\x1b"):
                cmd = ln.lstrip("\x1b")
                params = cmd.split(":")
                print(f">> {cmd}")
                name = params[0]
                if name == "OKDONE":
                    out = ":".join(params[1:])
                if name == "DR":
                    ok, name, cpu, real = params[1:]
                    cpu, real = map(float, [cpu, real])
                    self.after_call(name, [cpu, real])
                if name == "ERR":
                    err[0] = ":".join(params[1:])
                if name == "ERRF":
                    err[1] = ":".join(params[1:])
                if name == "ERR_NOTB":
                    err = [":".join(params[1:]), None]
                if name == "TMP":
                    tmpdir = ":".join(params[1:])
                if name == "DBG":
                    self.status(":".join(params[1:]), "#7878fa")
            else:
                print(f"[SUB][STDOUT]: {ln}")
                self.status(f"[sub] {ln.rstrip()}", "#7878fa")
        proc.wait()

        exitcode = proc.poll()
        print(f"[SUB] -> {exitcode}")

        self.shared.proc = None
        if exitcode:
            short_e, e_file = err
            trace = "-- no trace --"
            if e_file:
                with open(e_file, "r") as fd:
                    trace = fd.read()
                rm_file(e_file)
            self.execblink("Terminated" if exitcode < 0 else "Error",
                           "#ff0000")
            self.status(f"{short_e or 'Terminated'}", "#ff0000")
            print(trace)
            self.shared.proc = None
            if tmpdir:
                shutil.rmtree(tmpdir)
            return

        self.status("Loading image...")
        self.shared.out_image = Image.open(out).copy()
        self.status("Cleaning files...")
        rm_file(tempcode)
        rm_file(tempimg)
        if tmpdir:
            shutil.rmtree(tmpdir)
        self.status("Updating...")
        self.update_image()
        self.execbutton.config(text="Execute", fg="#78fa78")
        self.status("Done.")
Example #35
def main():

	#=======================chainer setting=======================
	parse = argparse.ArgumentParser(description='test human position detection')
	parse.add_argument('--batchsize', '-b', type=int, default=100)
	parse.add_argument('--gpu', '-g', type=int, default=0) #change to -1 for use only CPU
	parse.add_argument('--model','-m', default='my_output_5.model')
	parse.add_argument('--channel', '-c', default=3)
	args = parse.parse_args()
	#=======================chainer setting=======================

	#=======================read images & labels set=======================
	pathsAndLabels = []
	pathsAndLabels.append(np.asarray(['./test/center/', 0]))
	pathsAndLabels.append(np.asarray(['./test/left/', 1]))
	pathsAndLabels.append(np.asarray(['./test/right/', 2]))
	pathsAndLabels.append(np.asarray(['./test/near/', 3]))
	pathsAndLabels.append(np.asarray(['./test/none/', 4]))

	allData = []
	for pathAndLabel in pathsAndLabels:
		path = pathAndLabel[0]
		label = pathAndLabel[1]
		imagelist = glob.glob(path + "*")
		for imgName in imagelist:
			allData.append([imgName, label])
	print('Number of images is ' + str(len(allData)))
	print('')
	#=======================read images & labels set=======================

	#=======================testing program=======================
	outNumStr = args.model.split(".")[0].split("_")
	outnum = int(outNumStr[ len(outNumStr)-1 ])
	correct = 0

	model = L.Classifier(alexLike.AlexLike(outnum))
	chainer.serializers.load_npz(args.model, model)

	count = 1
	val = ['center', 'left', 'right', 'near', 'none']
	for pathAndLabel in allData:
		img = Image.open(pathAndLabel[0])
		r,g,b = img.split()
		rImgData = np.asarray(np.float32(r)/255.0)
		gImgData = np.asarray(np.float32(g)/255.0)
		bImgData = np.asarray(np.float32(b)/255.0)
		imgData = np.asarray([[[rImgData, gImgData, bImgData]]])
		x = Variable(imgData)
		y = F.softmax(model.predictor(x.data[0]))
		predR = np.round(y.data[0])
		for pre_i in np.arange(len(predR)):
			if predR[pre_i] == 1:
				if pathAndLabel[1].astype(int) == pre_i:
					correct += 1
					print('image number ', count, 'is correct')
				else:
					print('image number', count, 'is incorrect')
					a = imgData[0][0]
					a = np.swapaxes(a,0,2)
					a = np.swapaxes(a,0,1)
					a = a*255
					a = cv2.cvtColor(a, cv2.COLOR_BGR2RGB)
					a = cv2.resize(a, (640, 480))
					cv2.putText(a,val[pre_i],(550,450), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
					cv2.putText(a,val[pathAndLabel[1].astype(int)],(20,450), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
					cv2.imwrite('wrong/'+str(count)+'.png',a)
		count += 1

	print('correct = ', correct/len(allData)*100, '%')
Example #36
    params = {'appid': weather_key, 'zip': zipcode, 'units': 'metric'}
    response = requests.get(url, params=params)
    weather = response.json()

    label['text'] = information(weather)
    readApikey.close()


root = tk.Tk()
root.title('Weather App')
root.iconphoto(False, tk.PhotoImage(file='icon.png'))

canvas = tk.Canvas(root, height=cHeight, width=cWidth, bd=20)
canvas.pack()

background_image = ImageTk.PhotoImage(Image.open("background.png"))
background_label = tk.Label(root, image=background_image)
background_label.place(relwidth=1, relheight=1)

frame = tk.Frame(root, bg="green", bd=5)
frame.place(relx=0.5, rely=0.05, relwidth=0.7, relheight=0.1, anchor="n")

entry = tk.Entry(frame, font=16)
entry.bind("<Return>", (lambda event: get_weather(entry.get())))
entry.place(relheight=1, relwidth=0.7)
entry.insert(0, 'zip code')
entry.bind('<FocusIn>', on_entry_click)
entry.bind('<FocusOut>', on_focusout)
entry.config(fg='grey')

button = tk.Button(frame,
Example #37
            min_temp = corr_temperature
            max_temp = corr_temperature

    temp_string = f"{corr_temperature:.0f}°C"
    img = overlay_text(img, (68, 18), temp_string, font_lg, align_right=True)
    spacing = font_lg.getsize(temp_string)[1] + 1
    if min_temp is not None and max_temp is not None:
        range_string = f"{min_temp:.0f}-{max_temp:.0f}"
    else:
        range_string = "------"
    img = overlay_text(img, (68, 18 + spacing),
                       range_string,
                       font_sm,
                       align_right=True,
                       rectangle=True)
    temp_icon = Image.open(f"{path}/icons/temperature.png")
    img.paste(temp_icon, (margin, 18), mask=temp_icon)

    # Humidity
    humidity = bme280.get_humidity()
    corr_humidity = correct_humidity(humidity, temperature, corr_temperature)
    humidity_string = f"{corr_humidity:.0f}%"
    img = overlay_text(img, (68, 48),
                       humidity_string,
                       font_lg,
                       align_right=True)
    spacing = font_lg.getsize(humidity_string)[1] + 1
    humidity_desc = describe_humidity(corr_humidity).upper()
    img = overlay_text(img, (68, 48 + spacing),
                       humidity_desc,
                       font_sm,
Example #38
    ###############################################################################
    print(result)
    result_str  = str(result[0][0]['rate']) + "%"
    result_name = result[0][0]['name']
    print(str(result_name))
    if(result_name == "桜井和寿"):
        result_str += "桜井和寿"
    
    elif(result_name == "吉岡聖恵"):
        result_str += "吉岡聖恵"
    
    elif(result_name == "福山雅治"):
        result_str += "福山雅治"
        
    
    # load the image
    img = Image.open(img_path)
    # create a Draw instance
    draw = ImageDraw.Draw(img)
    # font settings (path to the font file and glyph size)
    ################################################################################################
    font = ImageFont.truetype("/System/Library/Fonts/Hiragino Sans GB.ttc", 50)
    ################################################################################################
    # draw the text
    draw.text((10, 10), result_str, fill=(255, 0, 0), font=font)
    # destination to save the image
    ###############################################################################
    img.save("path")  # placeholder path in the original
    ###############################################################################

Example #39
from PIL import Image, ImageDraw

# Load image:
input_image = Image.open("lena.jpg")
input_pixels = input_image.load()

# Box Blur kernel
kernel = [[1, 1, 1, 1, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1],
          [1, 1, 1, 1, 1]]

# Select kernel here:
kernel = kernel

# Middle of the kernel
offset = len(kernel) // 2

# Create output image
output_image = Image.new("RGB", input_image.size)
draw = ImageDraw.Draw(output_image)

# Compute convolution between intensity and kernels
for x in range(offset, input_image.width - offset):
    for y in range(offset, input_image.height - offset):
        acc = [0, 0, 0]
        for a in range(len(kernel)):
            for b in range(len(kernel)):
                xn = x + a - offset
                yn = y + b - offset
                pixel = input_pixels[xn, yn]
                acc[0] += pixel[0] * kernel[a][b]
                acc[1] += pixel[1] * kernel[a][b]
                acc[2] += pixel[2] * kernel[a][b]
        # the snippet was truncated here; writing the accumulated value completes
        # the loop (dividing by the kernel sum would normalize brightness)
        draw.point((x, y), (int(acc[0]), int(acc[1]), int(acc[2])))
Example #40
import os
from PIL import Image

path = "E:/数据学习资料/自己做的小项目/爬取简书推荐作者/src/"

dirlist = []

for root, dirs, files in os.walk(path):
    for dir in dirs:
        dirlist.append(dir)
print(dirlist)

num = 0
for dir in dirlist:
    images = []  # images in each folder
    for root, dirs, files in os.walk(path + dir):  # traverse each folder
        print(path + dir)
        i = 1
        for file in files:  # iterate over every file in the folder
            print(file)
            images.append(Image.open(path + dir + '/' + file))
            im = Image.open(path + dir + '/' + file)
            im = im.convert('RGB')  # JPEG output cannot carry an alpha channel
            width, height = im.size
            width = 100  # target width
            height = 100  # target height
            resizedim = im.resize((width, height))  # resize the image
            name = path + str(i) + '.jpg'  # output file name
            resizedim.save(name)  # save the image
            i += 1
Example #41
 def __init__(self, nation, territory, order, status):
     super().__init__(nation, territory, order, status)
     self.icon = ImageTk.PhotoImage(
         Image.open("GUI\\fleet_" + nation + ".png").resize(
             (40, 40), Image.ANTIALIAS))
     self.unitType = "fleet"
def main():
    st.title('Trying out Sentiment Analysis with Streamlit!')

    st.subheader("EDA, Data Cleaning, & Modeling with Kaggle's \
		Twitter US Ariline Sentiment Dataset.")

    main_image = Image.open('./Images/nlp-pipe.jpg')
    st.image(main_image, use_column_width=True)

    html_temp = """
	<div style="background-color:tomato;"><p style="color:white; font-size:18px; text-align:center">Choose what to do:</p></div>
	"""
    st.markdown(html_temp, unsafe_allow_html=True)

    if st.checkbox('Exploratory Data Analysis'):
        explorer = EDA()
        n_rows = st.sidebar.slider('Displaying dataset, select number of rows',
                                   10, 20)

        all_cols = explorer.df.columns.tolist()
        select_cols = st.sidebar.multiselect('Select column(s) to display:',
                                             all_cols,
                                             ['airline_sentiment', 'text'])

        'Number of rows:', n_rows,  #
        explorer.df[select_cols].head(n_rows),  #

        if st.sidebar.checkbox('Most Frequent Words Per Category'):
            '---------------------------------------------',  #
            st.info("Try with removing stopwords and/or tags('@'/'#')")
            st.write(
                'Most Frequent Words for Positive(Blue), Negative(Red), and Neutral(Green) Tweets:'
            )
            c = st.sidebar.slider(
                'Select a number for the top frequent words to display', 10,
                15, 10)
            c = int(c)

            remove_stop = False
            if st.sidebar.checkbox('Remove stop words'):
                remove_stop = True

            remove_at = False
            if st.sidebar.checkbox('Remove @ and #'):
                remove_at = True

            freqs = explorer.most_freq_words(c, remove_at, remove_stop)
            plt.show()
            st.pyplot()

            cat = st.sidebar.selectbox(
                "To view word counts, select a sentiment category",
                ('Positive', 'Negative', 'Neutral'))

            if cat == 'Positive':
                'Top words in ', freqs[0][0], ' tweets',  #
                freqs[0][1].head(c),  #
            elif cat == 'Negative':
                'Top words in ', freqs[1][0], ' tweets',  #
                freqs[1][1].head(c),  #
            else:
                'Top words in ', freqs[2][0], ' tweets',  #
                freqs[2][1].head(c),  #

        if st.sidebar.checkbox('Word Counts'):
            '---------------------------------------------',  #
            explorer.word_counts()
            st.pyplot()

        if st.sidebar.checkbox("View most frequent @'s and #'s"):
            '---------------------------------------------',  #
            char = st.sidebar.radio('', ('@', '#'))
            if char == '@':
                explorer.find_at_hash()
            else:
                explorer.find_at_hash(at=False)
            st.pyplot()

        if st.sidebar.checkbox("View most frequent emojis and emoticons"):
            '---------------------------------------------',  #
            c = st.sidebar.slider('Choose the number of top emojis to view',
                                  10, 20)
            emojis = explorer.find_emojis()
            emojis.head(c),  #
            st.balloons()

        if st.sidebar.checkbox('Target Field'):
            '---------------------------------------------',  #
            explorer.show_target_field()
            st.pyplot()

    if st.checkbox("Text Preprocessing And Sentiment Analysis"):
        text = st.text_area(
            "Enter your text to analyze:",
            "@americanairline Thanks for the #amazing flying experience!")
        cleaner = Cleaner(text)
        operations = st.sidebar.multiselect(
            "Choose the preprocessing steps to perform", [
                'Lowercasing', 'Remove html tags', 'Remove punctuations',
                'Replace links', 'Replace emojis', 'Replace Mentions(@)',
                'Replace Hashtags(#)', 'Remove stop words', 'Lemmatization',
                'Spell correction'
            ], ['Remove stop words'])

        str_to_func = {
            'Lowercasing': cleaner.lowercasing,
            'Remove html tags': cleaner.remove_html,
            'Remove punctuations': cleaner.remove_punc,
            'Replace links': cleaner.replace_links,
            'Replace Mentions(@)': cleaner.replace_mentions,
            'Replace Hashtags(#)': cleaner.replace_hashtags,
            'Replace emojis': cleaner.replace_emojis,
            'Remove stop words': cleaner.remove_stop,
            'Lemmatization': cleaner.lemmatize,
            'Spell correction': cleaner.sepll_correct
        }

        if not operations:
            st.info('### No preprocessing steps selected')
        else:
            for op in operations:
                op = str_to_func[op]
                sample_text, findings = op()

                if findings:
                    st.info(op.__doc__ + ', '.join(findings).strip())

            st.write('#### Preprocessed text: ', sample_text)

        if st.button("Analyze Text Sentiment"):
            model = load('./Model/lr_clf.joblib')
            # confusion_matrix = Image.open('./Images/confusion_matrix.jpg')
            # 'Model Performance on the Test set:', #
            # st.image(confusion_matrix)

            class_names = ['negative', 'neutral', 'positive']
            explainer = LimeTextExplainer(class_names=class_names)

            if text:
                model = load('./lr_clf.joblib')
                processed_text, sentiment = get_sentiment(text, model)
                'Original text ---> ', text,  #
                'Processed text --> ', processed_text,  #
                'Text Sentiment --> {}'.format(sent_dict[sentiment]),  #

                exp = explainer.explain_instance(processed_text,
                                                 model.predict_proba)
                # exp.show_in_notebook()
                exp.as_pyplot_figure()
                st.pyplot()
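# NOTE: the snippet above relies on a get_sentiment() helper and a sent_dict
# mapping that are defined elsewhere in this app. A minimal, hypothetical
# sketch of what get_sentiment likely does, assuming the joblib file holds a
# scikit-learn classifier with a predict() method:
def get_sentiment(text, model):
    # run the same cleaning used at training time, then classify
    cleaner = Cleaner(text)
    processed_text, _ = cleaner.remove_stop()
    sentiment = model.predict([processed_text])[0]
    return processed_text, sentiment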
Example #43
0
        move.getUserMoves(terrs, allUnits[nation], turnMoves, nation)
        print()

    #Adjudicate user moves and return outcomes/dislodge status
    print("Adjudicating turn outcome...")
    adjudedMoves = adjudicator.adjudicate(turnMoves)

    #-------------------------------- MOVE GUI ------------------------------------#
    root = rootWindow()
    graphicsCanvas, graphicsFrame = graphicsWindow(root)
    textFrame, text_title, textSubframe, scrollbar, text, buttonFrame = textWindow(
        root, yearString)

    #kludgey - load in icon images (LANCZOS replaces the deprecated ANTIALIAS filter)
    convoy_icon = ImageTk.PhotoImage(
        Image.open("GUI\\convoy_icon.png").resize((50, 9), Image.LANCZOS))
    dislodged_icon = ImageTk.PhotoImage(
        Image.open("GUI\\dislodge_icon.png").resize((60, 60), Image.LANCZOS))
    hold_icon = ImageTk.PhotoImage(
        Image.open("GUI\\hold_icon.png").resize((50, 4), Image.LANCZOS))
    fail_icon = ImageTk.PhotoImage(
        Image.open("GUI\\disband_icon.png").resize((25, 25), Image.LANCZOS))

    phaseMoveDraw(adjudedMoves, buttonFrame, graphicsCanvas)

    #------------------------------------------------------------------------------#

    #Write adjuded moves to dicts of new unit positions, and dislodged units
    print(yearString + " Move Outcomes: \n")
    for nation in nationality.keys():
        outputLists = move.movePhaseOutput(adjudedMoves, nationality, nation)
Example #44
0
     await m.download(file_name=thumb_image_path)
     thumb_image_path = thumb_image_path
 else:
     thumb_image_path = None
 else:
     width = 0
     height = 0
     metadata = extractMetadata(createParser(thumb_image_path))
     if metadata.has("width"):
         width = metadata.get("width")
     if metadata.has("height"):
         height = metadata.get("height")
     # resize image
     # ref: https://t.me/PyrogramChat/44663
     # https://stackoverflow.com/a/21669827/4723940
     Image.open(thumb_image_path).convert("RGB").save(thumb_image_path)
     img = Image.open(thumb_image_path)
     # https://stackoverflow.com/a/37631799/4723940
     # img.thumbnail((90, 90))
     img = img.resize((320, height))  # resize() returns a new image; re-assign it
     img.save(thumb_image_path, "JPEG")
     # https://pillow.readthedocs.io/en/3.1.x/reference/Image.html#create-thumbnails
 c_time = time.time()
 await bot.send_document(
     chat_id=update.chat.id,
     document=new_file_name,
     thumb=thumb_image_path,
     caption=description,
     # reply_markup=reply_markup,
     reply_to_message_id=update.reply_to_message.message_id,
     progress=progress_for_pyrogram,
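# The snippet above is truncated and its if/else structure is broken as
# extracted. A self-contained, hedged sketch of the thumbnail-normalization
# step it performs (assuming Pillow and hachoir, as used above):
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from PIL import Image

def prepare_thumb(thumb_image_path):
    height = 0
    metadata = extractMetadata(createParser(thumb_image_path))
    if metadata and metadata.has("height"):
        height = metadata.get("height")
    # Telegram expects JPEG thumbnails: force RGB, resize, re-save in place
    img = Image.open(thumb_image_path).convert("RGB")
    img = img.resize((320, height or 320))
    img.save(thumb_image_path, "JPEG")
    return thumb_image_path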
Example #45
0
 def transform(self, X):
     return pytesseract.image_to_string(Image.open(X))
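# For context: image_to_string() plugs OCR into a scikit-learn-style pipeline.
# A hedged sketch of the full transformer this method likely belongs to,
# following the usual fit/transform contract (the class name is hypothetical):
import pytesseract
from PIL import Image

class OCRTransformer:
    def fit(self, X, y=None):
        return self  # nothing to learn; OCR is stateless

    def transform(self, X):
        # X is a path to an image file; returns the recognized text
        return pytesseract.image_to_string(Image.open(X))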
Example #46
0
def drawBackground(canv):
    global bg_image
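    # the module-level reference keeps Tkinter from garbage-collecting the image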
    bg_image = ImageTk.PhotoImage(Image.open("GUI\\bg_small.jpg"))
    canv.create_image(0, 0, image=bg_image, anchor="nw")
Example #47
0
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.
        # Arguments
            index_array: array of sample indices to include in batch.
        # Returns
            A batch of transformed samples.
        """
        current_batch_size = len(index_array)

        # The transformation of images is not under thread lock so it can be
        # done in parallel
        if self.target_size:
            # TODO(ahundt) make dtype properly configurable
            batch_x = np.zeros((current_batch_size, ) + self.image_shape)
            if self.loss_shape is None and self.label_file_format == 'img':
                batch_y = np.zeros((current_batch_size, ) + self.label_shape,
                                   dtype=int)
            elif self.loss_shape is None:
                batch_y = np.zeros((current_batch_size, ) + self.label_shape)
            else:
                batch_y = np.zeros((current_batch_size, ) + self.loss_shape,
                                   dtype=np.uint8)
        grayscale = self.color_mode == 'grayscale'
        # build batch of image data and labels
        for i, j in enumerate(index_array):
            data_file = self.data_files[j]
            label_file = self.label_files[j]
            img_file_format = 'img'
            img = load_img(os.path.join(self.data_dir, data_file),
                           grayscale=grayscale,
                           target_size=None)
            label_filepath = os.path.join(self.label_dir, label_file)

            if self.label_file_format == 'npy':
                y = np.load(label_filepath)
            else:
                label = Image.open(label_filepath)
                if self.save_to_dir and self.palette is None:
                    self.palette = label.palette

            # do padding
            if self.target_size:
                if self.crop_mode != 'none':
                    x = img_to_array(img, data_format=self.data_format)
                    if self.label_file_format != 'npy':
                        y = img_to_array(
                            label, data_format=self.data_format).astype(int)
                    img_w, img_h = img.size
                    if self.pad_size:
                        pad_w = max(self.pad_size[1] - img_w, 0)
                        pad_h = max(self.pad_size[0] - img_h, 0)
                    else:
                        pad_w = max(self.target_size[1] - img_w, 0)
                        pad_h = max(self.target_size[0] - img_h, 0)
                    if self.data_format == 'channels_first':
                        x = np.lib.pad(x, ((0, 0),
                                           (pad_h // 2, pad_h - pad_h // 2),
                                           (pad_w // 2, pad_w - pad_w // 2)),
                                       'constant',
                                       constant_values=0.)
                        y = np.lib.pad(y, ((0, 0),
                                           (pad_h // 2, pad_h - pad_h // 2),
                                           (pad_w // 2, pad_w - pad_w // 2)),
                                       'constant',
                                       constant_values=self.label_cval)
                    elif self.data_format == 'channels_last':
                        x = np.lib.pad(x, ((pad_h // 2, pad_h - pad_h // 2),
                                           (pad_w // 2, pad_w - pad_w // 2),
                                           (0, 0)),
                                       'constant',
                                       constant_values=0.)
                        y = np.lib.pad(y, ((pad_h // 2, pad_h - pad_h // 2),
                                           (pad_w // 2, pad_w - pad_w // 2),
                                           (0, 0)),
                                       'constant',
                                       constant_values=self.label_cval)
                else:
                    x = img_to_array(img.resize(
                        (self.target_size[1], self.target_size[0]),
                        Image.BILINEAR),
                                     data_format=self.data_format)
                    if self.label_file_format != 'npy':
                        y = img_to_array(
                            label.resize(
                                (self.target_size[1], self.target_size[0]),
                                Image.NEAREST),
                            data_format=self.data_format).astype(int)
                    else:
                        print(
                            'ERROR: resize not implemented for label npy file')

            if self.target_size is None:
                batch_x = np.zeros((current_batch_size, ) + x.shape)
                if self.loss_shape is not None:
                    batch_y = np.zeros((current_batch_size, ) +
                                       self.loss_shape)
                else:
                    batch_y = np.zeros((current_batch_size, ) + y.shape)

            x, y = self.seg_data_generator.random_transform(x, y)
            x = self.seg_data_generator.standardize(x)

            if self.ignore_label:
                y[np.where(y == self.ignore_label)] = self.classes

            if self.loss_shape is not None:
                y = np.reshape(y, self.loss_shape)

            batch_x[i] = x
            batch_y[i] = y
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i in range(current_batch_size):
                img = array_to_img(batch_x[i], self.data_format, scale=True)
                label = batch_y[i][:, :, 0].astype('uint8')
                label[np.where(label == self.classes)] = self.ignore_label
                label = Image.fromarray(label, mode='P')
                label.palette = self.palette
                # TODO(ahundt) fix index=i, a hacky workaround since current_index + i is no longer available
                fname = '{prefix}_{index}_{hash}'.format(
                    prefix=self.save_prefix,
                    index=i,
                    hash=np.random.randint(1e4))
                img.save(
                    os.path.join(
                        self.save_to_dir, 'img_' + fname +
                        '.{format}'.format(format=self.save_format)))
                label.save(
                    os.path.join(self.save_to_dir, 'label_' + fname + '.png'))
        # return
        # batch_x = preprocess_input(batch_x)
        if self.class_mode == 'sparse':
            return batch_x, batch_y
        else:
            return batch_x
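# The pad split used above centers the content: pad // 2 goes before and
# pad - pad // 2 after, which distributes odd pads as evenly as possible.
# The same arithmetic in isolation, as a quick sanity check:
import numpy as np

x = np.ones((3, 5))
pad_h, pad_w = 6 - x.shape[0], 6 - x.shape[1]  # 3 and 1
padded = np.lib.pad(x, ((pad_h // 2, pad_h - pad_h // 2),
                        (pad_w // 2, pad_w - pad_w // 2)),
                    'constant', constant_values=0.)
assert padded.shape == (6, 6)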
Example #48
0
        temp2 = temp1  # to match the depth of the activation map after the convolution layer with the kernel depth

        strideVect.append((1,1))
        
        padVect.append('valid')

        actfnVect.append('relu')       
        
        poolfnVect.append('max')

    return kernelMatrix,strideVect,padVect,actfnVect,poolfnVect


################################

img = np.array(Image.open('check.jpg'))

plt.imshow(img)
plt.show()

img = (img - np.mean(img)) / np.std(img)
img = img.reshape([img.shape[0], img.shape[1], 3])

NoOfConv = 2

kernelMatrix,strideVect,padVect,actfnVect,poolfnVect=init(img,NoOfConv)

###############################
# CNN
feature_maps,poolOut = compConv(img,NoOfConv,kernelMatrix,strideVect,padVect,actfnVect,poolfnVect)
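# The one-line standardization above (subtract the global mean, divide by the
# global std) can be sanity-checked on a dummy array:
import numpy as np

dummy = np.random.rand(8, 8, 3)
dummy = (dummy - np.mean(dummy)) / np.std(dummy)
assert abs(dummy.mean()) < 1e-9 and abs(dummy.std() - 1.0) < 1e-9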
Example #49
0
from PIL import Image  # PIL (Pillow): library that helps us work with images

img = Image.open(
    '/media/amey/New Volume/Programs_Python/img_name.jpeg'
)  # img object needed to work with images, similar to text file

print(img.size)
print(img.format)

img.show(
)  # images can't be displayed in the terminal; NOTE: the opened file's name differs from img_name
# show() saves a temporary copy of the image and displays it using the default application
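# Since show() only opens a throwaway copy, edits must be saved explicitly.
# A minimal sketch (file names are placeholders):
from PIL import Image

img = Image.open('img_name.jpeg')
img = img.rotate(90)                 # any edit works here
img.save('img_name_rotated.jpeg')    # persists the edit; show() would not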
Example #50
0
from PIL import Image, ImageDraw

width = 500
height = 500


img = Image.open(r'C:\Users\wiley\Documents\class_pangolin\code\wiley\lab16\not_stock_resize.jpg')
img = img.convert('RGBA')

#img.load()
#img = Image.open()

draw = ImageDraw.Draw(img)



# the origin (0, 0) is at the top-left corner

#draw.rectangle(((0, 0), (width, height)), fill=("grey"))

# draw a rectangle from x0, y0 to x1, y1
#draw.rectangle(((250, 250), (600, 600)), fill="lightblue")

# draw a line from x0, y0, x1, y1
# using the color pink
color = (255, 128, 128)  # pink (channel values must be in the 0-255 range)

#line starts at x250,y150 and ends at x250 y400
#this is the "body"

draw.line((250, 150, 250, 400), fill=color, width=10)
Example #51
0
    def chart_to_image(self, file_name):
        """Creates the specified chart and saves it to an image. """

        # Create axis for the price and technical indicator graph
        ax0 = self.fig.add_subplot(411)
        plt.axis('off')

        # Plot Price
        self.df.plot(x=self.col_label, y=self.row_label, ax=ax0, color='black', label='_nolegend_', linewidth=3)

        # Plot Technical Indicators
        if self.tech_inds:
            for col_name in self.tech_inds:
                ti_df = self.df[['time', col_name]].copy()
                ti_df.plot(x='time', y=col_name, ax=ax0, label='_nolegend_')


        # Plot Volume as Bar Chart on the bottom
        # Turn off the axes and background lines
        ax1 = self.fig.add_subplot(412)
        plt.axis('off')

        # Plot candlesticks
        candlestick2_ochl(width=0.4, colorup='g', colordown='r',
                          ax=ax1, opens=self.df['open'],
                          closes=self.df['close'],
                          highs=self.df['high'], lows=self.df['low'], )


        # Create axis for the volume bar chart
        ax2 = self.fig.add_subplot(413)


        time_list = self.x
        volume_list = self.df.volume.tolist()

        norm_volume_list = normalize_by_dataset(volume_list, self.y, from_origin=True)

        # Plot the volume graph
        plt.axis('off')
        vol_df = pd.DataFrame(list(zip(time_list, norm_volume_list)), columns=['time', 'volume'])
        vol_df.plot.bar(x='time', y='volume', ax=ax2, label='_nolegend_')



        # Create axis for special technical indicators, obv, macd, etc.
        ax3 = self.fig.add_subplot(414)
        plt.axis('off')
        
        if 'macd' in self.df:
            # Normalize macd
            macd_list = self.df['macd'].tolist()
            norm_macd_list = normalize_by_dataset(macd_list, self.y, from_origin=True)

            # Plot the macd graph
            vol_df = pd.DataFrame(list(zip(time_list, norm_macd_list)),
                                  columns=['time', 'macd'])
            vol_df.plot(x='time', y='macd', ax=ax3, label='_nolegend_')

        # Save to an image

        # low dpi and low quality
        plt.savefig(file_name, bbox_inches='tight', dpi=45)  # savefig has no legend kwarg

        # Resize to 224 x 224
        with Image.open(file_name) as img:
            img = img.resize((224, 224), Image.LANCZOS)  # LANCZOS replaces the deprecated ANTIALIAS
            img.save(file_name)
            
            # close image
            img.close()
        
        # close all open plots to save memory
        plt.close('all')
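# An alternative to the save-then-resize round trip above: render the figure
# at the target pixel size directly by deriving figsize from the dpi. A
# hedged sketch (bbox_inches='tight' is omitted because it changes the size):
import matplotlib.pyplot as plt

dpi = 45
fig = plt.figure(figsize=(224 / dpi, 224 / dpi), dpi=dpi)
# ... plot onto fig as above ...
fig.savefig('chart.png', dpi=dpi)   # lands on 224x224 pixels directly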
Example #52
0
def matching():
    i = 700
    t_set = set()

    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    label = {}
    with open("labels.pickle", 'rb') as f:
        og_label = pickle.load(f)
        label = {v: k for k, v in og_label.items()}
    recognizer.read("trainner.yml")

    T_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    t_img_dir = os.path.join(T_BASE_DIR, "tracked_images")

    for root, dirs, files in os.walk(t_img_dir):
        for file in files:

            if file.endswith(".png") or file.endswith(".jpg"):

                path = os.path.join(root, file)
                #print(path)
                #roi_color=frame[y:y+h,x:x+w]
                pil_image = Image.open(path).convert(
                    "L")  # used for converting image into grayscale
                size = (550, 550)
                final_image = pil_image.resize(size, Image.LANCZOS)  # LANCZOS replaces the deprecated ANTIALIAS
                img_array = np.array(
                    final_image, "uint8"
                )  #uint8 is type and converts images to array format
                #print(img_array)

                faces = face_cascade.detectMultiScale(img_array, 1.4, 3)
                #print(faces)

                for (x, y, w, h) in faces:

                    roi = img_array[y:y + h, x:x + w]
                    id_, percent = recognizer.predict(roi)

                    if percent > 15:
                        if label[id_] not in t_set:
                            t_set.add(label[id_])
                            #print(label[id_])
                            cur.execute(
                                "select attendance from attendance where sno={}"
                                .format(label[id_]))
                            for x in cur.fetchall():
                                for y in x:
                                    cur.execute(
                                        "update attendance set attendance={}+1 where sno={}"
                                        .format(y, label[id_]))
                            cur.execute(
                                "select attendance from attendance where sno={}"
                                .format(label[id_]))
                            for i in cur.fetchall():
                                for j in i:
                                    #print(j)
                                    cur.execute(
                                        "update attendance set percent=({}/20)*100 where sno={}"
                                        .format(j, label[id_]))
                                    connection.commit()

    if len(t_set) > 1:
        say = "There are {} students in the class".format(len(t_set))
        engine.say(say)
        engine.runAndWait()
    elif len(t_set) == 1:
        say = "There is only a single student in the class and they bear id number {}".format(
            next(iter(t_set)))
        engine.say(say)
        engine.runAndWait()
    else:
        say = "There is no student in the class"
        engine.say(say)
        engine.runAndWait()
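# The string-formatted SQL above is fragile (and injectable). A parameterized
# form of the same read-then-update, assuming a MySQL-style DB-API driver
# where %s is the placeholder (cur, label, id_ as in the example above):
cur.execute("select attendance from attendance where sno=%s", (label[id_],))
(count,) = cur.fetchone()
cur.execute("update attendance set attendance=%s where sno=%s",
            (count + 1, label[id_]))
connection.commit()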
Example #53
0
# Author:  Martin McBride
# Created: 2021-03-16
# Copyright (C) 2021, Martin McBride
# License: MIT

import numpy as np
from PIL import Image
from scipy import ndimage
import math

img_in = Image.open('boat.jpg')
array = np.array(img_in)


def transform(output_coords):
    return output_coords[0] % 200, output_coords[1] % 300, output_coords[2]


transformed_array = ndimage.geometric_transform(array, transform)

img_out = Image.fromarray(transformed_array)
img_out.save('geo-boat.jpg')
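# geometric_transform calls the mapping once per output pixel and expects it
# to return the matching *input* coordinate; that is why the modulo above
# tiles the image. The same mechanism with a simple one-column shift:
import numpy as np
from scipy import ndimage

a = np.arange(12.).reshape(3, 4)
shifted = ndimage.geometric_transform(a, lambda out: (out[0], out[1] + 1))
# output (r, c) now holds input (r, c + 1); out-of-range pixels default to 0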
Example #54
0
import sys

from PIL import Image

try:
	infile = sys.argv[1]
	outfile = sys.argv[2]
except IndexError:
	print("Usage: %s <input> <output>" % (sys.argv[0],))
	print()
	print("    <input>: any existing image file, e.g. a PNG,")
	print("             preferably 640x480")
	print()
	print("   <output>: output filename - raw data to dump to")
	print("             /dev/fb/0, assuming you got the image")
	print("             dimensions right")
	print()
	sys.exit(1)

im = Image.open( infile )
im2 = im.convert( "RGB" )
data = im2.getdata()

fp = open( outfile, "wb" )
for (r,g,b) in data:
	assert r >= 0 and r <= 255
	assert g >= 0 and g <= 255
	assert b >= 0 and b <= 255
#	r /= 8
#	g /= 4
#	b /= 8
#	assert r >= 0 and r <= 31
#	assert g >= 0 and g <= 63
#	assert b >= 0 and b <= 31
#	value = r*2048+g*32+b
Example #55
0
    elif o == "-c":
        output_format = a

    if o == "-g":
        convert = "L"
    elif o == "-p":
        convert = "P"
    elif o == "-r":
        convert = "RGB"

    elif o == "-o":
        options["optimize"] = 1
    elif o == "-q":
        options["quality"] = string.atoi(a)

if len(argv) != 2:
    usage()

try:
    im = Image.open(argv[0])
    if convert and im.mode != convert:
        im.draft(convert, im.size)
        im = im.convert(convert)
    if output_format:
        im.save(argv[1], output_format, **options)
    else:
        im.save(argv[1], **options)
except:
    print("cannot convert image", end=' ')
    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
Example #56
0
def run_mdnet(img_list, init_bbox, gt=None, savefig_dir='', display=False):

    # Init bbox
    target_bbox = np.array(init_bbox)
    result = np.zeros((len(img_list), 4))
    result_bb = np.zeros((len(img_list), 4))
    result[0] = target_bbox
    result_bb[0] = target_bbox
    average_overlap = 0.0

    # Init model
    model = MDNet(opts['model_path'])
    if opts['use_gpu']:
        model = model.cuda()
    model.set_learnable_params(opts['ft_layers'])

    # Init criterion and optimizer
    criterion = BinaryLoss()
    init_optimizer = set_optimizer(model, opts['lr_init'])
    update_optimizer = set_optimizer(model, opts['lr_update'])

    tic = time.time()
    # Load first image
    image = Image.open(img_list[0]).convert('RGB')

    # Train bbox regressor
    bbreg_examples = gen_samples(
        SampleGenerator('uniform', image.size, 0.3, 1.5, 1.1), target_bbox,
        opts['n_bbreg'], opts['overlap_bbreg'], opts['scale_bbreg'])
    bbreg_feats = forward_samples(model, image, bbreg_examples)
    bbreg = BBRegressor(image.size)
    bbreg.train(bbreg_feats, bbreg_examples, target_bbox)

    # Draw pos/neg samples
    pos_examples = gen_samples(
        SampleGenerator('gaussian', image.size, 0.1, 1.2), target_bbox,
        opts['n_pos_init'], opts['overlap_pos_init'])

    neg_examples = np.concatenate([
        gen_samples(SampleGenerator('uniform', image.size, 1, 2,
                                    1.1), target_bbox, opts['n_neg_init'] // 2,
                    opts['overlap_neg_init']),
        gen_samples(SampleGenerator('whole', image.size, 0, 1.2,
                                    1.1), target_bbox, opts['n_neg_init'] // 2,
                    opts['overlap_neg_init'])
    ])
    neg_examples = np.random.permutation(neg_examples)

    # Extract pos/neg features
    pos_feats = forward_samples(model, image, pos_examples)
    neg_feats = forward_samples(model, image, neg_examples)
    feat_dim = pos_feats.size(-1)

    # Initial training
    train(model, criterion, init_optimizer, pos_feats, neg_feats,
          opts['maxiter_init'])

    # Init sample generators
    sample_generator = SampleGenerator('gaussian',
                                       image.size,
                                       opts['trans_f'],
                                       opts['scale_f'],
                                       valid=True)
    pos_generator = SampleGenerator('gaussian', image.size, 0.1, 1.2)
    neg_generator = SampleGenerator('uniform', image.size, 1.5, 1.2)

    # Init pos/neg features for update
    pos_feats_all = [pos_feats[:opts['n_pos_update']]]
    neg_feats_all = [neg_feats[:opts['n_neg_update']]]

    spf_total = time.time() - tic

    # Display
    savefig = savefig_dir != ''
    if display or savefig:
        dpi = 80.0
        figsize = (image.size[0] / dpi, image.size[1] / dpi)

        fig = plt.figure(frameon=False, figsize=figsize, dpi=dpi)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        im = ax.imshow(image, aspect=1)

        if gt is not None:
            gt_rect = plt.Rectangle(tuple(gt[0, :2]),
                                    gt[0, 2],
                                    gt[0, 3],
                                    linewidth=3,
                                    edgecolor="#00ff00",
                                    zorder=1,
                                    fill=False)
            ax.add_patch(gt_rect)

        rect = plt.Rectangle(tuple(result_bb[0, :2]),
                             result_bb[0, 2],
                             result_bb[0, 3],
                             linewidth=3,
                             edgecolor="#ff0000",
                             zorder=1,
                             fill=False)
        ax.add_patch(rect)

        if display:
            plt.pause(.01)
            plt.draw()
        if savefig:
            fig.savefig(os.path.join(savefig_dir, '0000.jpg'), dpi=dpi)

    # Main loop
    for i in range(1, len(img_list)):

        tic = time.time()
        # Load image
        image = Image.open(img_list[i]).convert('RGB')

        # Estimate target bbox
        samples = gen_samples(sample_generator, target_bbox, opts['n_samples'])
        sample_scores = forward_samples(model, image, samples, out_layer='fc6')
        top_scores, top_idx = sample_scores[:, 1].topk(5)
        top_idx = top_idx.cpu().numpy()
        target_score = top_scores.mean()
        target_bbox = samples[top_idx].mean(axis=0)

        success = target_score > opts['success_thr']

        # Expand search area at failure
        if success:
            sample_generator.set_trans_f(opts['trans_f'])
        else:
            sample_generator.set_trans_f(opts['trans_f_expand'])

        # Bbox regression
        if success:
            bbreg_samples = samples[top_idx]
            bbreg_feats = forward_samples(model, image, bbreg_samples)
            bbreg_samples = bbreg.predict(bbreg_feats, bbreg_samples)
            bbreg_bbox = bbreg_samples.mean(axis=0)
        else:
            bbreg_bbox = target_bbox

        # Copy previous result at failure
        if not success:
            target_bbox = result[i - 1]
            bbreg_bbox = result_bb[i - 1]

        # Save result
        result[i] = target_bbox
        result_bb[i] = bbreg_bbox

        # Data collect
        if success:
            # Draw pos/neg samples
            pos_examples = gen_samples(pos_generator, target_bbox,
                                       opts['n_pos_update'],
                                       opts['overlap_pos_update'])
            neg_examples = gen_samples(neg_generator, target_bbox,
                                       opts['n_neg_update'],
                                       opts['overlap_neg_update'])

            # Extract pos/neg features
            pos_feats = forward_samples(model, image, pos_examples)
            neg_feats = forward_samples(model, image, neg_examples)
            pos_feats_all.append(pos_feats)
            neg_feats_all.append(neg_feats)
            if len(pos_feats_all) > opts['n_frames_long']:
                del pos_feats_all[0]
            if len(neg_feats_all) > opts['n_frames_short']:
                del neg_feats_all[0]

        # Short term update
        if not success:
            nframes = min(opts['n_frames_short'], len(pos_feats_all))
            pos_data = torch.stack(pos_feats_all[-nframes:],
                                   0).view(-1, feat_dim)
            neg_data = torch.stack(neg_feats_all, 0).view(-1, feat_dim)
            train(model, criterion, update_optimizer, pos_data, neg_data,
                  opts['maxiter_update'])

        # Long term update
        elif i % opts['long_interval'] == 0:
            pos_data = torch.stack(pos_feats_all, 0).view(-1, feat_dim)
            neg_data = torch.stack(neg_feats_all, 0).view(-1, feat_dim)
            train(model, criterion, update_optimizer, pos_data, neg_data,
                  opts['maxiter_update'])

        spf = time.time() - tic
        spf_total += spf

        # Display
        if display or savefig:
            im.set_data(image)

            if gt is not None:
                gt_rect.set_xy(gt[i, :2])
                gt_rect.set_width(gt[i, 2])
                gt_rect.set_height(gt[i, 3])

            rect.set_xy(result_bb[i, :2])
            rect.set_width(result_bb[i, 2])
            rect.set_height(result_bb[i, 3])

            if display:
                plt.pause(.01)
                plt.draw()
            if savefig:
                fig.savefig(os.path.join(savefig_dir, '%04d.jpg' % (i)),
                            dpi=dpi)

        if gt is None:
            print "Frame %d/%d, Score %.3f, Time %.3f" % \
                (i, len(img_list), target_score, spf)
        else:
            print "Frame %d/%d, Overlap %.3f, Score %.3f, Time %.3f" % \
                (i, len(img_list), overlap_ratio(gt[i],result_bb[i])[0], target_score, spf)
            average_overlap += overlap_ratio(gt[i], result_bb[i])[0]

    fps = len(img_list) / spf_total
    average_overlap = average_overlap / len(img_list)
    print "Average Overlap %.3f" % average_overlap
    return result, result_bb, fps, average_overlap
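# A hedged sketch of driving run_mdnet over a tracking sequence; the paths
# and ground-truth format (one x,y,w,h row per frame) are assumptions:
import os
import numpy as np

seq_dir = 'sequences/Car4/img'
img_list = sorted(os.path.join(seq_dir, f) for f in os.listdir(seq_dir))
gt = np.loadtxt('sequences/Car4/groundtruth.txt', delimiter=',')

result, result_bb, fps, avg_overlap = run_mdnet(img_list, gt[0], gt=gt)
print("fps: %.1f, mean overlap: %.3f" % (fps, avg_overlap))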
Example #57
0
import os

from src import config
from src.prediction.predict import Predictor
from PIL import Image

if __name__ == "__main__":
    # test_validation

    test_img_path = config.test_img_dir_file

    print("test_img_path->", test_img_path)
    predictor = Predictor()
    image = Image.open(test_img_path)
    image = predictor.prepare_image(image)

    predictor.predict(image)
Example #58
0
def beam_search(checkpoint_path,
                img_path,
                beam_size=5,
                vocab=None,
                transforms=None,
                device=None):

    k = beam_size
    # input tensor preparation
    img = Image.open(img_path)
    if transforms is not None: img = transforms(img)
    img = img.unsqueeze(0)  #treating as batch of size 1

    # Checkpoint loading
    checkpoint = torch.load(checkpoint_path, map_location=str(device))
    decoder = checkpoint['decoder']
    decoder = decoder.to(device)
    decoder.eval()
    encoder = checkpoint['encoder']
    encoder = encoder.to(device)
    encoder.eval()

    # encoder output
    encoder_out = encoder(img)
    encoder_dim = encoder_out.size(-1)
    num_pixels = encoder_out.size(1)

    # expand or repeat 'k' time
    encoder_out = encoder_out.expand(
        k, num_pixels, encoder_dim)  # (k, num_pixels, encoder_dim)

    # Tensor to store top k previous words at each step; now they're just <start>
    k_prev_words = torch.LongTensor([[vocab['<start>']]] * k).to(
        device)  # (k, 1)

    # Tensor to store top k sequences; now they're just <start>
    seqs = k_prev_words  # (k, 1)

    # Tensor to store top k sequences' scores; now they're just 0
    top_k_scores = torch.zeros(k, 1).to(device)  # (k, 1)

    # Lists to store completed sequences and scores
    complete_seqs = list()
    complete_seqs_scores = list()

    # Start decoding
    step = 1
    h, c = decoder.init_hidden_state(encoder_out)

    hypotheses = list()

    # s is a number less than or equal to k, because sequences are removed from this process once they hit <end>
    while True:

        embeddings = decoder.embedding(k_prev_words).squeeze(
            1).float()  # (s, embed_dim)
        awe, _ = decoder.attention(encoder_out,
                                   h)  # (s, encoder_dim), (s, num_pixels)
        gate = decoder.sigmoid(decoder.f_beta(h))
        awe = (gate * awe)

        h, c = decoder.lstm(torch.cat([embeddings, awe], dim=1), (h, c))
        scores = decoder.fc(h)
        scores = F.log_softmax(scores, dim=1)

        # Add scores to prev scores
        scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)

        # For the first step, all k points will have the same scores (since same k previous words, h, c)
        if step == 1:
            top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
        else:
            # Unroll and find top scores, and their unrolled indices
            top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True,
                                                             True)  # (s)

        # Convert unrolled indices to actual indices of scores
        prev_word_inds = top_k_words // len(vocab)  # (s); floor division keeps integer indices
        next_word_inds = top_k_words % len(vocab)  # (s)

        # Add new words to sequences
        seqs = torch.cat([seqs[prev_word_inds],
                          next_word_inds.unsqueeze(1)],
                         dim=1)  # (s, step+1) stores indices of words

        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [
            ind for ind, next_word in enumerate(next_word_inds)
            if next_word != vocab['<end>']
        ]

        complete_inds = list(
            set(range(len(next_word_inds))) - set(incomplete_inds))

        # Set aside complete sequences
        if len(complete_inds) > 0:
            complete_seqs.extend(seqs[complete_inds].tolist())
            complete_seqs_scores.extend(top_k_scores[complete_inds])
        k -= len(complete_inds)  # reduce beam length accordingly

        # Proceed with incomplete sequences
        if k == 0:
            break
        seqs = seqs[incomplete_inds]
        h = h[prev_word_inds[incomplete_inds]]
        c = c[prev_word_inds[incomplete_inds]]
        encoder_out = encoder_out[prev_word_inds[incomplete_inds]]
        top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
        k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)

        # Break if things have been going on too long
        if step > 50:
            break
        step += 1

    i = complete_seqs_scores.index(max(complete_seqs_scores))
    seq = complete_seqs[i]

    # Hypotheses
    hypotheses.append([
        w for w in seq
        if w not in {vocab['<start>'], vocab['<end>'], vocab['<pad>']}
    ])
    hypotheses = hypotheses[0]
    return hypotheses
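# A minimal call sketch for beam_search, assuming a torchvision transform and
# a word_map dict containing <start>/<end>/<pad>; word_map and rev_word_map
# are hypothetical names here:
import torch
from torchvision import transforms as T

tf = T.Compose([T.Resize((256, 256)), T.ToTensor()])
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

seq = beam_search('BEST_checkpoint.pth.tar', 'surf.jpg',
                  beam_size=5, vocab=word_map, transforms=tf, device=device)
caption = ' '.join(rev_word_map[idx] for idx in seq)  # index -> word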
Example #59
0
def main(argv):

    if len(argv) != 3:
        print("usage: Sequential Structure from Motion: <img1> <img2> <img3>")
        sys.exit(1)

    im1 = Image.open(argv[0])

    f, sensor_size = get_intrinsic_params(im1)
    cu, cv = sensor_size

    im_1 = plt.imread(argv[0])
    im_2 = plt.imread(argv[1])
    im_3 = plt.imread(argv[2])

    # Create plot
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in newer Matplotlib

    sw = SIFTWrapper(im_1, im_2)
    u1, u2_1 = sw.compute_best_matches(0.7)

    # Note that here we are assuming all pictures came from same camera
    # (A safe assumption for now, because we KNOW they came from the same camera)
    K_cam = intrinsic_cam_mtx(f, cu, cv)
    extrinsic_cam, x1_inliers, x2_1_inliers, u1_inliers, u2_1_inliers = get_inliers(u1, u2_1, K_cam)

    # Note that we are using generalized
    # camera coordinates, so we do not
    # need to multiply by K(For either of P0, P1)
    P_0 = np.array([[1, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 1, 0]])

    P_1 = extrinsic_cam

    # Estimate points
    point_estimates = get_point_estimates(P_0, P_1, x1_inliers, x2_1_inliers)

    # Compute keypoints for image 3
    kp3, des3 = compute_keypoints(im_3)

    # Find good matches b/w images 2 and 3
    # Note that we relax r to allow for more full matches
    u2_2, u3 = compute_best_matches(sw.kps[1], sw.descs[1], kp3, des3, r=0.75)

    # Estimate pose(using recovered essential matrix) and get inliers.
    # These inliers will be used for point triangulation
    extrinsic_cam2, x2_2_inliers, x3_inliers, u2_2_inliers, u3_inliers = get_inliers(u2_2, u3, K_cam)

    P_1_gen = np.row_stack((P_1, np.asarray([0, 0, 0, 1])))
    P_2_prime_gen = np.row_stack((extrinsic_cam2, np.asarray([0, 0, 0, 1])))

    # Note that the rotation of this pose is correct wrt P1's coordinate system
    # the translation is also correct, up to scale.
    P_2_init = (P_1_gen @ P_2_prime_gen)[:3]

    # FIXME: Something with get_gcp_mask is probably broken.
    # Get a shape error in estimate_translation...

    # Find set of points for which we have 3D estimates
    u_mask, three_d_mask = get_gcp_mask(u2_1_inliers, u2_2_inliers)

    X_gcp = []

    # FIXME:
    # X_gcp = point_estimates[three_d_mask]
    # u_gcp = np.column_stack((x2_2_inliers[u_mask], x3_inliers[u_mask]))

    # TODO: Could make this faster
    shared_points = []
    for x2_2, x3 in zip(x2_2_inliers, x3_inliers):
        for i, x2_1 in enumerate(x2_1_inliers):

            if np.allclose(x2_2, x2_1):

                # Use x2_1 inliers b/c they assume that the camera in img 1 is
                # the origin.
                shared_points.append((x2_1, x3))
                X_gcp.append(point_estimates[i])

    X_gcp = np.array(X_gcp)
    shared_points = np.array(shared_points).reshape(len(shared_points), 4)

    t0 = P_2_init[:, 3]
    R = P_2_init[:, :3]

    # Minimize projection error between observed and predicted 3d coordinates
    # This essentially fixes the scaling on the translation vector
    # Note that we could make this a bit easier / faster by optimizing only the scale
    # of the translation vector
    t_est = estimate_translation(X_gcp, shared_points, t0, R, P_1)

    P_2 = np.column_stack((R, t_est))

    second_pt_estimates = get_point_estimates(P_1, P_2, x2_2_inliers, x3_inliers)

    u_mask_inv = np.invert(u_mask)
    new_points = second_pt_estimates[u_mask_inv]

    plot_3d(point_estimates, ax, 'r')
    plot_3d(new_points, ax, 'b')
    plt.show()

    calc_final_error(P_1, P_2, shared_points, X_gcp)

    num_new = len(new_points)
    num_all = len(second_pt_estimates)
    print(num_new, "new points were recovered out of", num_all,
          "possible (~{0:.0f}%)".format((num_new / num_all) * 100))
Example #60
0
    def __getitem__(self, idx):
        """
        Function to get a sample from the dataset. First, both RGB and semantic images are read in PIL format. Then
        transformations are applied, going from PIL images to NumPy arrays to Tensors.

        For regular usage:
            - Images should be output with dimensions (3, W, H)
            - Semantic images should be output with dimensions (1, W, H)

        In the case that 10-crops are used:
            - Images should be output with dimensions (10, 3, W, H)
            - Semantic images should be output with dimensions (10, 1, W, H)

        :param idx: Index
        :return: Dictionary containing {RGB image, semantic segmentation mask, scene category index}
        """

        # Get RGB image path and load it
        # img_name = os.path.join(self.image_dir, self.set, self.labels[idx], self.filenames[idx])
        img_name = os.path.join(self.image_dir, self.auxiliarnames[idx])
        img = Image.open(img_name)

        # Convert it to RGB if gray-scale
        if img.mode is not "RGB":
            img = img.convert("RGB")

        filename_sem = img_name[img_name.find('places365_standard') + 19:img_name.find('.jpg')]
        # aux_indx = img_name.find('train')
        if img_name.find('/train/') > 0:
            sem_name = os.path.join('/media/vpu/f376732d-7565-499a-95f5-b6b26c4a902d/Datasets/places365_standard',
                                    "noisy_annotations_RGB", (filename_sem + ".png"))
            sem_score_name = os.path.join('/media/vpu/f376732d-7565-499a-95f5-b6b26c4a902d/Datasets/places365_standard',
                                          "noisy_scores_RGB", (filename_sem + ".png"))
        else:
            sem_name = os.path.join(self.image_dir, "noisy_annotations_RGB", (filename_sem + ".png"))
            sem_score_name = os.path.join(self.image_dir, "noisy_scores_RGB", (filename_sem + ".png"))

        sem = Image.open(sem_name)
        semScore = Image.open(sem_score_name)

        # Load semantic segmentation mask
        # if self.set == 'train':
        #     filename_sem = self.filenames[idx][0:self.filenames[idx].find('.jpg')]
        #     sem_name = os.path.join('/media/vpu/f376732d-7565-499a-95f5-b6b26c4a902d/Datasets/places365_standard',
        #                             "noisy_annotations_RGB", self.set, self.labels[idx], (filename_sem + ".png"))
        #
        #     sem = Image.open(sem_name)
        #
        #     # Load semantic segmentation scores
        #     filename_scores = self.filenames[idx][0:self.filenames[idx].find('.jpg')]
        #     sem_score_name = os.path.join('/media/vpu/f376732d-7565-499a-95f5-b6b26c4a902d/Datasets/places365_standard',
        #                                   "noisy_scores_RGB", self.set, self.labels[idx], (filename_scores + ".png"))
        #
        #     semScore = Image.open(sem_score_name)
        #
        # else:
        #     filename_sem = self.filenames[idx][0:self.filenames[idx].find('.jpg')]
        #     sem_name = os.path.join(self.image_dir, "noisy_annotations_RGB", self.set, self.labels[idx], (filename_sem + ".png"))
        #
        #     sem = Image.open(sem_name)
        #
        #     # Load semantic segmentation scores
        #     filename_scores = self.filenames[idx][0:self.filenames[idx].find('.jpg')]
        #     sem_score_name = os.path.join(self.image_dir, "noisy_scores_RGB", self.set, self.labels[idx], (filename_scores + ".png"))
        #
        #     semScore = Image.open(sem_score_name)

        # Apply transformations depending on the set (train, val)
        if self.set is "train":
            # # Extract Random Crop parameters
            i, j, h, w = transforms.RandomCrop.get_params(img, output_size=(self.outputSize, self.outputSize))
            # Apply Random Crop parameters
            img = TF.crop(img, i, j, h, w)
            sem = TF.crop(sem, i, j, h, w)
            semScore = TF.crop(semScore, i, j, h, w)

            # Random horizontal flipping
            if random.random() > 0.5:
                img = TF.hflip(img)
                sem = TF.hflip(sem)
                semScore = TF.hflip(semScore)

            # Apply transformations from ImgAug library
            img = np.asarray(img)
            sem = np.asarray(sem)
            semScore = np.asarray(semScore)

            img = np.squeeze(self.seq.augment_images(np.expand_dims(img, axis=0)))
            if self.SemRGB:
                sem = np.squeeze(self.seq_sem.augment_images(np.expand_dims(sem, 0)))
                semScore = np.squeeze(self.seq_sem.augment_images(np.expand_dims(semScore, 0)))
            else:
                sem = np.squeeze(self.seq_sem.augment_images(np.expand_dims(np.expand_dims(sem, 0), 3)))
                semScore = np.squeeze(self.seq_sem.augment_images(np.expand_dims(np.expand_dims(semScore, 0), 3)))

            # Apply not random transforms. To tensor and normalization for RGB. To tensor for semantic segmentation.
            img = self.train_transforms_img(img)
            sem = self.train_transforms_sem(sem)
            semScore = self.train_transforms_scores(semScore)
        else:
            img = self.val_transforms_img(img)
            sem = self.val_transforms_sem(sem)
            semScore = self.val_transforms_scores(semScore)

        # Final control statements
        if not self.TenCrop:
            if not self.SemRGB:
                assert img.shape[0] == 3 and img.shape[1] == self.outputSize and img.shape[2] == self.outputSize
                assert sem.shape[0] == 1 and sem.shape[1] == self.outputSize and sem.shape[2] == self.outputSize
                assert semScore.shape[0] == 1 and semScore.shape[1] == self.outputSize and semScore.shape[2] == self.outputSize
            else:
                assert img.shape[0] == 3 and img.shape[1] == self.outputSize and img.shape[2] == self.outputSize
                assert sem.shape[0] == 3 and sem.shape[1] == self.outputSize and sem.shape[2] == self.outputSize
                assert semScore.shape[0] == 3 and semScore.shape[1] == self.outputSize and semScore.shape[2] == self.outputSize
        else:
            if not self.SemRGB:
                assert img.shape[0] == 10 and img.shape[2] == self.outputSize and img.shape[3] == self.outputSize
                assert sem.shape[0] == 10 and sem.shape[2] == self.outputSize and sem.shape[3] == self.outputSize
                assert semScore.shape[0] == 10 and semScore.shape[2] == self.outputSize and semScore.shape[3] == self.outputSize
            else:
                assert img.shape[0] == 10 and img.shape[2] == self.outputSize and img.shape[3] == self.outputSize
                assert sem.shape[0] == 10 and sem.shape[2] == self.outputSize and sem.shape[3] == self.outputSize
                assert semScore.shape[0] == 10 and semScore.shape[2] == self.outputSize and semScore.shape[3] == self.outputSize

        # Create dictionary
        self.sample = {'Image': img, 'Semantic': sem,
                       'Semantic Scores': semScore, 'Scene Index': self.classes.index(self.labels[idx])}

        return self.sample
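# A hedged sketch of consuming this dataset, assuming the class subclasses
# torch.utils.data.Dataset and `dataset` is an instance built elsewhere:
from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)

for batch in loader:
    imgs = batch['Image']          # (B, 3, H, W)
    sems = batch['Semantic']       # (B, 1, H, W)
    labels = batch['Scene Index']
    break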