Code example #1
	def _render(self):
		"""
		Render the current cursor image into the canvas.
		"""
		# If a path to the datasets folder is provided, rewrite the stored image
		# path so the image can be loaded from that folder
		if self.path_datasets is not None:
			path_image = self.file_list[self.cursor]
			# Find the position of the "datasets" folder in the path
			pos = path_image.find('/datasets/')
			# find() returns -1 when the folder is absent, so check before slicing
			if pos >= 0:
				# skip the leading '/' so os.path.join keeps self.path_datasets as root
				path_image = os.path.join(self.path_datasets, path_image[pos + 1:])
			img = mpimg.imread(path_image)
		else:
			img = mpimg.imread(self.file_list[self.cursor])

		# Render
		self.ax.cla()
		self.ax.imshow(img)

		self.ax.set_xlim([0, img.shape[1]])
		self.ax.set_ylim([img.shape[0], 0])

		if self.iml_gt is not None:
			self._render_bounding_boxes(self.iml_gt, self.gt_mapping, gt=True)
		self._render_bounding_boxes(self.iml_detections, self.detections_mapping)

		plt.title('[' + str(self.cursor) + '/' + str(len(self.file_list)) + '] ' + self.file_list[self.cursor] + ' (((' + str(self.confidence) + ')))')
		plt.axis('off')
		self.fig.canvas.draw()
		plt.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0)
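
A standalone sketch of the path remapping above (the file paths are invented for illustration):

import os

def remap_dataset_path(path_image, path_datasets):
    """Re-root an absolute image path at a new datasets folder."""
    pos = path_image.find('/datasets/')
    if pos >= 0:
        # keep 'datasets/...' relative so os.path.join does not discard the new root
        return os.path.join(path_datasets, path_image[pos + 1:])
    return path_image

print(remap_dataset_path('/old/root/datasets/voc/img.png', '/new/root'))
# -> /new/root/datasets/voc/img.png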
Code example #2
File: data_loader.py Project: ashishpatel26/w-net
def clean_directory(path):
    # `path` is a glob pattern; glob() already yields full file paths
    for img_file in tqdm(glob(path)):
        try:
            mpimg.imread(img_file)
        except Exception:
            print('removing ' + img_file)
            os.remove(img_file)
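
A minimal usage sketch (the directory and pattern are placeholders):

clean_directory('data/train/*.png')  # deletes any PNG that fails to load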
Code example #3
def rotate_training():
    ## Saves rotated versions of each image in the training set
    for i in range(1, 101):

        truth = mpimg.imread('training/groundtruth/satImage_' + '%.3d' % i + '.png')
        image = mpimg.imread('training/images/satImage_' + '%.3d' % i + '.png')

        imgs = mk_rotations(image)
        truths = mk_rotations(truth)

        count = 0
        for im in imgs:
            im = format_image(im)
            Image.fromarray(im).save('training_big/Images/satImage_' + '%.3d' % i + '_rota' + str(count) + '.png')
            count += 1
        count = 0
        # iterate the rotated ground truths here (the original looped over imgs again)
        for im in truths:
            im = format_image(im)
            Image.fromarray(im).save('training_big/Truth/satImage_' + '%.3d' % i + '_rota' + str(count) + '.png')
            count += 1

        print('Writing image ', i)
    return 0
Code example #4
def compare_entropy(name_img1, name_img2, method="rmq"):
     '''Compare two images, either by the Kullback-Leibler divergence
     (method="KL-div") or by a quartic difference norm (method="rmq").

     Parameters
     ----------
     name_img1 : string
       filename of image 1 (png format)

     name_img2 : string
       filename of image 2 (png format)

     Returns
     -------
     S : float
        For "KL-div": log10 of the Kullback-Leibler divergence
        S = sum(pk * log(pk / qk), axis=0).
        For "rmq": (sum((img1 - img2)**4))**(1/4).

     Note
     ----
     See http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html
     '''
     img1 = mpimg.imread(name_img1)
     img2 = mpimg.imread(name_img2)
     fimg1 = img1.flatten()
     fimg2 = img2.flatten()
     if method == "KL-div":
          eps = 0.0001
          S = stats.entropy(fimg2 + eps, fimg1 + eps)
          S = numpy.log10(S)
     elif method == "rmq":
          fdiff = fimg1 - fimg2
          fdiff_quart = fdiff**4  # fourth power, not a square
          S = (fdiff_quart.sum())**(1. / 4)

     return S, fimg1, fimg2
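
A minimal usage sketch (file names are placeholders; assumes the same imports as the example, i.e. mpimg, numpy, and scipy.stats as stats):

S, f1, f2 = compare_entropy('render_a.png', 'render_b.png', method='KL-div')
print('log10 of the KL divergence:', S)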
Code example #5
File: geoaxes.py Project: dmcdougall/cartopy
    def stock_img(self, img_name):
        # XXX Turn into a dictionary (inside the method)?
        if img_name == 'bluemarble':
            source_proj = ccrs.PlateCarree()
            fname = '/data/local/dataZoo/cartography/raster/blue_marble_720_360.png'
#            fname = '/data/local/dataZoo/cartography/raster/blue_marble_2000_1000.jpg'            
            img_origin = 'lower'
            img = imread(fname)
            img = img[::-1]
            return self.imshow(img, origin=img_origin, transform=source_proj, extent=[-180, 180, -90, 90])
        elif img_name == 'bm_high':
            source_proj = ccrs.PlateCarree()
            fname = '/data/local/dataZoo/cartography/raster/blue_marble_2000_1000.jpg'
            img_origin = 'lower'
            img = imread(fname)
            return self.imshow(img, origin=img_origin, transform=source_proj, extent=[-180, 180, -90, 90])
        elif img_name == 'ne_shaded':
            source_proj = ccrs.PlateCarree()
            fname = '/data/local/dataZoo/cartography/raster/NE1_50M_SR_W/NE1_50M_SR_W_720_360.png'
            img_origin = 'lower'
            img = imread(fname)
            img = img[::-1]
            return self.imshow(img, origin=img_origin, transform=source_proj, extent=[-180, 180, -90, 90])
        else:
            raise ValueError('Unknown stock image.')
Code example #6
File: poster.py Project: zsomko/visualcortex
def make_plot_1(u):
   
   fig = plt.figure(figsize=(15,10), dpi=1000)   
   fig.clf()
   gs = gridspec.GridSpec(2,3)
   padd = 0
   hpadd = 0
   gs.update(left=padd, right=1-padd, top=1-hpadd, bottom=hpadd, wspace=padd, hspace=-0.3)
   im = img.imread("images/poster_samplest%.2f.png"%u)
   ax = fig.add_subplot(gs[0,0])
   ax.imshow(im)
   plt.axis('off')
   im = img.imread("images/poster_samples%.2f.png"%u)
   ax = fig.add_subplot(gs[1,0])
   ax.imshow(im)
   plt.axis('off')
   im = img.imread("images/poster_rates%.2f.png"%u)
   ax = fig.add_subplot(gs[1,1])
   ax.imshow(im)
   plt.axis('off')
   im = img.imread("images/tr2trspki%.2f.png"%u)
   ax = fig.add_subplot(gs[0,2])
   ax.imshow(im)
#   ax.set_title("Integration")
   plt.axis('off')
   im = img.imread("images/tr2trspkp%.2f.png"%u)
   ax = fig.add_subplot(gs[1,2])
   ax.imshow(im)
#   ax.set_title("Poisson")
   plt.axis('off')
   fig.savefig("images/poster_1_%.2f.png"%u, transparent=True, frameon=False)
Code example #7
File: controlspeed.py Project: y2kbugger/picderivate
    def __init__(self, dpi=101, x=5, y=5, image=False, imagelocation='t.png'):
        # canvas
        self.dpi = dpi
        self.inches = np.array([x, y])
        self.dots = self.dpi * self.inches
        self.iterations = 0
        self.speed = -3  # pause = 2^speed

        if image:
            # import png
            print(mpimg.imread(imagelocation).shape[0:2])
            self.dots = np.array(mpimg.imread(imagelocation).shape[0:2])
            self.imageIn = np.uint8(255 * mpimg.imread(imagelocation))
            self.fig = plt.figure(figsize=[self.dots[1], self.dots[0]], dpi=1)
        else:
            # draw 2d zero matrix
            self.imageIn = np.zeros(shape=self.dots, dtype=np.uint8)
            self.fig = plt.figure(figsize=[self.inches[1], self.inches[0]], dpi=self.dpi)

        # display array as image figure
        # self.fig = plt.figure(figsize=[self.inches[1], self.inches[0]], dpi=self.dpi)
        # self.fig = plt.figure(figsize=self.dots, dpi=self.dpi)
        self.im = self.fig.figimage(self.imageIn)

        self.cid = self.fig.canvas.mpl_connect('scroll_event', self.onscroll)
Code example #8
File: idiff.py Project: Jozhogg/iris
def diff_viewer(expected_fname, result_fname, diff_fname):
    plt.figure(figsize=(16, 16))
    plt.suptitle(os.path.basename(expected_fname))
    ax = plt.subplot(221)
    ax.imshow(mimg.imread(expected_fname))
    ax = plt.subplot(222, sharex=ax, sharey=ax)
    ax.imshow(mimg.imread(result_fname))
    ax = plt.subplot(223, sharex=ax, sharey=ax)
    ax.imshow(mimg.imread(diff_fname))

    def accept(event):
        # remove the expected result and move the most recent result in
        print('ACCEPTED NEW FILE: %s' % (os.path.basename(expected_fname), ))
        os.remove(expected_fname)
        shutil.copy2(result_fname, expected_fname)
        os.remove(diff_fname)
        plt.close()

    def reject(event):
        print('REJECTED: %s' % (os.path.basename(expected_fname), ))
        plt.close()

    ax_accept = plt.axes([0.7, 0.05, 0.1, 0.075])
    ax_reject = plt.axes([0.81, 0.05, 0.1, 0.075])
    bnext = mwidget.Button(ax_accept, 'Accept change')
    bnext.on_clicked(accept)
    bprev = mwidget.Button(ax_reject, 'Reject')
    bprev.on_clicked(reject)

    plt.show()
Code example #9
def colour_back_projection_exercise(bins, model):
    """
    Uses colour back projection as descibed in Swain and Ballard's 1990 paper,
    to locate a target image within a larger image. Coordinates of best guess.
    
    """

    print '\n\nColour back projection exercise\n'
    raw_input('Let\'s view the image to search & the "target":\n(Press enter)')
    target = mpimg.imread('../images/waldo/waldo_no_bg.tiff')
    image = mpimg.imread('../images/waldo/waldo_env.tiff')
    plt.subplot(1,2,1)
    plt.imshow(np.flipud(image))
    plt.subplot(1,2,2)
    plt.imshow(np.flipud(target))
    plt.show()

    print '\nNow let\'s carry out the back projection...'
    locations, result_image = colour_backproject(target, image, bins, model)
    print '\nThe most likely match location(s):'
    print locations
    plt.subplot(1,1,1)
    plt.imshow(np.flipud(result_image))
    plt.show()

    menu()
Code example #10
	def plot_superimposed_heatmap(self, maptype=u'cancellation'):
		
		"""Plots a heatmap superimposed on the task image"""

		# draw heatmap if this has not been done yet
		if not u'%salphaheatmap' % maptype in self.files.keys():
			self.plot_heatmap(maptype=maptype)

		# create a new figure
		fig = pyplot.figure(figsize=(self.dispsize[0]/self.dpi, self.dispsize[1]/self.dpi), dpi=self.dpi, frameon=False)
		ax = pyplot.Axes(fig, [0,0,1,1])
		ax.set_axis_off()
		fig.add_axes(ax)
		# load images
		taskimg = image.imread(self.files[u'task'])
		heatmap = image.imread(self.files[u'%salphaheatmap' % maptype])
		# resize task image
		taskimg = numpy.resize(taskimg, (numpy.size(heatmap,axis=0),numpy.size(heatmap,axis=1)))
		# draw task (use the resized local copy, not self.taskimg)
		ax.imshow(taskimg, origin=u'upper', alpha=1)
		# superimpose heatmap
		ax.imshow(heatmap, alpha=0.5)
		# save figure
		self.files[u'%staskheatmap' % maptype] = os.path.join(self.outdir, u'%s_heatmap_superimposed.png' % maptype)
		fig.savefig(self.files[u'%staskheatmap' % maptype])
Code example #11
File: pia.py Project: monikascholz/PIA
 def updateAutoRun(self, index):
     i = self.currentIndex.get()
     if index ==0:
         # read current image
         _tmpImage = mpimg.imread(os.path.join(self.imageFolder.get(),self.imageNames[i]))
         # update coordinates
         tmp_xC, tmp_yC = self.ROILocationData
         # use same algorithm as manual to get fluorescence
         self.AutoFluorescenceDetector(_tmpImage, tmp_xC, tmp_yC, i)
     else:
          # read current image
         _tmpImage = mpimg.imread(os.path.join(self.imageFolder.get(),self.imageNames[i]))
         # update coordinates by extrapolating from the two previous frames
         tmp_xC, tmp_yC = self.data[3][i-1] + (self.data[3][i-1] - self.data[3][i-2])*0.2, self.data[4][i-1] + (self.data[4][i-1] - self.data[4][i-2])*0.25
         self.AutoFluorescenceDetector(_tmpImage, tmp_xC, tmp_yC, i)
     self.drawDataLine()
     self.updateMain()
     self.drawRect()
     self.drawData()
     if len(self.ROI) > 0:
         for item in self.ROI:
             self.ax['Main'].lines.remove(item[0])
             self.ROI = []
     self.currentIndex.set(i+1)
     index += 1
     if self.AutoRunActive and index <= self.numOfImages-1:
         root.after(2, lambda: self.updateAutoRun(index))
     return
Code example #12
File: MAP.py Project: exedre/e4t
    def insert_background(self,f1):
        imagename = expandvars(expanduser(self._spec['IMAGE']))
        logger.debug('Image is %s',imagename)
        if imagename[0]=='/':
            imgfname = imagename
        else:
            imgfname = expandvars(expanduser(join(self._options.input_path,imagename)))
            if not exists(imgfname):
                SHAPE_PATH = DEFAULT_IMAGE_PATH
                imgfname = expandvars(expanduser(join(SHAPE_PATH,imagename)))

        if exists(imgfname):
            logger.debug('Loading background image %s',imgfname)
            logger.debug('MPL VERSION %s', mpl.__version__)
            # compare numeric tuples; the original compared a list against a
            # tuple, which never takes the flipud branch in Python 2
            V = tuple(int(x) for x in mpl.__version__.split('.')[:3])
            if V > (1, 0, 0):
                img = np.flipud(mpimg.imread(imgfname))
            else:
                img = mpimg.imread(imgfname)
            if img is not None:
                logger.debug('Showing image')
                f1.imshow(img, origin='lower', visible=True, alpha=.5, aspect='equal')
                logger.debug('Showed image')
            else:
                logger.error('E:FIG:MAP:001 -- Cannot open the map %s', imgfname)
                raise ValueError('E:FIG:MAP:001')
        else:
            logger.error("The image %s does not exist", imgfname)
            raise ValueError('E:FIG:MAP:002')
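
A sturdier variant of the version check above (a sketch; it assumes the packaging library is installed, which recent matplotlib releases depend on anyway):

from packaging.version import Version
import matplotlib as mpl

# flip the image only on matplotlib newer than 1.0.0
flip_needed = Version(mpl.__version__) > Version('1.0.0')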
Code example #13
File: test_plot.py Project: CNS-OIST/PyPe9
 def test_network_plot(self):
     # Generate test signal
     brunel_ai = ninemlcatalog.load(
         'network/Brunel2000/AI.xml').as_network('Brunel2000AI')
     scaled_brunel_ai_path = os.path.join(self.work_dir,
                                          'brunel_scaled.xml')
     brunel_ai.scale(0.01).write(scaled_brunel_ai_path)
     argv = ("{} nest 100.0 0.1 "
             "--record Exc.spike_output {}"
             .format(scaled_brunel_ai_path, self.network_signal_path))
     simulate.run(argv.split())
     # Run plotting command
     for pop_name in self.recorded_pops:
         out_path = '{}/{}.png'.format(self.work_dir, pop_name)
         argv = ("{in_path} --save {out_path} --hide --dims 5 5 "
                 "--resolution 100.0"
                 .format(in_path=self.network_signal_path,
                         out_path=out_path, name='v'))
         plot.run(argv.split())
         image = img.imread(out_path)
         self._ref_network_plot()
         ref_image = img.imread(self.ref_network_path)
         self.assertEqual(
             image.shape, ref_image.shape)
         self.assertTrue(
             (image == ref_image).all(),
             "Plotted spike data using 'plot' command (saved to '{}') "
             "did not match loaded image from '{}'"
             .format(out_path, self.ref_network_path))
Code example #14
File: plot.py Project: ybrighty/Final-Code
def view(structure, original=True):
    '''
    Shows the filtered overlay and, if original=True, the two source pictures.
    '''
    if not structure._color_calculated:
        structure.calculate_color()
        
    os.chdir("plot support files")
    T_image = mpimg.imread("test_outdoor.png")
    R_image = mpimg.imread("test_indoor.png")
    os.chdir("..")
    
    T_filter = np.array(structure.T_color, float) / 255
    R_filter = np.array(structure.R_color, float) / 255

    T_image_after = T_image * T_filter
    R_image_after = R_image * R_filter
    overlay = T_image_after + R_image_after

    plt.figure()
    plt.axis("off")
    plt.imshow(overlay)

    if original:
        plt.figure()
        plt.axis("off")
        plt.imshow(T_image)
        plt.figure()
        plt.axis("off")
        plt.imshow(R_image)
Code example #15
    def compare_with_ref(self, fname, tolerance, show=False):
        fpath = self.fpath(fname)
        fpath_ref = self.fpath_ref(fname)

        if not os.path.exists(fpath_ref):
            shutil.copy(fpath, fpath_ref)

        img = image.imread(fpath)
        img_ref = image.imread(fpath_ref)
        self.assertEqual(img.shape, img_ref.shape)
        d = num.abs(img - img_ref)
        merr = num.mean(d)
        if (merr > tolerance or show) and not noshow:
            fig = plt.figure()
            axes1 = fig.add_subplot(1, 3, 1, aspect=1.)
            axes2 = fig.add_subplot(1, 3, 2, aspect=1.)
            axes3 = fig.add_subplot(1, 3, 3, aspect=1.)
            axes1.imshow(img)
            axes1.set_title('Candidate')
            axes2.imshow(img_ref)
            axes2.set_title('Reference')
            axes3.imshow(d)
            axes3.set_title('Mean abs difference: %g' % merr)
            plt.show()
            plt.close(fig)

        assert merr <= tolerance
Code example #16
File: clip_figure.py Project: albuscrow/acaffd
def gen_error_cube(file_name):
    pic = mpimg.imread('error/' + file_name)
    colormap = mpimg.imread('res/colormap.png').transpose((1, 0, 2))[::-1, ::, ::]
    picPosition = (150, 0)
    outputSize = pic.shape[:2]
    picSize = [x * 0.8 for x in pic.shape[:2]]

    # picSize[1] += 200

    def ps2e(p, s):
        return (p[0], p[0] + s[0], p[1], p[1] + s[1])

    colorSize = (10, 200)
    colorMapExtent = (outputSize[0] - colorSize[0], outputSize[0],
                      0 + 50, colorSize[1] + 50)

    imshow(pic, extent=ps2e(picPosition, picSize), zorder=-1)
    imshow(colormap, extent=colorMapExtent, zorder=-1)

    text(outputSize[0] - 50, 55, '    0', fontsize=11)
    text(outputSize[0] - 63, 35 + colorSize[1], ' π/30', fontsize=11)
    gca().get_xaxis().set_visible(False)
    gca().get_yaxis().set_visible(False)

    xlim([150, outputSize[0]])
    ylim([50, outputSize[1] - 200])
    # grid()
    savefig('/home/ac/paperlzq/pic/7_with_colormap.png', bbox_inches='tight', pad_inches=-0.015)
Code example #17
File: make_movie.py Project: ofek-schechner/mmvt
    def two_brains_two_graphs():
        brain_ax = plt.subplot(gs[:-g2, :g3])
        brain_ax.set_aspect('equal')
        brain_ax.get_xaxis().set_visible(False)
        brain_ax.get_yaxis().set_visible(False)

        image = mpimg.imread(images[0])
        im = brain_ax.imshow(image, animated=True)#, cmap='gray',interpolation='nearest')

        brain_ax2 = plt.subplot(gs[:-g2, g3:-1])
        brain_ax2.set_aspect('equal')
        brain_ax2.get_xaxis().set_visible(False)
        brain_ax2.get_yaxis().set_visible(False)

        image2 = mpimg.imread(images2[0])
        im2 = brain_ax2.imshow(image2, animated=True)#, cmap='gray',interpolation='nearest')

        graph1_ax = plt.subplot(gs[-g2:, :])
        graph2_ax = graph1_ax.twinx()
        if cb_data_type != '':
            ax_cb = plt.subplot(gs[:-g2, -1])
        else:
            ax_cb = None
        plt.tight_layout()
        resize_and_move_ax(brain_ax, dx=0.04)
        resize_and_move_ax(brain_ax2, dx=-0.00)
        if cb_data_type != '':
            resize_and_move_ax(ax_cb, ddw=0.5, ddh=0.8, dx=-0.01, dy=0.06)
        for graph_ax in [graph1_ax, graph2_ax]:
            resize_and_move_ax(graph_ax, dx=0.04, dy=0.03, ddw=0.89)
        return ax_cb, im, im2, graph1_ax, graph2_ax
Code example #18
File: practica1.py Project: Yue93/PID
def imgHibrida():
    raiz = os.getcwd()
    filtro = gaussiana(9)
    alinear(Image.open(raiz + "\human.png"), Image.open(raiz + "\cat.png"))
    gato = mpimg.imread(raiz + "\cat.png")
    print gato.shape
    humano = mpimg.imread(raiz + "\humanAlign.png")

    gatoConv = lowFilter(gato, filtro)
    humanoConv = lowFilter(humano, filtro)
    gatoHighConv = highFilter(gato, gatoConv)

    plt.imshow(gatoHighConv)
    plt.colorbar()
    plt.title("Cat (convolution with hp)")
    plt.show()

    plt.imshow(humanoConv)
    plt.colorbar()
    plt.title("Human (convolution with lp)")
    plt.show()

    finalImage = gatoHighConv + humanoConv
    normalizar(finalImage)
    plt.imshow(finalImage)
    plt.colorbar()
    plt.title("Hybrid Image")
    plt.show()
    mpimg.imsave("HybridImage1.png", finalImage)
Code example #19
def plot_greyscale_images(pngimage):
    fig = plt.figure()

    ax0 = fig.add_subplot(3, 2, 3)
    ax1 = fig.add_subplot(3, 2, 2)
    ax2 = fig.add_subplot(3, 2, 4)
    ax3 = fig.add_subplot(3, 2, 6)

    ax0.imshow(mpimg.imread(pngimage))
    ax0.set_xticklabels([])
    ax0.set_yticklabels([])
    ax0.set_title('Original')

    ax1.imshow(lightness(mpimg.imread(pngimage)), cmap='binary')
    ax1.set_xticklabels([])
    ax1.set_yticklabels([])
    ax1.set_title('Lightness')

    ax2.imshow(average(mpimg.imread(pngimage)), cmap='binary')
    ax2.set_xticklabels([])
    ax2.set_yticklabels([])
    ax2.set_title('Average')

    ax3.imshow(luminosity(mpimg.imread(pngimage)), cmap='binary')
    ax3.set_xticklabels([])
    ax3.set_yticklabels([])
    ax3.set_title('Luminosity')

    fig.suptitle('RGB image and three greyscale methods')
    # fig.subplots_adjust(hspace=.5)

    plt.savefig('BLAC_hw6_TLRH_6126561_greyscale.pdf')
Code example #20
def createImagesForFinal(prefix1,prefix2,outDescription):
	suffix = '.png'
	length = len(utils.benchmarks)
	images = []

	for i in range(length): #range(length):
		dataset = utils.benchmarks[i]

		img1 = mpimg.imread(prefix1+dataset+suffix)
		img2 = mpimg.imread(prefix2+dataset+suffix)
		

		fig = plt.figure(i)
		plt.subplot(1, 2,1)
		imgplot1 = plt.imshow(img1)
		plt.axis('off')

		#plt.subplot(1, 2,2*i+2)
		plt.subplot(1, 2,2)
		imgplot2 = plt.imshow(img2)
		plt.axis('off')


		fig.savefig(outDescription + dataset + '.png',dpi=100)
		#plt.show()

		plt.close('all')

#createImagesForFinal('Output/final_annealing_','Output/final_annealingNewCost_','Output/Final Comparison_')
Code example #21
def plot_gaussian_blur(pngimage):
    red = mpimg.imread(pngimage)[:, :, 0]
    green = mpimg.imread(pngimage)[:, :, 1]
    blue = mpimg.imread(pngimage)[:, :, 2]

    fig = plt.figure()
    ax0 = fig.add_subplot(2, 1, 1)
    ax1 = fig.add_subplot(2, 1, 2)

    ax0.imshow(mpimg.imread(pngimage))
    ax0.set_xticklabels([])
    ax0.set_yticklabels([])
    ax0.set_title('Original')

    # gaussian_blur is in a different file, as requested.
    radius, sigma = 7, 0.84089642
    blurred_img = gaussian_blur(red, green, blue, radius, sigma)
    print type(blurred_img), blurred_img.dtype, blurred_img.shape
    ax1.imshow(blurred_img)
    ax1.set_xticklabels([])
    ax1.set_yticklabels([])
    ax1.set_title('Gaussian Blurred with kernel size {0} and sigma {1}'
                  .format(radius, sigma))

    fig.suptitle('Gaussian blur')
    plt.savefig('BLAC_hw6_TLRH_6126561_gaussian_blur.pdf')
Code example #22
def createImages(prefix1,prefix2,prefix3,outDescription):
	suffix = '.png'
	length = len(utils.benchmarks)
	images = []

	for i in range(length): #range(length):
		dataset = utils.benchmarks[i]

		img1 = mpimg.imread(prefix1+dataset+suffix)
		img2 = mpimg.imread(prefix2+dataset+suffix)
		img3 = mpimg.imread(prefix3+dataset+suffix)

		fig = plt.figure(i)
		plt.subplot(1, 3,1)
		imgplot1 = plt.imshow(img1)
		plt.axis('off')

		#plt.subplot(1, 2,2*i+2)
		plt.subplot(1, 3,2)
		imgplot2 = plt.imshow(img2)
		plt.axis('off')

		plt.subplot(1, 3,3)
		imgplot2 = plt.imshow(img3)
		plt.axis('off')

		fig.savefig(outDescription + dataset + '.png',dpi=100)
		#plt.show()

		plt.close('all')
Code example #23
def main():
  # parse command line arguments
  parser = argparse.ArgumentParser(description='Colorize pictures')
  parser.add_argument('greyImage', help='png image to be coloured')
  parser.add_argument('markedImage', help='png image with colour hints')
  parser.add_argument('output', help='png output file')
  parser.add_argument('-v', '--view', help='display image', action='store_true')
  args = parser.parse_args()

  # Note: when reading .png, division by 255. is not required
  # Note: when reading .bmp, division by 255. is required
  # TODO: make this more universal, i.e., support various image formats
  # read images
  greyImage = mpimg.imread(args.greyImage, format='png')
  markedImage = mpimg.imread(args.markedImage, format='png')
  
  # colorize
  colouredImage = colorize(greyImage, markedImage)
    
  # save output
  mpimg.imsave(args.output,colouredImage, format='png')
  
  # display output, if requested
  if args.view:
    plt.imshow(colouredImage)
    plt.show()
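
A small sketch illustrating the note above about scaling (file names are placeholders): matplotlib decodes PNGs itself and returns floats already scaled to [0, 1], while other formats are read through Pillow and come back as uint8 in [0, 255] for typical 8-bit files.

import matplotlib.image as mpimg

png = mpimg.imread('example.png')   # float32, values in [0.0, 1.0]
bmp = mpimg.imread('example.bmp')   # uint8, values in [0, 255]
print(png.dtype, png.max(), bmp.dtype, bmp.max())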
Code example #24
File: test_devectorize.py Project: CKrawczyk/astroML
def test_devectorize_axes():
    np.random.seed(0)

    x, y = np.random.random((2, 1000))

    # save vectorized version
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x, y)
    sio = StringIO()
    fig.savefig(sio)
    sio.reset()
    im1 = image.imread(sio)
    plt.close()

    # save devectorized version
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x, y)
    devectorize_axes(ax, dpi=200)
    sio = StringIO()
    fig.savefig(sio)
    sio.reset()
    im2 = image.imread(sio)
    plt.close()

    assert_(im1.shape == im2.shape)
    assert_((im1 != im2).sum() < 0.1 * im1.size)
Code example #25
def get_images(images_directory, groundtruths_directory, num_images):
    #
    #   DESCRIPTION
    #       Loads each training image and its ground truth and creates tensors [numImages, 400, 400, 3]
    #
    #   INPUTS
    #       images_directory path to training images directory
    #       groundtruths_directory path to the groundtruth images directory
    #       num_images number of images to load
    #
    #   OUTPUTS
    #       images, ground_truth two tensors
    #
    images = []
    ground_truth = []
    # num_images is a count, so iterate over the image ids 1..num_images
    for i in range(1, num_images + 1):
        image_id = "satImage_%.3d" % i
        image_filename = image_id + ".png"
        image_path = images_directory + image_filename
        groundtruth_image_path = groundtruths_directory + image_filename

        if os.path.isfile(image_path) and os.path.isfile(groundtruth_image_path):
            print('Loading ' + image_filename)
            loaded_image = mpimg.imread(image_path)
            loaded_gt_image = mpimg.imread(groundtruth_image_path)

            if ordering == "th":
                # move the channel axis first for Theano-style ordering
                loaded_image = np.rollaxis(loaded_image, 2)

            images.append(loaded_image)
            ground_truth.append(loaded_gt_image)
        else:
            print('File ' + image_path + ' does not exist')

    return images, ground_truth
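
A usage sketch (the paths are placeholders; keep the trailing slashes, since the function joins paths by plain string concatenation):

images, ground_truth = get_images('training/images/', 'training/groundtruth/', 100)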
Code example #26
def input_image_setup(img_name, img2_name):
	'''	Takes an image as input and creates a "rule map". When creating the
	image, keep in mind: red = grid, green = branched, blue = radial, and a
	black pixel defines a centre. '''
	import matplotlib.image as mpimg
	import matplotlib.pyplot as plt
	import procedural_city_generation
	import os

	img = mpimg.imread(img_name)
	img2 = mpimg.imread(img2_name)

	path = os.path.dirname(procedural_city_generation.__file__)
	print path
	plt.imsave(path + "/temp/diffused.png", img2, cmap='gray')
	with open(path + "/temp/isdiffused.txt", 'w') as f:
		f.write("False")

	# scale matplotlib's [0, 1] floats back to 0-255 values
	img *= 255
	img2 *= 255
	return img, img2
Code example #27
File: lab4.py Project: athuras/tools
def get_data(skimage_resource_path='/usr/local/lib/python2.7/site-packages/skimage/data',
             degraded_path='/Users/ath/Desktop/degraded.png'):
    '''Returns the two images we'll use in the lab,
    Make sure to change the path to where the source images can be found'''
    cam = mpimg.imread(skimage_resource_path + '/camera.png')
    cam = skimage.transform.resize(cam, (256, 256))
    degraded = mpimg.imread(degraded_path)
    return cam, degraded
Code example #28
File: benchmark.py Project: ChristopherBe/burnman
def check_averaging_2():
    """
    Reproduce Figure 1 from Hashin and Shtrikman (1963) to check the
    Hashin-Shtrikman bounds for an elastic composite
    """

    hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
    hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()

    # create arrays for sampling in volume fraction
    volumes = np.linspace(0.0, 1.0, 100)
    hsu_bulk_modulus = np.empty_like(volumes)
    hsu_shear_modulus = np.empty_like(volumes)
    hsl_bulk_modulus = np.empty_like(volumes)
    hsl_shear_modulus = np.empty_like(volumes)

    # These values are from Hashin and Shtrikman (1963)
    K1 = 25.0
    K2 = 60.7
    G1 = 11.5
    G2 = 41.8

    for i in range(len(volumes)):
        hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
            [1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2]
        )
        hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
            [1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2]
        )

        hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
            [1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2]
        )
        hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
            [1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2]
        )

    fig = mpimg.imread("../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig1_K.png")
    plt.imshow(fig, extent=[0, 1.0, 1.1, K2 + 0.3], aspect="auto")
    plt.plot(volumes, hsu_bulk_modulus, "g-")
    plt.plot(volumes, hsl_bulk_modulus, "g-")
    plt.ylim(K1, K2)
    plt.xlim(0, 1.0)
    plt.xlabel("Volume fraction")
    plt.ylabel("Averaged bulk modulus")
    plt.title("Comparing with Figure 1 of Hashin and Shtrikman (1963)")
    plt.show()

    fig = mpimg.imread("../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig2_G.png")
    plt.imshow(fig, extent=[0, 1.0, 0.3, G2], aspect="auto")
    plt.plot(volumes, hsu_shear_modulus, "g-")
    plt.plot(volumes, hsl_shear_modulus, "g-")
    plt.ylim(G1, G2)
    plt.xlim(0, 1.0)
    plt.xlabel("Volume fraction")
    plt.ylabel("Averaged shear modulus")
    plt.title("Comparing with Figure 2 of Hashin and Shtrikman (1963)")
    plt.show()
Code example #29
File: match_images.py Project: 812864539/models
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image)
Code example #30
File: main.py Project: Groskilled/where_the_cut
def print_cuts(cuts, imgs):
    for i in cuts:
        print "I found a cut between frame {0} and frame {1}.".format(i[0], i[1])
        fig = plt.figure()
        a = fig.add_subplot(1,2,2)
        plt.subplot(121)
        imgplot = plt.imshow(mpimg.imread(imgs[i[0]]))
        plt.subplot(122)
        imgplot = plt.imshow(mpimg.imread(imgs[i[1]]))
        plt.show()
Code example #31
File: a.py Project: Fantaaaaa/6643
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

img = np.uint8(mpimg.imread('2.png') * 255.0)


def correlation(kernel, image):
    height, width = image.shape
    output = np.float32(np.zeros_like(image))
    kSize, _ = kernel.shape
    edge = int(kSize / 2)
    # border pixels are copied unchanged; the original mixed up rows and
    # columns, which only worked for square images
    output[:edge, :] = image[:edge, :]
    output[height - edge:, :] = image[height - edge:, :]
    output[:, :edge] = image[:, :edge]
    output[:, width - edge:] = image[:, width - edge:]
    # correlation over the interior
    for y in range(edge, height - edge):
        for x in range(edge, width - edge):
            output[y, x] = (
                kernel *
                image[y - edge:y + edge + 1, x - edge:x + edge + 1]).sum()
    return output


def gaussian(kernel, image):
Code example #32
File: freesound.py Project: Bluethealpha/Bowl-Bot
def plot_bitrate(adur,
                 input,
                 stream,
                 output,
                 format,
                 min: int = None,
                 max: int = None):
    #plotbitrate.py "0004 - Pink Floyd - Comfortably Numb - 06-49.flac" -s audio -o test.png -f png

    # get list of supported matplotlib formats
    format_list = list(
        matplot.figure().canvas.get_supported_filetypes().keys())
    matplot.close()  # destroy test figure

    # check if format given w/o output file
    if format and not output:
        sys.stderr.write("Error: Output format requires output file\n")
        return False

    # check given y-axis limits
    if min and max and (min >= max):
        sys.stderr.write("Error: Maximum should be greater than minimum\n")
        return False

    bitrate_data = {}
    frame_count = 0
    frame_rate = None
    frame_time = 0.0

    # get frame data for the selected stream
    with subprocess.Popen([
            "ffprobe", "-show_entries", "frame", "-select_streams", stream[0],
            "-print_format", "xml", input
    ],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.DEVNULL) as proc_frame:

        # process xml elements as they close
        for event in etree.iterparse(proc_frame.stdout):

            # skip non-frame elements
            node = event[1]
            if node.tag != 'frame':
                continue

            # count number of frames
            frame_count += 1

            # get type of frame
            if stream == 'audio':
                frame_type = 'A'  # pseudo frame type
            else:
                frame_type = node.get('pict_type')

            # get frame rate only once (assumes non-variable framerate)
            # TODO: use 'pkt_duration_time' each time instead
            if frame_rate is None:

                # audio frame rate, 1 / frame duration
                if stream == 'audio':
                    frame_rate = 1.0 / float(node.get('pkt_duration_time'))

                # video frame rate, read stream header
                else:
                    with subprocess.Popen(
                        [
                            "ffprobe", "-show_entries", "stream",
                            "-select_streams", "v", "-print_format", "xml",
                            input
                        ],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.DEVNULL) as proc_stream:

                        # parse stream header xml
                        stream_data = etree.parse(proc_stream.stdout)
                        stream_elem = stream_data.find('.//stream')

                        # compute frame rate from ratio
                        frame_rate_ratio = stream_elem.get('avg_frame_rate')
                        (dividend, divisor) = frame_rate_ratio.split('/')
                        frame_rate = float(dividend) / float(divisor)

            #
            # frame time (x-axis):
            #
            #   ffprobe conveniently reports the frame time position.
            #
            # frame bitrate (y-axis):
            #
            #   ffprobe reports the frame size in bytes. This must first be
            #   converted to kbits, which everyone is used to. To get the
            #   instantaneous frame bitrate we must consider the frame duration.
            #
            #   bitrate = (kbits / frame) * (frame / sec) = (kbits / sec)
            #
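            #   Worked example (illustrative numbers, not from a real file):
            #   a 4000-byte frame is 4000 * 8 / 1000 = 32 kbits; at 25 frames
            #   per second its instantaneous bitrate is 32 * 25 = 800 kbit/s.
            #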

            # collect frame data
            try:
                frame_time = float(node.get('best_effort_timestamp_time'))
            except:
                try:
                    frame_time = float(node.get('pkt_pts_time'))
                except:
                    if frame_count > 1:
                        frame_time += float(node.get('pkt_duration_time'))

            frame_bitrate = (float(node.get('pkt_size')) * 8 /
                             1000) * frame_rate
            frame = (frame_time, frame_bitrate)

            # create new frame list if new type
            if frame_type not in bitrate_data:
                bitrate_data[frame_type] = []

            # append frame to list by type
            bitrate_data[frame_type].append(frame)

        # check if ffprobe was successful
        if frame_count == 0:
            sys.stderr.write(
                "Error: No frame data, failed to execute ffprobe\n")
            return False

    # end frame subprocess

    # setup new figure
    fig = matplot.figure(figsize=(24, 8), dpi=300,
                         edgecolor='k')  #.canvas.set_window_title(input)
    #matplot.figure().canvas.set_window_title(input)
    ax = fig.add_subplot(111)
    matplot.title("Stream Bitrate vs Time")
    matplot.xlabel("Time (sec)")
    matplot.ylabel("Frame Bitrate (kbit/s)")
    matplot.grid(True)

    #Set background image
    path = os.getcwd()
    datafile = cbook.get_sample_data(path + BACKGROUND, asfileobj=False)
    #print('loading %s' % datafile)
    im = image.imread(datafile)
    im[:, :, -1] = 1.0  # set the alpha channel
    fig.figimage(im, 0, 0)

    # map frame type to color
    frame_type_color = {
        # audio
        'A': '#7289DA',
        # video
        'I': 'red',
        'P': 'green',
        'B': 'blue'
    }

    global_peak_bitrate = 0.0
    global_mean_bitrate = 0.0

    # render charts in order of expected decreasing size
    for frame_type in ['I', 'P', 'B', 'A']:

        # skip frame type if missing
        if frame_type not in bitrate_data:
            continue

        # convert list of tuples to numpy 2d array
        frame_list = bitrate_data[frame_type]
        frame_array = numpy.array(frame_list)

        # update global peak bitrate
        peak_bitrate = frame_array.max(0)[1]
        if peak_bitrate > global_peak_bitrate:
            global_peak_bitrate = peak_bitrate

        # update global mean bitrate (using piecewise mean)
        mean_bitrate = frame_array.mean(0)[1]
        global_mean_bitrate += mean_bitrate * (len(frame_list) / frame_count)

        # plot chart using gnuplot-like impulses
        matplot.vlines(frame_array[:, 0], [0],
                       frame_array[:, 1],
                       color=frame_type_color[frame_type],
                       label="{} Frames".format(frame_type))

    # set y-axis limits if requested
    if min:
        matplot.ylim(ymin=min)
    if max:
        matplot.ylim(ymax=max)

    # Define step size(grid) of time axis
    step_size = round((adur * 0.03) * 2) / 2
    print(step_size)
    ax.xaxis.set_ticks(numpy.arange(0.0, adur, step_size))

    # calculate peak line position (left 15%, above line)
    peak_text_x = matplot.xlim()[1] * 0.15
    peak_text_y = global_peak_bitrate + \
        ((matplot.ylim()[1] - matplot.ylim()[0]) * 0.015)
    peak_text = "peak ({:.0f})".format(global_peak_bitrate)

    # draw peak as a thick horizontal line w/ text
    matplot.axhline(global_peak_bitrate, linewidth=2, color='blue')
    matplot.text(peak_text_x,
                 peak_text_y,
                 peak_text,
                 horizontalalignment='center',
                 fontweight='bold',
                 color='blue')

    # calculate mean line position (right 85%, above line)
    mean_text_x = matplot.xlim()[1] * 0.85
    mean_text_y = global_mean_bitrate + \
        ((matplot.ylim()[1] - matplot.ylim()[0]) * 0.015)
    mean_text = "mean ({:.0f})".format(global_mean_bitrate)

    # draw mean as a thick horizontal line w/ text
    matplot.axhline(global_mean_bitrate, linewidth=2, color='green')
    matplot.text(mean_text_x,
                 mean_text_y,
                 mean_text,
                 horizontalalignment='center',
                 fontweight='bold',
                 color='green')

    matplot.legend()

    # render graph to file (if requested) or screen
    if output:
        matplot.savefig(output, format=format)
    else:
        matplot.show()

    # Cleanup
    del fig
Code example #33
#
# img = Image.imread(test_image)
# undist = undistort_img(img, objpoints, imgpoints)
# binary_img = binary_color(undist, sobel_kernel=3, gray_threshold=(45,255), color_thresold=(110,255))
# binary_warped, Minv = warp_M(binary_img)
# ploty, left_fitx, right_fitx, left_fit, right_fit = slide_windows(binary_warped, nwindows=9, margin=100, minpix=50)
# ploty, left_fitx, right_fitx = acc_frame_to_frame(binary_warped, left_fit, right_fit, margin = 100)
# result = img_region(binary_warped, left_fitx, right_fitx, ploty, Minv, undist)
#
# plt.imshow(result)
# plt.show()

## test a list of img
with open(obj_img_dir, 'rb') as f:
    objpoints, imgpoints = pickle.load(f)

# img  = Image.imread('test_images/'+'straight_lines1.jpg')
# res = pipline(img, objpoints, imgpoints)
# plt.imshow(res)
# plt.show()

for img_name in os.listdir('test_images/'):
    img = Image.imread('test_images/' +img_name)
    print ('test_images/' +img_name)
    res = pipline(img, objpoints, imgpoints)
    plt.imshow(res)
    plt.show()



Code example #34
File: encoder.py Project: guyko81/FaceEditor
    plt.ylim([0.0, 0.01])
    loc = ('upper right' if on_top else 'lower right')
    plt.legend(['Train', 'Test'], loc=loc)
    plt.draw()
    plt.savefig(fname)


#Load data set
print("Loading Data...")
files = os.listdir("../input/img_align_celeba/img_align_celeba")
images = []
for t, file in enumerate(files[:2000]):
    if t % 100 == 0:
        print(t)
    images.append(
        mpimg.imread('../input/img_align_celeba/img_align_celeba/' + file))

y_train = np.array(images).astype(np.float32) / 255.0
y_train = y_train[:y_train.shape[0] - y_train.shape[0] % BATCH_SIZE]
x_train = np.expand_dims(np.arange(y_train.shape[0]), axis=1)
num_samples = y_train.shape[0]
print("Loaded " + str(num_samples) + " Samples.")

###################################
#  Create Model
###################################
print("Loading Keras...")
import os, math

from keras.initializers import RandomUniform
from keras.layers import Input, Dense, Activation, Dropout, Flatten, Reshape, SpatialDropout2D
Code example #35
args = vars(ap.parse_args())


# read in indexed images' feature vectors and corresponding image names
h5f = h5py.File(args["index"],'r')
feats = h5f['dataset_1'][:]
imgNames = h5f['dataset_2'][:]
h5f.close()
        
print ("--------------------------------------------------")
print ("               searching starts")
print ("--------------------------------------------------")
    
# read and show query image
queryDir = args["query"]
queryImg = mpimg.imread(queryDir)
plt.title("Query Image")
plt.imshow(queryImg)
plt.show()

# init VGGNet16 model
model = VGGNet()

# extract query image's feature, compute simlarity score and sort
queryVec = model.extract_feat(queryDir)
scores = np.dot(queryVec, feats.T)
rank_ID = np.argsort(scores)[::-1]
rank_score = scores[rank_ID]
#print rank_ID
#print rank_score
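
A short sketch of how the ranked indices are typically consumed (maxres is an assumed cut-off; imgNames holds byte strings, as h5py returns them):

maxres = 10
for i, idx in enumerate(rank_ID[:maxres]):
    print("match %d: %s (score %.3f)" % (i + 1, imgNames[idx].decode(), rank_score[i]))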
Code example #36
def load_image(data_dir, image_file):
    """
    Load RGB images from a file
    """
    return mpimg.imread(os.path.join(data_dir, image_file.strip()))
Code example #37
File: TestGrid.py Project: PArguelles/thesis-scripts
# see astronaut as a matrix of blocks (of shape block_shape)
view = view_as_blocks(l, block_shape)

# collapse the last two dimensions in one
flatten_view = view.reshape(view.shape[0], view.shape[1], -1)

# resampling the image by taking either the `mean`,
# the `max` or the `median` value of each blocks.
mean_view = np.mean(flatten_view, axis=2)
max_view = np.max(flatten_view, axis=2)
median_view = np.median(flatten_view, axis=2)

# display resampled images
fig, axes = plt.subplots(3, 5, figsize=(20, 20), sharex=True, sharey=True)
ax = axes.ravel()

l_resized = ndi.zoom(l, 2, order=3)

img = mpimg.imread('C:/ShareSSD/tests/plot_T0859_GDT-HA_GDT-TS_km_3.png')

i = 0
while i < 15:
    ax[i].imshow(img, interpolation='nearest', cmap=cm.Greys_r)
    i += 1

for a in ax:
    a.set_axis_off()

fig.tight_layout()
plt.show()
Code example #38
# Edge detection kernel
# Built with SciPy
# Copyright 2019 Denis Rothman MIT License. READ LICENSE.
import matplotlib.image as mpimg
import numpy as np
import scipy.ndimage.filters as filter
import matplotlib.pyplot as plt


# I. An edge detection kernel
kernel_edge_detection = np.array([[0.,1.,0.],
                                [1.,-4.,1.],
                                [0.,1.,0.]])

#II.Load image
image=mpimg.imread('img.bmp')[:,:,0]
shape = image.shape
print("image shape",shape)
#III.Convolution
image_after_kernel = filter.convolve(image,kernel_edge_detection,mode='constant', cval=0)



# IV. Display the image before and after the convolution
f = plt.figure(figsize=(8, 8))
axarr=f.subplots(2,sharex=False)
axarr[0].imshow(image,cmap=plt.cm.gray)
axarr[1].imshow(image_after_kernel,cmap=plt.cm.gray)
f.show()

Code example #39
def read_image_from_archive(archive, image_name):
    imgfile = archive.open(image_name, "r")
    img = mpimg.imread(imgfile)
    return img
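
A sketch of the archive object this helper expects: any object whose open() method returns a file-like object works, e.g. a standard zipfile.ZipFile (the archive and member names are placeholders):

import zipfile

archive = zipfile.ZipFile('images.zip')
img = read_image_from_archive(archive, 'frame_001.png')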
Code example #40
# # Uncomment to print mAPs
# # print(mAP_euclidean)
# # print(mAP_manhattan)
# # print(mAP_cosine)

## Matches figure
with open('baselineKrank.pkl', 'rb') as f:
    krank = pickle.load(f)

pyplot.rcParams["font.family"] = 'Times New Roman'
fig, axes = pyplot.subplots(nrows=5, ncols=11)
pyplot.suptitle("Rank-10 lists for 5 random query images", fontsize=20)
idx = [28, 42, 77, 3, 4]
for i in range(len(idx)):

    imgOrg = mpimg.imread('./PR_data/images_cuhk03/' +
                          data.getImageFileName(idx[i], 'query')[0])

    img = zeros((len(imgOrg) + 20, len(imgOrg[0]) + 20, 3))

    img[:, :, 0] = pad(imgOrg[:, :, 0], ((10, 10), (10, 10)),
                       'constant',
                       constant_values=(0, 0))
    img[:, :, 1] = pad(imgOrg[:, :, 1], ((10, 10), (10, 10)),
                       'constant',
                       constant_values=(0, 0))
    img[:, :, 2] = pad(imgOrg[:, :, 2], ((10, 10), (10, 10)),
                       'constant',
                       constant_values=(0, 0))

    axes[i, 0].imshow(img)
    axes[i, 0].axis('off')
Code example #41
    import matplotlib.image as mpimg
    cam_calib = CameraCalib(9,6)
    images = glob.glob('../camera_cal/calibration*.jpg')
    # print(images)

    for idx, fname in enumerate(images):
        img = cam_calib.project_img_file(fname)
        write_name = './calib_corner/corners_found{}.jpg'.format(idx)
        cv2.imwrite(write_name, img)

    cam_calib.calibrate()
    cam_calib.export_calib("calibration_pickle.p")
    images.extend(glob.glob('../test_images/*.jpg'))

    for idx, fname in enumerate(images):
        org_img = mpimg.imread(fname)
        img = cam_calib.undist_img_file(fname)
        outfile = './calib/post_calib_comparison_{}.png'.format(idx)
        f, ax = plt.subplots(1, 2, figsize=(30, 12))
        f.tight_layout()

        ax[0].imshow(org_img)
        ax[0].set_title('Original', fontsize=30)
        ax[1].imshow(img)
        ax[1].set_title('Undistorted', fontsize=30)
        plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.0)
        plt.savefig(outfile)


    
Code example #42
import matplotlib.pyplot as plt 
import matplotlib.image as mgimg
from matplotlib import animation
import os
n_imagenes=os.listdir('figuras/')
frames = []
for i in range (len(n_imagenes)):
    frames.append('figuras/'+str(i)+'.png')




fig = plt.figure()

myimages = []
plt.axis('off')
for p in frames:
    fname = p
    img = mgimg.imread(fname)
    imgplot = plt.imshow(img)
    myimages.append([imgplot])


my_anim = animation.ArtistAnimation(fig, myimages, interval=200, blit=True, repeat_delay=1000)


my_anim.save("gaus_seidel_v30_a10.mp4")

Code example #43
import numpy as np
import tensorflow as tf
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

# First, load the image again
filename = "MarshOrchid.png"
image = mpimg.imread(filename)
# height, width, depth = image.shape

# Create a TensorFlow Variable
x = tf.Variable(image, name='x')
shape = tf.Variable(np.ones(3), name="shape")

model = tf.initialize_all_variables()

with tf.Session() as session:
    shape = tf.shape(x)
    session.run(model)
    shape = session.run(shape)

    # Try `tf.ones_initializer` to make this more robust!
    x = tf.reverse_sequence(x,
                            np.ones((shape[0], )) * shape[1],
                            1,
                            batch_dim=0)
    session.run(model)
    result = session.run(x)

print(result.shape)
plt.imshow(result)
Code example #44
import matplotlib.image as img
import matplotlib.pylab as plt
import numpy as np
import funciones as fun
import imgconv as ivn

# imread(path, format) reads an image from a file
# @param string path: path to the image
# @param format
# @return numpy.array: MxN for grayscale,
# MxNx3 for RGB, MxNx4 for RGBA

img_path = "D:/Documents/Tarpuy/Procesamiento-R/2dconv-verilog/src/py/Metrica/img/da_bossGS.jpg"
input_img = img.imread(img_path)

# Filter for the convolution
kernel = np.array(
    [
        [0, 1, 0],
        [1, -4, 1],
        [0, 1, 0]
    ])

# normalization with 'l2'
fix1 = fun.cross_corr(input_img, kernel)  # cross-correlation of the original signal
Signal = fun.potencia(fix1)  # power of the convolved signal
ker_fix = np.asarray(ivn.ker_norm(kernel))
ker_fix = np.asarray(fun.fix_matriz(ker_fix, 8, 7, 'S', 'round', 'saturate'))  # fixed-point kernel

input_S = fun.norm_m(input_img, 'l2')  # l2-normalized input
SNR1 = []
Code example #45
from copy import copy
from sys import path

import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.image as image
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable

path.extend([path[0][:path[0].rindex("src") - 1]])
from bin.Coordinators.gym_coordinator import Coordinator
from bin.Environment.simple_env import Env

_z = np.flipud(
    image.imread("C:/Docs/ETSI/BO_drones/data/Map/Ypacarai/map.png"))[:, :, 0]
nans = np.fliplr(np.asarray(np.where(_z == 1)).reshape(2, -1).T)


def get_clean(_file):
    # return _file
    for nnan in nans:
        _file[nnan[1], nnan[0]] = -1
    return np.ma.array(_file, mask=(_file == -1))


_bo_xs = np.array([
    [563, 375],
    # [559, 410],
    [604, 368],
    [647, 327],
    # [704, 362],
Code example #46
File: ocr.py Project: patharanordev/pangpuriye
    def imshow(self, img):
        if isinstance(img, str):
            img = mpimg.imread(img)

        plt.imshow(img)
        plt.show()
Code example #47
File: datasets.py Project: slyviacassell/keras-ocr
 def preprocess(self, image_name, dir=None):
     return DictNet.preprocess(
         mpimg.imread("%s/%s" %
                      (self.dir if dir is None else dir, image_name)))
Code example #48
File: ocr.py Project: patharanordev/pangpuriye
 def imread(self, fpath):
     return mpimg.imread(fpath)
Code example #49
def exp_hog_parameters(args, var, to_log=True):
    # define feature parameters
    cell_per_block  = var['cell_per_block'] # 2
    color_space     = var['color_space']    # can be RGB, HSV, LUV, HLS, YUV, YCrCb
    hist_bins       = var['hist_bins']      # 32  # number of histogram bins
    hist_feat       = var['hist_feat']      # histogram features on or off
    hog_channel     = var['hog_channel']    # 'ALL' # can be 0, 1, 2, or 'ALL'
    hog_feat        = var['hog_feat']       # HOG features on or off
    orient          = var['orient']         # 8
    overlap         = var['overlap']        # 0.5
    pix_per_cell    = var['pix_per_cell']   # 8
    scale           = var['scale']          # 1.0
    spatial_feat    = var['spatial_feat']   # True, spatial features on or off
    spatial_size    = var['spatial_size']   # (32,32)  # spatial binning dimensions
    x_start_stop    = var['x_start_stop']   # [None, None]
    y_start_stop    = var['y_start_stop']   # [400, 656]
    xy_window       = var['xy_window']      # (128, 128)

    # list_all_images
    cars, notcars = list_all_images(args)

    # choose random car/notcar indices
    flag_random = False
    if flag_random:
        car_ind = np.random.randint(0, len(cars))
        notcar_ind = np.random.randint(0, len(notcars))
    else:
        car_ind, notcar_ind = 2734, 7868

    # read in car / notcar images
    car_image = mpimg.imread(cars[car_ind])
    notcar_image = mpimg.imread(notcars[notcar_ind])

    num_img = 5
    flag_random = False
    if flag_random:
        # random.randint is inclusive on both ends, so cap at len(...) - 1
        cars_image_to_plot = [[cars[index].split('\\')[-1][:-4], cars[index]] for index in
                              [random.randint(0, len(cars) - 1) for i in range(num_img)]]
        notcars_image_to_plot = [[notcars[index].split('\\')[-1][:-4], notcars[index]] for index in
                                 [random.randint(0, len(notcars) - 1) for i in range(num_img)]]
    else:
        cars_image_to_plot = [[index, cars[index]] for index in
                              [random.randint(0, len(cars) - 1) for i in range(num_img)]]
        notcars_image_to_plot = [[index, notcars[index]] for index in
                                 [random.randint(0, len(notcars) - 1) for i in range(num_img)]]

    car_features, car_hog_image = single_img_features(car_image, color_space=color_space, spatial_size=spatial_size,
                                                      hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell,
                                                      cell_per_block=cell_per_block, hog_channel=hog_channel,
                                                      spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat,
                                                      vis=True)

    notcar_features, notcar_hog_image = single_img_features(notcar_image, color_space=color_space,
                                                            spatial_size=spatial_size,
                                                            hist_bins=hist_bins, orient=orient,
                                                            pix_per_cell=pix_per_cell,
                                                            cell_per_block=cell_per_block, hog_channel=hog_channel,
                                                            spatial_feat=spatial_feat, hist_feat=hist_feat,
                                                            hog_feat=hog_feat,
                                                            vis=True)

    t            = time.time()
    n_samples    = 1000
    random_idxs  = np.random.randint(0, len(cars), n_samples)
    test_cars    = np.array(cars)[random_idxs]
    test_noncars = np.array(notcars)[random_idxs]

    car_features = extract_features(test_cars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins,
                                    orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                                    hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)

    notcar_features = extract_features(test_noncars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins,
                                    orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                                    hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)

    t_feature_computation = round(time.time() - t, 2)
    print(t_feature_computation, 'Seconds to compute features...')
    X = np.vstack((car_features, notcar_features)).astype(np.float64)
    # fit a per_column scaler
    X_scaler = StandardScaler().fit(X)
    # apply the scaler to X
    scaled_X = X_scaler.transform(X)

    # define the labels vector
    y = np.hstack(( np.ones(len(car_features)), np.zeros(len(notcar_features)) ))

    # split up data into randomized training and test sets
    rand_state = np.random.randint(0, 100)
    X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.1, random_state=rand_state)

    print('Using:', orient, 'orientations,', pix_per_cell, 'pixels per cell,', cell_per_block,
          'cells per block,', hist_bins, 'histogram bins, and', spatial_size, 'spatial sampling')
    print('Feature vector length:', len(X_train[0]))

    # use a linear SVC
    svc = LinearSVC()
    # check the training time for the SVC
    t   = time.time()

    svc.fit(X_train, y_train) # https://stackoverflow.com/questions/40524790/valueerror-this-solver-needs-samples-of-at-least-2-classes-in-the-data-but-the
    t_train = round(time.time()-t, 2)
    print(t_train, 'Seconds to train SVC...')
    # check the score of the SVC
    accuracy = round(svc.score(X_test, y_test), 4)
    print('Test Accuracy of SVC = ', accuracy)

    log = [ cell_per_block, color_space, hist_bins,
            hist_feat, hog_channel, orient,
            pix_per_cell, spatial_feat, spatial_size,
            accuracy, len(X_train[0]), t_feature_computation, t_train, t_feature_computation+t_train  ]

    # log = [ var['cell_per_block'], var['color_space'], var['hist_bins'],
    #         var['hist_feat'], var['hog_channel'], var['orient'],
    #         var['pix_per_cell'], var['spatial_feat'], var['spatial_size'],
    #         accuracy, len(X_train[0]), t_feature_computation, t_train, t_feature_computation+t_train  ]

    if to_log: log_write(args, log)
Code Example #50
0
# # Classifying large images using Inception

# **Exercise:** Download some images of various animals. Load them in Python, for example using the matplotlib.image.imread() function (commonly aliased as mpimg.imread()) or the scipy.misc.imread() function (removed in recent SciPy releases; imageio.imread() is the usual replacement). Resize and/or crop them to 299 × 299 pixels, and ensure that they have just three channels (RGB), with no transparency channel. The images that the Inception model was trained on were preprocessed so that their values range from -1.0 to 1.0, so you must ensure that your images do too.

# In[ ]:

import os

import tensorflow as tf
import matplotlib.pyplot as plt

tf.reset_default_graph()

width = 299
height = 299
channels = 3

# In[57]:

import matplotlib.image as mpimg
test_image = mpimg.imread(os.path.join("rsz_dog.jpg"))[:, :, :channels]
plt.imshow(test_image)
plt.axis("off")
plt.show()
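
# In[ ]:

# A minimal sketch of the resize/crop step the exercise asks for, assuming
# the loaded image may not be 299x299 yet ("rsz_dog.jpg" suggests it was
# pre-resized by hand). Center-crop the largest square, then resize with
# scikit-image; resize() also converts the result to floats in [0, 1].
from skimage.transform import resize

def prepare_image(image, target_width=width, target_height=height):
    h, w = image.shape[:2]
    crop_size = min(h, w)
    y0 = (h - crop_size) // 2
    x0 = (w - crop_size) // 2
    square = image[y0:y0 + crop_size, x0:x0 + crop_size]
    return resize(square, (target_height, target_width))

test_image = prepare_image(test_image)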

# In[ ]:

# Rescale from [0, 1] to the [-1, 1] range Inception expects. This assumes
# the image is already float in [0, 1] (skimage's resize above guarantees
# that); a raw uint8 JPEG in [0, 255] would need dividing by 255 first.
test_image = 2 * test_image - 1

# **Exercise:** Download the latest pretrained Inception v4 model at
# http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz.<br>
# The list of class names is available at https://goo.gl/brXRtZ, but you must insert a "background" class at the beginning.
#

# In[ ]:
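
# A hedged sketch of building the class list: assuming the names from the
# URL above were saved locally as "imagenet_class_names.txt" (one of the
# 1000 names per line), prepend the extra "background" class so the indices
# line up with the model's 1001 logits.
with open("imagenet_class_names.txt") as f:
    class_names = ["background"] + [line.strip() for line in f]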
Code Example #51
0
            dist = pickle_dict['dist']
    except IOError as err:
        print('Generating mtx and dist and saving to {f}'.format(
            f=pickle_file_name))
        ret, mtx, dist, rvecs, tvecs = callibrate_camera(file_names)
        pickle.dump({'mtx': mtx, 'dist': dist}, open(pickle_file_name, 'wb'))

    # draw_lines(undist, [
    #    [(521.697, 500.501, 234.025, 699.821)],
    #    [(764.666, 500.501, 1064.43, 699.821)]
    # ])
    # draw_lines(undist, get_points_for_lanes(src_points))

    p_transform_matrix, p_transform_matrix_inv = get_perspective_transform_matrix(
        mtx, dist)
    img = mpimg.imread(args.test_image_name)
    undist, threshold_binary, p_transformed, out_img, left_fitx, right_fitx, left_fit, right_fit = transform_image(
        img,
        mtx=mtx,
        dist=dist,
        p_transform_matrix=p_transform_matrix,
        color_threshold=(150, 255),
        grad_x_thresh=(20, 255))

    f, axs = plt.subplots(2, 2, figsize=(24, 10))
    [ax1, ax2, ax3, ax4] = plt.gcf().get_axes()
    f.tight_layout()
    ax1.imshow(img)
    ax1.set_title('Original Image', fontsize=10)
    ax2.imshow(threshold_binary)
    ax2.set_title('Thresholded', fontsize=10)
Code Example #52
0
get_ipython().run_line_magic('matplotlib', 'inline')

# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane.

# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')

# Step through the list and search for chessboard corners
for fname in images:
    img = mpimg.imread(fname)
    #img = cv2.imread(fname)
    # mpimg.imread returns RGB (cv2.imread would return BGR), so convert from RGB
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)

    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)

        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (9, 6), corners, ret)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
Code Example #53
0
import numpy as np
import socketio
from flask import Flask
import matplotlib.image as mpimg
import time

# Import functions for perception and decision making
from perception import perception_step
from decision import decision_step
from supporting_functions import update_rover, create_output_images
# Initialize socketio server and Flask application 
# (learn more at: https://python-socketio.readthedocs.io/en/latest/)
sio = socketio.Server()
app = Flask(__name__)

# Read in ground truth map and create 3-channel green version for overplotting
# NOTE: images are read in by default with the origin (0, 0) in the upper left
# and y-axis increasing downward.
ground_truth = mpimg.imread('../calibration_images/map_bw.png')
# This next line creates arrays of zeros in the red and blue channels
# and puts the map into the green channel.  This is why the underlying 
# map output looks green in the display image
ground_truth_3d = np.dstack((ground_truth*0, ground_truth*255, ground_truth*0)).astype(float)  # np.float is removed in recent NumPy

# Define RoverState() class to retain rover state parameters
class RoverState():
    def __init__(self):
        self.start_time = None # To record the start time of navigation
        self.total_time = None # To record total duration of navigation
        self.start_pos = None # Position (x, y) of the starting location
        self.img = None # Current camera image
        self.pos = None # Current position (x, y)
        self.yaw = None # Current yaw angle
        self.pitch = None # Current pitch angle
Code Example #54
0
    images = os.listdir(category_path)
    # print(images)

    for image in images:
        image_path = f'{category_path}/{image}'

        # 0.5 default
        low_exp_cutoff = 0.55  # before 0.5 (was not dark enough)
        high_exp_cutoff = 0.38  # before 0.4 (sometimes too light)

        # 1 default, greater than 1 darker
        low_exp_gamma = 1
        high_exp_gamma = 0.8

        # Read image from path
        img = mpimg.imread(image_path)

        lower_exp = exposure.adjust_sigmoid(img, cutoff=low_exp_cutoff)
        # higher_exp = exposure.adjust_sigmoid(img, cutoff=high_exp_cutoff)
        higher_exp = exposure.adjust_gamma(img, gamma=high_exp_gamma)

        flip_y = flip(img, 1)

        flipped_lower_exp = exposure.adjust_sigmoid(flip_y,
                                                    cutoff=low_exp_cutoff)
        # flipped_higher_exp = exposure.adjust_sigmoid(flip_y, cutoff=high_exp_cutoff)
        flipped_higher_exp = exposure.adjust_gamma(flip_y,
                                                   gamma=high_exp_gamma)

        # Create folder for each version
        create_folder(f'{augmented_path}/{category}')
Code Example #55
0
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
dob = 'flashcardpictures/3-way handshake(0).PNG'
img = mpimg.imread(dob)
plt.imshow(img)
plt.show()
Code Example #56
0
        # If found, save & draw corners
        if ret == True:
            # Save object points and corresponding corners
            objp_list.append(objp)
            corners_list.append(corners)

        else:
            print('Warning: ret = %s for %s' % (ret, fname))

    # Calibrate camera and undistort a test image
    img = cv2.imread('test_images/straight_lines1.jpg')
    img_size = (img.shape[1], img.shape[0])
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objp_list, corners_list,
                                                       img_size, None, None)

    return mtx, dist


if __name__ == '__main__':
    mtx, dist = calibrate_camera()
    save_dict = {'mtx': mtx, 'dist': dist}
    with open('calibrate_camera.p', 'wb') as f:
        pickle.dump(save_dict, f)

    # Undistort example calibration image
    img = mpimg.imread('camera_cal/calibration5.jpg')
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    plt.imshow(dst)
    plt.savefig('example_images/undistort_calibration.png')
Code Example #57
0
def pid_imgs_lookup(pid):
    return [i for i in img_paths if int(i.split('_')[0].split('/')[-1]) == pid]


# Dict with Key: pid - Value: list of image-paths
pid_dict = {pid: pid_imgs_lookup(pid) for pid in pid_list}

# %% Anonymize all photos
import sys
import os
from deep_privacy_anonymize import anon_and_write_imgs

batch_size = 30
# number of faces to anonymize at once
for i in range(0, len(img_paths), batch_size):
    print('anonymizing:', i, i + batch_size - 1)
    # Slice ends are exclusive, so i:i + batch_size yields batch_size items
    filepath_original = img_paths[i:i + batch_size]
    filepath_anonymized = [
        i.replace('originals', 'anonymized') for i in filepath_original
    ]
    anon_and_write_imgs(filepath_original, filepath_anonymized)
# %%

for path_img in pid_dict[223]:
    print(path_img)
    img = mpimg.imread(path_img)
    plt.imshow(img)
    plt.show()

# %%
Code Example #58
0
File: ImageReader.py Project: Proccyon/Game
import matplotlib.image as mpimg

def GetImage(Name):
    # Read the image from disk and run it through TransformImage (defined
    # elsewhere in ImageReader.py); the per-channel R, G, B arrays are discarded.
    img = mpimg.imread(Name)
    Image, R, G, B = TransformImage(img)
    return Image
Code Example #59
0
q = np.argmax(sess.run(predict, feed_dict=feed_dict), 1)
a = sess.run(acc_, feed_dict=feed_dict)

print "\n", q, a, np.argmax(y, 1), "\n"
viz = sess.run(h_2, feed_dict=feed_dict)
#plt.scatter(viz[:,0],viz[:,1],s=100)

from sklearn.cluster import SpectralClustering as SC
clf = SC(n_clusters=2)
output = clf.fit_predict(viz)
plt.scatter(viz[:, 0], viz[:, 1], c=output, s=75)
plt.show()
d1 = data[output == 0]
d2 = data[output == 1]

import matplotlib.image as mpimg
im = mpimg.imread('scene.jpg')
plt.imshow(im)

for row in d1:
    row = np.reshape(row, (15, 2))
    plt.scatter(row[:, 0], row[:, 1], c='r')
plt.show()

plt.imshow(im)
for row in d2:
    row = np.reshape(row, (15, 2))
    plt.scatter(row[:, 0], row[:, 1], c='b')

plt.show()
Code Example #60
0
src = np.float32([[490, 482], [820, 482],
                  [1280, 670], [20, 670]])
dst = np.float32([[0, 0], [1280, 0],
                  [1280, 720], [0, 720]])

ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
ym_per_pix = 30 / 720  # meters per pixel in y dimension
xm_per_pix = 3.7 / 600  # meters per pixel in x dimension
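
# A sketch of how these pixel-to-meter conversions are typically used:
# refit the lane polynomial in world space and evaluate the radius of
# curvature R = (1 + (2Ay + B)^2)^(3/2) / |2A| at the bottom of the image.
# (left_fitx here is assumed to come from the lane-finding step.)
# fit_cr = np.polyfit(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)
# y_eval = np.max(ploty) * ym_per_pix
# curve_rad = (1 + (2 * fit_cr[0] * y_eval + fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * fit_cr[0])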

# import Camera Calibration Parameters
dist_pickle = "./wide_dist_pickle.p"
with open(dist_pickle, mode="rb") as f:
    CalData = pickle.load(f)
mtx, dist = CalData["mtx"], CalData["dist"]
frame_num = 5   # number of most recent frames with a good detection to keep
left = Line()
right = Line()

video_output = './output_videos/harder_challenge.mp4'
input_path = './test_videos/harder_challenge_video.mp4'

clip1 = VideoFileClip(input_path)
# clip1 = VideoFileClip(input_path).subclip(0, 30)

final_clip = clip1.fl_image(process_image)
final_clip.write_videofile(video_output, audio=False)

img = mpimg.imread('./test_images/test_ch4.jpg')
# r = process_image( mpimg.imread('./test_images/test_ch4.jpg'), plot=True)