Code Example #1
    def graph(self, x, y):
        plt.gray()

        for i in range(x * y):
            plt.subplot(y, x, i + 1)
            plt.imshow(self._data[i][0].reshape(IMG_WIDTH, IMG_HEIGHT))
        plt.show()
Code Example #2
def draw_digit(data):
    size = int(len(data) ** 0.5)
    Z = data.reshape(size, size)  # convert from vector to matrix
    plt.imshow(Z, interpolation="none")
    plt.gray()
    plt.tick_params(labelbottom=False)
    plt.tick_params(labelleft=False)
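For illustration only (not part of the original snippet), a hypothetical call that feeds draw_digit one flattened 8x8 digit from scikit-learn; it assumes matplotlib.pyplot is imported as plt and scikit-learn is installed:

from sklearn.datasets import load_digits
import matplotlib.pyplot as plt

digits = load_digits()
draw_digit(digits.data[0])  # 64-element vector, reshaped to 8x8 inside draw_digit
plt.show()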
Code Example #3
File: digits.py  Project: Mela2014/info490-sp16
def im_plot(x, y, images):
    
    # First we build an array of the indices containing the first ten images for each digit.
    vals = np.where(y == 0)[0][:10]
    
    for i in range(1, 10):
        vals = np.vstack((vals, np.where(y == i)[0][:10]))
        
    nrows, ncols = vals.shape
    
    plt.figure(figsize=(8.5,8))
    plt.gray()

    # Build a list of the indices in the order I want.
    # plot them, one by one.

    for idx, i in enumerate(vals.T.ravel()):
        ax = plt.subplot(nrows, ncols, idx + 1)
        
        # We want square images
        ax.set_aspect('equal')
        
        # Now show the images; by default pixels are shown as white on black.
        # To show black on white, reverse the colormap: cmap=plt.cm.gray_r
        # To keep pixelated images sharp (avoid smoothing): interpolation='nearest'
        ax.imshow(images[i])
        
        # No tick marks for small plots
        plt.xticks([]) ; plt.yticks([])
        
        # Only put plot labels over the columns
        if idx < 10:
            plt.title(y[i])
Code Example #4
File: util2.py  Project: ErwanGalline/test
def graph_spectrogram(wav_file, wav_folder):
    name_save = wav_file.replace(".wav", ".png")
    name_save_cv2 = wav_file.replace(".wav", "_cv2.png")
    rate, data = get_wav_info(wav_file)
    nfft = 256  # Length of the windowing segments
    fs = 256  # Sampling frequency
    plt.clf()
    pxx, freqs, bins, im = plt.specgram(data, nfft, fs)
    plt.axis('off')
    plt.gray()

    plt.savefig(name_save,
                dpi=50,  # Dots per inch
                frameon='false',
                aspect='normal',
                bbox_inches='tight',
                pad_inches=0)

    # Export the plot as an image via the canvas buffer
    fig = plt.gcf()
    fig.canvas.draw()
    # Get the RGB buffer from the figure
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    buf = buf.reshape(h, w, 3)  # the buffer is row-major: height x width x 3
    # canvas.tostring_argb gives the pixmap in ARGB mode; roll the alpha channel to get RGBA
    # buf = np.roll(buf, 2)
    cv2.imwrite(name_save_cv2, buf)
Code Example #5
def plot_images(data_list, data_shape="auto", fig_shape="auto"):
    """
    plotting data on current plt object.
    In default,data_shape and fig_shape are auto.
    It means considered the data as a sqare structure.
    """
    n_data = len(data_list)
    if data_shape == "auto":
        sqr = int(n_data ** 0.5)
        if sqr * sqr != n_data:
            data_shape = (sqr + 1, sqr + 1)
        else:
            data_shape = (sqr, sqr)
    plt.figure(figsize=data_shape)

    for i, data in enumerate(data_list):
        plt.subplot(data_shape[0], data_shape[1], i + 1)
        plt.gray()
        if fig_shape == "auto":
            fig_size = int(len(data) ** 0.5)
            if fig_size ** 2 != len(data):
                fig_shape = (fig_size + 1, fig_size + 1)
            else:
                fig_shape = (fig_size, fig_size)
        Z = data.reshape(fig_shape[0], fig_shape[1])
        plt.imshow(Z, interpolation="nearest")
        plt.tick_params(labelleft=False, labelbottom=False)
        plt.tick_params(axis="both", which="both", left=False, bottom=False, right=False, top=False)
        plt.subplots_adjust(hspace=0.05)
        plt.subplots_adjust(wspace=0.05)
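A minimal, hypothetical usage sketch (not from the original project), assuming numpy is imported as np and matplotlib.pyplot as plt, as in the snippet above:

imgs = [np.random.rand(28 * 28) for _ in range(16)]  # 16 random 28x28 "images"
plot_images(imgs)  # drawn as a 4x4 grid of 28x28 tiles
plt.show()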
Code Example #6
File: Question4.py  Project: fs1214/assignment7
def main():
    print('-----Question4-----')
    #define constant.
    N_max = 50
    some_threshold = 50
    
    #construct a grid of x,y in range [-2, 1], [-1.5, 1.5].
    num_x = 500
    num_y = 500
    x = np.linspace(-2,1,num_x)
    y = np.linspace(-1.5,1.5,num_y)
    c = x[:, np.newaxis] + 1j*y[np.newaxis, :]
    
    #do the iteration
    z = c
    for v in range(N_max):
        z = z**2 + c
    
    #define the boolean mask.
    mask = abs(z) < some_threshold
    
    #draw the image and save the result
    plt.imshow(mask.T, extent=[-2, 1, -1.5, 1.5])
    plt.gray()
    plt.savefig('mandelbrot.png')
    
    print('----------------')
Code Example #7
def just_do_it(limit_cont):
	fig = plt.figure(facecolor='black')
	plt.gray()
	print("Rozpoczynam przetwarzanie obrazow...")
    
	for i in range(20):
		img = data.imread(images[i])

		gray_img = to_gray(images[i])				# samoloty1.pdf
		#gray_img = to_gray2(images[i],  1001, 0.2, 5, 9, 12) 	# samoloty2.pdf
		#gray_img = to_gray2(images[i],  641, 0.2, 5, 20, 5)	# samoloty3.pdf
		conts = find_contours(gray_img, limit_cont)
		centrs = [find_centroid(cont) for cont in conts]

		ax = fig.add_subplot(4, 5, i + 1)  # subplot indices start at 1
		ax.set_yticks([])
		ax.set_xticks([])
		io.imshow(img)
		print("Przetworzono: " + images[i])
        
		for n, cont in enumerate(conts):
			ax.plot(cont[:, 1], cont[:, 0], linewidth=2)
            
		for centr in centrs:
			ax.add_artist(plt.Circle(centr, 5, color='white'))
            
	fig.tight_layout()
	#plt.show()
	plt.savefig('samoloty3.pdf')
Code Example #8
File: Question4.py  Project: fanyichen/assignment7
def Question4():
    print('question 4')
    #set the grid of x and y
    mandelbrot_set=compute_mandeohrot(50,50, 501,501 )
    plt.imshow(mandelbrot_set.T,extent=[-2,1,-1.5,1.5])
    plt.gray()
    plt.savefig('mandelbrot.png')
Code Example #9
def mostra_imagens(imagens, tam_patch=64):
    """Display a list of images"""
    n_imgs = len(imagens)
       
    fig = plt.figure()
    
    n = 1
    for img in imagens:
        imagem = img[0]
        titulo = img[1]       

        #####################################
        v, h, _ = imagem.shape
        # compute the horizontal borders
        h_m1 = h % tam_patch
        h_m2 = h_m1//2
        h_m1 -= h_m2
        # compute the vertical borders
        v_m1 = v % tam_patch
        v_m2 = v_m1//2
        v_m1 -= v_m2
        #####################################            
        
        a = fig.add_subplot(1,n_imgs,n) 
        a.set_xticks(np.arange(0+h_m1, 700-h_m2, tam_patch))
        a.set_yticks(np.arange(0+v_m1, 460-v_m2, tam_patch))
        a.grid(True)
        if imagem.ndim == 2: 
            plt.gray() 
            
        plt.imshow(imagem)
        a.set_title(titulo)
        n += 1
    fig.set_size_inches(np.array(fig.get_size_inches()) * n_imgs)
    plt.show()
Code Example #10
def run_conv_net_image_filtering():
    img = get_3_wolves_image()
    img = img.astype(theano.config.floatX) / 256.

    prepared_image = img.transpose(2, 0, 1).reshape(1, 3, img.shape[0], img.shape[1])
    # now image shape is 1x3x639x516 - (batchsize x number_of_feature_maps x height x width)

    input = T.tensor4('input')
    rng = np.random.RandomState(23455)
    filter_symbolic, _ = conv_layer(
        input, feature_maps_count_in=3, feature_maps_count_out=2, filter_shape=(9, 9), rng=rng)
    filter = theano.function([input], filter_symbolic)

    filtered_image = filter(prepared_image)
    output0 = filtered_image[0, 0, :, :]
    output1 = filtered_image[0, 1, :, :]

    plots.subplot(1, 3, 1)
    plots.axis('off')
    plots.imshow(img)
    plots.gray()
    plots.subplot(1, 3, 2)
    plots.axis('off')
    plots.imshow(output0)
    plots.subplot(1, 3, 3)
    plots.axis('off')
    plots.imshow(output1)
    plots.show()
Code Example #11
File: helpers.py  Project: klangner/license_plate
def plot_images(images, labels):
    fig = plt.figure()
    plt.gray()
    for i in range(min(9, images.shape[0])):
        fig.add_subplot(3, 3, i+1)
        show_image(images[i], labels[i])
    plt.show()
Code Example #12
File: assignment7.py  Project: im965/assignment7
def q4():
  '''question #4 module'''
  #grids
  pixel = 1000  # integer, so it can be used as an array dimension below
  x=np.arange(-2,1,3.0/pixel)
  y=np.arange(-1.5,1.5,3.0/pixel)
  xs,ys=np.meshgrid(x,y)

  #intialize
  mask=np.empty((pixel,pixel))
  mask[:,:]=1
  max_N=50
  bound=50
  c=xs+(1j*ys)
  z=c

  for t in np.arange(max_N):
    z= z**2 + c
    mask = mask*(abs(z) < bound)  # compare magnitudes; complex values have no ordering

  import matplotlib.pyplot as plt
  plt.imshow(mask, extent=[-2, 1, -1.5, 1.5])
  plt.gray()
  plt.savefig('mandelbrot.png')
  print ""
  print "Question #4"
  print "mandelbrot.png has been saved to local directory"
  print ""
Code Example #13
def test():
    saver.restore(sess, FLAGS.save_dir+'/model.ckpt')
    batch_x, _ = mnist.test.next_batch(10)
    batch_x_att, batch_p = sess.run([x_att, p], {x:batch_x})

    A = np.zeros((0, N*N))
    for i in range(10):
        for k in range(K):
            A = np.concatenate([A, batch_x_att[k][i].reshape((1, N*N))], 0)
    fig = plt.figure('attended')
    plt.gray()
    plt.axis('off')
    plt.imshow(batchmat_to_tileimg(A, (N, N), (10, K)))
    fig.savefig(FLAGS.save_dir+'/attended.png')

    """
    P = np.zeros((0, n_in))
    for i in range(10):
        P = np.concatenate([P, batch_x[i].reshape((1, n_in))], 0)
        for k in range(K):
            P = np.concatenate([P, batch_pk[k][i].reshape((1, n_in))], 0)
        P = np.concatenate([P, batch_p[i].reshape((1, n_in))])
    fig = plt.figure('reconstructed')
    plt.gray()
    plt.axis('off')
    plt.imshow(batchmat_to_tileimg(P, (height, width), (10, K+2)))
    fig.savefig(FLAGS.save_dir+'/reconstructed.png')
    """

    plt.show()
Code Example #14
File: image_1D.py  Project: btapo/becca
    def visualize_world(self, brain):
        """ 
        Show what's going on in the world.
        """
        if self.print_features:
            projections, activities = brain.cortex.get_index_projections(to_screen=True)
            wtools.print_pixel_array_features(
                    projections, 
                    self.fov_span ** 2 * 2, 
                    0,#self.num_actions,
                    self.fov_span, self.fov_span, 
                    world_name=self.name)

        # Periodically show the state history and inputs as perceived by BECCA
        print(''.join(["world is ", str(self.timestep), " timesteps old"]))
        fig = plt.figure(11)
        plt.clf()
        plt.plot( self.column_history, 'k.')    
        plt.title(''.join(['Column history for ', self.name]))
        plt.xlabel('time step')
        plt.ylabel('position (pixels)')
        fig.show()
        fig.canvas.draw()

        fig  = plt.figure(12)
        sensed_image = np.reshape(
                0.5 * (self.sensors[:len(self.sensors) // 2] -
                       self.sensors[len(self.sensors) // 2:] + 1),
                (self.fov_span, self.fov_span))
        plt.gray()
        plt.imshow(sensed_image, interpolation='nearest')
        plt.title("Image sensed")
        fig.show()
        fig.canvas.draw()
Code Example #15
def run_denoising():
	noisy = prediction_image

	fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 5), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})

	plt.gray()

	ax[0, 0].imshow(noisy)
	ax[0, 0].axis('off')
	ax[0, 0].set_title('noisy')
	ax[0, 1].imshow(denoise_tv_chambolle(noisy, weight=0.1, multichannel=True))
	ax[0, 1].axis('off')
	ax[0, 1].set_title('TV')
	ax[0, 2].imshow(denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15))
	ax[0, 2].axis('off')
	ax[0, 2].set_title('Bilateral')

	ax[1, 0].imshow(denoise_tv_chambolle(noisy, weight=0.2, multichannel=True))
	ax[1, 0].axis('off')
	ax[1, 0].set_title('(more) TV')
	ax[1, 1].imshow(denoise_bilateral(noisy, sigma_range=0.1, sigma_spatial=15))
	ax[1, 1].axis('off')
	ax[1, 1].set_title('(more) Bilateral')
	ax[1, 2].imshow(noisy)
	ax[1, 2].axis('off')
	ax[1, 2].set_title('original')

	fig.subplots_adjust(wspace=0.02, hspace=0.2,
	                    top=0.9, bottom=0.05, left=0, right=1)

	plt.show()
Code Example #16
def display_depth(dev, data, timestamp):
    global image_depth
    global filenum
    filenum = filenum+1
    #print data.shape
    '''
    print type(data)
    print 100.0/(-0.00307 * data[240,320] + 3.33)
    print 0.1236 * math.tan(data[240,320] / 2842.5 + 1.1863)
    print data[240,320]
    print '-------'
    '''
    depth = 100/(-0.00307*data + 3.33)
    #savedepth(depth)
    t = time.time()
    fname = '%s/D%s.nparray'%(directory,t)
    np.save(fname, data)
    print "Writing %s"%fname
    data = frame_convert.pretty_depth(data)
    
    mp.gray()
    mp.figure(1)
    if image_depth:
        image_depth.set_data(data)
    else:
        image_depth = mp.imshow(data, interpolation='nearest', animated=True)
    mp.draw()
Code Example #17
def view(stack, image):  # function to view an image
    ''' View a single image from a chosen stack of images
        
    Keyword arguments: 
        stack -- (str) this should be either 'train', 'test', or 'recon'                 
        
        image -- (int) this is the index number of the image in the stack;
                  12,000 images in the training set, 1,233 in the test set
    '''
    plt.gray()
    
    if stack == 'train':
        plt.imshow(np.hstack((
                            left[image].reshape(64,32), 
                            right[image].reshape(64,32)
                           ))
                  )
                  
    elif stack == 'test':
        plt.imshow(test[image].reshape(64,32))
    
    elif stack == 'recon':
        plt.imshow(np.hstack((
                            test[image].reshape(64,32), 
                            reconstructed[image].reshape(64,32)
                            ))
                  )
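For context only, a hypothetical call, assuming the left, right, test and reconstructed arrays referenced above are already loaded as in the original project:

view('train', 0)  # show the first stereo pair (left | right) from the training stack
plt.show()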
Code Example #18
def visualizefacenet(fname, imgs, patches_left, patches_right,
                     true_label, predicted_label):
    """Builds a plot of facenet with attention per RNN step and
    classification result
    """
    nsamples = imgs.shape[0]
    nsteps = patches_left.shape[1]
    is_correct = true_label == predicted_label
    w = nsteps + 2 + (nsteps % 2)
    h = nsamples * 2
    plt.clf()
    plt.gray()
    for i in range(nsamples):
        plt.subplot(nsamples, w//2, i*w//2 + 1)
        plt.imshow(imgs[i])
        msg = ('Prediction: ' + predicted_label[i] + ' TrueLabel: ' +
               true_label[i])
        if is_correct[i]:
            plt.title(msg,color='green')
        else:
            plt.title(msg,color='red')
        plt.axis('off')
        for j in range(nsteps):
            plt.subplot(h, w, i*2*w + 2 + 1 + j)
            plt.imshow(patches_left[i, j])
            plt.axis('off')
            plt.subplot(h, w, i*2*w + 2 + 1 + j + w)
            plt.imshow(patches_right[i, j])
            plt.axis('off')
    plt.savefig(fname)  # save before show(), otherwise the saved figure may be empty
    plt.show()
Code Example #19
File: theta_one_spike.py  Project: EvanBianco/modelr
def run_script(args):
    
    matplotlib.interactive(False)
    
    Rprop0 = args.Rpp0 
    Rprop1 = args.Rpp1    
            
    theta = np.arange(args.theta[0], args.theta[1], args.theta[2])
        
    warray_amp = create_theta_spike(args.pad,
                                    Rprop0, Rprop1, theta,
                                    args.f, args.points, args.reflectivity_method)
    
    fig = plt.figure()
    ax1 = fig.add_subplot(111)

    plt.gray()
    aspect = float(warray_amp.shape[1]) / warray_amp.shape[0]
    ax1.imshow(warray_amp, aspect=aspect, cmap=args.colour)
    
    plt.title(args.title % locals())
    plt.ylabel('time (ms)')
    plt.xlabel('trace')
    
    fig_path = tempfile.mktemp('.jpeg')
    plt.savefig(fig_path)
    
    with open(fig_path, 'rb') as fd:
        data = fd.read()
        
    unlink(fig_path)
        
    return data
Code Example #20
File: Mandelbrot.py  Project: ShixinLi/assignment7
def compute_mandelbrot():
	N_max = 50
	some_threshold = 50

	grid_interval = 1000

	# construct the grid 
	x = np.linspace(-2, 1, grid_interval)
	y = np.linspace(-1.5, 1.5, grid_interval)

	c = x[:, np.newaxis] + 1j*y[np.newaxis, :]

	# do the iteration 
	z = c 
	for v in range(N_max):
		with np.errstate(all = "ignore"): # catches overflow and invalid value errors 
			z = z**2 + c 
		
	with np.errstate(all = "ignore"): # catches overflow and invalid value errors 

		# form a 2-D boolean mask 
		mask = (abs(z) < some_threshold)

	# save the result to an image
	plt.imshow(mask.T, extent = [-2, 1, -1.5, 1.5])
	plt.gray()
	plt.savefig('mandelbrot.png')
Code Example #21
File: test_warp.py  Project: robintw/rasterio
def test_warp_from_file():
    """File to ndarray"""
    with rasterio.open('rasterio/tests/data/RGB.byte.tif') as src:
        dst_transform = [-8789636.708, 300.0, 0.0, 2943560.235, 0.0, -300.0]
        dst_crs = dict(
                    proj='merc',
                    a=6378137,
                    b=6378137,
                    lat_ts=0.0,
                    lon_0=0.0,
                    x_0=0.0,
                    y_0=0,
                    k=1.0,
                    units='m',
                    nadgrids='@null',
                    wktext=True,
                    no_defs=True)
        destin = numpy.empty(src.shape, dtype=numpy.uint8)
        reproject(
            rasterio.band(src, 1), 
            destin, 
            dst_transform=dst_transform, 
            dst_crs=dst_crs)
    assert destin.any()
    try:
        import matplotlib.pyplot as plt
        plt.imshow(destin)
        plt.gray()
        plt.savefig('test_warp_from_filereproject.png')
    except:
        pass
Code Example #22
def save_image(img, fn):
	if len(img) == 96*96:
		plt.imshow(to_matrix(img))
	else:
		plt.imshow(img)
	plt.gray()
	plt.savefig(fn)
Code Example #23
File: mnist_ssdgm.py  Project: juho-lee/tf_practice
def test():
    saver.restore(sess, FLAGS.save_dir+'/model.ckpt')
    batch_x, batch_y = mnist.test.next_batch(100)

    """
    fig = plt.figure('original')
    plt.gray()
    plt.axis('off')
    plt.imshow(batchmat_to_tileimg(batch_x, (height, width), (10, 10)))
    fig.savefig(FLAGS.save_dir+'/original.png')

    fig = plt.figure('reconstructed')
    plt.gray()
    plt.axis('off')
    p_recon = sess.run(p_x, {x:batch_x, y:batch_y})
    plt.imshow(batchmat_to_tileimg(p_recon, (height, width), (10, 10)))
    fig.savefig(FLAGS.save_dir+'/reconstructed.png')
    """

    batch_z = np.random.normal(size=(100, 50))
    batch_y = np.zeros((100, 10))
    for i in range(10):
        batch_y[i*10:(i+1)*10, i] = 1.0
    fig = plt.figure('generated')
    plt.gray()
    plt.axis('off')
    p_gen = sess.run(p_x, {z:batch_z, y:batch_y, is_train:False})
    plt.imshow(batchmat_to_tileimg(p_gen, (height, width), (10, 10)))
    fig.savefig(FLAGS.save_dir+'/generated.png')

    plt.show()
Code Example #24
def plot(image, invert = False, cmap=plt.cm.binary):
    
    if invert:
        image = 1.0 - image  # invert intensities
    
    plt.gray()
    plt.imshow(image, cmap=cmap)
Code Example #25
def Plot_harris_points(img, filtered_coords):
    plt.figure()
    plt.gray()
    plt.imshow(img)
    plt.plot([p[1] for p in filtered_coords], [p[0] for p in filtered_coords], '.')
    plt.axis('off')
    plt.show()
Code Example #26
File: A7_Question4.py  Project: ky822/assignment7
def Mandelbrot(rows= 1000, cols = 1000):
    print " **** Question 4 ****"
    print "Please wait patiently while the calculation is underway."
    
    #initialize grid and params

    threshold = 50
    N_max = 50
    x = np.linspace(-2,1,cols)
    y = np.linspace(-1.5, 1.5,rows)
    mask = np.ones((rows,cols))

    x_elements, y_elements = np.meshgrid(x,y)

    C = x_elements + 1j*y_elements

    #initialize
    Z = C

    # ignore runtime/overflow errors; we can do this since we do not care about the actual entries of Z beyond diverging/not diverging.
    np.seterr(all='ignore')

    # loop and change mask. 
    for v in range(N_max):
        Z = Z**2 + C
        mask = mask*(np.abs(Z)<threshold)

    plt.imshow(mask, extent=[-2, 1, -1.5, 1.5])
    plt.gray()
    plt.savefig('mandelbrot.png')
Code Example #27
def pltImAndCoords(h5file, frame, coordFiles, printerFriendly=True):
	'''read image coordinates, plot them together with the raw data
	save output as .png or .tiff or display with matplotlib'''
	
	# read input data
	rawData = h5.readHDF5Frame(h5file, frame)
	
	coordDat = [(coord2im.readCoordsFile(f)) for f in coordFiles]
	
	if printerFriendly:
		rawData = -rawData
	plt.imshow(rawData, interpolation='nearest')
	plt.colorbar()
	plt.gray()
	axx = plt.axis()
	markers = ['r+', 'bx', 'go', 'k,', 'co', 'yo']
	
	for i in range(len(coordFiles)):
		c, w, h = coordDat[i]
		cc = np.array(c)
		idxs = (cc[:,2] == frame)
		cc = cc[idxs]
		ll = coordFiles[i][coordFiles[i].rfind('/')+1:]
		plt.plot(cc[:,0], cc[:,1], markers[i], label=ll)
	plt.legend() 
	plt.axis(axx)  # don't change the axis limits by plotting the coordinates
	plt.show()
Code Example #28
def display_image(img):
	if len(img) == 96*96:
		plt.imshow(to_matrix(img))
	else:
		plt.imshow(img)
	plt.gray()
	plt.show()
Code Example #29
File: slic.py  Project: omidi/CellLineageTracking
def denoising(astro):
	noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
	noisy = np.clip(noisy, 0, 1)
	fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 5), sharex=True,
						   sharey=True, subplot_kw={'adjustable': 'box-forced'})

	plt.gray()

	ax[0, 0].imshow(noisy)
	ax[0, 0].axis('off')
	ax[0, 0].set_title('noisy')
	ax[0, 1].imshow(denoise_tv_chambolle(noisy, weight=0.1, multichannel=True))
	ax[0, 1].axis('off')
	ax[0, 1].set_title('TV')
	ax[0, 2].imshow(denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15))
	ax[0, 2].axis('off')
	ax[0, 2].set_title('Bilateral')

	ax[1, 0].imshow(denoise_tv_chambolle(noisy, weight=0.2, multichannel=True))
	ax[1, 0].axis('off')
	ax[1, 0].set_title('(more) TV')
	ax[1, 1].imshow(denoise_bilateral(noisy, sigma_range=0.1, sigma_spatial=15))
	ax[1, 1].axis('off')
	ax[1, 1].set_title('(more) Bilateral')
	ax[1, 2].imshow(astro)
	ax[1, 2].axis('off')
	ax[1, 2].set_title('original')

	fig.tight_layout()

	plt.show()
Code Example #30
File: iSpex_spectrum.py  Project: dronir/iSpex-python
def plot_raw(data, spectrum=None):
    """This function plots image data and spectrum. If the spectrum is not
    given, it is computed from the image data."""
    if spectrum is None:
        spectrum = spectrum_from_raw(data)

    plot.figure()
    plot.title("Raw data with summation lines")
    plot.imshow(data)
    plot.gray()
    X = np.linspace(0,data.shape[1],1000)
    b = CURVE_B
    A = np.linspace(CURVE_A_MIN, CURVE_A_MAX, data.shape[0])
    for y in range(0, data.shape[0], 50):
        a = A[y]
        Y = a*(X-b)**2 + y
        plot.plot(X,Y.round(),color="white")
    plot.xlim(0,data.shape[1])
    plot.ylim(0,data.shape[0])
    plot.axvline(SLIT_DIVISION)
    
    plot.figure()
    plot.title("Spectrum normalized to 550nm")
    plot.xlabel("Wavelength (nm)")
    plot.ylabel("Normalized intensity")
    X = np.arange(len(spectrum))
    X = calibration(X)
    plot.plot(X,spectrum[:,0], "-", linewidth=2, color="black", label="Wide slit")
    plot.plot(X,spectrum[:,1], ":", linewidth=2, color="black", label="Narrow slit")
    plot.xlim(X.min(), X.max())
    plot.legend()
#    plot.axvline(435.8)
#    plot.axvline(611.6)
#    plot.axvline(487)
    plot.show()
Code Example #31
from sklearn.ensemble import  AdaBoostClassifier #AdaBoost
from xgboost import XGBClassifier #XGBoost
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis  
import matplotlib.pyplot as plt

# Load the data
digits = load_digits()
data = digits.data
# Data exploration
print(data.shape)
# Look at the first image
print(digits.images[0])
# The digit that the first image represents
print(digits.target[0])
# Display the first image
"""
plt.gray()
plt.imshow(digits.images[0])
plt.show()
"""

# Split the data: 25% as the test set, the rest as the training set
train_x, test_x, train_y, test_y = train_test_split(data, digits.target, test_size=0.25, random_state=33)

# Apply Z-score standardization
ss = preprocessing.StandardScaler()
train_ss_x = ss.fit_transform(train_x)
test_ss_x = ss.transform(test_x)

# Create the LR classifier
lr = LogisticRegression(solver='liblinear', multi_class='auto')  # the dataset is small, so use liblinear; for large datasets use sag or saga
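The snippet stops right after constructing the classifier. A minimal, hypothetical continuation (not part of the original), using the split and scaled arrays defined above and sklearn.metrics.accuracy_score, might look like:

from sklearn.metrics import accuracy_score

lr.fit(train_ss_x, train_y)
predict_y = lr.predict(test_ss_x)
print('LR accuracy: %.4f' % accuracy_score(test_y, predict_y))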
Code Example #32
                validation_data=(x_test, x_test),
                callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])

# decode some inputs
# note: taken from the test set

encoder = keras.Model(input_img, encoded)
encoded_imgs = encoder.predict(x_test)

#preview of encoded images
n = 10
plt.figure(figsize=(20, 8))
for i in range(1, n + 1):
    ax = plt.subplot(1, n, i)
    plt.imshow(encoded_imgs[i].reshape((4, 4 * 8)).T)
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

decoded_imgs = autoencoder.predict(x_test)

# train_loss = tf.keras.losses.mae(decoded_imgs,x_test_temp)

# plt.hist(train_loss, bins=50)
# plt.xlabel("Train loss")
# plt.ylabel("No of examples")
total_value_x_test = np.sum(x_test[0])
total_value = np.sum(decoded_imgs[0])
print("Total value test", total_value_x_test)
print("Total value", total_value)
Code Example #33

def gaussian(im):
    b = array([[2, 4, 5, 4, 2], [4, 9, 12, 9, 4], [5, 12, 15, 12, 5],
               [4, 9, 12, 9, 4], [2, 4, 5, 4, 2]]) / 159  # normalise by the kernel sum (159)
    k = zeros(im.shape)
    k[:b.shape[0], :b.shape[1]] = b
    fim = fft2(im)
    fk = fft2(k)
    fil_im = ifft2(fim * fk)
    return abs(fil_im).astype(
        int)  # return a numpy array with absolute integer values


if __name__ == "__main__":
    from sys import argv
    if len(argv) < 2:  #counting the length of arguments
        print "Usage: python %s <image>" % argv[0]
        exit()
    im = array(Image.open(argv[1]))  #taking input
    im = im[:, :, 0]
    gray()  #changing to grey
    subplot(1, 2, 1)  #creating a plot to obtain image
    imshow(im)
    axis('off')
    title('Original')
    subplot(1, 2, 2)
    imshow(gaussian(im))  #gaussian function is called here
    axis('off')
    title('Filtered')
    show()
Code Example #34
def plot_results_per_target(options):
    """
    From a folder of SPServer results, creates a results file for CASP.
    """
    # Get parameters
    input_dir = options.input_dir
    output_dir = options.output_dir
    create_directory(output_dir)
    scoring_functions = ['ES3DC', 'PAIR', 'ZES3DC', 'ZPAIR']

    # Read results
    native_results_file = os.path.join(input_dir, 'Native.csv')
    nearnative_results_file = os.path.join(input_dir, 'NearNative.csv')
    wrong_results_file = os.path.join(input_dir, 'Decoy.csv')
    native_results_df = pd.read_csv(native_results_file, sep='\t', index_col=0)
    nearnative_results_df = pd.read_csv(nearnative_results_file,
                                        sep='\t',
                                        index_col=0)
    wrong_results_df = pd.read_csv(wrong_results_file, sep='\t', index_col=0)
    nearnative_results_df['TypeStructure'] = 'Near-native'
    nearnative_results_df['Color'] = 'blue'
    wrong_results_df['TypeStructure'] = 'Wrong'
    wrong_results_df['Color'] = 'red'
    results_df = pd.concat([nearnative_results_df,
                            wrong_results_df]).reset_index(drop=True)
    new_df = results_df['PDB'].str.split('_', n=1, expand=True)
    results_df['Target'] = new_df[0]
    targets = set(results_df['Target'])
    results_df.rename(columns={
        'SPS_es3dc': 'ES3DC',
        'SPS_Pair': 'PAIR',
        'SPS_zes3dc': 'ZES3DC',
        'SPS_Zpair': 'ZPAIR',
        'GDT': 'GDT_TS',
        'TM': 'TM score'
    },
                      inplace=True)

    # Plot results per target
    results_per_target_dir = os.path.join(output_dir, 'results_per_target')
    create_directory(results_per_target_dir)
    results_per_target_file = os.path.join(output_dir,
                                           'results_per_target.txt')
    columns = [
        'Target', 'ZES3DC-GDT', 'ZES3DC-TM', 'ZES3DC-QCS', 'ZPAIR-GDT',
        'ZPAIR-TM', 'ZPAIR-QCS'
    ]
    results_per_target_df = pd.DataFrame(columns=columns)
    metric_to_top = {'GDT_TS': 100, 'TM score': 1, 'QCS': 100}
    metric_to_bottom = {'GDT_TS': 0, 'TM score': 0, 'QCS': 0}

    for target in sorted(targets):
        target_df = results_df[results_df['Target'] == target].fillna(0)
        r_values = []
        for spserver_scoring_function in ['ZES3DC', 'ZPAIR']:
            for metric in ['GDT_TS', 'TM score', 'QCS']:
                #for metric in ['GDT_TS']:

                # Calculate pearson correlation
                scores_spserver = target_df[spserver_scoring_function].astype(
                    float).tolist()
                scores_metric = target_df[metric].astype(float).tolist()
                r_value, p_value = pearsonr(scores_spserver, scores_metric)
                r_value = round(r_value, 2)
                r_values.append(r_value)
                print('Correlation for energies {} and {}: {}'.format(
                    spserver_scoring_function, metric, r_value))

                # Make plot
                output_plot = os.path.join(
                    results_per_target_dir,
                    '{}_{}_vs_{}.png'.format(target, spserver_scoring_function,
                                             metric))
                fig = pylab.figure(dpi=300)
                #sns.set_context("paper")
                #ax = sns.lmplot(x=spserver_scoring_function, y=metric, data=target_df, hue="TypeStructure", palette=['blue', 'red'], markers="+")
                ax = sns.regplot(
                    target_df[spserver_scoring_function],
                    target_df[metric],
                    color="black",
                    order=1,
                    marker="+",
                    label="R: {}".format(r_value),
                    scatter_kws={'facecolors': target_df['Color']})
                ax.set_ylim(metric_to_bottom[metric], metric_to_top[metric])

                # Make a legend
                # groupby and plot points of one color
                target_df[target_df['Color'] == 'blue'].plot(
                    kind='scatter',
                    x=spserver_scoring_function,
                    y=metric,
                    c='blue',
                    ax=ax,
                    marker="+",
                    label='Near-native',
                    zorder=3)
                target_df[target_df['Color'] == 'red'].plot(
                    kind='scatter',
                    x=spserver_scoring_function,
                    y=metric,
                    c='red',
                    ax=ax,
                    marker="+",
                    label='Wrong',
                    zorder=3)
                handles, labels = ax.get_legend_handles_labels(
                )  # How to remove the default title: https://stackoverflow.com/questions/51579215/remove-seaborn-lineplot-legend-title?rq=1
                ax.legend(handles=plt.plot([], ls="-", color='black') +
                          handles[1:],
                          labels=labels,
                          loc="best")
                #plt.title('{} vs {}'.format(spserver_scoring_function, metric), fontweight='bold')
                plt.gray()
                plt.grid(True, color='w', linestyle='-', linewidth=1, zorder=0)
                plt.gca().patch.set_facecolor('#EAEAF1')
                plt.xlabel(spserver_scoring_function, fontweight='bold')
                plt.ylabel(metric, fontweight='bold')

                pylab.savefig(output_plot, format='png')
                plt.clf()

        # Save the r values in the table
        results = [target] + r_values
        df2 = pd.DataFrame([results], columns=columns)
        results_per_target_df = pd.concat([results_per_target_df, df2], ignore_index=True)

        # Plot pair plot
        # output_plot = os.path.join(results_per_target_dir, '{}.png'.format(target))
        # fig, axs = pylab.subplots(ncols=2, dpi=300, figsize=(9, 4))
        # plt.rcParams['axes.grid'] = True
        # sns.regplot(target_df['ZES3DC'], target_df['GDT_TS'], color="black", order=1, marker="+", label="R: {}".format(r_value), scatter_kws={'facecolors':target_df['Color']}, ax=axs[0])
        # sns.regplot(target_df['ZPAIR'], target_df['GDT_TS'], color="black", order=1, marker="+", label="R: {}".format(r_value), scatter_kws={'facecolors':target_df['Color']}, ax=axs[1])
        # target_df[target_df['Color'] == 'blue'].plot(kind = 'scatter', x="ZES3DC", y="GDT_TS", c = 'blue', ax = axs[0], marker="+", label = 'Near-native', zorder = 3)
        # target_df[target_df['Color'] == 'red'].plot(kind = 'scatter', x="ZES3DC", y="GDT_TS", c = 'red', ax = axs[0], marker="+", label = 'Wrong', zorder = 3)
        # target_df[target_df['Color'] == 'blue'].plot(kind = 'scatter', x="ZPAIR", y="GDT_TS", c = 'blue', ax = axs[1], marker="+", label = 'Near-native', zorder = 3)
        # target_df[target_df['Color'] == 'red'].plot(kind = 'scatter', x="ZPAIR", y="GDT_TS", c = 'red', ax = axs[1], marker="+", label = 'Wrong', zorder = 3)
        # handles, labels = axs[0].get_legend_handles_labels() # How to remove the default title: https://stackoverflow.com/questions/51579215/remove-seaborn-lineplot-legend-title?rq=1
        # axs[0].legend(handles=plt.plot([],ls="-", color='black') + handles[1:], labels=labels, loc="best")
        # handles, labels = axs[1].get_legend_handles_labels() # How to remove the default title: https://stackoverflow.com/questions/51579215/remove-seaborn-lineplot-legend-title?rq=1
        # axs[1].legend(handles=plt.plot([],ls="-", color='black') + handles[1:], labels=labels, loc="best")
        # #plt.gray()
        # #plt.grid(True, color='w', linestyle='-', linewidth=1, zorder=0)
        # #plt.gca().patch.set_facecolor('#EAEAF1')
        # pylab.savefig(output_plot, format='png')
        # plt.clf()

    # Write results
    results_per_target_df.to_csv(results_per_target_file,
                                 sep='\t',
                                 index=False)

    return
Code Example #35
File: snr.py  Project: becca9808/vip_0_9_11_modified
def snr_peakstddev(array, source_xy, fwhm, out_coor=False, plot=False,
                   verbose=False):
    """Calculates the S/N (signal to noise ratio) of a single planet in a
    post-processed (e.g. by LOCI or PCA) frame. The signal is taken as the ratio 
    of pixel value of the planet (test speckle) and the noise computed as the 
    standard deviation of the pixels in an annulus at the same radial distance 
    from the center of the frame. The diameter of the signal aperture and the 
    annulus width is in both cases 1 FWHM ~ 1 lambda/D.
    
    Parameters
    ----------
    array : array_like, 2d
        Post-processed frame where we want to measure S/N.
    source_xy : tuple of floats
        X and Y coordinates of the planet or test speckle.
    fwhm : float
        Size in pixels of the FWHM.
    out_coor: bool, optional
        If True returns back the S/N value and the y, x input coordinates.
    plot : bool, optional
        Plots the frame and the apertures considered for clarity. 
    verbose: bool, optional
        Chooses whether to print some intermediate results or not.    
        
    Returns
    -------
    snr : float
        Value of the S/N for the given planet or test speckle.
    """     
    sourcex, sourcey = source_xy
    centery, centerx = frame_center(array)
    rad = dist(centery,centerx,sourcey,sourcex)  
    
    array = array + np.abs(array.min()) 
    inner_rad = np.round(rad) - fwhm/2
    an_coor = get_annulus(array, inner_rad, fwhm, output_indices=True)
    ap_coor = draw.circle(sourcey, sourcex, int(np.ceil(fwhm/2)))
    array2 = array.copy()
    array2[ap_coor] = array[an_coor].mean()   # we 'mask' the flux aperture
    stddev = array2[an_coor].std()
    peak = array[sourcey, sourcex] 
    snr = peak / stddev
    if verbose:
        msg = "S/N = {:.3f}, Peak px = {:.3f}, Noise = {:.3f}"
        print(msg.format(snr, peak, stddev))
    
    if plot:
        _, ax = plt.subplots(figsize=(6, 6))
        ax.imshow(array, origin='lower', interpolation='nearest')
        circ = plt.Circle((centerx, centery), radius=inner_rad, color='r', 
                          fill=False) 
        ax.add_patch(circ)
        circ2 = plt.Circle((centerx, centery), radius=inner_rad+fwhm, color='r', 
                           fill=False) 
        ax.add_patch(circ2)
        aper = plt.Circle((sourcex, sourcey), radius=fwhm/2., color='b', 
                          fill=False)   # Coordinates (X,Y)
        ax.add_patch(aper)
        plt.gray()
        plt.show()
        
    if out_coor:
        return sourcey, sourcex, snr
    else:
        return snr
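A hypothetical call for context (not part of the original module): measuring the S/N of a candidate at pixel (63, 62) in an assumed post-processed frame array, with a PSF FWHM of 4.2 pixels:

value = snr_peakstddev(frame, source_xy=(63, 62), fwhm=4.2, plot=True, verbose=True)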
Code Example #36
File: functions.py  Project: lucasplagwitz/recon
def draw_images(img, name, data_output_path, vmin=0, vmax=1):
    plt.gray()
    plt.imshow(img, vmin=vmin, vmax=vmax)
    plt.axis('off')
    plt.savefig(data_output_path + name, bbox_inches='tight', pad_inches=0)
    plt.close()
Code Example #37
from keras.layers import Input, Dense
from keras.models import Model
from keras.datasets import mnist
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
# this is the size of our encoded representations
encoding_dim = 32  # 32 floats -> compression factor 24.5, assuming the input is 784 floats
# this is our input placeholder; 784 = 28 x 28
input_img = Input(shape=(784, ))
my_epochs = 100
# "encoded" is the encoded representation of the inputs
encoded = Dense(encoding_dim * 4, activation='relu')(input_img)
encoded = Dense(encoding_dim * 2, activation='relu')(encoded)
encoded = Dense(encoding_dim, activation='relu')(encoded)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(encoding_dim * 2, activation='relu')(encoded)
decoded = Dense(encoding_dim * 4, activation='relu')(decoded)
decoded = Dense(784, activation='sigmoid')(decoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# Separate Encoder model
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# Separate Decoder model
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim, ))
# retrieve the layers of the autoencoder model
decoder_layer1 = autoencoder.layers[-3]
decoder_layer2 = autoencoder.layers[-2]
Code Example #38
def solex_proc(serfile, shift, flag_display, ratio_fixe):
    """
    ----------------------------------------------------------------------------
    Reconstuit l'image du disque a partir de l'image moyenne des trames et 
    des trames extraite du fichier ser
    avec un fit polynomial
    Corrige de mauvaises lignes et transversallium
 
    basefich: nom du fichier de base de la video sans extension, sans repertoire
    shift: ecart en pixel par rapport au centre de la raie pour explorer 
    longueur d'onde decalée
    ----------------------------------------------------------------------------
    """
    plt.gray()  #palette de gris si utilise matplotlib pour visu debug

    global mylog
    mylog = []

    WorkDir = os.path.dirname(serfile) + "/"
    os.chdir(WorkDir)
    base = os.path.basename(serfile)
    basefich = os.path.splitext(base)[0]

    # display (or not) the disk image as it is built in real time
    # faster if not displayed; hard-coded in the script
    #print(flag_display)
    #flag_display=False
    """
    ----------------------------------------------------------------------------
    Compute the offset polynomial on an image at the centre of the sequence
    ----------------------------------------------------------------------------
    """

    savefich = basefich + '_mean'
    ImgFile = savefich + '.fits'
    #ouvre image _mean qui la moyenne de toutes les trames spectrales du fichier ser
    hdulist = fits.open(ImgFile)
    hdu = hdulist[0]
    myspectrum = hdu.data
    ih = hdu.header['NAXIS2']
    iw = hdu.header['NAXIS1']
    myimg = np.reshape(myspectrum, (ih, iw))

    y1, y2 = detect_bord(myimg, axis=1, offset=5)
    toprint = 'Limites verticales y1,y2 : ' + str(y1) + ' ' + str(y2)
    print(toprint)
    mylog.append(toprint + '\n')
    PosRaieHaut = y1
    PosRaieBas = y2
    """
    -----------------------------------------------------------
    Find the intensity minima of the line
    -----------------------------------------------------------
    """
    # build the array of line minima, from the top down to the bottom
    """
    MinOfRaie=[]
    
    for i in range(PosRaieHaut,PosRaieBas):
        line_h=myimg[i,:]
        MinX=line_h.argmin()
        MinOfRaie.append([MinX,i])
        #print('MinOfRaie x,y', MinX,i)
        
    np_m=np.asarray(MinOfRaie)
    xm,ym=np_m.T
    #best fit d'un polynome degre 2, les lignes y sont les x et les colonnes x sont les 
    p=np.polyfit(ym,xm,2)

    """
    MinX = np.argmin(myimg, axis=1)
    MinX = MinX[PosRaieHaut:PosRaieBas]
    IndY = np.arange(PosRaieHaut, PosRaieBas, 1)
    LineRecal = 1
    #best fit d'un polynome degre 2, les lignes y sont les x et les colonnes x sont les y
    p = np.polyfit(IndY, MinX, 2)
    #p=np.polyfit(ym,xm,2)

    #calcul des x colonnes pour les y lignes du polynome
    a = p[0]
    b = p[1]
    c = p[2]
    fit = []
    #ecart=[]
    for y in range(0, ih):
        x = a * y**2 + b * y + c
        deci = x - int(x)
        fit.append([int(x) - LineRecal, deci, y])
        #ecart.append([x-LineRecal,y])

    toprint = 'Coef A2,A1,A0 :' + str(a) + ' ' + str(b) + ' ' + str(c)
    print(toprint)
    mylog.append(toprint + '\n')

    np_fit = np.asarray(fit)
    xi, xdec, y = np_fit.T
    xdec = xi + xdec + LineRecal
    xi = xi + LineRecal
    #imgplot1 = plt.imshow(myimg)
    #plt.scatter(xm,ym,s=0.1, marker='.', edgecolors=('blue'))
    #plt.scatter(xi,y,s=0.1, marker='.', edgecolors=('red'))
    #plt.scatter(xdec,y,s=0.1, marker='.', edgecolors=('green'))

    #plt.show()

    #on sauvegarde les params de reconstrution
    #reconfile='recon_'+basefich+'.txt'
    #np.savetxt(reconfile,ecart,fmt='%f',header='fichier recon',footer=str(LineRecal))
    """
    ----------------------------------------------------------------------------
    ----------------------------------------------------------------------------
    Apply the offsets to every line of each frame of the sequence
    ----------------------------------------------------------------------------
    ----------------------------------------------------------------------------
    """

    #ouverture et lecture de l'entete du fichier ser
    f = open(serfile, "rb")
    b = np.fromfile(serfile, dtype='int8', count=4)
    offset = 14

    b = np.fromfile(serfile, dtype=np.uint32, count=1, offset=offset)
    #print (LuID[0])
    offset = offset + 4

    b = np.fromfile(serfile, dtype='uint32', count=1, offset=offset)
    #print(ColorID[0])
    offset = offset + 4

    b = np.fromfile(serfile, dtype='uint32', count=1, offset=offset)
    #print(little_Endian[0])
    offset = offset + 4

    Width = np.fromfile(serfile, dtype='uint32', count=1, offset=offset)
    Width = Width[0]
    #print('Width :', Width)
    offset = offset + 4

    Height = np.fromfile(serfile, dtype='uint32', count=1, offset=offset)
    Height = Height[0]
    #print('Height :',Height)
    offset = offset + 4

    PixelDepthPerPlane = np.fromfile(serfile,
                                     dtype='uint32',
                                     count=1,
                                     offset=offset)
    PixelDepthPerPlane = PixelDepthPerPlane[0]
    #print('PixelDepth :',PixelDepthPerPlane)
    offset = offset + 4

    FrameCount = np.fromfile(serfile, dtype='uint32', count=1, offset=offset)
    FrameCount = FrameCount[0]
    #print('nb de frame :',FrameCount)

    toprint = ('width, height : ' + str(Width) + ' ' + str(Height))
    print(toprint)
    mylog.append(toprint + '\n')
    toprint = ('Nb frame : ' + str(FrameCount))
    print(toprint)
    mylog.append(toprint + '\n')

    count = Width * Height  # Nombre d'octet d'une trame
    FrameIndex = 1  # Index de trame, on evite les deux premieres
    offset = 178  # Offset de l'entete fichier ser

    if Width > Height:
        flag_rotate = True
        ih = Width
        iw = Height
    else:
        flag_rotate = False
        iw = Width
        ih = Height

    #debug
    ok_resize = True

    if flag_display:
        cv2.namedWindow('disk', cv2.WINDOW_NORMAL)
        FrameMax = FrameCount
        cv2.resizeWindow('disk', FrameMax, ih)
        cv2.moveWindow('disk', 100, 0)
        #initialize le tableau qui va recevoir la raie spectrale de chaque trame
        Disk = np.zeros((ih, FrameMax), dtype='uint16')

        cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        cv2.moveWindow('image', 0, 0)
        cv2.resizeWindow('image', int(iw), int(ih))
    else:
        #Disk=np.zeros((ih,1), dtype='uint16')
        FrameMax = FrameCount
        Disk = np.zeros((ih, FrameMax), dtype='uint16')

    # init vector to speed up from Andrew Smiths
    ind_l = (np.asarray(fit)[:, 0] + np.ones(ih) *
             (LineRecal + shift)).astype(int)
    ind_r = (ind_l + np.ones(ih)).astype(int)
    left_weights = np.ones(ih) - np.asarray(fit)[:, 1]
    right_weights = np.ones(ih) - left_weights

    # lance la reconstruction du disk a partir des trames
    while FrameIndex < FrameCount:
        #t0=float(time.time())
        img = np.fromfile(serfile, dtype='uint16', count=count, offset=offset)
        img = np.reshape(img, (Height, Width))

        if flag_rotate:
            img = np.rot90(img)

        if flag_display:
            cv2.imshow('image', img)
            if cv2.waitKey(1) == 27:
                cv2.destroyAllWindows()
                sys.exit()

        #new code from Andraw Smiths to speed up reconstruction
        # improve speed here
        left_col = img[np.arange(ih), ind_l]
        right_col = img[np.arange(ih), ind_r]
        IntensiteRaie = left_col * left_weights + right_col * right_weights

        #ajoute au tableau disk
        Disk[:, FrameIndex] = IntensiteRaie
        """
        ---------------------------------------------------------
        original code
        ---------------------------------------------------------
        IntensiteRaie=np.empty(ih,dtype='uint16')
        
        for j in range(0,ih):
            dx=fit[j][0]+shift
            deci=fit[j][1]
            try:
                IntensiteRaie[j]=(img[j,LineRecal+dx] *(1-deci)+deci*img[j,LineRecal+dx+1])
                if img[j,LineRecal+dx]>=65000:
                    IntensiteRaie[j]=64000
                    #print ('intensite : ', img[j,LineRecal+dx])
            except:
                IntensiteRaie[j]=IntensiteRaie[j-1]

        #ajoute au tableau disk 

        Disk[:,FrameIndex]=IntensiteRaie
        
        #cv2.resizeWindow('disk',i-i1,ih)
        if ok_resize==False:
            Disk=Disk[1:,FrameIndex:]
            #Disp=Disk
        """
        if flag_display and FrameIndex % 5 == 0:
            cv2.imshow('disk', Disk)
            if cv2.waitKey(1) == 27:  # exit if Escape is hit
                cv2.destroyAllWindows()
                sys.exit()

        FrameIndex = FrameIndex + 1
        offset = 178 + FrameIndex * count * 2

    #ferme fichier ser
    f.close()

    #sauve fichier disque reconstruit
    hdu.header['NAXIS1'] = FrameCount - 1
    DiskHDU = fits.PrimaryHDU(Disk, header=hdu.header)
    DiskHDU.writeto(basefich + '_img.fits', overwrite='True')

    if flag_display:
        cv2.destroyAllWindows()
    """
    --------------------------------------------------------------------
    --------------------------------------------------------------------
    move on to bad-line detection and geometric correction
    --------------------------------------------------------------------
    --------------------------------------------------------------------
    """
    iw = Disk.shape[1]
    ih = Disk.shape[0]
    img = Disk

    y1, y2 = detect_bord(img, axis=1, offset=5)  # bords verticaux

    #detection de mauvaises lignes

    # somme de lignes projetées sur axe Y
    ysum = np.mean(img, 1)
    #plt.plot(ysum)
    #plt.show()
    # ne considere que les lignes du disque avec marge de 15 lignes
    ysum = ysum[y1 + 15:y2 - 15]

    # filtrage sur fenetre de 31 pixels, polynome ordre 3 (etait 101 avant)
    yc = savgol_filter(ysum, 31, 3)

    # divise le profil somme par le profil filtré pour avoir les hautes frequences
    hcol = np.divide(ysum, yc)

    # met à zero les pixels dont l'intensité est inferieur à 1.03 (3%)
    hcol[abs(hcol - 1) <= 0.03] = 0

    # tableau de zero en debut et en fin pour completer le tableau du disque
    a = [0] * (y1 + 15)
    b = [0] * (ih - y2 + 15)
    hcol = np.concatenate((a, hcol, b))
    #plt.plot(hcol)
    #plt.show()

    # creation du tableau d'indice des lignes a corriger
    l_col = np.where(hcol != 0)
    listcol = l_col[0]

    # correction de lignes par filtrage median 13 lignes, empririque
    for c in listcol:
        m = img[c - 7:c + 6, ]
        s = np.median(m, 0)
        img[c - 1:c, ] = s

    #sauvegarde le fits
    DiskHDU = fits.PrimaryHDU(img, header=hdu.header)
    DiskHDU.writeto(basefich + '_corr.fits', overwrite='True')
    """
    ------------------------------------------------------------
    compute the geometry if the solar limbs are visible,
    otherwise apply a factor x=0.5
    ------------------------------------------------------------
    """

    img2 = np.copy(img)
    print()

    #methode des limbes
    #NewImg, newiw,flag_nobords,cercle =circularise(img2,iw,ih,ratio_fixe)

    #methode fit ellipse
    zexcl = 0.01  #zone d'exclusion des points contours
    EllipseFit, section = detect_fit_ellipse(img, y1, y2, zexcl)
    ratio = EllipseFit[2] / EllipseFit[1]
    NewImg, newiw = circularise2(img2, iw, ih, ratio)
    if section == 0:
        flag_nobords = True
    else:
        flag_nobords = False

    # sauve l'image circularisée
    frame = np.array(NewImg, dtype='uint16')
    hdu.header['NAXIS1'] = newiw
    DiskHDU = fits.PrimaryHDU(frame, header=hdu.header)
    DiskHDU.writeto(basefich + '_circle.fits', overwrite='True')

    #on fit un cercle !!!
    #CercleFit=detect_fit_cercle (frame,y1,y2)
    #print(CercleFit)
    #print()

    print()
    """
    NewImg, newiw =circularise2(img,iw,ih,ratio)
    frame=np.array(NewImg, dtype='uint16')
    hdu.header['NAXIS1']=newiw
    DiskHDU=fits.PrimaryHDU(frame,header=hdu.header)
    DiskHDU.writeto(basefich+'_circle2.fits',overwrite='True')
    #on fit un cercle
    CercleFit=detect_fit_cercle (frame,y1,y2)
    print(CercleFit)
    print()
    
    #EllipseFit,section=detect_fit_ellipse(frame,y1,y2,0.01)
    #x0=EllipseFit[0][0]
    #y0=EllipseFit[0][1]
    #diam_cercle=EllipseFit[2]
    #cercle=[x0,y0, diam_cercle]
    """
    """
    --------------------------------------------------------------
    continue with the transversallium correction
    --------------------------------------------------------------
    """

    # on cherche la projection de la taille max du soleil en Y
    y1, y2 = detect_bord(frame, axis=1, offset=0)
    #print ('flat ',y1,y2)
    # si mauvaise detection des bords en x alors on doit prendre toute l'image
    if flag_nobords:
        ydisk = np.median(img, 1)
    else:
        #plt.hist(frame.ravel(),bins=1000,)
        #plt.show()
        #plt.hist(frame.ravel(),bins=1000,cumulative=True)
        #plt.show()
        #seuil_bas=np.percentile(frame,25)
        seuil_haut = np.percentile(frame, 97)
        #print ('Seuils de flat: ',seuil_bas, seuil_haut)
        #print ('Seuils bas x: ',seuil_bas*4)
        #print ('Seuils haut x: ',seuil_haut*0.25)
        #myseuil=seuil_haut*0.2
        myseuil = seuil_haut * 0.5
        # filtre le profil moyen en Y en ne prenant que le disque
        ydisk = np.empty(ih + 1)
        for j in range(0, ih):
            temp = np.copy(frame[j, :])
            temp = temp[temp > myseuil]
            if len(temp) != 0:
                ydisk[j] = np.median(temp)
            else:
                ydisk[j] = 1
    y1 = y1
    y2 = y2
    ToSpline = ydisk[y1:y2]

    Smoothed2 = savgol_filter(ToSpline, 301,
                              3)  # window size, polynomial order
    #best fit d'un polynome degre 4
    np_m = np.asarray(ToSpline)
    ym = np_m.T
    xm = np.arange(y2 - y1)
    p = np.polyfit(xm, ym, 4)

    #calcul des x colonnes pour les y lignes du polynome
    a = p[0]
    b = p[1]
    c = p[2]
    d = p[3]
    e = p[4]
    Smoothed = []
    for x in range(0, y2 - y1):
        y = a * x**4 + b * x**3 + c * x**2 + d * x + e
        Smoothed.append(y)
    """
    plt.plot(ToSpline)
    plt.plot(Smoothed)
    plt.plot(Smoothed2)
    plt.show()
    """

    # divise le profil reel par son filtre ce qui nous donne le flat
    hf = np.divide(ToSpline, Smoothed2)

    # elimine possible artefact de bord
    hf = hf[5:-5]

    #reconstruit le tableau du pofil complet
    a = [1] * (y1 + 5)
    b = [1] * (ih - y2 + 5)
    hf = np.concatenate((a, hf, b))

    Smoothed = np.concatenate((a, Smoothed, b))
    ToSpline = np.concatenate((a, ToSpline, b))
    Smoothed2 = np.concatenate((a, Smoothed2, b))
    """
    plt.plot(ToSpline)
    plt.plot(Smoothed2)
    plt.show()
    
    plt.plot(hf)
    plt.show()
    """

    # genere tableau image de flat
    flat = []
    for i in range(0, newiw):
        flat.append(hf)

    np_flat = np.asarray(flat)
    flat = np_flat.T
    #evite les divisions par zeros...
    flat[flat == 0] = 1
    """
    plt.imshow(flat)
    plt.show()
    """

    # divise image par le flat
    BelleImage = np.divide(frame, flat)
    frame = np.array(BelleImage, dtype='uint16')
    # sauvegarde de l'image deflattée
    DiskHDU = fits.PrimaryHDU(frame, header=hdu.header)
    DiskHDU.writeto(basefich + '_flat.fits', overwrite='True')
    """
    -----------------------------------------------------------------------
    disk tilt correction
    -----------------------------------------------------------------------
    """
    img = frame

    if not (flag_nobords) and abs(section) < 0.1:
        # correction de slant uniquement si on voit les limbes droit/gauche
        # trouve les coordonnées y des bords du disque dont on a les x1 et x2
        # pour avoir les coordonnées y du grand axe horizontal
        # on cherche la projection de la taille max du soleil en Y et en X
        BackGround = 1000
        x1, x2 = detect_bord(frame, axis=0, offset=0)
        y_x1, y_x2 = detect_y_of_x(img, x1, x2)

        # test que le grand axe de l'ellipse est horizontal
        if abs(y_x1 - y_x2) > 5:
            #calcul l'angle et fait une interpolation de slant
            dy = (y_x2 - y_x1)
            dx = (x2 - x1)
            TanAlpha = (-dy / dx)
            AlphaRad = math.atan(TanAlpha)
            AlphaDeg = math.degrees(AlphaRad)
            print('tan ', TanAlpha)
            toprint = 'Angle slant limbes: ' + "{:+.2f}".format(AlphaDeg)
            print(toprint)
            mylog.append(toprint + '\n')

            #decale lignes images par rapport a x1
            colref = x1
            NewImg = np.empty((ih, newiw))
            for i in range(0, newiw):
                x = img[:, i]
                NewImg[:, i] = x
                y = np.arange(0, ih)
                dy = (i - colref) * TanAlpha
                #print (dy)
                #ycalc=[]
                ycalc = y + np.ones(ih) * dy  # improvements TheSmiths
                #x et y sont les valeurs de la ligne originale avant decalge
                #for j in range(0, len(y)):
                #ycalc.append(y[j]+dy)
                f = interp1d(ycalc,
                             x,
                             kind='linear',
                             fill_value=(BackGround, BackGround),
                             bounds_error=False)
                xcalc = f(y)
                NewLine = xcalc
                NewImg[:, i] = NewLine
            NewImg[NewImg <= 0] = 0  #modif du 19/05/2021 etait a 1000
            img = NewImg
        """
        #decale lignes images par rapport a x1
        AlphaRad=np.deg2rad(angle_slant)
        TanAlpha=math.tan(AlphaRad)
        print()
        print ('tan ellipse', TanAlpha)
        print ('Angle_slant ellipse :',angle_slant)
        colref=x1
        NewImg=np.empty((ih,newiw))
        for i in range(0,newiw):
            x=img[:,i]
            NewImg[:,i]=x
            y=np.arange(0,ih)
            dy=(i-colref)*TanAlpha
            #print (dy)
            ycalc=[]
            #x et y sont les valeurs de la ligne originale avant decalge
            for j in range(0, len(y)):
                ycalc.append(y[j]+dy)
            f=interp1d(ycalc,x,kind='linear',fill_value=(BackGround,BackGround),bounds_error=False)
            xcalc=f(y)
            NewLine=xcalc
            NewImg[:,i]=NewLine
        NewImg[NewImg<=0]=0  #modif du 19/05/2021 etait a 1000
        img=NewImg
        """

    # refait un calcul de mise a l'echelle
    # le slant peut avoir legerement modifié la forme

    #methode fit ellipse
    print()
    print('recircularise apres slant')
    if (abs(section) < 0.1):
        zexcl = 0.1
    else:
        zexcl = 0.01

    EllipseFit, section = detect_fit_ellipse(img, y1, y2, zexcl)
    ratio = EllipseFit[2] / EllipseFit[1]
    NewImg, newiw = circularise2(img, newiw, ih, ratio)
    flag_nobords = False
    print()
    img = np.copy(NewImg)

    # run the ellipse fit once more
    print('final ellipse')
    EllipseFit, section = detect_fit_ellipse(img, y1, y2, zexcl)
    xc = int(EllipseFit[0][0])
    yc = int(EllipseFit[0][1])
    wi = int(EllipseFit[1])
    he = int(EllipseFit[2])
    cercle = [xc, yc, wi, he]
    print()

    # save the final image as a FITS file
    frame = np.array(img, dtype='uint16')
    DiskHDU = fits.PrimaryHDU(frame, header=hdu.header)
    DiskHDU.writeto(basefich + '_recon.fits', overwrite=True)

    with open(basefich + '.txt', "w") as logfile:
        logfile.writelines(mylog)

    return frame, hdu.header, cercle
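# Editor's sketch (not the author's code): the per-column interp1d loop above
# implements a shear of the image by TanAlpha. Assuming the same img, TanAlpha,
# colref and BackGround values, an equivalent vectorized correction with
# scipy.ndimage could look like this:
import numpy as np
from scipy import ndimage

def correct_slant(img, tan_alpha, colref, background=1000):
    # output[y, x] = input[y - (x - colref) * tan_alpha, x], i.e. the same shift
    # as above, with linear interpolation and background fill outside the frame
    matrix = np.array([[1.0, -tan_alpha],
                       [0.0, 1.0]])
    offset = np.array([colref * tan_alpha, 0.0])
    return ndimage.affine_transform(img, matrix, offset=offset, order=1,
                                    mode='constant', cval=background)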
コード例 #39
0
def main():
    plt.gray()
    exp = WaveletDenoise(imgutils.Image(imgutils.galaxy()[::-1]))
    exp.gui.start()
コード例 #40
0
# Read in the text files
inputimage = np.loadtxt("inputData.txt")
cpuimage = np.loadtxt("cpuoutput.txt")
gpuimage = np.loadtxt("gpuoutput.txt")


test = np.random.random((100,101))

#Display the arrays
#fig, ax = plt.subplots();
#ax.imshow(npcpuimage)
#plt.show()

f0 = plt.figure()
plt.imshow(inputimage,cmap=plt.gray())
plt.colorbar()
plt.suptitle("Input Image")


f1 = plt.figure()
plt.imshow(cpuimage,cmap=plt.gray())
plt.colorbar()
plt.suptitle("CPU Output")


f2 = plt.figure()
plt.imshow(gpuimage,cmap=plt.gray())
plt.colorbar()
plt.suptitle("GPU Output")
コード例 #41
0
def show(img):
    plt.gray()
    plt.imshow(img)
    plt.show()
コード例 #42
0
early = first
i = 0
while i < 30:
    current = updateData(kspace, pattern, current, 1)
    current = waveletShrinkage(current, 0.001)
    if (i == 0):
        early = current
    i += 1

#current = updateData(kspace, current, 0.1)

# todo:
# - implement with conjugate transpose
# - check shrinkage with 0
# - create a smaller phantom to speed up computation

fig = pyplot.figure(dpi=90)
pyplot.subplot(221)
pyplot.set_cmap(pyplot.gray())
pyplot.imshow(abs(recon))
pyplot.subplot(222)
pyplot.set_cmap(pyplot.gray())
pyplot.imshow(abs(first))
pyplot.subplot(223)
pyplot.set_cmap(pyplot.gray())
pyplot.imshow(abs(early))
pyplot.subplot(224)
pyplot.set_cmap(pyplot.gray())
pyplot.imshow(abs(current))
pyplot.show()
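# Editor's note: waveletShrinkage and updateData are defined elsewhere and not
# shown in this snippet. A minimal soft-thresholding sketch (an assumption, not
# the original implementation) using PyWavelets could look like this:
import pywt

def waveletShrinkage(image, threshold, wavelet='db4', level=3):
    # decompose, soft-threshold the detail coefficients, then reconstruct
    coeffs = pywt.wavedec2(image, wavelet, level=level)
    shrunk = [coeffs[0]] + [
        tuple(pywt.threshold(c, threshold, mode='soft') for c in detail)
        for detail in coeffs[1:]
    ]
    return pywt.waverec2(shrunk, wavelet)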
コード例 #43
0
print("After crop: ", img.shape, img.dtype)
pyplot.figure()
pyplot.imshow(img)
pyplot.axis('on')
pyplot.title('Cropped')

# switch to CHW
img = img.swapaxes(1, 2).swapaxes(0, 1)
pyplot.figure()
for i in range(3):
    # For some reason, pyplot subplot follows Matlab's indexing
    # convention (starting with 1). Well, we'll just follow it...
    pyplot.subplot(1, 3, i + 1)
    pyplot.imshow(img[i])
    pyplot.axis('off')
    pyplot.gray()
    pyplot.title('RGB channel %d' % (i + 1))

# switch to BGR
img = img[(2, 1, 0), :, :]

# remove mean for better results
img = img * 255 - mean

# add batch size
img = img[np.newaxis, :, :, :].astype(np.float32)
print("NCHW: ", img.shape, img.dtype)

# initialize the neural net
with open(INIT_NET, 'rb') as f:
    init_net = f.read()
コード例 #44
0
    kernel = zeros(im.shape)
    kernel[:b.shape[0], :b.shape[1]] = b

    fim = fft2(im)
    fkernel = fft2(kernel)
    fil_im = ifft2(fim * fkernel)

    return abs(fil_im).astype(int)


if __name__ == "__main__":
    from sys import argv
    if len(argv) < 2:
        print "Usage: python %s <image>" % argv[0]
        exit()
    im = array(Image.open(argv[1]))
    im = im[:, :, 0]
    gray()

    subplot(1, 2, 1)
    imshow(im)
    axis('off')
    title('Original')

    subplot(1, 2, 2)
    imshow(gaussian(im))
    axis('off')
    title('Filtered')

    show()
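# Editor's note: this fragment relies on star imports made earlier in the source
# file (e.g. from numpy import *, from numpy.fft import fft2, ifft2, from pylab
# import *, from PIL import Image) and on a spatial kernel b built outside the
# shown code. A plausible stand-in for b (an assumption) is a small normalized
# 2-D Gaussian:
import numpy as np

def gaussian_kernel(size=15, sigma=3.0):
    ax = np.arange(size) - size // 2
    xx, yy = np.meshgrid(ax, ax)
    b = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return b / b.sum()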
コード例 #45
0
ファイル: t-SNE.py プロジェクト: kwonjunn01/Visualization
# create subplot object

fig, axes = plt.subplots(
    2,
    5,  # allocate a 2x5 grid of subplots to axes
    subplot_kw={
        'xticks': (),
        'yticks': ()
    })

for ax, img in zip(axes.ravel(), digits.images):

    ax.imshow(img)

plt.gray()  # use the gray colormap

plt.show()  # display the figure

from sklearn.manifold import TSNE

# create model and learning

tsne = TSNE(random_state=0)

digits_tsne = tsne.fit_transform(digits.data)

colors = [
    "#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525", "#A83683",
    "#4E655E", "#853541", "#3A3120", "#535D8E"
]
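# Editor's sketch of a plausible continuation (not part of the original snippet):
# draw each digit at its t-SNE coordinates, colored by class, using digits_tsne
# and the colors list defined above.
plt.figure(figsize=(10, 10))
plt.xlim(digits_tsne[:, 0].min(), digits_tsne[:, 0].max() + 1)
plt.ylim(digits_tsne[:, 1].min(), digits_tsne[:, 1].max() + 1)
for i in range(len(digits.data)):
    plt.text(digits_tsne[i, 0], digits_tsne[i, 1], str(digits.target[i]),
             color=colors[digits.target[i]],
             fontdict={'weight': 'bold', 'size': 9})
plt.xlabel("t-SNE feature 0")
plt.ylabel("t-SNE feature 1")
plt.show()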
コード例 #46
0
 def decodeQuad(self, quads, gray):
     """
     decode the Quad
     :param quads: array of quad which have four points
     :param gray: gray picture
     :return: array of detection
     """
     detections = []
     points = []
     whitepoint = []
     for quad in quads:
         dd = 2 * self._blackBorder + self._d  # tagFamily.d
         blackvalue = []
         whitevalue = []
         tagcode = 0
         for iy in range(dd):
             for ix in range(dd):
                 x = (ix + 0.5) / dd
                 y = (iy + 0.5) / dd
                 point = np.int32(self._interpolate(quad, (x, y)))
                 points.append(point)
                 value = gray[point[0], point[1]]
                 if ((iy == 0 or iy == dd - 1)
                         or (ix == 0 or ix == dd - 1)):
                     blackvalue.append(value)
                 elif ((iy == 1 or iy == dd - 2)
                       or (ix == 1 or ix == dd - 2)):
                     whitevalue.append(value)
                 else:
                     continue
         threshold = (np.average(blackvalue) + np.average(whitevalue)) / 2
         for iy in range(dd):
             for ix in range(dd):
                 if ((iy == 0 or iy == dd - 1)
                         or (ix == 0 or ix == dd - 1)):
                     continue
                 x = (ix + 0.5) / dd
                 y = (iy + 0.5) / dd
                 point = np.int32(self._interpolate(quad, (x, y)))
                 value = gray[point[0], point[1]]
                 tagcode = tagcode << 1
                 if value > threshold:
                     if (self._debug):
                         whitepoint.append(point)
                     tagcode |= 1
         tagcode = hex(tagcode)
         detection = self._decode(tagcode, quad)
         if detection.good == True:
             detection.addHomography()
             detections.append(detection)
     if self._debug and len(points) != 0:
         plt.figure().set_size_inches(19.2, 10.8)
         plt.subplot(121)
         showpoint = np.array(points)
         plt.plot(showpoint[:, 1], showpoint[:, 0], 'rx')
         plt.imshow(gray)
         plt.gray()
         showpoint = np.array(whitepoint)
         plt.subplot(122)
         plt.plot(showpoint[:, 1], showpoint[:, 0], 'rx')
         plt.imshow(gray)
         plt.gray()
         plt.show()
     return detections
コード例 #47
0
def imshow(image):
    """Show a [-1.0, 1.0] image."""
    if image.ndim == 3 and image.shape[2] == 1:  # for gray image
        image = np.array(image, copy=True)
        image.shape = image.shape[0:2]
    plt.imshow(to_range(image), cmap=plt.gray())
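# Editor's note: to_range is not defined in this snippet. A hypothetical helper
# that rescales a [-1.0, 1.0] image into [0.0, 1.0] for display could be:
import numpy as np

def to_range(image, min_value=0.0, max_value=1.0):
    # hypothetical implementation; the original may clip or use other bounds
    return (np.asarray(image) + 1.0) / 2.0 * (max_value - min_value) + min_value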
コード例 #48
0
def plot_generated_batch(generator_model):
    import matplotlib.pyplot as plt
    index = np.arange(Cloud.shape[0])
    np.random.shuffle(index)
    index = list(np.sort(index[:10]))
    future_frames = generator_model.predict([Cloud[index], Cloud_[index]],
                                            batch_size=10,
                                            verbose=1)
    plt.figure(figsize=(20, 20))
    t = 0
    for i in index:
        ax = plt.subplot(10, 9, t * 9 + 1)
        plt.imshow(Noncloud[i, 0].reshape((
            64, 64,
            IMAGE_CHANNEL)) if IMAGE_CHANNEL > 1 else Noncloud[i, 0].reshape((
                64, 64)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(10, 9, t * 9 + 2)
        plt.imshow(Noncloud[i, 1].reshape((
            64, 64,
            IMAGE_CHANNEL)) if IMAGE_CHANNEL > 1 else Noncloud[i, 1].reshape((
                64, 64)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(10, 9, t * 9 + 3)
        plt.imshow(Noncloud[i, 2].reshape((
            64, 64,
            IMAGE_CHANNEL)) if IMAGE_CHANNEL > 1 else Noncloud[i, 2].reshape((
                64, 64)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(10, 9, t * 9 + 4)
        plt.imshow(Noncloud[i, 3].reshape((
            64, 64,
            IMAGE_CHANNEL)) if IMAGE_CHANNEL > 1 else Noncloud[i, 3].reshape((
                64, 64)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        ax = plt.subplot(10, 9, t * 9 + 5)
        plt.imshow(future_frames[t, 0].reshape((
            64, 64, IMAGE_CHANNEL
        )) if IMAGE_CHANNEL > 1 else future_frames[t, 0].reshape((64, 64)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(10, 9, t * 9 + 6)
        plt.imshow(future_frames[t, 1].reshape((
            64, 64, IMAGE_CHANNEL
        )) if IMAGE_CHANNEL > 1 else future_frames[t, 1].reshape((64, 64)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(10, 9, t * 9 + 7)
        plt.imshow(future_frames[t, 2].reshape((
            64, 64, IMAGE_CHANNEL
        )) if IMAGE_CHANNEL > 1 else future_frames[t, 2].reshape((64, 64)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(10, 9, t * 9 + 8)
        plt.imshow(future_frames[t, 3].reshape((
            64, 64, IMAGE_CHANNEL
        )) if IMAGE_CHANNEL > 1 else future_frames[t, 3].reshape((64, 64)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        ax = plt.subplot(10, 9, t * 9 + 9)
        plt.imshow(Cloud_[i, 0].reshape((
            64, 64,
            IMAGE_CHANNEL)) if IMAGE_CHANNEL > 1 else Cloud_[i,
                                                             0].reshape((64,
                                                                         64)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        t += 1
    plt.show()
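# Editor's sketch (same behavior assumed, same global arrays Cloud, Cloud_,
# Noncloud and IMAGE_CHANNEL): the nine nearly identical subplot blocks above
# can be folded into a small helper plus loops.
def _show_frame(position, frame):
    ax = plt.subplot(10, 9, position)
    shape = (64, 64, IMAGE_CHANNEL) if IMAGE_CHANNEL > 1 else (64, 64)
    plt.imshow(frame.reshape(shape))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

def plot_generated_batch_compact(generator_model):
    index = np.arange(Cloud.shape[0])
    np.random.shuffle(index)
    index = list(np.sort(index[:10]))
    future_frames = generator_model.predict([Cloud[index], Cloud_[index]],
                                            batch_size=10, verbose=1)
    plt.figure(figsize=(20, 20))
    for t, i in enumerate(index):
        for k in range(4):
            _show_frame(t * 9 + 1 + k, Noncloud[i, k])       # input frames
        for k in range(4):
            _show_frame(t * 9 + 5 + k, future_frames[t, k])  # predicted frames
        _show_frame(t * 9 + 9, Cloud_[i, 0])                 # reference frame
    plt.show()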
コード例 #49
0
def cae_mnist_encoding(train_percentage=0.1, test_percentage=0.1):
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)

    train_m = int(mnist.train.num_examples * train_percentage)
    test_m = int(mnist.test.num_examples * test_percentage)
    validation_m = mnist.validation.num_examples

    auto = ContractiveAutoencoder()

    learning_rate = 0.001
    optimizer_loss = tf.train.AdamOptimizer(learning_rate).minimize(auto.loss)

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    batch_size = 100
    epochs = 5
    minimum_loss = np.inf
    for epoch_i in range(epochs):
        val_loss_list = []

        for batch_i in range(train_m // batch_size):
            batch_x, batch_y = mnist.train.next_batch(batch_size)

            sess.run(optimizer_loss,
                     feed_dict={
                         auto.x: batch_x,
                         auto.y: batch_y
                     })

        for batch_i in range(validation_m // batch_size):
            batch_x, batch_y = mnist.validation.next_batch(batch_size)
            val_loss = sess.run([auto.loss],
                                feed_dict={
                                    auto.x: batch_x,
                                    auto.y: batch_y
                                })

            val_loss_list.append(val_loss)

        validation_loss = np.mean(val_loss_list)

        print(epoch_i, validation_loss)

        if validation_loss < minimum_loss:
            minimum_loss = validation_loss
            save_path = saver.save(sess, "./models/cae_mnist/model.ckpt")

    # Encode training and testing samples
    # Save the encoded tensors

    saver.restore(sess, "./models/cae_mnist/model.ckpt")

    encoding_train_imgs_path = './data/MNIST_encoding/cae_train.encoding'
    encoding_test_imgs_path = './data/MNIST_encoding/cae_test.encoding'
    train_labels_path = './data/MNIST_encoding/cae_train.labels'
    test_labels_path = './data/MNIST_encoding/cae_test.labels'

    encoded_imgs_list = []
    labels_list = []
    for batch_i in range(train_m // batch_size):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        encoded_batches = sess.run(auto.encoded_x,
                                   feed_dict={
                                       auto.x: batch_x,
                                       auto.y: batch_y
                                   })

        encoded_imgs_list.append(encoded_batches)
        labels_list.append(batch_y)

    for batch_i in range(validation_m // batch_size):
        batch_x, batch_y = mnist.validation.next_batch(batch_size)
        encoded_batches = sess.run(auto.encoded_x,
                                   feed_dict={
                                       auto.x: batch_x,
                                       auto.y: batch_y
                                   })

        encoded_imgs_list.append(encoded_batches)
        labels_list.append(batch_y)

    encoded_train_imgs = np.array(encoded_imgs_list)
    m, n, d = encoded_train_imgs.shape
    encoded_train_imgs = encoded_train_imgs.reshape(m * n, d)
    print(encoded_train_imgs.shape)

    train_labels = np.array(labels_list).flatten()
    print(train_labels.shape)

    # Save the encoded imgs
    pickle.dump(encoded_train_imgs, open(encoding_train_imgs_path, 'wb'))
    pickle.dump(train_labels, open(train_labels_path, 'wb'))

    encoded_imgs_list = []
    labels_list = []
    for batch_i in range(test_m // batch_size):
        batch_x, batch_y = mnist.test.next_batch(batch_size)
        encoded_batches = sess.run(auto.encoded_x,
                                   feed_dict={
                                       auto.x: batch_x,
                                       auto.y: batch_y
                                   })

        encoded_imgs_list.append(encoded_batches)
        labels_list.append(batch_y)

    encoded_test_imgs = np.array(encoded_imgs_list)
    m, n, d = encoded_test_imgs.shape
    encoded_test_imgs = encoded_test_imgs.reshape(m * n, d)
    print(encoded_test_imgs.shape)

    test_labels = np.array(labels_list).flatten()
    print(test_labels.shape)

    # Save the encoded imgs
    pickle.dump(encoded_test_imgs, open(encoding_test_imgs_path, 'wb'))
    pickle.dump(test_labels, open(test_labels_path, 'wb'))

    # Reconstruct a batch of test images and display the first 10
    n = 32
    x_test, _ = mnist.test.next_batch(n)

    reconstructed_imgs = sess.run(auto.reconstructed_x,
                                  feed_dict={auto.x: x_test})

    n = 10
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # display original
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)

        plt.imshow(reconstructed_imgs[i].reshape(28, 28))

        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

    plt.savefig('./tmp/cae_mnist.png')
コード例 #50
0
def plot_2D_clustering(X, labels):

    plt.figure()
    plt.scatter(X[:, 0], X[:, 1], c=labels.astype(float))
    plt.gray()
    plt.show()
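# Usage sketch (editor's assumption, not from the original project): cluster a
# few synthetic 2-D blobs with scikit-learn and display them with the helper
# defined above.
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
plot_2D_clustering(X, labels)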
コード例 #51
0
from scipy import signal, misc
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
col = 256
row = 256
cx = 128
cy = 128
D0 = 50  # cutoff radius (size of the circle)
N = 2  # filter order (strength of the filtering)
W = 30  # width of the ring (band)
BW_L = np.zeros(shape=[col, row], dtype=np.float32)
BW_H = np.zeros(shape=[col, row], dtype=np.float32)
BW_BP = np.zeros(shape=[col, row], dtype=np.float32)
BW_BS = np.zeros(shape=[col, row], dtype=np.float32)
for x in range(col):
    for y in range(row):
        D = np.sqrt((cx - x)**2 + (cy - y)**2)
        s = (W * D) / (D**2 - D0**2)
        BW_L[x, y] = 1 / (1 + pow(D / D0, 2 * N))
        BW_H[x, y] = 1 / (1 + pow(D0 / D, 2 * N))
        BW_BS[x, y] = 1 / (1 + pow(s, 2 * N))
BW_BP = 1 - BW_BS
plt.subplot(221), plt.imshow(BW_L), plt.gray(), plt.axis('off'), plt.title(
    'Butterworth Lowpass')
plt.subplot(222), plt.imshow(BW_H), plt.gray(), plt.axis('off'), plt.title(
    'Butterworth Highpass')
plt.subplot(223), plt.imshow(BW_BP), plt.gray(), plt.axis('off'), plt.title(
    'Butterworth Bandpass')
plt.subplot(224), plt.imshow(BW_BS), plt.gray(), plt.axis('off'), plt.title(
    'Butterworth Bandstop')
plt.show()
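# Editor's sketch (not in the original): apply one of the masks above in the
# frequency domain. Assumes img is a 256x256 grayscale array so that its
# centered spectrum lines up with BW_L.
F = np.fft.fftshift(np.fft.fft2(img))
lowpassed = np.fft.ifft2(np.fft.ifftshift(F * BW_L)).real
plt.subplot(121), plt.imshow(img), plt.gray(), plt.axis('off'), plt.title('Original')
plt.subplot(122), plt.imshow(lowpassed), plt.gray(), plt.axis('off'), plt.title('Lowpass filtered')
plt.show()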
コード例 #52
0
from scipy import ndimage, misc
import numpy.fft
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2)
plt.gray()  # show the filtered result in grayscale
ascent = misc.ascent()
input_ = numpy.fft.fft2(ascent)
result = ndimage.fourier_uniform(input_, size=20)
result = numpy.fft.ifft2(result)
ax1.imshow(ascent)
ax2.imshow(result.real)  # the imaginary part is an artifact
plt.show()
コード例 #53
0
def plot_RFs(name, nade, sample_shape, rows=5, cols=10):
    rf_sizes = []
    W = nade.W1
    for i in range(W.shape[1]):
        rf_sizes.append((i, -(W[:, i]**2).sum()))
    rf_sizes.sort(key=lambda x: x[1])
    plt.figure(figsize=(0.5 * cols, 0.5 * rows), dpi=100)
    plt.gray()
    for i in range(rows):
        for j in range(cols):
            n = i * cols + j
            plt.subplot(rows, cols, n + 1)
            rf = nade.W1[:, rf_sizes[n][0]]
            #plot_RF(rf, sample_shape)
            plot_RF_of_ps(np.exp(rf) / (1 + np.exp(rf)), sample_shape)
    plt.tight_layout(pad=0.2, h_pad=0.2, w_pad=0.2)
    #plt.savefig(os.path.join(DESTINATION_PATH, name + "_W1.pdf"))
    plt.figure(figsize=(0.5 * cols, 0.5 * rows), dpi=100)
    plt.gray()
    for i in range(rows):
        for j in range(cols):
            n = i * cols + j
            plt.subplot(rows, cols, n + 1)
            rf = np.resize(nade.Wflags[:, rf_sizes[n][0]],
                           np.prod(sample_shape)).reshape(sample_shape)
            plot_RF(rf, sample_shape)
            #plot_RF_of_ps(np.exp(rf)/(1+np.exp(rf)), sample_shape)
    plt.tight_layout(pad=0.2, h_pad=0.2, w_pad=0.2)
    #plt.savefig(os.path.join(DESTINATION_PATH, name + "_Wflags.pdf"))

    plt.figure(figsize=(0.5 * cols, 0.5 * rows), dpi=100)
    plt.gray()
    for i in range(rows):
        for j in range(cols):
            n = i * cols + j
            plt.subplot(rows, cols, n + 1)
            w = nade.W1[:, rf_sizes[n][0]]
            f = nade.Wflags[:, rf_sizes[n][0]]

            x = np.log((1 - np.exp(f - 1)) / (1 + np.exp(w + f)))
            y = w - x

            rf = np.resize(x, np.prod(sample_shape)).reshape(sample_shape)
            #plot_RF_of_ps(rf, sample_shape)
            plot_RF(rf, sample_shape)
    plt.tight_layout(pad=0.2, h_pad=0.2, w_pad=0.2)

    plt.figure(figsize=(0.5 * cols, 0.5 * rows), dpi=100)
    plt.gray()
    for i in range(rows):
        for j in range(cols):
            n = i * cols + j
            plt.subplot(rows, cols, n + 1)
            w = nade.W1[:, rf_sizes[n][0]]
            f = nade.Wflags[:, rf_sizes[n][0]]

            x = np.log((1 - np.exp(f - 1)) / (1 + np.exp(w + f)))
            y = w - x

            rf = np.resize(y, np.prod(sample_shape)).reshape(sample_shape)
            #plot_RF_of_ps(rf, sample_shape)
            plot_RF(rf, sample_shape)
    plt.tight_layout(pad=0.2, h_pad=0.2, w_pad=0.2)

    plt.show()
コード例 #54
0
def main():
    # this is the size of our encoded representations

    input_img = Input(shape=(1375, ))

    encoding_model = Model(input_img, get_encoding_network(input_img))
    encoding_model.summary()
    input_code = Input(shape=(49, ))
    decoding_model = Model(input_code, get_deconding_network(input_code))

    auto_encoder_model = Model(input_img,
                               decoding_model(encoding_model(input_img)))
    print('*****    encoding_model  *******')
    encoding_model.summary()
    print('*****    decoding_model  *******')
    decoding_model.summary()
    print('*****  auto_encoder_model*******')
    auto_encoder_model.summary()
    print('********************************')

    autoencoder = auto_encoder_model

    # this model maps an input to its encoded representation
    encoder = encoding_model

    decoder = decoding_model

    autoencoder.compile(optimizer=Adam(lr=0.0001), loss='mse')
    autoencoder.summary()

    print('loading file from server')

    all_subjects = [
        "gcd",
    ]
    add_time_domain_noise = False
    number_of_k_fold = 10
    downsample_params = 8
    current_experiment_setting = "Color116ms"
    cross_validation_iter = 1

    train_data, train_tags, test_data_with_noise, test_tags, noise_shifts = prepare_data_for_experiment(
        all_subjects,
        add_time_domain_noise=add_time_domain_noise,
        current_experiment_setting=current_experiment_setting,
        downsample_params=downsample_params,
        number_of_k_fold=number_of_k_fold,
        cross_validation_iter=cross_validation_iter)

    print('processed file, now running AE')

    x_train = train_data.reshape(train_data.shape[0] * train_data.shape[1],
                                 train_data.shape[2] * train_data.shape[3])
    x_tst = test_data_with_noise[0]
    x_test = x_tst.reshape(x_tst.shape[0] * x_tst.shape[1],
                           x_tst.shape[2] * x_tst.shape[3])
    random_number = str(dt.microsecond)
    reduce_lr = ReduceLROnPlateau(monitor='loss',
                                  factor=0.2,
                                  patience=5,
                                  min_lr=0.00001)
    callback = keras.callbacks.ModelCheckpoint(
        r"c:\temp\weights_" + random_number +
        ".{epoch:02d}-{val_loss:.2f}.hdf5",
        monitor='val_loss',
        verbose=0,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1)

    autoencoder.fit(x_train,
                    x_train,
                    epochs=30,
                    batch_size=32,
                    shuffle=True,
                    validation_data=(x_test, x_test),
                    callbacks=[callback, reduce_lr])

    encoded_imgs = encoder.predict(x_test)
    decoded_imgs = decoder.predict(encoded_imgs)

    # use Matplotlib (don't ask)

    n = 10  # how many digits we will display
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # display original
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i].reshape(25, 55))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i].reshape(25, 55))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
コード例 #55
0
def cnn_nca_mnist_pretrain(trial, train_percentage=0.1, test_percentage=0.1):
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)

    train_m = int(mnist.train.num_examples * train_percentage)
    test_m = int(mnist.test.num_examples * test_percentage)
    validation_m = mnist.validation.num_examples

    auto = Autoencoder()

    learning_rate = 0.001

    optimizer_rec_error = tf.train.AdamOptimizer(learning_rate).minimize(
        auto.reconstruction_error)

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Pre-train step
    batch_size = 100
    epochs = 100
    rec_error = np.inf
    for epoch_i in range(epochs):
        for batch_i in range(train_m // batch_size):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            sess.run(optimizer_rec_error,
                     feed_dict={
                         auto.x: batch_x,
                         auto.y: batch_y
                     })

        validation_loss, reconstruction_error, nca_obj = cal_loss(
            auto, sess, mnist.validation, validation_m, batch_size)
        print(epoch_i, validation_loss, reconstruction_error, nca_obj)

        if reconstruction_error < rec_error:
            rec_error = reconstruction_error
            save_path = saver.save(sess, "./models/tf_mnist/model.ckpt")

    # Report the loss
    validation_loss, reconstruction_error, nca_obj = cal_loss(
        auto, sess, mnist.test, test_m, batch_size)
    print("Report the test loss of the best model: ")
    print(validation_loss, reconstruction_error, nca_obj)

    # Show 10 reconstructed images
    n = 10
    x_test, _ = mnist.test.next_batch(n)

    reconstructed_imgs = sess.run(auto.reconstructed_x,
                                  feed_dict={auto.x: x_test})

    plt.figure(figsize=(20, 4))
    for i in range(n):
        # display original
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)

        plt.imshow(reconstructed_imgs[i].reshape(28, 28))

        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

    plt.savefig('./tmp/tf_mnist.png')
コード例 #56
0
def main():
    global vmin, vmax
    mnist = True
    bsds = False
    if mnist:
        dataset_file = os.path.join(os.environ["DATASETSPATH"],
                                    "original_NADE/binarized_mnist.hdf5")
        test_dataset = Data.BigDataset(dataset_file, "test", "data")
        nade2hl = load_model(
            os.path.join(os.environ["RESULTSPATH"],
                         "orderless/mnist/2hl/NADE.hdf5"))
        #plot_MNIST_results()
        #exit()
        #plot_examples(nade2hl, test_dataset, (28,28), "mnist_examples.pdf", rows = 5, cols=10)
        #plot_samples(nade2hl, (28,28), "mnist_samples_2hl.pdf", rows = 5, cols=10)
        plot_RFs("MNIST_2hl", nade2hl, sample_shape=(28, 28))
        #nade2hl = load_model(os.path.join(os.environ["RESULTSPATH"], "orderless/mnist/2hl/NADE.hdf5"))
        #np.random.seed(1) #1,17,43,49
        #nade2hl.setup_n_orderings(10)
        #inpaint_digits(test_dataset, (28,28), nade2hl, delete_shape=(10,10), n_examples = 6, n_samples = 5)

    if bsds:
        vmin = -0.5
        vmax = 0.5
        np.random.seed(1)
        dataset_file = os.path.join(
            os.environ["DATASETSPATH"],
            "natural_images/BSDS300/BSDS300_63_no_DC_val.hdf5")
        test_dataset = Data.BigDataset(dataset_file, "test/.", "patches")
        nade = load_model(
            os.path.join(os.environ["RESULTSPATH"],
                         "orderless/BSDS300/6hl/NADE.hdf5"))
        plot_examples(nade,
                      test_dataset, (8, 8),
                      "BSDS_data.pdf",
                      rows=5,
                      cols=10)
        plot_samples(nade, (8, 8), "BSDS_6hl_samples.pdf", rows=5, cols=10)
        nade = load_model(
            os.path.join(os.environ["RESULTSPATH"],
                         "orderless/BSDS300/2hl/NADE.hdf5"))
        plot_RFs("BSDS_2hl", nade, sample_shape=(8, 8))
    exit()

    np.random.seed(8341)
    show_RFs = False
    print_likelihoods = False
    print_mixture_likelihoods = True
    show_data = False
    show_samples = False
    denoise_samples = False
    n_orderings = 6
    n_samples = 10
    #sample_shape = (28, 28)
    sample_shape = (8, 8)

    parser = OptionParser(
        usage="usage: %prog [options] dataset_file nade_path")
    parser.add_option("--training_samples",
                      dest="training_route",
                      default="train")
    parser.add_option("--validation_samples",
                      dest="validation_route",
                      default="validation")
    parser.add_option("--test_samples", dest="test_route", default="test")
    parser.add_option("--samples_name", dest="samples_name", default="data")
    parser.add_option("--normalize",
                      dest="normalize",
                      default=False,
                      action="store_true")
    parser.add_option("--add_dimension",
                      dest="add_dimension",
                      default=False,
                      action="store_true")
    (options, args) = parser.parse_args()

    dataset_filename = os.path.join(os.environ["DATASETSPATH"], args[0])
    model_filename = os.path.join(os.environ["RESULTSPATH"], args[1])
    print(model_filename)
    try:
        hdf5_route = "/highest_validation_likelihood/parameters"
        params = Results.Results(model_filename).get(hdf5_route)
    except:
        hdf5_route = "/final_model/parameters"
        params = Results.Results(model_filename).get(hdf5_route)
    model_class = getattr(NADE, params["__class__"])
    nade = model_class.create_from_params(params)
    #Load datasets
    print("Loading datasets")
    dataset_file = os.path.join(os.environ["DATASETSPATH"], dataset_filename)
    training_dataset = Data.BigDataset(dataset_file, options.training_route,
                                       options.samples_name)
    validation_dataset = Data.BigDataset(dataset_file,
                                         options.validation_route,
                                         options.samples_name)
    test_dataset = Data.BigDataset(dataset_file, options.test_route,
                                   options.samples_name)
    n_visible = training_dataset.get_dimensionality(0)

    if options.normalize:
        mean, std = Data.utils.get_dataset_statistics(training_dataset)
        training_dataset = Data.utils.normalise_dataset(
            training_dataset, mean, std)
        validation_dataset = Data.utils.normalise_dataset(
            validation_dataset, mean, std)
        test_dataset = Data.utils.normalise_dataset(test_dataset, mean, std)

    #Setup a list with n_orderings orderings
    print("Creating random orderings")
    orderings = list()
    #orderings.append(range(nade.n_visible))
    for i in range(n_orderings):
        o = list(range(nade.n_visible))
        np.random.shuffle(o)
        orderings.append(o)

    #Print average loglikelihood and se for several orderings
    if print_likelihoods:
        nade.setup_n_orderings(orderings=orderings)
        ll = nade.get_average_loglikelihood_for_dataset(test_dataset)
        print("Mean test-loglikelihood (%d orderings): %.2f" % (orderings, ll))

    if print_mixture_likelihoods:
        #d = test_dataset.sample_data(1000)[0].T
        d = test_dataset.get_data()[0].T
        for n in [1, 2, 4, 8, 16, 32, 64, 128]:
            #nade.setup_n_orderings(n)
            multi_ord = nade.logdensity(d)
            print(n, np.mean(multi_ord), scipy.stats.sem(multi_ord))

    if show_RFs:
        rf_sizes = []
        W = nade.W1.get_value()
        for i in range(W.shape[1]):
            rf_sizes.append((i, -(W[:, i]**2).sum()))
        rf_sizes.sort(key=lambda x: x[1])
        plt.figure()
        plt.gray()
        for i in range(10):
            for j in range(10):
                n = i * 10 + j
                plt.subplot(10, 10, n + 1)
                rf = nade.Wflags.get_value()[:, rf_sizes[n][0]]
                plot_RF(rf, sample_shape)
        plt.figure()
        plt.gray()
        for i in range(10):
            for j in range(10):
                n = i * 10 + j
                plt.subplot(10, 10, n + 1)
                rf = np.resize(nade.W1.get_value()[:, rf_sizes[n][0]],
                               np.prod(sample_shape)).reshape(sample_shape)
                plot_RF(rf, sample_shape)
        plt.show()

    #Show some samples
    if show_samples:
        images = []
        for row, o in enumerate(orderings):
            samples = nade.sample(n_samples)
            for i in range(samples.shape[1]):
                nade.setup_n_orderings(n=1)
                sample = samples[:, i]
                dens = nade.logdensity(sample[:, np.newaxis])
                if options.add_dimension:
                    sample_extra_dim = np.resize(sample, len(sample) + 1)
                    sample_extra_dim[-1] = -sample.sum()
                    images.append((sample_extra_dim, dens))
                else:
                    images.append((sample, dens))
        images.sort(key=lambda x: -x[1])
        plt.figure()
        plt.gray()
        for row, o in enumerate(orderings):
            for i in range(samples.shape[1]):
                plt.subplot(n_orderings, n_samples, row * n_samples + i + 1)
                im_ll = images[row * n_samples + i]
                plot_sample(im_ll[0], sample_shape)
                plt.title("%.1f" % im_ll[1], fontsize=9)
        plt.show()

    #Show some data
    if show_data:
        images = []
        for row, o in enumerate(orderings):
            samples = test_dataset.sample_data(n_samples)[0].T
            for i in range(samples.shape[1]):
                nade.setup_n_orderings(n=1)
                sample = samples[:, i]
                dens = nade.logdensity(sample[:, np.newaxis])
                if options.add_dimension:
                    sample_extra_dim = np.resize(sample, len(sample) + 1)
                    sample_extra_dim[-1] = -sample.sum()
                    images.append((sample_extra_dim, dens))
                else:
                    images.append((sample, dens))
        images.sort(key=lambda x: -x[1])
        plt.figure()
        plt.gray()
        for row, o in enumerate(orderings):
            for i in range(samples.shape[1]):
                plt.subplot(n_orderings, n_samples, row * n_samples + i + 1)
                im_ll = images[row * n_samples + i]
                plot_sample(im_ll[0], sample_shape)
                plt.title("%.1f" % im_ll[1], fontsize=9)
        plt.show()

    #Get a sample and clean it by picking pixels at random and assigning the most probable value given all the others
    if denoise_samples:
        n = 10
        nade.set_ordering(orderings[-1])
        sample = nade.sample(n)
        plt.figure()
        plt.gray()
        for it in range(10):
            for s in range(n):
                logdensities = nade.logdensity(sample)
                plt.subplot(n, 10, s * 10 + it + 1)
                plot_sample(sample[:, s], sample_shape)
                plt.title("%.1f" % logdensities[s])
            #i = np.random.randint(sample.shape[0])
            for i in range(sample.shape[0]):
                j = np.random.randint(sample.shape[0])
                #mask = sample
                mask = np.ones_like(sample)
                mask[j] = 0
                sample[j] = 0
                ps = nade.conditional(sample, mask)
                #sample[j] = ps[j] > np.random.rand()
                sample[j] = ps[j] > 0.5
        plt.show()
コード例 #57
0
    def latent_traversal(self):
        limit = 1
        pad = 0
        random_index = np.random.randint(low=0,
                                         high=self.data.x_test.shape[0],
                                         size=2)
        z, z_mean, z_log_var = self.network.vae.encoder(
            self.data.x_test[random_index])
        z = z.numpy()
        fig = plt.figure(figsize=(18, 10))
        ax = fig.add_subplot(111)
        ax.set_yticks([])
        ax.set_xticks([-100, 100])
        ax.set_xticklabels(
            [r"$\mu_{z_{j}}-$" + str(limit), r"$\mu_{z_{j}}+$" + str(limit)],
            fontsize=16)
        n_col = 10
        n_row = z.shape[1]
        spec = gridspec.GridSpec(nrows=n_row,
                                 ncols=n_col,
                                 width_ratios=np.ones((n_col, )),
                                 height_ratios=np.ones((n_row, )),
                                 wspace=0,
                                 hspace=0)
        for i in range(n_row):
            start = z[0][i] - limit
            end = z[0][i] + limit
            int1 = np.linspace(start, z[0][i], num=5, endpoint=False)
            int2 = np.linspace(z[0][i], end, num=5,
                               endpoint=False)  # this needs to be changed
            grid_z = np.concatenate((int1, int2), axis=0)
            for j in range(n_col):
                f_ax1 = fig.add_subplot(spec[i, j])
                if j == 0:
                    string_i = str(i)
                    string_list = [int(string_i) for string_i in str(string_i)]
                    if len(string_list) == 2:
                        f_ax1.annotate(r"$z_{}$".format(string_list[0]) +
                                       r"$_{}$".format(string_list[1]),
                                       xy=(0, 0.5),
                                       xytext=(-f_ax1.yaxis.labelpad - pad, 0),
                                       xycoords=f_ax1.yaxis.label,
                                       textcoords='offset points',
                                       size='large',
                                       fontsize=20,
                                       ha='right',
                                       va='center')
                    else:
                        f_ax1.annotate(r"$z_{}$".format(string_list[0]),
                                       xy=(0, 0.5),
                                       xytext=(-f_ax1.yaxis.labelpad - pad, 0),
                                       xycoords=f_ax1.yaxis.label,
                                       textcoords='offset points',
                                       size='large',
                                       fontsize=20,
                                       ha='right',
                                       va='center')

                f_ax1.set_yticks([])
                f_ax1.set_xticks([])
                f_ax1.set_aspect('equal')
                z[0][i] = grid_z[j]
                x_rec = self.network.vae.decoder(z)
                x_rec = x_rec.numpy()
                plt.gray()
                f_ax1.imshow(x_rec[0].reshape(64, 64), aspect='auto')
        plt.savefig(self.path + "/figures/" + "traversal" + self.path + ".png")
        plt.show()
コード例 #58
0
def inpaint_digits(dataset,
                   shape,
                   model,
                   n_examples=5,
                   delete_shape=(10, 10),
                   n_samples=5,
                   name="inpaint_digits"):
    #Load a few digits from the test dataset (as rows)
    data = dataset.sample_data(1000)[0]

    #data = data[[1,12,17,81,88,102],:]
    data = data[[1, 12, 17, 81, 88, 37], :]
    n_examples = data.shape[0]

    #Generate a random region to delete
    regions = [(np.random.randint(shape[0] - delete_shape[0] + 1),
                np.random.randint(shape[1] - delete_shape[1] + 1))
               for i in range(n_examples)]
    print(regions)
    regions = [(11, 5), (11, 5), (11, 5), (4, 13), (4, 13), (4, 13)]

    #Generate masks
    def create_mask(x, y):
        mask = np.ones(shape)
        mask[y:y + delete_shape[1], x:x + delete_shape[0]] = 0
        return mask.flatten()

    masks = [create_mask(x, y) for (x, y) in regions]

    #Hollow
    def hollow(example, mask):
        hollowed = example.copy()
        return hollowed * mask

    hollowed = [hollow(data[i, :], mask) for i, mask in enumerate(masks)]

    densities = model.logdensity(data.T)
    #Calculate the marginal probability under a nade
    marginal_densities = [
        model.marginal_density(h, mask) for h, mask in zip(hollowed, masks)
    ]

    #Generate some samples
    samples = [
        model.sample_conditional(h, mask, n_samples=n_samples)
        for h, mask in zip(hollowed, masks)
    ]
    #samples = [model.sample_conditional_max(h, mask, n_samples=n_samples) for h, mask in zip(hollowed, masks)]

    #Plot it all
    matplotlib.rcParams.update({'font.size': 8})
    plt.figure(figsize=(5, 5), dpi=100)
    plt.gray()
    cols = 2 + n_samples
    for row in range(n_examples):
        # Original
        plt.subplot(n_examples, cols, row * cols + 1)
        plot_sample(data[row, :], shape, origin="upper")
        plt.title("%.2f" % densities[row])
        # Marginalization region
        plt.subplot(n_examples, cols, row * cols + 2)
        plot_sample(hollowed[row], shape, origin="upper")
        plt.gca().add_patch(
            plt.Rectangle(regions[row],
                          delete_shape[0],
                          delete_shape[1],
                          facecolor="red",
                          edgecolor="red"))
        plt.title("%.2f" % marginal_densities[row])
        # Samples
        for j in range(n_samples):
            plt.subplot(n_examples, cols, row * cols + 3 + j)
            plot_sample(samples[row][:, j], shape, origin="upper")
    plt.subplots_adjust(left=0.01,
                        right=0.99,
                        top=0.95,
                        bottom=0.01,
                        hspace=0.40,
                        wspace=0.04)
    plt.savefig(os.path.join(DESTINATION_PATH, name + ".pdf"))
def plot_digit(feature_vector):
    plt.gray()
    plt.matshow(feature_vector.reshape(28,28))
    plt.show()
コード例 #60
0
ファイル: KMeansPlus3.py プロジェクト: YRApril/LiJia
    dataTemp["Micro_precision"] = precision_score(y_true,
                                                  y_pred,
                                                  average='micro')
    dataTemp["Micro_recall"] = recall_score(y_true, y_pred, average='micro')
    dataTemp["Micro_f1-score"] = f1_score(y_true, y_pred, average='micro')

    resultDimen = resultDimen.append(dataTemp, ignore_index=True)

    # print('------Weighted-----')
    # print('Weighted_precision',precision_score(y_true,y_pred,average='weighted'))
    # print('Weighted_recall', recall_score(y_true, y_pred, average='weighted'))
    # print('Weighted_f1-score', f1_score(y_true, y_pred, average='weighted'))
    #
    # print('------Macro-----')
    # print('Macro_precision', precision_score(y_true, y_pred, average='macro'))
    # print('Macro_recall', recall_score(y_true, y_pred, average='macro'))
    # print('Macro_f1-score', f1_score(y_true, y_pred, average='macro'))
    #
    # print('------Micro-----')
    # print('Micro_precision', precision_score(y_true, y_pred, average='micro'))
    # print('Micro_recall', recall_score(y_true, y_pred, average='micro'))
    # print('Micro_f1-score', f1_score(y_true, y_pred, average='micro'))

    plt.matshow(conf_matrix, cmap=plt.gray())
    plt.title(str(list))
    plt.savefig("out/" + str(list) + ".png")
    plt.show()

print(resultDimen)
resultDimen.to_csv("out/result.csv")
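# Editor's note (assumption): the conf_matrix plotted above is computed outside
# this fragment, most likely with scikit-learn:
from sklearn.metrics import confusion_matrix

conf_matrix = confusion_matrix(y_true, y_pred)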