Example #1
def plot_rocs(data, save_to=None):

    import matplotlib.pyplot as plt
    import palettable
    from sklearn import metrics

    colors = palettable.colorbrewer.qualitative.Set2_8.mpl_colors
    fig, ax = plt.subplots(1)
    for i, (filename, (xs, ys)) in enumerate(data):
        # the reorder kwarg was removed from modern scikit-learn
        auc_score = metrics.auc(xs, ys)
        label = "%s (%.3f)" % (filename, auc_score)
        ax.plot(xs, ys, '-', linewidth=1.2, color=colors[i], label=label)

    ax.plot([0.0, 1.0], [0.0, 1.0], '--', color='gray')
    ax.set_ylabel("Portion Relevant")
    ax.set_xlabel("Portion Irrelevant")
    ax.set_title("ROC")
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.0])
    ax.legend(loc='lower right')

    if save_to is None:
        fig.show()
        plt.waitforbuttonpress()
    else:
        fig.savefig(save_to)
    fig.clf()
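For reference, a minimal driver for plot_rocs might look like the sketch below; the (filename, (xs, ys)) pair layout is inferred from the loop above, and both curves are synthetic.

import numpy as np

# Hypothetical usage of plot_rocs with two synthetic ROC curves.
xs = np.linspace(0.0, 1.0, 50)
data = [
    ("model_a", (xs, np.sqrt(xs))),  # curve above the diagonal
    ("model_b", (xs, xs ** 1.5)),    # curve below the diagonal
]
plot_rocs(data)                      # interactive: waits for a button press
plot_rocs(data, save_to="rocs.png")  # or write the figure straight to disk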
Example #2
def display_Direction(centroid0,centroid1,k=None,cluster=None):
    """
    display the direction between two centroids as a line through them

    :arg centroid0: first centroid
    :type centroid0: Observation
    :arg centroid1: second centroid
    :type centroid1: Observation
    :param k: k of kmeans, for title
    :type k: int
    :param cluster: cluster whose direction is computed, for title
    :type cluster: int
    :rtype: None
    """


    dx = float(abs(centroid0.values[0]-centroid1.values[0]))
    dy = float(abs(centroid0.values[1]-centroid1.values[1]))

    if (centroid0.values[0]-centroid1.values[0])*(centroid0.values[1]-centroid1.values[1]) < 0:
        dy = -dy

    x = [(float(centroid0.values[0]+centroid1.values[0]))/2-1.5*dx,(float(centroid0.values[0]+centroid1.values[0]))/2+1.5*dx]
    y = [(float(centroid0.values[1]+centroid1.values[1]))/2-1.5*dy,(float(centroid0.values[1]+centroid1.values[1]))/2+1.5*dy]

    #time to keep displayed 
    time = 4

    # plt.hold was removed from modern matplotlib; overplotting is the default
    plt.plot(x,y,c='red',linewidth=3)
    plt.waitforbuttonpress(timeout=time)
    plt.savefig("output/fig.png")
Example #3
def replay():
    data = pickle.load(open(fpkl, 'rb'))

    for d in data:
        ts = d['newdata']['ts']
        spds = d['newdata']['spds']
        dists = d['newdata']['dists']
        coef_spd = d['fit_coef']['spd']
        coef_dist = d['fit_coef']['dist']

        tsp = np.linspace(0, ts[-1], 100)

        pfspd = np.poly1d(coef_spd)
        fspds = pfspd(tsp)

        pfdist = np.poly1d(coef_dist)
        fdists = pfdist(tsp)

        print('a: %f, %f' % (coef_dist[0] * 2, coef_spd[0]))

        plt.subplot(121)
        plt.plot(ts, spds, '.', ms=10, color='b')
        plt.plot(tsp, fspds, '-', color='b')
        plt.plot(ts, dists, '.', ms=10, color='g')
        plt.plot(tsp, fdists, '-', color='g')

        plt.subplot(122)
        plt.plot(ts, spds, '.')

        plt.draw()
        plt.waitforbuttonpress(-1)
        plt.clf()
Example #4
    def draw_pivot_interactive(self, xk, key_pressed=False, wait_time=1):
        """Draw a red circle at the current pivot position.
        To be used interactively.

        Keyword Arguments:
        xk          -- a pair representing the position of the new pivot
        key_pressed -- True if a key or button must be pressed to continue,
                       else wait for wait_time seconds
        wait_time   -- the time in seconds to wait
        """

        if self.started:
            if self.pivot_patch is None:
                self.pivot_patch = plt.Circle((0, 0), 0.1, fc='r')
            else:
                gui_line, = self.ax.plot([self.pivot_patch.center[0], xk[0]],
                                         [self.pivot_patch.center[1], xk[1]])
                gui_line.set_color('red')
                gui_line.set_linestyle('-')
                gui_line.set_linewidth(3)
                plt.draw()

            self.pivot_patch.center = (xk[0], xk[1])
            self.ax.add_patch(self.pivot_patch)
        else:
            self.started = True

        if key_pressed:
            plt.waitforbuttonpress()
        else:
            plt.pause(wait_time)
Example #5
def plot_maps(maps, coords, width, height):
    import numpy as np
    import matplotlib.pyplot as plt
    maps = np.array(maps)
    num_vert = 1
    num_hor = len(maps)
    if maps.ndim > 2:
        num_vert, num_hor = maps.shape[0], maps.shape[1]

    fig, axs = plt.subplots(num_vert, num_hor)
    for j in range(num_vert):
        for i in range(num_hor):
            if num_vert == 1:
                fig.colorbar(
                    axs[i].imshow(mapnodestoimg(maps[i], width, height, coords)),
                    ax=axs[i])
            else:
                fig.colorbar(
                    axs[j][i].imshow(mapnodestoimg(maps[j][i], width, height, coords)),
                    ax=axs[j][i])
    fig.show()
    plt.draw()
    plt.waitforbuttonpress()
Example #6
def show_example(out_path, counter, data_all):
    """
    Show example - need to re-write every time we change the data
    :param out_path: output writing path
    :param counter: example number
    :param data_all: dictionary with all data
    """
    print "out_path: " + out_path
    print "counter: " + str(counter)

    fig, ax = plt.subplots(nrows=3, ncols=2)
    fig.set_size_inches(18.5, 10.5, forward=True)

    ax[0][0].set_title('Original Image')
    imshow(data_all["image_gt"], ax=ax[0][0], fig=fig)

    ax[0][1].set_title('SubSampled Image')
    imshow(data_all["image"], ax=ax[0][1], fig=fig)

    ax[1][0].set_title('Original real k-space')
    imshow(data_all["k_space_real_gt"], ax=ax[1][0], fig=fig)

    ax[1][1].set_title('SubSampled real k-space')
    imshow(data_all["k_space_real"], ax=ax[1][1], fig=fig)

    ax[2][0].set_title('Original imaginary k-space')
    imshow(data_all["k_space_imag_gt"], ax=ax[2][0], fig=fig)

    ax[2][1].set_title('SubSampled imaginary k-space')
    imshow(data_all["k_space_imag"], ax=ax[2][1], fig=fig)

    plt.waitforbuttonpress(timeout=-1)
    plt.close()
Example #7
def debug_call(particles_weighted, the_map):

  debug = False

  if not debug:
    return 

  # Initialize figure
  my_dpi = 96
  plt.figure(1, figsize=(800/my_dpi, 800/my_dpi), dpi=my_dpi)
  plt.cla()
  plt.xlim ( the_map.info.origin.position.x, the_map.info.origin.position.x + the_map.info.width )
  plt.ylim ( the_map.info.origin.position.y, the_map.info.origin.position.y + the_map.info.height )
  plt.gca().set_aspect('equal', adjustable='box')
  plt.xlabel('X world')
  plt.ylabel('Y world')
  ax = plt.gca()

  # Draw map
  draw_occupancy_grid(the_map, ax)

  # Draw particles
  draw_particles_scored(particles_weighted)

  # Show plot
  plt.draw()

  pause = True
  if pause:
    k = plt.waitforbuttonpress(1)
    while not k:
      k = plt.waitforbuttonpress(1)
  else:
    plt.waitforbuttonpress(1e-6)
Example #8
def waitForInput():
    ''' This time, proceed with a click or by hitting any key '''
    plt.plot(t,c)
    plt.title('Click in that window, or hit any key to continue')
    
    plt.waitforbuttonpress()
    plt.close()
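A detail several of these examples rely on: plt.waitforbuttonpress() returns True for a key press, False for a mouse click, and None if the timeout expires. A minimal sketch:

import matplotlib.pyplot as plt

plt.plot([0, 1], [0, 1])
plt.title('key press = True, mouse click = False, timeout = None')
result = plt.waitforbuttonpress(timeout=30)
if result is None:
    print('timed out')
elif result:
    print('key pressed')
else:
    print('mouse clicked')
plt.close()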
Example #9
    def cut_bounds(self, data):
        def tellme(s):
            print(s)
            plt.title(s, fontsize=16)
            plt.draw()

        plt.clf()
        plt.plot(data)
        plt.setp(plt.gca(),autoscale_on=False)
        tellme('You will select start and end bounds. Click to continue')

        plt.waitforbuttonpress()

        happy = False
        while not happy:
            pts = []
            while len(pts) < 2:
                tellme('Select 2 bounding points with mouse')
                pts = plt.ginput(2,timeout=-1)
                if len(pts) < 2:
                    tellme('Too few points, starting over')
                    time.sleep(1) # Wait a second

            # shade the selected interval; the original called fill_between
            # with undefined x, y1, y2 here
            plt.axvspan(pts[0][0], pts[1][0], alpha=0.5, facecolor='grey')
            
            tellme('Happy? Key click for yes, mouse click for no')

            happy = plt.waitforbuttonpress()

        bounds = sorted([int(i[0]) for i in pts])
        plt.clf()
        print(bounds)
        lo = bounds[0] if bounds[0] > .02 * len(data) else 0
        hi = bounds[1] if bounds[1] < len(data) - 1 else len(data) - 1
        return data[lo:hi], bounds
Example #10
def show_results(data, labels):
    """
    This is a utility function that can be used to visualize which
    images end up in which class.
    
    Parameters:
    data - A set of images in the original data format
    labels - one 0,1 label for each image.

    """
    implot = None
    for i in range(data.shape[0]):
        if implot is None:
            implot = plt.imshow(data_to_img(data[i,:]), 
                                interpolation='none')
            txt = plt.text(5, 5, str(labels[i]), size=20,
                           color=(0, 1, 0))
        else:
            implot.set_data(data_to_img(data[i,:])) 
        txt.set_text(str(labels[i]))
        fig = plt.gcf()
        fig.set_size_inches(2,2)
        plt.xticks(())
        plt.yticks(())
        plt.draw()
        plt.waitforbuttonpress()
Example #11
def interactive_fit(redshifts, vals, nchi2, templ, sp, outfile, filename,
                    fig, spec2d=None, wa2d=None, spec2dpos=None, msky=None):
    fig.clf()
    objname = filename.split('/')[-1].replace('_xcorr.sav', '')
    wrapper = FindZWrapper(redshifts, vals, nchi2, templ, sp, objname, fig=fig,
                           spec2d=spec2d, wa2d=wa2d, spec2dpos=spec2dpos,
                           msky=msky)

    # wait until user decides on redshift
    try:
        while wrapper.wait:
            pl.waitforbuttonpress()
    except (tkinter.TclError, KeyboardInterrupt):
        print("\nClosing\n")
        sys.exit(1)

    if wrapper.zconf == 'k':
        # skip
        return
    print(filename)
    plotname = filename.replace('_xcorr.sav', '.png')
    print('saving to ', plotname)
    fig.savefig(plotname)

    outfile.write('%20s %10s % 8.5e %1s\n' % (
        filename.split('/')[-1].split('_xcorr')[0], templ[wrapper.i].label,
        wrapper.zgood, wrapper.zconf))
    outfile.flush()
Example #12
def test_mnist():
    """Summary

    Returns
    -------
    name : TYPE
        Description
    """
    # %%
    import tensorflow as tf
    import tensorflow.examples.tutorials.mnist.input_data as input_data
    import matplotlib.pyplot as plt

    # %%
    # load MNIST as before
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    mean_img = np.mean(mnist.train.images, axis=0)
    ae = VAE()

    # %%
    learning_rate = 0.002
    optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(ae['cost'])

    # %%
    # We create a session to use the graph
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # %%
    # Fit all training data
    batch_size = 100
    n_epochs = 50
    for epoch_i in range(n_epochs):
        for batch_i in range(mnist.train.num_examples // batch_size):
            batch_xs, _ = mnist.train.next_batch(batch_size)
            train = np.array([(img - mean_img) for img in batch_xs])
            # run the optimizer on every batch (the original only ran it
            # once per epoch, leaving the inner loop without effect)
            sess.run(optimizer, feed_dict={ae['x']: train})
        print(epoch_i, sess.run(ae['cost'], feed_dict={ae['x']: train}))

    # %%
    # Plot example reconstructions
    n_examples = 12
    test_xs, _ = mnist.test.next_batch(n_examples)
    test_xs_norm = np.array([img - mean_img for img in test_xs])
    recon = sess.run(ae['y'], feed_dict={ae['x']: test_xs_norm})
    print(recon.shape)
    fig, axs = plt.subplots(2, n_examples, figsize=(10, 2))
    for example_i in range(n_examples):
        axs[0][example_i].imshow(
            np.reshape(test_xs[example_i, :], (28, 28)),
            cmap='gray')
        axs[1][example_i].imshow(
            np.reshape(
                np.reshape(recon[example_i, ...], (784,)) + mean_img,
                (28, 28)),
            cmap='gray')
    fig.show()
    plt.draw()
    plt.waitforbuttonpress()
Example #13
def createHistogramOfOrientedGradientFeatures(sourceImage, numOrientations, pixelsPerCell):
    # Returns an nxd matrix, n pixels and d the HOG vector length.
    
    # H is a matrix NBLOCKS_Y x NBLOCKS_X x CPB_Y x CPB_X x ORIENTATIONS
    # Here CPB == 1
    H,Himg = myhog.hog( sourceImage, numOrientations, pixelsPerCell, cells_per_block=(1,1), flatten=False, visualise=True )
    hog_image_rescaled = skimage.exposure.rescale_intensity( Himg )#, in_range=(0, 0.2))
    plt.interactive(True)
    plt.figure()
    plt.subplot(1,2,1)
    plt.imshow(sourceImage)
    plt.subplot(1,2,2)
    plt.imshow( hog_image_rescaled, cmap=plt.cm.gray )
    plt.title('HOG')
    plt.waitforbuttonpress()

    # Reduce to non-singleton dimensions, BY x BX x ORIENT
    H = H.squeeze()
    assert H.ndim == 3
    assert H.max() <= 1.0
    # resize to image pixels rather than grid blocks
    hogImg = np.zeros( ( sourceImage.shape[0], sourceImage.shape[1], numOrientations ), dtype=float )
    for o in range(numOrientations):
        hogPerOrient = H[:,:,o].astype(np.float32)
        hpoAsPil = pil.fromarray( hogPerOrient, mode='F' )
        hogImg[:,:,o] = np.array( hpoAsPil.resize( (sourceImage.shape[1], sourceImage.shape[0]), pil.NEAREST ) )
    return hogImg.reshape( ( sourceImage.shape[0]*sourceImage.shape[1], numOrientations ) )
Example #14
def manual_sync_pick(flow, gyro_ts, gyro_data):
    # First pick good points in flow
    plt.clf()
    plt.plot(flow)
    plt.title('Select two points')
    selected_frames = [int(round(x[0])) for x in plt.ginput(2)]

    # Now pick good points in gyro
    plt.clf()
    plt.subplot(211)
    plt.plot(flow)
    plt.plot(selected_frames, flow[selected_frames], 'ro')
    
    plt.subplot(212)
    plt.plot(gyro_ts, gyro_data.T)
    plt.title('Select corresponding sequence in gyro data')
    plt.draw()
    selected = plt.ginput(2) #[int(round(x[0])) for x in plt.ginput(2)]
    gyro_idxs = [(gyro_ts >= x[0]).nonzero()[0][0] for x in selected]
    plt.plot(gyro_ts[gyro_idxs], gyro_data[:, gyro_idxs].T, 'ro')
    plt.title('Ok, click to continue to next')
    plt.draw()
    plt.waitforbuttonpress(timeout=10.0)
    plt.close()
    
    return (tuple(selected_frames), gyro_idxs)
Example #15
def photos(tt, tangl, tz0):
    fig = plt.figure(num=0)
    ax = fig.add_subplot(111)
    for i in [0, 100, 200, 300, 400]:
        ax.clear()
        photo2(ax, tt[i], tangl[i], tz0[i])
        plt.show()
        plt.waitforbuttonpress(timeout=1)
Example #16
 def display_unbounded(self):
     tableau_latex = self.attain_tableau_latex()
     unbounded_latex = r"""\noindent Unbounded!"""
     unbounded_latex += r" \\"
     self.text.set_text(unbounded_latex + tableau_latex)
     plt.waitforbuttonpress()
     plt.draw()
     plt.waitforbuttonpress()
Example #17
def hanoi(n, A, B, C):
    if n > 0:
        hanoi(n - 1, A, C, B)
        config[A].pop()
        config[C].append(n)
        plt.waitforbuttonpress()
        plot_config(config)
        hanoi(n - 1, B, A, C)
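The config dict and plot_config helper used by hanoi() are defined outside this excerpt; a minimal sketch of what they could look like (the names and drawing logic are assumptions for illustration only):

import matplotlib.pyplot as plt

# Hypothetical globals for the hanoi() excerpt above: three pegs, each a
# stack of disk sizes with the largest at the bottom.
n_disks = 4
config = {'A': list(range(n_disks, 0, -1)), 'B': [], 'C': []}

def plot_config(cfg):
    plt.clf()
    for x, peg in enumerate(['A', 'B', 'C']):
        for height, disk in enumerate(cfg[peg]):
            # center a bar of width `disk` on the peg's x position
            plt.barh(height, disk, left=x * (n_disks + 1) - disk / 2.0)
    plt.xlim(-n_disks, 3 * (n_disks + 1))
    plt.title('Press a key or click to step')
    plt.draw()

plot_config(config)
hanoi(n_disks, 'A', 'B', 'C')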
Example #18
def draw_mars_data(data):
    u = np.unique(data.flatten())
    u.sort()
    print(u[0], u[1])
    #print np.min(data), np.max(data)
    data = np.clip(data, u[1], np.max(data))
    imgplot = plt.imshow(data, interpolation="nearest")
    plt.waitforbuttonpress()
Example #19
def roipoly(ax=None):
  """ use polygonmask to get the pixel mask"""
  if ax is None:
    ax = plt.gca()
  r = Roipoly(ax)
  while not r.done:
    plt.waitforbuttonpress()
  return r.verts
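roipoly() above polls waitforbuttonpress until the handler object flags completion; the Roipoly class itself is not shown, but a minimal stand-in could look like this (purely an assumption for illustration):

import matplotlib.pyplot as plt

class Roipoly:
    """Hypothetical stand-in for the handler polled above: collects
    clicked vertices until any key press finishes the polygon."""

    def __init__(self, ax):
        self.verts = []
        self.done = False
        canvas = ax.figure.canvas
        canvas.mpl_connect('button_press_event', self._on_click)
        canvas.mpl_connect('key_press_event', self._on_key)

    def _on_click(self, event):
        if event.inaxes:
            self.verts.append((event.xdata, event.ydata))

    def _on_key(self, event):
        self.done = True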
Example #20
def test_mnist():
    """Test the convolutional autoencder using MNIST."""
    # %%
    import tensorflow as tf
    import tensorflow.examples.tutorials.mnist.input_data as input_data
    import matplotlib.pyplot as plt

    flags = tf.app.flags
    FLAGS = flags.FLAGS
    flags.DEFINE_string('data_dir', '../data/mnist/', 'Directory for storing data')


    # %%
    # load MNIST as before
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    mean_img = np.mean(mnist.train.images, axis=0)
    ae = autoencoder()

    # %%
    learning_rate = 0.01
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])

    # %%
    # We create a session to use the graph
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # %%
    # Fit all training data
    batch_size = 100
    n_epochs = 10
    for epoch_i in range(n_epochs):
        for batch_i in range(mnist.train.num_examples // batch_size):
            batch_xs, _ = mnist.train.next_batch(batch_size)
            train = np.array([img - mean_img for img in batch_xs])
            sess.run(optimizer, feed_dict={ae['x']: train})
        print(epoch_i, sess.run(ae['cost'], feed_dict={ae['x']: train}))

    # %%
    # Plot example reconstructions
    n_examples = 10
    test_xs, _ = mnist.test.next_batch(n_examples)
    test_xs_norm = np.array([img - mean_img for img in test_xs])
    recon = sess.run(ae['y'], feed_dict={ae['x']: test_xs_norm})
    print(recon.shape)
    fig, axs = plt.subplots(2, n_examples, figsize=(10, 2))
    for example_i in range(n_examples):
        axs[0][example_i].imshow(
            np.reshape(test_xs[example_i, :], (28, 28)))
        axs[1][example_i].imshow(
            np.reshape(
                np.reshape(recon[example_i, ...], (784,)) + mean_img,
                (28, 28)))
    fig.show()
    plt.draw()
    plt.waitforbuttonpress()
Example #21
def get():
  i = cv2.imread('rdy.pgm', cv2.IMREAD_ANYDEPTH)
  maxv = i.max()
  # rescale to 0-255 out of place (an in-place *= with a float factor
  # fails on integer images), then convert to 8 bits
  i = numpy.uint8(i * (255.0 / maxv))
  i = cv2.bilateralFilter(i, 9, 75, 75)
  # i = cv2.adaptiveThreshold(i, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 5)
  plt.imshow(i)
  plt.waitforbuttonpress()
  return i
Example #22
def plotspec(args):
    """Plot spectrum files

    Parameters
    ----------
    filenames : list of str
      Spectrum filenames
    """
    from linetools.spectra.io import readspec
    import warnings
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    warnings.simplefilter('ignore', mpl.mplDeprecation)
    
    spec_cache = {}

    fig = plt.figure(figsize=(10,5))
    fig.subplots_adjust(left=0.07, right=0.95, bottom=0.11)
    ax = fig.add_subplot(111)
    i = 0
    quit = False
    print("#### Use left and right arrow keys to navigate, 'Q' to quit ####")

    while quit is False:
        filename = args.filenames[i]
        if filename not in spec_cache:
            spec_cache[filename] = readspec(filename)
        sp = spec_cache[filename]
        ax.cla()
        sp.plot(show=False)
        ax.set_xlabel(str(sp.wavelength.unit))
        ax.set_title(filename)
        if args.redshift is not None:
            from linetools.lists.linelist import LineList
            ll = LineList('Strong')
            #import pdb ;pdb.set_trace()
            wlines = ll._data['wrest'] * (1 + args.redshift)
            y0, y1 = ax.get_ylim()
            ax.vlines(wlines.to(sp.wavelength.unit).value, y0, y1,
                      linestyle='dotted')

        while True:
            plt.waitforbuttonpress()
            if sp._plotter.last_keypress == 'right':
                i += 1
                i = min(i, len(args.filenames) - 1)
                # Note this only breaks out of the inner while loop
                break
            elif sp._plotter.last_keypress == 'left':
                i -= 1
                i = max(i, 0)
                break
            elif sp._plotter.last_keypress == 'Q':
                quit = True
                break
Example #23
def display_dataset():
    logging.basicConfig(level=logging.DEBUG)

    cfg = load_config()
    dataset = dataset_create(cfg)
    dataset.set_shuffle(False)

    while True:
        batch = dataset.next_batch()

        for frame_id in range(1):
            img = batch[Batch.inputs][frame_id,:,:,:]
            img = np.squeeze(img).astype('uint8')

            scmap = batch[Batch.part_score_targets][frame_id,:,:,:]
            scmap = np.squeeze(scmap)

            # scmask = batch[Batch.part_score_weights]
            # if scmask.size > 1:
            #     scmask = np.squeeze(scmask).astype('uint8')
            # else:
            #     scmask = np.zeros(img.shape)

            subplot_height = 4
            subplot_width = 5
            num_plots = subplot_width * subplot_height
            f, axarr = plt.subplots(subplot_height, subplot_width)

            for j in range(num_plots):
                plot_j = j // subplot_width
                plot_i = j % subplot_width

                curr_plot = axarr[plot_j, plot_i]
                curr_plot.axis('off')

                if j >= cfg.num_joints:
                    continue

                scmap_part = scmap[:,:,j]
                scmap_part = imresize(scmap_part, 8.0, interp='nearest')
                scmap_part = np.lib.pad(scmap_part, ((4, 0), (4, 0)), 'minimum')

                curr_plot.set_title("{}".format(j+1))
                curr_plot.imshow(img)
                # Axes.hold was removed from matplotlib; overplotting is now the default
                curr_plot.imshow(scmap_part, alpha=.5)

        # figure(0)
        # plt.imshow(np.sum(scmap, axis=2))
        # plt.figure(100)
        # plt.imshow(img)
        # plt.figure(2)
        # plt.imshow(scmask)
        plt.show()
        plt.waitforbuttonpress()
Example #24
    def addgrain(self,ori=0):
        '''
        add a grain inside the microstructure
        
        :param ori: orientation of the new grain [phi1 phi] (default random value)
        :type ori: array, float
        :return: new_micro, object with the new grain included
        :rtype: aita
        :Example: 
            >>> data.addgrain()      
        '''
        
        # select the contour of the grains
        h=self.grains.plot()
        # click on the three summits (vertices) of the new grain
        plt.waitforbuttonpress()
        print('click on the three summits of the new grain:')
        x=np.array(pylab.ginput(3))/self.grains.res
        plt.close('all')
        
        # select a subarea containing the triangle
        minx=int(np.fix(np.min(x[:,0])))
        maxx=int(np.ceil(np.max(x[:,0])))
        miny=int(np.fix(np.min(x[:,1])))
        maxy=int(np.ceil(np.max(x[:,1])))
        
        # write all point inside this area
        gpoint=[]
        for i in list(range(minx,maxx)):
            for j in list(range(miny,maxy)):
                gpoint.append([i,j])
        
    
        # test if the point is inside the triangle    
        gIn=[]
        for i in list(range(len(gpoint))):
            gIn.append(isInsideTriangle(gpoint[i],x[0,:],x[1,:],x[2,:]))

        gpointIn=np.array(gpoint)[np.array(gIn)]
        
        #transform in xIn and yIn, the coordinate of the map
        xIn=np.shape(self.grains.field)[0]-gpointIn[:,1]
        yIn=gpointIn[:,0]
               
        # add one grains
        self.grains.field[xIn,yIn]=np.nanmax(self.grains.field)+1
        # add the orientation of the grains
        if ori==0:
            self.phi1.field[xIn,yIn]=random.random()*2*math.pi
            self.phi.field[xIn,yIn]=random.random()*math.pi/2
        else:
            self.phi1.field[xIn,yIn]=ori[0]
            self.phi.field[xIn,yIn]=ori[1]
            
        # `mask` is built from code elided in this excerpt
        return self.phi1*mask
Example #25
def visualize_data(x, y, viz_training=False):
    # Plot data using matplotlib
    plt.plot(x, y, 'bo')
    plt.ylabel('Output Value')
    plt.xlabel('Input Feature')
    if viz_training:
        plt.title('Normalized data\tClick on the figure to run Gradient Descent Algorithm')
        plt.waitforbuttonpress()
    else:
        plt.title('Original data')
        plt.show()
Example #26
def generate_data():
    # Generate a non-normally distributed datasample
    data = stats.poisson.rvs(2, size=1000)
    
    # Show the data
    plt.plot(data, '.')
    plt.title('Non-normally distributed dataset: Press any key to continue')
    # plt.show()
    plt.waitforbuttonpress()
    plt.close()    
    
    return data
Example #27
def test_mnist(dimensions):
    # Import and read MNIST data
    import numpy as np
    import tensorflow as tf
    import tensorflow.examples.tutorials.mnist.input_data as input_data
    import matplotlib.pyplot as plt

    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
    mean_img = np.mean(mnist.train.images, axis=0)

    # Create an SDA with dimensions.length() - 1 layers
    ae = SDAutoencoder(dimensions=dimensions)

    # Create an Adam optimizer for gradient descent
    learning_rate = 1e-4
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae.cost)

    # Setup accuracy model
    # is_correct = tf.equal()

    # Initialize the default session graph
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # Set batch and epoch size
    batch_size = 50
    n_epochs = 10

    for epoch_i in range(n_epochs):
        for batch_i in range(mnist.train.num_examples // batch_size):
            batch_xs, _ = mnist.train.next_batch(batch_size)
            train = np.array([img - mean_img for img in batch_xs])
            feed = {ae.x: train, ae.corrupt_prob: [1.0]}
            sess.run(optimizer, feed_dict=feed)

            if batch_i % 100 == 0:
                pass
        print(epoch_i, sess.run(ae.cost, feed_dict=feed))

    # Plotting
    n_examples = 15
    test_xs, _ = mnist.test.next_batch(n_examples)
    test_xs_norm = np.array([img - mean_img for img in test_xs])
    recon = sess.run(ae.y, feed_dict={
        ae.x: test_xs_norm, ae.corrupt_prob: [0.0]})
    fig, axs = plt.subplots(2, n_examples, figsize=(10, 2))
    for example_i in range(n_examples):
        axs[0][example_i].imshow(
            np.reshape(test_xs[example_i, :], (28, 28)))
        axs[1][example_i].imshow(
            np.reshape([recon[example_i, :] + mean_img], (28, 28)))
    fig.show()
    plt.draw()
    plt.waitforbuttonpress()
Example #28
def viewClassifiedImages(path, dataset, suffix=".png"):

    t = plt.text(20, -20, 'Hello World')
    for i in range(0, len(dataset.fileNames)):
        #load image
        name = dataset.fileNames[i].split('.')[0]
        name = name+suffix
        im = plt.imread(path+name)
        plt.imshow(im)
        text = 'Name: '+name + ', label: '+dataset.target[i]+ ', classified as: '+dataset.classifiedAs[i]
        t.set_text(text)
        plt.draw()
        plt.waitforbuttonpress()  
Example #29
 def display_optimal(self):
     tableau_latex = self.attain_tableau_latex()
     optimal_latex = r"""\noindent Optimal value: """ + dn(self.simplex.value) + r" \\ "
     optimal_latex += r"""Solution: """
     for i, s in enumerate(self.simplex.solution):
         if i != 0:
             optimal_latex += r", "
         optimal_latex += r"$x_" + str(i + 1) + r" = \,$" + dn(s[0])
     optimal_latex += r" \\"
     self.text.set_text(optimal_latex + tableau_latex)
     plt.waitforbuttonpress()
     plt.draw()
     plt.waitforbuttonpress()
Example #30
    def UserVsini(self, spectrum):
        """
          This does a Fourier transform on the spectrum, and then lets
        the user click on the first minimum, which indicates the vsini of the star.
        """
        # Set up plotting
        self.interactive_mode = "vsini"
        fig = plt.figure(1)
        cid = fig.canvas.mpl_connect('button_press_event', self.mouseclick)

        # Make wavelength spacing uniform
        xgrid = np.linspace(spectrum.x[0], spectrum.x[-1], spectrum.size())
        spectrum = FittingUtilities.RebinData(spectrum, xgrid)
        extend = np.array(40 * spectrum.size() * [1, ])
        spectrum.y = np.r_[extend, spectrum.y, extend]

        # Do the Fourier transform and keep the positive frequencies
        fft = np.fft.fft(spectrum.y - 1.0)
        freq = np.fft.fftfreq(spectrum.y.size, d=spectrum.x[1] - spectrum.x[0])
        good = np.where(freq > 0)[0]
        fft = fft[good].real ** 2 + fft[good].imag ** 2
        freq = freq[good]

        # Plot inside a loop, to let the user try a few times
        done = False
        trials = []
        plt.loglog(freq, fft)
        plt.xlim((1e-2, 10))
        plt.draw()
        for i in range(10):
            plt.waitforbuttonpress()
            sigma_1 = self.click.xdata
            if self.click.button == 1:
                c = constants.c.cgs.value * units.cm.to(units.km)
                vsini = 0.66 * c / (spectrum.x.mean() * sigma_1)
                print "vsini = ", vsini, " km/s"
                trials.append(vsini)
                plt.cla()
            else:
                done = True
                break

        fig.canvas.mpl_disconnect(cid)
        if len(trials) == 1:
            return trials[0]

        print "\n"
        for i, vsini in enumerate(trials):
            print "\t[%i]: vsini = %.1f km/s" % (i + 1, vsini)
        inp = raw_input("\nWhich vsini do you want to use (choose from the options above)? ")
        return trials[int(inp) - 1]
Example #31
def example2_KF():
    """
    Jet landing alttitude
    Simulated data for noise V_t
    Set initialize state from 1000
    Generated Data by U, V, X_init, A, B, C
    """

    # time t = 0 - 100
    T = 100
    #### Data simulation #### (below)
    ## Parameters ##
    # Sensor error
    R = np.array([[100]])
    # Prediction transition
    A = np.array([[0.95]])
    # Control transition
    B = np.array([[0.15]])
    # Measurement transition
    C = np.array([[0.5]])
    # Prediction Covariance init
    P = np.array([[1]])

    # True state at t = 0
    X_true = np.array([[1000]])
    X = np.zeros((1,T))
    Z = np.zeros((1,T))
    X[0,0] = X_true[0,0]
    # Control Signal (assumption in tutorial)
    U = np.zeros((1,T))
    U[0,:] = np.arange(T)
    # Noise added to measurement (assumption in tutorial)
    V = np.random.randint(-200,200,(1,T))
    Z[0,0] = C * X[0,0] + V[0,0]

    # Observation
    for t in range(1, T):
        _AX = A * X[0,t-1]
        _BU = B * U[0,t]
        X[0,t] = _AX + _BU

        _CX_V = C * X[0,t] + V[0,t]
        Z[0,t] = _CX_V 
    ##### Data simulation ####### (above)
    print("[INFO] Generated Data!")
    kf = KF()
    t = 0
    kf.setInit(X=Z[:1,t:t+1], Z=Z[:1,t:t+1], U=U[:1,t:t+1],
                A=A, B=B, C=C, R=R, P=P)
    plt.axis([0, T+10, -50, 1100])
    plt.grid(True)
    plt.plot(list(range(T)), X[0,:], color=(0,1,1))
    plt.scatter(t+1, Z[0,t], c='g')
    print("t:%d Z:%s X:%s"%(t, Z[0,t], kf.X))

    for t in range(1,T):
        plt.grid(True)
        #plt.pause(0.05)
        str_p = ["t:%d"%t]
        str_p.append("[G: %s]"%(kf.G))
        #print(t, 'G', ekf.G)
        plt.scatter(t+1, int(Z[0,t]),c='g')
        str_p.append("[Z: %s]"%(Z[0,t]))
        #print(t, 0, int(Z[0,t]), t+1)

        # initialize State
        kf.predict(U=U[:1,t:t+1])

        # update State
        kf.update(Z[:1,t:t+1])
        plt.scatter(t+1, int(kf.X[0,0]),c='r')
        str_p.append("[X+: %s]"%(kf.X[0,0]))
        #print(t, 1, int(ekf.X[0,0]), t+1)
        print(" ".join(str_p))
        keyboardClick = False
        while (keyboardClick != True) and ((t%50==0) or (t==T-1)):
            time.sleep(0.05)
            keyboardClick=plt.waitforbuttonpress()
Example #32
def show_and_save(savename):
    plt.pause(0.1)
    plt.savefig(savename, dpi=100)
    print("savename:", savename)
    plt.waitforbuttonpress()
    plt.clf()
Example #33
def train_gan():
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
    tf.reset_default_graph()
    tf.set_random_seed(1)
    # Build Networks
    # Network Inputs
    gen_input = tf.placeholder(tf.float32,
                               shape=[None, noise_dim],
                               name='input_noise')
    x = tf.placeholder(tf.float32, shape=[None, 28 * 28])
    disc_input = tf.reshape(x, [-1, 28, 28, 1])
    label_input = tf.placeholder(tf.float32,
                                 shape=[None, NUM_LABEL],
                                 name='label_input')
    # Build Generator Network
    generator = Generator()
    gen_sample = generator.build(gen_input, label_input)

    # Build 2 Discriminator Networks (one from noise input, one from generated samples)
    discriminator = Discriminator()
    disc_real = discriminator.build(disc_input)
    disc_fake = discriminator.build(gen_sample)

    # Build Loss
    gen_loss = -tf.reduce_mean(tf.log(disc_fake))
    disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))

    # Build Optimizers
    optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate)
    optimizer_disc = tf.train.AdamOptimizer(learning_rate=learning_rate)

    # Training Variables for each optimizer
    # By default in TensorFlow, all variables are updated by each optimizer,
    # so we need to specify for each one the exact variables to update.
    # Generator Network Variables
    gen_vars = [
        Generator.linear_w, Generator.linear_b, Generator.deconv_w1,
        Generator.deconv_w2, Generator.deconv_w3, Generator.deconv_b1,
        Generator.deconv_b2, Generator.deconv_b3
    ]
    # Discriminator Network Variables
    disc_vars = [
        Discriminator.conv_w1, Discriminator.conv_w2, Discriminator.conv_w3,
        Discriminator.conv_w4, Discriminator.conv_w5, Discriminator.conv_b1,
        Discriminator.conv_b2, Discriminator.conv_b3, Discriminator.conv_b4,
        Discriminator.conv_b5, Discriminator.linear_w, Discriminator.linear_b
    ]

    # Create training operations
    train_gen = optimizer_gen.minimize(gen_loss, var_list=gen_vars)
    train_disc = optimizer_disc.minimize(disc_loss, var_list=disc_vars)
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    # Start training
    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)

        for i in range(1, num_steps + 1):
            # Prepare Data
            # Get the next batch of MNIST data (only images are needed, not labels)
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Generate noise to feed to the generator
            z = np.random.uniform(-1., 1., size=[batch_size, noise_dim])

            # Train
            feed_dict = {x: batch_x, label_input: batch_y, gen_input: z}
            _, _, gl, dl = sess.run(
                [train_gen, train_disc, gen_loss, disc_loss],
                feed_dict=feed_dict)
            if i % 1000 == 0 or i == 1:
                print('Step %i: Generator Loss: %f, Discriminator Loss: %f' %
                      (i, gl, dl))

        # Generate images from noise, using the generator network.
        f, a = plt.subplots(4, 10, figsize=(10, 4))
        for i in range(10):
            # Noise input.
            z = np.random.uniform(-1., 1., size=[4, noise_dim])
            g = sess.run([gen_sample], feed_dict={gen_input: z})
            g = np.reshape(g, newshape=(4, 28, 28, 1))
            # Reverse colours for better display
            g = -1 * (g - 1)
            for j in range(4):
                # Generate image from noise. Extend to 3 channels for matplot figure.
                img = np.reshape(np.repeat(g[j][:, :, np.newaxis], 3, axis=2),
                                 newshape=(28, 28, 3))
                a[j][i].imshow(img)

        f.show()
        plt.draw()
        plt.waitforbuttonpress()
Example #34
for ax in axes.flatten():
    ax.set_xticks([])
    ax.set_yticks([])

while True:
    rand_idx = np.random.choice(np.arange(len(metadata)),
                                size=N,
                                replace=False)
    X, y = [], []
    for idx in rand_idx:
        filename = os.path.join(config("image_path"), metadata.loc[idx,
                                                                   "filename"])
        X.append(imread(filename))
        y.append(metadata.loc[idx, "semantic_label"])

    for i, (xi, yi) in enumerate(zip(X, y)):
        axes[0, i].imshow(xi)
        axes[0, i].set_title(yi)

    X_ = resize(np.array(X))
    X_ = standardizer.transform(X_)
    for i, (xi, yi) in enumerate(zip(X_, y)):
        axes[1, i].imshow(denormalize_image(xi), interpolation="bicubic")

    plt.draw()
    if plt.waitforbuttonpress(0) is None:
        break

print("OK, bye!")
Example #35
def average_all_results(all_s: List[SimStats], display_plots: bool):
    """Gather information regarding all runs and its metrics"""

    # gather summary information
    actors_wo_end = [
        len([1 for a in stats.actors if not a.reached_dest()])
        for stats in all_s
    ]
    avg_actors_not_finishing = np.sum(actors_wo_end) / len(all_s)

    actors_summary = [
        compute_average_over_time(stats.actors_in_graph) for stats in all_s
    ]
    edges_summary = [{
        str(e): compute_average_over_time(stats.edges_flow_over_time[e])
        for e in stats.edges_flow_over_time
    } for stats in all_s]

    # gather atis information
    atis_no = np.hstack([[a.total_travel_time for a in stats.actors]
                         for stats in all_s])

    results = {
        'avg_actors_not_finishing':
        avg_actors_not_finishing,
        'avg_actors': [np.mean(actors_summary),
                       np.std(actors_summary)],
        'avg_edges':
        defaultdict(lambda: []),
        'time_atis_no': [np.mean(atis_no), np.std(atis_no)]
        if len(atis_no) > 0 else [np.nan, np.nan]
    }

    for d in edges_summary:
        for d_k in d:
            results['avg_edges'][d_k].append(d[d_k])

    results['avg_edges'] = {
        e: [np.mean(results['avg_edges'][e]),
            np.std(results['avg_edges'][e])]
        for e in results['avg_edges']
    }

    # gather new information with atis separation
    actors_flow = defaultdict(lambda: [(0.0, 0)])
    for s in all_s:
        for key in s.actors_atis.keys():
            s.actors_atis[key] = s.actors_atis[key][1:]
            for actor_tuple in s.actors_atis[key]:
                actors_flow[key].append(actor_tuple)

    for key in actors_flow.keys():
        actors_flow[key] = sorted(actors_flow[key], key=lambda t: t[0])

    actors_flow_acc = defaultdict(lambda: [(0.0, 0)])

    for key in actors_flow.keys():
        for actor_tuple in actors_flow[key]:
            actors_flow_acc[key].append(
                [actor_tuple[0], actor_tuple[1] + actors_flow_acc[key][-1][1]])

    # plot_accumulated_actor_graph(actors_flow_acc, len(all_s))
    # plt.waitforbuttonpress(0)

    results['actors_atis_natis'] = actors_flow_acc

    # the above but for every edge
    inner_default_dict = lambda: defaultdict(lambda: [])
    results['edges_occupation'] = defaultdict(inner_default_dict)

    for s in all_s:
        edges = s.edges_flow_atis
        for key in edges.keys():
            for service in edges[key]:
                results['edges_occupation'][str(key)][service].append(
                    edges[key][service])

    for e_key in results['edges_occupation'].keys():
        edge_flow = defaultdict(lambda: [(0.0, 0)])
        # pretty(results['edges_occupation'][e_key])
        for actor_key in results['edges_occupation'][e_key]:
            for tuple_list in results['edges_occupation'][e_key][actor_key]:
                tuple_list = tuple_list[1:]
                for flow_tuple in tuple_list:
                    edge_flow[actor_key].append(flow_tuple)

        for key in edge_flow.keys():
            edge_flow[key] = sorted(edge_flow[key], key=lambda t: t[0])

        edge_flow_acc = defaultdict(lambda: [(0.0, 0)])
        for actor_key in edge_flow.keys():
            for edge_tuple in edge_flow[actor_key]:
                edge_flow_acc[actor_key].append([
                    edge_tuple[0],
                    edge_tuple[1] + edge_flow_acc[actor_key][-1][1]
                ])

        for actor_key in edge_flow_acc.keys():
            edge_flow_acc[actor_key] = edge_flow_acc[actor_key][1:]

        # print("acc")
        # print(edge_flow_acc)
        results['edges_occupation'][e_key] = edge_flow_acc
    emissions_dict = {"car": [], "bus": [], "sharedCar": [], "total": []}

    number_users_dict = {"car": [], "bus": [], "sharedCar": []}

    for run in all_s:
        run_emissions_dict = {"car": 0, "bus": 0, "sharedCar": 0, "total": 0}
        run_number_users_dict = {"car": 0, "bus": 0, "sharedCar": 0}
        for actor in run.actors:
            run_emissions_dict[actor.service] += actor.emissions
            run_number_users_dict[actor.service] += 1
            run_emissions_dict["total"] += actor.emissions

        emissions_dict["car"].append(run_emissions_dict["car"])
        emissions_dict["bus"].append(run_emissions_dict["bus"])
        emissions_dict["sharedCar"].append(run_emissions_dict["sharedCar"])
        emissions_dict["total"].append(run_emissions_dict["total"])

        number_users_dict["car"].append(run_number_users_dict["car"])
        number_users_dict["bus"].append(run_number_users_dict["bus"])
        number_users_dict["sharedCar"].append(
            run_number_users_dict["sharedCar"])

    np.save("{}_emissions".format(run_name), np.array(emissions_dict))
    np.save("{}_number".format(run_name), np.array(number_users_dict))

    if display_plots:
        plot_accumulated_actor_graph(actors_flow_acc, len(all_s))
        plot_accumulated_edges_graphs(results['edges_occupation'], len(all_s))
        plot_emissions_development(emissions_dict)
        plot_number_users_development(number_users_dict)
    plt.waitforbuttonpress(0)

    return results
Example #36
def imshow_wait_input(img):
  """imshow that waits for a key press or mouse click"""
  plt.imshow(img)
  plt.draw()
  plt.waitforbuttonpress(0)
Example #37
with tf.Session() as sess:

    sess.run(init)

    for step in range(1, training_steps+1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
                  "{:.3f}".format(acc))

            plt.scatter(step, acc, c='b')
            plt.pause(0.1)

    print("Optimization Finished!")

    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
plt.waitforbuttonpress()
Example #38
def train_network(noise_input, batch_size, num_steps, learning_rate,
                  g_noise_input, d_image_input):
    # First call to generator(); reuse keeps its default False, creating the
    # variables under the Generator variable scope
    g_output = generator(g_noise_input)
    # First call to discriminator(); reuse keeps its default False, creating
    # the variables under the Discriminator variable scope
    d_real_output = discriminator(d_image_input)

    # Calling discriminator() again with g_output as input shares the same
    # discriminator weights as the d_image_input call above, so reuse=True
    d_fake_output = discriminator(g_output, reuse=True)

    # Compute the loss functions
    d_real_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=d_real_output,
                                                       labels=tf.ones(
                                                           [batch_size],
                                                           dtype=tf.int32)))
    d_fake_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=d_fake_output,
                                                       labels=tf.zeros(
                                                           [batch_size],
                                                           dtype=tf.int32)))
    d_loss = d_real_loss + d_fake_loss

    g_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=d_fake_output,
                                                       labels=tf.ones(
                                                           [batch_size],
                                                           dtype=tf.int32)))
    # Define the optimizers
    g_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    d_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

    # Collect the training variables for G and D separately
    g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                               scope='Generator')
    d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                               scope='Discriminator')

    # Create the training ops
    g_train = g_optimizer.minimize(g_loss, var_list=g_vars)
    d_train = d_optimizer.minimize(d_loss, var_list=d_vars)

    # Initialize the variables
    init = tf.global_variables_initializer()

    # Start training
    sess = tf.Session()
    sess.run(init)

    for i in range(1, num_steps + 1):
        # Prepare data
        batch_x, _ = mnist.train.next_batch(batch_size)
        batch_x = np.reshape(batch_x, newshape=[-1, 28, 28, 1])

        # Train the discriminator D and the generator G
        z = np.random.uniform(-1, 1, size=[batch_size,
                                           noise_input])  # random noise input
        _, _, gl, dl = sess.run([g_train, d_train, g_loss, d_loss],
                                feed_dict={
                                    g_noise_input: z,
                                    d_image_input: batch_x,
                                    is_training: True
                                })
        if i % 100 == 0 or i == 1:
            print("Step %i: Generator Loss: %f, Discriminator Loss: %f" %
                  (i, gl, dl))

    # Use the generator network to produce images from noise
    f, a = plt.subplots(4, 10, figsize=(10, 4))
    for i in range(10):
        # Noise input.
        z = np.random.uniform(-1., 1., size=[4, noise_input])
        g = sess.run(g_output, feed_dict={g_noise_input: z})
        for j in range(4):
            # Generate image from noise. Extend to 3 channels for matplot figure.
            img = np.reshape(np.repeat(g[j][:, :, np.newaxis], 3, axis=2),
                             newshape=(28, 28, 3))
            a[j][i].imshow(img)

    f.show()
    plt.draw()
    plt.waitforbuttonpress()
Example #39
def imshow(img):
    # reverse the channel order: BGR (as from OpenCV) to RGB for matplotlib
    plt.imshow(img[:, :, [2, 1, 0]])
    plt.axis("off")
    plt.waitforbuttonpress()
Example #40
        plt.imshow(img_origin)
        plt.scatter(x=[max_col], y=[max_row], c='r', s=30)
        plt.title('scale response is: ' +
                  str(['{:.3f}'.format(i) for i in max_list]))

        plt.subplot(1, 2, 2)
        im = plt.imshow(out_mean * 1.0 / 255)
        from matplotlib import colors
        norm = colors.Normalize(vmin=0, vmax=1)
        im.set_norm(norm)
        plt.colorbar(ticks=np.linspace(0, 1.0, 10, endpoint=True))
        plt.title('resnet original size maximum response is %.2f' %
                  (out_list[0].max()))
        plt.draw()
        plt.waitforbuttonpress(0.1)
        plt.savefig(
            './exp_dir/fish_localise/imgs/resnet50_fish_detect_train/' +
            img_list[im_num],
            bbox_inches='tight')
###############################################

test_directory = '/home/stevenwudi/Documents/Python_Project/Kaggle_The_Nature_Conversancy_Fisheries_Monitoring/test_stg1'
img_list = os.listdir(test_directory)

# maximize the figure
fig, axes = plt.subplots(nrows=2, ncols=3)
# figManager = plt.get_current_fig_manager()
# figManager.window.showMaximized()
figManager = plt.gcf()
figManager.set_size_inches(40, 15)
Example #41
def fitqsocont(wa,
               fl,
               er,
               redshift,
               oldco=None,
               knots=None,
               nbin=1,
               divmult=1,
               forest_divmult=1,
               atmos=True,
               debug=False):
    """ Find an estimate of a QSO continuum.

    divmult=3 works well for R~40000, S/N~10, z=3 QSO spectrum.

    nbin bins the data for plotting and continuum fitting (obsolete)
    """
    # choose initial reference continuum points.  Increase divmult for
    # fewer initial continuum points (generally needed for poorer S/N
    # spectra).

    zp1 = 1 + redshift
    #reflines = np.array([1025.72, 1215.6701, 1240.14, 1398.0,
    #                     1549.06, 1908,      2800            ])

    # generate the edges of wavelength chunks to send to fitting routine

    # these edges and divisions are generated by trial and error

    # for S/N = 15ish and resolution = 2000ish
    div = np.rec.fromrecords([
        (500., 800., 25),
        (800., 1190., 25),
        (1190., 1213., 4),
        (1213., 1230., 6),
        (1230., 1263., 6),
        (1263., 1290., 5),
        (1290., 1340., 5),
        (1340., 1370., 2),
        (1370., 1410., 5),
        (1410., 1515., 5),
        (1515., 1600., 15),
        (1600., 1800., 8),
        (1800., 1900., 5),
        (1900., 1940., 5),
        (1940., 2240., 15),
        (2240., 3000., 25),
        (3000., 6000., 80),
        (6000., 20000., 100),
    ],
                             names=str('left,right,num'))

    div.num[2:] = np.ceil(div.num[2:] * divmult)
    div.num[:2] = np.ceil(div.num[:2] * forest_divmult)
    div.left *= zp1
    div.right *= zp1
    if debug: print(div.tolist())
    temp = [np.linspace(left, right, n + 1)[:-1] for left, right, n in div]
    edges = np.concatenate(temp)
    if debug: stats(edges)

    i0, i1, i2 = edges.searchsorted([wa[0], 1210 * zp1, wa[-1]])
    if debug: print(i0, i1, i2)

    contpoints = []
    if knots is not None:
        contpoints.extend(knots)
    else:
        co, cp = spline_continuum(wa, fl, er, edges[i0:i2], debug=debug)
        contpoints.extend(cp)
    fig = pl.figure(figsize=(11, 7))
    fig.subplots_adjust(left=0.05, right=0.95, bottom=0.1, top=0.95)
    wrapper = InteractiveCoFit(wa,
                               fl,
                               er,
                               contpoints,
                               co=oldco,
                               nbin=nbin,
                               redshift=redshift,
                               fig=fig,
                               atmos=atmos)
    while True:
        if wrapper.finished: break
        pl.waitforbuttonpress()

    return wrapper.continuum, wrapper.contpoints
Example #42
            st = (d-1)*eband_K
            plt.plot(outputs[frame,st:st+eband_K],'g')
            plt.plot(outputs_lin[frame,st:st+eband_K],'b')
            plt.plot(outputs_linstep[frame,st:st+eband_K],'c')
            if args.nn:
                plt.plot(outputs_nnest[frame,st:st+eband_K],'r')
            elif args.nnpf:
                plt.plot(outputs_linpf_est[frame,st:st+eband_K],'r')
            else:
                plt.plot(outputs_linpf[frame,st:st+eband_K],'r')
        plt.ylim((0,10))
    var_lin = np.var(10*outputs[frame,:]-10*outputs_lin[frame,:])
    var_linpf = np.var(10*outputs[frame,:]-10*outputs_linpf[frame,:])
    var_linstep = np.var(10*outputs[frame,:]-10*outputs_linstep[frame,:])
    print("frame: %d var_lin (b): %3.2f " % (frame,var_lin), end='')
    if args.nn:
        var_nnest = np.var(10*outputs[frame,:]-10*outputs_nnest[frame,:])
        print("var_nnest(r): %3.2f" % (var_nnest), end='')
    elif args.nnpf:
        var_nnpfest = np.var(10*outputs[frame,:]-10*outputs_linpf_est[frame,:])
        print("var_nnpfest(r): %3.2f" % (var_nnpfest), end='')
    else:
        print("var_linpf(r): %3.2f var_linstep(c): %3.2f" % (var_linpf, var_linstep), end='')
        
    print(flush=True)
    plt.show(block=False)

    loop = plt.waitforbuttonpress(0)
    frame += 1
    plt.clf()
Example #43
 def plot_data(d):
     for c in range(self.num_classes):
         for n in range(self.num_nuisances):
             plt.scatter(*d[c][n].T, c=colours[c])
     plt.waitforbuttonpress()
Example #44
def redraw():
    plt.gcf().canvas.draw()
    plt.waitforbuttonpress(timeout=0.001)
    time.sleep(wait)
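In redraw() above, waitforbuttonpress with a near-zero timeout is used only to flush pending GUI events between frames, much like plt.pause; a standalone sketch of that pattern (the frame data and the wait delay are assumptions):

import time
import numpy as np
import matplotlib.pyplot as plt

wait = 0.05  # assumed module-level delay used by redraw() above

# The tiny waitforbuttonpress timeout processes pending GUI events
# without blocking, so the animation keeps moving.
x = np.linspace(0, 2 * np.pi, 200)
line, = plt.plot(x, np.sin(x))
for phase in np.linspace(0, 2 * np.pi, 60):
    line.set_ydata(np.sin(x + phase))
    plt.gcf().canvas.draw()
    plt.waitforbuttonpress(timeout=0.001)
    time.sleep(wait)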
Example #45
    def fit(self, X, Y, verbose=False):
        """
        Trains the model.

        Parameters
        ----------
        X: ndarray
            features having shape (n_samples, dim).
        Y: ndarray
            class labels having shape (n_samples,).
        verbose: bool
            whether or not to visualize the learning process.
            Default is False
        """

        # some inits
        n, d = X.shape
        if d != 2:
            verbose = False  # only plot learning if 2 dimensional

        self.possible_labels = np.unique(Y)

        # only binary problems please
        assert self.possible_labels.size == 2, 'Error: data is not binary'

        # initialize the sample weights as equally probable
        sample_weights = np.ones(shape=n) / n

        # start training
        for l in range(0, self.n_learners):

            # choose the indexes of 'difficult' samples (np.random.choice)
            cur_idx = np.random.choice(a=range(0, n),
                                       size=n,
                                       replace=True,
                                       p=sample_weights)

            # extract 'difficult' samples
            cur_X = X[cur_idx]
            cur_Y = Y[cur_idx]

            # search for a weak classifier
            error = 1
            n_trials = 0
            while error > 0.5:

                # select random feature (np.random.choice)
                cur_dim = np.random.choice(a=range(0, d))

                # select random split (np.random.uniform)
                M, m = np.max(cur_X[:, cur_dim]), np.min(cur_X[:, cur_dim])
                cur_split = np.random.uniform(low=m, high=M)

                # select random verse (np.random.choice)
                label_above_split = np.random.choice(a=self.possible_labels)
                label_below_split = -label_above_split

                # compute assignment
                cur_assignment = np.zeros(shape=n)
                cur_assignment[cur_X[:,
                                     cur_dim] >= cur_split] = label_above_split
                cur_assignment[cur_X[:,
                                     cur_dim] < cur_split] = label_below_split

                # compute error
                error = np.sum(
                    sample_weights[cur_idx[cur_Y != cur_assignment]])

                n_trials += 1
                if n_trials > 100:
                    # initialize the sample weights again
                    sample_weights = np.ones(shape=n) / n

            # save weak learner parameter
            alpha = np.log((1 - error) / error) / 2
            self.alphas[l] = alpha
            self.dims[l] = cur_dim
            self.splits[l] = cur_split
            self.label_above_split[l] = label_above_split

            # update sample weights
            sample_weights[cur_idx[cur_Y != cur_assignment]] *= np.exp(alpha)
            sample_weights[cur_idx[cur_Y == cur_assignment]] *= np.exp(-alpha)
            sample_weights /= np.sum(sample_weights)

            if verbose:
                # plot
                plt.clf()
                plt.scatter(cur_X[:, 0],
                            cur_X[:, 1],
                            c=cur_assignment,
                            s=sample_weights[cur_idx] * 50000,
                            cmap=cmap,
                            edgecolors='k')
                M1, m1 = np.max(X[:, 1]), np.min(X[:, 1])
                M0, m0 = np.max(X[:, 0]), np.min(X[:, 0])
                if cur_dim == 0:
                    plt.plot([cur_split, cur_split], [m1, M1], 'k-', lw=5)
                else:
                    plt.plot([m0, M0], [cur_split, cur_split], 'k-', lw=5)
                plt.xlim([m0, M0])
                plt.ylim([m1, M1])
                plt.xticks([])
                plt.yticks([])
                plt.title('Iteration: {:04d}'.format(l))
                plt.waitforbuttonpress(timeout=0.1)
Example #46
def example1_EKF():
    """
    Jet landing altitude
    X_true - true state altitude in meters
    Z - observation (measurement) altitude in meters
    """
    # time t = 0 - 9
    # True state
    X_true = np.array([[1000, 750, 563, 422, 316, 237, 178, 133, 100, 75]])
    # Observation
    Z = np.array([[1090, 882, 554, 233, 345, 340, 340, 79, 299, -26]])

    ## Parameters ##
    # Sensor error
    R = np.array([[200]])
    # Prediction transition
    A = np.array([[0.75]])
    # Prediction Covariance init
    P = np.array([[1]])

    ## Initialize State ##
    ekf = EKF()
    t = 0
    def f(X, U): return np.dot(A, X.copy()) , A.copy()
    def h(Z): return Z.copy(), np.eye(ekf._sensDim)

    ekf.f = f
    ekf.h = h

    ekf.setInit(X=Z[:1,t:t+1], Z=Z[:1,t:t+1], R=R, P=P)


    plt.axis([0, 11, -50, 1100])
    plt.grid(True)
    plt.scatter(t+1, Z[0,t], c='g')
    print("t:%d Z:%s X:%s"%(t, Z[0,t], ekf.X))

    #plt.show()

    ## loop over the observations
    for t in range(1, X_true.shape[1]):
        plt.grid(True)
        plt.pause(0.05)
        str_p = ["t:%d"%t]
        str_p.append("[G: %s]"%(ekf.G))
        #print(t, 'G', ekf.G)
        plt.scatter(t+1, int(Z[0,t]),c='g')
        str_p.append("[Z: %s]"%(Z[0,t]))
        #print(t, 0, int(Z[0,t]), t+1)

        # initialize State
        ekf.predict()

        # update State
        ekf.update(Z[:1,t:t+1])
        plt.scatter(t+1, int(ekf.X[0,0]),c='r')
        str_p.append("[X+: %s]"%(ekf.X[0,0]))
        #print(t, 1, int(ekf.X[0,0]), t+1)
        print(" ".join(str_p))
        keyboardClick = False
        while not keyboardClick:
            time.sleep(0.05)
            keyboardClick=plt.waitforbuttonpress()
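# The EKF class itself is not shown in these examples; below is a hedged
# numpy sketch of the textbook predict/update cycle that the predict()/
# update() calls above correspond to. X, P, G and R mirror the attributes
# the example prints; f returns the propagated state with its Jacobian, and
# h (applied here to the predicted state, unlike the example's signature)
# returns the predicted measurement with its Jacobian.
import numpy as np

def ekf_step(X, P, Z, R, f, h, U=None, Q=0.0):
    X_pred, F = f(X, U)                     # predict state
    P_pred = F @ P @ F.T + Q                # predict covariance
    Z_pred, H = h(X_pred)                   # predicted measurement
    S = H @ P_pred @ H.T + R                # innovation covariance
    G = P_pred @ H.T @ np.linalg.inv(S)     # Kalman gain
    X_new = X_pred + G @ (Z - Z_pred)       # corrected state
    P_new = (np.eye(P.shape[0]) - G @ H) @ P_pred
    return X_new, P_new, G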
Exemple #47
0
        #displayLineModel(x0, y0, x0[idx[0]], y0[idx[0]], x0[idx[1]], y0[idx[1]], 'r')
        #plt.waitforbuttonpress(0.001)
    #if len(list_lines) - 2 > 2:
    list_lines = list_lines[-3:]
    lines = np.vstack([lines, [y_best_1, x_best_1, y_best_2, x_best_2]])

    # Display the lines
    plt.figure('Ransac 1')
    plt.clf()
    plt.imshow(imBGR[..., ::-1] // 2)
    plt.scatter(x_original, y_original, color='r', s=1)
    data = np.argwhere(m_estimator_sac_best != 0)
    #plt.scatter(x_original[best_inliers_Mask], y_original[best_inliers_Mask], c='b')
    index = 0
    cmap = get_cmap(len(list_lines))
    for i in range(len(list_lines)):
        plt.scatter(list_lines[i][2][list_lines[i][3]],
                    list_lines[i][3][list_lines[i][3]],
                    c=cmap(index))
        y1, x1, y2, x2 = list_lines[i][0]
        plt.plot([x1, x2], [y1, y2], 'g', marker='o', linewidth=2)
        index += 1
    plt.xlim(0, imBGR.shape[1])
    plt.ylim(imBGR.shape[0], 0)

    # Use either of the following calls:
    plt.waitforbuttonpress(0.001)  # pause briefly; useful to get an animation
    frameIdx += 10
    # plt.waitforbuttonpress()  # block until a button (click or key) is pressed
Exemple #48
0
import matplotlib.pyplot as plt

from multiple_view_geometry.scene import Scene
from multiple_view_geometry.cube import Cube
from multiple_view_geometry.camera import Camera
from multiple_view_geometry.camera_image_renderer import CameraImageRenderer
from multiple_view_geometry.homogeneous_matrix import HomogeneousMatrix
from multiple_view_geometry.transform_utils import create_rotation_mat_from_rpy

if __name__ == "__main__":
    camera0_extrinsic = HomogeneousMatrix.create([1.7, 0.0, 0.5],
                                                 create_rotation_mat_from_rpy(
                                                     -np.pi / 2, 0,
                                                     -np.pi / 4))
    camera0 = Camera("0", camera0_extrinsic)
    camera1_extrinsic = HomogeneousMatrix.create([2.3, 0.0, 0.5],
                                                 create_rotation_mat_from_rpy(
                                                     -np.pi / 2, 0.0, 0))
    camera1 = Camera("1", camera1_extrinsic)

    cube = Cube((2, 3, 0), (2, 2, 2), resolution=1)
    renderer = CameraImageRenderer({
        camera0: "red",
        camera1: "blue"
    },
                                   show_image_frame=True,
                                   show_epipolar_lines=True)
    scene = Scene(cube, [camera0, camera1], renderer)
    scene.project(True)
    plt.waitforbuttonpress(-1)
Exemple #49
0
def keep_plot_open():
    plt.show()
    plt.waitforbuttonpress(0)
Exemple #50
0
ax.scatter(*zip(*samp1), c="blue", s=50, label="mean=(-1,0), sigma=0.5")
ax.scatter(*zip(*samp2), c="green", s=50, label="mean=(1,0), sigma=1.0")
ax.scatter(*mean1, c="blue", s=500, marker="+")
ax.scatter(*mean2, c="green", s=500, marker="+")
ax.scatter(*samp1_mu, c="blue", s=500, marker="o", alpha=0.4)
ax.scatter(*samp2_mu, c="green", s=500, marker="o", alpha=0.4)
ax.legend(bbox_to_anchor=(1.4, 1.05))
fig.subplots_adjust(right=0.7)

xlim = [-2.0, 4.0]
ylim = [-3.0, 3.0]
ax.set(xlim=xlim)
ax.set(ylim=ylim)
plt.draw()
if args.pdf: plt.savefig(pdf, format="pdf")
else: plt.waitforbuttonpress()  #pause(10)

### naive linear classifier

m1x = samp1_mu[0]
m1y = samp1_mu[1]
m2x = samp2_mu[0]
m2y = samp2_mu[1]
P = (0.5 * (m1x + m2x), 0.5 * (m1y + m2y))
N = (m2x - m1x, m2y - m1y)
ABC = _sl_gen(P, N=N)
naive_lcr = LinClassifier(ABC, samp2_mu)
r1, w1, u1 = naive_lcr.test(-1, 1000, NV1)
r2, w2, u2 = naive_lcr.test(1, 1000, NV2)
acc = (r1 + r2) / 20.0
print "Naive classifier testing:"
Exemple #51
0
    return array[index]


def random_number(size):
    return random.randint(0, size - 1)


end_dir = os.path.dirname(__file__)
end_dir = end_dir + "/Uloha2"
image_array = np.ndarray(shape=(4, 100, 100, 3), dtype=np.float32)
for parent in os.listdir(end_dir):
    if ("Test" in parent) or ("Training" in parent):
        for x in range(0, 4):
            image = get_image(end_dir + "/" + parent)
            image_array[x, :, :] = image

        img0 = cv2.cvtColor(image_array[0], cv2.COLOR_RGB2BGR)
        img1 = cv2.cvtColor(image_array[1], cv2.COLOR_RGB2BGR)
        img2 = cv2.cvtColor(image_array[2], cv2.COLOR_RGB2BGR)
        img3 = cv2.cvtColor(image_array[3], cv2.COLOR_RGB2BGR)

        plt.subplot(221), plt.imshow(img0), plt.title(parent)
        plt.subplot(222), plt.imshow(img1), plt.title(parent)
        plt.subplot(223), plt.imshow(img2), plt.title(parent)
        plt.subplot(224), plt.imshow(img3), plt.title(parent)

        plt.tight_layout()
        plt.imshow(img0)
        plt.waitforbuttonpress(15)
        plt.close()
Exemple #52
0
def tellme(s):
    print(s)
    plt.title(s, fontsize=16)
    plt.draw()


##################################################
# Define a triangle by clicking three points
##################################################
plt.clf()
plt.axis([-1., 1., -1., 1.])
plt.setp(plt.gca(), autoscale_on=False)

tellme('You will define a triangle, click to begin')

plt.waitforbuttonpress()

happy = False
while not happy:
    pts = []
    while len(pts) < 3:
        tellme('Select 3 corners with mouse')
        pts = np.asarray(plt.ginput(3, timeout=-1))
        if len(pts) < 3:
            tellme('Too few points, starting over')
            time.sleep(1)  # Wait a second

    ph = plt.fill(pts[:, 0], pts[:, 1], 'r', lw=2)

    tellme('Happy? Key click for yes, mouse click for no')

    if plt.waitforbuttonpress():
        break

    # get rid of fill so the user can try again
    for p in ph:
        p.remove()
Exemple #53
0
        # algorithm in the book, but there are other options as well
        u_n = n / N + noise
        while u_n > cumweights[i]:
            i += 1
        pxn[n] = px[i]
    # indicesout = indicesout[np.randperm(np.size(indicesout))]
    rng.shuffle(pxn, axis=0)

    w.fill(1 / N)
    # else:
    #     pxn = px[:]

    N_eff = 1 / np.sum(w**2)

    # trajectory sample prediction
    for n in range(N):
        vkn = PF_dynamic_distribution.rvs()
        px[n] = pendulum_dynamics_discrete(pxn[n], vkn, Ts, a)

    # plot
    sch_particles.set_offsets(np.c_[l * np.sin(pxn[:, 0]),
                                    -l * np.cos(pxn[:, 0])])
    sch_true.set_offsets(np.c_[l * np.sin(x[k, 0]), -l * np.cos(x[k, 0])])

    fig4.canvas.draw_idle()
    plt.show(block=False)
    plt.waitforbuttonpress(plotpause)

plt.waitforbuttonpress()
# %%
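# The resampling fragment above implements systematic resampling: one shared
# uniform offset and N evenly spaced pointers into the cumulative weights.
# A self-contained, hedged sketch of the same scheme, with assumed names:
import numpy as np

def systematic_resample(particles, weights, rng):
    N = len(weights)
    cumweights = np.cumsum(weights)
    noise = rng.uniform(0.0, 1.0 / N)   # single offset shared by all pointers
    out = np.empty_like(particles)
    i = 0
    for n in range(N):
        u_n = n / N + noise             # n-th evenly spaced pointer
        while u_n > cumweights[i]:
            i += 1
        out[n] = particles[i]
    return out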
Exemple #54
0
def get_reference_sl(metadata, settings):
    """
    Allows the user to manually digitize a reference shoreline that is used to
    seed the shoreline detection algorithm. The reference shoreline helps to
    detect the outliers, making the shoreline detection more robust.

    KV WRL 2018

    Arguments:
    -----------
    metadata: dict
        contains all the information about the satellite images that were downloaded
    settings: dict with the following keys
        'inputs': dict
            input parameters (sitename, filepath, polygon, dates, sat_list)
        'cloud_thresh': float
            value between 0 and 1 indicating the maximum cloud fraction in 
            the cropped image that is accepted
        'cloud_mask_issue': boolean
            True if there is an issue with the cloud mask and sand pixels
            are erroneously being masked on the images
        'output_epsg': int
            output spatial reference system as EPSG code

    Returns:
    -----------
    reference_shoreline: np.array
        coordinates of the reference shoreline that was manually digitized. 
        This is also saved as a .pkl and .geojson file.

    """

    sitename = settings['inputs']['sitename']
    filepath_data = settings['inputs']['filepath']
    pts_coords = []
    # check if reference shoreline already exists in the corresponding folder
    filepath = os.path.join(filepath_data, sitename)
    filename = sitename + '_reference_shoreline.pkl'
    # if it exists, load it and return it
    if filename in os.listdir(filepath):
        print('Reference shoreline already exists and was loaded')
        with open(os.path.join(filepath, sitename + '_reference_shoreline.pkl'), 'rb') as f:
            refsl = pickle.load(f)
        return refsl
    
    # otherwise get the user to manually digitise a shoreline on S2, L8 or L5 images (no L7 because of scan line error)
    else:
        # first try to use S2 images (10m res for manually digitizing the reference shoreline)
        if 'S2' in metadata.keys():
            satname = 'S2'
            filepath = SDS_tools.get_filepath(settings['inputs'],satname)
            filenames = metadata[satname]['filenames']
        # if no S2 images, try L8  (15m res in the RGB with pansharpening)
        elif not 'S2' in metadata.keys() and 'L8' in metadata.keys():
            satname = 'L8'
            filepath = SDS_tools.get_filepath(settings['inputs'],satname)
            filenames = metadata[satname]['filenames']
        # if no S2 images and no L8, use L5 images (L7 images have black diagonal bands making it
        # hard to manually digitize a shoreline)
        elif not 'S2' in metadata.keys() and not 'L8' in metadata.keys() and 'L5' in metadata.keys():
            satname = 'L5'
            filepath = SDS_tools.get_filepath(settings['inputs'],satname)
            filenames = metadata[satname]['filenames']
        else:
            raise Exception('You cannot digitize the shoreline on L7 images (because of gaps in the images), add another L8, S2 or L5 to your dataset.')
            
        # create figure
        fig, ax = plt.subplots(1,1, figsize=[18,9], tight_layout=True)
        mng = plt.get_current_fig_manager()
        mng.window.showMaximized()
        # loop through the images
        for i in range(len(filenames)):

            # read image
            fn = SDS_tools.get_filenames(filenames[i],filepath, satname)
            im_ms, georef, cloud_mask, im_extra, im_QA, im_nodata = preprocess_single(fn, satname, settings['cloud_mask_issue'])

            # compute cloud_cover percentage (with no data pixels)
            cloud_cover_combined = np.divide(sum(sum(cloud_mask.astype(int))),
                                    (cloud_mask.shape[0]*cloud_mask.shape[1]))
            if cloud_cover_combined > 0.99: # skip image if more than 99% of pixels are cloudy
                continue

            # remove no data pixels from the cloud mask (for example L7 bands of no data should not be accounted for)
            cloud_mask_adv = np.logical_xor(cloud_mask, im_nodata)
            # compute updated cloud cover percentage (without no data pixels)
            cloud_cover = np.divide(sum(sum(cloud_mask_adv.astype(int))),
                                    (sum(sum((~im_nodata).astype(int)))))

            # skip image if cloud cover is above threshold
            if cloud_cover > settings['cloud_thresh']:
                continue

            # rescale image intensity for display purposes
            im_RGB = rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)

            # plot the image RGB on a figure
            ax.axis('off')
            ax.imshow(im_RGB)

            # decide if the image is good enough for digitizing the shoreline
            ax.set_title('Press <right arrow> if image is clear enough to digitize the shoreline.\n' +
                      'If the image is cloudy press <left arrow> to get another image', fontsize=14)
            # set a key event to accept/reject the detections (see https://stackoverflow.com/a/15033071)
            # this variable needs to be mutable so we can access it after the keypress event
            skip_image = False
            key_event = {}
            def press(event):
                # store what key was pressed in the dictionary
                key_event['pressed'] = event.key
            # let the user press a key, right arrow to keep the image, left arrow to skip it
            # to break the loop the user can press 'escape'
            while True:
                btn_keep = plt.text(1.1, 0.9, 'keep ⇨', size=12, ha="right", va="top",
                                    transform=ax.transAxes,
                                    bbox=dict(boxstyle="square", ec='k',fc='w'))
                btn_skip = plt.text(-0.1, 0.9, '⇦ skip', size=12, ha="left", va="top",
                                    transform=ax.transAxes,
                                    bbox=dict(boxstyle="square", ec='k',fc='w'))
                btn_esc = plt.text(0.5, 0, '<esc> to quit', size=12, ha="center", va="top",
                                    transform=ax.transAxes,
                                    bbox=dict(boxstyle="square", ec='k',fc='w'))
                plt.draw()
                fig.canvas.mpl_connect('key_press_event', press)
                plt.waitforbuttonpress()
                # after button is pressed, remove the buttons
                btn_skip.remove()
                btn_keep.remove()
                btn_esc.remove()
                # keep/skip image according to the pressed key, 'escape' to break the loop
                if key_event.get('pressed') == 'right':
                    skip_image = False
                    break
                elif key_event.get('pressed') == 'left':
                    skip_image = True
                    break
                elif key_event.get('pressed') == 'escape':
                    plt.close()
                    raise StopIteration('User cancelled checking shoreline detection')
                else:
                    plt.waitforbuttonpress()
                    
            if skip_image:
                ax.clear()
                continue
            else:
                # create two new buttons
                add_button = plt.text(0, 0.9, 'add', size=16, ha="left", va="top",
                                       transform=plt.gca().transAxes,
                                       bbox=dict(boxstyle="square", ec='k',fc='w'))
                end_button = plt.text(1, 0.9, 'end', size=16, ha="right", va="top",
                                       transform=plt.gca().transAxes,
                                       bbox=dict(boxstyle="square", ec='k',fc='w'))
                # add multiple reference shorelines (until user clicks on <end> button)
                pts_sl = np.expand_dims(np.array([np.nan, np.nan]),axis=0)
                geoms = []
                while 1:
                    add_button.set_visible(False)
                    end_button.set_visible(False)
                    # update title (instructions)
                    ax.set_title('Click points along the shoreline (enough points to capture the beach curvature).\n' +
                              'Start at one end of the beach.\n' + 'When finished digitizing, click <ENTER>',
                              fontsize=14)
                    plt.draw()

                    # let user click on the shoreline
                    pts = ginput(n=50000, timeout=-1, show_clicks=True)
                    pts_pix = np.array(pts)
                    # convert pixel coordinates to world coordinates
                    pts_world = SDS_tools.convert_pix2world(pts_pix[:,[1,0]], georef)

                    # interpolate between points clicked by the user (1m resolution)
                    pts_world_interp = np.expand_dims(np.array([np.nan, np.nan]),axis=0)
                    for k in range(len(pts_world)-1):
                        pt_dist = np.linalg.norm(pts_world[k,:]-pts_world[k+1,:])
                        xvals = np.arange(0,pt_dist)
                        yvals = np.zeros(len(xvals))
                        pt_coords = np.zeros((len(xvals),2))
                        pt_coords[:,0] = xvals
                        pt_coords[:,1] = yvals
                        phi = 0
                        deltax = pts_world[k+1,0] - pts_world[k,0]
                        deltay = pts_world[k+1,1] - pts_world[k,1]
                        phi = np.pi/2 - np.math.atan2(deltax, deltay)
                        tf = transform.EuclideanTransform(rotation=phi, translation=pts_world[k,:])
                        pts_world_interp = np.append(pts_world_interp,tf(pt_coords), axis=0)
                    pts_world_interp = np.delete(pts_world_interp,0,axis=0)

                    # save as geometry (to create .geojson file later)
                    geoms.append(geometry.LineString(pts_world_interp))

                    # convert to pixel coordinates and plot
                    pts_pix_interp = SDS_tools.convert_world2pix(pts_world_interp, georef)
                    pts_sl = np.append(pts_sl, pts_world_interp, axis=0)
                    ax.plot(pts_pix_interp[:,0], pts_pix_interp[:,1], 'r--')
                    ax.plot(pts_pix_interp[0,0], pts_pix_interp[0,1],'ko')
                    ax.plot(pts_pix_interp[-1,0], pts_pix_interp[-1,1],'ko')

                    # update title and buttons
                    add_button.set_visible(True)
                    end_button.set_visible(True)
                    ax.set_title('click on <add> to digitize another shoreline or on <end> to finish and save the shoreline(s)',
                              fontsize=14)
                    plt.draw()

                    # let the user click again (<add> another shoreline or <end>)
                    pt_input = ginput(n=1, timeout=-1, show_clicks=False)
                    pt_input = np.array(pt_input)

                    # if user clicks on <end>, save the points and break the loop
                    if pt_input[0][0] > im_ms.shape[1]/2:
                        add_button.set_visible(False)
                        end_button.set_visible(False)
                        plt.title('Reference shoreline saved as ' + sitename + '_reference_shoreline.pkl and ' + sitename + '_reference_shoreline.geojson')
                        plt.draw()
                        ginput(n=1, timeout=3, show_clicks=False)
                        plt.close()
                        break

                pts_sl = np.delete(pts_sl,0,axis=0)
                # convert world image coordinates to user-defined coordinate system
                image_epsg = metadata[satname]['epsg'][i]
                pts_coords = SDS_tools.convert_epsg(pts_sl, image_epsg, settings['output_epsg'])

                # save the reference shoreline as .pkl
                filepath = os.path.join(filepath_data, sitename)
                with open(os.path.join(filepath, sitename + '_reference_shoreline.pkl'), 'wb') as f:
                    pickle.dump(pts_coords, f)

                # also store as .geojson in case user wants to drag-and-drop on GIS for verification
                for k,line in enumerate(geoms):
                    gdf = gpd.GeoDataFrame(geometry=gpd.GeoSeries(line))
                    gdf.index = [k]
                    gdf.loc[k,'name'] = 'reference shoreline ' + str(k+1)
                    # store into geodataframe
                    if k == 0:
                        gdf_all = gdf
                    else:
                        gdf_all = gdf_all.append(gdf)
                gdf_all.crs = {'init':'epsg:'+str(image_epsg)}
                # convert from image_epsg to user-defined coordinate system
                gdf_all = gdf_all.to_crs({'init': 'epsg:'+str(settings['output_epsg'])})
                # save as geojson
                gdf_all.to_file(os.path.join(filepath, sitename + '_reference_shoreline.geojson'),
                                driver='GeoJSON', encoding='utf-8')

                print('Reference shoreline has been saved in ' + filepath)
                break
            
    # check if a shoreline was digitised
    if len(pts_coords) == 0:
        raise Exception('No cloud free images are available to digitise the reference shoreline, '+
                        'download more images and try again')

    return pts_coords
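# A hedged usage sketch for get_reference_sl(), built from the settings keys
# documented above; the site name, paths, polygon and dates are placeholders,
# and `metadata` is assumed to come from the toolbox's download step.
settings = {
    'inputs': {'sitename': 'SITE',
               'filepath': './data',
               'polygon': [[151.30, -33.70], [151.35, -33.70],
                           [151.35, -33.75], [151.30, -33.75]],
               'dates': ['2019-01-01', '2019-06-01'],
               'sat_list': ['S2']},
    'cloud_thresh': 0.5,        # max accepted cloud fraction
    'cloud_mask_issue': False,  # set True if sand pixels are wrongly masked
    'output_epsg': 28356,       # output spatial reference system
}
reference_shoreline = get_reference_sl(metadata, settings)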
Exemple #55
0
    def do_segmentation(self, test_image_name="cheetah.bmp",
                        ground_truth_image_name=None, sliding_window=True):
        """
        Perform segmentation using Bayesian Decision Rule
        :param test_image_name: Image to be tested for segmentation
        :param ground_truth_image_name: Segmentation mask  (ground truth), to compare algorithm results
        :param sliding_window: Do sliding window based prediction (for better results)

        """
        assert isinstance(test_image_name, str)

        if ground_truth_image_name:
            assert isinstance(ground_truth_image_name, str)

        test_image = cv2.imread(test_image_name)
        # convert to GScale cv2 reads images by default as BGR
        test_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)

        assert len(test_image.shape) == 2

        # Stride of 1, Therefore for SAME padding, p1+p2  = f - 1, where p1=top, p2=bottom, f=filter
        # cv2.BORDER_DEFAULT pads the border with reflection padding(but doesn't repeat the border pixels themselves)
        # https://answers.opencv.org/question/50706/border_reflect-vs-border_reflect_101/
        f = 8  # filter
        p1, p2 = 3, 4  # padding on left/right or top/bottom
        padded_test_image = cv2.copyMakeBorder(test_image, p1, p2, p1, p2, borderType=cv2.BORDER_DEFAULT)
        segmentation_mask = np.zeros_like(test_image)

        R, C = segmentation_mask.shape

        if sliding_window:
            for i in range(R):
                for j in range(C):
                    segmentation_mask[i, j] = self.get_prediction_on_block(padded_test_image[i:i+f, j:j+f], f)
        else:
            # block-by-block prediction; the column index must be reset for
            # every new block row
            i = 0
            while i < R:
                j = 0
                while j < C:
                    segmentation_mask[i:i+f, j:j+f] = self.get_prediction_on_block(padded_test_image[i:i+f, j:j+f], f)
                    j += f
                i += f

        if ground_truth_image_name:
            ground_truth_image = cv2.imread(ground_truth_image_name)
            ground_truth_image = cv2.cvtColor(ground_truth_image, cv2.COLOR_BGR2GRAY)

            false_positives_perc = np.sum(((ground_truth_image/255) == 0) & (segmentation_mask == 1)) / np.sum((ground_truth_image/255) == 1)
            false_negatives_perc = np.sum(((ground_truth_image/255) == 1) & (segmentation_mask == 0)) / np.sum((ground_truth_image/255) == 0)
            prob_of_error = false_positives_perc*self.prior_class_1 + false_negatives_perc*self.prior_class_0

            fig, ax = plt.subplots(nrows=1, ncols=2)
            _ = ax[0].imshow(ground_truth_image, cmap="gray")
            _ = ax[1].imshow(segmentation_mask, cmap="gray")
            _ = ax[0].set_title("Ground truth", fontsize="small")
            _ = ax[1].set_title(f"Segmentation Result | Prob. of Error: {prob_of_error:.3f}", fontsize="small")

            fig.suptitle(test_image_name + " | Binary Segmentation using Bayesian Decision Rule")

        else:
            plt.imshow(segmentation_mask, cmap='gray')
            title = test_image_name + " | Binary Segmentation using Bayesian Decision Rule"
            plt.title(title)
            plt.show()

        plt.waitforbuttonpress()
        plt.close("all")
Exemple #56
0
def visualize(args):
    data_path = args.datapath or './data/{}/val_data_joint.npy'.format(
        args.dataset)
    label_path = args.labelpath or './data/{}/val_label.pkl.npy'.format(
        args.dataset)

    transformded_data = np.load(args.transformed_datapath, allow_pickle=True)
    data = np.load(data_path, allow_pickle=True)
    with open(label_path, 'rb') as f:
        labels = pickle.load(f, encoding='latin1')

    bones = bone_pairs[args.dataset]
    print(f'Dataset: {args.dataset}\n')

    def animate_1(skeleton):
        ax1.clear()
        ax1.set_xlim([-1, 1])
        ax1.set_ylim([-1, 1])
        ax1.set_zlim([-1, 1])

        for i, j in bones:
            joint_locs = skeleton[:, [i, j]]
            # plot them
            ax1.plot(joint_locs[0], joint_locs[1], joint_locs[2], color='blue')

        action_class = labels[1][index] + 1
        action_name = actions[action_class]
        plt.title(
            'Skeleton {} Frame #{} of 300 from {}\n (Action {}: {})'.format(
                index, skeleton_index[0], args.dataset, action_class,
                action_name))
        skeleton_index[0] += 1
        return ax1

    def animate_2(skeleton):
        ax2.clear()
        ax2.set_xlim([-1, 1])
        ax2.set_ylim([-1, 1])
        ax2.set_zlim([-1, 1])

        for i, j in bones:
            joint_locs = skeleton[:, [i, j]]
            # plot them
            ax2.plot(joint_locs[0], joint_locs[1], joint_locs[2], color='blue')

        action_class = labels[1][index] + 1
        action_name = actions[action_class]
        plt.title(
            'Skeleton {} Frame #{} of 300 from {}\n (Action {}: {})'.format(
                index, skeleton_index[0], args.dataset, action_class,
                action_name))
        skeleton_index[0] += 1
        return ax2

    for index in args.indices:
        # for index in range(0,60,10):
        mpl.rcParams['legend.fontsize'] = 10
        fig = plt.figure(figsize=(20, 40))
        ax1 = fig.add_subplot(2, 1, 1, projection='3d')
        ax2 = fig.add_subplot(2, 1, 2, projection='3d')

        # get data
        skeletons = data[index]
        action_class = labels[1][index] + 1
        action_name = actions[action_class]
        print(f'Sample index: {index}\nAction: {action_class}-{action_name}\n'
              )  # (C,T,V,M)
        # print(skeletons.shape)
        # Pick the first body to visualize
        skeleton1 = skeletons[..., 0]  # out (C,T,V)
        # print(skeleton1.shape)
        skeleton_index = [0]
        skeleton1 = skeleton1.transpose(1, 0, 2)

        print(skeleton1.shape)
        an1 = FuncAnimation(fig, animate_1, skeleton1)
        # an2 = FuncAnimation(fig, animate_2, transformded_data[index])

        plt.title('Skeleton {} from {} test data'.format(index, args.dataset))
        plt.waitforbuttonpress(0)  # this will wait for indefinite time
        plt.close(fig)
        plt.show()
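# A hedged sketch of the argument object visualize() expects, inferred from
# the attributes it reads (dataset, datapath, labelpath, transformed_datapath,
# indices); the flag names and defaults below are assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='ntu/xview')
parser.add_argument('--datapath', default=None)
parser.add_argument('--labelpath', default=None)
parser.add_argument('--transformed-datapath', default='./data/transformed.npy')
parser.add_argument('--indices', type=int, nargs='+', default=[0])
visualize(parser.parse_args())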
Exemple #57
0
def handObjectTrack(w, h, objParamInit, handParamInit, objMesh, camProp,
                    out_dir):
    ds = tf.data.Dataset.from_generator(
        lambda: dataGen(w, h),
        (tf.string, tf.float32, tf.float32, tf.float32, tf.float32),
        ((None, ), (None, h, w, 3), (None, h, w, 3), (None, h, w, 3),
         (None, h, w, 3)))
    # assert len(objParamInitList)==len(handParamInitList)

    numFrames = 1

    # read real observations
    frameCntInt, loadData, realObservs = LossObservs.getRealObservables(
        ds, numFrames, w, h)
    icp = Icp(realObservs, camProp)

    # set up the scene
    scene = Scene(optModeEnum.MULTIFRAME_RIGID_HO_POSE, frameCnt=numFrames)
    objID = scene.addObject(objMesh, objParamInit, segColor=objSegColor)
    handID = scene.addHand(handParamInit, handSegColor, baseItemID=objID)
    scene.addCamera(f=camProp.f,
                    c=camProp.c,
                    near=camProp.near,
                    far=camProp.far,
                    frameSize=camProp.frameSize)
    finalMesh = scene.getFinalMesh()

    # render the scene
    renderer = DirtRenderer(finalMesh, renderModeEnum.SEG_COLOR_DEPTH)
    virtObservs = renderer.render()

    # get loss over observables
    observLoss = LossObservs(virtObservs, realObservs,
                             renderModeEnum.SEG_COLOR_DEPTH)
    segLoss, depthLoss, colLoss = observLoss.getL2Loss(isClipDepthLoss=True,
                                                       pyrLevel=2)

    # get parameters and constraints
    handConstrs = Constraints()
    paramListHand = scene.getVarsByItemID(
        handID, [varTypeEnum.HAND_JOINT, varTypeEnum.HAND_ROT])
    jointAngs = paramListHand[0]
    handRot = paramListHand[1]
    validTheta = tf.concat([handRot, jointAngs], axis=0)
    theta = handConstrs.getFullThetafromValidTheta(validTheta)
    thetaConstrs, _ = handConstrs.getHandThetaConstraints(validTheta,
                                                          isValidTheta=True)

    paramListObj = scene.getParamsByItemID(
        [parTypeEnum.OBJ_ROT, parTypeEnum.OBJ_TRANS, parTypeEnum.OBJ_POSE_MAT],
        objID)
    rotObj = paramListObj[0]
    transObj = paramListObj[1]
    poseMat = paramListObj[2]

    paramListHand = scene.getParamsByItemID([
        parTypeEnum.HAND_THETA, parTypeEnum.HAND_TRANS, parTypeEnum.HAND_BETA
    ], handID)
    thetaMat = paramListHand[0]
    transHand = paramListHand[1]
    betaHand = paramListHand[2]

    # get icp losses
    icpLossHand = icp.getLoss(scene.itemPropsDict[handID].transformedMesh.v,
                              handSegColor)
    icpLossObj = icp.getLoss(scene.itemPropsDict[objID].transformedMesh.v,
                             objSegColor)

    # get rel hand obj pose loss
    handTransVars = tf.stack(scene.getVarsByItemID(
        handID, [varTypeEnum.HAND_TRANS_REL_DELTA]),
                             axis=0)
    handRotVars = tf.stack(scene.getVarsByItemID(
        handID, [varTypeEnum.HAND_ROT_REL_DELTA]),
                           axis=0)
    relPoseLoss = handConstrs.getHandObjRelDeltaPoseConstraint(
        handRotVars, handTransVars)

    # get final loss
    icpLoss = 1e3 * icpLossHand + 1e3 * icpLossObj
    totalLoss1 = 1.0e1 * segLoss + 1e0 * depthLoss + 0.0 * colLoss + 1e2 * thetaConstrs + icpLoss + 1e6 * relPoseLoss
    totalLoss2 = 1.15 * segLoss + 5.0 * depthLoss + 0.0 * colLoss + 1e2 * thetaConstrs

    # get the variables for opt
    optVarsHandList = scene.getVarsByItemID(
        handID,
        [  #varTypeEnum.HAND_TRANS, varTypeEnum.HAND_ROT,
            # varTypeEnum.HAND_ROT_REL_DELTA, varTypeEnum.HAND_TRANS_REL_DELTA,
            varTypeEnum.HAND_JOINT
        ],
        [])
    optVarsHandDelta = scene.getVarsByItemID(
        handID,
        [varTypeEnum.HAND_TRANS_REL_DELTA, varTypeEnum.HAND_ROT_REL_DELTA], [])
    optVarsHandJoint = scene.getVarsByItemID(handID, [varTypeEnum.HAND_JOINT],
                                             [])
    optVarsObjList = scene.getVarsByItemID(
        objID, [varTypeEnum.OBJ_TRANS, varTypeEnum.OBJ_ROT], [])
    optVarsList = optVarsObjList  #+ optVarsHandList
    optVarsListNoJoints = optVarsObjList  #+ optVarsHandList

    # get the initial val of variables for BFGS optimizer
    initVals = []
    for fID in range(len(objParamInitList)):
        initVals.append(handParamInitList[fID].trans)
        initVals.append(handParamInitList[fID].theta[:3])
    initVals.append(handParamInitList[0].theta[handConstrs.validThetaIDs][3:])
    for fID in range(len(objParamInitList)):
        initVals.append(objParamInitList[fID].trans)
        initVals.append(objParamInitList[fID].rot)
    initValsNp = np.concatenate(initVals, axis=0)

    # setup optimizer
    opti1 = Optimizer(totalLoss1,
                      optVarsList,
                      'Adam',
                      learning_rate=0.02 / 2.0,
                      initVals=initValsNp)
    opti2 = Optimizer(totalLoss1,
                      optVarsListNoJoints,
                      'Adam',
                      learning_rate=0.01)

    # get the optimization reset ops
    resetOpt1 = tf.variables_initializer(opti1.optimizer.variables())
    resetOpt2 = tf.variables_initializer(opti2.optimizer.variables())

    # tf stuff
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.4
    session = tf.Session(config=config)
    session.__enter__()
    tf.global_variables_initializer().run()

    # setup the plot window
    if showFig:
        plt.ion()
        fig = plt.figure()
        ax = fig.subplots(4, max(numFrames, 1))
        axesList = [[], [], [], []]
        for i in range(numFrames):
            axesList[0].append(ax[0].imshow(
                np.zeros((240, 320, 3), dtype=np.float32)))
            axesList[1].append(ax[1].imshow(
                np.zeros((240, 320, 3), dtype=np.float32)))
            axesList[2].append(ax[2].imshow(
                np.random.uniform(0, 2, (240, 320, 3))))
            axesList[3].append(ax[3].imshow(
                np.random.uniform(0, 1, (240, 320, 3))))
        plt.subplots_adjust(top=0.984,
                            bottom=0.016,
                            left=0.028,
                            right=0.99,
                            hspace=0.045,
                            wspace=0.124)
        figManager = plt.get_current_fig_manager()
        figManager.window.showMaximized()

    # python renderer for rendering object texture
    pyRend = renderScene(h, w)
    pyRend.addObjectFromMeshFile(modelPath, 'obj')
    pyRend.addCamera()
    pyRend.creatcamProjMat(camProp.f, camProp.c, camProp.near, camProp.far)

    segLossList = []
    depLossList = []
    icpLossList = []
    relPoseLossList = []

    while (True):
        session.run(resetOpt1)
        session.run(resetOpt2)

        # load new frame
        opti1.runOptimization(session, 1, {loadData: True})
        # print(icpLoss.eval(feed_dict={loadData: False}))
        # print(segLoss.eval(feed_dict={loadData: False}))
        # print(depthLoss.eval(feed_dict={loadData: False}))

        # run the optimization for new frame
        frameID = (realObservs.frameID.eval(
            feed_dict={loadData: False}))[0].decode('UTF-8')
        opti1.runOptimization(session, FLAGS.numIter, {loadData: False})

        segLossList.append(1.0 * segLoss.eval(feed_dict={loadData: False}))
        depLossList.append(1.0 * depthLoss.eval(feed_dict={loadData: False}))
        icpLossList.append(icpLoss.eval(feed_dict={loadData: False}))
        relPoseLossList.append(1e3 *
                               relPoseLoss.eval(feed_dict={loadData: False}))
        # icpLossList.append(1e2*icpLossObj.eval(feed_dict={loadData: False}))

        # show all the images for analysis
        plt.title(frameID)
        depRen = virtObservs.depth.eval(feed_dict={loadData: False})
        depGT = realObservs.depth.eval(feed_dict={loadData: False})
        segRen = virtObservs.seg.eval(feed_dict={loadData: False})
        segGT = realObservs.seg.eval(feed_dict={loadData: False})
        poseMatNp = poseMat.eval(feed_dict={loadData: False})
        colRen = virtObservs.col.eval(feed_dict={loadData: False})
        colGT = realObservs.col.eval(feed_dict={loadData: False})
        for f in range(numFrames):
            if doPyRendFinalImage:
                # render the obj col image
                pyRend.setObjectPose('obj', poseMatNp[f].T)
                cRend, dRend = pyRend.render()

                # blend with dirt rendered image to get full texture image
                dirtCol = colRen[f][:, :, [2, 1, 0]]
                objRendMask = (np.sum(np.abs(segRen[f] - objSegColor), 2) <
                               0.05).astype(np.float32)
                objRendMask = np.stack([objRendMask, objRendMask, objRendMask],
                                       axis=2)
                finalCol = dirtCol * (1 - objRendMask) + (
                    cRend.astype(np.float32) / 255.) * objRendMask

            if showFig:
                axesList[0][f].set_data(colGT[f])
                if doPyRendFinalImage:
                    axesList[1][f].set_data(finalCol)
                axesList[2][f].set_data(np.abs(depRen - depGT)[f, :, :, 0])
                axesList[3][f].set_data(np.abs(segRen - segGT)[f, :, :, :])

            if f >= 0:
                coordChangMat = np.array([[1., 0., 0.], [0., -1., 0.],
                                          [0., 0., -1.]])
                handJoints = scene.itemPropsDict[handID].transorfmedJs.eval(
                    feed_dict={loadData: False})[f]
                camMat = camProp.getCamMat()
                handJointProj = cv2.projectPoints(
                    handJoints.dot(coordChangMat), np.zeros((3, )),
                    np.zeros((3, )), camMat, np.zeros((4, )))[0][:, 0, :]
                imgIn = (colGT[f][:, :, [2, 1, 0]] * 255).astype(
                    np.uint8).copy()
                imgIn = cv2.resize(
                    imgIn, (imgIn.shape[1] * dscale, imgIn.shape[0] * dscale),
                    interpolation=cv2.INTER_LANCZOS4)
                imgJoints = showHandJoints(
                    imgIn,
                    np.round(handJointProj).astype(
                        np.int32)[jointsMapManoToObman] * dscale,
                    estIn=None,
                    filename=None,
                    upscale=1,
                    lineThickness=2)

                objCorners = getObjectCorners(mesh.v)
                rotObjNp = rotObj.eval(feed_dict={loadData: False})[f]
                transObjNp = transObj.eval(feed_dict={loadData: False})[f]
                objCornersTrans = np.matmul(
                    objCorners,
                    cv2.Rodrigues(rotObjNp)[0].T) + transObjNp
                objCornersProj = cv2.projectPoints(
                    objCornersTrans.dot(coordChangMat), np.zeros((3, )),
                    np.zeros((3, )), camMat, np.zeros((4, )))[0][:, 0, :]
                imgJoints = showObjJoints(imgJoints,
                                          objCornersProj * dscale,
                                          lineThickness=2)

                alpha = 0.35
                rendMask = segRen[f]
                # rendMask[:,:,[1,2]] = 0
                rendMask = np.clip(255. * rendMask, 0, 255).astype('uint8')
                msk = rendMask.sum(axis=2) > 0
                msk = msk * alpha
                msk = np.stack([msk, msk, msk], axis=2)
                blended = msk * rendMask[:, :, [2, 1, 0]] + (1. - msk) * (
                    colGT[f][:, :, [2, 1, 0]] * 255).astype(np.uint8)
                blended = blended.astype(np.uint8)

                cv2.imwrite(out_dir + '/annoVis_' + frameID + '.jpg',
                            imgJoints)
                cv2.imwrite(out_dir + '/annoBlend_' + frameID + '.jpg',
                            blended)
                cv2.imwrite(out_dir + '/maskOnly_' + frameID + '.jpg',
                            (segRen[0] * 255).astype(np.uint8))
                depthEnc = encodeDepthImg(depRen[0, :, :, 0])
                cv2.imwrite(out_dir + '/renderDepth_' + frameID + '.jpg',
                            depthEnc)
                if doPyRendFinalImage:
                    cv2.imwrite(out_dir + '/renderCol_' + frameID + '.jpg',
                                (finalCol[:, :, [2, 1, 0]] * 255).astype(
                                    np.uint8))

        if showFig:
            plt.savefig(out_dir + '/' + frameID + '.png')
            plt.waitforbuttonpress(0.01)

        # save all the vars
        optVarListNp = []
        for optVar in optVarsHandDelta:
            optVarListNp.append(optVar.eval())

        thetaNp = thetaMat.eval(feed_dict={loadData: False})[0]
        betaNp = betaHand.eval(feed_dict={loadData: False})[0]
        transNp = transHand.eval(feed_dict={loadData: False})[0]
        rotObjNp = rotObj.eval(feed_dict={loadData: False})[0]
        transObjNp = transObj.eval(feed_dict={loadData: False})[0]
        JTransformed = scene.itemPropsDict[handID].transorfmedJs.eval(
            feed_dict={loadData: False})
        handJproj = np.reshape(
            cv2ProjectPoints(camProp, np.reshape(JTransformed, [-1, 3])),
            [numFrames, JTransformed.shape[1], 2])
        # vis = getBatch2DPtVisFromDep(depRen, segRen, projPts, JTransformed, handSegColor)
        objCornersRest = np.load(
            os.path.join(YCB_OBJECT_CORNERS_DIR,
                         obj.split('/')[0], 'corners.npy'))
        objCornersTransormed = objCornersRest.dot(
            cv2.Rodrigues(rotObjNp)[0].T) + transObjNp
        objCornersproj = np.reshape(
            cv2ProjectPoints(camProp, np.reshape(objCornersTransormed,
                                                 [-1, 3])),
            [objCornersTransormed.shape[0], 2])

        savePickleData(
            out_dir + '/' + frameID + '.pkl', {
                'beta': betaNp,
                'fullpose': thetaNp,
                'trans': transNp,
                'rotObj': rotObjNp,
                'transObj': transObjNp,
                'JTransformed': JTransformed,
                'objCornersRest': objCornersRest,
                'objCornersTransormed': objCornersTransormed,
                'objName': obj.split('/')[0],
                'objLabel': objLabel
            })
Exemple #58
0
def example3_EKF():
    
    # 2.5 sine periods
    
    #### Simulated Data #####
    # Time steps
    T = 500
    tseq = np.linspace(0, 2.5*(2*math.pi), T)
    X = np.zeros((1,T))
    XT = np.zeros((1,T))
    Z = np.zeros((2,T))
    _stateDim = 1
    _sensDim = 2
    # Sensor noise var
    R = np.array([[0.64, 0],
                  [0, 0.64]])
    # Constant transition matrix for State vector
    A = np.eye(_stateDim)
    # Constant transition matrix for Control vector
    B = np.eye(_stateDim)
    # Constant transition matrix for Measurement from State
    C = np.ones((_sensDim,_stateDim))
    # Init prediction err
    P = np.eye(_stateDim)
    # Constant processing noise for both sensor
    Q = np.array([[0.5]])
    # Bias for sensor
    V = np.array([[-1],
                  [1]])
    # Control 
    U = np.zeros((_stateDim,1))

    for t in range(T):
        XT[0,t] = math.sin(tseq[t]) + 20
        X[0,t] = XT[0,t] + np.random.randn(1)*Q
        Z[0,t] = V[0,0] + X[0,t] + np.random.randn(1)*R[0,0]
        Z[1,t] = V[1,0] + X[0,t] + np.random.randn(1)*R[1,1]

    #### Simulated Data above ########
    print("[INFO] Generated Data!")
    ekf = EKF()

    def f(_X, _U):
        return _X.copy(), np.eye(_stateDim)

    def h(_Z):
        return _Z.copy(), np.ones((_sensDim, _stateDim))

    ekf.f = f
    ekf.h = h
    t = 0
    mean = np.mean(Z[:2,t:t+1])
    _X = np.array([[mean]])
    ekf.setInit(X=_X, Z=Z[:2,t:t+1], R=R, P=P, Q=Q)
    plt.axis([0, T+10, 15, 25])
    plt.plot(np.arange(T),X[0,:],np.arange(T),Z[0,:],np.arange(T),Z[1,:])
    plt.grid(True)
    plt.show()
    plt.scatter(t+1, mean, c='g')
    print("t:%d Z:%s X:%s"%(t, Z[0,t], ekf.X))
    print("t:%d Z:%s X:%s"%(t, Z[1,t], ekf.X))
    update = []
    ti = []
    for t in range(1,T):
        plt.grid(True)
        #plt.pause(0.05)
        str_p = ["t:%d"%t]
        str_p.append("[G: %s]"%(ekf.G))
        #print(t, 'G', ekf.G)
        #plt.scatter(t+1, Z[0,t],c='g')
        str_p.append("[Z: %s]"%(Z[0,t]))
        #print(t, 0, int(Z[0,t]), t+1)

        # predict state
        ekf.predict()

        # update State
        ekf.update(Z=Z[:2,t:t+1])
        ti.append(t+1)
        update.append(ekf.X[0,0])
        #plt.scatter(t+1, int(ekf.X[0,0]),c='r')
        #plt.plot(ti, Z[0,0:t], c='b')
        #plt.plot(ti, Z[1,0:t], c='g')
        #plt.plot(ti, update, c='r')
        str_p.append("[X+: %s]"%(ekf.X[0,0]))
        #print(t, 1, int(ekf.X[0,0]), t+1)
        print(" ".join(str_p))
    
    ## 1. Sensor one measurement
    #plt.plot(ti, Z[0,0:t], c='b')
    ## 2. Sensor two measurement
    #plt.plot(ti, Z[1,0:t], c='y')
    ## 3. Mean measurement
    plt.plot(ti, np.mean(Z[:,:t],axis=0), c='b')
    ## 4. Simulated Data
    plt.plot(ti, X[0,0:t], c='g')
    ## 5. Ground True data
    plt.plot(ti, XT[0,0:t], c='c')
    ## 6. Kalman filtered data
    plt.plot(ti, update, c='r')
    keyboardClick = False
    plt.show()
    while not keyboardClick:
        time.sleep(0.05)
        keyboardClick=plt.waitforbuttonpress()
Exemple #59
0
def label_images(metadata, settings):
    """
    Load satellite images and interactively label different classes (hard-coded)

    KV WRL 2019

    Arguments:
    -----------
    metadata: dict
        contains all the information about the satellite images that were downloaded
    settings: dict with the following keys
        'cloud_thresh': float
            value between 0 and 1 indicating the maximum cloud fraction in 
            the cropped image that is accepted    
        'cloud_mask_issue': boolean
            True if there is an issue with the cloud mask and sand pixels
            are erroneously being masked on the images
        'labels': dict
            list of label names (key) and label numbers (value) for each class
        'flood_fill': boolean
            True to use the flood_fill functionality when labelling sand pixels
        'tolerance': float
            tolerance value for flood fill when labelling the sand pixels
        'filepath_train': str
            directory in which to save the labelled data
        'inputs': dict
            input parameters (sitename, filepath, polygon, dates, sat_list)
                
    Returns:
    -----------
    Stores the labelled data in the specified directory

    """

    filepath_train = settings['filepath_train']
    # initialize figure
    fig, ax = plt.subplots(1,
                           1,
                           figsize=[17, 10],
                           tight_layout=True,
                           sharex=True,
                           sharey=True)
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()

    # loop through satellites
    for satname in metadata.keys():
        filepath = SDS_tools.get_filepath(settings['inputs'], satname)
        filenames = metadata[satname]['filenames']
        # loop through images
        for i in range(len(filenames)):
            # image filename
            fn = SDS_tools.get_filenames(filenames[i], filepath, satname)
            # read and preprocess image
            im_ms, georef, cloud_mask, im_extra, im_QA, im_nodata, im_proj = SDS_preprocess.preprocess_single(
                fn, satname, settings['cloud_mask_issue'])
            # calculate cloud cover
            cloud_cover = np.divide(
                sum(sum(cloud_mask.astype(int))),
                (cloud_mask.shape[0] * cloud_mask.shape[1]))
            # skip image if cloud cover is above threshold
            if cloud_cover > settings['cloud_thresh'] or cloud_cover == 1:
                continue
            # get individual RGB image
            im_RGB = SDS_preprocess.rescale_image_intensity(
                im_ms[:, :, [2, 1, 0]], cloud_mask, 99.9)
            im_NDVI = SDS_tools.nd_index(im_ms[:, :, 3], im_ms[:, :, 2],
                                         cloud_mask)
            im_NDWI = SDS_tools.nd_index(im_ms[:, :, 3], im_ms[:, :, 1],
                                         cloud_mask)
            # initialise labels
            im_viz = im_RGB.copy()
            im_labels = np.zeros([im_RGB.shape[0], im_RGB.shape[1]])
            # show RGB image
            ax.axis('off')
            ax.imshow(im_RGB)
            implot = ax.imshow(im_viz, alpha=0.6)
            filename = filenames[i][:filenames[i].find('.')][:-4]
            ax.set_title(filename)

            ##############################################################
            # select image to label
            ##############################################################
            # set a key event to accept/reject the detections (see https://stackoverflow.com/a/15033071)
            # this variable needs to be mutable so we can access it after the keypress event
            key_event = {}

            def press(event):
                # store what key was pressed in the dictionary
                key_event['pressed'] = event.key

            # let the user press a key, right arrow to keep the image, left arrow to skip it
            # to break the loop the user can press 'escape'
            while True:
                btn_keep = ax.text(1.1,
                                   0.9,
                                   'keep ⇨',
                                   size=12,
                                   ha="right",
                                   va="top",
                                   transform=ax.transAxes,
                                   bbox=dict(boxstyle="square", ec='k',
                                             fc='w'))
                btn_skip = ax.text(-0.1,
                                   0.9,
                                   '⇦ skip',
                                   size=12,
                                   ha="left",
                                   va="top",
                                   transform=ax.transAxes,
                                   bbox=dict(boxstyle="square", ec='k',
                                             fc='w'))
                btn_esc = ax.text(0.5,
                                  0,
                                  '<esc> to quit',
                                  size=12,
                                  ha="center",
                                  va="top",
                                  transform=ax.transAxes,
                                  bbox=dict(boxstyle="square", ec='k', fc='w'))
                fig.canvas.draw_idle()
                fig.canvas.mpl_connect('key_press_event', press)
                plt.waitforbuttonpress()
                # after button is pressed, remove the buttons
                btn_skip.remove()
                btn_keep.remove()
                btn_esc.remove()

                # keep/skip image according to the pressed key, 'escape' to break the loop
                if key_event.get('pressed') == 'right':
                    skip_image = False
                    break
                elif key_event.get('pressed') == 'left':
                    skip_image = True
                    break
                elif key_event.get('pressed') == 'escape':
                    plt.close()
                    raise StopIteration('User cancelled labelling images')
                else:
                    plt.waitforbuttonpress()

            # if user decided to skip show the next image
            if skip_image:
                ax.clear()
                continue
            # otherwise label this image
            else:
                ##############################################################
                # digitize sandy pixels
                ##############################################################
                ax.set_title(
                    'Click on SAND pixels (flood fill activated, tolerance = %.2f)\nwhen finished press <Enter>'
                    % settings['tolerance'])
                # create erase button; clicking it deletes the last selection
                btn_erase = ax.text(im_ms.shape[1],
                                    0,
                                    'Erase',
                                    size=20,
                                    ha='right',
                                    va='top',
                                    bbox=dict(boxstyle="square",
                                              ec='k',
                                              fc='w'))
                fig.canvas.draw_idle()
                color_sand = settings['colors']['sand']
                sand_pixels = []
                while 1:
                    seed = ginput(n=1, timeout=0, show_clicks=True)
                    # if empty break the loop and go to next label
                    if len(seed) == 0:
                        break
                    else:
                        # round to pixel location
                        seed = np.round(seed[0]).astype(int)
                    # if user clicks on erase, delete the last selection
                    if seed[0] > 0.95 * im_ms.shape[1] and seed[
                            1] < 0.05 * im_ms.shape[0]:
                        if len(sand_pixels) > 0:
                            im_labels[sand_pixels[-1]] = 0
                            for k in range(im_viz.shape[2]):
                                im_viz[sand_pixels[-1],
                                       k] = im_RGB[sand_pixels[-1], k]
                            implot.set_data(im_viz)
                            fig.canvas.draw_idle()
                            del sand_pixels[-1]

                    # otherwise label the selected sand pixels
                    else:
                        # flood fill the NDVI and the NDWI
                        fill_NDVI = flood(im_NDVI, (seed[1], seed[0]),
                                          tolerance=settings['tolerance'])
                        fill_NDWI = flood(im_NDWI, (seed[1], seed[0]),
                                          tolerance=settings['tolerance'])
                        # compute the intersection of the two masks
                        fill_sand = np.logical_and(fill_NDVI, fill_NDWI)
                        im_labels[fill_sand] = settings['labels']['sand']
                        sand_pixels.append(fill_sand)
                        # show the labelled pixels
                        for k in range(im_viz.shape[2]):
                            im_viz[im_labels == settings['labels']['sand'],
                                   k] = color_sand[k]
                        implot.set_data(im_viz)
                        fig.canvas.draw_idle()

                ##############################################################
                # digitize white-water pixels
                ##############################################################
                color_ww = settings['colors']['white-water']
                ax.set_title(
                    'Click on individual WHITE-WATER pixels (no flood fill)\nwhen finished press <Enter>'
                )
                fig.canvas.draw_idle()
                ww_pixels = []
                while 1:
                    seed = ginput(n=1, timeout=0, show_clicks=True)
                    # if empty break the loop and go to next label
                    if len(seed) == 0:
                        break
                    else:
                        # round to pixel location
                        seed = np.round(seed[0]).astype(int)
                    # if user clicks on erase, delete the last labelled pixels
                    if seed[0] > 0.95 * im_ms.shape[1] and seed[
                            1] < 0.05 * im_ms.shape[0]:
                        if len(ww_pixels) > 0:
                            im_labels[ww_pixels[-1][1], ww_pixels[-1][0]] = 0
                            for k in range(im_viz.shape[2]):
                                im_viz[ww_pixels[-1][1], ww_pixels[-1][0],
                                       k] = im_RGB[ww_pixels[-1][1],
                                                   ww_pixels[-1][0], k]
                            implot.set_data(im_viz)
                            fig.canvas.draw_idle()
                            del ww_pixels[-1]
                    else:
                        im_labels[seed[1],
                                  seed[0]] = settings['labels']['white-water']
                        for k in range(im_viz.shape[2]):
                            im_viz[seed[1], seed[0], k] = color_ww[k]
                        implot.set_data(im_viz)
                        fig.canvas.draw_idle()
                        ww_pixels.append(seed)

                im_sand_ww = im_viz.copy()
                btn_erase.set(text='<Esc> to Erase', fontsize=12)

                ##############################################################
                # digitize water pixels (with lassos)
                ##############################################################
                color_water = settings['colors']['water']
                ax.set_title(
                    'Click and hold to draw lassos and select WATER pixels\nwhen finished press <Enter>'
                )
                fig.canvas.draw_idle()
                selector_water = SelectFromImage(ax, implot, color_water)
                key_event = {}
                while True:
                    fig.canvas.draw_idle()
                    fig.canvas.mpl_connect('key_press_event', press)
                    plt.waitforbuttonpress()
                    if key_event.get('pressed') == 'enter':
                        selector_water.disconnect()
                        break
                    elif key_event.get('pressed') == 'escape':
                        selector_water.array = im_sand_ww
                        implot.set_data(selector_water.array)
                        fig.canvas.draw_idle()
                        selector_water.implot = implot
                        selector_water.im_bool = np.zeros(
                            (selector_water.array.shape[0],
                             selector_water.array.shape[1]))
                        selector_water.ind = []
                # update im_viz and im_labels
                im_viz = selector_water.array
                selector_water.im_bool = selector_water.im_bool.astype(bool)
                im_labels[selector_water.im_bool] = settings['labels']['water']

                im_sand_ww_water = im_viz.copy()

                ##############################################################
                # digitize land pixels (with lassos)
                ##############################################################
                color_land = settings['colors']['other land features']
                ax.set_title(
                    'Click and hold to draw lassos and select OTHER LAND pixels\nwhen finished press <Enter>'
                )
                fig.canvas.draw_idle()
                selector_land = SelectFromImage(ax, implot, color_land)
                key_event = {}
                while True:
                    fig.canvas.draw_idle()
                    fig.canvas.mpl_connect('key_press_event', press)
                    plt.waitforbuttonpress()
                    if key_event.get('pressed') == 'enter':
                        selector_land.disconnect()
                        break
                    elif key_event.get('pressed') == 'escape':
                        selector_land.array = im_sand_ww_water
                        implot.set_data(selector_land.array)
                        fig.canvas.draw_idle()
                        selector_land.implot = implot
                        selector_land.im_bool = np.zeros(
                            (selector_land.array.shape[0],
                             selector_land.array.shape[1]))
                        selector_land.ind = []
                # update im_viz and im_labels
                im_viz = selector_land.array
                selector_land.im_bool = selector_land.im_bool.astype(bool)
                im_labels[selector_land.
                          im_bool] = settings['labels']['other land features']

                # save labelled image
                ax.set_title(filename)
                fig.canvas.draw_idle()
                fp = os.path.join(filepath_train,
                                  settings['inputs']['sitename'])
                if not os.path.exists(fp):
                    os.makedirs(fp)
                fig.savefig(os.path.join(fp, filename + '.jpg'), dpi=150)
                ax.clear()
                # save labels and features
                features = dict([])
                for key in settings['labels'].keys():
                    im_bool = im_labels == settings['labels'][key]
                    features[key] = SDS_shoreline.calculate_features(
                        im_ms, cloud_mask, im_bool)
                training_data = {
                    'labels': im_labels,
                    'features': features,
                    'label_ids': settings['labels']
                }
                with open(os.path.join(fp, filename + '.pkl'), 'wb') as f:
                    pickle.dump(training_data, f)

    # close figure when finished
    plt.close(fig)
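# A hedged usage sketch for label_images(), built from the settings keys in
# its docstring; the label numbers, colours and paths are placeholders, and
# `inputs`/`metadata` are assumed to come from the download step.
settings = {
    'inputs': inputs,
    'cloud_thresh': 0.5,
    'cloud_mask_issue': False,
    'labels': {'sand': 1, 'white-water': 2, 'water': 3,
               'other land features': 4},
    'colors': {'sand': [1.0, 0.65, 0.0], 'white-water': [1.0, 0.0, 1.0],
               'water': [0.1, 0.3, 1.0],
               'other land features': [0.8, 0.8, 0.1]},
    'flood_fill': True,
    'tolerance': 0.05,
    'filepath_train': './training_data',
}
label_images(metadata, settings)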
Exemple #60
0
def plot_mean(th):
    for c in range(self.num_classes):
        for n in range(self.num_nuisances):
            plt.scatter(*th[c][n].mean.T, c=colours[c], marker="x")
    plt.waitforbuttonpress()