Example #1
0
def label_data(prefix, size=100, savename=None):
    from glob import glob
    from os.path import basename
    from PIL import Image
    from os.path import isfile
    if savename is None: savename = labelpath+'label_'+prefix+'.txt'
    # We want to avoid labeling an image twice, so keep track
    # of what we've labeled in previous labeling sessions.
    if isfile(savename):
        fileout = open(savename,'r')
        already_seen = [line.split(',')[0] for line in fileout]
        fileout.close()
    else: already_seen = []
    # Now reopen the file for appending.
    fileout = open(savename,'a')
    pl.ion()
    pl.figure(1,figsize=(9,9))
    files = glob(imgpath+prefix+'*.png')
    for file in np.random.choice(files, size=size, replace=False):
        if basename(file) in already_seen: continue
        pl.clf()
        pl.subplot(1,1,1)
        pl.imshow(np.array(Image.open(file)))
        pl.title(file)
        pl.axis('off')
        pl.draw()
        label = get_one_char()
        if label=='q': break
        fileout.write(basename(file)+','+label+'\n')
        print(file, label)
    fileout.close()
    return
Example #2
0
    def writeNudges(self, outfile='jitter.txt'):

        counters = np.arange(len(self.x))
        bjds = self.camera.counterToBJD(counters)
        time = bjds - np.min(bjds)
        plt.figure('jitter timeseries')
        gs = gridspec.GridSpec(2, 1, hspace=0.15)
        kw = dict(linewidth=2)
        ax = None

        for i, what in enumerate((self.x, self.y)):
            ax = plt.subplot(gs[i], sharex=ax, sharey=ax)
            ax.plot(time, what, **kw)
            ax.set_ylabel(['dRA (arcsec)', 'dDec (arcsec)'][i])
            if i == 0:
                ax.set_title('Jitter Timeseries from\n{}'.format(self.basename))

        plt.xlabel('Time from Observation Start (days)')
        plt.xlim(np.min(time), np.max(time))
        plt.draw()
        plt.savefig(outfile.replace('.txt', '.pdf'))

        data = [counters, bjds, self.x, self.y]
        names = ['imagenumber', 'bjd', 'arcsecnudge_ra', 'arcsecnudge_dec']

        t = astropy.table.Table(data=data, names=names)
        t.write(outfile.replace('.txt', '_amplifiedby{}.txt'.format(self.amplifyinterexposurejitter)),
                format='ascii.fixed_width', delimiter=' ')
        logger.info("saved jitter nudge timeseries to {0}".format(outfile))
Example #3
0
def matplotlib_set_plot(ax, plotter, outfile, default_camera=(14, -120),
                        hide_x=False, hide_y=False):
    ax.set_title(plotter.plot_title)

    tsize = 'medium'
    ax.set_xlabel(plotter.xaxis_label, fontsize=tsize)
    ax.set_ylabel(plotter.yaxis_label, fontsize=tsize)
    ax.set_zlabel(plotter.zaxis_label, fontsize=tsize)
    ax.ticklabel_format(axis='both', labelpad=150, useOffset=False)
    ax.set_xlim(*plotter.xaxis_range)
    ax.set_ylim(*plotter.yaxis_range)
    ax.set_zlim(*plotter.zaxis_range)
    ax.legend(fontsize='small')

    # getting a nice view over the whole mess in ppv
    ax.view_init(*default_camera)

    # hide axis-numbers:
    if hide_x:
        ax.get_xaxis().set_ticks([])
        ax.xaxis.set_visible(False)
        ax.get_xaxis().set_visible(False)
    if hide_y:
        ax.get_yaxis().set_ticks([])
        ax.yaxis.set_visible(False)
        ax.get_yaxis().set_visible(False)

    plt.draw()
    plt.savefig(outfile)
    plt.show()
Example #4
0
def find_gates(mag1, mag2, param):
    col = mag1 - mag2

    lines = open(param, 'r').readlines()
    colmin, colmax = map(float, lines[4].split()[3:-1])
    mag1min, mag1max = map(float, lines[5].split()[:-1])
    #mag2min, mag2max = map(float, lines[5].split()[:-1])
    # click around
    fig, ax = plt.subplots()
    ax.plot(col, mag2, ',', color='k', alpha=0.2)
    ax.set_ylim(mag1max, mag1min)
    ax.set_xlim(colmin, colmax)

    ok = 1
    while ok == 1:
        print('click ')
        pts = np.asarray(plt.ginput(n=4, timeout=-1))
        exclude_gate = '1 {} 0 \n'.format(' '.join(['%.4f' % p for p in pts.flatten()]))
        pts = np.append(pts, pts[0]).reshape(5,2)
        ax.plot(pts[:,0], pts[:,1], color='r', lw=3, alpha=0.3)
        plt.draw()
        ok = move_on(0)
    lines[7] = exclude_gate
    # not so simple ... need them to be parallelograms.
    # PASS!

    # write new param file with exclude/include gate
    os.system('mv {0} {0}_bkup'.format(param))
    with open(param, 'w') as outp:
        [outp.write(l) for l in lines]
    print('wrote %s' % param)
Example #5
0
def show_stat(net):
    plt.clf()

    f = plt.gcf()
    f.add_subplot(211)
    plt.title(net.checkpoint_name)
    plt.plot(net.stat['epoch'], net.stat['train']['error'], label='train')
    plt.plot(net.stat['epoch'], net.stat['val']['error'], label='val')
    plt.plot(net.stat['epoch'], net.stat['test']['error'], label='test')
    plt.legend(loc = 'lower left')
    plt.ylabel('error')
    plt.xlabel('epochs')
    plt.grid()

    f.add_subplot(212)
    plt.plot(net.stat['epoch'], net.stat['train']['cost'], label='train')
    plt.plot(net.stat['epoch'], net.stat['val']['cost'], label='val')
    plt.plot(net.stat['epoch'], net.stat['test']['cost'], label='test')
    plt.legend(loc = 'lower left')
    plt.ylabel('cost')
    plt.xlabel('epochs')
    plt.grid()

    plt.draw()
    plt.savefig(net.output_dir + 'stat.png')
    time.sleep(0.05)
Example #6
0
def waterfall_plot(q,x,sampling=10,cmap=None,num_colors=100,outdir='./',outname='waterfall',format='eps',cbar_label='$|q| (a.u.)$'):
    plt.figure()
    # plt.hold(True)  # hold() was removed in matplotlib 3.0; overplotting is now the default
    colorVal = 'b'
    vmax = q[:,:].max()
    print(vmax, len(q))
    for n in range(0,len(q),sampling):
        if cmap is not None:
            print(q[n,:].max())
            colorVal = get_color(value=q[n,:].max(),cmap=cmap,vmax=vmax+.1,num_colors=num_colors)

        plt.plot(x,q[n,:]+n/10.0,label=str(n),color=colorVal,alpha=0.7)
    ax = plt.gca()
    for tic in ax.yaxis.get_major_ticks():
        tic.tick1On = tic.tick2On = False
        tic.label1On = tic.label2On = False

    if cmap is not None:
        scalar = get_smap(vmax=q[:,:].max()+.1,num_colors=sampling)
        cbar = plt.colorbar(scalar)
        cbar.set_label(cbar_label)

    plt.xlabel('$x\quad (a.u.)$')
    plt.draw()

    plt.savefig(os.path.join(outdir,outname+'.'+format),format=format,dpi=320,bbox_inches='tight')
    plt.close()
    return
Example #7
0
def graphical_test(satisfactory=0):
    from matplotlib import cm, pylab
    def cons():
        return np.random.random(2)*4-2

    def foo(x,y,a,b):
        "banana function"
        tmp=a-x
        tmp*=tmp
        out=-x*x
        out+=y
        out*=out
        out*=b
        out+=tmp
        return out*(abs(np.cos((x-1)**2+(y-1)**2))+10.0/b)
    def f(params):
        return foo(params[0], params[1],1,100)

    optimizer=optimize(f, cons, verbose=False,its=1, hillWalks=0, satisfactory=satisfactory, finalWalk=0)
    
    bgx,bgy=np.mgrid[-2:2:1000j,-2:2:1000j]
    bg=foo(bgx,bgy, 1,100)
    for i in range(20):
        pylab.clf()
        pylab.imshow(bg, cmap=cm.RdBu,vmax=bg.mean()/10)
        for x in optimizer.pool: pylab.plot((x[2]+2)/4*1000,(x[1]+2)/4*1000, ('gx'))
        print(optimizer.pool[0], optimizer.muterate)
        pylab.gca().set_xbound(0,1000)
        pylab.gca().set_ybound(0,1000)
        pylab.draw()
        pylab.colorbar()
        optimizer.run()
        input('enter to advance')
    return optimizer
Example #8
0
def ttplot(corfp,srp,slp,n,I1,I2):
    global tplot_cum,dt,firstfile
    tplot=time.time()
    rchplot=int(ceil(log(n/chn)/log(2))+1)
    normplot=zeros((1,rcr),dtype=float32)
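    # Build the per-lag normalization: the first chn channels at full time resolution,
    # then chn2-wide blocks for each successive halving of the lag resolution
    # (assumed multi-tau correlator layout).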
    
    for ir in range(rchplot):
       if ir==0:
           normplot[0,:chn]=1./arange(n-2,n-chn-2,-1)
       else:
           normplot[0, int(chn2*(ir+1)):int(chn2*(ir+2))] = 1./arange((n-1)/(2**ir)-chn2-1, (n-1)/(2**ir)-chn-1, -1)

    indt=int(chn+chn2*log(n/chn)/log(2))-2
    cc1=corfp[0,:indt]/(slp[0,:indt]*srp[0,:indt])/normplot[0,:indt]
    cc2=corfp[-1,:indt]/(slp[-1,:indt]*srp[-1,:indt])/normplot[0,:indt]
    t_axis=lag[0,:indt]
    t_axis2=tI_avg[0,:n]
    t_axis2b=tI_avg[0,:n]/dt+firstfile
    lm1.set_data(t_axis,cc1)
    lm2.set_data(t_axis2,I1)
    lm1b.set_data(t_axis,cc2)
    lm2b.set_data(t_axis2b,I2)
    ax1.set_xlim(min(t_axis),max(t_axis))
    ax1.set_ylim(min(cc1),max(cc1))
    ax1b.set_ylim(min(cc2),max(cc2))
    ax2.set_xlim(min(t_axis2),max(t_axis2))
    ax2b.set_xlim(min(t_axis2b),max(t_axis2b))
    ax2.set_ylim(min(I1),max(I1))
    ax2b.set_ylim(min(I2),max(I2))
    p.draw()
    tplot_cum+=time.time()-tplot
    return 
Example #9
0
    def plot(self):
        Ns = 1
        h = 0.1
        m = []
        for i in range(len(self.l)):
            for s in range(Ns):
                m.append(ml_to_xy((i, float(s)/Ns, 0), self.kappa, self.l, self.x0, self.theta0))
        m.append(ml_to_xy((len(self.l)-1, 1., 0), self.kappa, self.l, self.x0, self.theta0))

        plt.clf()
        # plt.hold(True)  # hold() was removed in matplotlib 3.0; overplotting is now the default
        x=[p[0] for p in m]
        y=[p[1] for p in m]
        
        bp, angles, tangents= breakpoints(self.kappa, self.l, self.x0, self.theta0)
        x=[p[0] for p in bp]
        y=[p[1] for p in bp]
        plt.plot(x[::10],y[::10],'k-')
#        xm = [p[0] for i, p in enumerate(bp) if self.markers[i]==1]
#        ym = [p[1] for i, p in enumerate(bp) if self.markers[i]==1]


#        plt.plot(xm,ym,'kx')

        plt.xlim((-100, 3000))
        plt.ylim((-3000, 100))
        plt.draw()
Example #10
0
def plot(y, function):
    """ Show an animation of Poincare plot.

    --- arguments ---
    y: A list of initial values
    function: function which is argument of Runge-Kutta solver
    """
    h = dt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.grid()
    time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
    plt.ion()
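    # Each outer iteration advances the trajectory by nstep RK4 steps of size h;
    # after the transient, the resulting point is added to the Poincare section.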

    for i in range(nmax + 1):
        for j in range(nstep):
            rk4 = RK.RK4(function)
            y = rk4.solve(y, j * h, h)
            # -pi <= theta <= pi
            while y[0] > pi:
                y[0] = y[0] - 2 * pi
            while y[0] < -pi:
                y[0] = y[0] + 2 * pi

        if ntransient <= i < nmax:          # <-- draw the poincare plots
            plt.scatter(y[0], y[1], s=2.0, marker='o', color='blue')
            time_text.set_text('n = %d' % i)
            plt.draw()

        if i == nmax:                       # <-- to stop the interactive mode
            plt.ioff()
            plt.scatter(y[0], y[1], s=2.0, marker='o', color='blue')
            time_text.set_text('n = %d' % i)
            plt.show()
Example #11
0
	def transition_related_averaging_run(self, simulation_data, smoothing_kernel_width = 200, sampling_interval = [-50, 150], plot = True ):
		"""docstring for transition_related_averaging"""
		transition_occurrence_times = self.transition_occurrence_times(simulation_data = simulation_data, smoothing_kernel_width = smoothing_kernel_width)
		# make sure only valid transition_occurrence_times survive
		transition_occurrence_times = transition_occurrence_times[(transition_occurrence_times > -sampling_interval[0]) * (transition_occurrence_times < (simulation_data.shape[0] - sampling_interval[1]))]
		
		# separated into on-and off periods:
		transition_occurrence_times_separated = [transition_occurrence_times[::2], transition_occurrence_times[1::2]]
		
		mean_time_course, std_time_course = np.zeros((2, sampling_interval[1] - sampling_interval[0], 5)), np.zeros((2, sampling_interval[1] - sampling_interval[0], 5))
		if transition_occurrence_times_separated[0].shape[0] > 2:
			
			for k in [0,1]:
				averaging_interval_times = np.array([transition_occurrence_times_separated[k] + sampling_interval[0],transition_occurrence_times_separated[k] + sampling_interval[1]]).T
				interval_data = np.array([simulation_data[avit[0]:avit[1]] for avit in averaging_interval_times])
				mean_time_course[k] = interval_data.mean(axis = 0)
				std_time_course[k] = (interval_data.std(axis = 0) / np.sqrt(interval_data.shape[0]))
			
			if plot:
				f = pl.figure(figsize = (10,8))
				for i in range(simulation_data.shape[1]):
					s = f.add_subplot(simulation_data.shape[1], 1, 1 + i)
					for j in [0,1]:
						pl.plot(np.arange(mean_time_course[j].T[i].shape[0]), mean_time_course[j].T[i], ['r--','b--'][j], linewidth = 2.0 )
						pl.fill_between(np.arange(mean_time_course[j].shape[0]), mean_time_course[j].T[i] + std_time_course[j].T[i], mean_time_course[j].T[i] - std_time_course[j].T[i], color = ['r','b'][j], alpha = 0.2)
					s.set_title(self.variable_names[i])
				pl.draw()
			
		return (mean_time_course, std_time_course)
Example #12
0
def runcand(doplot=False):
    
    import time
    l = os.listdir("BenLike/")
    m = open("features.csv","w")
    has_run = False
    for f in l:
        if f.find(".xml") != -1:
            fname = "BenLike/" + f
            print "working on", f
            x0,y,dy, name = _load_dotastro_data(fname)
            a = ebfeature(t=x0,m=y,merr=dy,name=name)
            a.gen_orbital_period(doplot=doplot)
            if doplot:
                plt.draw()
            if not has_run:
                ff = list(a.features.keys())
                ff.remove("run")
                ff.remove("p_pulse_initial")
                m.write("name," + ",".join(ff) + "\n")
                has_run = True
            
            m.write(os.path.basename(name) + "," + ",".join([str(a.features.get(s)) for s in ff]) + "\n")
            time.sleep(1)
    m.close()
Example #13
0
 def plot_data(self,data,plots):
     """
     plot the parameters requested. In principle, this list of tuple pairs 
     can be very large and you can have multiple subplot views of your data.
     """
     self.fig = plt.figure()
     
     ## figure out the number of plots to make and how to order them
     if plots is None:
          print(len(data[0]))
         parameters = ["p%i" % i for i in range(len(data[0]))]
         plots = []
         for i,p in enumerate(parameters[:-1]):
             for pp in parameters[i+1:]:
                 plots.append( (p,pp))
 
     nrow  = int(np.floor(np.sqrt(len(plots))))
     ncols = int(np.ceil(len(plots)/nrow))
 
     ## loop over all the plots
     self.axes = []
     for i,p in enumerate(plots):
         
         # add the axes and also save what's being plotted here
         self.axes.append( (self.fig.add_subplot(nrow,ncols,i+1),p))
         
         # set the color explicitly for each point
         cols = np.ones( (len(data),4))
         cols[:,0:2] = 0.4
         
         plt.scatter(data[p[0]],data[p[1]],c=cols,edgecolors='none')
         plt.xlabel( p[0] )
         plt.ylabel( p[1] )
         
     plt.draw()
Example #14
0
def axes_animate_out(ax, iterations=10, max_animation_time=1):
    bottom, top = ax.get_ylim()

    alpha_objects = ax.findobj(has_alpha)
    alpha_divider = np.power(float(iterations-1), 2)

    def ease_out(cur_index, end_index, func):
        return 1. / (func(cur_index) / float(func(end_index)))

    start_time = time()
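    # Rescale the y-limits and update the alpha of every artist found by has_alpha
    # on each step; if the animation exceeds max_animation_time, make everything
    # fully transparent and stop early.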
    for i in range(0, iterations):
        c = ease_out(iterations - i, iterations, roll_index_transformation)
        ax.set_ylim([bottom, c*top])

        for item in alpha_objects:
            item.set_alpha(alpha_index_transformation(iterations-i-1) / alpha_divider)

        if time() - start_time > max_animation_time:
            for item in alpha_objects:
                item.set_alpha(0)

            pl.draw()
            return

        pl.draw()
Example #15
0
def main():
    # u_t = u_xx
    dx = .1
    dt = .5
    timesteps = 100000

    x = np.arange(-10,10,dx)
    m = len(x)
    kappa = 50

    # u''(x) = (u(x + dx) - 2u(x) + u(x - dx)) / dx^2
    ones = lambda x: np.ones(x)
    A = np.diag(ones(m-1),k=-1) + -2*np.diag(ones(m)) + np.diag(ones(m-1),k=1)
    A *= kappa*(dx**2)

    U = 0*ones(m)
    for i in range(0,m):
        if x[i] > -2 and x[i] < 2:
            U[i] = 1

    p.ion()
    lines, = p.plot(x,U)
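    # Forward-Euler time stepping: U <- U + dt*dudt(U, A), where dudt (defined
    # elsewhere) is assumed to apply the finite-difference operator A to U;
    # the curve is redrawn every 100 steps.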
    for n in range(0,timesteps):
        U = U + dt*dudt(U,A)
        
        if n % 100 == 0:
            lines.set_ydata(U)
            p.draw()
    p.show()
Example #16
0
def histograma(hist):
    
    hist=hist.histogram(255)
##    hist.save("hola4Hist.txt")
    pylab.plot(hist)
    pylab.draw()
    pylab.pause(0.0001)
Example #17
0
def test():

    GP = GaussianProcess(GaussianKernel_iso([0.2, 1.0]))
    X = array([[0.2], [0.3], [0.5], [1.5]])
    Y = [1, 0, 1, 0.75]
    GP.addData(X, Y)

    figure(1)
    A = arange(0, 2, 0.01)
    mu = array([GP.mu(x) for x in A])
    sig2 = array([GP.posterior(x)[1] for x in A])

    Ei = EI(GP)
    ei = [-Ei.negf(x) for x in A]

    Pi = PI(GP)
    pi = [-Pi.negf(x) for x in A]

    Ucb = UCB(GP, 1, T=2)
    ucb = [-Ucb.negf(x) for x in A]
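    # Plot the GP posterior mean with a +/- variance band, then overlay the
    # EI, UCB and PI acquisition curves and the observed data points.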

    ax = subplot(1, 1, 1)
    ax.plot(A, mu, "k-", lw=2)
    xv, yv = poly_between(A, mu - sig2, mu + sig2)
    ax.fill(xv, yv, color="#CCCCCC")

    ax.plot(A, ei, "g-", lw=2, label="EI")
    ax.plot(A, ucb, "g--", lw=2, label="UCB")
    ax.plot(A, pi, "g:", lw=2, label="PI")
    ax.plot(X, Y, "ro")
    ax.legend()
    draw()
    show()
Example #18
0
    def test1():
        x = [0.5]*3
        xbounds = [(-5, 5) for y in x]


        GA = GenAlg(fitcalc1, x, xbounds, popMult=100, bitsPerGene=9, mutation=(1./9.), crossover=0.65, crossN=2, direction='min', maxGens=60, hammingDist=False)
        results = GA.run()
        print "*** DONE ***"
        #print results
        plt.ioff()
        #generate pareto frontier numerically
        x1_ = np.arange(-5., 0., 0.05)
        x2_ = np.arange(-5., 0., 0.05)
        x3_ = np.arange(-5., 0., 0.05)

        pfn = []
        for x1 in x1_:
            for x2 in x2_:
                for x3 in x3_:
                    pfn.append(fitcalc1([x1,x2,x3]))

        pfn.sort(key=lambda x:x[0])
        
        plt.figure()
        i = 0
        for x in results:
            plt.scatter(x[1][0], x[1][1], 20, c='r')

        plt.scatter([x[0] for x in pfn], [x[1] for x in pfn], 1.0, c='b', alpha=0.1)
        plt.xlim([-20,-1])
        plt.ylim([-12, 2])
        plt.draw()
Example #19
0
 def click(event):
     print([event.key])
     if event.key == 'm':
         mode = input('Enter new mode: ')
         for k in plots:
             try:
                 d = data_mode(plt_data[k], mode)
                 plots[k].set_data(d)
             except(ValueError):
                 print('Unrecognized plot mode')
         p.draw()
     elif event.key == 'd':
         max = input('Enter new max: ')
         try: max = float(max)
         except(ValueError): max = None
         drng = input('Enter new drng: ')
         try: drng = float(drng)
         except(ValueError): drng = None
         for k in plots:
             _max,_drng = max, drng
             if _max is None or _drng is None:
                 d = plots[k].get_array()
                 if _max is None: _max = d.max()
                 if _drng is None: _drng = _max - d.min()
             plots[k].set_clim(vmin=_max-_drng, vmax=_max)
         print('Replotting...')
         p.draw()
Example #20
0
def ttplot(corf,sr,sl,norm,n, I1, I2,lag):
    global tplot_cum
    firstfile=int(input_info['n_first_image'])
    tplot=time.time()
    rchplot=int(ceil(log(n/nchannels)/log(2))+1)
    normplot=zeros((1,rcr),dtype=float32)
    
    for ir in range(rchplot):
       if ir==0:
           normplot[0,:nchannels]=1./arange(n-2,n-nchannels-2,-1)
       else:
           normplot[0,nchannels2*(ir+1):nchannels2*(ir+2)]=1./arange((n-1)/(2**ir)-nchannels2-1,(n-1)/(2**ir)-nchannels-1,-1)

    indt=int(nchannels+nchannels2*log(n/nchannels)/log(2))-2
    cc1=corf[0,:indt]/(sl[0,:indt]*sr[0,:indt])/normplot[0,:indt]
    cc2=corf[nq//2,:indt]/(sl[nq//2,:indt]*sr[nq//2,:indt])/normplot[0,:indt]
    t_axis=lag[0,:indt]
    t_axis2=tI_avg[0,:n]
    t_axis2b=tI_avg[0,:n]/dt+firstfile
    lm1.set_data(t_axis,cc1)
    lm2.set_data(t_axis2,I1)
    lm1b.set_data(t_axis,cc2)
    lm2b.set_data(t_axis2b,I2)
    ax1.set_xlim(min(t_axis),max(t_axis))
    ax1.set_ylim(min(cc1),max(cc1))
    ax1b.set_ylim(min(cc2),max(cc2))
    ax2.set_xlim(min(t_axis2),max(t_axis2))
    ax2b.set_xlim(min(t_axis2b),max(t_axis2b))
    ax2.set_ylim(min(I1),max(I1))
    ax2b.set_ylim(min(I2),max(I2))
    p.draw()
    tplot_cum+=time.time()-tplot
Example #21
0
    def update(self, train_dict_list, test_dict_list, folder):  
  
        if test_dict_list != []:

            self.p1.set_xdata(np.append(self.p1.get_xdata(), test_dict_list[-1]['NumIters']))
            self.p1.set_ydata(np.append(self.p1.get_ydata(), test_dict_list[-1]['PixelAccuracy']))
        
            self.p2.set_xdata(np.append(self.p2.get_xdata(), test_dict_list[-1]['NumIters']))
            self.p2.set_ydata(np.append(self.p2.get_ydata(), test_dict_list[-1]['MeanAccuracy']))
        
            self.p3.set_xdata(np.append(self.p3.get_xdata(), test_dict_list[-1]['NumIters']))
            self.p3.set_ydata(np.append(self.p3.get_ydata(), test_dict_list[-1]['IU']))
        
            self.p4.set_xdata(np.append(self.p4.get_xdata(), test_dict_list[-1]['NumIters']))
            self.p4.set_ydata(np.append(self.p4.get_ydata(), test_dict_list[-1]['FreqWeighMeanAcc']))

        if train_dict_list != []:

            self.p5.set_xdata(np.append(self.p5.get_xdata(), train_dict_list[-1]['NumIters']))
            self.p5.set_ydata(np.append(self.p5.get_ydata(), train_dict_list[-1]['PixelAccuracy']))
        
            self.p6.set_xdata(np.append(self.p6.get_xdata(), train_dict_list[-1]['NumIters']))
            self.p6.set_ydata(np.append(self.p6.get_ydata(), train_dict_list[-1]['MeanAccuracy']))
        
            self.p7.set_xdata(np.append(self.p7.get_xdata(), train_dict_list[-1]['NumIters']))
            self.p7.set_ydata(np.append(self.p7.get_ydata(), train_dict_list[-1]['IU']))
        
            self.p8.set_xdata(np.append(self.p8.get_xdata(), train_dict_list[-1]['NumIters']))
            self.p8.set_ydata(np.append(self.p8.get_ydata(), train_dict_list[-1]['FreqWeighMeanAcc']))
            
        self.ax2.relim()
        self.ax2.autoscale_view()
        plt.draw()
        
        self.metrics_fig.savefig(os.path.join(os.path.join(folder, 'plots'), 'Metrics.png'), bbox_extra_artists=(self.lgd2,), bbox_inches="tight")
Example #22
0
def histogramac(histc):
    
    histc=histc.histogram(255)
    B.show()
    pylab.plot(histc)
    pylab.draw()
    pylab.pause(0.0001)
Example #23
0
	def callback(self,event):
		self.changed = True
		self.last_time+=1
		if(self.last_time>6):
			self.last_time=0
			self.update(0)
			plt.draw()
Example #24
0
    def __init__(self, folder, **kwargs):  
        
        if not os.path.isdir(os.path.join(folder, 'plots')):
            os.mkdir(os.path.join(folder, 'plots'))
        plt.ioff()
        self.metrics_fig = plt.figure('Metrics')
        self.ax2 = self.metrics_fig.add_subplot(111)

        self.p1, = self.ax2.plot([], [], 'ro-', label='TEST: Pixel accuracy')
        self.p5, = self.ax2.plot([], [], 'rv-', label='TRAIN: Pixel accuracy')
        
        self.p2, = self.ax2.plot([], [], 'bo-', label='TEST: Mean-Per-Class accuracy')
        self.p6, = self.ax2.plot([], [], 'bv-', label='TRAIN:Mean-Per-Class accuracy')
        
        self.p3, = self.ax2.plot([], [], 'go-', label='TEST: Mean-Per-Class IU')
        self.p7, = self.ax2.plot([], [], 'gv-', label='TRAIN:Mean-Per-Class IU')
        
        self.p4, = self.ax2.plot([], [], 'ko-', label='TEST: Freq. weigh. mean IU')
        self.p8, = self.ax2.plot([], [], 'kv-', label='TRAIN:Freq. weigh. mean IU')
        


        plt.xlabel('iterations')
        self.handles2, self.labels2 = self.ax2.get_legend_handles_labels()
        self.lgd2 = self.ax2.legend(self.handles2, self.labels2, loc='upper center', bbox_to_anchor=(0.5,-0.2))
        self.ax2.grid(True)    
        plt.draw()
Example #25
0
 def redrawall(self):
     #Color all class one labeled pixels red 
     oneclazz = np.nonzero(self.mmc.classvec)[0]
     col_row = self.collection[oneclazz]
     rowcs, colcs = col_row[:, 1], col_row[:, 0]
     red = np.array([255, 0, 0])
     for i in range(-self.windowsize, self.windowsize + 1):
         for j in range(-self.windowsize, self.windowsize + 1):
             self.img[rowcs+i, colcs+j, :] = red
     
     #Return the original color of the class zero labeled pixels 
     zeroclazz = np.nonzero(self.mmc.classvec - 1)[0]
     col_row = self.collection[zeroclazz]
     rowcs, colcs = col_row[:, 1], col_row[:, 0]
     for i in range(-self.windowsize, self.windowsize + 1):
         for j in range(-self.windowsize, self.windowsize + 1):
             self.img[rowcs+i, colcs+j, :] = self.img_orig[rowcs+i, colcs+j, :]
     self.imdata.set_data(self.img)
     
     #Update the slider position according to labeling of the current working set
     sliderval = 0
     if len(mmc.working_set) > 0:
         sliderval = len(np.nonzero(self.mmc.classvec_ws)[0]) / len(mmc.working_set)
     self.in_selection_slider.set_val(sliderval)
     
     #Update the RLS objective function display
     self.objfun_display_axis.imshow(mmc.compute_steepness_vector()[np.newaxis, :], cmap=plt.get_cmap("Oranges"))
     self.objfun_display_axis.set_aspect('auto')
     
     #Final stuff
     self.lasso.canvas.draw_idle()
     plt.draw()
     print_instructions()
Example #26
0
def kmr_test_plot(data, k, end_thresh):
    from matplotlib.pylab import ion, figure, draw, ioff, show, plot, cla
    ion()
    fig = figure()
    ax = fig.add_subplot(111)
    ax.grid(True)

    # get k centroids
    kmr = kmeans.kmeans_runner(k, end_thresh)
    kmr.init_data(data)
    print(kmr.centroids)

    plot(data[:,0], data[:,1], 'o')

    i = 0
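    # Re-plot the centroid positions after every k-means iteration until the
    # runner sets its stop flag.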
    while kmr.stop_flag is False:
        kmr.iterate()
        #print kmr.centroids, kmr.itr_count
        plot(kmr.centroids[:, 0], kmr.centroids[:, 1], 'sr')
        time.sleep(.2)
        draw()
        i += 1

    print "N Iterations: %d" % (i)
    plot(kmr.centroids[:, 0], kmr.centroids[:, 1], 'g^', linewidth=3)

    ioff()
    show()
    print(kmr.itr_count, kmr.centroids)
Example #27
0
 def plot(self, outf=None, dosave=True, savedir="Plot/", show=True):
     if outf is None:
         outf = self.outf
         # print outf
     oo = mlab.csv2rec(outf, delimiter=" ")
     # print oo
     plt.errorbar(oo["time"] % self.period, oo["magnitude"], oo["error"], fmt="b.")
     plt.plot(oo["time"] % self.period, oo["model"], "ro")
     plt.title(
         "#%i P=%f d (chisq/dof = %f) r1+r2=%f"
         % (self.dotastro_id, self.period, self.outrez["chisq"], self.outrez.get("r1") + self.outrez.get("r2"))
     )
     ylim = plt.ylim()
     # print ylim
     if ylim[0] < ylim[1]:
         plt.ylim(ylim[1], ylim[0])
     plt.draw()
     if show:
         plt.show()
     if dosave:
         if not os.path.isdir(savedir):
             os.mkdir(savedir)
         plt.savefig("%splot%i.png" % (savedir, self.dotastro_id))  # ,self.period))
         print("Saved", "%splot%i.png" % (savedir, self.dotastro_id))  # ,self.period)
     plt.clf()
Example #28
0
    def _brush(self,event,region,inverse=False):
        """
        This will loop through all the other subplots (without the brush region)
        and change the opacity of the points not associated with that region.
        
        when inverse is True, it will "unbrush" by resetting the opacity of the brushed points
        """
        opacity_fraction = self.opac
        # what variables are in the plot?
        plot_vars = [x[1] for x in self.axis_info if x[0] == event.inaxes][0]
        
        ## figure out the min max of this region
        minx, miny   = region.get_xy()
        maxx  = minx + region.get_width()
        maxy =  miny + region.get_height()
        
        ## now query the data to get all the sources that are inside this range
        if isinstance(self.data[plot_vars[0]][0],datetime.datetime):
            maxx = datetime.datetime.fromordinal(maxx)
            minx= datetime.datetime.fromordinal(minx)
        elif isinstance(self.data[plot_vars[0]][0],datetime.date):
            maxx = datetime.date.fromordinal(maxx)
            minx= datetime.date.fromordinal(minx)
            
        if isinstance(self.data[plot_vars[1]][0],datetime.datetime):
            maxy = datetime.datetime.fromordinal(maxy)
            miny = datetime.datetime.fromordinal(miny)
        elif isinstance(self.data[plot_vars[1]][0],datetime.date):
            maxy = datetime.date.fromordinal(maxy)
            miny= datetime.date.fromordinal(miny)
        
        inds = (self.data[plot_vars[0]]<= maxx) & (self.data[plot_vars[0]] > minx) & \
               (self.data[plot_vars[1]] <= maxy) & (self.data[plot_vars[1]] > miny)
        invinds = ~ inds  # get all indices of those records not inside the region
        
        for a,pv in self.axis_info:
            # don't self-brush!
            if a == event.inaxes:
                continue

            ## get the scatterplot color and alpha channel data
            self.t = a.collections[0]
            fc = self.t.get_facecolor() # this will be a 2d array
            '''Here we change the color and opacity of the points
            fc[index,0] = Red
            fc[index,1] = Green
            fc[index,2] = Blue
            fc[index,3] = Alpha
            
            default is  [ 0.4   ,  0.4   ,  1.    ,  1.0]
            '''
            if not inverse: 
                fc[invinds,2] /= 20. #reduce blue channel greatly
                fc[invinds,3] /= opacity_fraction 
            else:
                fc[invinds,2] *= 20.
                fc[invinds,3] *= opacity_fraction
            self.t.set_facecolor(fc)
            
        plt.draw()
Example #29
0
def pick(event) :

    global data
    global X
    global Y
    global numpy_container
    if event.key == 'q' :
        if len(X) == 0 : return
        if not numpy_container :
            data = VectorDataSet(X)
        else :
            data = PyVectorDataSet(numpy.array(X))
        data.attachLabels(Labels(Y))
        X = []
        Y = []
        print('done creating data.  close this window and use the decisionSurface function')
        pylab.disconnect(binding_id)
    if event.key =='1' or event.key == '2' :
        if event.inaxes is not None:
            print('data coords', event.xdata, event.ydata)
            X.append([event.xdata, event.ydata])
            Y.append(event.key)
            pylab.plot([event.xdata], [event.ydata], 
                       plotStr[int(event.key) - 1])
            pylab.draw()
Example #30
0
def plot_spikes(time,voltage,APTimes,titlestr):
    """
    plot_spikes takes four arguments - the recording time array, the voltage
    array, the time of the detected action potentials, and the title of your
    plot.  The function creates a labeled plot showing the raw voltage signal
    and indicating the location of detected spikes with red tick marks (|)
    """
# Make a plot and markup
    plt.figure()
    plt.title(titlestr)
    plt.xlabel("Time (s)")
    plt.ylabel("Voltage (uV)") 

    plt.plot(time, voltage)
    
# Vertical positions for red marker
# The following attributes are configurable if required    
    vertical_markers_indent = 0.01 # 1% of Voltage scale height
    vertical_markers_height = 0.03 # 3% of Voltage scale height
    y_scale_height = 100 # Max of scale
    
    marker_ymin = 0.5 + ( max(voltage) / y_scale_height / 2 ) + vertical_markers_indent
    marker_ymax = marker_ymin + vertical_markers_height

# Drawing red markers for detected spikes
    for spike in APTimes:
        plt.axvline(spike, ymin=marker_ymin, ymax=marker_ymax, color='red')
    
    plt.draw()
Example #31
0
pylab.figure()

pylab.plot(mixdiag_info_dict['lap_history'],
           mixdiag_info_dict['loss_history'],
           'b.-',
           label='mix + diag gauss')

pylab.plot(hmmdiag_info_dict['lap_history'],
           hmmdiag_info_dict['loss_history'],
           'k.-',
           label='hmm + diag gauss')

pylab.plot(hmmfull_info_dict['lap_history'],
           hmmfull_info_dict['loss_history'],
           'r.-',
           label='hmm + full gauss')

pylab.plot(hmmar_info_dict['lap_history'],
           hmmar_info_dict['loss_history'],
           'c.-',
           label='hmm + ar gauss')

pylab.legend(loc='upper right')
pylab.xlabel('num. laps')
pylab.ylabel('loss')
pylab.xlim([4, 100])  # avoid early iterations
pylab.ylim([2.4, 3.7])  # handpicked
pylab.draw()
pylab.tight_layout()
pylab.show()
Example #32
0
 def Update(self):
     plb.draw()
Example #33
0
def extract_pseudo_lms(pos_list):
    """Determine pseudo-landmarks

    Calculate the landmarks using the positions of the robot as anchor points.
    NOTE For now this is only a nice approach for square datasets.

    pos_list : list.
        List of robot positions.

    """
    pos_arr = np.hstack(pos_list).T

    fig = plt.figure()
    plt.ion()
    plt.axis("equal")

    hull = ConvexHull(pos_arr[:, :2])
    indices = hull.vertices
    indices = np.hstack((indices, indices[0]))
    # Visual check for triangulation
    plt.gca().clear()
    plt.scatter(*pos_arr[:, :2].T)
    plt.plot(*pos_arr[indices, :2].T)
    for i in indices:
        plt.text(*pos_arr[i, :2].T, s=str(i))
    plt.draw()
    plt.pause(0.01)
    while 1:
        keep_str = input(
            "Enter the IDs of landmarks to be keep (comma seperated, blank continues, order matters!): "
        )
        try:
            keep_new = ast.literal_eval(keep_str)

            # XXX in case something wrong is entered, just restart the loop
            if type(keep_new) is int:
                continue
            elif len(keep_new) != 3:
                continue
            else:
                keep = keep_new
        except Exception:
            logger.exception("Error understanding input")
            break

        plt.gca().clear()
        plt.scatter(*pos_arr[:, :2].T)
        plt.plot(*pos_arr[keep[:2], :2].T)
        plt.plot(*pos_arr[keep[::2], :2].T)
        for i in indices:
            plt.text(*pos_arr[i, :2].T, s=str(i))
        plt.draw()
        plt.pause(0.01)

    if len(keep) != 3:
        raise ValueError("Can only keep 3 landmarks")
    lms = pos_arr[keep, :].reshape(3, 3, 1)  # (N, d, 1)
    # Enforce all lms in the same plane
    lms[1:, 2] = 1 * lms[0, 2]

    # Axis vectors
    vec_x = lms[1, :] - lms[0, :]
    axis_x = vec_x / np.linalg.norm(vec_x)
    axis_z = np.array([[0, 0, 1]]).T
    axis_y = np.cross(axis_x.flatten(), axis_z.flatten()).reshape(3, 1)
    vec_2 = lms[2, :] - lms[0, :]
    vec_y = axis_y.T.dot(vec_2) * axis_y

    vec_x = vec_x.flatten()
    vec_y = vec_y.flatten()

    tri = Delaunay(lms[:, :2, 0])
    simplices = tri.simplices

    # Visual check for triangulation
    plt.gca().clear()
    plt.triplot(*lms[:, :2, 0].T, simplices)
    plt.scatter(*pos_arr[:, :2].T)
    plt.draw()
    plt.pause(0.01)

    indices = np.arange(0, lms.shape[1])
    valid_indices = np.arange(0, lms.shape[1])
    while 1:
        div_str = input("Enter the number of divisions (>= 1): ")
        try:
            div = ast.literal_eval(div_str)
            if (type(div) is not int) or (div < 0):
                raise ValueError("Divisons must be integer and >=1.")

        except Exception:
            logger.exception("Error understanding input")
            div = 0
            break

        N_lms = (div + 1)**2
        spacing = np.linspace(0, 1, div + 1)
        x, y = np.meshgrid(spacing, spacing)
        coords = np.hstack(
            (x.reshape(N_lms, 1), y.reshape(N_lms, 1), np.zeros(
                (N_lms, 1))))  # (N_lms, 3)

        pseudo_lms = np.zeros((N_lms, 3, 1), dtype=float)
        pseudo_lms[:, :,
                   0] = coords[:, 0, None] * vec_x + coords[:, 1, None] * vec_y
        pseudo_lms += lms[0, :]

        triangulation = Delaunay(pseudo_lms[:, :2, 0])

        # Check number of regions from triangulation matches the divisions
        N = triangulation.simplices.shape[0]

        # This shouldn't be an issue anymore
        # assert (N == 2*div**2) , "Number of triangulation regions is
        # inconsistent with number of divisions."

        simplices = triangulation.simplices

        # Visual check for triangulation
        plt.gca().clear()
        plt.triplot(*pseudo_lms[:, :2, 0].T, simplices)
        plt.scatter(*pos_arr[:, :2].T)
        plt.draw()
        plt.pause(0.01)

    plt.close(fig)
    plt.ioff()

    return pseudo_lms, triangulation
Example #34
0
def propag(n, V, a):
    '''Takes as arguments a square-matrix size, the wind origin chosen from 'N', 'S', 'E', 'O', 'NE', 'NO', 'SO', 'SE', 'A', and a tree type ('R' for conifer, 'F' for broadleaf); returns the creation of a forest, the ignition of one tree and the first step of the propagation'''
    cmap = colors.ListedColormap(['white', 'limegreen', 'red', 'black'])
    anima = []
    F, L = crea_foret_feu(n)
    input()
    plt.close()
    p = 0.8
    if a == 'R':
        p = 1
    P = liste(p)
    k = 1
    for (m, r) in L:
        if m < n - 1 and r < n - 1 and m > 1 and r > 1:
            if V == 'S':  # wind from the south
                if F[m - 1, r - 1] == 1:
                    F[m - 1, r - 1] = choice(P)
                    if F[m - 1, r - 1] == 2:
                        L.append((m - 1, r - 1))
                if F[m - 1, r] == 1:
                    F[m - 1, r] = choice(P)
                    if F[m - 1, r] == 2:
                        L.append((m - 1, r))
                if F[m - 1, r + 1] == 1:
                    F[m - 1, r + 1] = choice(P)
                    if F[m - 1, r + 1] == 2:
                        L.append((m - 1, r + 1))
            if V == 'N':  # wind from the north
                if F[m + 1, r - 1] == 1:
                    F[m + 1, r - 1] = choice(P)
                    if F[m + 1, r - 1] == 2:
                        L.append((m + 1, r - 1))
                if F[m + 1, r] == 1:
                    F[m + 1, r] = choice(P)
                    if F[m + 1, r] == 2:
                        L.append((m + 1, r))
                if F[m + 1, r + 1] == 1:
                    F[m + 1, r + 1] = choice(P)
                    if F[m + 1, r + 1] == 2:
                        L.append((m + 1, r + 1))
            if V == 'E':  # wind from the west
                if F[m - 1, r - 1] == 1:
                    F[m - 1, r - 1] = choice(P)
                    if F[m - 1, r - 1] == 2:
                        L.append((m - 1, r - 1))
                if F[m, r - 1] == 1:
                    F[m, r - 1] = choice(P)
                    if F[m, r - 1] == 2:
                        L.append((m, r - 1))
                if F[m + 1, r - 1] == 1:
                    F[m + 1, r - 1] = choice(P)
                    if F[m + 1, r - 1] == 2:
                        L.append((m + 1, r - 1))
            if V == 'O':  # wind from the east
                if F[m - 1, r + 1] == 1:
                    F[m - 1, r + 1] = choice(P)
                    if F[m - 1, r + 1] == 2:
                        L.append((m - 1, r + 1))
                if F[m, r + 1] == 1:
                    F[m, r + 1] = choice(P)
                    if F[m, r + 1] == 2:
                        L.append((m, r + 1))
                if F[m + 1, r + 1] == 1:
                    F[m + 1, r + 1] = choice(P)
                    if F[m + 1, r + 1] == 2:
                        L.append((m + 1, r + 1))
            if V == 'SO':  # wind from the south-west
                if F[m - 1, r + 1] == 1:
                    F[m - 1, r + 1] = choice(P)
                    if F[m - 1, r + 1] == 2:
                        L.append((m - 1, r + 1))
                if F[m - 1, r] == 1:
                    F[m - 1, r] = choice(P)
                    if F[m - 1, r] == 2:
                        L.append((m - 1, r))
                if F[m, r + 1] == 1:
                    F[m, r + 1] = choice(P)
                    if F[m, r + 1] == 2:
                        L.append((m, r + 1))
            if V == 'SE':  # wind from the south-east
                if F[m - 1, r - 1] == 1:
                    F[m - 1, r - 1] = choice(P)
                    if F[m - 1, r - 1] == 2:
                        L.append((m - 1, r - 1))
                if F[m - 1, r] == 1:
                    F[m - 1, r] = choice(P)
                    if F[m - 1, r] == 2:
                        L.append((m - 1, r))
                if F[m, r - 1] == 1:
                    F[m, r - 1] = choice(P)
                    if F[m, r - 1] == 2:
                        L.append((m, r - 1))
            if V == 'NE':  # wind from the north-east
                if F[m, r - 1] == 1:
                    F[m, r - 1] = choice(P)
                    if F[m, r - 1] == 2:
                        L.append((m, r - 1))
                if F[m + 1, r - 1] == 1:
                    F[m + 1, r - 1] = choice(P)
                    if F[m + 1, r - 1] == 2:
                        L.append((m + 1, r - 1))
                if F[m + 1, r] == 1:
                    F[m + 1, r] = choice(P)
                    if F[m + 1, r] == 2:
                        L.append((m + 1, r))
            if V == 'NO':  # wind from the north-west
                if F[m + 1, r + 1] == 1:
                    F[m + 1, r + 1] = choice(P)
                    if F[m + 1, r + 1] == 2:
                        L.append((m + 1, r + 1))
                if F[m + 1, r] == 1:
                    F[m + 1, r] = choice(P)
                    if F[m + 1, r] == 2:
                        L.append((m + 1, r))
                if F[m, r + 1] == 1:
                    F[m, r + 1] = choice(P)
                    if F[m, r + 1] == 2:
                        L.append((m, r + 1))
            if V == 'A':  # no wind
                if F[m, r - 1] == 1:
                    F[m, r - 1] = choice(P)
                    if F[m, r - 1] == 2:
                        L.append((m, r - 1))
                if F[m - 1, r] == 1:
                    F[m - 1, r] = choice(P)
                    if F[m - 1, r] == 2:
                        L.append((m - 1, r))
                if F[m + 1, r] == 1:
                    F[m + 1, r] = choice(P)
                    if F[m + 1, r] == 2:
                        L.append((m + 1, r))
                if F[m, r + 1] == 1:
                    F[m, r + 1] = choice(P)
                    if F[m, r + 1] == 2:
                        L.append((m, r + 1))
            F[m, r] = 3  # Tree reduced to ashes: destroyed and can no longer spread the fire
            G = F[2:n - 2, 2:n - 2]
            print(G)
            anima.append([matshow(G, fignum=False, animated=True, cmap=cmap)])
            plt.draw()
            plt.show()
            i = input()
            plt.close()
            if i == 's':
                return ('End of the simulation')
        k = k + 1
        print(k)
Example #35
0
        v[p] = 20.0
        v[p1] = 15.0
        v[p2] = -20.0
        sp = findspikes(t, v, 0.0, dt=dt, mode='schmitt', interpolate=False)
        print('findSpikes')
        print('sp: ', sp)
        f = MP.figure(1)
        MP.plot(t, v, 'ro-')
        si = (numpy.floor(sp / dt)).astype(int)
        print('si: ', si)
        spk = []
        for k in si:
            spk.append(numpy.argmax(v[k - 1:k + 1]) + k)
        MP.plot(sp, v[spk], 'bs')
        MP.ylim((0, 25))
        MP.draw()
        MP.show()

        exit()
        print("getSpikes")
        y = [] * 5
        for j in range(0, 1):
            d = numpy.zeros((5, 1, len(v)))
            for k in range(0, 5):
                p = range(20 * k, 500, 50 + int(50.0 * (k / 2.0)))
                vn = v.copy()
                vn[p] = 20.0
                d[k, 0, :] = numpy.array(vn)  # load up the "spike" array
            y.append(d)
        tpts = range(0, len(t))  # numpy.arange(0, len(t)).astype(int).tolist()
        #def findspikes(x, v, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False):
Example #36
0
def main(unused_args):
    print(unused_args)
    #Generating some data
    x, y = gen_seq()
    n_steps = len(x) // 2
    plt.plot(x, y)
    plt.show()
    seq_width = 10
    num_hidden = 10

    ### Model initialiation

    #random uniform initializer for the LSTM nodes
    initializer = tf.random_uniform_initializer(-.1, .1)

    #placeholders for input/target/sequence_length
    seq_input = tf.placeholder(tf.float32, [n_steps, seq_width])
    seq_target = tf.placeholder(tf.float32, [n_steps, 1.])
    early_stop = tf.placeholder(tf.int32)

    #making a list of timestamps for rnn input
    inputs = [
        tf.reshape(i, (1, seq_width)) for i in tf.split(0, n_steps, seq_input)
    ]

    #LSTM cell
    cell = rnn_cell.LSTMCell(num_hidden, seq_width, initializer=initializer)

    initial_state = cell.zero_state(1, tf.float32)
    #feeding sequence to the RNN
    outputs, states = rnn(cell,
                          inputs,
                          initial_state=initial_state,
                          sequence_length=early_stop)

    #outputs is a list, but we need a single tensor instead
    outputs = tf.reshape(tf.concat(1, outputs), [-1, num_hidden])

    #mapping to 1-D
    W = tf.get_variable('W', [num_hidden, 1])
    b = tf.get_variable('b', [1])

    #final prediction
    output = tf.matmul(outputs, W) + b

    #squared error
    error = tf.pow(tf.reduce_sum(tf.pow(tf.sub(output, seq_target), 2)), .5)

    lr = tf.Variable(0., trainable=False, name='lr')

    #optimizer setup
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(error, tvars), 5.)
    optimizer = tf.train.AdamOptimizer(lr)
    train_op = optimizer.apply_gradients(zip(grads, tvars))
    ### Model initialization DONE

    ###Let the training begin
    init = tf.initialize_all_variables()

    session = tf.Session()
    session.run(init)

    #training and testing data
    train_input, train_target = gen_input(y,
                                          n_steps,
                                          offset=0,
                                          seq_width=10,
                                          lag=60)
    test_input, test_target = gen_input(y,
                                        n_steps,
                                        offset=n_steps,
                                        seq_width=10,
                                        lag=60)

    feed = {
        early_stop: n_steps,
        seq_input: train_input,
        seq_target: train_target
    }

    #initial predictions on untrained model
    outs = session.run(output, feed_dict=feed)
    plt.figure(1)
    plt.plot(x[:n_steps], train_target, 'b-', x[:n_steps], outs[:n_steps],
             'r-')
    plt.ion()
    plt.show()

    tf.get_variable_scope().reuse_variables()

    session.run(tf.assign(lr, 1.))
    saver = tf.train.Saver()

    is_training = True

    if is_training:
        #Training for 100 epochs
        for i in range(100):
            # step the learning rate down as training progresses
            # (check the largest threshold first so every branch is reachable)
            new_lr = 1e-2
            if i > 75:
                new_lr = 1e-4
            elif i > 50:
                new_lr = 5e-3
            elif i > 25:
                new_lr = 1e-2
            session.run(tf.assign(lr, new_lr))

            err, outs, _ = session.run([error, output, train_op],
                                       feed_dict=feed)

            print('Epoch %d done. Error: %1.5f' % (i + 1, err))
            plt.clf()
            plt.plot(x[:n_steps], train_target, 'b-', x[:n_steps],
                     outs[:n_steps], 'r-')
            plt.draw()
            time.sleep(.1)

        #saving the model variables
        saver.save(session,
                   'sine-wave-rnn-' + str(num_hidden) + '-' + str(seq_width),
                   global_step=0)

    if not is_training:
        saver.restore(
            session,
            'sine-wave-rnn-' + str(num_hidden) + '-' + str(seq_width) + '-0')

    plt.ioff()
    plt.figure(1)
    plt.clf()
    #model prediction on training data
    train_outs = session.run(output, feed_dict=feed)
    plt.plot(x[:n_steps], train_target[:n_steps], 'b-', x[:n_steps],
             train_outs[:n_steps], 'g--')
    #model prediction on test data
    feed = {
        seq_input: test_input,
        seq_target: test_target,
        early_stop: n_steps
    }
    test_outs = session.run(output, feed_dict=feed)

    #plotting
    plt.plot(x[n_steps:2 * n_steps], test_outs, 'r--')
    plt.plot(x[n_steps:2 * n_steps], test_target, 'b-')
    plt.show()
Example #37
0
def SingleStarReg(imfile,
                  ra,
                  dec,
                  wcsname='SINGLESTAR',
                  computesig=False,
                  refim=None,
                  threshmin=0.5,
                  peakmin=None,
                  peakmax=None,
                  searchrad=5.0,
                  nsigma=1.5,
                  fluxmin=None,
                  fluxmax=None,
                  verbose=True,
                  clobber=True):
    """ Update the WCS of imfile so that it matches the WCS of the refimfile,
    using a single star to define the necessary shift.
    If xy or xyref are provided, then assume the star is located within
    searchrad arcsec of that position in the imfile or the refimfile,
    respectively. If either of those pixel coordinates are not provided, then
    use the brightest star in the image.
    """
    # noinspection PyUnresolvedReferences
    from numpy import deg2rad, sqrt, cos, where, median
    from astropy.io import ascii
    from drizzlepac.updatehdr import updatewcs_with_shift
    from numpy import unique

    if refim is None: refim = imfile
    if computesig == False:
        skysigma = getskysigma(imfile)
        if verbose:
            print(("sndrizipipe.register.SingleStarReg: "
                   " Manually computed sky sigma for %s as %.5e" %
                   (imfile, skysigma)))
    else:
        skysigma = 0.0

    # locate stars in imfile, pick out the brightest one
    topdir = os.path.abspath('.')
    imfiledir = os.path.dirname(os.path.abspath(imfile))
    imfilebase = os.path.basename(imfile)
    os.chdir(imfiledir)

    # Iterate the source-finding algorithm with progressively smaller threshold
    # values.  This helps to ensure that we correctly locate the single bright
    # source in the image
    xycatfile = None
    threshold = 200.
    while threshold >= threshmin:
        try:
            xycatfile = mkSourceCatalog(imfilebase,
                                        computesig=computesig,
                                        skysigma=skysigma,
                                        nsigma=nsigma,
                                        threshold=threshold,
                                        peakmin=peakmin,
                                        peakmax=peakmax,
                                        fluxmin=fluxmin,
                                        fluxmax=fluxmax)[0]
            # The source finder succeeded!
            radeccatfile = xycatfile.replace('xy.coo', 'radec.coo')
            break
        except NameError:
            # the source finder failed, try again with a lower threshold
            threshold /= 2.
            continue
    if xycatfile is None:
        print(( "Failed to generate a clean source catalog for %s"%imfile + \
            " using threshmin = %.3f"%threshmin ))
        import pdb
        pdb.set_trace()
        raise RuntimeError(
            "Failed to generate a clean source catalog for %s"%imfile + \
            " using threshmin = %.3f"%threshmin )

    xycat = ascii.read(xycatfile)
    radeccat = ascii.read(radeccatfile)
    if verbose:
        print(("Located %i sources with threshold=%.1f sigma" %
               (len(xycat), threshold)))

    os.chdir(topdir)

    # compute the approximate separation in arcsec from the target ra,dec
    # to each of the detected sources, then limit to those within searchrad
    rasrc, decsrc = radeccat['col1'], radeccat['col2']
    darcsec = sqrt(((rasrc - ra) * cos(deg2rad(dec)))**2 +
                   (decsrc - dec)**2) * 3600.
    inear = where(darcsec <= searchrad)[0]

    if verbose:
        print(("   %i of these sources are within %.1f arcsec of the target" %
               (len(inear), searchrad)))

    # identify the brightest source within searchrad arcsec of the target
    ibrightest = inear[xycat['col3'][inear].argmax()]
    xfnd, yfnd = [xycat['col1'][ibrightest], xycat['col2'][ibrightest]]

    if verbose:
        brightratio = xycat['col3'][ibrightest] / median(xycat['col3'][inear])
        print((
            "   The brightest of these sources is %.1fx brighter than the median."
            % brightratio))

    # The TweakReg imagefind algorithm sometimes misses the true center
    # catastrophically. Here we use a centroiding algorithm to re-center the
    # position on the star, checking for a large offset.
    # Note that we are using numpy arrays, so the origin is (0,0) and we need
    #  to correct the cntrd output to the (1,1) origin used by pyfits and drizzlepac.
    imdat = pyfits.getdata(imfile)
    fwhmpix = getfwhmpix(imfile)
    xcntrd, ycntrd = cntrd(imdat, xfnd, yfnd, fwhmpix)
    if xcntrd == -1:
        if verbose: print('Recentering within a 5-pixel box')
        xcntrd, ycntrd = cntrd(imdat, xfnd, yfnd, fwhmpix, extendbox=5)
    assert ((xcntrd > 0) &
            (ycntrd > 0)), "Centroid recentering failed for %s" % imfile
    xcntrd += 1
    ycntrd += 1
    dxcntrd = xfnd - xcntrd
    dycntrd = yfnd - ycntrd
    if verbose > 9:
        # plot the centroid shift and save a .png image
        from matplotlib import pylab as pl
        from matplotlib import cm
        pl.clf()
        vmin = imdat[ycntrd - 10:ycntrd + 10, xcntrd - 10:xcntrd + 10].min()
        vmax = imdat[ycntrd - 10:ycntrd + 10, xcntrd - 10:xcntrd + 10].max()
        pl.imshow(imdat, cmap=cm.Greys, vmin=vmin, vmax=vmax)
        pl.plot(xfnd - 1,
                yfnd - 1,
                'r+',
                ms=12,
                mew=1.5,
                label='drizzlepac.ndfind source position: %.3f, %.3f' %
                (xfnd, yfnd))
        pl.plot(xcntrd - 1,
                ycntrd - 1,
                'gx',
                ms=12,
                mew=1.5,
                label='register.cntrd recentered position: %.3f, %.3f' %
                (xcntrd, ycntrd))
        pl.title("Centroiding shift :  dx = %.3f   dy = %.3f" %
                 (dxcntrd, dycntrd))
        ax = pl.gca()
        ax.set_xlim(xcntrd - 10, xcntrd + 10)
        ax.set_ylim(ycntrd - 10, ycntrd + 10)
        pl.colorbar()
        ax.set_xlabel('X (pixels)')
        ax.set_ylabel('Y (pixels)')
        ax.legend(loc='upper right', numpoints=1, frameon=False)
        pl.draw()
        outpng = imfile.replace('.fits', '_recenter.png')
        pl.savefig(outpng)
        print(("Saved a recentering image as " + outpng))

    # locate the appropriate extensions for updating
    hdulist = pyfits.open(imfile)
    sciextlist = [hdu.name for hdu in hdulist if 'WCSAXES' in hdu.header]
    # convert the target position from ra,dec to x,y
    if len(sciextlist) > 0:
        imwcs = stwcs.wcsutil.HSTWCS(hdulist, ext=(sciextlist[0], 1))
    else:
        sciextlist = ['PRIMARY']
        imwcs = stwcs.wcsutil.HSTWCS(hdulist, ext=None)
    xref, yref = imwcs.wcs_world2pix(ra, dec, 1)

    # If the new centroid position differs substantially from the original
    # ndfind position, then update the found source position
    # (i.e. in cases of catastrophic ndfind failure)
    if (abs(dxcntrd) > 0.5) or (abs(dycntrd) > 0.5):
        xfnd = xcntrd
        yfnd = ycntrd

    # compute the pixel shift from the xy position found in the image
    # to the reference (target) xy position
    xshift = xfnd - xref
    yshift = yfnd - yref

    # apply that shift to the image wcs
    for sciext in unique(sciextlist):
        print(("Updating %s ext %s with xshift,yshift = %.5f %.5f" %
               (imfile, sciext, xshift, yshift)))
        updatewcs_with_shift(imfile,
                             refim,
                             wcsname=wcsname,
                             rot=0.0,
                             scale=1.0,
                             xsh=xshift,
                             ysh=yshift,
                             fit=None,
                             xrms=None,
                             yrms=None,
                             verbose=verbose,
                             force=clobber,
                             sciext=sciext)
    hdulist.close()
    return (wcsname)
Example #38
0
def my_visualization_function(**kwargs):
    print("\r{}".format(kwargs), end="")
    plot_data["iterations"].append(kwargs['iterations'])
    plot_data["loss"].append(kwargs['loss'])
    plot_data["axis_00"].append(kwargs['axis_00'])
    plot_data["axis_01"].append(kwargs['axis_01'])
    plot_data["axis_02"].append(kwargs['axis_02'])
    plot_data["axis_03"].append(kwargs['axis_03'])
    plot_data["axis_04"].append(kwargs['axis_04'])

    axes[0, 0].clear()
    axes[0, 0].scatter(plot_data["axis_00"],
                       plot_data["loss"],
                       c=plot_data["loss"],
                       cmap="jet",
                       marker='.')
    axes[0, 0].set_ylabel("loss")
    axes[0, 0].set_xlabel("axis_00")

    axes[0, 1].clear()
    axes[0, 1].scatter(plot_data["axis_01"],
                       plot_data["loss"],
                       c=plot_data["loss"],
                       cmap="jet",
                       marker='.')
    axes[0, 1].set_xlabel("axis_01")

    axes[0, 2].clear()
    axes[0, 2].scatter(plot_data["axis_02"],
                       plot_data["loss"],
                       c=plot_data["loss"],
                       cmap="jet",
                       marker='.')
    axes[0, 2].set_xlabel("axis_02")

    axes[1, 0].clear()
    axes[1, 0].scatter(plot_data["axis_03"],
                       plot_data["loss"],
                       c=plot_data["loss"],
                       cmap="jet",
                       marker='.')
    axes[1, 0].set_ylabel("loss")
    axes[1, 0].set_xlabel("axis_03")

    axes[1, 1].clear()
    axes[1, 1].scatter(plot_data["axis_04"],
                       plot_data["loss"],
                       c=plot_data["loss"],
                       cmap="jet",
                       marker='.')
    axes[1, 1].set_xlabel("axis_04")

    axes[1, 2].clear()
    axes[1, 2].plot(plot_data["iterations"],
                    plot_data["loss"],
                    "--",
                    c=(0.8, 0.8, 0.8, 0.5))
    axes[1, 2].scatter(plot_data["iterations"],
                       plot_data["loss"],
                       marker='.',
                       c=(0.2, 0.2, 0.2))
    axes[1, 2].set_xlabel("iterations")

    plt.draw()
    plt.tight_layout()
    plt.pause(0.001)
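A minimal sketch of the module-level state that my_visualization_function relies on; the names plot_data, fig and axes and the 2x3 grid are inferred from the indexing above and are assumptions, not part of the original snippet.

# Hypothetical globals assumed by my_visualization_function.
import matplotlib.pyplot as plt

plot_data = {key: [] for key in
             ["iterations", "loss",
              "axis_00", "axis_01", "axis_02", "axis_03", "axis_04"]}
fig, axes = plt.subplots(2, 3, figsize=(12, 7))
plt.ion()  # interactive mode so plt.pause() can refresh the window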
Example #39
0
def kohonen(targetdigits, size_k=6, sigma=10.0, eta=0.9, tmax=5000, threshold=1000, plot_errors=False):
    """Example for using create_data, plot_data and som_step.

    :param targetdigits: Set of labels to take into account for this algorithm
    :param size_k: Size of the Kohonen map. In this case it will be 6 X 6 by default
    :param sigma: Width of the neighborhood via the width of the gaussian that describes it, 10.0 by default
    :param eta: Learning rate, 0.9 by default
    :param tmax: Maximal iteration count. 5000 by default
    :param threshold: threshold of the error for convergence criteria. 1000 by default
    :param plot_errors: Plot the errors
    """
    plb.close('all')

    dim = 28 * 28
    data_range = 255.0
    unit_of_mean = 400

    # load in data and labels
    data = np.array(np.loadtxt('data.txt'))
    labels = np.loadtxt('labels.txt')

    # this selects all data vectors that corresponds to one of the four digits
    data = data[np.logical_or.reduce([labels == x for x in targetdigits]), :]
    # filter the labels
    labels = labels[np.logical_or.reduce([labels == x for x in targetdigits])]

    dy, dx = data.shape

    # initialise the centers randomly
    centers = np.random.rand(size_k ** 2, dim) * data_range

    # build a neighborhood matrix
    neighbor = np.arange(size_k ** 2).reshape((size_k, size_k))

    # set the random order in which the datapoints should be presented
    i_random = np.arange(tmax) % dy
    np.random.shuffle(i_random)

    # Converge step
    last_centers = np.copy(centers)
    errors = []
    mean_errors = []
    last_errors = [0.0]

    etas = [eta]

    for t, i in enumerate(i_random):
        sigma = som_step(centers, data[i, :], neighbor, eta, sigma)
        eta = max(0.9999 * eta, 0.1)
        etas.append(eta)

        err = np.sum(np.sum((last_centers - centers) ** 2, 1)) * 0.01

        if t > unit_of_mean:
            if len(last_errors) >= unit_of_mean:
                last_errors.pop(0)
            last_errors.append(err)

            # Update the mean error term
            tmp_error = np.mean(last_errors)
            mean_errors.append(tmp_error)

            if tmp_error < threshold:
                print('The algorithm converges after', t, 'iterations')
                break

        errors.append(err)
        last_centers = np.copy(centers)

    # Digit assignment given labels.txt
    digit_assignment = []
    for i in range(0, size_k ** 2):
        index = np.argmin(np.sum((data[:] - centers[i, :]) ** 2, 1))
        digit_assignment.append(labels[index])

    print('Digit assignment: \n')
    print(np.resize(digit_assignment, (size_k, size_k)))

    # for visualization, you can use this:
    for i in range(size_k ** 2):
        plb.subplot(size_k, size_k, i + 1)

        plb.imshow(np.reshape(centers[i, :], [28, 28]), interpolation='bilinear')
        plb.axis('off')

    # leave the window open at the end of the loop
    plb.show()
    plb.draw()

    if(plot_errors):
        plb.plot(errors)
        plb.title('Square of the errors over iterations', fontsize=20)
        plb.ylabel('Squared errors', fontsize=18)
        plb.xlabel('Iterations', fontsize=18)
        plb.ylim([0, 600000])
        plb.show()

        plb.plot(mean_errors)
        plb.title('Mean squared errors of the last 400 terms iterations', fontsize=20)
        plb.ylabel('Mean squared error', fontsize=18)
        plb.xlabel('Iterations', fontsize=18)
        plb.show()

        plb.plot(etas)
        plb.title('Learning rates decrease over iterations', fontsize=20)
        plb.ylabel('Learning rates', fontsize=18)
        plb.xlabel('Iterations', fontsize=18)
        plb.show()
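A hedged usage sketch for the kohonen routine above; it assumes data.txt and labels.txt (28x28 digit images, MNIST-style) sit in the working directory, since the function loads them itself.

# Train a 6x6 map on four digit classes and plot the error curves.
targetdigits = [0, 3, 5, 8]  # illustrative choice of labels
kohonen(targetdigits, size_k=6, sigma=10.0, eta=0.9,
        tmax=5000, threshold=1000, plot_errors=True)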
Example #40
0
def update_plot(val, feature, length):
    global fig, gs
    global p_feat, p_svd_lpf_feat, p_clusters_feat
    global p_clusters_svd_lpf_feat, p_segmentation

    beat_sync = True
    # set parameters
    if beat_sync:
        n_fft = 8
        hop_length = 4
        window = 16
        step_size = 4
        kernel_size = 4
    else:
        n_fft = 32
        hop_length = 16
        window = 4
        step_size = 2
        kernel_size = 4

    # extract features
    file_path, file_ext = os.path.splitext(open_file_dialog())
    feats, beat_times = extract_features(file_path,
                                         file_ext,
                                         feature,
                                         beat_sync=beat_sync,
                                         n_fft=n_fft,
                                         hop_length=hop_length)

    # convert beat_times to nicely formatted strings
    beat_times_str = np.array(
        ['{}:{}'.format(int(x / 60), int(x % 60)) for x in beat_times])

    # perform segmentation
    print '\tSegmentation'
    flip = True
    peaks = {}
    distances = {}
    distances_filtered = {}
    segments = {}
    for k, v in feats.items():
        peaks[k], distances[k], distances_filtered[k] = compute_segmentation(
            v, window, step_size, kernel_size, flip=flip)
        # computes segments
        segments[k] = get_segments(v.T, peaks[k])

    # compute pair-wise distances using DTW
    print '\tDTW'
    dtw_distances = {}
    for k, v in segments.items():
        dists = np.zeros((len(v), len(v)))
        for i in xrange(len(v)):
            for j in xrange(len(v)):
                if i == j:
                    dists[i, j] = 0.0
                else:
                    dists[i, j] = compute_dtw_distance(v[i], v[j], 'cosine')
        dtw_distances[k] = dists

    # perform clustering using DTW distances
    print '\tClustering'
    clustering = {}
    for k, v in dtw_distances.items():
        if np.isnan(v).any():
            pdb.set_trace()
        clustering[k] = compute_clusters_dtw(v, 'dbscan', segments[k], False)

    # clean axes and update data individually
    plt.suptitle(ntpath.basename(file_path))

    ax_plt = fig.add_subplot(gs[0, 0])
    ax_plt.set_title('Original {}'.format(feature))
    ax_plt.imshow(feats[feature],
                  interpolation='nearest',
                  origin='low',
                  aspect='auto',
                  cmap=plt.cm.plasma)
    for p in peaks[feature]:
        ax_plt.axvline(p, color='k', linestyle='--', alpha=0.5)
    ax_plt.set_xticks(peaks[feature])
    ax_plt.set_xticklabels(beat_times_str[peaks[feature]], rotation=70)

    ax = fig.add_subplot(gs[0, 1])
    ax.imshow(clustering[feature][0].T,
              interpolation='nearest',
              origin='low',
              aspect='auto',
              cmap=plt.cm.plasma)
    ax.set_xticks(clustering[feature][2])
    ax.set_xticklabels(xrange(1, max(clustering[feature][1]) + 2))

    ax = fig.add_subplot(gs[1, 0], sharex=ax_plt)
    ax.plot(distances[feature])
    for p in peaks[feature]:
        ax.axvline(p, color='k', linestyle='--', alpha=0.5)
    ax.set_xticks(peaks[feature])
    ax.set_xticklabels(beat_times_str[peaks[feature]], rotation=70)

    ax_plt = fig.add_subplot(gs[2, 0])
    ax_plt.set_title('FFT({})'.format(feature))
    ax_plt.imshow(feats['FFT({})'.format(feature)],
                  interpolation='nearest',
                  origin='low',
                  aspect='auto',
                  cmap=plt.cm.plasma)
    for p in peaks['FFT({})'.format(feature)]:
        ax_plt.axvline(p, color='k', linestyle='--', alpha=0.5)
    ax_plt.set_xticks(peaks['FFT({})'.format(feature)])
    ax_plt.set_xticklabels(beat_times_str[peaks['FFT({})'.format(feature)] *
                                          hop_length],
                           rotation=70)

    ax = fig.add_subplot(gs[2, 1])
    ax.imshow(clustering['FFT({})'.format(feature)][0].T,
              interpolation='nearest',
              origin='low',
              aspect='auto',
              cmap=plt.cm.plasma)
    ax.set_xticks(clustering['FFT({})'.format(feature)][2])
    ax.set_xticklabels(
        xrange(1, max(clustering['FFT({})'.format(feature)][1]) + 2))

    ax = fig.add_subplot(gs[3, 0], sharex=ax_plt)
    ax.plot(distances['FFT({})'.format(feature)])
    for p in peaks['FFT({})'.format(feature)]:
        ax.axvline(p, color='k', linestyle='--', alpha=0.5)
    ax.set_xticks(peaks['FFT({})'.format(feature)])
    ax.set_xticklabels(beat_times_str[peaks['FFT({})'.format(feature)] *
                                      hop_length],
                       rotation=70)
    plt.tight_layout()
    plt.draw()
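update_plot draws into global fig and gs objects that this snippet does not define; the gs[0..3, 0..1] indices imply a four-row, two-column layout. A minimal, assumed setup:

# Hypothetical globals for update_plot: a figure with a 4x2 GridSpec.
import matplotlib.pyplot as plt
from matplotlib import gridspec

fig = plt.figure(figsize=(14, 10))
gs = gridspec.GridSpec(4, 2)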
Example #41
0
def image(sim,
          qty='rho',
          width="10 kpc",
          resolution=500,
          units=None,
          log=True,
          vmin=None,
          vmax=None,
          av_z=False,
          filename=None,
          z_camera=None,
          clear=True,
          cmap=None,
          title=None,
          qtytitle=None,
          show_cbar=True,
          subplot=False,
          noplot=False,
          ret_im=False,
          fill_nan=True,
          fill_val=0.0,
          linthresh=None,
          kernel_type='spline',
          **kwargs):
    """

    Make an SPH image of the given simulation.

    **Keyword arguments:**

    *qty* (rho): The name of the array to interpolate

    *width* (10 kpc): The overall width and height of the plot. If
     ``width`` is a float or an int, then it is assumed to be in units
     of ``sim['pos']``. It can also be passed in as a string
     indicating the units, i.e. '10 kpc', in which case it is
     converted to units of ``sim['pos']``.

    *resolution* (500): The number of pixels wide and tall

    *units* (None): The units of the output

    *av_z* (False): If True, the requested quantity is averaged down
            the line of sight (default False: image is generated in
            the thin plane z=0, unless output units imply an integral
            down the line of sight). If a string, the requested quantity
            is averaged down the line of sight weighted by the av_z
            array (e.g. use 'rho' for density-weighted quantity;
            the default results when av_z=True are volume-weighted).

    *z_camera* (None): If set, a perspective image is rendered. See
                :func:`pynbody.sph.image` for more details.

    *filename* (None): if set, the image will be saved in a file

    *clear* (True): whether to call clf() on the axes first

    *cmap* (None): user-supplied colormap instance

    *title* (None): plot title

    *qtytitle* (None): colorbar quantity title

    *show_cbar* (True): whether to plot the colorbar

    *subplot* (False): the user can supply a AxesSubPlot instance on
    which the image will be shown

    *noplot* (False): do not display the image, just return the image array

    *ret_im* (False): return the image instance returned by imshow

    *num_threads* (None) : if set, specify the number of threads for
    the multi-threaded routines; otherwise the pynbody.config default is used

    *fill_nan* (True): if any of the image values are NaN, replace with fill_val

    *fill_val* (0.0): the fill value to use when replacing NaNs

    *linthresh* (None): if the image has negative and positive values
     and a log scaling is requested, the part between `-linthresh` and
     `linthresh` is shown on a linear scale to avoid divergence at 0

    *kernel_type* ('spline'): SPH kernel to use for smoothing. Defaults to a
    cubic spline, but can also be set to 'wendlandC2'
    """

    if not noplot:
        import matplotlib.pylab as plt

    global config
    if not noplot:
        if subplot:
            p = subplot
        else:
            p = plt

    if qtytitle is None:
        qtytitle = qty

    if isinstance(units, str):
        units = _units.Unit(units)

    if isinstance(width, str) or issubclass(width.__class__, _units.UnitBase):
        if isinstance(width, str):
            width = _units.Unit(width)
        width = width.in_units(sim['pos'].units, **sim.conversion_context())

    width = float(width)

    kernel = sph.Kernel(type=kernel_type)

    perspective = z_camera is not None
    if perspective and not av_z:
        kernel = sph.Kernel2D()

    is_projected = False
    if units is not None:
        is_projected = _units_imply_projection(sim, qty, units)

    if is_projected:
        kernel = sph.Kernel2D(type=kernel_type)

    if av_z:
        if isinstance(kernel, sph.Kernel2D):
            raise _units.UnitsException(
                "Units already imply projected image; can't also average over line-of-sight!"
            )
        else:
            kernel = sph.Kernel2D(type=kernel_type)
            if units is not None:
                aunits = units * sim['z'].units
            else:
                aunits = None

            if isinstance(av_z, str):
                if units is not None:
                    aunits = units * sim[av_z].units * sim['z'].units
                sim["__prod"] = sim[av_z] * sim[qty]
                qty = "__prod"

            else:
                av_z = "__one"
                sim["__one"] = np.ones_like(sim[qty])
                sim["__one"].units = "1"

            im = sph.render_image(sim,
                                  qty,
                                  width / 2,
                                  resolution,
                                  out_units=aunits,
                                  kernel=kernel,
                                  z_camera=z_camera,
                                  **kwargs)
            im2 = sph.render_image(sim,
                                   av_z,
                                   width / 2,
                                   resolution,
                                   kernel=kernel,
                                   z_camera=z_camera,
                                   **kwargs)

            top = sim.ancestor

            try:
                del top["__one"]
            except KeyError:
                pass

            try:
                del top["__prod"]
            except KeyError:
                pass

            im = im / im2

    else:
        im = sph.render_image(sim,
                              qty,
                              width / 2,
                              resolution,
                              out_units=units,
                              kernel=kernel,
                              z_camera=z_camera,
                              **kwargs)

    if fill_nan:
        im[np.isnan(im)] = fill_val

    if not noplot:

        # set the log or linear normalizations
        if log:
            try:
                im[np.where(im == 0)] = abs(im[np.where(abs(im != 0))]).min()
            except ValueError:
                raise ValueError(
                    "Failed to make a sensible logarithmic image. This probably means there are no particles in the view."
                )

            # check if there are negative values -- if so, use the symmetric
            # log normalization
            if (vmin is None and
                (im < 0).any()) or ((vmin is not None) and vmin < 0):

                # need to set the linear regime around zero -- set to by
                # default start at 1/1000 of the log range
                if linthresh is None:
                    linthresh = np.nanmax(abs(im)) / 1000.
                norm = matplotlib.colors.SymLogNorm(linthresh,
                                                    vmin=vmin,
                                                    vmax=vmax)
            else:
                norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax)

        else:
            norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)

        #
        # do the actual plotting
        #
        if clear and not subplot:
            p.clf()

        if ret_im:
            return p.imshow(im[::-1, :].view(np.ndarray),
                            extent=(-width / 2, width / 2, -width / 2,
                                    width / 2),
                            vmin=vmin,
                            vmax=vmax,
                            cmap=cmap,
                            norm=norm)

        ims = p.imshow(im[::-1, :].view(np.ndarray),
                       extent=(-width / 2, width / 2, -width / 2, width / 2),
                       vmin=vmin,
                       vmax=vmax,
                       cmap=cmap,
                       norm=norm)

        u_st = sim['pos'].units.latex()
        if not subplot:
            plt.xlabel("$x/%s$" % u_st)
            plt.ylabel("$y/%s$" % u_st)
        else:
            p.set_xlabel("$x/%s$" % u_st)
            p.set_ylabel("$y/%s$" % u_st)

        if units is None:
            units = im.units

        if units.latex() == "":
            units = ""
        else:
            units = "$" + units.latex() + "$"

        if show_cbar:
            plt.colorbar(ims).set_label(qtytitle + "/" + units)

        # colorbar doesn't work with subplot:  mappable is NoneType
        # elif show_cbar:
        #    import matplotlib.pyplot as mpl
        #    if qtytitle: mpl.colorbar().set_label(qtytitle)
        #    else:        mpl.colorbar().set_label(units)

        if title is not None:
            if not subplot:
                p.title(title)
            else:
                p.set_title(title)

        if filename is not None:
            p.savefig(filename)

        plt.draw()
        # plt.show() - removed by AP on 30/01/2013 - this should not be here as
        # for some systems you don't get back to the command prompt

    return im
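A short usage sketch for the image routine above (a pynbody-style SPH plotting helper); the snapshot name and output units below are illustrative assumptions.

# Render a projected gas density map of a hypothetical snapshot and save it.
import pynbody

sim = pynbody.load("snapshot_0100")  # hypothetical file name
sim.physical_units()
im = image(sim.gas, qty='rho', width='20 kpc', resolution=500,
           units='Msol kpc^-2', log=True, filename='rho_map.png')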
Example #42
0
    def plot_hist(self, par_list):
        """ Plots histograms for samples of parameters, model results, residuals, and test data

        :param par_list: list of string for parameter names
        :return: None; creates and saves plots in main class output directory
        """
        print("Plotting histograms and graphs...")
        outdir = self.outmcmc+'histograms/'
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        # plotting histograms for parameters
        for i in range(self.ndim):
            plt.figure(i)
            dist = self.samples_i[:, i]
            plt.hist(dist, 30, facecolor='blue', alpha=0.5)
            if i ==0: plt.title('Alpha')
            if i ==1: plt.title('Sigma')
            if (i > 1) and (i<self.ndim_blr + 2): plt.title(par_list[i])
            if i >= self.ndim_blr + 2:
                plt.title('GP par: '+str(i-int(self.ndim_blr) - 1))
            plt.axvline(self.params_fit[i], c='r', ls='-')
            plt.axvline(self.params_mean[i], c='b', ls='-')
            plt.axvline(self.params_mean[i] + self.errors_fit[i], c='b', ls='--')
            plt.axvline(self.params_mean[i] - self.errors_fit[i], c='b', ls='--')
            plt.axvline(0., c='k', ls='--')
            plt.draw()
            plt.savefig(outdir+'hist_'+par_list[i]+'.png')
        # plot observed vs modeled
        xmin = np.min(np.concatenate([self.y,self.y_test]))
        xmax = np.max(np.concatenate([self.y,self.y_test]))
        xdiff = xmax - xmin
        ymin = np.min(np.concatenate([self.y_model,self.y_model_test]))
        ymax = np.max(np.concatenate([self.y_model,self.y_model_test]))
        ydiff = ymax - ymin
        x_range = [xmin - xdiff*0.2, xmax + xdiff*0.2]
        y_range = [ymin - ydiff*0.2, ymax + ydiff*0.2]
        plt.clf()
        plt.plot(x_range, y_range, '.', alpha=0.0)
        plt.plot(self.y, self.y_model, 'o', c='b',label='Train', alpha=0.5)
        plt.plot(self.y_test, self.y_model_test, 'o', c='r',label='Test', alpha=0.5)
        plt.xlabel('Observed y')
        plt.ylabel('Predicted y')
        plt.title('Predicted vs Ground Truth')
        # plt.title('Predicted vs Ground Truth')
        plt.legend(loc='upper left', numpoints=1)
        ax = plt.axes([0.63, 0.15, 0.25, 0.25], frameon=True)
        ax.hist(self.residual, alpha=0.5, bins=16, color='b', stacked=True, normed=True)
        ax.hist(self.residual_test, alpha=0.5, bins=16, color='r', stacked=True, normed=True)
        #ax.xaxis.set_ticks(np.arange(-2, 2, 1))
        ax.set_title('Residual')
        plt.draw()
        plt.savefig(self.outmcmc + 'Data_vs_Model_1d.png')
        # plot more histograms:
        # plt.clf()
        # plt.hist(self.residual_i, 30, facecolor='blue', alpha=0.5)
        # plt.title('Sum Residual y-model')
        # plt.ylabel('N Samples')
        # plt.draw()
        # plt.savefig(outdir + 'hist_residual.png')
        plt.clf()
        plt.plot(self.y, self.mu_blr, 'o')
        plt.xlabel('Log Crime Rate')
        plt.ylabel('Predicted Crime Rate')
        plt.title('Predicted from Demographics vs Ground Truth')
        plt.draw()
        plt.savefig(self.outmcmc + 'Data_ModelBLR_1d.png')
        plt.clf()
        plt.plot(self.y_model, self.residual, 'o')
        plt.xlabel('Predicted Crime Rate')
        plt.ylabel('Residual')
        plt.draw()
        plt.savefig(self.outmcmc + 'Model_Residual_1d.png')
Example #43
0
    def plot(self, fig_number=322):
        """plot the stored data in figure `fig_number`.

        Dependencies: `matplotlib.pylab`
        """
        from matplotlib import pylab
        from matplotlib.pylab import (gca, figure, plot, xlabel, grid,
                                      semilogy, text, draw, show, subplot,
                                      tight_layout, rcParamsDefault, xlim,
                                      ylim)

        def title_(*args, **kwargs):
            kwargs.setdefault('size', rcParamsDefault['axes.labelsize'])
            pylab.title(*args, **kwargs)

        def subtitle(*args, **kwargs):
            kwargs.setdefault('horizontalalignment', 'center')
            text(0.5 * (xlim()[1] - xlim()[0]), 0.9 * ylim()[1], *args,
                 **kwargs)

        def legend_(*args, **kwargs):
            kwargs.setdefault('framealpha', 0.3)
            kwargs.setdefault('fancybox', True)
            kwargs.setdefault('fontsize', rcParamsDefault['font.size'] - 2)
            pylab.legend(*args, **kwargs)

        figure(fig_number)

        dat = self._data  # dictionary with entries as given in __init__
        if not dat:
            return
        try:  # a hack to get the presumable population size lambda
            strpopsize = ' (evaluations / %s)' % str(dat['eval'][-2] -
                                                     dat['eval'][-3])
        except IndexError:
            strpopsize = ''

        # plot fit, Delta fit, sigma
        subplot(221)
        gca().clear()
        if dat['fit'][0] is None:  # plot is fine with None, but comput-
            dat['fit'][0] = dat['fit'][1]  # tations need numbers
            # should be reverted later, but let's be lazy
        assert dat['fit'].count(None) == 0
        fmin = min(dat['fit'])
        imin = dat['fit'].index(fmin)
        dat['fit'][imin] = max(dat['fit']) + 1
        fmin2 = min(dat['fit'])
        dat['fit'][imin] = fmin
        semilogy(dat['iter'],
                 [f - fmin if f - fmin > 1e-19 else None for f in dat['fit']],
                 'c',
                 linewidth=1,
                 label='f-min(f)')
        semilogy(dat['iter'], [
            max((fmin2 - fmin, 1e-19)) if f - fmin <= 1e-19 else None
            for f in dat['fit']
        ], 'C1*')

        semilogy(dat['iter'], [abs(f) for f in dat['fit']],
                 'b',
                 label='abs(f-value)')
        semilogy(dat['iter'], dat['sigma'], 'g', label='sigma')
        semilogy(dat['iter'][imin], abs(fmin), 'r*', label='abs(min(f))')
        if dat['more_data']:
            gca().twinx()
            plot(dat['iter'], dat['more_data'])
        grid(True)
        legend_(*[
            [v[i] for i in [1, 0, 2, 3]]  # just a reordering
            for v in gca().get_legend_handles_labels()
        ])

        # plot xmean
        subplot(222)
        gca().clear()
        plot(dat['iter'], dat['xmean'])
        for i in range(len(dat['xmean'][-1])):
            text(dat['iter'][0], dat['xmean'][0][i], str(i))
            text(dat['iter'][-1], dat['xmean'][-1][i], str(i))
        subtitle('mean solution')
        grid(True)

        # plot squareroot of eigenvalues
        subplot(223)
        gca().clear()
        semilogy(dat['iter'], dat['D'], 'm')
        xlabel('iterations' + strpopsize)
        title_('Axis lengths')
        grid(True)

        # plot stds
        subplot(224)
        # if len(gcf().axes) > 1:
        #     sca(pylab.gcf().axes[1])
        # else:
        #     twinx()
        gca().clear()
        semilogy(dat['iter'], dat['stds'])
        for i in range(len(dat['stds'][-1])):
            text(dat['iter'][-1], dat['stds'][-1][i], str(i))
        title_('Coordinate-wise STDs w/o sigma')
        grid(True)
        xlabel('iterations' + strpopsize)
        _stdout.flush()
        tight_layout()
        draw()
        show()
        CMAESDataLogger.plotted += 1
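The plot method reads a self._data dictionary whose layout is only implied by the lookups above; a purely illustrative sketch of the expected shape (example values, not from the original code):

# Inferred structure of the logger's _data dictionary (example values only).
example_data = {
    'iter':      [1, 2, 3],                               # iteration numbers
    'eval':      [8, 16, 24],                             # cumulative evaluations
    'fit':       [12.0, 5.5, 1.2],                        # best fitness per iteration
    'sigma':     [0.5, 0.4, 0.3],                         # global step-size
    'xmean':     [[0.0, 0.0], [0.1, -0.2], [0.3, -0.1]],  # mean solution per iteration
    'D':         [[1.0, 1.0], [1.1, 0.9], [1.3, 0.8]],    # sqrt of eigenvalues
    'stds':      [[0.5, 0.5], [0.45, 0.4], [0.4, 0.3]],   # coordinate-wise stds
    'more_data': [],                                      # optional extra series
}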
Example #44
0
        simplices = np.array([valid_lms])
        notHappy = False
        break
    else:
        triangles = Delaunay(last_lms_valid, incremental=True)
        simplices = triangles.simplices  # Indices of the points in each triangulation
        # Remap simplices to valid landmark ids
        remap = lambda x: valid_lms_arr[x]
        simplices = np.apply_along_axis(remap, 0, simplices)

    # Visual check for triangulation
    plt.gca().clear()
    plt.triplot(last_lms[:, 0], last_lms[:, 1], simplices.copy())
    for i in valid_lms:
        plt.text(*last_lms[i, :], s=str(i))
    plt.draw()
    plt.pause(0.01)

    if check_triangulation:
        remove_str = input("Enter the IDs of landmarks to be removed: ")
        try:
            remove = ast.literal_eval(remove_str)
        except Exception as e:
            print("Error understanding input:", e)
            remove = ()
            notHappy = False

        # If only one number entered
        if type(remove) is int:
            remove = (remove, )
        new_valid = sorted(list(set(valid_lms) - set(remove)))
Example #45
0
def plotxy_old(beam,cols1,cols2,nbins=25,nbins_h=None,level=5,xrange=None,yrange=None,nolost=0,title='PLOTXY',xtitle=None,ytitle=None,noplot=0,calfwhm=0,contour=0):
  '''
  Draw the scatter or contour or pixel-like plot of two columns of a Shadow.Beam instance or of a given shadow file, along with histograms for the intensity on the top and right side.
  Inputs:
     beam     : str instance with the name of the shadow file to be loaded, or a Shadow.Beam initialized instance.
     cols1    : first column.
     cols2    : second column.
  
     Optional Inputs:
     nbins    : int for the size of the grid (nbins x nbins). It will affect the plot only if non scatter.
     nbins_h  : int for the number of bins for the histograms
     level    : int number of level to be drawn. It will affect the plot only if contour.
     xrange   : tuple or list of length 2 describing the interval of interest for x, the data read from the chosen column.
     yrange   : tuple or list of length 2 describing the interval of interest for y, counts or intensity depending on ref.
     nolost   : 
           0   All rays
           1   Only good rays
           2   Only lost rays
     title    : title of the figure, it will appear on top of the window.
     xtitle   : label for the x axis.
     ytitle   : label for the y axis.
     noplot   : 
           0   plot the histogram
           1   don't plot the histogram
     calfwhm :
           0   don't compute the fwhm
           1   compute the fwhm and draw it
           2   in addition to calfwhm=1, it computes now the intensity in a
               slit of FWHM_h x FWHM_v
     contour  :
           0   scatter plot
           1   contour, black & white, only counts (without intensity)
           2   contour, black & white, with intensity.
           3   contour, colored, only counts (without intensity)
           4   contour, colored, with intensity.
           5   pixelized, colored, only counts (without intensity)
           6   pixelized, colored, with intensity.
  Outputs:
     ShadowTools.Histo1_Ticket instance.
     
  Error:
     if an error occurs an ArgsError is raised.

  Possible choice for col are:
           1   X spatial coordinate [user's unit]
           2   Y spatial coordinate [user's unit]
           3   Z spatial coordinate [user's unit]
           4   X' direction or divergence [rads]
           5   Y' direction or divergence [rads]
           6   Z' direction or divergence [rads]
           7   X component of the electromagnetic vector (s-polariz)
           8   Y component of the electromagnetic vector (s-polariz)
           9   Z component of the electromagnetic vector (s-polariz)
          10   Lost ray flag
          11   Energy [eV]
          12   Ray index
          13   Optical path length
          14   Phase (s-polarization)
          15   Phase (p-polarization)
          16   X component of the electromagnetic vector (p-polariz)
          17   Y component of the electromagnetic vector (p-polariz)
          18   Z component of the electromagnetic vector (p-polariz)
          19   Wavelength [A]
          20   R= SQRT(X^2+Y^2+Z^2)
          21   angle from Y axis
          22   the magnitude of the Electromagnetic vector
          23   |E|^2 (total intensity)
          24   total intensity for s-polarization
          25   total intensity for p-polarization
          26   K = 2 pi / lambda [A^-1]
          27   K = 2 pi / lambda * col4 [A^-1]
          28   K = 2 pi / lambda * col5 [A^-1]
          29   K = 2 pi / lambda * col6 [A^-1]
          30   S0-stokes = |Es|^2 + |Ep|^2
          31   S1-stokes = |Es|^2 - |Ep|^2
          32   S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
          33   S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
  '''
  if nbins_h==None: nbins_h=nbins+1
  try: 
    stp.plotxy_CheckArg(beam,cols1,cols2,nbins,nbins_h,level,xrange,yrange,nolost,title,xtitle,ytitle,noplot,calfwhm,contour)
  except stp.ArgsError as e: 
    raise e
  #plot_nicc.ioff()
  plt.ioff()
  col1,col2,col3,col4 = getshcol(beam,(cols1,cols2,10,23,))

  nbins=nbins+1
  if xtitle==None: xtitle=(stp.getLabel(cols1-1))[0]
  if ytitle==None: ytitle=(stp.getLabel(cols2-1))[0]
  
  if nolost==0: t = numpy.where(col3!=-3299)
  if nolost==1: t = numpy.where(col3==1.0)
  if nolost==2: t = numpy.where(col3!=1.0)  

  if xrange==None: xrange = stp.setGoodRange(col1[t])
  if yrange==None: yrange = stp.setGoodRange(col2[t])
  #print xrange
  #print yrange
  tx = numpy.where((col1>xrange[0])&(col1<xrange[1]))
  ty = numpy.where((col2>yrange[0])&(col2<yrange[1]))
  
  tf = set(list(t[0])) & set(list(tx[0])) & set(list(ty[0]))
  t = (numpy.array(sorted(list(tf))),)
  if len(t[0])==0: 
    print ("no point selected")
    return None
  
  #figure = pylab.plt.figure(figsize=(12,8),dpi=96)
  figure = plt.figure(figsize=(12,8),dpi=96)

  ratio = 8.0/12.0
  left, width = 0.1*ratio, 0.65*ratio
  bottom, height = 0.1, 0.65
  bottom_h = bottom+height+0.02
  left_h = left+width+0.02*ratio

  rect_scatter = [0.10*ratio, 0.10, 0.65*ratio, 0.65]
  rect_histx =   [0.10*ratio, 0.77, 0.65*ratio, 0.20]
  rect_histy =   [0.77*ratio, 0.10, 0.20*ratio, 0.65]
  rect_text =    [1.00*ratio, 0.10, 1.20*ratio, 0.65]


  axScatter = figure.add_axes(rect_scatter)
  axScatter.set_xlabel(xtitle)
  axScatter.set_ylabel(ytitle)

  if contour==0:
    axScatter.scatter(col1[t],col2[t],s=0.5)
  if contour>0 and contour<7:
    if contour==1 or contour==3 or contour==5: w = numpy.ones( len(col1) )
    if contour==2 or contour==4 or contour==6: w = col4
    grid = numpy.zeros(nbins*nbins).reshape(nbins,nbins)
    for i in t[0]:
      indX = stp.findIndex(col1[i],nbins,xrange[0],xrange[1])
      indY = stp.findIndex(col2[i],nbins,yrange[0],yrange[1])
      try:
        grid[indX][indY] = grid[indX][indY] + w[i]
      except IndexError:
        pass
    X, Y = numpy.mgrid[xrange[0]:xrange[1]:nbins*1.0j,yrange[0]:yrange[1]:nbins*1.0j]
    L = numpy.linspace(numpy.amin(grid),numpy.amax(grid),level)
    if contour==1 or contour==2: axScatter.contour(X, Y, grid, colors='k', levels=L)
    if contour==3 or contour==4: axScatter.contour(X, Y, grid, levels=L)
    if contour==5 or contour==6: axScatter.pcolor(X, Y, grid)  
  #axScatter.set_xlim(xrange)
  #axScatter.set_ylim(yrange)
  
  #axScatter.axis(xmin=xrange[0],xmax=xrange[1])
  #axScatter.axis(ymin=yrange[0],ymax=yrange[1])
  
  for tt in axScatter.get_xticklabels():
    tt.set_size('x-small')
  for tt in axScatter.get_yticklabels():
    tt.set_size('x-small')
  
  #if ref==0: col4 = numpy.ones(len(col4),dtype=float)
  
  axHistx = figure.add_axes(rect_histx, sharex=axScatter)
  axHisty = figure.add_axes(rect_histy, sharey=axScatter)
  
  binx = numpy.linspace(xrange[0],xrange[1],nbins_h)
  biny = numpy.linspace(yrange[0],yrange[1],nbins_h)
  if contour==0 or contour==1 or contour==3 or contour==5:
    hx, binx, patchx = axHistx.hist(col1[t],bins=binx,range=xrange,histtype='step',color='k')
    hy, biny, patchy = axHisty.hist(col2[t],bins=biny,range=yrange,orientation='horizontal',histtype='step',color='k')
  if contour==2 or contour==4 or contour==6:
    hx, binx, patchx = axHistx.hist(col1[t],bins=binx,range=xrange,weights=col4[t],histtype='step',color='b')
    hy, biny, patchy = axHisty.hist(col2[t],bins=biny,range=yrange,weights=col4[t],orientation='horizontal',histtype='step',color='b')
  for tl in axHistx.get_xticklabels(): tl.set_visible(False)
  for tl in axHisty.get_yticklabels(): tl.set_visible(False)
  for tt in axHisty.get_xticklabels():
    tt.set_rotation(270)
    tt.set_size('x-small')
  for tt in axHistx.get_yticklabels():
    tt.set_size('x-small')

  intensityinslit = 0.0
  if calfwhm>=1:
    fwhmx,txf, txi = stp.calcFWHM(hx,binx[1]-binx[0])
    fwhmy,tyf, tyi = stp.calcFWHM(hy,biny[1]-biny[0])
    axHistx.plot([binx[txi],binx[txf+1]],[max(hx)*0.5,max(hx)*0.5],'x-')
    axHisty.plot([max(hy)*0.5,max(hy)*0.5],[biny[tyi],biny[tyf+1]],'x-')
    print ("fwhm horizontal:  %g" % fwhmx)
    print ("fwhm vertical:    %g" % fwhmy)
  if calfwhm>=2:
    xx1 = binx[txi]
    xx2 = binx[txf+1]
    yy1 = biny[tyi]
    yy2 = biny[tyf+1]
    print ("limits horizontal: %g %g " % (binx[txi],binx[txf+1]))
    print ("limits vertical:   %g %g " % (biny[tyi],biny[tyf+1]))
    axScatter.plot([xx1,xx2,xx2,xx1,xx1],[yy1,yy1,yy2,yy2,yy1])
    #fwhmx,txf, txi = stp.calcFWHM(hx,binx[1]-binx[0])
    #fwhmy,tyf, tyi = stp.calcFWHM(hy,biny[1]-biny[0])
    #calculate intensity in slit
    if nolost==0: tt = numpy.where(col3!=-3299)
    if nolost==1: tt = numpy.where(col3==1.0)
    if nolost==2: tt = numpy.where(col3!=1.0)  

    ttx = numpy.where((col1>=xx1)&(col1<=xx2))
    tty = numpy.where((col2>=yy1)&(col2<=yy2))
  
    ttf = set(list(tt[0])) & set(list(ttx[0])) & set(list(tty[0]))
    tt = (numpy.array(sorted(list(ttf))),)
    if len(tt[0])>0: 
      intensityinslit = col4[tt].sum()
      print ("Intensity in slit: %g ",intensityinslit)
    
  if title!=None:
    axHistx.set_title(title)
  axText = figure.add_axes(rect_text)
  ntot = len(numpy.where(col3!=3299)[0])
  ngood = len(numpy.where(col3==1)[0])
  nbad = ntot - ngood
  if nolost==0: axText.text(0.0,0.8,"ALL RAYS")
  if nolost==1: axText.text(0.0,0.8,"GOOD RAYS")
  if nolost==2: axText.text(0.0,0.8,"LOST RAYS")
  tmps = "intensity: "+str(col4[t].sum())
  if calfwhm == 2:
      tmps=tmps+" (in slit:"+str(intensityinslit)+") "
  axText.text(0.0,0.7,tmps)
  axText.text(0.0,0.6,"total number of rays: "+str(ntot))
  axText.text(0.0,0.5,"total good rays: "+str(ngood))
  axText.text(0.0,0.4,"total lost rays: "+str(ntot-ngood))
  if calfwhm>=1:
    axText.text(0.0,0.3,"fwhm H: "+str(fwhmx))
    axText.text(0.0,0.2,"fwhm V: "+str(fwhmy))
  if isinstance(beam,str): axText.text(0.0,0.1,"FILE: "+beam)
  if isinstance(beam,sd.Beam): axText.text(0.0,0.1,"from Shadow3 Beam instance")
  axText.text(0.0,0.0,"DIR: "+os.getcwd())
  axText.set_axis_off()
  #pylab.plt.draw()
  plt.draw()
  if noplot==0: figure.show()
  ticket = plotxy_Ticket()
  ticket.figure = figure
  ticket.xrange = xrange
  ticket.yrange = yrange
  ticket.xtitle = xtitle
  ticket.ytitle = ytitle
  ticket.title = title
  if calfwhm>=1:
    ticket.fwhmx = fwhmx
    ticket.fwhmy = fwhmy
  ticket.intensity = col4[t].sum()
  ticket.averagex = numpy.average( col1[t] )
  ticket.averagey = numpy.average( col2[t] )
  ticket.intensityinslit = intensityinslit
  return ticket  
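A hedged usage sketch for plotxy_old; the Shadow3 output file name below is only an assumption.

# Pixelized X vs Z intensity map for good rays, with FWHM markers drawn.
ticket = plotxy_old("star.01", 1, 3, nbins=100, nolost=1,
                    contour=6, calfwhm=1, title="Footprint at the sample")
print(ticket.fwhmx, ticket.fwhmy, ticket.intensity)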
Example #46
0
    def draw_ortho(self, im, g, cmap=None, vmin=0, vmax=1):
        slices = self.slices
        int_slice = np.clip(np.round(slices), 0,
                            np.array(im.shape) - 1).astype('int')

        g['xy'].cla()
        g['yz'].cla()
        g['xz'].cla()
        g['in'].cla()

        g['xy'].imshow(im[int_slice[0], :, :], vmin=vmin, vmax=vmax, cmap=cmap)
        g['xy'].hlines(slices[1],
                       0,
                       im.shape[2],
                       colors='y',
                       linestyles='dashed',
                       lw=1)
        g['xy'].vlines(slices[2],
                       0,
                       im.shape[1],
                       colors='y',
                       linestyles='dashed',
                       lw=1)
        self._format_ax(g['xy'])

        g['yz'].imshow(im[:, int_slice[1], :], vmin=vmin, vmax=vmax, cmap=cmap)
        g['yz'].hlines(slices[0],
                       0,
                       im.shape[2],
                       colors='y',
                       linestyles='dashed',
                       lw=1)
        g['yz'].vlines(slices[2],
                       0,
                       im.shape[0],
                       colors='y',
                       linestyles='dashed',
                       lw=1)
        self._format_ax(g['yz'])

        g['xz'].imshow(im[:, :, int_slice[2]].T,
                       vmin=vmin,
                       vmax=vmax,
                       cmap=cmap)
        g['xz'].hlines(slices[1],
                       0,
                       im.shape[0],
                       colors='y',
                       linestyles='dashed',
                       lw=1)
        g['xz'].vlines(slices[0],
                       0,
                       im.shape[1],
                       colors='y',
                       linestyles='dashed',
                       lw=1)
        self._format_ax(g['xz'])

        if self.inset == 'exposure':
            m = im * self.state.image_mask
            self.pix = np.r_[m[slices[0], :, :], m[:, :, slices[2]],
                             m[:, slices[1], :]].ravel()
            self.pix = self.pix[self.pix != 0.]
            g['in'].hist(self.pix, bins=300, histtype='step')
            g['in'].semilogy()

            g['in'].set_xlim(0, 1)
            g['in'].set_ylim(9e-1, 1e3)

            if self.view == 'diff' and g == self.gr:
                g['in'].set_xlim(-0.3, 0.3)

        self._format_ax(g['in'])
        pl.draw()
Example #47
0
    print 'Generating plots... this may take a while...'
    AntNos = np.array(AntNos)
    figcnt = 0

    for pol in pols:
        pl.figure(figcnt)
        DDD = np.zeros((Nant,Nant))
        for i,ant1 in enumerate(AntNos):
            for j,ant2 in enumerate(AntNos):
                if not (ant1,ant2) in G_ij[pol].keys(): continue
                DDD[i,j] = 10**G_ij[pol][(ant1,ant2)]/flx
        DDD = np.ma.array(DDD,mask=np.where(DDD==0,1,0))
        pl.imshow(DDD,aspect='auto',interpolation='nearest')
        pl.colorbar()
        pl.title('Gains by baseline, polarization %s'%pol[0])
        pl.draw()
        figcnt += 1
    
    if opts.wgt != 'equal':
        for pol in pols:
            pl.figure(figcnt)
            WWW = np.zeros((Nant,Nant))
            for i,ant1 in enumerate(AntNos):
                for j,ant2 in enumerate(AntNos):
                    if not (ant1,ant2) in W_ij[pol].keys(): continue
                    WWW[i,j] = W_ij[pol][(ant1,ant2)]
            WWW = np.ma.array(WWW/np.sum(WWW),mask=np.where(WWW==0,1,0))
            pl.imshow(WWW,aspect='auto',interpolation='nearest')
            pl.colorbar()
            pl.title('Weights for each baseline, polarization %s'%pol[0])
            pl.draw()
Example #48
0
    def draw_ortho(self, im, g, cmap=None, vmin=0, vmax=1):
        im, vmin, vmax, cmap = self.field, self.vmin, self.vmax, self.cmap

        if self.fourier:
            im = np.abs(im)

        slices = self.slices
        int_slice = np.clip(np.round(slices), 0,
                            np.array(im.shape[:3]) - 1).astype('int')

        # if vmin is None:
        # vmin = im.min()
        # if vmax is None:
        # vmax = im.max()

        # if self.cmap == 'RdBu_r':
        # val = np.max(np.abs([vmin, vmax]))
        # vmin = -val
        # vmax = val

        self.g['xy'].cla()
        self.g['yz'].cla()
        self.g['xz'].cla()
        self.g['in'].cla()

        self.g['xy'].imshow(im[int_slice[0], :, :], cmap=cmap)
        self.g['xy'].hlines(slices[1],
                            0,
                            im.shape[2],
                            colors='y',
                            linestyles='dashed',
                            lw=1)
        self.g['xy'].vlines(slices[2],
                            0,
                            im.shape[1],
                            colors='y',
                            linestyles='dashed',
                            lw=1)
        self._format_ax(self.g['xy'])

        self.g['yz'].imshow(im[:, int_slice[1], :],
                            vmin=vmin,
                            vmax=vmax,
                            cmap=cmap)
        self.g['yz'].hlines(slices[0],
                            0,
                            im.shape[2],
                            colors='y',
                            linestyles='dashed',
                            lw=1)
        self.g['yz'].vlines(slices[2],
                            0,
                            im.shape[0],
                            colors='y',
                            linestyles='dashed',
                            lw=1)
        self._format_ax(self.g['yz'])

        self.g['xz'].imshow(np.rollaxis(im[:, :, int_slice[2]], 1), cmap=cmap)
        self.g['xz'].hlines(slices[1],
                            0,
                            im.shape[0],
                            colors='y',
                            linestyles='dashed',
                            lw=1)
        self.g['xz'].vlines(slices[0],
                            0,
                            im.shape[1],
                            colors='y',
                            linestyles='dashed',
                            lw=1)
        self._format_ax(self.g['xz'])

        if self.dohist:
            tt = np.real(self.field).ravel()
            c, s = tt.mean(), 5 * tt.std()
            y, x = np.histogram(tt,
                                bins=np.linspace(c - s, c + s, 700),
                                normed=True)
            x = (x[1:] + x[:-1]) / 2

            self.g['in'].plot(x, y, 'k-', lw=1)
            self.g['in'].fill_between(x, y, 1e-10, alpha=0.5)
            self.g['in'].set_yscale('log', nonposy='clip')

            self.g['in'].set_xlim(c - s, c + s)
            self.g['in'].set_ylim(1e-3 * y.max(), 1.4 * y.max())

        self._format_ax(self.g['in'])

        pl.draw()
Example #49
0
    def plot_deviations(
            self,
            figsize=None,
            savefig=None,
            plots=["data", "strangeness", "pvalue", "deviation", "threshold"],
            debug=False):
        '''Plots the anomaly score, deviation level and p-value, over time.'''

        register_matplotlib_converters()

        plots, nb_axs, i = list(set(plots)), 0, 0
        if "data" in plots:
            nb_axs += 1
        if "strangeness" in plots:
            nb_axs += 1
        if any(s in ["pvalue", "deviation", "threshold"] for s in plots):
            nb_axs += 1

        fig, axes = plt.subplots(nb_axs, sharex="row", figsize=figsize)
        if not isinstance(axes, (np.ndarray)):
            axes = np.array([axes])

        if "data" in plots:
            axes[i].set_xlabel("Time")
            axes[i].set_ylabel("Feature 0")
            axes[i].plot(self.df.index, self.df.values[:, 0], label="Data")
            if debug:
                axes[i].plot(self.T,
                             np.array(self.representatives)[:, 0],
                             label="Representative")
            axes[i].legend()
            i += 1

        if "strangeness" in plots:
            axes[i].set_xlabel("Time")
            axes[i].set_ylabel("Strangeness")
            axes[i].plot(self.T, self.S, label="Strangeness")
            if debug:
                axes[i].plot(self.T,
                             np.array(self.diffs)[:, 0],
                             label="Difference")
            axes[i].legend()
            i += 1

        if any(s in ["pvalue", "deviation", "threshold"] for s in plots):
            axes[i].set_xlabel("Time")
            axes[i].set_ylabel("Deviation")
            axes[i].set_ylim(0, 1)
            if "pvalue" in plots:
                axes[i].scatter(self.T,
                                self.P,
                                alpha=0.25,
                                marker=".",
                                color="green",
                                label="p-value")
            if "deviation" in plots:
                axes[i].plot(self.T, self.M, label="Deviation")
            if "threshold" in plots:
                axes[i].axhline(y=self.dev_threshold,
                                color='r',
                                linestyle='--',
                                label="Threshold")
            axes[i].legend()

        fig.autofmt_xdate()

        if savefig is None:
            plt.draw()
            plt.show()
        else:
            figpathname = utils.create_directory_from_path(savefig)
            plt.savefig(figpathname)
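A brief usage sketch, assuming detector is an already-fitted instance of the anomaly-detection class that owns plot_deviations.

# Show the raw data, strangeness and deviation/threshold panels, saving to file.
detector.plot_deviations(figsize=(10, 8),
                         plots=["data", "strangeness", "deviation", "threshold"],
                         savefig="deviations/overview.png")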
print("Found %d time series" % nCells)

# #### Plot one time series

# In[18]:

n = 200
gr.figure(figsize=(11, 4))
gr.plot(sampTimes, fMinData[n])

# #### Plot several time series in a single graph

# In[27]:

# Extract a random sample of nWaves integers to pick the waves
nWaves = 5
wNumbers = sc.random.randint(0, nCells, nWaves)
print("Ploting waves:")
print(wNumbers)

gr.figure(figsize=(11, 4))
gr.ioff()
for n in wNumbers:
    gr.plot(sampTimes, fMinData[n], label="w%d" % n)
gr.xlim(0, sampTimes.max())
gr.legend(ncol=nWaves)
gr.ion()
gr.draw()

# In[ ]:
Example #51
0
def onkeycazzo(event):
    global _pixel,_flux, idd, _lampixel, _refpeak, _deg, nonincl, fig, ax1, _params5, _num, _line, _line3,\
        ax2, ax3,  _skyref_wav, _skyref_flux,  _line2, _line4, _line5

    xdata, ydata = event.xdata, event.ydata

    _params5 = np.polyfit(_lampixel[idd], _refpeak[idd], _deg)
    p2 = np.poly1d(_params5)

    dist = np.sqrt((xdata - np.array(p2(_lampixel)))**2 +
                   (ydata - (np.array(_refpeak) - p2(_lampixel)))**2)
    ii = np.argmin(dist)
    _num = ii

    if event.key == 'a':
        idd.append(idd[-1] + 1)
        __lampixel = list(_lampixel)
        __lampixel.append(xdata)
        _lampixel = np.array(__lampixel)
        __refpeak = list(_refpeak)
        __refpeak.append(ydata)
        _refpeak = np.array(__refpeak)
        ax1.plot(xdata, ydata, 'ob')
    if event.key == 'd':
        idd.remove(ii)
        _num = ii
        for i in range(len(_lampixel)):
            if i not in idd: nonincl.append(i)
    if event.key == 'c':
        _num = ii

    if event.key in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:
        _deg = int(event.key)

    _params5 = np.polyfit(_lampixel[idd], _refpeak[idd], _deg)
    p2 = np.poly1d(_params5)

    _line.pop(0).remove()
    _line3.pop(0).remove()
    _line = ax1.plot(p2(_lampixel), _refpeak - p2(_lampixel), '.r')
    _line3 = ax1.plot(
        np.array(p2(_lampixel))[nonincl], (_refpeak - p2(_lampixel))[nonincl],
        'oc')
    ax1.set_xlim(np.min(p2(_pixel)), np.max(p2(_pixel)))

    ax2.plot(_skyref_wav, _skyref_flux, '-b')
    ax2.plot(p2(_pixel), _flux, '-r')
    ax2.plot(p2(_lampixel), np.ones(len(_lampixel)), '|b')
    ax2.plot(_refpeak, np.ones(len(_lampixel)), '|r')
    ax2.set_ylim(0, 1.1)
    ax2.set_xlim(np.min(p2(_pixel)), np.max(p2(_pixel)))

    _line2.pop(0).remove()
    _line4.pop(0).remove()
    _line5.pop(0).remove()
    ax3.plot(_skyref_wav, _skyref_flux, '-b')
    _line2 = ax3.plot(p2(_pixel), _flux, '-r')
    ax3.set_xlim(p2(_lampixel[_num]) - 50, p2(_lampixel)[_num] + 50)
    ax3.set_ylim(0, 1.1)
    _line4 = ax3.plot(p2(_lampixel[_num]), [1],
                      '|b',
                      label='ref. lamp detection')
    _line5 = ax3.plot(_refpeak[_num], [1], '|r', label='lamp detection')
    plt.legend()
    plt.draw()
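The key-press handler above only takes effect once it is registered on a figure canvas; a minimal, assumed hookup (keys: 'a' adds a point, 'd' deletes the nearest one, 'c' selects the nearest point to recentre the zoom panel, '1'-'9' set the polynomial degree):

# Attach the interactive wavelength-calibration handler to the current figure.
fig = plt.gcf()
fig.canvas.mpl_connect('key_press_event', onkeycazzo)
plt.show()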
Example #52
0
    def move(self):
        """ Move the Person

        """
        if self.pdshow:
            fig = plt.gcf()
            fig, ax = self.L.showG('w',
                                   labels=False,
                                   alphacy=0.,
                                   edges=False,
                                   fig=fig)
            plt.draw()
            plt.ion()
        while True:
            if self.moving:
                if self.sim.verbose:
                    print 'meca: updt ag ' + self.ID + ' @ ', self.sim.now()

                # if np.allclose(conv_vecarr(self.destination)[:2],self.L.Gw.pos[47]):
                #     import ipdb
                #     ipdb.set_trace()

                while self.cancelled:
                    yield passivate, self
                    print "Person.move: activated after being cancelled"
                checked = []
                for zone in self.world.zones(self):
                    if zone not in checked:
                        checked.append(zone)
                        zone(self)

                # updating acceleration
                acceleration = self.steering_mind(self)
                acceleration = acceleration.truncate(self.max_acceleration)
                self.acceleration = acceleration

                # updating velocity
                velocity = self.velocity + acceleration * self.interval
                self.velocity = velocity.truncate(self.max_speed)

                if velocity.length() > 0.2:
                    # record direction only when we've really had some
                    self.localy = velocity.normalize()
                    self.localx = vec3(self.localy.y, -self.localy.x)

                # updating position
                self.position = self.position + self.velocity * self.interval
                #                self.update()
                self.position.z = 0
                self.world.update_boid(self)

                self.net.update_pos(self.ID, conv_vecarr(self.position),
                                    self.sim.now())
                p = conv_vecarr(self.position).reshape(3, 1)
                v = conv_vecarr(self.velocity).reshape(3, 1)
                a = conv_vecarr(self.acceleration).reshape(3, 1)

                # fill panda dataframe 2D trajectory
                self.df = self.df.append(
                    pd.DataFrame(
                        {
                            't': pd.Timestamp(self.sim.now(), unit='s'),
                            'x': p[0],
                            'y': p[1],
                            'vx': v[0],
                            'vy': v[1],
                            'ax': a[0],
                            'ay': a[1]
                        },
                        columns=['t', 'x', 'y', 'vx', 'vy', 'ax', 'ay']))

                if self.pdshow:
                    ptmp = np.array([p[:2, 0], p[:2, 0] + v[:2, 0]])

                    if hasattr(self, 'pl'):
                        self.pl[0].set_data(self.df['x'].tail(1),
                                            self.df['y'].tail(1))
                        self.pla[0].set_data(ptmp[:, 0], ptmp[:, 1])
                        circle = plt.Circle(
                            (self.df['x'].tail(1), self.df['y'].tail(1)),
                            radius=self.radius,
                            alpha=0.3)
                        ax.add_patch(circle)
                    else:
                        self.pl = ax.plot(self.df['x'].tail(1),
                                          self.df['y'].tail(1),
                                          'o',
                                          color=self.color,
                                          ms=self.radius * 10)
                        self.pla = ax.plot(ptmp[:, 0], ptmp[:, 1], 'r')
                        circle = plt.Circle(
                            (self.df['x'].tail(1), self.df['y'].tail(1)),
                            radius=self.radius,
                            alpha=0.3)
                        ax.add_patch(circle)
                    # try:
                    #     fig,ax=plu.displot(p[:2],p[:2]+v[:2],'r')
                    # except:
                    #     pass
                    # import ipdb
                    # ipdb.set_trace()
                    plt.draw()
                    plt.pause(0.0001)
                if 'mysql' in self.save:
                    self.db.writemeca(self.ID, self.sim.now(), p, v, a)

                if 'txt' in self.save:
                    pyu.writemeca(self.ID, self.sim.now(), p, v, a)

                # new target when arrived in poi

                if self.arrived and\
                    (self.L.pt2ro(self.position) ==\
                        self.L.Gw.node[self.rooms[1]]['room']):

                    self.arrived = False
                    if self.endpoint:
                        self.endpoint = False
                        self.roomId = self.nextroomId
                        # remove the remaining waypoint which correspond
                        # to current room position
                        del self.waypoints[0]
                        del self.rooms[0]
                        # del self.dlist[0]
                        #
                        # If door lets continue
                        #
                        #
                        # ig destination --> next room
                        #
                        #adjroom  = self.L.Gr.neighbors(self.roomId)
                        #Nadjroom = len(adjroom)
                        if self.cdest == 'random':
                            # self.nextroomId   = int(np.floor(random.uniform(0,self.L.Gr.size())))
                            self.nextroomId = random.sample(
                                self.L.Gr.nodes(), 1)[0]
                            # test 1 ) next != actualroom
                            #      2 ) nextroom != fordiden room
                            #      3 ) room not share without another agent
                            while self.nextroomId == self.roomId or (
                                    self.nextroomId in self.forbidroomId
                            ):  # or (self.nextroomId in self.sim.roomlist):
                                # self.nextroomId   = int(np.floor(random.uniform(0,self.L.Gr.size())))
                                self.nextroomId = random.sample(
                                    self.L.Gr.nodes(), 1)[0]
                        elif self.cdest == 'file':
                            self.room_counter = self.room_counter + 1
                            if self.room_counter >= self.nb_room:
                                self.room_counter = 0
                            self.nextroomId = self.room_seq[self.room_counter]
                            self.wait = self.room_wait[self.room_counter]
                        #self.sim.roomlist.append(self.nextroomId) # list of all destinations of all nodes in the sim object
                        self.rooms, wp = self.L.waypointGw(
                            self.roomId, self.nextroomId)
                        # self.dlist =  [i in self.L.Gw.ldo for i in self.rooms]
                        for tup in wp[1:]:
                            self.waypoints.append(vec3(tup))
                    #nextroom = adjroom[k]
                    #    print "room : ",self.roomId
                    #    print "nextroom : ",self.nextroomId
                    #p_nextroom = self.L.Gr.pos[self.nextroomId]
                    #setdoors1  = self.L.Gr.node[self.roomId]['doors']
                    #setdoors2  = self.L.Gr.node[nextroom]['doors']
                    #doorId     = np.intersect1d(setdoors1,setdoors2)[0]
                    #
                    # coord door
                    #
                    #unode = self.L.Gs.neighbors(doorId)
                    #p1    = self.L.Gs.pos[unode[0]]
                    #p2    = self.L.Gs.pos[unode[1]]
                    #print p1
                    #print p2
                    #pdoor = (np.array(p1)+np.array(p2))/2
                        self.destination = self.waypoints[0]

                        if self.sim.verbose:
                            print 'meca: ag ' + self.ID + ' wait ' + str(
                                self.wait)  #*self.interval)
                        yield hold, self, self.wait

                    else:
                        del self.waypoints[0]
                        del self.rooms[0]
                        # del self.dlist[0]
                        #print "wp : ", self.waypoints
                        if len(self.waypoints) == 1:
                            self.endpoint = True
                        self.destination = self.waypoints[0]
                    #print "dest : ", self.destination
                else:
                    yield hold, self, self.interval
            else:
                #                self.update()
                self.world.update_boid(self)
                self.net.update_pos(self.ID, conv_vecarr(self.position),
                                    self.sim.now())

                yield hold, self, self.interval
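
The 'random' destination branch above is rejection sampling: redraw a candidate room until it differs from the current room and is not a forbidden one. A minimal sketch of the same idea on a plain list of room ids rather than the pylayers Gr graph (draw_next_room is a hypothetical helper, not part of the library):

import random

def draw_next_room(rooms, current, forbidden):
    """Redraw until the candidate is neither the current nor a forbidden room."""
    candidate = random.choice(rooms)
    while candidate == current or candidate in forbidden:
        candidate = random.choice(rooms)
    return candidate

# usage: 10 rooms, agent currently in room 3, rooms 0 and 7 off-limits
print(draw_next_room(list(range(10)), current=3, forbidden={0, 7}))
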
Example #53
0
def kohonen(data,
            labels,
            filter_size=6,
            neighbourhood_width=3.0,
            display_label=False,
            n_epochs=100,
            lr=0.001):
    """Example for using create_data, plot_data and som_step.
    """
    plb.close('all')

    dim = 28 * 28
    data_range = 255.0

    dy, dx = data.shape

    #set the size of the Kohonen map. In this case it will be 6 X 6
    size_k = filter_size

    #set the width of the neighborhood via the width of the gaussian that
    #describes it
    sigma = neighbourhood_width

    #initialise the centers randomly
    centers = np.random.rand(size_k**2, dim) * data_range

    #build a neighborhood matrix
    neighbor = np.arange(size_k**2).reshape((size_k, size_k))

    #set the learning rate
    eta = lr  # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE

    #set the number of training epochs
    num_epochs = n_epochs

    quantization_errors = np.zeros(num_epochs)

    for epoch in range(num_epochs):
        #set the random order in which the datapoints should be presented
        i_random = np.arange(np.shape(data)[0])
        np.random.shuffle(i_random)
        mean_error = 0
        for t, i in enumerate(i_random):
            mean_error += som_step(centers, data[i, :], neighbor, eta, sigma)

        mean_error /= np.shape(data)[0]
        print(mean_error)
        quantization_errors[epoch] = mean_error

    # for visualization, you can use this:


#    for i in range(1, size_k**2+1):
#        ax = plb.subplot(size_k,size_k,i)
#        plb.imshow(np.reshape(centers[i-1,:], [28, 28]),interpolation='bilinear')
#        if display_label:
#            ax.set_title('label ' + str(assign_label(centers[i-1,:],data,labels)))
#        plb.axis('off')
#
#    if display_label:
#        plb.subplots_adjust(hspace=1.0)
#        plb.savefig('labeled_proto_sigma_'+str(int(sigma))+'_k_'+str(size_k)+'.png')
#    else:
#        plb.savefig('proto_sigma_'+str(int(sigma))+'_k_'+str(size_k)+'.png')
#        plb.figure()
#        plb.plot(range(1,num_epochs+1),quantization_errors)
#        plb.xlabel('Number of epochs')
#        plb.ylabel('Mean weighted quantization error')
#        plb.savefig('weights_sigma_'+str(int(sigma))+'_k_'+str(size_k)+'.png')
# leave the window open at the end of the loop
    plb.show()
    plb.draw()
    return centers, quantization_errors
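
som_step is assumed to come from the exercise scaffold and is not shown above. A rough sketch of what such an update step typically does — find the best-matching unit, pull every center towards the sample with a Gaussian neighborhood weight on the k x k grid, and return the winner's quantization error (the names and the exact error definition are assumptions, not the original implementation):

import numpy as np

def som_step_sketch(centers, sample, neighbor, eta, sigma):
    """One online SOM update; returns the winner's squared quantization error."""
    # best-matching unit: center closest to the presented sample
    bmu = np.argmin(np.sum((centers - sample) ** 2, axis=1))
    bmu_pos = np.array(np.unravel_index(bmu, neighbor.shape))
    # Gaussian neighborhood weight for every unit on the k x k grid
    grid = np.array(np.unravel_index(np.arange(centers.shape[0]), neighbor.shape)).T
    dist2 = np.sum((grid - bmu_pos) ** 2, axis=1)
    h = np.exp(-dist2 / (2.0 * sigma ** 2))
    # pull every center towards the sample, weighted by its neighborhood value
    centers += eta * h[:, None] * (sample - centers)
    return float(np.sum((sample - centers[bmu]) ** 2))
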
Example #54
0
def test():
    import matplotlib.pylab as pyb
    G=nx.path_graph(10,create_using=nx.DiGraph())
    draw(G)
    pyb.draw()
    pyb.show()
    def gen_outlier_stat_features(self,doplot=False,sig_features=[30,20,15,8,5],\
        min_freq=10.0,dosave=True,max_pulse_period=400.0):
        """here we generate outlier features and refine the initial pulsational period
            by downweighting those outliers.
        """
        
        res2 = self._get_pulsational_period(doplot=doplot,min_freq=min_freq)
        
        ## now sigclip
        offs = (self.y - res2['model'])/self.dy0
        moffs = np.median(offs)
        offs -= moffs
        
        ## do some feature creation ... find the statistics of major outliers
        for i,s in enumerate(sig_features):
            rr = (np.inf,s) if i == 0 else (sig_features[i-1],s)
            tmp = (offs < rr[0]) & (offs > rr[1])
            nlow = float(tmp.sum())/self.nepochs
            tmp = (offs > -1*rr[0]) & (offs < -1*rr[1])
            nhigh = float(tmp.sum())/self.nepochs
            if self.verbose:
                print "%i: low = %f high = %f  feature-%i-ratio-diff = %f" % (s,nlow,nhigh,s,nhigh - nlow)
            
            self.features.update({"feature-%i-ratio-diff" % s: (nhigh - nlow)*100.0})
            
        tmp = np.where(abs(offs) > 4)
        self.dy_orig = copy.copy(self.merr)
        dy      = copy.copy(self.merr)
        dy[tmp] = np.sqrt(dy[tmp]**2 + res2['model_error'][tmp]**2 + (8.0*(1 - np.exp(-1.0*abs(offs[tmp])/4)))**2)
        dy0 = np.sqrt(dy**2+self.sys_err**2)
        
        #Xmax = self.x0.max()
        #f0 = 1.0/max_pulse_period; df = 0.1/Xmax; fe = min_freq
        #numf = int((fe-f0)/df)
        
        #refine around original period
        ## Josh's original calcs, which fail for sources like: 221205
        ##df = 0.1/self.x0.max()
        ##f0 = res2['freq']*0.95
        ##fe = res2['freq']*1.05
        ##numf = int((fe-f0)/df)

        df = 0.1/self.x0.max()
        f0 = res2['freq']*0.95
        fe = res2['freq']*1.05
        numf = int((fe-f0)/df)
        if numf == 0:
            ## Josh's original calcs, which fail for sources like: 221205
            numf = 100 # kludge / fudge / magic number
            df = (fe-f0) / float(numf)
            
        psdr,res = lombr(self.x0,self.y,dy0,f0,df,numf,detrend_order=1)
        period=1./res['freq']
        
        self.features.update({"p_pulse": period})

        if self.allow_plotting and doplot:
            try:
                tt=(self.x0*res2['freq']) % 1.; s=tt.argsort()
                plt.errorbar (tt[tmp],self.y[tmp],self.dy_orig[tmp],fmt='o',c="r")
                tt=(self.x0*res['freq']) % 1.; s=tt.argsort()
                plt.plot(tt[s],res['model'][s],c="r")
                if dosave:
                    plt.savefig("pulse-%s-p=%f.png" % (os.path.basename(self.name),period))
                    if self.verbose:
                        print "saved...", "pulse-%s-p=%f.png" % (os.path.basename(self.name),period)
                plt.draw()
            except:
                pass
        return offs, res2
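
The heart of the routine above is the soft down-weighting of outliers: points more than 4 sigma from the folded model keep their data, but their errors are inflated by a term that grows smoothly from 0 towards 8 with the normalized offset, so they are never hard-clipped. That step in isolation (array names are placeholders):

import numpy as np

def inflate_outlier_errors(offs, dy, model_error, clip=4.0, scale=8.0):
    """Inflate the errors of points whose |normalized offset| exceeds `clip`."""
    dy = dy.copy()
    idx = np.abs(offs) > clip
    soft = scale * (1.0 - np.exp(-np.abs(offs[idx]) / clip))
    dy[idx] = np.sqrt(dy[idx] ** 2 + model_error[idx] ** 2 + soft ** 2)
    return dy
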
Example #56
0
def updateplot(struct, x_change):
    """
    Update the plot after the drag event of a stationary point (struct),
    move all related objects by x_change in the x direction,
    and regenerate the corresponding lines
    """
    global xlow, xhigh, xmargin, ylow, yhigh, ymargin
    # set the new sizes of the figure
    get_sizes()
    plt.gca().set_xlim([xlow - xmargin, xhigh + xmargin])
    plt.gca().set_ylim([ylow - ymargin, yhigh + ymargin])
    # generate new coordinates for the images
    if struct in imgsd:
        old_extent = imgsd[struct].get_extent()
        extent_change = (x_change, x_change, 0, 0)
        extent = [old_extent[i] + extent_change[i] for i in range(0, 4)]
        imgsd[struct].set_extent(extent=extent)
    # end if
    # generate new coordinates for the text
    if struct in textd:
        old_pos = textd[struct].get_position()
        new_pos = (old_pos[0] + x_change, old_pos[1])
        textd[struct].set_position(new_pos)
    # generate new coordinates for the lines
    for t in tss:
        if (struct == t or struct == t.reactant or struct == t.product):
            t.lines[0] = line(t.x,
                              t.y,
                              t.reactant.x,
                              t.reactant.y, [t, t.reactant],
                              col=t.color)
            t.lines[1] = line(t.x,
                              t.y,
                              t.product.x,
                              t.product.y, [t, t.product],
                              col=t.color)
            for i in range(0, 2):
                li = t.lines[i]
                if li.straight_line:
                    print('straight line')
                else:
                    xlist = np.arange(li.xmin, li.xmax,
                                      (li.xmax - li.xmin) / 1000)
                    a = li.coeffs
                    y = a[0] * xlist**3 + a[1] * xlist**2 + a[2] * xlist + a[3]
                    linesd[t][i][0].set_xdata(xlist)
                    linesd[t][i][0].set_ydata(y)
                # end if
            # end for
        # end if
    # end for
    for b in barrierlesss:
        if (struct == b.reactant or struct == b.product):
            b.line = line(b.reactant.x,
                          b.reactant.y,
                          b.product.x,
                          b.product.y, [b.reactant, b.product],
                          col=b.color)
            li = b.line
            if li.straight_line:
                print('straight line')
            else:
                xlist = np.arange(li.xmin, li.xmax, (li.xmax - li.xmin) / 1000)
                a = li.coeffs
                y = a[0] * xlist**3 + a[1] * xlist**2 + a[2] * xlist + a[3]
                linesd[b][0][0].set_xdata(xlist)
                linesd[b][0][0].set_ydata(y)
            # end if
        # end if
    # end for
    plt.draw()
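
The line objects above carry cubic coefficients that are re-evaluated on a 1000-point grid whenever a stationary point is dragged. A common way such connectors are built in potential-energy diagrams is a cubic through the two endpoints with zero slope at both, obtained from a 4x4 linear system; the sketch below shows that construction, but it is an assumption about what line() computes, not its actual code:

import numpy as np

def cubic_connector(x1, y1, x2, y2):
    """Coefficients [a0, a1, a2, a3] of y = a0*x**3 + a1*x**2 + a2*x + a3
    through (x1, y1) and (x2, y2) with zero slope at both endpoints."""
    A = np.array([
        [x1**3, x1**2, x1, 1.0],    # y(x1) = y1
        [x2**3, x2**2, x2, 1.0],    # y(x2) = y2
        [3*x1**2, 2*x1, 1.0, 0.0],  # y'(x1) = 0
        [3*x2**2, 2*x2, 1.0, 0.0],  # y'(x2) = 0
    ])
    return np.linalg.solve(A, np.array([y1, y2, 0.0, 0.0]))

# evaluate on a fine grid, as updateplot does for each regenerated line
a = cubic_connector(0.0, 1.0, 2.0, 0.5)
xlist = np.linspace(0.0, 2.0, 1000)
y = a[0] * xlist**3 + a[1] * xlist**2 + a[2] * xlist + a[3]
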
Example #57
0
def plotCurrentW(ww, currentData, delay):
    # Plot the randomly generated data
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")

    ax.scatter(currentData["x1"],
               currentData["x2"],
               currentData["yp"],
               marker=".")

    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.set_zlim(-1, 1)
    ax.set_xlabel("x1")
    ax.set_ylabel("x2")
    ax.set_zlabel("yp")

    # Create data points corresponding to the weights so that they can be plotted on the graph
    wDict = {'x1': [], 'x2': [], 'yp': []}

    # y = yIntercept + gradient*x
    #   = w[0] + w[1]*x
    # To plot a straight line, we need the x and y at the far left (x=-1) and far right (x=+1)
    #0 = w[0] + w[1]*x1 + w[2]*x2
    #-w[0] = w[1]*x1 + w[2]*x2
    #x2 = (-w[0] -w[1]*x1)/w[2]

    #draw a plane
    #X1,X2 = numpy.meshgrid(x1, x2)
    #h = ww[0] + ww[1]*x1 + ww[2]*x2

    ##possibility??
    #mn = np.min(currentData, axis=0)
    #mx = np.max(currentData, axis=0)
    #X,Y = np.meshgrid(np.linspace(mn[0], mx[0], 20), np.linspace(mn[1], mx[1],20))

    leftleftY = ww[0] + ww[1] * (-1.0) + ww[2] * (-1.0)
    leftrightY = ww[0] + ww[1] * (-1.0) + ww[2] * (1.0)
    rightleftY = ww[0] + ww[1] * (1.0) + ww[2] * (-1.0)
    rightrightY = ww[0] + ww[1] * (1.0) + ww[2] * (1.0)

    wDict["x1"].append(-1.0)
    wDict["x2"].append(-1.0)
    wDict["yp"].append(leftleftY)

    wDict["x1"].append(-1.0)
    wDict["x2"].append(1.0)
    wDict["yp"].append(leftrightY)

    wDict["x1"].append(1.0)
    wDict["x2"].append(-1.0)
    wDict["yp"].append(rightleftY)

    wDict["x1"].append(1.0)
    wDict["x2"].append(1.0)
    wDict["yp"].append(rightrightY)

    # Convert to a dataframe so it can be plotted like the other data
    resultW = pd.DataFrame(wDict)

    xx1, xx2 = numpy.meshgrid(resultW.x1, resultW.x2)

    z = ww[0] + ww[1] * xx1 + ww[2] * xx2

    # Plot the corresponding classification separating line
    #plt.plot(resultW.x, resultW.yp)

    ax.plot_surface(xx1, xx2, z, alpha=0.2)

    plt.draw()
    plt.pause(delay)
    plt.clf()
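
The commented derivation above rearranges the decision surface w0 + w1*x1 + w2*x2 = 0 into the explicit line x2 = (-w0 - w1*x1)/w2; in the 3D plot this becomes the plane z = w0 + w1*x1 + w2*x2 evaluated on a meshgrid. A standalone sketch of the 2D boundary (decision_boundary is a placeholder name):

import numpy as np

def decision_boundary(ww, x1_range=(-1.0, 1.0), n=50):
    """Points (x1, x2) on the line w0 + w1*x1 + w2*x2 = 0 (assumes w2 != 0)."""
    x1 = np.linspace(x1_range[0], x1_range[1], n)
    x2 = (-ww[0] - ww[1] * x1) / ww[2]
    return x1, x2

# usage with an arbitrary weight vector
x1, x2 = decision_boundary([0.2, 0.5, -1.0])
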
Example #58
0
def kohonen():
    """Example for using create_data, plot_data and som_step.
    """
    plb.close('all')

    dynamicEta = True
    dynamicSigma = True
    Equilibrate = False

    dim = 28 * 28
    data_range = 255.0

    # load in data and labels
    data = np.array(np.loadtxt('data.txt'))
    labels = np.loadtxt('labels.txt')

    # select 4 digits
    name = 'Lorkowski'  # REPLACE BY YOUR OWN NAME
    targetdigits = name2digits(
        name)  # assign the four digits that should be used
    print targetdigits  # output the digits that were selected
    # this selects all data vectors that correspond to one of the four digits
    data = data[np.logical_or.reduce([labels == x for x in targetdigits]), :]

    # filter the label
    labels = labels[np.logical_or.reduce([labels == x for x in targetdigits])]

    dy, dx = data.shape

    #set the size of the Kohonen map. In this case it will be 8 x 8
    size_k = 8

    #set the width of the neighborhood via the width of the gaussian that
    #describes it
    #initial_sigma = 5
    initial_sigma = float(size_k / 2)
    sigma = [initial_sigma]

    #initialise the centers randomly
    centers = np.random.rand(size_k**2, dim) * data_range

    #build a neighborhood matrix
    neighbor = np.arange(size_k**2).reshape((size_k, size_k))

    #set the learning rate
    eta = [0.1]  # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE

    #set the maximal iteration count
    tmax = (
        500 * size_k * size_k
    ) + 1000  # this might or might not work; use your own convergence criterion
    #tmax = 20000

    #set the random order in which the datapoints should be presented
    i_random = np.arange(tmax) % dy
    np.random.shuffle(i_random)

    #convergence criteria
    tol = 0.1
    previousCenters = np.copy(centers)

    errors = []
    mErrors = []
    logError = []
    finalErrors = []

    tailErrors = [0.0]  # the last 500 errors

    if ((dynamicEta == True) & (dynamicSigma == True)):
        filename = 'k' + str(size_k) + 'dynamicEta' + str(
            eta[0]) + 'dynamicSigma' + str(sigma[0]) + '_tmax' + str(tmax)
        print filename
    elif ((dynamicEta == True) & (dynamicSigma == False)):
        filename = 'k' + str(size_k) + 'dynamicEta' + str(
            eta[0]) + 'sigma' + str(sigma[0]) + '_tmax' + str(tmax)
        print filename
    elif ((dynamicEta == False) & (dynamicSigma == True)):
        filename = 'k' + str(size_k) + 'eta' + str(
            eta[0]) + 'dynamicSigma' + str(sigma[0]) + '_tmax' + str(tmax)
        print filename
    else:
        filename = 'k' + str(size_k) + 'eta' + str(eta[0]) + 'sigma' + str(
            sigma[0]) + '_tmax' + str(tmax)
        print filename

    #convergedList=[]
    #numConverged=0
    #holdConvergedLabelsCount=0
    #t=-1

    for t, i in enumerate(i_random):
        '''
        if ( labels[i] in convergedList ):
            holdConvergedLabelsCount += 1
            if (holdConvergedLabelsCount >= len(targetdigits)):
                del convergedList[:]
                holdConvergedLabelsCount = 0
                numConverged = 0
                print "releasing labels"
            continue

        t+=1 # If you use this with t in the iterator to tn
        '''

        if dynamicEta == True:
            new_eta = eta[0] * exp(-float(t) / float(tmax))
            '''
            C = tmax/100
            new_eta = C * eta[0] / (C+t)
            '''
            eta.append(new_eta)

        if dynamicSigma == True:
            if sigma[0] == 1:
                new_sigma = sigma[0]
            else:
                mlambda = tmax / log(sigma[0])
                new_sigma = sigma[0] * exp(-float(t / mlambda))
            sigma.append(new_sigma)

        # Change to sigma[0] for static and sigma[t] for dynamic neighborhood function
        if ((dynamicEta == True) & (dynamicSigma == True)):
            som_step(centers, data[i, :], neighbor, eta[t], sigma[t])
        elif ((dynamicEta == False) & (dynamicSigma == True)):
            som_step(centers, data[i, :], neighbor, eta[0], sigma[t])
        elif ((dynamicEta == True) & (dynamicSigma == False)):
            som_step(centers, data[i, :], neighbor, eta[t], sigma[0])
        else:
            som_step(centers, data[i, :], neighbor, eta[0], sigma[0])

        # convergence check
        e = sum(sum((centers - previousCenters)**2))
        tailErrors.append(e)

        # Since this is an online method, the centers will most likely change in
        # the future even though the current iteration has a residual of 0.
        # Basing the convergence on the residual of the mean of the last 500 errors
        # may be a better convergence criterion.
        if (t > 500):
            if (len(tailErrors) >= 500):
                tailErrors.pop(0)
            tailErrors.append(e)

            # Update the mean error term
            tmpError = sum(tailErrors) / len(tailErrors)
            mErrors.append(tmpError)

            if t > (500 * size_k * size_k):
                tolerance_check = np.abs(mErrors[-1] - mErrors[-501])
                logError.append(tolerance_check)
                data_print_static("Tol Error Minimum Is: {0}, "
                                  "Iteration: {1}, Current Error: {2}".format(
                                      np.min(logError), t, tolerance_check))
                if logError[-1] < tol:
                    print ""
                    print "Converage after ", t, " iterations"
                    break
                """
                future_tolerance_check = np.sum(mErrors[-500])/500
                past_tolerance_check = np.sum(mErrors[-1000:-500])/500
                tolerance_check = np.abs((future_tolerance_check -
                                          past_tolerance_check))
                logError.append(tolerance_check)
                if np.size(logError) > 2:
                    log_d_v = ((np.sqrt((logError[-2] - logError[-1])**2)))
                    # plb.scatter(t, log_d_v, color='red')
                    # plb.pause(0.1)
                    finalErrors.append(log_d_v)
                    data_print_static("Tol Error Minimum Is: {0}, "
                                      "Iteration: {1}, Current Error: {2}".
                                      format(np.min(finalErrors), t,
                                             log_d_v))
                    if log_d_v < tol:
                        print ""
                        print "Converage after ", t, " iterations"
                        break
                """
            """
            if (len(mErrors) >= 2):
                tolerance_check = np.abs(mErrors[-1] - mErrors[-2])
                finalErrors.append(tolerance_check)
                data_print_static("Tol Error Minimum Is: {0}, "
                                  "Iteration: {1}, Current Error: {2}".
                                  format(np.min(finalErrors), t, tolerance_check))
                if ((tolerance_check < tol) & (t >= 500*size_k*size_k)):
                    '''
                    numConverged +=1
                    convergedList.append(labels[i])
                    print "Holding "+str(labels[i])
                    if (numConverged == 4):
                        print ""
                        print "Converage after ", t, " iterations"
                        break
                    '''
                    print ""
                    print "Converage after ", t, " iterations"
                    break
            """
        errors.append(e)
        previousCenters = np.copy(centers)

    if Equilibrate == True:
        old_eta = eta[-1]
        new_tmax = 0.10 * tmax
        i_random = np.arange(new_tmax) % dy
        np.random.shuffle(i_random)
        for t, i in enumerate(i_random):
            new_eta = old_eta
            eta.append(new_eta)

            new_sigma = 1.0
            sigma.append(new_sigma)

            # Change to sigma[0] for static and sigma[t] for dynamic neighborhood function
            if ((dynamicEta == True) & (dynamicSigma == True)):
                som_step(centers, data[i, :], neighbor, eta[-1], sigma[-1])
            elif ((dynamicEta == False) & (dynamicSigma == True)):
                som_step(centers, data[i, :], neighbor, eta[0], sigma[-1])
            elif ((dynamicEta == True) & (dynamicSigma == False)):
                som_step(centers, data[i, :], neighbor, eta[-1], sigma[0])
            else:
                som_step(centers, data[i, :], neighbor, eta[0], sigma[0])

            # convergence check
            e = sum(sum((centers - previousCenters)**2))
            tailErrors.append(e)

            # Since this is an online method, the centers will most likely change in
            # the future even though the current iteration has a residual of 0.
            # Basing the convergence on the residual of the mean of the last 500 errors
            # may be a better convergence criterion.
            if (len(tailErrors) >= 500):
                tailErrors.pop(0)
                tailErrors.append(e)

            # Update the mean error term
            tmpError = sum(tailErrors) / len(tailErrors)
            mErrors.append(tmpError)

            if (len(mErrors) >= 2):
                tolerance_check = np.abs(mErrors[-1] - mErrors[-501])
                logError.append(tolerance_check)
                data_print_static("Tol Error Minimum Is: {0}, "
                                  "Iteration: {1}, Current Error: {2}".format(
                                      np.min(logError), t, tolerance_check))
                if logError[-1] < tol:
                    print ""
                    print "Converage after ", t, " iterations"
                    break

            errors.append(e)
            previousCenters = np.copy(centers)

    # Find the digit assigned to each center
    index = 0
    digits = []
    for i in range(0, size_k**2):
        index = np.argmin(np.sum((data[:] - centers[i, :])**2, 1))
        digits.append(labels[index])

    print "Digit assignement to the clusters: \n"
    print np.resize(digits, (size_k, size_k))
    np.savetxt('data/' + filename + '_cluster.txt',
               np.resize(digits, (size_k, size_k)),
               fmt='%i')

    # for visualization, you can use this:
    for i in range(size_k**2):
        plb.subplot(size_k, size_k, i + 1)

        plb.imshow(np.reshape(centers[i, :], [28, 28]),
                   interpolation='bilinear')
        plb.axis('off')

        plb.draw()

    # leave the window open at the end of the loop
    plb.savefig('data/' + filename + '_kohonen.pdf')
    plb.show()
    # plb.draw()

    import seaborn as sb

    if dynamicSigma == True:
        plb.plot(sigma)
        plb.ylabel('Sigma values')
        plb.xlabel('Iterations')
        plb.savefig('data/sigma.pdf')
        plb.show()

    if dynamicEta == True:
        plb.plot(eta)
        plb.ylabel('Learning Rate')
        plb.xlabel('Iterations')
        plb.savefig('data/eta.pdf')
        plb.show()

    plb.plot(errors)
    plb.ylabel('Sum of the Squared Errors')
    plb.xlabel('Iterations')
    plb.savefig('data/' + filename + '_sqerrors.pdf')
    plb.show()

    plb.plot(mErrors)
    plb.ylabel('Mean of last 500 errors')
    plb.xlabel('Iterations')
    plb.savefig('data/' + filename + '_mean500.pdf')
    plb.show()

    plb.plot(logError)
    plb.ylabel('Convergence Criteria')
    plb.xlabel('Iterations')
    plb.savefig('data/' + filename + '_convergence.pdf')
    plb.show()
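
Two ingredients above are worth isolating: the exponential schedules eta(t) = eta0*exp(-t/tmax) and sigma(t) = sigma0*exp(-t/lambda) with lambda = tmax/ln(sigma0), which shrinks sigma to roughly 1 at t = tmax, and the convergence test on the mean of the last 500 center updates rather than on a single noisy residual. A compact sketch of both ideas (helper names are mine, not from the original):

import numpy as np

def eta_schedule(eta0, t, tmax):
    """Exponentially decaying learning rate."""
    return eta0 * np.exp(-float(t) / tmax)

def sigma_schedule(sigma0, t, tmax):
    """Neighborhood width chosen so that sigma(tmax) is approximately 1."""
    if sigma0 <= 1.0:
        return sigma0
    lam = tmax / np.log(sigma0)
    return sigma0 * np.exp(-float(t) / lam)

def converged(errors, window=500, tol=0.1):
    """Compare the means of the two most recent windows of center updates."""
    if len(errors) < 2 * window:
        return False
    recent = np.mean(errors[-window:])
    previous = np.mean(errors[-2 * window:-window])
    return abs(recent - previous) < tol
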
Example #59
0
def montage(nifti,
            anat,
            roi_dict,
            thr=2,
            fig=None,
            out_file=None,
            feature_dict=None,
            target_stat=None,
            target_value=None):
    if isinstance(anat, str):
        anat = load_image(anat)
    assert nifti is not None
    assert anat is not None
    assert roi_dict is not None

    texcol = 1
    bgcol = 0
    iscale = 2
    weights = nifti.get_data()
    #weights = weights / weights.std(axis=3)
    features = weights.shape[-1]

    indices = [0]
    y = 8
    x = int(ceil(1.0 * features / y))

    font = {"size": 8}
    rc("font", **font)

    if fig is None:
        fig = plt.figure(figsize=[iscale * y, iscale * x / 2.5])
    plt.subplots_adjust(left=0.01,
                        right=0.99,
                        bottom=0.01,
                        top=0.99,
                        wspace=0.1,
                        hspace=0)

    for f in xrange(features):
        roi = roi_dict.get(f, None)
        if roi is None:
            continue
        coords = roi["top_clust"]["coords"]
        assert coords is not None

        feat = weights[:, :, :, f]
        feat = feat / feat.std()
        imax = np.max(np.absolute(feat))
        imin = -imax
        imshow_args = {"vmax": imax, "vmin": imin}

        coords = ([-coords[0], -coords[1], coords[2]])

        ax = fig.add_subplot(x, y, f + 1)
        plt.axis("off")

        try:
            plot_map(feat,
                     xyz_affine(nifti),
                     anat=anat.get_data(),
                     anat_affine=xyz_affine(anat),
                     threshold=thr,
                     figure=fig,
                     axes=ax,
                     cut_coords=coords,
                     annotate=False,
                     cmap=cmap,
                     draw_cross=False,
                     **imshow_args)
        except Exception as e:
            logger.exception(e)

        plt.text(0.05,
                 0.8,
                 str(f),
                 transform=ax.transAxes,
                 horizontalalignment="center",
                 color=(texcol, texcol, texcol))
        pos = [(0.05, 0.05), (0.4, 0.05), (0.8, 0.05)]
        colors = ["purple", "yellow", "green"]
        if feature_dict is not None and feature_dict.get(f, None) is not None:
            d = feature_dict[f]
            for i, key in enumerate([k for k in d if k != "real_id"]):
                plt.text(pos[i][0],
                         pos[i][1],
                         "%s=%.2f" % (key, d[key]),
                         transform=ax.transAxes,
                         horizontalalignment="left",
                         color=colors[i])
                if key == target_stat:
                    assert target_value is not None
                    if d[key] >= target_value:
                        p_fancy = FancyBboxPatch((0.1, 0.1),
                                                 2.5 - .1,
                                                 1 - .1,
                                                 boxstyle="round,pad=0.1",
                                                 ec=(1., 0.5, 1.),
                                                 fc="none")
                        ax.add_patch(p_fancy)
                    elif d[key] <= -target_value:
                        p_fancy = FancyBboxPatch((0.1, 0.1),
                                                 iscale * 2.5 - .1,
                                                 iscale - .1,
                                                 boxstyle="round,pad=0.1",
                                                 ec=(0., 0.5, 0.),
                                                 fc="none")
                        ax.add_patch(p_fancy)


#    stdout.write("\rSaving montage: DONE\n")
    if out_file is not None:
        plt.savefig(out_file,
                    transparent=True,
                    facecolor=(bgcol, bgcol, bgcol))
    else:
        plt.draw()
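
Each feature map above is standardized and then displayed with colour limits symmetric about zero (vmax = max|w|, vmin = -vmax), so positive and negative loadings get comparable intensities. That normalization on its own:

import numpy as np

def symmetric_limits(feature_volume):
    """vmin/vmax centred on zero for a standardized feature volume."""
    feat = feature_volume / feature_volume.std()
    vmax = float(np.max(np.abs(feat)))
    return -vmax, vmax

# usage on a random volume standing in for one component's weights
vmin, vmax = symmetric_limits(np.random.randn(10, 10, 10))
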
Example #60
0
        def click(event):
            if not event.inaxes: return
            global whatx, fit_1, fit_3, startpos, endpos, fits_1, xs_1, x1, y1, x3, y3
            startpos = event.xdata, event.ydata

            # find closest existing pt
            if fit_1 == None:
                #                print "no fit_1?!"
                x1 = []
                y1 = []
                d1 = pl.array([1e10])
            else:
                x1, y1 = fit_1.get_data()
                d1 = abs(event.xdata - x1)
            if fit_3 == None:
                #                print "no fit_3?!"
                x3 = []
                y3 = []
                d3 = pl.array([1e10])
            else:
                x3, y3 = fit_3.get_data()
                d3 = abs(event.xdata - x3)

            # todo: for deletions, make sure we have all avail wavelength pts
            # i suppose that the flux combination step that creates fit_wave
            # will do that...

#            print "x1=",x1
#            print "x3=",x3

            if len(d1) <= 0:
                d1 = pl.array([1e10])
            if len(d3) <= 0:
                d3 = pl.array([1e10])

            if d1.min() <= d3.min():
                whatpoint = pl.where(d1 == d1.min())[0][0]
                whatx = x1[whatpoint]
                print "deleting detection %d @ " % whatpoint, whatx
                fit_1.set_data(pl.delete(x1, whatpoint),
                               pl.delete(y1, whatpoint))
                # delete the uncert error line too
                #                ds_1=abs(event.xdata-xs_1)
                #                k=pl.where(ds_1==ds_1.min())[0][0]
                k = whatpoint
                fits_1[k].remove()
                fits_1 = pl.delete(fits_1, k)
                xs_1 = pl.delete(xs_1, k)
            else:
                whatpoint = pl.where(d3 == d3.min())[0][0]
                whatx = x3[whatpoint]
                print "deleting UL %d @ " % whatpoint, whatx
                x3 = pl.delete(x3, whatpoint)
                y3 = pl.delete(y3, whatpoint)
                fit_3.set_data(x3, y3)

            if event.button == 3:  #R-click
                x3 = pl.append(x3, whatx)
                y3 = pl.append(y3, startpos[1])
                if fit_3 == None:
                    fit_3 = pl.plot(x3, y3, 'rv')[0]
                else:
                    fit_3.set_data(x3, y3)

            pl.draw()