def main(M, z, output_filename):
    numpy.random.seed(31415)
    x_label = "$R$ [R$_\odot$]"
    y_label = "$\\rho$ [g/cm$^{3}$]"
    fig, ax = figure_frame(x_label, y_label, xsize=12, ysize=8)
    cols = get_distinct(3)

    r, rho = get_density_profile(EVtwin, M, z)
    pyplot.plot(r.value_in(units.RSun),
                rho.value_in(units.g/units.cm**3),
                label="EVtwin", c=cols[0])
    r, rho = get_density_profile(MESA, M, z)
    pyplot.plot(r.value_in(units.RSun),
                rho.value_in(units.g/units.cm**3),
                label="MESA", c=cols[1])

    # Run the merger code.
    
    r, rho = merge_two_stars(0.5*M, 0.5*M, 1|units.yr)
    
    pyplot.plot(r.value_in(units.RSun),
                rho.value_in(units.g/units.cm**3),
                label="merged star", c=cols[2])
    pyplot.semilogy()
    
    if output_filename is not None:
        pyplot.savefig(output_filename)
        print('\nSaved figure in file', output_filename, '\n')
    else:
        output_filename = 'merger_stellar_density_profile.png'
        pyplot.savefig(output_filename)
        print('\nSaved figure in file', output_filename, '\n')
        pyplot.show()
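
# Hypothetical driver for main() above, assuming the AMUSE framework that the
# rest of this script relies on (EVtwin, MESA, units, merge_two_stars).
from amuse.units import units
main(2.0 | units.MSun, 0.02, None)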
Example 2
def point_mass():
    GM = (G * (1.*u.M_sun)).decompose([u.au,u.M_sun,u.year,u.radian]).value

    def F(t,x):
        x,y,px,py = x.T
        a = -GM/(x*x+y*y)**1.5
        return np.array([px, py, x*a, y*a]).T

    x0 = np.array([1.0, 0.0, 0.0, 2*np.pi]) # Earth

    integrator = DOPRI853Integrator(F)
    nsteps = 10000
    dt = 0.01
    nsteps_per_pullback = 10
    d0 = 1e-5

    LEs, xs = lyapunov(x0, integrator, dt, nsteps,
                       d0=d0, nsteps_per_pullback=nsteps_per_pullback)

    print("Lyapunov exponent computed")
    plt.clf()
    plt.semilogy(LEs, marker=None)
    plt.savefig("point_mass_le.png")

    plt.clf()
    plt.plot(xs[:,0],xs[:,1],marker=None)
    plt.savefig(os.path.join(plot_path,"point_mass.png"))
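
# The lyapunov() helper used above is project-specific; below is a minimal,
# self-contained sketch (an assumption, not the library's implementation) of
# the same idea: integrate a fiducial orbit plus a d0-offset companion and
# renormalize the offset every nsteps_per_pullback steps (Benettin's method).
import numpy as np
from scipy.integrate import solve_ivp

def lyapunov_sketch(F, x0, dt, nsteps, d0=1e-5, nsteps_per_pullback=10):
    x = np.asarray(x0, dtype=float)
    y = x + d0 / np.sqrt(x.size)          # companion offset by distance d0
    log_stretch, t, les = 0.0, 0.0, []
    for step in range(1, nsteps + 1):
        x = solve_ivp(F, (t, t + dt), x).y[:, -1]
        y = solve_ivp(F, (t, t + dt), y).y[:, -1]
        t += dt
        if step % nsteps_per_pullback == 0:
            d = np.linalg.norm(y - x)
            log_stretch += np.log(d / d0)
            y = x + (y - x) * (d0 / d)    # pull companion back to distance d0
            les.append(log_stretch / t)   # running estimate of the exponent
    return np.array(les)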
Example 3
def plotPeriodogram(fvec, pvec,  axes=None, thinning=None):
    '''
    **plotPeriodogram(fvec, pvec, axes=None, thinning=None)**
    Plot a periodogram.

    Parameters
    ----------
    **fvec** : vector containing the frequencies resulting from fftdem()
    **pvec** : vector containing the power values resulting from fftdem()
    **axes** : string indicating what type of axes to use. Possible options are:
        - "loglog"
        - "semilogx"
        - "semilogy"
        - None (default option)
    **thinning** : step used to thin the data to plot, as the vectors can be
        very large. Only every thinning-th point is plotted.
    '''
    # Wvec=1/fvec
    if thinning is None:
        thinning = thinningCheckup(fvec)
    plt.figure()
    if axes == "loglog":
        plt.loglog(fvec[range(0,fvec.size,thinning)],pvec[range(0,pvec.size,thinning)])
    elif axes == "semilogx":
        plt.semilogx(fvec[range(0,fvec.size,thinning)],pvec[range(0,pvec.size,thinning)])
    elif axes == "semilogy":
        plt.semilogy(fvec[range(0,fvec.size,thinning)],pvec[range(0,pvec.size,thinning)])
    else:
        plt.plot(fvec[range(0,fvec.size,thinning)],pvec[range(0,pvec.size,thinning)])
    plt.title("Periodogram")
    plt.ylabel("DFT mean square amplitude")
    plt.xlabel("Frequency (1/m)")
    plt.show()
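
# Hypothetical usage of plotPeriodogram() above: the periodogram of a
# white-noise profile, with thinning passed explicitly so the project-specific
# thinningCheckup() helper is not required.
import numpy as np
z = np.random.randn(4096)
spec = np.fft.rfft(z)
fvec = np.fft.rfftfreq(z.size, d=1.0)   # frequencies in 1/m for 1 m spacing
pvec = np.abs(spec)**2 / z.size         # mean-square amplitude
plotPeriodogram(fvec, pvec, axes="loglog", thinning=1)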
def draw_log_hist(X):
    X = X.tocsc().tocoo()  # collapse multiple records. I don't think it is needed

    # we are interested only in the existence of a token in user posts, not its quantity
    vf = np.vectorize(lambda x: 1 if x > 0 else 0)
    X_data_booleaned = vf(X.data)
    X = coo_matrix((X_data_booleaned, (X.row, X.col)), shape=X.shape)

    # now we will calculate (1, 1, ... 1) * X to sum up rows
    features_counts = np.ones(X.shape[0]) * X

    features_counts_sorted = np.sort(features_counts)
    features_counts_sorted = features_counts_sorted[::-1]  # descending sort in numpy
    ranks = np.arange(features_counts_sorted.size)

    plt.figure()
    plt.semilogy(ranks, features_counts_sorted,
                 color='red',
                 linewidth=2)
    plt.title('For each feature (word), how many users have it at least once?')
    plt.ylabel("number of users that have this word at least once")
    plt.xlabel("rank")
    # plt.show()

    return features_counts
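
# Hypothetical usage of draw_log_hist() above with a small random
# user-by-token count matrix (scipy.sparse.random stands in for real
# bag-of-words data; the function assumes np, plt and coo_matrix in scope).
import scipy.sparse as sp
X = sp.random(100, 500, density=0.05, format='coo') * 10   # fake counts
feature_counts = draw_log_hist(X)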
Example 5
def pal5():
    from streams import usys

    r0 = np.array([[8.312877511,0.242593717,16.811943627]])
    v0 = ([[-52.429087,-96.697363,-8.156130]]*u.km/u.s).decompose(usys).value

    x0 = np.append(r0,v0)
    acc = np.zeros((2,3))
    potential = LawMajewski2010()

    def F(t,X):
        x,y,z,px,py,pz = X.T
        dH_dq = potential._acceleration_at(X[...,:3], 2, acc)
        return np.hstack((np.array([px, py, pz]).T, dH_dq))

    integrator = DOPRI853Integrator(F)
    nsteps = 100000
    dt = 0.1
    print(nsteps*dt*u.Myr)
    nsteps_per_pullback = 10
    d0 = 1e-5

    LEs, xs = lyapunov(x0, integrator, dt, nsteps,
                       d0=d0, nsteps_per_pullback=nsteps_per_pullback)

    print("Lyapunov exponent computed")
    plt.clf()
    plt.semilogy(LEs, marker=None)
    plt.savefig('pal5_le.png')

    plt.clf()
    plt.plot(np.sqrt(xs[:,0]**2+xs[:,1]**2), xs[:,2], marker=None)
    plt.savefig('pal5_orbit.png')
Example 6
	def __call__(self,u,v,w,bx,by,bz):
		q = 4

		d = (self.sim.zmx, self.sim.ymx, self.sim.xmx)
		vspec = spec(u,v,w,dims=d)
		bspec = spec(bx,by,bz,dims=d)

		plt.subplot(221)
		plt.plot(vspec)
		plt.semilogy()
		plt.title('v-spec')
		plt.axis("tight")
		plt.ylim([1e-12,1e-2])

		plt.subplot(222)
		plt.plot(vspec)
		plt.loglog()
		plt.title('v-spec')
		plt.axis("tight")
		plt.ylim([1e-12,1e-2])

		plt.subplot(223)
		plt.plot(bspec)
		plt.semilogy()
		plt.title('b-spec')
		plt.axis("tight")
		plt.ylim([1e-12,1e-2])

		plt.subplot(224)
		plt.plot(bspec)
		plt.loglog()
		plt.title('b-spec')
		plt.axis("tight")
		plt.ylim([1e-12,1e-2])
Example 7
def main():
    conn = krpc.connect()
    vessel = conn.space_center.active_vessel
    streams = init_streams(conn,vessel)
    print(vessel.control.throttle)
    plt.axis([0, 100, 0, .1])
    plt.ion()
    plt.show()

    t0 = time.time()
    timeSeries = []
    vessel.control.abort = False
    while not vessel.control.abort:

        t_now = time.time()-t0
        tel = Telemetry(streams,t_now)
        timeSeries.append(tel)
        timeSeriesRecent = timeSeries[-40:]

        plt.cla()
        plt.semilogy([tel.t for tel in timeSeriesRecent], [norm(tel.angular_velocity) for tel in timeSeriesRecent])
        #plt.semilogy([tel.t for tel in timeSeriesRecent[1:]], [quat_diff_test(t1,t2) for t1,t2 in zip(timeSeriesRecent,timeSeriesRecent[1:])])
        #plt.axis([t_now-6, t_now, 0, .1])
        plt.draw()
        plt.pause(0.0000001)
        #time.sleep(0.0001)

    with open('log.json','w') as f:
        f.write(json.dumps([tel.__dict__ for tel in timeSeries],indent=4))

    print('The End')
def plot_magnif_from_times(times, magnifs, fmt="--ro", magnif_log_scale=False):
    plt.xlabel("time ({})".format(times.unit))
    plt.ylabel("magnification")
    if magnif_log_scale:
        plt.semilogy(times, magnifs, fmt)
    else:
        plt.plot(times, magnifs, fmt)
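
# Hypothetical usage of plot_magnif_from_times() above: a toy point-source
# point-lens light curve, with times as an astropy Quantity so times.unit
# resolves in the xlabel.
import numpy as np
import astropy.units as u
times = np.linspace(-30, 30, 200) * u.day
uu = np.sqrt(0.1**2 + (times.value / 10.0)**2)       # impact parameter u(t)
magnifs = (uu**2 + 2) / (uu * np.sqrt(uu**2 + 4))    # standard PSPL magnification
plot_magnif_from_times(times, magnifs, fmt="-b", magnif_log_scale=True)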
Example 9
	def __call__(self,u,v,w,iteration):
		q = 4

		plt.cool()
		if self.x is None:
			ny = v.shape[1]
			nz = v.shape[0]
			self.x,self.y = np.meshgrid(range(ny),range(nz))
		x,y = self.x,self.y

		if self.iterations is None:
			self.iterations = self.sim.bulk_calc(getIteration())
		all_itr = self.iterations

		if self.xvar is None:
			class temp(sim_operation):
				def get_params(self):
					return ["u"]
				def __call__(self,u):
					return np.max(self.sim.ddx(u))

			self.xvar = self.sim.bulk_calc(temp())
		xvar_series = self.xvar

		vmin = np.min(xvar_series)
		vmax = np.max(xvar_series)
		if vmin <= 0:
			vmin = 0.000001
		if vmax <= vmin:
			vmax = 0.00001
	
		avgu = np.average(u,2)
		avgv = np.average(v,2)
		avgw = -np.average(w,2)
		xd = self.sim.ddx(u)
		xd2d = np.max(xd,2)
		xd1d = np.max(xd2d,1)

		plt.subplot(221)
		plt.imshow(avgu)
		plt.quiver(x[::q,::q],y[::q,::q],avgv[::q,::q],avgw[::q,::q])
		plt.title('Avg u')
		plt.axis("tight")

		plt.subplot(222)
		plt.imshow(xd2d)
		plt.title('Max x Variation (y-z)')
		plt.axis("tight")

		plt.subplot(223)
		plt.plot(xd1d)
		plt.title('Max x Variation (z)')
		plt.axis("tight")

		plt.subplot(224)
		plt.plot(all_itr,xvar_series, '--')
		plt.plot([iteration,iteration],[vmin,vmax])
		plt.semilogy()
		plt.title('Max x Variation (t)')
		plt.axis("tight")
Example 10
def compute_lyapunov(orbit_name, halo_shape='oblate'):
    if halo_shape == "prolate":
        params = prolate_params
        raise NotImplementedError()
    elif halo_shape == "oblate":
        params = oblate_params
    else:
        raise ValueError("Invalid halo shape.")

    integrator = DOPRI853Integrator(F, func_args=params)
    nsteps = 10000
    dt = 1.
    print(nsteps*dt*u.Myr)
    nsteps_per_pullback = 10
    d0 = 1e-5

    LEs, xs = lyapunov(x0, integrator, dt, nsteps,
                       d0=d0, nsteps_per_pullback=nsteps_per_pullback)

    print("Lyapunov exponent computed")
    plt.figure(figsize=(10,10))
    plt.clf()
    plt.semilogy(LEs, marker=None)
    plt.savefig('zotos_le_{}.png'.format(orbit_name))

    plt.clf()
    plt.plot(xs[:,0], xs[:,1], marker=None)
    plt.xlim(0,15)
    plt.ylim(-15,15)
    plt.savefig('zotos_orbit_{}.png'.format(orbit_name))
def plot_magnif_from_time_terms(time_terms, magnifs, fmt="--ro", magnif_log_scale=False):
    plt.xlabel("time term, (t - t_max)/t_E")
    plt.ylabel("magnification")
    if magnif_log_scale:
        plt.semilogy(time_terms, magnifs, fmt)
    else:
        plt.plot(time_terms, magnifs, fmt)
Example 12
 def advance(self, t, plotresult=False):
     y0 = self.concs * self.molWeight
     y0 = append(y0, self.thickness)
     yt = odeint(self.rightSideofODE, y0, t)
     if (plotresult):
         import matplotlib.pyplot as plt
         plt.figure()
         plt.axes([0.1, 0.1, 0.6, 0.85])
         plt.semilogy(t, yt)
         plt.ylabel('mass concentrations (kg/m3)')
         plt.xlabel('time(s)')
         #plt.legend(self.speciesnames)
         for i in range(len(self.speciesnames)):
             plt.annotate(
                 self.speciesnames[i], (t[-1], yt[-1, i]),
                 xytext=(20, -5),
                 textcoords='offset points',
                 arrowprops=dict(arrowstyle="-"))
         plt.show()
     self.thickness = yt[-1][-1]
     ytt = yt[-1][:-1]
     #        for iii in range(len(ytt)):
     #            if ytt[iii]<0:
     #                ytt[iii]=0.
     molDens = ytt / self.molWeight
     self.concs = molDens
     self.molFrac = molDens / sum(molDens)
     self.massFrac = ytt / sum(ytt)
Example 13
def _main():
    args = _parse_input_arguments()

    # read the file
    with open(args.filename) as handle:
        data = yaml.safe_load(handle)

    # Plot relresvecsi.
    #pp.subplot(121)
    # Mind that the last Newton datum only contains the final ||F||.
    num_newton_steps = len(data['Newton results']) - 1
    for k in range(num_newton_steps):
        pp.semilogy(
            data['Newton results'][k]['relresvec'],
            color=str(1.0 - float(k+1)/num_newton_steps)
            )
    pp.xlabel('Krylov step')
    pp.ylabel('||r||/||b||')
    pp.title('Krylov: %s    Prec: %r    ix-defl: %r    extra defl: %r    ExpRes: %r    Newton iters: %d' %
             (data['krylov'], data['preconditioner type'], data['ix deflation'],
              data['extra deflation'], data['explicit residual'], num_newton_steps)
             )
    if args.xmax:
        pp.xlim([0, args.xmax])
    pp.ylim([1e-10, 10])

    # Write the info out to files.
    if args.imgfile:
        pp.savefig(args.imgfile)
    if args.tikzfile:
        matplotlib2tikz.save(args.tikzfile)
    return
Example 14
def do_halomass_plots(fosc):
    """Plot halo mass, distance to the halo and the relationship between eq. width and column density"""
    ahalos = {4: CIVPlottingSpectra(5, myname.get_name(4, box=25), None, None,
                                    savefile="rand_civ_spectra.hdf5", spec_res=5., label=labels[4]+" 25"),
              9: CIVPlottingSpectra(5, myname.get_name(9, box=25), None, None,
                                    savefile="rand_civ_spectra.hdf5", spec_res=5., label=labels[9]+" 25"),
              7: CIVPlottingSpectra(5, myname.get_name(7, box=25), None, None,
                                    savefile="rand_civ_spectra.hdf5", spec_res=5., label=labels[7]+" 25")}
    for (ll, ahalo) in ahalos.items():
        ahalo.plot_eqw_mass("C",4,1548,color=colors[ll])
    plt.legend(loc="upper left")
    save_figure(path.join(outdir,"civ_eqwvsmass"))
    plt.clf()
    for (ll, ahalo) in ahalos.items():
        ahalo.plot_eqw_dist("C",4,1548,color=colors[ll])
    plt.legend(loc="upper left")
    save_figure(path.join(outdir,"civ_eqwvsdist"))
    plt.clf()
    ahalos['I']=CIVPlottingSpectra(68, path.expanduser("~/data/Illustris"), None, None, savefile="rand_civ_spectra.hdf5", spec_res=5.,label=labels['I']+" 75")
    ccc = {1e12: "yellow", 1e15:"red"}
    for tag in ('I', 4, 7):
        for nmin in (1e12, 1e15):
            nminstr = str(np.round(np.log10(nmin),1))
            #for (ll, ahalo) in ahalos.iteritems():
            ahalos[tag].plot_mass_hist(elem="C",ion=4,color=ccc[nmin], ls=lss[tag],nmin=nmin, label=ahalos[tag].label+" "+nminstr)
    plt.legend(loc="upper left",ncol=1)
    save_figure(path.join(outdir,"civ_halos_hist"))
    plt.clf()
    ahalos['I'].plot_eq_width_vs_col_den("C",4,1548)
    eqw = np.linspace(-3, 0.5,50)
    plt.semilogy(eqw, linear_cog_col(10**eqw, 1548, fosc), '-',color="black")
    plt.ylim(1e12,1e16)
    plt.xlim(-2.5,0.5)
    save_figure(path.join(outdir,"civ_eqwvscolden"))
    plt.clf()
Example 15
def plot_gens(images, rowlabels, losses):
    '''
    From great jupyter notebook by Tim Sainburg:
    http://github.com/timsainb/Tensorflow-MultiGPU-VAE-GAN
    '''
    examples = 8
    fig, ax = plt.subplots(nrows=len(images), ncols=examples, figsize=(18, 8))
    for i in range(examples):
        for j in range(len(images)):
            ax[(j, i)].imshow(create_image(images[j][i]), cmap=plt.cm.gray,
                              interpolation='nearest')
            ax[(j, i)].axis('off')
    title = ', '.join(str(i) for i in rowlabels)
    fig.suptitle('Top to Bottom: {}'.format(title))
    plt.show()
    #fig.savefig(''.join(['imgs/test_',str(epoch).zfill(4),'.png']),dpi=100)
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 10), linewidth = 4)

    D_plt, = plt.semilogy((losses['discriminator']), linewidth=4, ls='-',
                          color='b', alpha=.5, label='D')
    G_plt, = plt.semilogy((losses['generator']), linewidth=4, ls='-',
                          color='k', alpha=.5, label='G')

    leg = plt.legend(handles=[D_plt, G_plt],
                     fontsize=20)
    leg.get_frame().set_alpha(0.5)
    plt.show()
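
# Hypothetical usage of plot_gens() above with random batches, assuming
# create_image() from the same notebook reshapes a flat vector to an image.
import numpy as np
images = [np.random.rand(8, 28 * 28) for _ in range(3)]
losses = {'discriminator': np.exp(-np.linspace(0, 3, 100)),
          'generator': np.exp(-np.linspace(0, 2, 100))}
plot_gens(images, ['reconstruction', 'sample', 'interpolation'], losses)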
Example 16
def test_airy_2d(display=False):
    """ Test 2D airy function vs 1D function; both
    should yield the exact same results for a 1D cut across the 2d function.
    And we've already tested the 1D above...
    """

    fn2d = airy_2d(diameter=1.0, wavelength=1e-6, shape=(511, 511), pixelscale=0.010)
    r, fn1d = airy_1d(diameter=1.0, wavelength=1e-6, length=256, pixelscale=0.010)

    cut = fn2d[255, 255:].flatten()
    print(cut.shape)

    if display:
        
        plt.subplot(211)

        plt.semilogy(r, fn1d, label='1D')

        plt.semilogy(r, cut, label='2D', color='black', ls='--')

        plt.legend(loc='upper right')
        plt.axvline(0.251643, color='red', ls='--')
        plt.ylabel('Intensity relative to peak')
        plt.xlabel(r'Separation in $\lambda/D$')
 
        ax=plt.subplot(212)
        plt.plot(r, cut-fn1d)
        ax.set_ylim(-1e-8, 1e-8)
        plt.ylabel('Difference')
        plt.xlabel(r'Separation in $\lambda/D$')

    #print fn1d[0], cut[0]
    #print np.abs(fn1d-cut) #< 1e-9
    assert np.all( np.abs(fn1d-cut) < 1e-9)
Example 17
def test_airy_1d(display=False):
    """ Compare analytic airy function results to the expected locations
    for the first three dark rings and the FWHM of the PSF."""
    lam = 1.0e-6
    D = 1.0
    r, airyprofile = airy_1d(wavelength=lam, diameter=D, length=20480, pixelscale=0.0001)

    # convert to units of lambda/D
    r_norm = r*_ARCSECtoRAD / (lam/D)
    if display:
        plt.semilogy(r_norm,airyprofile)
        plt.axvline(1.028/2, color='k', ls=':')
        plt.axhline(0.5, color='k', ls=':')
        plt.ylabel('Intensity relative to peak')
        plt.xlabel(r'Separation in $\lambda/D$')
        for rad in airy_zeros:
            plt.axvline(rad, color='red', ls='--')

    airyfn = scipy.interpolate.interp1d(r_norm, airyprofile)
    # test FWHM occurs at 1.028 lam/D, i.e. HWHM is at 0.514
    assert abs(airyfn(0.5144938) - 0.5) < 1e-5

    # test first minima occur near 1.22 lam/D, 2.23, 3.24 lam/D
    # TODO investigate/improve numerical precision here?
    for rad in airy_zeros:
        #print(rad, airyfn(rad), airyfn(rad+0.005))
        assert airyfn(rad) < airyfn(rad+0.0003)
        assert airyfn(rad) < airyfn(rad-0.0003)
Example 18
def make_accuracy_plot():
	from numpy import linspace, mean
	from math import pi
	from matplotlib import pyplot as pl
	pl.rc('text', usetex=True)
	pl.rc('font', family='serif')
	pl.axvline(x=1,color='k')
	pl.xlabel(r'$\lambda$',fontsize=16)
	pl.ylabel(r'$\epsilon$',fontsize=16)
	print("Testing M=0")
	it_list = list()
	#We test for zero revolutions
	err,it,la,x,T = test_standard(M=100000,gooding=False,
		house=True,tau=False,iter_max=15,eps=1e-5, rnd_seed=4562,N=0)
	it_list.append(mean(it))
	pl.semilogy(la,err,'k.')
	#We test for multiple revolutions
	for rev in range(50):
		print "Testing M=" + str(rev)
		err,it,la,x,T = test_standard(M=10000,gooding=False,
			house=True,tau=False,iter_max=15,eps=1e-8, rnd_seed=4562+rev+1,N=rev+1)
		pl.semilogy(la,err,'k.')
		it_list.append(mean(it))
	pl.figure()
	pl.plot(it_list)
Example 19
def semilogy(x_vals, y_vals, x_label, y_label, figsize=(3.5, 2.5)):
    """绘图(y取对数)。"""		
    set_fig_size(mpl, figsize)
    plt.semilogy(x_vals, y_vals)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.show()
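
# Usage sketch for the semilogy() helper above (assumes the module's
# set_fig_size() helper and its mpl import, which are not shown here).
import numpy as np
epochs = np.arange(1, 51)
loss = np.exp(-0.1 * epochs) + 0.01
semilogy(epochs, loss, 'epoch', 'training loss')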
Example 20
 def plot(self):
     from matplotlib import pyplot
     pyplot.figure()
     it_ls = [] #ls = lineseach
     y_ls = []
     it_ga = [] #gradient approximation
     y_ga = []
     gradApprox = False
     for i in range(len(self.x)):
         y = norm( self.f_x[i] ) + 10**-9
         if i in self.notes:
             if self.notes[i] == 'starting gradient approximation':
                 gradApprox = True
             if self.notes[i] == 'finished gradient approximation':
                 gradApprox = False
         if gradApprox:
             it_ga.append( i )
             y_ga.append( y ) 
         else:
             it_ls.append( i )
             y_ls.append( y )
     pyplot.semilogy( it_ls, y_ls, 'go') 
     pyplot.semilogy( it_ga, y_ga, 'bx') 
     pyplot.xlabel('function evaluation')
     pyplot.ylabel('norm(f(x)) + 10**-9')
     pyplot.legend(['line searches', 'gradient approx' ])
                   
     pyplot.show()
Example 21
    def _double_hit_check(x):
        # first PSD
        W, fr = PSD(x, dt=dt)
        # second PSD: look for oscillations in PSD
        W2, fr2 = PSD(W, dt=fr[1])
        upto = int(0.01 * len(x))
        max_impact = np.max(W2[:upto])
        max_after_impact = np.max(W2[upto:])
        if plot_figure:
            import matplotlib.pyplot as plt
            plt.subplot(121)
            l = int(0.002*len(x))
            plt.plot(1000*dt*np.arange(l),  x[:l])
            plt.xlabel('t [ms]')
            plt.ylabel('F [N]')
            plt.subplot(122)
            plt.semilogy((W2/np.max(W2))[:5*upto])
            plt.axhline(limit, color='r')
            plt.axvline(upto, color='g')
            plt.xlabel('Double freq')
            plt.ylabel('')
            plt.show()

        return max_after_impact / max_impact > limit
Example 22
def create_plot(freq_mat, data_mat, length_vec, color_keys, plot_name, output_dir=""):
	print("Creating plot: {0:s}".format(plot_name) )
	pl.figure(1)
	pl.clf()
	
	for idx, freq in enumerate(freq_mat):
		data = data_mat[idx]
		length = length_vec[idx]
		
		num_pos = 0
		num_neg = 0
		for el in data:
			if el > 0:
				num_pos += 1
			if el < 0:
				num_neg +=1
		if num_pos > 0:
			pl.semilogy(freq/1e9, data, color_keys[length])
		if num_neg > 0:
			pl.semilogy(freq/1e9, -data, color_keys[length] + ":")
		pl.xlabel('Frequency (GHz)')
		pl.ylabel("PUL R ($\Omega$/m)")
		
	output_name = os.path.join(output_dir, plot_name)
	pl.savefig(output_name)
Example 23
def plot_trunc_gr_model(aval, bval, min_mag, max_mag, dmag, catalogue=None,
        completeness=None, figure_size=None, filename=None, filetype='png', 
        dpi=300):
    """
    Plots a Gutenberg-Richter model
    """
    input_model = TruncatedGRMFD(min_mag, max_mag, dmag, aval, bval)
    if not catalogue:
        # Plot only the modelled recurrence
        annual_rates, cumulative_rates = _get_recurrence_model(input_model)
        plt.semilogy(annual_rates[:, 0], annual_rates[:, 1], 'b-')
        plt.semilogy(annual_rates[:, 0], cumulative_rates, 'r-')
        plt.xlabel('Magnitude', fontsize='large')
        plt.ylabel('Annual Rate', fontsize='large')
        plt.legend(['Incremental Rate', 'Cumulative Rate'])
        _save_image(filename, filetype, dpi)
    else:
        completeness = _check_completeness_table(completeness, catalogue)
        plot_recurrence_model(input_model,
                              catalogue,
                              completeness,
                              input_model.bin_width,
                              figure_size,
                              filename,
                              filetype,
                              dpi)
Example 24
def plot_prob_sums(prob_sums, folder):
    """Plot the probability sums of the original motif against iterations."""
    # Create the folder if it does not exist
    if not os.path.exists("figures/%s" % folder):
        os.makedirs("figures/%s" % folder)

    # Plot on automatic axis
    figure_file = "figures/%s/probability_mass.png" % folder
    plt.title("%s | Prob Mass of Original Motif" % folder)
    plt.ylabel("Probability Mass")
    plt.xlabel("Iterations")
    plt.grid(True)
    plt.plot(prob_sums)
    pylab.savefig(figure_file, format="png")
    plt.clf()
    
    # Plot on semi-log axis
    figure_file = "figures/%s/probability_mass_semilog.png" % folder
    plt.title("%s | Prob Mass of Original Motif" % folder)
    plt.xlabel("Iterations")
    plt.ylabel("Probability Mass (semi-log)")
    plt.grid(True)
    plt.semilogy(prob_sums)
    pylab.savefig(figure_file, format="png")
    plt.clf()
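
# Hypothetical usage of plot_prob_sums() above: a decaying probability-mass
# trace for a run labeled "demo" (writes figures/demo/probability_mass*.png;
# assumes the module's os, plt and pylab imports).
import numpy as np
prob_sums = np.exp(-np.linspace(0, 5, 200))
plot_prob_sums(prob_sums, "demo")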
Example 25
def plot_KLs(KLs, folder, filename, title):
    if not os.path.exists("figures/%s" % folder):
        os.makedirs("figures/%s" % folder)

    # Plot on automatic axis
    figure_file = "figures/%s/%s.png" % (folder, filename)
    plt.title("%s | %s" % (folder, title))
    plt.ylabel("KL")
    plt.xlabel("Iterations")
    plt.grid(True)
    plt.plot(KLs)
    pylab.savefig(figure_file, format="png")
    plt.clf()
    
    # Plot on semi-log axis
    try:
        figure_file = "figures/%s/%s_semilog.png" % (folder, filename)
        plt.title("%s | %s" % (folder, title))
        plt.xlabel("Iterations")
        plt.ylabel("KL (semi-log)")
        plt.grid(True)
        plt.semilogy(KLs)
        pylab.savefig(figure_file, format="png")
    except Exception:
        debug("Could not generate semilog plot.")
    plt.clf()
def plot_cmf_with_arbitrary_input(catalog, cmf_input, bins=20, hist_range=(4, 7), 
                                  mass_column_name='mass', **kwargs):

    fig = plt.figure()

    number_in_bin_q2, bin_edges, ch = plt.hist(np.log10(catalog[mass_column_name]), cumulative=-1, log=True, bins=bins, range=hist_range)
    bin_centers_q2 = (bin_edges[1:] + bin_edges[:-1])/2.
    plt.clf()
    plt.plot(bin_centers_q2, number_in_bin_q2, 'ko' )
    plt.semilogy()
    plt.xlabel(r"log$_{10}$ (M$_{GMC}$ / M$_\odot$)")
    plt.ylabel("n(M > M')")

    M_0, N_0, gamma = cmf_input

    m_array = np.linspace(min(bin_edges), max(bin_edges), 50)
    n_array = truncated_cloudmass_function([M_0, N_0, gamma], 10**m_array)

    plt.plot(m_array, n_array, label="$\\gamma = {0:.2f}$,\n$M_0={1:.2e}$,\n$N_0={2:.1f}$".format(gamma, M_0, N_0))

    text_string = r"$N(M' > M) = N_0 \left [ \left ( \frac{M}{M_0} \right )^{\gamma+1} - 1 \right ]$"

    plt.text(4.1, 3, text_string, fontsize=18)
    plt.xlim(*hist_range)
    plt.ylim(0.7, 1e3)

    plt.legend(loc='upper right')

    return fig    
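
# Hypothetical usage of plot_cmf_with_arbitrary_input() above: a fake GMC
# catalog with log-uniform masses. The cmf_input values are illustrative only,
# and truncated_cloudmass_function is assumed from the same module.
import numpy as np
import pandas as pd
catalog = pd.DataFrame({'mass': 10**np.random.uniform(4, 7, 500)})
fig = plot_cmf_with_arbitrary_input(catalog, (1e7, 30.0, -1.8))  # (M_0, N_0, gamma)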
Example 27
def on_off_experiment2(num_motifs=100,filename="gini-vs-mi-correlation-in-on-off-spoofs.pdf"):
    """compare MI vs Gini on biological_motifs"""
    bio_motifs = [getattr(tfdf,tf) for tf in tfdf.tfs]
    Ns = list(map(len, bio_motifs))
    spoofses = [spoof_on_off_motif(motif,num_motifs=num_motifs,trials=1) for motif in bio_motifs]
    spoof_ginises = mmap(motif_gini,tqdm(spoofses))
    spoof_mises = mmap(total_motif_mi,tqdm(spoofses))
    cors, ps = [],[]
    for ginis, mis in zip(spoof_ginises, spoof_mises):
        cor, p = pearsonr(ginis,mis)
        cors.append(cor)
        ps.append(p)
    q = fdr(ps)
    
    plt.scatter(cors, ps)
    plt.plot([-1,1],[q,q],linestyle='--',label="FDR-Adjusted Significance Level")
    plt.semilogy()
    plt.legend()
    plt.xlabel("Pearson Correlation Coefficient")
    plt.ylabel("P value")
    plt.xlim([-1,1])
    plt.ylim([10**-4,1+1])
    cor_ps = zip(cors,ps)
    sig_negs = [(c,p) for (c,p) in cor_ps if c < 0 and p < q]
    sig_poses = [(c,p) for (c,p) in cor_ps if c > 0 and p < q]
    insigs = [(c,p) for (c,p) in cor_ps if p > q]
    def weighted_correlation(cor_p_Ns):
        cors,ps,Ns = transpose(cor_p_Ns)
        return sum([cor*N for (cor,N) in zip (cors,Ns)])/sum(Ns)
    plt.title("Gini-MI Correlation Coefficient vs. P-value for On-Off Simulations from Prokaryotic Motifs")
    maybesave(filename)
Example 28
def plota_teste5(arqsaida):
    n, c, t = np.loadtxt(arqsaida, unpack=True)

    # Fit a degree-2 polynomial to the comparison counts by least squares
    coefs = np.polyfit(n, c, 2)
    p = np.poly1d(coefs)

    plt.semilogy(n, p(n), label='$n^2$')
    plt.semilogy(n, c, 'ro', label='bubble sort')

    # Place the legend
    plt.legend(loc='upper left')

    # Set the title
    plt.title('Time complexity analysis\nof bubble sort')

    # Label the axes
    plt.xlabel('Array size (n)')
    plt.ylabel('Number of comparisons')

    plt.savefig('bubble5.png')
    plt.show()
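
# Hypothetical usage of plota_teste5() above: write a fake (n, comparisons,
# time) table in the format np.loadtxt expects, then plot it.
import numpy as np
n = np.arange(100, 2100, 100)
c = n.astype(float)**2 * (1 + 0.05 * np.random.randn(n.size))  # ~n^2 comparisons
t = c * 1e-8                                                   # fake runtimes
np.savetxt('teste5.txt', np.column_stack([n, c, t]))
plota_teste5('teste5.txt')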
def throughputs(output='throughputs.pdf'):
    """
    Plot throughputs, compares to HST WFC3 UVIS F600LP

    :param output: name of the output file
    :type output: str

    :return: None
    """
    #comparison
    bp1 = S.ObsBandpass('wfc3,uvis2,f600lp')

    #VIS
    bp, bpEoL = _VISbandpass()

    #ghost
    bpG, bpEoLG = _VISbandpassGhost()

    #plot
    plt.semilogy(bp1.wave/10., bp1.throughput, 'r-', label='WFC3 F600LP')
    plt.semilogy(bp.wave/10., bp.throughput, 'b-', label='VIS Best Estimate')
    plt.semilogy(bpEoL.wave/10., bpEoL.throughput, 'g--', label='VIS EoL Req.')
    plt.semilogy(bpG.wave/10., bpG.throughput, 'm-', label='VIS Ghost')
    plt.semilogy(bpEoLG.wave/10., bpEoLG.throughput, 'y-.', label='VIS Ghost EoL')
    plt.xlim(230, 1100)
    plt.xlabel(r'Wavelength [nm]')
    plt.ylabel(r'Total System Throughput')
    plt.legend(shadow=True, fancybox=True, loc='best')
    plt.savefig(output)
    plt.close()
def runAlgorithm(data, d, k):
    W = None

    alphas = 10.0 ** linspace(-4, 1, 6)
    #alphas = 10.0 ** array([-2,-1,0,-2,-3,-2])
    expNum = len(alphas)
    allStates = []
    for (alpha, i) in zip(alphas, count(1)):
        states = jointlyPenalizedMultiplePCA(data, d, alpha, W, k=k)
        allStates.append(states)
        weightVectors = [s.weights for s in states]
        W = array(weightVectors)

    figure()
    for (states, alpha, i) in zip(allStates, alphas, count(1)):
        subplot(expNum, 1, i)
        weightVectors = [s.weights for s in states]
        W = array(weightVectors)
        plot(W.T)
        title(r'Run with $\alpha $ = %f' % alpha)

    figure()
    for (states, alpha, i) in zip(allStates, alphas, count(1)):
        subplot(expNum, 1, i)
        lossVectors = [s.squaredLosses() for s in states]
        L = array(lossVectors)
        semilogy(L.T)
        title(r'Run with $\alpha $ = %f' % alpha)

    show()
Example 31
    def norm_plot(self, loss_name='loss', acc_name='acc'):
        self.norm = [float(i) for i in self.norm]
        x = [
            np.linspace(0,
                        self.data[i].shape[0],
                        self.data[i].shape[0],
                        endpoint=False) * self.norm[i]
            for i in range(len(self.data))
        ]

        plt.figure(figsize=(15, 7))
        plt.subplot(121)
        for i, d in enumerate(self.data):
            loss_name_tmp = loss_name
            if loss_name not in self.header[i]:
                for h in self.header[i]:
                    if 'loss' in h:
                        loss_name_tmp = h
                        print('%d: use %s\n' % (i, loss_name_tmp))
                        break
            if len(self.name) == len(self.labels):
                plt.semilogy(x[i],
                             d[:, self.header[i].index(loss_name_tmp)],
                             label='train_loss_' + self.labels[i])
                plt.semilogy(x[i],
                             d[:,
                               self.header[i].index('val_' + loss_name_tmp)],
                             label='val_loss_' + self.labels[i])
            else:
                plt.semilogy(x[i],
                             d[:, self.header[i].index(loss_name_tmp)],
                             label='train_loss_' + str(i))
                plt.semilogy(x[i],
                             d[:,
                               self.header[i].index('val_' + loss_name_tmp)],
                             label='val_loss_' + str(i))
        plt.legend(loc=0)
        plt.xlabel('Training time (h)')
        plt.ylabel('Loss')
        plt.grid()

        plt.subplot(122)
        for i, d in enumerate(self.data):
            acc_name_tmp = acc_name
            if acc_name not in self.header[i]:
                for h in self.header[i]:
                    if 'acc' in h:
                        acc_name_tmp = h
                        print('%d: use %s' % (i, acc_name_tmp))
                        break
            if len(self.name) == len(self.labels):
                plt.plot(x[i],
                         d[:, self.header[i].index(acc_name_tmp)],
                         label='train_acc_' + self.labels[i])
                plt.plot(x[i],
                         d[:, self.header[i].index('val_' + acc_name_tmp)],
                         label='val_acc_' + self.labels[i])
            else:
                plt.plot(x[i],
                         d[:, self.header[i].index(acc_name_tmp)],
                         label='train_acc_' + str(i))
                plt.plot(x[i],
                         d[:, self.header[i].index('val_' + acc_name_tmp)],
                         label='val_acc_' + str(i))
        plt.legend(loc=0)
        plt.xlabel('Training time (h)')
        plt.ylabel('Accuracy')
        plt.grid()
Example 32
mp.figure('Filter', facecolor='lightgray')
mp.subplot(221)
mp.title('Time Domain', fontsize=16)
mp.ylabel('Signal', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(times[:178], noised_sigs[:178],
        c='orangered', label='Noised')
mp.legend()
mp.subplot(222)
mp.title('Frequency Domain', fontsize=16)
mp.ylabel('Power', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.semilogy(freqs[freqs >= 0],
            noised_pows[freqs >= 0],
            c='limegreen', label='Noised')
mp.legend()
mp.subplot(223)
mp.xlabel('Time', fontsize=12)
mp.ylabel('Signal', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(times[:178], filter_sigs[:178],
        c='hotpink', label='Filter')
mp.legend()
mp.subplot(224)
mp.xlabel('Frequency', fontsize=12)
mp.ylabel('Power', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
Example 33
def main():
    Nz = []
    for zm, zw in [(0.5, 0.1), (0.8, 0.2), (1.1, 0.25)]:
        zarr = np.linspace(zm - 3 * zw, zm + 3 * zw, 1000)
        Nzarr = np.exp(-(zm - zarr)**2 / (2 * zw**2))
        Nz.append((zarr, Nzarr))
    Nt = len(Nz)
    t = GSKY_Theory(Nz)
    larr = np.linspace(100, 2000, 20)
    import matplotlib.pyplot as plt

    plt.figure(figsize=(10, 6))

    for count in range(2):

        if (count == 0):
            style = '-'
            lf = lambda x: x
        else:
            style = '--'
            lf = lambda x: None
            t.set_cosmology(
                ccl.Cosmology(Omega_c=0.3,
                              Omega_b=0.045,
                              h=0.67,
                              sigma8=0.83,
                              n_s=0.96))
            t.set_params({'mmin': 11.5})

        plt.subplot(2, 4, 1)
        plt.gca().set_prop_cycle(None)
        for i in range(Nt):
            for j in range(i, Nt):
                plt.plot(larr,
                         t.getCls('gg', larr, i, j),
                         style,
                         label=lf('GG %i x %i' % (i, j)))

        plt.subplot(2, 4, 2)
        plt.gca().set_prop_cycle(None)
        for i in range(Nt):
            for j in range(Nt):
                plt.plot(larr,
                         t.getCls('gs', larr, i, j),
                         style,
                         label=lf('GS %i x %i' % (i, j)))

        plt.subplot(2, 4, 3)
        plt.gca().set_prop_cycle(None)
        for i in range(Nt):
            for j in range(i, Nt):
                plt.plot(larr,
                         t.getCls('ss', larr, i, j),
                         style,
                         label=lf('SS %i x %i' % (i, j)))

        plt.subplot(2, 4, 4)
        plt.gca().set_prop_cycle(None)
        for i in range(Nt):
            plt.plot(larr,
                     t.getCls('gk', larr, i),
                     style,
                     label=lf('GK %i' % i))

        plt.subplot(2, 4, 5)
        plt.gca().set_prop_cycle(None)
        for i in range(Nt):
            plt.plot(larr,
                     t.getCls('sk', larr, i),
                     style,
                     label=lf('SK %i' % i))

        plt.subplot(2, 4, 6)
        plt.gca().set_prop_cycle(None)
        for i in range(Nt):
            plt.plot(larr,
                     t.getCls('gy', larr, i),
                     style,
                     label=lf('GY %i' % i))

        plt.subplot(2, 4, 7)
        plt.gca().set_prop_cycle(None)
        for i in range(Nt):
            plt.plot(larr,
                     t.getCls('sy', larr, i),
                     style,
                     label=lf('SY %i' % i))

        plt.subplot(2, 4, 8)
        plt.gca().set_prop_cycle(None)
        plt.plot(larr, t.getCls('kk', larr), style, label=lf('KK'))

    for c in range(1, 9):
        plt.subplot(2, 4, c)
        plt.xlabel(r'$\ell$')
        plt.ylabel(r'$C_\ell$')
        plt.legend(fontsize=6)
        plt.semilogy()
        plt.tight_layout()

    plt.show()
Example 34
    batch_size=batch_size,
    epochs=epochs,
    verbose=2,
    validation_split=vsplit,
    callbacks=callbacks_list)

# score = model.evaluate(x_test, y_test,
#                        batch_size=batch_size, verbose=1)
# print('Test score:', score[0])
# print('Test accuracy:', score[1])
if vsplit:
    # summarize history for loss
    fig = plt.figure()
    # plt.plot(history.history['loss'])
    # plt.plot(history.history['val_loss'])
    plt.semilogy(history.history['loss'])
    plt.semilogy(history.history['val_loss'])
    plt.title('mse')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()

# visualisation
# 1. analytical solution
x_test = np.zeros((ny * nx, 2))
surface = np.zeros((ny, nx))
for i, x in enumerate(x_space):
    for j, y in enumerate(y_space):
        x_test[i * nx + j] = [x, y]
        surface[i][j] = undecorated(analytic_solution)([x, y])
            if BER_vector[-1] < BER_go_on_in_smaller_steps:
                EbN0_dB_vector = np.append(
                    EbN0_dB_vector,
                    EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth)
            else:
                EbN0_dB_vector = np.append(
                    EbN0_dB_vector,
                    EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth)

            BER_vector = np.append(BER_vector, 0)
        else:
            ready = True

    #Plot
    plt.figure()
    plt.semilogy(EbN0_dB_vector, BER_vector)
    plt.xlabel('Eb/N0')
    plt.ylabel('Bit Error Rate ')
    plt.grid(True)
    plt.show()

    np.savez(os.path.join(pathname, 'BER_results'),
             EbN0_dB_vector=EbN0_dB_vector,
             BER_vector=BER_vector)

    plt.savefig(os.path.join(pathname, 'BER_figure.pgf'))
    plt.savefig(os.path.join(pathname, 'BER_figure.pdf'))

    res_dict = {
        "EbN0_dB_vector": EbN0_dB_vector,
        "BER_vector": BER_vector,
Example 36
    for i in range(int(len(max_index[0]))):
        if i == 0:
            #aux = np.concatenate((np.array(list(('{0:04b}'.format(int(max_index[0][0])-1)).zfill(4)), dtype=int),np.array(list(('{0:04b}'.format(int(max_index[0][1])-1)).zfill(4)), dtype=int)), axis=0)    
            aux = np.concatenate((np.array(list(('{0:04b}'.format(int(max_index[0][0]))).zfill(4)), dtype=int),np.array(list(('{0:04b}'.format(int(max_index[0][1]))).zfill(4)), dtype=int)), axis=0)
        elif i > 1:
            aux = np.concatenate((aux,np.array(list(('{0:04b}'.format(int(float(max_index[0][i])))).zfill(4)), dtype=int)), axis=0)
    ipHat_soft = aux

    #counting the errors
    #nErr_hard = np.zeros((len(Eb_N0_dB)))
    nErr_hard[yy] = len(np.where((ip - ipHat_hard[0]) != 0)[0])
    #nErr_soft = np.zeros((len(Eb_N0_dB)))
    nErr_soft[yy] = len(np.where((ip - ipHat_soft) != 0)[0])
    
theoryBer = 0.5*special.erfc(np.sqrt(10**(Eb_N0_dB/10)))
simBer_hard = nErr_hard/N
simBer_soft = nErr_soft/N

plt.semilogy(Eb_N0_dB, theoryBer,'bo',Eb_N0_dB, theoryBer,'b')
plt.semilogy(Eb_N0_dB, theoryBer,'bo',label='theory - uncoded')
plt.semilogy(Eb_N0_dB, simBer_hard, 'mo',Eb_N0_dB, simBer_hard, 'm')
plt.semilogy(Eb_N0_dB, simBer_hard, 'mo',label="simulation - Hamming 7,4 (hard)")
plt.semilogy(Eb_N0_dB, simBer_soft,'ro',Eb_N0_dB, simBer_soft, 'r')
plt.semilogy(Eb_N0_dB, simBer_soft,'ro',label='simulation - Hamming 7,4 (soft)')
plt.xlabel("Eb/No, dB")
plt.ylabel("Bit Error Rate")

#plt.legend(borderaxespad=0)
plt.legend()

plt.show()
Example 37
def opt_lf_num_bits(lf_params,
                    min_bits,
                    max_bits,
                    rms_filt_error=0.1,
                    noise_figure=1,
                    sim_steps=1000,
                    sim_runs=10,
                    fpoints=512,
                    mode="tdc",
                    sigma_ph=0.1,
                    tdc_in_stdev=1,
                    plot=False):
    """ optimize number of bits for a digital direct form-I implementation using two's complement
        representation fixed point words with all parts of the data path with same data representation
        args:
            noise_figure: the maximum dB increase in noise due to loop filter quantization
            rms_filt_error : RMS value in dB for allowable filter error
    """
    print("\n********************************************************")
    print("Optimizing loop filter digital direct form-I implementation for")
    print("number of bits in fixed point data words utilized")
    sign_bits = 1
    # find number of integer bits needed
    int_bits = n_int_bits(lf_params)
    print("\n* Integer bits = %d" % int_bits)
    """ Optimization for quantization noise
    """
    print("\n* Optimizing for quantization noise:")
    # find optimal number of bits for quantization noise
    # generate white noise signal w simulating "regular" activity
    if lf_params["mode"] == "tdc":
        # w = np.floor(np.random.normal(0, 0.1*lf_params["m"], sim_steps))
        w = np.floor(np.random.normal(0, tdc_in_stdev, sim_steps))
    else:  # BBPD mode, test sequence is random +/- 1
        # w = np.random.normal(0, np.sqrt((1-2/np.pi)), sim_steps)
        w = np.random.choice((-1, 1), sim_steps)  # random +/- 1 test sequence

    pow_npd_post_lf = var_npd_post_lf(
        lf_params, mode=mode)  # variance of TDC noise at loop filter

    lf_ideal = LoopFilterPIPhase(ignore_clk=True, **lf_params)
    x_ideal = np.zeros(sim_steps)
    for n in range(sim_steps):
        x_ideal[n] = lf_ideal.update(w[n], 0)

    mses = []
    bit_range = range(min_bits - int_bits - 1, max_bits - int_bits)
    for frac_bits in bit_range:
        # use a large number of int bits to avoid overflow. Tuning here is with frac bits as
        runs = np.zeros(sim_runs)
        for m in range(sim_runs):
            lf_quant = LoopFilterPIPhase(ignore_clk=True,
                                         int_bits=32,
                                         frac_bits=frac_bits,
                                         quant_filt=False,
                                         **lf_params)
            x_quant = np.zeros(sim_steps)
            for n in range(sim_steps):
                x_quant[n] = lf_quant.update(w[n], 0)
            runs[m] = np.var(x_ideal - x_quant)
        mse = np.average(runs)
        print(
            "\tBits = %d,\t #(sign,int,frac) = (%d,%d,%d), \tQuant noise power = %E LSB^2"
            % ((sign_bits + int_bits + frac_bits), sign_bits, int_bits,
               frac_bits, mse))
        mses.append(mse)
    threshold = (10**(noise_figure / 10.0) - 1) * pow_npd_post_lf
    print("Threshold=%E, PD noise post-LF=%E" % (threshold, pow_npd_post_lf))
    for n, v in enumerate(mses):
        if v < threshold:
            break
    opt_frac_bits_qn = bit_range[n]
    print(
        "* Optimum int bits = %d, frac bits = %d, sign bits = 1, quant noise = %.3f LSB^2"
        % (int_bits, opt_frac_bits_qn, mses[n]))
    if plot:
        plt.figure(1)
        plt.clf()
        plt.semilogy(np.arange(min_bits, max_bits + 1), mses)
        plt.title("RMS Quantization Noise versus Filter Coefficient Bits")
        plt.xlabel("Total bits")
        plt.grid()
        plt.ylabel("DAC LSB$^2$")
        razavify()
        ticks = plt.yticks()[0]
        plt.yticks(ticks,
                   ["10$^{%d}$" % int(round(np.log10(x))) for x in ticks])
        plt.xlim(min_bits, max_bits)
        ticks = plt.xticks()[0]
        plt.xticks(ticks, ["%d" % x for x in ticks])

    #////////////////////////////////////////////////////////////////////////////////////
    """ Optimization for filter accuracy
    """
    print("\n* Optimizing for filter design accuracy:")
    fmin = 1e2
    fref = lf_params["fref"]

    b = [lf_params["b0"], lf_params["b1"]]
    a = [1, -1]
    f, h_ideal = scipy.signal.freqz(b,
                                    a,
                                    np.geomspace(fmin, fref / 2, fpoints),
                                    fs=fref)
    s = 2j * np.pi * f
    l = 2 * np.pi * lf_params["kpd"] * lf_params["kdco"] * h_ideal / s
    g = l / (1 + l)
    bit_range = range(min_bits - int_bits - 1, max_bits - int_bits)
    mses = []
    # print(lf_params["b0"], lf_params["b1"])
    for frac_bits in bit_range:
        _lf_params = quant_lf_params(lf_params, int_bits, frac_bits)
        b = [_lf_params["b0"], _lf_params["b1"]]
        # print(_lf_params["b0"], _lf_params["b1"])
        f, h = scipy.signal.freqz(b,
                                  a,
                                  np.geomspace(fmin, fref / 2, fpoints),
                                  fs=fref)
        s = 2j * np.pi * f
        l = 2 * np.pi * lf_params["kpd"] * lf_params["kdco"] * h / s
        _g = l / (1 + l)
        # mses.append(np.var(20*np.log10(np.abs(h[1:]))-20*np.log10(np.abs(h_ideal[1:]))))
        mses.append(
            np.var(20 * np.log10(np.abs(g[1:])) -
                   20 * np.log10(np.abs(_g[1:]))))
        # print("\tN bits = %d\tMSE = %E dB^2"%(frac_bits+int_bits+sign_bits, mses[-1]))
        print(
            "\tBits = %d,\t #(sign,int,frac) = (%d,%d,%d), \tMSE = %E LSB^2" %
            ((sign_bits + int_bits + frac_bits), sign_bits, int_bits,
             frac_bits, mses[-1]))
    n = len(mses) - 1
    for n, v in enumerate(mses):
        if v < rms_filt_error**2:
            break
    opt_frac_bits_filt_acc = bit_range[n]
    print(
        "* Optimum int bits = %d, frac bits = %d, sign_bits=1, quant noise = %E LSB^2"
        % (int_bits, opt_frac_bits_filt_acc, mses[n]))
    if plot:
        plt.figure(2)
        plt.clf()
        plt.semilogy(np.arange(min_bits, max_bits + 1), mses)
        plt.title("MSE Filter Error (dB) versus Filter Coefficient Bits")
        plt.xlabel("Total bits")
        plt.ylabel("MSE [dB$^2$]")
        plt.grid()
        razavify()
        ticks = plt.yticks()[0]
        plt.yticks(ticks,
                   ["10$^{%d}$" % int(round(np.log10(x))) for x in ticks])
        plt.xlim(min_bits, max_bits)
        ticks = plt.xticks()[0]
        plt.xticks(ticks, ["%d" % x for x in ticks])

    frac_bits = max(opt_frac_bits_qn, opt_frac_bits_filt_acc)
    print("\n* Optimization complete:")
    print("\tInt bits = %d, frac bits = %d, sign bits = 1" %
          (int_bits, frac_bits))
    print("\tTotal number bits = %d" % (int_bits + frac_bits + sign_bits))
    return int_bits, frac_bits
Example 38
previousMAE = float('inf')

print(2)
for i in range(1000):
    ANN.fit(featTrain, labelsTrain)
    epochs.append(ANN.n_iter_)
    yPredict = ANN.predict(featTrain)
    MAEtrain.append(np.mean(np.abs(yPredict - labelsTrain)))
    yPredict = ANN.predict(featVal)
    MAEval.append(np.mean(np.abs(yPredict - labelsVal)))
    if MAEval[-1] >= previousMAE and ANN.n_iter_ % 10 == 0:
        break
    if ANN.n_iter_ % 10 == 0:
        previousMAE = MAEval[-1]

# Graph training and validation error vs training time
plt.figure(2, figsize=(12, 4), dpi=100)
plt.clf()
plt.semilogy(epochs, MAEval, label='validation')
plt.semilogy(epochs, MAEtrain, label='training')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('MAE in ANN output')
plt.savefig('figure2.png')

# Pickle ANN for later use
pickle.dump(ANN, open("ANN.pkl", "wb"))
pickle.dump(trans, open("normalizationScalar.pkl", "wb"))

t2 = time.time()
print('Run time (s):', round(t2 - t, 2))
testerr=[]
trainerr=[]

err = np.zeros(len(d))
for i,k in enumerate(n_learners):
    J = np.zeros(nFolds)
    for iFold in range(nFolds):
        Xti, Xvi, Yti, Yvi = ml.crossValidate(Xtr, Ytr, nFolds, iFold);
        gbm0 = GradientBoostingClassifier(n_estimators=k)
        gbm0.fit(Xti, Yti)
        mean_acc = gbm0.score(Xti, Yti)
        trainerr.append(1 - mean_acc);
        mean_acc = gbm0.score(Xvi, Yvi)
        J[iFold] = 1 - mean_acc
        testerr.append(1 - mean_acc);
plt.semilogy(depth, testerr, marker='o', color='r')
plt.semilogy(depth, trainerr, marker='o', color='b')
plt.xlabel('Number of learners')
plt.ylabel('Error rate')
plt.show()

testAUC = []
trainAUC = []
err = np.zeros(len(d))
for i,k in enumerate(depth):
    J = np.zeros(nFolds)
    for iFold in range(nFolds):
        Xti, Xvi, Yti, Yvi = ml.crossValidate(Xtr, Ytr, nFolds, iFold);
        gbm0 = GradientBoostingClassifier(n_estimators=1000,max_depth=k)
        gbm0.fit(Xti, Yti)
        mean_acc = gbm0.score(Xti, Yti)
slice1 = np.array(df1, dtype=float)
slice1.shape

freqs, times, Sx = signal.spectrogram(slice1, fs=sampling_frequency, nperseg=1024, noverlap=512, window='hann', return_onesided=True)

freqs.shape

plt.pcolormesh(times, freqs, Sx, shading='gouraud')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.show()

"""##### Power Spectral Density """

f, Pxx_den = signal.welch(slice1, sampling_frequency, nperseg=1024)
plt.semilogy(f, Pxx_den)
plt.ylim([0.5e-3, 1])
plt.xlabel('frequency [Hz]')
plt.ylabel('PSD [V**2/Hz]')
plt.show()

"""#### FFT on ADC_02

##### DFT
"""

y2 = df2['var_names']
c2 = fftpack.fft(y2)  # returns DFT of real or complex sequences
f2 = np.fft.fftfreq(len(y2),time_step)
print(c2.shape)
Example 41
    alpha_is_correct = tf.equal(tf.argmax(alpha, 1), tf.argmax(label, 1))
    accuracy = tf.reduce_mean(tf.cast(alpha_is_correct, tf.float32))
    alpha_values = alpha.eval({inp: test_images, label: test_labels})

    index_first_mistaken_number = alpha_is_correct.eval({
        inp: test_images,
        label: test_labels
    }).tolist().index(False)
    mistaken_alpha = tf.argmax(alpha_values[index_first_mistaken_number, :])
    mistaken_label = tf.argmax(test_labels[index_first_mistaken_number, :])

    print('Accuracy:', accuracy.eval({inp: test_images, label: test_labels}))
    print('The first mistaken number')
    print('-------------------------')
    print('Alpha:', mistaken_alpha.eval())
    print('Label:', mistaken_label.eval())

    mistaken_number = test_images[index_first_mistaken_number, :].reshape(
        (28, 28))
    plt.imshow(mistaken_number, cmap='gray')


with tf.Session() as sess:
    alpha, E_1 = train_neural_network(sess)
    evaluate_model(sess, alpha)

plt.figure('Test error')
plt.semilogy(np.linspace(0, T, len(E_1)), E_1, label='$E_1$')
plt.legend()
plt.show()
Example 42
p = np.zeros(n)

for i in trange(N,desc="1"):
    for j in range(n):
        if random_walks[i,1] > 0:
            if random_walks[i,j] <0:
                p[j] += 1
                break
        if random_walks[i,1] < 0:
            if random_walks[i,j] >0:
                p[j] += 1
                break

p = p / np.linalg.norm(p)
logt = np.log(t)[2:]
alpha = np.log(p[2:])
coeffs = np.polyfit(logt, alpha,1)
print(coeffs)

plt.figure()
plt.subplot(121)
plt.semilogy(p[2:], label = "Probability")
plt.legend()
plt.grid()

plt.subplot(122)
plt.plot(alpha, label = r"$\alpha$")
plt.plot(logt*coeffs[0] + coeffs[1], label = r"Linear fit. $\alpha = ${:.3f}".format(coeffs[0]))
plt.grid()
plt.legend()
plt.show()
s = StandardScaler().fit_transform(activity)
normalize = pd.DataFrame(data=s)
print(normalize.head())

#do the PCA
pca = PCA(n_components=3)
prin_Comp = pca.fit_transform(s)
prin_CompDf = pd.DataFrame(data=prin_Comp,
                           columns=['prin_comp1', 'prin_comp2', 'prin_comp3'])

prin_CompDf.head()

# Join the label to the data and un-comment 'for label data below'
# pca_data = pd.concat([prin_CompDf, activity[['0']]], axis = 1)
# print(pca_data.head(5))

# for no-label data
# normalize.to_csv('./datas/normalize.csv')
prin_CompDf.to_csv('./evaluate/thirtydays_feature.csv')
plt.semilogy(prin_CompDf, '--o')
plt.title('Feature after PCA')
plt.show()

# For label data
# # normalize.to_csv('./datas/normalize.csv')
# pca_data.to_csv('./data/cane_data/pca_dis.csv')
# plt.semilogy(pca_data, '--o')
# plt.title('Feature after PCA')
# plt.show()
Example 44
means = []
low = []
up = []
for i in range(len(time)):
    s = []
    for j in range(len(count)):
        s.append(size[j][i])
    
    s_work = np.array(s)
    means.append(np.mean(s_work))
    low.append(np.percentile(s_work,5))
    up.append(np.percentile(s_work,95))

time = np.arange(0,40+1,1)

plt.semilogy(time,means[0:len(time)],'o',color='C1')
plt.semilogy(t,I_plot,color='C1',linewidth=3)
plt.semilogy(t,I2_plot,color='C0',linewidth=3)
#plt.plot(t_det,inf_det/p_surv,linewidth=3)
#plt.plot(t,inf,linewidth = 3, color ='C0')
#plt.plot(time,low,color='C0',linewidth=3,alpha=0.5)/p_surv
#plt.plot(time,up,color='C0',linewidth=3,alpha=0.5)
plt.ylim((1, 3*10000))
plt.xlim((0,40))
plt.fill_between(time,low[0:len(time)],up[0:len(time)],alpha=0.25,color='C1')
plt.tick_params(axis='both', which='major', labelsize=15, width=1, length=10)
plt.tick_params(axis='both', which='minor', labelsize=10, width=1, length=5)
plt.show()


Example 45
mpl.legend(["data","psedo-interp"])#,"galerk-interp","pseudo-points"])
mpl.grid(True)
mpl.show()

mpl.plot(xs, funcExpanding ,xs,ypseudo)#, xs,ys,cpoints,pseudopoints)
mpl.legend(["data","psedo-interp"])#,"galerk-interp","pseudo-points"])
mpl.grid(True)
mpl.show()

mpl.plot(xs, funcExpanding - ypseudo)#, xs,ys,cpoints,pseudopoints)
mpl.legend(["Diff between data & pseudo-interp"])#,"galerk-interp","pseudo-points"])
mpl.grid(True)
mpl.show()


# mpl.plot(xs, d2pseudo, cpoints, d2pseudopoints )#, xs,ys,cpoints,pseudopoints)
# mpl.legend(["data","d2psedo-interp"])#,"galerk-interp","pseudo-points"])
# #mpl.show()
# #mpl.semilogx(rhoFunc.points,rhoFunc.data)
# mpl.grid(True)
# mpl.show()

mpl.plot(cpoints,dpseudopoints,xs,dypseudo,realxs,fdeos)
mpl.legend(["dpseudo-pts","dpseudo-interp","finite-difference"])
mpl.grid(True)
mpl.show()


mpl.semilogy(range(N),absolute(pseudofks),range(N), absolute(fks))
mpl.show()
        for value in values:
            allTransitions.add(value)

    for index in allTransitions: # for example 0,1,2,5,6,9
        possibles = []
        percent   = []
        temp      = []
        for values in resultsAe:
            try:
                possibles.append(values[index][1]) # read possibles from slot 1
                percent.append(values[index][0]) # read ratio from slot 0
                temp.append(values[index][2]) # read temperature from slot 2
            except KeyError:
                pass
        kbT = 1/kb/np.array(temp)
        plt.semilogy(kbT, possibles, "-x", label=index)

        # try to fit
        if len(possibles) > 1:
            try:
                popt, pcov = curve_fit(f.exp, kbT, possibles, p0=[10e5, -0.01])
                a, b = popt
                minusEnergy = b
                plt.semilogy(kbT, f.exp(kbT, a,b), label="fit {0:.4g}e^{1:.4f}".format(a,b))
                percentMean = np.mean(percent)
                # index is the process, from x to y
                x,y = mkl.getXy(index, len(energies))
                if (verbose):
                    print("/",index, mkl.getXy(index, len(energies)))
                    print(percentMean,energies[x][y],minusEnergy)
Example 47
    # Use the same formulas above
    N = x.shape[0]
    for n in range(1, N):
        w0 = np.append(w0, w)
        x_predict[n, :] = x[n-1, :] * w
        error[n, :] = x[n, :] - x_predict[n, :]
        w = w + eta * error[n, :].T * x[n-1, :]

    if iteration == 0:
        error_full = np.copy(error)
    else:
        error_full = np.concatenate((error_full, np.copy(error)), axis=0)

# Reshape the final error
error_full = error_full.reshape((100, 5000))

# LMS learning curve: formula (3.63) in textbook where the learning-rate parameter eta is small
Jn = sigu2*(1-a**2)*(1+(eta/2)*sigu2) + sigu2*(a**2+(eta/2)*(a**2)*sigu2-0.5*eta*sigu2)*(1-eta*sigu2)**(2*t)

# LMS mean square error: formula (under formula (3.62) in textbook)
J_mean = np.mean(np.square(error_full), axis=0)

# Plot the desired results
plt.semilogy(Jn, 'r--', label='Theory')
plt.semilogy(J_mean, 'k-', label='Experiment', linewidth=0.8)
plt.legend(loc="upper right")
plt.title('Learning-rate parameter eta = ' + str(eta))
plt.xlabel("Number of iterations, n")
plt.ylabel("MSE")
plt.show()
    ############################################################################
    # %% SAVE MODELS
    ############################################################################

    g_model.save('02-results/gen_model.h5')
    d_model.save('02-results/dis_model.h5')
    acgan_model.save('02-results/gan_model.h5')
    e_model.save('02-results/enc_model.h5')
    ae_model.save('02-results/ae_model.h5')

    ############################################################################
    # %% PLOT LOSS CURVES
    ############################################################################

    fig = mp.figure(figsize=(10,8))
    mp.semilogy(np.array(loss)[:, [1, 2, 4, 5, 6]])
    mp.xlabel('batch')
    mp.ylabel('loss')
    mp.legend(['D(w) loss', 'D(y) loss', 'D(G(w)) loss',  'D(G(y)) loss', 'E(X) loss'])
    mp.savefig('02-results/loss.png')
    mp.close()
    np.save('02-results/loss.npy', np.array(loss))

    ############################################################################
    # %% PLOT ACCURACY CURVES
    ############################################################################

    fig = mp.figure(figsize=(10,8))
    #mp.plot(np.array(acc))
    N = 16
    # smooth the accuracy trace with an N-point moving average
    mp.plot(np.convolve(np.array(acc)[:,0], np.ones((N,))/N)[(N-1):-N])
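The convolution above implements an N-point moving average; the [(N-1):-N] slice throws away the samples where the window only partially overlaps the data. A standalone illustration with a hypothetical accuracy trace:

import numpy as np
acc = np.random.rand(200)                                # hypothetical accuracy trace
N = 16
smoothed = np.convolve(acc, np.ones(N) / N)[(N - 1):-N]  # N-point moving average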
Example n. 49
def ptest(methods,
          ivps,
          tols=[1.e-1, 1.e-2, 1.e-4, 1.e-6],
          verbosity=0,
          parallel=False):
    """
        Runs a performance test, integrating a set of problems with a set
        of methods using a sequence of error tolerances.  Creates a plot
        of the error achieved versus the amount of work done (number of
        function evaluations) for each method.

        **Input**:
            * methods -- a list of ODEsolver instances
                      Note that all methods must have error estimators.
            * ivps    -- a list of IVP instances
            * tols    -- a specified list of error tolerances (optional)
            * parallel -- whether to exploit possible parallelism (optional)

        **Example**::

            >>> import nodepy.runge_kutta_method as rk
            >>> from nodepy.ivp import load_ivp
            >>> bs5=rk.loadRKM('BS5')
            >>> myivp=load_ivp('nlsin')
            >>> work, err = ptest(bs5, myivp)

    """
    import matplotlib.pyplot as pl
    pl.clf()
    pl.draw()
    # In case just one method is passed in (and not as a list):
    if not isinstance(methods, list): methods = [methods]
    if not isinstance(ivps, list): ivps = [ivps]
    err = np.ones([len(methods), len(tols)])
    work = np.zeros([len(methods), len(tols)])
    for ivp in ivps:
        if verbosity > 0: print("solving problem {}".format(ivp))
        try:
            exsol = ivp.exact(ivp.T)
        except Exception:
            bs5 = rk.loadRKM('BS5')  # use Bogacki-Shampine RK5 for a fine reference solution
            lowtol = min(tols) / 100.
            if verbosity > 0:
                print(
                    'solving for "exact" solution with tol= {}'.format(lowtol))
            t, u = bs5(ivp, errtol=lowtol, dt=ivp.dt0)
            if verbosity > 0: print('done')
            exsol = u[-1] + 0.
        for imeth, method in enumerate(methods):
            if verbosity > 0:
                print('Solving with method {}'.format(method.name))
            if parallel:
                speedup = len(method) / float(method.num_seq_dep_stages())
            else:
                speedup = 1.
            workperstep = len(method) - method.is_FSAL()
            for jtol, tol in enumerate(tols):
                t, u, rej, dt, errhist = method(ivp,
                                                errtol=tol,
                                                dt=ivp.dt0,
                                                diagnostics=True,
                                                controllertype='P')
                if verbosity > 1: print('{} rejected steps'.format(rej))
                err[imeth, jtol] *= np.max(np.abs(u[-1] - exsol))
                #FSAL methods save on accepted steps, but not on rejected:
                work[imeth, jtol] += (len(t) * workperstep +
                                      rej * len(method)) / speedup
    for imeth, method in enumerate(methods):
        for jtol, tol in enumerate(tols):
            err[imeth, jtol] = err[imeth, jtol]**(1. / len(ivps))
    for imeth, method in enumerate(methods):
        pl.semilogy(work[imeth, :],
                    err[imeth, :],
                    label=method.name,
                    linewidth=3)
    pl.xlabel('Function evaluations')
    pl.ylabel('Error at $t_{final}$')
    pl.legend(loc='best')
    pl.draw()
    return work, err
Example n. 50
    infile = name

    names.append(os.path.basename(name).replace(".txt", ""))

    print('plotting ' + infile)

    with open(infile, 'r') as csvfile:
        Ns = []
        Times = []

        reader = csv.DictReader(csvfile, delimiter='\t')

        for row in reader:
            Ns.append(int(row['N']))
            Times.append(float(row['time']))

        plt.semilogy(Ns, Times)

pp = PdfPages('time.pdf')

#plt.axis([0, 2048, .01, 24])
plt.legend(names, loc='upper left', fontsize='small')
plt.title('Euclidean Norm Computation')
plt.xlabel('Problem Size')
plt.ylabel('Execution Time')
plt.xticks([22, 23, 24, 25, 26, 27], [22, 23, 24, 25, 26, 27])

pp.savefig()
pp.close()
plt.show()
Example n. 51
    return result[0]


plt.figure()
for j in range(14):
    v = v0
    if (j != 0):
        v[j - 1] = [0]
    v[j] = [1]
    #print(v)
    # compute their product
    u = np.dot(A, v)
    # diff records the difference of Av and v
    diff = [abssum(u, v)]
    i = 0
    while diff[i] > 1e-5:
        v = u  # record A^kv
        u = np.dot(A, v)  # compute A^{k+1}v
        diff.append(abssum(u, v))  # append the new difference
        i = i + 1
        print(vecsum(u))

    #plot the differences with iterations
    #plt.subplot(7,2,j+1)
    plt.semilogy(range(len(diff)), diff, label='vector ' + str(j))
plt.xlabel('iterations')
plt.ylabel('difference')
plt.title('Convergence')
plt.legend()
plt.show()
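Since A, v0, abssum and vecsum are defined elsewhere in the file, here is a self-contained sketch of the same convergence experiment on a small column-stochastic matrix (dominant eigenvalue 1), with the successive-iterate difference plotted on a log scale:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
A = rng.random((14, 14))
A = A / A.sum(axis=0)                    # column-stochastic matrix

v = np.zeros((14, 1))
v[0] = 1.0
diff = []
for _ in range(200):
    u = A @ v                            # one power-iteration step
    diff.append(np.sum(np.abs(u - v)))   # l1 distance between iterates
    if diff[-1] < 1e-5:
        break
    v = u

plt.semilogy(range(len(diff)), diff)
plt.xlabel('iterations')
plt.ylabel('difference')
plt.show()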
Example n. 52
def get_completeness_adjusted_table(catalogue, completeness, dmag,
                                    offset=1.0E-5, end_year=None, plot=False,
                                    figure_size=(8, 6), filename=None,
                                    filetype='png', dpi=300, ax=None):
    """
    Counts the number of earthquakes in each magnitude bin and normalises
    the rate to annual rates, taking into account the completeness
    """
    if not end_year:
        end_year = catalogue.end_year
    # Find the natural bin limits
    mag_bins = _get_catalogue_bin_limits(catalogue, dmag)
    obs_time = end_year - completeness[:, 0] + 1.
    obs_rates = np.zeros_like(mag_bins)
    durations = np.zeros_like(mag_bins)
    n_comp = np.shape(completeness)[0]
    for iloc in range(n_comp):
        low_mag = completeness[iloc, 1]
        comp_year = completeness[iloc, 0]
        if iloc == (n_comp - 1):
            idx = np.logical_and(
                catalogue.data['magnitude'] >= low_mag - offset,
                catalogue.data['year'] >= comp_year)
            high_mag = mag_bins[-1]
            obs_idx = mag_bins >= (low_mag - offset)
        else:
            high_mag = completeness[iloc + 1, 1]
            mag_idx = np.logical_and(
                catalogue.data['magnitude'] >= low_mag - offset,
                catalogue.data['magnitude'] < (high_mag - offset))

            idx = np.logical_and(mag_idx,
                                 catalogue.data['year'] >= (comp_year - offset))
            obs_idx = np.logical_and(mag_bins >= (low_mag - offset),
                                     mag_bins < (high_mag + offset))
        temp_rates = np.histogram(catalogue.data['magnitude'][idx],
                                  mag_bins[obs_idx])[0]
        temp_rates = temp_rates.astype(float) / obs_time[iloc]
        obs_rates[obs_idx[:-1]] = temp_rates
        durations[obs_idx[:-1]] = obs_time[iloc]
    selector = np.where(obs_rates > 0.)[0]
    mag_bins = mag_bins[selector]
    obs_rates = obs_rates[selector]
    durations = durations[selector]
    # Get cumulative rates
    cum_rates = np.array([sum(obs_rates[iloc:])
                          for iloc in range(0, len(obs_rates))])
    if plot:
        plt.figure(figsize=figure_size)
        plt.semilogy(mag_bins + dmag / 2., obs_rates, "bo",
                     label="Incremental")
        plt.semilogy(mag_bins + dmag / 2., cum_rates, "rs",
                     label="Cumulative")
        plt.xlabel("Magnitude (M)", fontsize=16)
        plt.ylabel("Annual Rate", fontsize=16)
        plt.grid(True)
        plt.legend(fontsize=16)
        if filename:
            plt.savefig(filename, format=filetype, dpi=dpi,
                        bbox_inches="tight")
    return np.column_stack([mag_bins, durations, obs_rates, cum_rates,
                            np.log10(cum_rates)])
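For intuition, a minimal sketch of the incremental-versus-cumulative rate plot this function produces, using a synthetic Gutenberg-Richter catalogue with a single completeness window; the b-value, event count and duration below are illustrative:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
b, m_min, n_events, years = 1.0, 4.0, 2000, 50.0
# Gutenberg-Richter magnitudes are exponential above the completeness magnitude
mags = m_min + rng.exponential(1.0 / (b * np.log(10)), n_events)

dmag = 0.1
bins = np.arange(m_min, mags.max() + dmag, dmag)
counts, _ = np.histogram(mags, bins)
obs_rates = counts / years                      # annual incremental rates
cum_rates = np.cumsum(obs_rates[::-1])[::-1]    # annual rate of M >= m

centres = bins[:-1] + dmag / 2.0
plt.semilogy(centres, obs_rates, 'bo', label='Incremental')
plt.semilogy(centres, cum_rates, 'rs', label='Cumulative')
plt.xlabel('Magnitude (M)')
plt.ylabel('Annual Rate')
plt.legend()
plt.show()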
Example n. 53
    Hyee = h.initial(xx)
    Hyee[0] = 0
    Hyee[-1] = 0

    t0yee = time.time()
    for j in range(m):
        # centred-difference update of H from E, mirrored boundary values, then E from H
        Hyee[1:-1] = Hyee[1:-1] + s * (Eyee[2:] - Eyee[:-2])
        Hyee[0] = Hyee[1]
        Hyee[-1] = Hyee[-2]
        Eyee[1:-1] = Eyee[1:-1] + s * (Hyee[2:] - Hyee[:-2])
    tout[2, k] = time.time() - t0yee

#print(dxs)
#print(tout.T)

plt.plot(dxs, tout.T)
plt.legend(('RK45', 'DOP853', 'FDTD'))
plt.xlabel('dx')
plt.ylabel('computation time [s]')
plt.title('T=' + str(T))
plt.savefig('testresults/timing/MOL2yeespacing.pdf')
plt.clf()

plt.semilogy(dxs, tout.T, '.')
plt.legend(('RK45', 'DOP853', 'FDTD'))
plt.xlabel('dx')
plt.ylabel('computation time [s]')
plt.title('T=' + str(T))
plt.savefig('testresults/timing/MOL2yeespacingsemilog.pdf')
plt.clf()
Example n. 54
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression

ram_prices = pd.read_csv("data/ram_price.csv")

# use historical data to forecast prices after the year 2000
data_train = ram_prices[ram_prices.date < 2000]
data_test = ram_prices[ram_prices.date >= 2000]

# predict prices based on date
X_train = data_train.date.to_numpy()[:, np.newaxis]

# we use a log-transform to get a simpler relationship of data to target
y_train = np.log(data_train.price)
tree = DecisionTreeRegressor().fit(X_train, y_train)
linear_reg = LinearRegression().fit(X_train, y_train)

# predict on all data
X_all = ram_prices.date.to_numpy()[:, np.newaxis]
pred_tree = tree.predict(X_all)
pred_lr = linear_reg.predict(X_all)

# undo log-transform
price_tree = np.exp(pred_tree)
price_lr = np.exp(pred_lr)

plt.semilogy(data_train.date, data_train.price, label="Training data")
plt.semilogy(data_test.date, data_test.price, label="Test data")
plt.semilogy(ram_prices.date, price_tree, label="Tree prediction")
plt.semilogy(ram_prices.date, price_lr, label="Linear prediction")
plt.legend()
plt.show()
Example n. 55
#pl.show()
pl.clf()
pl.yscale('log')
pl.hist(BinVar,
        BinNumb,
        log=True,
        histtype='stepfilled',
        color=hex2color('#0000FF'),
        linewidth=0.2)
pl.ylim([-0.75, (np.max(BinHists)) * 10**0.35])

if isinstance(FitParam, dict) and len(FitRange) > 0:
    # Use semilogy instead of plot to overlay the fit on a log y-axis:
    # http://stackoverflow.com/questions/773814/plot-logarithmic-axes-with-matplotlib-in-python
    pl.semilogy(
        BinCents[FitRange],
        FitGauss,
        color=hex2color('#FF0000'),
        linewidth=3,
        linestyle='solid')
    pl.text(FitParam['mu'] + 1.0 * FitParam['sigma'],
            FitParam['A'],
            'sigma = %.10g' % (FitParam['sigma']),
            color=hex2color('#FF0000'),
            fontsize=18)

pl.xlabel("Pixel Value")
pl.ylabel("log N")

#
# Save eps
#
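A minimal self-contained version of this pattern -- a histogram on a log y-axis with a Gaussian curve overlaid via semilogy -- assuming normally distributed pixel values and sample-moment fit parameters:

import numpy as np
import matplotlib.pyplot as pl

rng = np.random.default_rng(0)
data = rng.normal(100.0, 15.0, 100000)           # hypothetical pixel values

counts, edges, _ = pl.hist(data, 200, log=True, histtype='stepfilled')
cents = 0.5 * (edges[:-1] + edges[1:])

mu, sigma, amp = data.mean(), data.std(), counts.max()
pl.semilogy(cents, amp * np.exp(-0.5 * ((cents - mu) / sigma) ** 2),
            color='#FF0000', linewidth=3)
pl.xlabel('Pixel Value')
pl.ylabel('log N')
pl.show()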
Example n. 56
def test_case():
    np.random.seed(42)
    print(
        "Length of Nvals = number of iterations, with 100 sample points per step, i.e. setting up the function for 10 iterations of 100 sample points each"
    )
    num_Ns = len(Nvals)
    print(
        "The length of Nvals also sets the number of iteration steps (= 10) used in the model"
    )
    max_errs = np.inf * np.ones((num_Ns, 3))
    max_errs_gp = np.inf * np.ones(num_Ns)
    avg_errs_gp = np.inf * np.ones(num_Ns)

    print(
        "construct test points, setting N = 100 test points and D = 8 dimensions"
    )
    X_test = randOmega(100, 8)
    f_test = test_function(X_test.T)

    for i, N in enumerate(Nvals):
        # training points
        X = randOmega(int(N), 8)
        V = test_function(X.T)
        G = dtest_function(X.T)

        CN = (G.T @ G) / N
        vals, vecs = linalg.eigh(CN)
        for d in range(3, 0, -1):
            # find active subspace of dimension d
            W = vecs[:, -d:]
            Y = X @ W

            # fit GP on active subspace
            gp_as = GaussianProcessRegressor(RBF(), n_restarts_optimizer=8)
            gp_as.fit(Y.reshape(N, d), V)
            m_tilde = gp_as.predict((X_test @ W).reshape(100, d))
            max_errs[i, d - 1] = np.max(np.abs(f_test - m_tilde))

        gp = GaussianProcessRegressor(RBF())
        gp.fit(X, V)
        m_tilde = gp.predict(X_test)
        # Calculating the max error for the GP
        max_errs_gp[i] = np.max(np.abs(f_test - m_tilde))
        #Calculating the same for avg error according to the formula in part b), and using these values for part b)
        avg_errs_gp[i] = (1 / N) * np.sum(np.abs(f_test - m_tilde))


    # Adjust the plot to fit 8 dimensions
    fig, ax = plt.subplots(1, 2, figsize=(9, 5))
    x = np.arange(1, 9)
    ax[0].semilogy(x, vals, ".", ms=15)
    ax[0].set_xlabel("Eigenvalue index")
    ax[0].set_xticks(x)
    ax[0].set_ylabel(r"$\lambda$")

    ax[1].semilogy(Nvals, max_errs, ".-", ms=15)
    ax[1].semilogy(Nvals, max_errs_gp, ".-", ms=15)
    ax[1].legend([str(i) + "d AS" for i in range(1, 4)] + ["Full GP"])
    ax[1].set_xlabel("# of points")

    fig.tight_layout()
    plt.show()

    print(
        "b) Plotting the average and maximum errors according to the formulae in part b:"
    )
    plt.semilogy(Nvals, max_errs_gp, ".-", ms=15)
    plt.title("Maximum errors")
    plt.xlabel("# of points")
    plt.show()

    plt.semilogy(Nvals, avg_errs_gp, ".-", ms=15)
    plt.title("Average errors")
    plt.xlabel("# of points")
    plt.show()
    print(
        "As expected we get a vertical line: it shows the spread of errors produced when the model is run 10 times on the same number of sample points"
    )

    print(
        "G[10] is the gradient vector, hence we plot and print the value of the gradient at the 10th sample point"
    )
    plt.plot(x, G[10], ".", ms=15)
    plt.xlabel("Dimension")
    plt.ylabel("Partial derivative")
    plt.show()
    print("The gradient vector for the 10th generation: ")
    print(G[10])

    print(
        "For this exercise we utilise the partial derivatives at the 10th sample point, where G is the matrix of partial derivatives.\
    We then construct a matrix C whose eigenvalues help us decide the active subspace. We see\
    from our plot that the active subspace lies along the 2nd and 3rd dimensions for the 10th iteration."
    )

    return fig, max_errs
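A compact sketch of the active-subspace diagnostic used above -- the eigenvalues of the gradient covariance C = G^T G / N on a log scale -- for a synthetic function whose active subspace is two-dimensional by construction; the test function and noise level are illustrative:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
N, D = 200, 8
X = rng.uniform(-1, 1, (N, D))

# f(x) = sin(x_0) + 0.5 * x_1**2 depends on two coordinates only,
# so C should show two dominant eigenvalues
G = np.zeros((N, D))
G[:, 0] = np.cos(X[:, 0])
G[:, 1] = X[:, 1]
G += 0.01 * rng.normal(size=(N, D))    # small noise keeps all eigenvalues positive

C = (G.T @ G) / N
vals = np.linalg.eigvalsh(C)           # ascending order
plt.semilogy(np.arange(1, D + 1), vals, '.', ms=15)
plt.xlabel('Eigenvalue index')
plt.ylabel(r'$\lambda$')
plt.show()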
Example n. 57
import matplotlib.pyplot as plt
import numpy as np
import sys
import seaborn as sns
sns.set()

if __name__ == '__main__':
    depth = int(sys.argv[1])
    g = 1.4
    h = 0.9045

    for idx in range(1, 21):
        try:
            T = idx * 0.5
            fname = ('data/1d_TFI_g%.4f_h%.4f/L31/T%.1f/circuit_depth%d_Niter100000_1st_error.npy'
                     % (g, h, T, depth))
            errs = np.load(fname)  # load the error trace once instead of twice
            plt.semilogy(errs, label='T=%.1f' % T)
            print('plot T %.1f' % T, errs[0])
        except Exception as e:
            print(e)

    plt.legend()
    plt.show()

    # T = 0.2
    # T = float(sys.argv[1])
    # for idx in range(6):
Example n. 58
    #    for mag in [7,8,9,10]:
    #        for bmv in [1.0, 1.4]:

    cols = ['violet', 'b', 'r']
    mags = [4.7, 6.53, 10.65]
    bmvs = [0.77, 0.68, 0.996]
    for i in range(0, len(mags)):
        uncs = np.linspace(1, 10)
        mag = mags[i]
        bmv = bmvs[i]
        if bmv > 1.2:
            stype = "M"
            i2counts = ds.getI2_M(uncs)
        else:
            stype = "K"
            i2counts = ds.getI2_K(uncs)

        exp_counts = ds.getEXPMeter(i2counts, bmv)
        exp_time = ds.getEXPTime(i2counts, mag, bmv, STAR_EL, AVG_FWHM)

        #        print "%s %4.1f %3.1f %7.0f %.1g" % (stype,mag,precision,exp_time,exp_counts)

        plt.semilogy(uncs, exp_time, c=cols[i])

    plt.ylabel("Exposure Time (sec)")
    plt.xlabel("Desired Precision (m/s)")
    plt.ylim(1, 2500)
    plt.show()
Example n. 59
            iterAMP2 = 4
            xhat2 = AMP(y, H, sigma2, sigmas2, iterAMP2, m, n)
            # APPROXIMATE MESSAGE 2
            errors['AMP2'].append(np.sum(x != xhat2))

            x_mmse = np.dot(
                inv(sigma2 / sigmas2 * np.eye(n) + np.dot(prim(H), H)),
                np.dot(prim(H), y))
            x_mmse = np.sign(np.real(x_mmse)) + 1.j * np.sign(np.imag(x_mmse))
            errors['MMSE'].append(np.sum(x != x_mmse))

        count = count + 1

        meta_errors['AMP1'][0].append(np.mean(errors['AMP1']))
        meta_errors['AMP1'][1].append(np.std(errors['AMP1']))
        meta_errors['AMP2'][0].append(np.mean(errors['AMP2']))
        meta_errors['AMP2'][1].append(np.std(errors['AMP2']))

        meta_errors['MMSE'][0].append(np.mean(errors['MMSE']))
        meta_errors['MMSE'][1].append(np.std(errors['MMSE']))

import matplotlib.pyplot as plt
plt.semilogy(SNRrange, meta_errors['AMP1'][0], 'r', label='AMP1')
plt.semilogy(SNRrange, meta_errors['AMP2'][0], 'b', label='AMP2')
plt.semilogy(SNRrange, meta_errors['MMSE'][0], 'k', label='MMSE')
plt.legend()
plt.title('Approximate Message Passing')
plt.xlabel('Signal to Noise [dB]')
plt.ylabel('Mean number of symbol errors')
plt.show()
Example n. 60
    Y10[i] *= 1000
    Y11[i] *= 1000
    # Normalize by number of cells
    Y8[i] /= Y1[i]
    Y9[i] /= Y1[i]
    Y10[i] /= Y1[i]
    Y11[i] /= Y1[i]
    #print (X1[i]*pow(2,i+1) * X1[i]*pow(2,i+1))/Y1[i]

fig = pyplot.figure()

fig.patch.set_facecolor('white')
pyplot.xlabel("Compressibility", fontdict={'fontsize': 16})
pyplot.ylabel("Runtime/numcells (millisecs/cell)", fontdict={'fontsize': 16})

pyplot.semilogy()  # switch the y-axis to a log scale before plotting

pyplot.plot(Y2,
            Y8,
            color='green',
            linestyle='-',
            linewidth=1.0,
            marker='D',
            label='Full Perfect on GPU')
pyplot.plot(Y2,
            Y9,
            color='orange',
            linestyle='-',
            linewidth=1.0,
            marker='D',
            label='7 write, 1 read on GPU')