Example #1
import os
from matplotlib.pylab import close, grid, plot, savefig, xlabel, ylabel

def plot_running_time(running_time, path):
    xlabel('|C|')
    ylabel('MTV iteration in secs.')
    grid(True)
    plot(range(len(running_time)), running_time)
    savefig(os.path.join(path, 'running_time.png'))
    close()
Example #2
import numpy as np
import matplotlib.pyplot as plt
from io import BytesIO

def display_grid(grid, **kwargs):
    fig = plt.figure()
    plt.axes().set_aspect('equal')

    if kwargs.get('mark_core_cells', True):
        core_cell_coords = grid._cell_nodes[1:-1, 1:-1]
        cellx, celly = core_cell_coords[:, :, 0], core_cell_coords[:, :, 1]
        plt.plot(cellx, celly, '-o', np.transpose(cellx), np.transpose(celly), '-o', color='red')

    if kwargs.get('mark_boundary_cells', True):
        boundary_cell_coords = grid._cell_nodes[0, :], \
                               grid._cell_nodes[-1, :], \
                               grid._cell_nodes[1:-1, 0], \
                               grid._cell_nodes[1:-1, -1]

        for coords in boundary_cell_coords:
            plt.plot(coords[:, 0], coords[:, 1], '-x', color='blue')

    if kwargs.get('show', False):
        plt.show()

    f = BytesIO()
    plt.savefig(f)

    return f
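A minimal usage sketch (an editor's addition; grid stands for any object exposing a _cell_nodes array as assumed above):

buf = display_grid(grid, mark_boundary_cells=False)
with open('grid.png', 'wb') as fh:
    fh.write(buf.getvalue())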
Example #3
def make_corr1d_fig(dosave=False):
    corr = make_corr_both_hemi()
    lw=2; fs=16
    pl.figure(1)#, figsize=(8, 7))
    pl.clf()
    pl.xlim(4,300)
    pl.ylim(-400,+500)    
    lambda_titles = [r'$20 < \lambda < 30$',
                     r'$30 < \lambda < 40$',
                     r'$\lambda > 40$']
    colors = ['blue','green','red']
    for i in range(3):
        corr1d, rcen = corr_1d_from_2d(corr[i])
        pl.semilogx(rcen, corr1d*rcen**2, lw=lw, color=colors[i])
        #pl.semilogx(rcen, corr1d*rcen**2, 'o', lw=lw, color=colors[i])
    pl.xlabel(r'$s (Mpc)$', fontsize=fs)
    pl.ylabel(r'$s^2 \xi_0(s)$', fontsize=fs)
    pl.legend(lambda_titles, loc='lower left', fontsize=fs+3)
    pl.plot([.1,10000],[0,0],'k--')
    s_bao = 149.28
    pl.plot([s_bao, s_bao],[-9e9,+9e9],'k--')
    pl.text(s_bao*1.03, 420, 'BAO scale')
    pl.text(s_bao*1.03, 370, '%0.1f Mpc'%s_bao)
    if dosave: pl.savefig('xi1d_3bin.pdf')
Example #4
import os
from matplotlib.pylab import close, grid, plot, savefig, xlabel, ylabel

def plot_size_of_c(size_of_c, path):
    xlabel('|C|')
    ylabel('Max model size |Ci|')
    grid(True)
    plot([x+1 for x in range(len(size_of_c))], size_of_c)
    savefig(os.path.join(path, 'size_of_c.png'))
    close()
Example #5
	def transition_related_averaging_run(self, simulation_data, smoothing_kernel_width = 200, sampling_interval = [-50, 150], plot = True ):
		"""Average the simulation data in windows around the detected transition times."""
		transition_occurrence_times = self.transition_occurrence_times(simulation_data = simulation_data, smoothing_kernel_width = smoothing_kernel_width)
		# make sure only valid transition_occurrence_times survive
		transition_occurrence_times = transition_occurrence_times[(transition_occurrence_times > -sampling_interval[0]) * (transition_occurrence_times < (simulation_data.shape[0] - sampling_interval[1]))]
		
		# separated into on-and off periods:
		transition_occurrence_times_separated = [transition_occurrence_times[::2], transition_occurrence_times[1::2]]
		
		mean_time_course, std_time_course = np.zeros((2, sampling_interval[1] - sampling_interval[0], 5)), np.zeros((2, sampling_interval[1] - sampling_interval[0], 5))
		if transition_occurrence_times_separated[0].shape[0] > 2:
			
			for k in [0,1]:
				averaging_interval_times = np.array([transition_occurrence_times_separated[k] + sampling_interval[0],transition_occurrence_times_separated[k] + sampling_interval[1]]).T
				interval_data = np.array([simulation_data[avit[0]:avit[1]] for avit in averaging_interval_times])
				mean_time_course[k] = interval_data.mean(axis = 0)
				std_time_course[k] = (interval_data.std(axis = 0) / np.sqrt(interval_data.shape[0]))
			
			if plot:
				f = pl.figure(figsize = (10,8))
				for i in range(simulation_data.shape[1]):
					s = f.add_subplot(simulation_data.shape[1], 1, 1 + i)
					for j in [0,1]:
						pl.plot(np.arange(mean_time_course[j].T[i].shape[0]), mean_time_course[j].T[i], ['r--','b--'][j], linewidth = 2.0 )
						pl.fill_between(np.arange(mean_time_course[j].shape[0]), mean_time_course[j].T[i] + std_time_course[j].T[i], mean_time_course[j].T[i] - std_time_course[j].T[i], color=['r','b'][j], alpha = 0.2)
					s.set_title(self.variable_names[i])
				pl.draw()
			
		return (mean_time_course, std_time_course)
Example #6
def study_multiband_planck(quick=True):
    savename = datadir+'cl_multiband.pkl'
    bands = [100, 143, 217, 'mb']
    if quick: cl = pickle.load(open(savename, 'rb'))
    else:
        cl = {}
        mask = load_planck_mask()
        mask_factor = np.mean(mask**2.)
        for band in bands:
            this_map = load_planck_data(band)
            this_cl = hp.anafast(this_map*mask, lmax=lmax)/mask_factor
            cl[band] = this_cl
        pickle.dump(cl, open(savename, 'wb'))


    cl_theory = {}
    pl.clf()
    
    for band in bands:
        l_theory, cl_theory[band] = get_cl_theory(band)
        this_cl = cl[band]
        pl.plot(this_cl/cl_theory[band])
        
    pl.legend(bands)
    pl.plot([0,4000],[1,1],'k--')
    pl.ylim(.7,1.3)
    pl.ylabel('data/theory')
Example #7
def study_sdss_density(hemi='south'):
    grid = grid3d(hemi=hemi)
    n_data = num_sdss_data_both_catalogs(hemi, grid)
    n_rand, weight = num_sdss_rand_both_catalogs(hemi, grid)
    n_rand *= ((n_data*weight).sum() / (n_rand*weight).sum())
    delta = (n_data - n_rand) / n_rand
    delta[weight==0]=0.
    fdelta = np.fft.fftn(delta*weight)
    power = np.abs(fdelta)**2.
    ks = get_wavenumbers(delta.shape, grid.reso_mpc)
    kmag = ks[3]
    kbin = np.arange(0,0.06,0.002)
    ind = np.digitize(kmag.ravel(), kbin)
    power_ravel = power.ravel()
    power_bin = np.zeros_like(kbin)
    for i in range(len(kbin)):
        print(i)
        wh = np.where(ind==i)[0]
        power_bin[i] = power_ravel[wh].mean()
    #pl.clf()
    #pl.plot(kbin, power_bin)
    from cosmolopy import perturbation
    pk = perturbation.power_spectrum(kbin, 0.4, **cosmo)
    pl.clf(); pl.plot(kbin, power_bin/pk, 'b')
    pl.plot(kbin, power_bin/pk, 'bo')    
    pl.xlabel('k (1/Mpc)',fontsize=16)
    pl.ylabel('P(k) ratio, DATA/THEORY [arb. norm.]',fontsize=16)
Example #8
 def ROC(self, x):
     """
     ROC curve for separating the positive Gamma distribution
     from the two other modes, predicted by the current parameter values
     -x: vector of observations
     
     Output: P
     P[0]: False positive rates
     P[1]: True positive rates
     """
     import matplotlib.pylab as mp
     p = len(x)
     P = np.zeros((2,p))
     #False positives
     P[0] = (self.mixt[0]*st.gamma.cdf(-x,self.shape_n,scale=self.scale_n)
              + self.mixt[1]*st.norm.sf(x,0,np.sqrt(self.var)))/\
              (self.mixt[0] + self.mixt[1])
     #True positives
     P[1] = st.gamma.sf(x,self.shape_p,scale=self.scale_p)
     mp.figure()
     I = P[0].argsort()
     mp.plot(P[0,I],P[0,I],'r-')
     mp.plot(P[0,I],P[1,I],'g-')
     mp.legend(('False positive rate','True positive rate'))
     return P
Example #9
def plot_mock(mock):
    plt.clf()
    plt.plot(mock['dates'], mock['y'], marker='+', color='blue',
             label='data', markersize=9)
    plt.plot(mock['dates'], mock['y_without_seasonal'],
             color='green', alpha=0.6, linewidth=1,
             label='model without seasonal')
Example #10
  def __init__(self, frames, z, zeta, sweep):
    from scitbx import simplex
    from scitbx.array_family import flex
    import numpy
    self.L = Likelihood(FractionOfObservedIntensity(frames, z, zeta, sweep.get_scan()))

    x = 0.1 + numpy.arange(1000) / 2000.0
    l = [self.L(xx) for xx in x]
    from matplotlib import pylab
    pylab.plot(x, l)
    pylab.show()

   # print 1/0

    startA = 0.3
    startB = 0.4
#        startA = 0.2*3.14159 / 180
#        startB = 0.3*3.14159 / 180

    print("Start:", startA, startB)
    starting_simplex=[flex.double([startA]), flex.double([startB])]
#        for ii in range(2):
#            starting_simplex.append(flex.double([start]))#flex.random_double(1))

    self.optimizer = simplex.simplex_opt(
        1, matrix=starting_simplex, evaluator=self, tolerance=1e-7)
Example #11
    def check_models(self):
        '''
        Displays a plot of the models against that taken from a
        respected website (https://www.pvlighthouse.com.au/)
        '''
        plt.figure('Intrinsic bandgap')
        t = np.linspace(1, 500)

        for author in self.available_models():

            Eg = self.update(temp=t, author=author, multiplier=1.0)
            plt.plot(t, Eg, label=author)

        test_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'Si', 'check data', 'iBg.csv')

        data = np.genfromtxt(test_file, delimiter=',', names=True)

        for temp, name in zip(data.dtype.names[0::2], data.dtype.names[1::2]):
            plt.plot(
                data[temp], data[name], '--', label=name)

        plt.xlabel('Temperature (K)')
        plt.ylabel('Intrinsic Bandgap (eV)')

        plt.legend(loc=0)
        self.update(temp=0, author=author, multiplier=1.01)
Example #12
 def plot_values(self, TITLE, SAVE):
     plot(self.list_of_densities, self.list_of_pressures)
     title(TITLE)
     xlabel("Densities")
     ylabel("Pressure")
     savefig(SAVE)
     show()
Example #13
 def test_simple_gen(self):
     self_con = .8
     other_con = 0.05
     g = self.gen.gen_stoch_blockmodel(min_degree=1, blocks=5, self_con=self_con, other_con=other_con,
                                       powerlaw_exp=2.1, degree_seq='powerlaw', num_nodes=1000, num_links=3000)
     deg_hist = vertex_hist(g, 'total')
     res = fit_powerlaw.Fit(g.degree_property_map('total').a, discrete=True)
     print('powerlaw alpha:', res.power_law.alpha)
     print('powerlaw xmin:', res.power_law.xmin)
     if len(deg_hist[0]) != len(deg_hist[1]):
         deg_hist[1] = deg_hist[1][:len(deg_hist[0])]
     print('plot degree dist')
     plt.plot(deg_hist[1], deg_hist[0])
     plt.xscale('log')
     plt.xlabel('degree')
     plt.ylabel('#nodes')
     plt.yscale('log')
     plt.savefig('deg_dist_test.png')
     plt.close('all')
     print('plot graph')
     pos = sfdp_layout(g, groups=g.vp['com'], mu=3)
     graph_draw(g, pos=pos, output='graph.png', output_size=(800, 800),
                vertex_size=prop_to_size(g.degree_property_map('total'), mi=2, ma=30), vertex_color=[0., 0., 0., 1.],
                vertex_fill_color=g.vp['com'],
                bg_color=[1., 1., 1., 1.])
     plt.close('all')
     print('init:', self_con / (self_con + other_con), other_con / (self_con + other_con))
     print('real:', gt_tools.get_graph_com_connectivity(g, 'com'))
Example #14
def histograma(hist):
    
    hist=hist.histogram(255)
##    hist.save("hola4Hist.txt")
    pylab.plot(hist)
    pylab.draw()
    pylab.pause(0.0001)
Example #15
def compareFrequencies():
	times = generateTimes(sampleFreq, numSamples)
	signal = (80.0, 0.1)
	coherent = (60.0, 1.0)
	incoherent = (60.1, 1.0)
	highFNoise = (500.0, 0.01)
	timeData = generateTimeDomain(times, [signal, coherent, highFNoise])
	timeData2 = generateTimeDomain(times, [signal, incoherent, highFNoise])
	#timeData3 = generateTimeDomain(times, [signal, highFNoise])
	
	#timeData = generateTimeDomain(times, [(60.0, 1.0)])
	#timeData2 = generateTimeDomain(times, [(61.0, 1.0)])
	
	roi = (0, 20)
	
	freqData = [toDb(dtype(absolute(v))) for v in fourier(timeData)][roi[0]:roi[1]]
	freqData2 = [toDb(dtype(absolute(v))) for v in fourier(timeData2)][roi[0]:roi[1]]
	#freqData3 = [toDb(dtype(absolute(v))) for v in fourier(timeData3)][roi[0]:roi[1]]
	
	frequencies = generateFFTFrequencies(sampleFreq, numSamples)[roi[0]:roi[1]]
	
	#pylab.subplot(111)
	pylab.plot(frequencies, freqData)
	
	#pylab.subplot(112)
	pylab.plot(frequencies, freqData2)
	
	#pylab.plot(frequencies, freqData3)
	
	pylab.grid(True)
	pylab.show()
Example #16
def plotGetRetangle():
    """ Area selection from selected pen.
    """
    selRect = []
    if len(ds.EpmDatasetAnalysisPens.SelectedPens) != 1:
        sr.msgBox('EPM Python Plugin - Demo Tools', 'Please select a single pen before applying this function!', 'Warning')
        return 0
    epmData = ds.EpmDatasetAnalysisPens.SelectedPens[0].values
    y = epmData['Value'].copy()
    x = np.arange(len(y))
    fig, current_ax = pl.subplots()
    pl.plot(x, y, lw=2, c='g', alpha=.3)

    def line_select_callback(eclick, erelease):
        'eclick and erelease are the press and release events'
        x1, y1 = eclick.xdata, eclick.ydata
        x2, y2 = erelease.xdata, erelease.ydata
        print ("\n(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (x1, y1, x2, y2))
        selRect.append((int(x1), y1, int(x2), y2))

    def toggle_selector(event):
        if event.key in ['Q', 'q'] and toggle_selector.RS.active:
            toggle_selector.RS.set_active(False)
        if event.key in ['A', 'a'] and not toggle_selector.RS.active:
            toggle_selector.RS.set_active(True)
    toggle_selector.RS = RectangleSelector(current_ax, line_select_callback, drawtype='box', useblit=True, button=[1,3], minspanx=5, minspany=5, spancoords='pixels')
    pl.connect('key_press_event', toggle_selector)
    pl.show()
    return selRect
Example #17
def existe_croche_blanche_mesure(img,img2,liste,ecart):
	for elt in liste:
		# If there is a quarter note at the top or (exclusively) at the bottom
		if (not(elt[3]) and elt[4]) or (elt[3] and not(elt[4])):
			if elt[3]:
				elt.append(existe_croche_haut(img,ecart,elt[0],elt[2]))
			else:
				elt.append(existe_croche_bas(img,ecart,elt[1],elt[2]))
			# check whether there are other eighth-note flags
			liste = existe_autre_croche(img,liste,ecart)
			elt.extend([False,False])
			
		# if there is no quarter note
		elif (not(elt[3]) and not(elt[4])):
			# set the number of flags to zero
			elt.append(0)
			elt.append(existe_note(img2,ecart,elt[1],elt[2],pc_blan,'magenta'))
			elt.append(existe_note(img2,ecart,elt[0],elt[2],pc_blan,'magenta'))
			
			# it is a bar line (neither a quarter note nor a half note)
			if (not(elt[6]) and not(elt[7])):
				#elt.extend('m')
				x = [elt[2],elt[2]]
				y = [elt[0],elt[1]]
				plt.plot(x,y,'b')
	return liste
Example #18
def cdf(x,colsym="",lab="",lw=4):
    """ plot the cumulative density function

    Parameters
    ----------

    x : np.array()
    colsym : string
    lab : string
    lw : int
        linewidth

    Examples
    --------

    >>> import numpy as np

    """
    plt.rcParams['legend.fontsize'] = 20
    plt.rcParams['font.size'] = 20

    x  = np.sort(x)
    n  = len(x)
    x2 = np.repeat(x, 2)
    y2 = np.hstack([0.0, np.repeat(np.arange(1, n) / float(n), 2), 1.0])
    plt.plot(x2, y2, colsym, label=lab, linewidth=lw)
    plt.grid(True)
    plt.legend(loc=2)
    plt.xlabel('Ranging Error[m]')
    plt.ylabel('Cumulative Probability')
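A minimal usage sketch for cdf (an editor's addition; the error sample is synthetic):

import numpy as np
import matplotlib.pyplot as plt

errors = np.abs(np.random.normal(0.0, 1.0, 500))  # synthetic ranging errors [m]
cdf(errors, colsym='b-', lab='simulated', lw=2)
plt.show()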
Example #19
    def plot(self, bit_stream):
        if self.previous_bit_stream != bit_stream.to_list():
            self.previous_bit_stream = bit_stream

            x = []
            y = []
            bit = None

            for bit_time in bit_stream.to_list():
                if bit is None:
                    x.append(bit_time)
                    y.append(0)
                    bit = 0
                elif bit == 0:
                    x.extend([bit_time, bit_time])
                    y.extend([0, 1])
                    bit = 1
                elif bit == 1:
                    x.extend([bit_time, bit_time])
                    y.extend([1, 0])
                    bit = 0

            plt.clf()
            plt.plot(x, y)
            plt.xlim([0, 10000])
            plt.ylim([-0.1, 1.1])
            plt.show()
            plt.pause(0.005)
Example #20
def fancy_dendrogram(*args, **kwargs):
    '''
    Source: https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/
    '''
    from scipy.cluster import hierarchy
    import matplotlib.pylab as plt
    
    max_d = kwargs.pop('max_d', None)
    if max_d and 'color_threshold' not in kwargs:
        kwargs['color_threshold'] = max_d
    annotate_above = kwargs.pop('annotate_above', 0)

    ddata = hierarchy.dendrogram(*args, **kwargs)

    if not kwargs.get('no_plot', False):
        plt.title('Hierarchical Clustering Dendrogram (truncated)')
        plt.xlabel('sample index or (cluster size)')
        plt.ylabel('distance')
        for i, d, c in zip(ddata['icoord'], ddata['dcoord'], ddata['color_list']):
            x = 0.5 * sum(i[1:3])
            y = d[1]
            if y > annotate_above:
                plt.plot(x, y, 'o', c=c)
                plt.annotate("%.3g" % y, (x, y), xytext=(0, -5),
                             textcoords='offset points',
                             va='top', ha='center')
        if max_d:
            plt.axhline(y=max_d, c='k')
    return ddata
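A minimal usage sketch (an editor's addition, assuming scipy and random 2-D sample data):

import numpy as np
from scipy.cluster import hierarchy

X = np.random.rand(40, 2)
Z = hierarchy.linkage(X, method='ward')
fancy_dendrogram(Z, truncate_mode='lastp', p=12, annotate_above=0.2, max_d=0.6)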
Example #21
    def visualization2(self, sp_to_vis=None):
        if sp_to_vis:
            species_ready = list(set(sp_to_vis).intersection(self.all_sp_signatures.keys()))
        else:
            raise Exception('list of driver species must be defined')

        if not species_ready:
            raise Exception('None of the input species is a driver')

        for sp in species_ready:
            # Setting up figure
            plt.figure()
            plt.subplot(313)

            mon_val = OrderedDict()
            signature = self.all_sp_signatures[sp]
            for idx, mon in enumerate(list(set(signature))):
                if mon[0] == 'C':
                    mon_val[self.all_comb[sp][mon] + (-1,)] = idx
                else:
                    mon_val[self.all_comb[sp][mon]] = idx

            mon_rep = [0] * len(signature)
            for i, m in enumerate(signature):
                if m[0] == 'C':
                    mon_rep[i] = mon_val[self.all_comb[sp][m] + (-1,)]
                else:
                    mon_rep[i] = mon_val[self.all_comb[sp][m]]
            # mon_rep = [mon_val[self.all_comb[sp][m]] for m in signature]

            y_pos = numpy.arange(len(mon_val.keys()))
            plt.scatter(self.tspan[1:], mon_rep)
            plt.yticks(y_pos, mon_val.keys())
            plt.ylabel('Monomials', fontsize=16)
            plt.xlabel('Time(s)', fontsize=16)
            plt.xlim(0, self.tspan[-1])
            plt.ylim(0, max(y_pos))

            plt.subplot(312)

            for name in self.model.odes[sp].as_coefficients_dict():
                mon = name
                mon = mon.subs(self.param_values)
                var_to_study = [atom for atom in mon.atoms(sympy.Symbol)]
                arg_f1 = [numpy.maximum(self.mach_eps, self.y[str(va)][1:]) for va in var_to_study]
                f1 = sympy.lambdify(var_to_study, mon)
                mon_values = f1(*arg_f1)
                mon_name = str(name).partition('__')[2]
                plt.plot(self.tspan[1:], mon_values, label=mon_name)
            plt.ylabel('Rate(m/sec)', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.1, 0.85), loc='upper right', ncol=1)

            plt.subplot(311)
            plt.plot(self.tspan[1:], self.y['__s%d' % sp][1:], label=parse_name(self.model.species[sp]))
            plt.ylabel('Molecules', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.15, 0.85), loc='upper right', ncol=1)
            plt.suptitle('Tropicalization' + ' ' + str(self.model.species[sp]))

            # plt.show()
            plt.savefig('s%d' % sp + '.png', bbox_inches='tight', dpi=400)
Example #22
 def sanity_RotBroadProfileExample(self):
   """
     Example of rotational broadening.
   """
   import numpy as np
   import matplotlib.pylab as plt
   from PyAstronomy import modelSuite as ms
   
   # Get an instance of the model ...
   x = ms.RotBroadProfile()
   # ... and define some starting value
   x["xmax"] = 60.0
   x["A"] = 1.0
   x["eps"] = 0.8
   x["off"] = 0.0
   
   # Define a radial velocity axis
   vv = np.linspace(-90.,90.,200)
   
   # Construct some "data" and ...
   data = x.evaluate(vv)
   # ... add noise
   data += np.random.normal(0.0, 1e-3, data.size)
   
   # Fit the model using A, xmax, and eps as free
   # parameters ...
   x.thaw(["A", "xmax", "eps"])
   x.fit(vv, data)
   # ... and show the resulting parameter values.
   x.parameterSummary()
   
   # Plot the data and the model
   plt.plot(vv, data, 'bp')
   plt.plot(vv, x.model, 'r--')
Example #23
    def plot_corner_posteriors(self, savefile=None, labels=["T1", "R1", "Av", "T2", "R2"]):
        '''
        Plots the corner plot of the MCMC results.
        '''
        ndim = len(self.sampler.flatchain[0,:])
        chain = self.sampler
        samples = chain.flatchain
        
        samples = samples[:,0:ndim]  
        plt.figure(figsize=(8,8))
        fig = corner.corner(samples, labels=labels[0:ndim])
        plt.title("MJD: %.2f"%self.mjd)
        name = self._get_save_path(savefile, "mcmc_posteriors")
        plt.savefig(name)
        plt.close("all")
        

        plt.figure(figsize=(8,ndim*3))
        for n in range(ndim):
            plt.subplot(ndim,1,n+1)
            chain = self.sampler.chain[:,:,n]
            nwalk, nit = chain.shape
            
            for i in np.arange(nwalk):
                plt.plot(chain[i], lw=0.1)
            plt.ylabel(labels[n])
            plt.xlabel("Iteration")
        name_walkers = self._get_save_path(savefile, "mcmc_walkers")
        plt.tight_layout()
        plt.savefig(name_walkers)
        plt.close("all")  
Example #24
def plot_locking_states(df, meta, num_joints=None):

    marker_style = dict(linestyle=':', marker='o', s=100,)
    
    def format_axes(ax):
        ax.margins(0.2)
        ax.set_axis_off()

    if num_joints is None:
        num_joints = determine_num_joints(df)

    points = np.ones(num_joints)
    
    fig, ax = plt.subplots()
    for j in range(num_joints):
        ax.text(-1.5, j, "%d" % j)
    ax.text(0, -1.5, "time")
        
    for t in df.index:
        lock_states = df.loc[t][ [ "LockingState%d" % k for k in range(num_joints) ] ].tolist()
        c = ["orange" if l else "k" for l in lock_states]
        
        ax.scatter((t+0.1) * points, range(num_joints), color=c, **marker_style)
        format_axes(ax)
        
    ax.set_title('Locking state evolution')
    ax.set_xlabel("t")
    
    plt.plot()
Example #25
def check_isometry(G, chart, nseeds=100, verbose = 0):
    """
    A simple check of the isometry:
    looks whether the output distances match the input distances
    for nseeds points
    
    Returns
    -------
    a scaling factor between the proposed and the true metrics
    """
    nseeds = np.minimum(nseeds, G.V)
    aux = np.argsort(nr.rand(nseeds))
    seeds =  aux[:nseeds]
    dY = Euclidian_distance(chart[seeds],chart)
    dx = G.floyd(seeds)

    dY = np.reshape(dY,np.size(dY))
    dx = np.reshape(dx,np.size(dx))

    if verbose:
        import matplotlib.pylab as mp
        mp.figure()
        mp.plot(dx,dY,'.')
        mp.show()

    scale = np.dot(dx,dY)/np.dot(dx,dx)
    return scale
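A usage sketch (an editor's addition; G is any graph object exposing V and floyd() as assumed above, chart its low-dimensional embedding):

scale = check_isometry(G, chart, nseeds=50, verbose=1)
print('metric scaling factor:', scale)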
Example #26
def plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):
    """
    Plot a radially symmetric Q model.

    plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):

    r_min=minimum radius [km], r_max=maximum radius [km], dr=radius
    increment [km]

    Currently available models (model): cem, prem, ql6
    """
    import matplotlib.pylab as plt

    r = np.arange(r_min, r_max + dr, dr)
    q = np.zeros(len(r))

    for k in range(len(r)):

        if model == 'cem':
            q[k] = q_cem(r[k])
        elif model == 'ql6':
            q[k] = q_ql6(r[k])
        elif model == 'prem':
            q[k] = q_prem(r[k])

    plt.plot(r, q, 'k')
    plt.xlim((0.0, r_max))
    plt.xlabel('radius [km]')
    plt.ylabel('Q')
    plt.show()
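For instance (an editor's addition), the same helper can draw the ql6 profile for the mantle only:

plot_q(model='ql6', r_min=3480.0, r_max=6371.0, dr=5.0)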
Example #27
def fdr(p_values=None, verbose=0):
    """Returns the FDR associated with each p value

    Parameters
    -----------
    p_values : ndarray of shape (n)
        The samples p-value

    Returns
    -------
    q : array of shape(n)
        The corresponding fdr values
    """
    p_values = check_p_values(p_values)
    n_samples = p_values.size
    order = p_values.argsort()
    sp_values = p_values[order]

    # compute q while in ascending order
    q = np.minimum(1, n_samples * sp_values / np.arange(1, n_samples + 1))
    for i in range(n_samples - 1, 0, - 1):
        q[i - 1] = min(q[i], q[i - 1])

    # reorder the results
    inverse_order = np.arange(n_samples)
    inverse_order[order] = np.arange(n_samples)
    q = q[inverse_order]

    if verbose:
        import matplotlib.pylab as mp
        mp.figure()
        mp.xlabel('Input p-value')
        mp.plot(p_values, q, '.')
        mp.ylabel('Associated fdr')
    return q
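A small worked example (an editor's addition; it assumes check_p_values simply validates and returns the array):

import numpy as np

p = np.array([0.001, 0.009, 0.04, 0.20, 0.5])
q = fdr(p)
# Benjamini-Hochberg: q_(i) = min(1, n * p_(i) / i) on the sorted p-values, made
# monotone from the largest down; here q ~ [0.005, 0.0225, 0.0667, 0.25, 0.5].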
Example #28
 def plot_fft(self,b):
     a = len(self.fullfft_dft_py_fc_0.output)
     
     for i in range(0,b):
         self.frq.append(i)
     plt.plot(self.frq,self.fullfft_dft_py_fc_0.output)
     plt.show()
Example #29
    def check_models(self):
        plt.figure('Bandgap narrowing')
        Na = np.logspace(12, 20)
        Nd = 0.
        dn = 1e14
        temp = 300.

        for author in self.available_models():
            BGN = self.update(Na=Na, Nd=Nd, nxc=dn,
                              author=author,
                              temp=temp)

            if not np.all(BGN == 0):
                plt.plot(Na, BGN, label=author)

        test_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'Si', 'check data', 'Bgn.csv')

        data = np.genfromtxt(test_file, delimiter=',', names=True)

        for name in data.dtype.names[1:]:
            plt.plot(
                data['N'], data[name], 'r--',
                label='PV-lighthouse\'s: ' + name)

        plt.semilogx()
        plt.xlabel('Doping (cm$^{-3}$)')
        plt.ylabel('Bandgap narrowing (K)')

        plt.legend(loc=0)
Example #30
	def plot_cell(self,cell_number=0,label='insert_label'):

		current_cell = self.cell_list[cell_number]
		temp = current_cell.temp
		cd_signal = current_cell.cd_signal
		cd_calc = current_cell.cd_calc()
		
		ax = pylab.gca()

		pylab.plot(temp,cd_signal,'o',color='black')
		pylab.plot(temp,cd_calc,color='black')
		pylab.xlabel(r'Temperature ($^{\circ}$C)')
		pylab.ylabel('mdeg')
		pylab.ylim([-25,-4])
		dH = numpy.round(current_cell.dH, decimals=1)
		Tm = numpy.round(current_cell.Tm-273.15, decimals=1)
		nf = current_cell.nf
		nu = current_cell.nu
		textstr_dH = r'${\Delta}H_{m}$ = %.1f kcal/mol' % dH
		textstr_Tm = r'$T_{m}$ = %.1f $^{\circ}$C' % Tm
		textstr_nf ='$N_{folded}$ = %d' %nf
		textstr_nu ='$N_{unfolded}$ = %d'%nu
		ax.text(8,-6,textstr_dH, fontsize=16,ha='left',va='top')
		ax.text(8,-7.5,textstr_Tm, fontsize=16,ha='left',va='top')
		ax.text(8,-9,textstr_nf, fontsize=16,ha='left',va='top')
		ax.text(8,-10.5,textstr_nu, fontsize=16,ha='left',va='top')
		pylab.title(label)		
		pylab.show()

		return
Example #31
class AccuracyHistory(keras.callbacks.Callback):  # class header restored; assumes a Keras import
    def on_train_begin(self, logs={}):
        self.acc = []

    def on_epoch_end(self, batch, logs={}):
        self.acc.append(logs.get('acc'))

history = AccuracyHistory()        


# # Train the model

model.fit(x=X,
          y=y,
          batch_size=batch_size,
          epochs=10,
          verbose=1,
          validation_data=(X_test,y_test), 
          callbacks=[history])


# # Model Evaluation
testScore = model.evaluate(x=X_test,y=y_test,verbose=1)
print('Test loss:', testScore[0])
print('Test accuracy:', testScore[1])


# # Plot epoch vs accuracy.
plt.plot(range(1,11),history.acc)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
Example #32
def get_best_match(img, numbers_base):
    # (the start of the function, computing the distance matrix D, is elided in the source)
    min_ind = np.argmin(D, axis=1)
    min_val = D[np.arange(D.shape[0]), min_ind]

    numbers = min_ind / 2.
    return numbers, min_val


high_num, err_h = get_best_match(higher_img, numbers_base)
low_num, err_l = get_best_match(lower_img, numbers_base)

video_num = np.floor(high_num) * 10 + low_num
video_num[25::25] = np.nan

#%%
plt.figure()
lab_t, = plt.plot(np.diff(stamp), label='timestamp')

dd = np.diff(video_num)
dd[dd < -50] = np.nan
lab_v, = plt.plot(dd * 20, label='video number')

plt.xlim((0, 12000))
plt.ylim((0, 550))
plt.legend(handles=[lab_t, lab_v])
plt.xlabel('frame number')
plt.ylabel('time difference (ms)')

plt.xlim((1800, 2000))
#%%
#plt.figure()
#plt.imshow(lower_img[1328]-numbers_base[-1])
Example #33
dwell_time = 0.001  # sec

def lorentzian(x,bkg,amp,f0,w):
    return bkg + amp*w**2/((x-f0)**2+w**2)
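# A possible extension (an assumption, not part of the original script): the
# lorentzian model above can be fit to a measured scan with scipy, e.g.
#   from scipy.optimize import curve_fit
#   popt, pcov = curve_fit(lorentzian, x, y / dwell_time, p0=[bkg0, amp0, f0_guess, w0])
# where the p0 entries are illustrative initial guesses for (bkg, amp, f0, w).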

if flag=='nm':
    data = np.loadtxt(filename)
    dim = np.shape(data)
    print(dim)
    center=float(a[0])
    x0 = np.linspace(float(a[2]), float(a[3]), 800)
    y0 = sum(data) / dim[0]
    print(center)
    x=299792458.0/(x0*1E9+299792458.0/(center*1E-9))
    y=y0
    plt.plot(x*1E9,y/dwell_time,'.-',label='PLE Cps')
    plt.xlabel('nm')
    plt.ylabel('cps')
    plt.legend(loc=0)
if flag=='V':
    data = np.loadtxt(filename)
    dim = np.shape(data)
    print(dim)
    x = np.linspace(float(a[2])/-10.0, float(a[3])/-10.0, 800)
    y = sum(data) / dim[0]
    plt.plot(x, y / dwell_time, '.-', label='PLE Cps')
    plt.xlabel('V')
    plt.ylabel('cps')
    plt.legend(loc=0)
if flag == 'GHz':
    data = np.loadtxt(filename)
Example #34
    def outliers_example(self,
                         corr='',
                         out='',
                         date='',
                         N='',
                         lower='',
                         upper='',
                         median='',
                         flag='',
                         upper_s='',
                         lower_s='',
                         station='',
                         what=''):

        pressure = self.pretty_pressure_dic[str(self.an_p)]
        var = self.var_dics[self.var]['name']
        hour = str(self.hour).replace('0', '00:00').replace('1', '12:00')
        plt.title(var + ' ' + what + ' Outliers - Stat: ' + station + ', H: ' +
                  hour + ', P: ' + pressure + ' [hPa]',
                  y=1.03)

        corr_ = [n for n in corr if not np.isnan(n)]
        out_ = [n for n in out if not np.isnan(n)]

        num_a = '{:.1f}'.format(len(corr_) / len(out_ + corr_) * 100)
        num_o = '{:.1f}'.format(len(out_) / len(out_ + corr_) * 100)

        plt.scatter(date,
                    corr,
                    label='Accepted [' + num_a + '%]',
                    color='cyan',
                    s=3)
        plt.scatter(date,
                    out,
                    label='Outliers [' + num_o + '%]',
                    color='black',
                    s=3)
        X = [min(date), max(date)]

        plt.plot(X, [lower, lower], label='Lower', color='blue', ls='--')
        plt.plot(X, [upper, upper], label='Upper', color='red', ls='--')

        # adding the upper and lower values for skewed distributions
        plt.plot(X, [lower_s, lower_s],
                 label='Lower Skewed',
                 color='blue',
                 ls='-')
        plt.plot(X, [upper_s, upper_s],
                 label='Upper Skewed',
                 color='red',
                 ls='-')

        plt.plot(X, [median, median],
                 label='Median [' + '{:.1f}'.format(median) + ']',
                 color='black',
                 ls='--')

        plt.legend(fontsize=self.font - 6, loc='upper right', ncol=2)
        plt.grid(linestyle=':', color='lightgray', lw=1.2)

        plt.ylabel('Departure ' + self.var_dics[self.var]['units'],
                   fontsize=self.font)

        plt.xlabel('Date', fontsize=self.font)
        plt.xticks(rotation=45)

        out_c = [n for n in out if not np.isnan(n)]
        corr_c = [n for n in corr if not np.isnan(n)]

        plt.xlim(min(date) - 1 / 365, max(date) + 1 / 365)

        plt.ylim(-10, 10)

        plt.savefig('plots/outliers/outliers_' + flag + '_' + str(N) +
                    '_date_' + str(min(date)) + '_hour_' + self.hour + '_' +
                    self.var + '_anp_' + str(self.an_p) + '_fgp_' +
                    str(self.fg_p) + '.png',
                    bbox_inches='tight')
        plt.close()
Example #35
big_dict = {}
for dataset in ['HadGHCND','MIROC5','NORESM1']:

	pkl_file = open('data/'+dataset+'_regional_distrs.pkl', 'rb')
	region_dict = pickle.load(pkl_file)	;	pkl_file.close()

	big_dict[dataset]=region_dict


counter=big_dict['MIROC5']['CEU']['All-Hist']['JJA']['warm']['counter']

cold,warm=counter_to_list(counter)


num_bins = 65
counts, bin_edges = np.histogram(warm, bins=range(0,65), density=True)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf)
plt.plot([0,70],[0.95,0.95])
plt.show()

def quantile_from_cdf(x,qu):
	counts, bin_edges = np.histogram(x, bins=range(0,max(x)+1), density=True)
	cdf = np.cumsum(counts)

	quantiles=[]
	for q in qu:
		x1=np.where(cdf<q)[0][-1]
		quantiles.append(x1+(q-cdf[x1])/(cdf[x1+1]-cdf[x1]))

	return quantiles
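A usage sketch for quantile_from_cdf (an editor's addition):

q95, q99 = quantile_from_cdf(warm, [0.95, 0.99])
print('95th / 99th percentiles of the warm counts:', q95, q99)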
Example #36
ax1.get_yaxis().set_tick_params(which='both', direction='out')

plt.tick_params(
    axis='both',  # changes apply to the x-axis
    which='both',  # both major and minor ticks are affected
    bottom=False,
    left=False,  # ticks along the bottom edge are off
    top=False,  # ticks along the top edge are off
    labelbottom=False,  # tick labels along the bottom edge are off
    labelleft=False)

bottom = -65
top = 25
ax1.set_ylim(bottom, top)

plt.plot(times[t1:t2], voltage_a[t1:t2], 'k')

offset = 0.005

plt.xlabel('20 ms', fontsize=12)
ax1.xaxis.set_label_coords(0.9, -0.015)
plt.ylabel('20 mV', fontsize=12)
ax1.yaxis.set_label_coords(1.0, 0.375)
scale1_x = [times[t2] + offset - 0.02, times[t2] + offset]
scale1_y = [-65, -65]
plt.plot(scale1_x, scale1_y, color='k')

scale2_x = [times[t2] + offset, times[t2] + offset]
scale2_y = [-65, -45]
plt.plot(scale2_x, scale2_y, color='k')
Example #37
        susPlot.append(np.sum(np.sum(susMat)))
        infPlot.append(np.sum(np.sum(infMat)))
        remPlot.append(np.sum(np.sum(remMat)))
        expPlot.append(np.sum(np.sum(expMat)))

        # if i%100 == 0:
        #     plt.cla()
        #     plt.plot(susPlot,'g')
        #     plt.plot(infPlot,'r')
        #     plt.plot(remPlot,'b')
        #     plt.plot(expPlot,'k')

        #     print(susPlot[-1]+infPlot[-1]+remPlot[-1]+expPlot[-1])

        #     # plt.imshow(infMat,aspect='auto',interpolation='none',vmin=0,vmax=1/(xSize*ySize))
        #     plt.pause(0.001)
        #     plt.draw()

    #plt.plot(np.diff(np.array(susPlot)),np.diff(np.array(infPlot)+np.array(expPlot))) #Plots the phase diagram

    plt.plot(susPlot, 'g')
    plt.plot(infPlot, 'r')
    plt.plot(remPlot, 'b')
    plt.plot(expPlot, 'k')
plt.show()

# sp = np.fft.fft(susPlot)
# freq = np.fft.fftfreq(len(susPlot))
# plt.semilogy(freq, np.abs(sp))
# plt.show()
Example #38
    # we can duplicate the list here without worry as it will be copied into new python processes
    # thus creating separate copies of sd
    print("Running parallel generation of surrogates and SVD")
    slam_list = pool.map(compute_surrogate_cov_eigvals, [(sd, Ud)] * NUM_SURR)
    
    # rearrange into numpy array (can I use vstack for this?)
    for i in range(len(slam_list)):
        slam[i, :], maxU[i, :] = slam_list[i]
        
    maxU.sort(axis = 0)
        
    print("Saving computed spectra ...")
                
    # save the results to file
    with open('data/slp_eigvals_surrogates.bin', 'wb') as f:
        cPickle.dump([dlam, slam, sd.model_orders(), sd.lons, sd.lats], f)
    
    plt.figure()
    plt.plot(np.arange(NUM_EIGS) + 1, dlam, 'ro-')
    plt.errorbar(np.arange(NUM_EIGS) + 1, np.mean(slam, axis = 0), np.std(slam, axis = 0) * 3, fmt = 'g-')
    
    plt.figure()
    plt.errorbar(np.arange(NUM_EIGS) + 1, np.mean(maxU, axis = 0), np.std(maxU, axis = 0) * 3, fmt = 'g-')
    plt.plot(np.arange(NUM_EIGS) + 1, np.amax(maxU, axis = 0), 'r-')
    plt.plot(np.arange(NUM_EIGS) + 1, np.amin(maxU, axis = 0), 'r-')
    plt.plot(np.arange(NUM_EIGS) + 1, maxU[94, :], 'bo-', linewidth = 2)
    plt.plot(np.arange(NUM_EIGS) + 1, np.amax(np.abs(Ud), axis = 0), 'kx-', linewidth = 2)

    plt.show()
    print("DONE.")
Example #39
 def plot(self):
     """Plot this timeseries."""
     pl.plot(self.values)
Example #40
import matplotlib.pylab as pl
import numpy as np

# # Comparison of hidden-layer activation functions (ReLU vs. Sigmoid)
x1 = np.linspace(0, 10000, 1000)
y1 = np.loadtxt('./activation_function/relu85.data')

x2 = np.linspace(0, 10000, 1000)
y2 = np.loadtxt('./activation_function/sigmoid63.data')

pl.plot(x1, y1, 'r', linewidth=2.0, linestyle='-', label='ReLu,Neurons=8,ALR')
pl.plot(x2,
        y2,
        'k',
        linewidth=2.0,
        linestyle='-',
        label='Sigmoid,Neurons=8,ALR')
# pl.xlabel('Iteration', fontsize=13, fontweight='bold')
# # pl.xlabel('*1000', horizontalalignment=right)
# pl.ylabel('loss', fontsize=13, fontweight='bold')
# pl.legend()
# # print(p1)
# pl.show()
#
# # Comparison of a dynamic vs. a fixed learning rate
# x3 = np.linspace(0, 10000, 1000)
# y3 = np.loadtxt('learning_rate0.data')
#
# x4 = np.linspace(0, 10000, 1000)
# y4 = np.loadtxt('learning_rate_s.data')
#
Example #41
def estimate_fit_parameters(phi, noisy_squid_curve, nharmonics_to_estimate=5):

    min_acorr_dist_from_zero = len(phi) * 0.1

    ##find period from autocorrelation
    lags, corrs = autocorr(noisy_squid_curve)

    #find peaks in autocorrelation vs lag
    from scipy.signal import find_peaks
    peaks, _ = find_peaks(corrs, height=0)

    plt.figure()
    plt.plot(lags, corrs)
    plt.plot(peaks, corrs[peaks], "x")

    sorted_peaks = [
        pk for _, pk in sorted(zip(corrs[peaks], peaks), reverse=False)
    ]

    print(sorted_peaks[:4])
    try:
        phi0_idx = next(pk for pk in sorted_peaks
                        if pk > min_acorr_dist_from_zero)
    except StopIteration:
        # no autocorrelation peak lies far enough from zero lag
        return None

    phi0 = np.abs(phi[phi0_idx] - phi[0])

    print('phi0=', phi0)

    #plot cosine with same amplitude and period
    ymax = np.max(noisy_squid_curve)
    ymin = np.min(noisy_squid_curve)
    yspan = ymax - ymin
    yoffset = yspan / 2. + ymin
    harmonic = lambda n, ph, phoff, amplitude: (amplitude) * np.cos(n * (
        ph - phoff) * (2. * np.pi / phi0))
    first_harmonic_guess = lambda ph, phoff: harmonic(1, ph, phoff, (
        yspan / 2)) + harmonic(0, ph, phoff, (yoffset))

    #now correlate the first harmonic guess against the
    #SQUID curve
    dphi = np.abs(phi[1] - phi[0])
    testphoffs = np.linspace(0, phi0, int(np.floor(phi0 / dphi) + 1))
    corrs = []
    for testphoff in testphoffs:
        y1 = first_harmonic_guess(phi, testphoff)
        y2 = noisy_squid_curve

        y1 = (y1 - np.mean(y1))
        y2 = (y2 - np.mean(y2))

        corr = np.corrcoef(y1, y2)[0, 1]
        corrs.append(corr)

    # should just be able to find the maximum of this correlation
    phioffset = testphoffs[np.argmax(corrs)]

    # plot harmonics only over the largest possible number of SQUID periods.  May only be 1.
    lower_phi_full_cycles = (np.min(phi) + phioffset) % (phi0) + np.min(phi)
    upper_phi_full_cycles = np.max(phi) - (np.max(phi) - phioffset) % phi0
    phi_full_cycle_idxs = np.where((phi > lower_phi_full_cycles)
                                   & (phi < upper_phi_full_cycles))
    phi_full_cycles = phi[phi_full_cycle_idxs]

    ##overplot squid curve, squid curve + noise,
    ##and as many full periods as overlap
    #plt.figure()
    #plt.plot(phi,noisy_squid_curve)
    #plt.plot(phi,squid_curve)
    #plt.plot(phi,first_harmonic_guess(phi,phioffset),'r--')

    #plt.ylim(ymin-yspan/10.,ymax+yspan/10.)
    #plt.plot([lower_phi_full_cycles,lower_phi_full_cycles],plt.gca().get_ylim(),c='gray',ls='-',lw=2,alpha=0.5)
    #plt.plot([upper_phi_full_cycles,upper_phi_full_cycles],plt.gca().get_ylim(),c='gray',ls='-',lw=2,alpha=0.5)

    # correlate some harmonics and overplot!
    fit_guess = np.zeros_like(noisy_squid_curve)

    # add constant
    fit_guess += np.mean(noisy_squid_curve)

    # mean subtract the data and this harmonic
    d = noisy_squid_curve[phi_full_cycle_idxs]
    dm = np.mean(d)
    d_ms = d - dm

    est = [phi0, phioffset, dm]

    for n in range(1, nharmonics_to_estimate):
        # if 1/2, peak-to-peak amplitude is 1
        A = 1 / 2.
        this_harmonic = lambda ph: harmonic(n, ph, phioffset, A)

        h = this_harmonic(phi_full_cycles)
        hm = np.mean(h)
        h_ms = h - hm

        # sort of inverse dft them
        Xh = np.sum(d_ms * h_ms)

        # add this harmonic
        fit_guess += Xh * this_harmonic(phi)
        est.append(Xh)

        #print('n=',n,'Xh=',Xh)

    # match span of harmonic guess sum and add offset from data
    normalization_factor = (np.max(d_ms) - np.min(d_ms)) / (np.max(fit_guess) -
                                                            np.min(fit_guess))
    fit_guess *= normalization_factor
    # also scale parameter guesses we pass back
    est = np.array(est)
    est[3:] *= normalization_factor

    fit_guess += dm

    #plt.plot(phi,fit_guess,'c--')

    return est
Example #42
def main(unused_args):
    tdLoader = TrafficDataLoader(
        'internet-data/data/internet-traffic-11-cities-5min.csv', max_norm=5.)
    tdConfig = TrafficDataConfig()
    tmConfig = TrafficRNNConfig(tdConfig)
    batch_size = tmConfig.batch_size

    seq_input, seq_target = tdLoader.get_rnn_input(tdConfig)

    print(seq_input.shape, seq_target.shape)
    data = dict()
    data['seq_input'] = seq_input
    data['seq_target'] = seq_target
    data['early_stop'] = tdConfig.batch_size

    is_training = True
    save_graph = False

    with tf.Graph().as_default(), tf.Session() as session:
        model = TrafficRNN(is_training=True, config=tmConfig)

        saver = tf.train.Saver()
        merged = None
        writer = None

        if is_training and save_graph:
            writer = tf.train.SummaryWriter('/tmp/rnn_logs', session.graph_def)

        tf.initialize_all_variables().run()

        decay = .8
        if is_training:
            lr_value = 1e-3
            for epoch in range(tmConfig.max_epoch):
                # decay the learning rate as training progresses; the thresholds
                # must be checked from the largest down, otherwise the first
                # branch shadows all later ones
                if epoch > 250:
                    lr_value = 1e-8
                elif epoch > 200:
                    lr_value = 1e-7
                elif epoch > 100:
                    lr_value = 1e-6
                elif epoch > 75:
                    lr_value = 1e-4
                elif epoch > 10:
                    lr_value = 1e-3

                model.assign_lr(session, lr_value)

                net_outs_all = np.array([])

                error, net_outs_all = run_epoch(session, model, data,
                                                model.train_op, tdConfig)
                error, net_outs_all = run_epoch(session, model, data,
                                                tf.no_op(), tdConfig, writer)
                print(net_outs_all.shape, seq_target.shape)
                print('Epoch %d: %s' % (epoch, error))

                if epoch == 0:
                    plt.figure(1, figsize=(20, 10))
                    plt.ion()
                    plt.ylim([-1, 6])
                    plt.plot(range(tdConfig.n_steps), seq_target, 'b-',
                             range(tdConfig.n_steps), net_outs_all, 'r-')
                    plt.show()
                    time.sleep(20)
                else:
                    plt.clf()
                plt.ylim([-1, 6])
                plt.plot(range(tdConfig.n_steps), seq_target, 'b-',
                         range(tdConfig.n_steps), net_outs_all, 'r-')
                img_loc = 'out-img/epoch-%05d.png' % (epoch)
                plt.savefig(img_loc)
                plt.draw()
                time.sleep(.1)

                if epoch > 40 and epoch % 20 == 9:
                    outfile = 'internet-data/saved-models/traffic-rnn-hid-%d-batch-%d-window-%d-lag-%d.chkpnt' % (
                        tmConfig.num_hidden, tdConfig.batch_size,
                        tdConfig.window_size, tdConfig.lag)
                    saver.save(session, outfile, global_step=epoch)
        else:
            saved_vars = 'internet-data/saved-models/traffic-rnn-hid-%d-batch-%d-window-%d-lag-%d.chkpnt-%d' % (
                tmConfig.num_hidden, tdConfig.batch_size, tdConfig.window_size,
                tdConfig.lag, tmConfig.max_epoch - 1)
            saver.restore(session, saved_vars)

        train_error, train_outs_all = run_epoch(session, model, data,
                                                tf.no_op(), tdConfig)

        testDataConfig = TestConfig()
        test_seq_input, test_seq_target = tdLoader.get_rnn_input(
            testDataConfig)

        test_data = dict()
        test_outs_all = np.array([])
        test_data['seq_input'] = test_seq_input
        test_data['seq_target'] = test_seq_target
        test_data['early_stop'] = testDataConfig.batch_size
        test_error, test_outs_all = run_epoch(session, model, test_data,
                                              tf.no_op(), testDataConfig)

        upper_curve = test_outs_all + .1 * test_outs_all
        lower_curve = test_outs_all - .1 * test_outs_all
        shift_left = np.zeros(test_outs_all.shape)
        shift_right = np.zeros(test_outs_all.shape)
        shift_left[:-18] = test_outs_all[18:]
        shift_right[18:] = test_outs_all[:-18]

        curve1 = np.maximum(upper_curve, shift_left)
        curve1 = np.maximum(curve1, shift_left)
        curve1 = np.maximum(curve1, shift_right)
        curve2 = np.minimum(lower_curve, shift_right)
        curve2 = np.minimum(curve2, upper_curve)
        curve2 = np.minimum(curve2, shift_left)

        print(test_outs_all.shape)

        x = range(len(test_outs_all))
        plt.figure(3, figsize=(20, 10))
        plt.ioff()

        plt.plot(x, test_outs_all, 'b-', alpha=1)
        plt.plot(x, test_seq_target, 'g-', alpha=1)
        plt.plot(x, curve1, 'r-', alpha=.1)
        plt.plot(x, curve2, 'r-', alpha=.1)
        plt.fill_between(x, curve1, curve2, color='grey', alpha=.3)
        plt.show()

        print('Test error: %s' % test_error)
        plt.figure(2, figsize=(20, 10))
        plt.plot(range(tdConfig.n_steps), seq_target, 'b-',
                 range(tdConfig.n_steps), train_outs_all, 'g--')
        plt.plot(
            range(tdConfig.n_steps - 24,
                  tdConfig.n_steps + testDataConfig.n_steps - 24),
            test_seq_target, 'b-')
        plt.plot(
            range(tdConfig.n_steps - 24,
                  tdConfig.n_steps + testDataConfig.n_steps - 24),
            test_outs_all, 'r--')
        plt.show()
        time.sleep(1)
Example #43
    elif try_darpa_2d:
        ntheta = 2
        x,y,z,theta_offset,np,x_ll,L = darpa2gen(nx,ntheta,pad_x=10.0, pad_r_fact=8.)
        write_csv_file(x[:np],y[:np],z[:np],'darpa2_2d')
        domain = build_2d_domain_from_axisymmetric_points(x[:np,:],y[:np,:],x_ll,L,name='darpa2_2d')
        domain.writePoly('darpa2_2d')

    else:
        x,y,np = darpa2gen_orig(nx)

        fout = open('darpa2.dat','w')
        for i in range(np):
            fout.write("%12.5e %12.5e \n" % (x[i],y[i]))

        #
        fout.close()
        import matplotlib
        from matplotlib import pylab

        fig = pylab.figure(1)
        pylab.plot(x[:np],y[:np])
        fig.hold('on')
        y *= -1.0
        pylab.plot(x[:np],y[:np])
        pylab.xlabel('x [ft]'); pylab.ylabel('y [ft]')
        pylab.title('DARPA2 body')
        pylab.axes().set_aspect('equal','datalim')
        pylab.savefig('darpa2.png')
        pylab.show()
Example #44
        print('Training Accuracy : {:<10}'.format(
            gbrt_clf.score(train_sub[features], train_sub['result'])))
        print('x-validation Accuracy: {:<10}'.format(
            clf.score(xv_sub[features], xv_sub['result'])))
        GBRT_scores.append(gbrt_clf.score(xv_sub[features], xv_sub['result']))
        print('Time spent for GBRT: {:6.3f}s'.format(time.time() - start))
        print('The logloss is: {}'.format(
            logloss(xv_sub['result'],
                    gbrt_clf.predict_proba(xv_sub[features])[:, 1])))

import matplotlib.pylab as plt
plt.figure()
plt.subplot(2, 2, 1)
plt.title('Scores of Random Forest')
if RF:
    plt.plot(RF_para, RF_scores, 'o-')
plt.subplot(2, 2, 2)
plt.title('Scores of Neural Network')
if NN:
    plt.semilogx(NN_para, NN_scores, 'o-')
plt.subplot(2, 2, 3)
plt.title('Scores of Logistic Regression')
if GLM:
    plt.semilogx(GLM_para, GLM_scores, 'o-')
plt.subplot(2, 2, 4)
plt.title('Scores of Gradient Boost RT')
if GBRT:
    plt.semilogx(GBRT_para, GBRT_scores, 'o-')
plt.show()

# =============================================================================
Example #45
import numpy as np
import matplotlib.pylab as plt

def step_function(x):
	return np.array(x > 0, dtype=int)

x = np.arange(-5.0, 5.0, 0.1)
y = step_function(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()
Example #46
sgd = SGD(learning_rate=0.01)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
H = model.fit(trainX,
              trainY,
              validation_data=(testX, testY),
              epochs=100,
              batch_size=32)

# evaluate the network
print('[INFO] evaluating network...')
predictions = model.predict(testX, batch_size=32)
print(
    classification_report(testY.argmax(axis=1),
                          predictions.argmax(axis=1),
                          target_names=labelNames))

# plot the training loss and accuracy
plt.style.use('ggplot')
plt.figure()
plt.plot(np.arange(0, 100), H.history['loss'], label='train_loss')
plt.plot(np.arange(0, 100), H.history['val_loss'], label='validation_loss')
plt.plot(np.arange(0, 100), H.history['acc'], label='train_accuracy')
plt.plot(np.arange(0, 100), H.history['val_acc'], label='validation_accuracy')
plt.title('Training Loss and Accuracy')
plt.xlabel('Epoch #')
plt.ylabel('Loss or Accuracy')
plt.legend()
plt.savefig(args['output'])
Example #47
i = 0
data = pd.read_csv(r'D:\College\Project\Final\data\upload_data_kadu1.csv')
#print data.head()
#print '\n Data Types:'
#print data.dtypes
dateparse = lambda dates: pd.to_datetime(dates, format='%Y-%m-%d %H:%M:%S')
data = pd.read_csv(r'D:\College\Project\Final\data\upload_data_kadu1.csv',
                   parse_dates=['datetime'],
                   index_col='datetime',
                   date_parser=dateparse)
#print data.head()
#print data.index
co_data = data['field2']
#print co_data.head(10)
plt.plot(co_data)

test_stationarity(co_data)
i = i + 1
co_log = np.log(co_data)
plt.plot(co_log)
moving_avg = pd.Series(co_log).rolling(window=12, center=False).mean()
plt.plot(co_log)
plt.plot(moving_avg, color='red')
co_log_moving_avg_diff = co_log - moving_avg
co_log_moving_avg_diff.head(12)
co_log_moving_avg_diff.dropna(inplace=True)
test_stationarity(co_log_moving_avg_diff)
i = i + 1
expwighted_avg = pd.Series(co_log).ewm(halflife=12,
                                       ignore_na=True,
Example #48

def forward(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = identity_function(a3)

    return y


network = init_network()
x = np.array([1.0, 0.5])
y = forward(network, x)
print(y)
y1 = step_function(x)
y2 = sigmoid(x)
y3 = relu(x)

plt.figure()
plt.plot(x, y1)
plt.plot(x, y2)
plt.plot(x, y3)
plt.ylim(-0.1, 1.1)
plt.show()
Example #49
# Train the model
model.compile(
  optimizer=tf.keras.optimizers.SGD(learning_rate=0.005, momentum=0.9),
  loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=0.1),
  metrics=['accuracy'])

history = model.fit(
    train_ds,
    epochs=25, 
    validation_data=valid_ds).history

# plot the development of the accuracy and loss during training
plt.figure(figsize=(12,4))
plt.subplot(1,2,(1))
plt.plot(history['accuracy'],linestyle='-.')
plt.plot(history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='lower right')
plt.subplot(1,2,(2))
plt.plot(history['loss'],linestyle='-.')
plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper right')
plt.show()

Example #50
# Upper and lower bound for cos integral
a = -10.0
b = 10.0

# maturities
T = [0.2, 0.4, 0.6, 1.0]
markers = ['.', '+', '*', '^']
no_maturities = len(T)


for i in range(0, no_maturities):
    cf_merton = partial(JumpDiffusionCharesticFunction.get_merton_cf, t=T[i], x=x0, sigma=sigma, jumpmean=jumpmean,
                        jumpstd=jumpstd, lambda_t=lambda_t)

    # check martingale
    aux = cf_merton(np.asfortranarray(3.0))

    cos_price_merton = COSRepresentation.get_european_option_price(TypeEuropeanOption.CALL, a, b, 256, k_s, cf_merton)

    iv_smile_merton = []
    for k in range(0, no_strikes):
        iv_smile_merton.append(implied_volatility(cos_price_merton[k], f0, f0, T[i], 0.0, 0.0, 'c'))

    plt.plot(k_s, iv_smile_merton, label='T=%s' % T[i], linestyle='--', color='black', marker=markers[i])


# plt.ylim([0.0, 1.0])
plt.xlabel('K')
plt.legend()
plt.show()
Example #51
                    .format(batch_idx + 1, iterations, loss.item(),
                            loss_v.item(), accuracy))

    scheduler.step(accuracy)

fi.close()

print('Finished Training')

# In[ ]:

loss_ = np.asarray(loss_)

iters = np.linspace(0, len(loss_), len(loss_))

plt.plot(iters, loss_, label='Training loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Train Loss')
plt.legend()
plt.savefig('Train_loss_Classification.jpg')
plt.close()

# In[ ]:

loss_val = np.asarray(loss_val)

iters = np.linspace(0, len(loss_val), len(loss_val))

plt.plot(iters, loss_val, label='Training loss')
plt.xlabel('Epoch')
Example #52
        rabi[i + 1] = rabi[i] + 1j * g * (1e-6) * dz * rho_z
    return rabi


t_step = 0.5
tlist = np.arange(0, 100.0, 0.5)
z_steps = 200
min_z = 0
max_z = 20.
dz = (max_z - min_z) / z_steps
zlist = np.arange(min_z, max_z, dz)
t0 = 10
sigma = 10.
det = 0
peak = gamma / 5
rho_0 = basis(2, 0) * basis(2, 0).dag()
rabi_0 = input_pulse(tlist, peak, t0, sigma)
time_evolution = solve_Lindblad(tlist, rabi_0, rho_0)
s_1 = time_evolution.expect[0]
rho_eg = time_evolution.expect[1]
N_z = 1.
spatial_evolution = solve_spatial(rho_eg[100], N_z, rabi_0[100], z_steps, dz,
                                  g)

data = np.array([tlist, s_1])
# plt.plot(tlist,s_1)
plt.plot(zlist, spatial_evolution)
plt.legend(('ground', 'pulse', 'field'), loc='upper right')
plt.show()
# plt.savefig('mbe.pdf')
# np.savetxt('populations_2level.dat', data.T)
Example #53
# option information
# options
strikes = np.linspace(70.0, 130.0, 30)
no_strikes = len(strikes)
f0 = 100
T = 0.1
notional = 1.0
options = []
for k_i in strikes:
    options.append(EuropeanOption(k_i, notional, TypeSellBuy.BUY, TypeEuropeanOption.CALL, f0, T))


map_output = SABR_Engine.get_path_multi_step(0.0, T, parameters, f0, no_paths, no_time_steps,
                                             Types.TYPE_STANDARD_NORMAL_SAMPLING.REGULAR_WAY, rnd_generator)

iv_vol = []
for i in range(0, no_strikes):
    rnd_generator.set_seed(seed)
    mc_option_price = options[i].get_price_control_variate(map_output[Types.SABR_OUTPUT.PATHS][:, -1],
                                                           map_output[Types.SABR_OUTPUT.INTEGRAL_VARIANCE_PATHS])
    
    iv_vol.append(implied_volatility(mc_option_price[0], f0, strikes[i], T, 0.0, 0.0, 'c'))


plt.plot(strikes, iv_vol, label="rho=%s" % rho, marker=".", linestyle="--", color="black")

plt.xlabel("K")
plt.legend()
plt.show()
Example #54
fig = pylab.figure()
ax = fig.add_subplot(111)

# vary initial conditions
ystart = 0.1
for xstart in numpy.linspace(1., 1.4, 3):

    # initial condition
    x = numpy.array([xstart, ystart, 0.])

    # solve
    solExact = odeint(velocityExact, x, ts)
    solNodal = odeint(velocityFaceAsNodal2, x, ts)
    solFace = odeint(velocityFace2, x, ts)

    pylab.plot(solExact[:, 0], solExact[:, 1], 'g-')
    pylab.plot(solNodal[:, 0], solNodal[:, 1], 'r-')
    pylab.plot(solFace[:, 0], solFace[:, 1], 'b-')
    pylab.plot([solExact[0, 0]], [solExact[0, 1]], 'k*', markersize=10)

pylab.legend(['exact', 'nodal', 'face'])

# plot the stream function
x = numpy.linspace(origin[0], origin[0] + ls[0], 101)
y = numpy.linspace(origin[1], origin[1] + ls[1], 101)
xx, yy = numpy.meshgrid(x, y)
xxp = cos_angle * xx - sin_angle * yy
yyp = sin_angle * xx + cos_angle * yy
pylab.contour(x, y, numpy.sin(xxp)**2 + yyp**2, colors='k')
ax.set_aspect('equal')
Example #55
# Temp1=np.loadtxt('KE_ave10000.dat',usecols=[2])/(10**(-11.)*1.38064852*2)
ke_ave2 = np.loadtxt('ke_ave.dat',
                     usecols=[3]) / (10**(-11.) * 1.38064852 * 0.5)
Temp2 = np.loadtxt('ke_ave.dat', usecols=[2]) / (10**(-11.) * 1.38064852 * 0.5)

begin = 1
end = int(sys.argv[1])

# if(len(sys.argv)>3):
# =int(sys.argv[3])
if (len(sys.argv) > 2):
    count = int(sys.argv[2])
    mean = np.mean(ke_ave2[count:end])
    print('mean temp=', mean)
x = [i + 1 for i in range(begin, end)]
plt.plot(x, Temp2[begin:end], '--', linewidth=2.0, color='g')
# plt.plot(x,ke_ave1,'--',linewidth=2.0,color='g')
plt.plot(x, ke_ave2[begin:end], '-', linewidth=2.0, color='r')
#plt.plot(x,np.ones(len(x))*400,'-',linewidth=2.0,color='b')
if (len(sys.argv) > 3):
    plt.plot(x, np.ones(len(x)) * (Temp2[1]), '--', linewidth=2.0, color='b')
    plt.plot(x,
             np.ones(len(x)) * (Temp2[1] + 100),
             '--',
             linewidth=2.0,
             color='b')
    plt.plot(x,
             np.ones(len(x)) * (Temp2[1] - 100),
             '--',
             linewidth=2.0,
             color='b')
Example #56
def _get_OP_(x, y):
    print("CF_gl_md4")
    p_guess = (0.01, 1)
    p_gl_md4, s_gl_md4 = scipy.optimize.leastsq(_CF_gl_md4_residuals,
                                                p_guess,
                                                args=(x, y))
    ts_gl_md4, S_gl_md4 = p_gl_md4
    print("CF_gl")
    p_guess = (1, 0.5, 1, 0.1)
    p_gl, s_gl = scipy.optimize.leastsq(_CF_gl_residuals, p_guess, args=(x, y))
    tm_gl, S_gl, Sf_gl, te_gl = p_gl
    print("CF_ex")
    p_guess = (4, 0.5, 1, 1)
    p_ex, s_ex = scipy.optimize.leastsq(_CF_ex_residuals, p_guess, args=(x, y))
    tm_ex, S_ex, Sf_ex, te_ex = p_ex
    print("CF_or")
    p_guess = (4, 0.5, 1)
    p_or, s_or = scipy.optimize.leastsq(_CF_or_residuals, p_guess, args=(x, y))
    tm_or, S_or, te_or = p_or
    print("CF_davi")
    p_guess = (4, 1, 1, 0.001, 1)
    p_davi, s_davi = scipy.optimize.leastsq(_CF_davi_residuals,
                                            p_guess,
                                            args=(x, y),
                                            maxfev=12000)
    tm_davi, S_davi, Sf_davi, tf_davi, ts_davi = p_davi
    print("Ci(t) for simple cases (Sf^2 == 1) by Eq (9) on Chen - black - S2 = " +
          str(p_gl_md4[1]**2))
    print(p_gl_md4)
    r = _CF_gl_md4_residuals(p_gl_md4, x, y)
    r = r**2
    #       print(sum(r))
    print(numpy.corrcoef(y, _CF_gl_md4(p_gl_md4, x))[0][1])
    print("Ci(t) for complex cases or Eq 11 on Chen - magenta - S2 = " +
          str(p_gl[1]**2))
    print(p_gl)
    #	print(sum(_CF_gl_residuals(p_gl,x,y)**2))
    print(numpy.corrcoef(y, _CF_gl(p_gl, x))[0][1])
    print("C(t) for complex residues as Shaw 2005 - green - S2 = " +
          str(p_ex[1]**2))
    print(p_ex)
    #	print(sum(_CF_ex_residuals(p_ex,x,y)**2))
    print(numpy.corrcoef(y, _CF_ex(p_ex, x))[0][1])
    print("C(t) for simple residues as Shaw 2005 - red - S2 = " + str(p_or[1]**2))
    print(p_or)
    #	print(sum(_CF_or_residuals(p_or,x,y)**2))
    print(numpy.corrcoef(y, _CF_or(p_or, x))[0][1])
    print("C(t) for complex residues Davi 2012 - blue - S2 = " + str(p_davi[1]**2))
    print(p_davi)
    #	print(sum(_CF_davi_residuals(p_davi,x,y)**2))
    print(numpy.corrcoef(y, _CF_davi(p_davi, x))[0][1])
    #	print("S = " + str(S))

    #	xp = numpy.linspace(x.min(), x.max(), numpy.floor((x.max()-x.min())*100)
    p_cf_gl_md4 = _CF_gl_md4(p_gl_md4, x)
    p_cf_gl = _CF_gl(p_gl, x)
    p_cf_ex = _CF_ex(p_ex, x)
    p_cf_davi = _CF_davi(p_davi, x)
    p_cf_or = _CF_or(p_or, x)

    mp.plot(x, y, 'o', x, p_cf_ex, 'g-', x, p_cf_or, 'r-', x, p_cf_davi, 'b-',
            x, p_cf_gl, '-m', x, p_cf_gl_md4, '-k')
    mp.grid(True)
    mp.xscale('log')
    mp.show()
Example #57
    '''
	if (j >= 15):
	    sim.spring_l0_offset = ((np.random.random(sim.spring_connections.shape[0])*0.2+0.2))
	    print sim.time*1e-6
	    for i in xrange(50):
		sim.simulate()
		pos1.append(sim.nodes_eucl.copy())
		nForce1.append(sim.nodal_forces_reduced)'''

#plot and save the results

iterations = (np.arange(0., j + 1.) / 1000)[np.newaxis].T

plt.figure(3)
print(np.array(pos1).shape)
plt.plot(np.array(pos1)[:, :, 2], 'b')
plt.title('Z Direction Position')
plt.ylabel('vertical position nodes')
plt.xlabel('time (s)')
plt.savefig('Z_Direction_Position')
np.savetxt('Z_Position.txt',
           np.column_stack((iterations, np.array(pos1)[:, :, 2])),
           delimiter=',',
           fmt='%f')
np.savetxt('Y_Position.txt',
           np.column_stack((iterations, np.array(pos1)[:, :, 1])),
           delimiter=',',
           fmt='%f')
np.savetxt('X_Position.txt',
           np.column_stack((iterations, np.array(pos1)[:, :, 0])),
           delimiter=',',
Example #58
                                                    bw='normal_reference')
         KL0 = np.mean(np.log(kernel0.pdf(z_samples0)) - truepost0)
         KL1 = np.mean(np.log(kernel1.pdf(z_samples1)) - truepost1)
         KL2 = np.mean(np.log(kernel2.pdf(z_samples2)) - truepost2)
         KL3 = np.mean(np.log(kernel3.pdf(z_samples3)) - truepost3)
         KL4 = np.mean(np.log(kernel4.pdf(z_samples4)) - truepost4)
         indexuu = int(j / 100)
         KLAVG = np.mean([KL0, KL1, KL2, KL3, KL4])
         if KLAVG < 1.35 and converge[0] == 0:
             converge[0] = int(j)
         KLAVGBRUH[indexuu] = KLAVG
 np.savetxt("JCADVgudlog truklavg.csv", KLAVGBRUH, delimiter=",")
 np.savetxt("JCADVgudlog estimatorloss.csv", ISTHISLOSS, delimiter=",")
 np.savetxt("JCADVgudlog nelbo.csv", NELBRUH, delimiter=",")
 np.savetxt("JCADVgudlog converge.csv", converge)
 plt.plot(KLAVGBRUH)
 plt.axvline(x=(converge[0] / 100), ls='--')
 plt.xlabel('Iterations (x100)')
 plt.ylabel('Avg KL Div')
 plt.savefig("JCADVgudlog truklavg")
 plt.close()
 plt.plot(ISTHISLOSS)
 plt.axvline(x=converge[0], ls='--')
 plt.xlabel('Iterations')
 plt.ylabel('Estimator Loss')
 plt.savefig("JCADVgudlog estimatorloss")
 plt.close()
 plt.plot(NELBRUH)
 plt.axvline(x=converge[0], ls='--')
 plt.xlabel('Iterations')
 plt.ylabel('NELBO')
Example #59
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    # function header restored to match the call below; x_history records the path
    x = init_x
    x_history = []

    for i in range(step_num):
        x_history.append( x.copy() )

        grad = numerical_gradient(f, x)
        x -= lr * grad

    return x, np.array(x_history)


def function_2(x):
    return x[0]**2 + x[1]**2

init_x = np.array([-3.0, 4.0])    

#lr = 0.1
lr = 10
step_num = 100
x, x_history = gradient_descent(function_2, init_x, lr=lr, step_num=step_num)

print(x)
plt.plot( [-5, 5], [0,0], '--b')
plt.plot( [0,0], [-5, 5], '--b')
plt.plot(x_history[:,0], x_history[:,1], 'o')

plt.xlim(-3.5, 3.5)
plt.ylim(-4.5, 4.5)
plt.xlabel("X0")
plt.ylabel("X1")
plt.show()
Example #60
import numpy as np

print(np.empty((2, 3, 2)))
bouya = np.arange(0, 15)
print(bouya.dtype)
myFloatArr = bouya.astype(np.float64)
print(myFloatArr.dtype)

arr = np.array([[1., 2., 3.], [4., 5., 6.]])
print(arr)
print(arr * arr)
print(arr - arr)
print(1 / arr)
print(arr**0.5)

arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(arr2d[2][1])
print(arr2d[2, 1])

#Random walk example
import random
from matplotlib import pylab as plt
position = 0
walk = [position]
steps = 100
for i in range(steps):
    step = 1 if random.randint(0, 1) else -1
    position += step
    walk.append(position)

plt.plot(range(steps + 1), walk)
plt.show()