Example No. 1
def plot_freq(series,sampling_frequency=1):
    """
        Purpose: Illustrate the power spectrum calculation for a given data series
        Inputs: series - the data array whose power spectrum is plotted
                sampling_frequency - (optional) sampling rate in Hz, used to scale
                                     the time and frequency axes (default 1)
        Outputs: none
    """
    t=sp.arange(0,len(series))/(1.0*sampling_frequency)
    pylab.subplot(2,1,1)
    pylab.plot(t,series)
    pylab.xlabel('Time')
    pylab.ylabel('Amplitude')
    pylab.subplot(2,1,2)

    n = len(series)             # length of the signal
    k = sp.arange(n)
    T = n/sampling_frequency
    frq = k/T                   # two sides frequency range
    frq = frq[range(n//2)]      # one side frequency range

    Y = np.fft.fft(series)/n    # fft computing and normalization
    Y = Y[range(n//2)]

    pylab.plot(frq,abs(Y),'r') # plotting the spectrum
    pylab.xlabel('Freq (Hz)')
    pylab.ylabel('|Y(freq)|')
    pylab.show()
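A minimal usage sketch (not part of the original example): it assumes the imports the function body relies on (numpy as np, scipy as sp, and pylab) and feeds a synthetic two-tone signal.

fs = 100.0                                     # sampling frequency in Hz (illustrative)
t = np.arange(0, 2.0, 1.0/fs)                  # 2 seconds of samples
sig = np.sin(2*np.pi*5*t) + 0.5*np.sin(2*np.pi*20*t)
plot_freq(sig, sampling_frequency=fs)          # spectrum should peak near 5 Hz and 20 Hz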
Example No. 2
 def setUp(self):
     Reader = fitsGBT.Reader("./testdata/testfile_guppi_combined.fits",
                             feedback=0)
     self.Blocks = Reader.read((), 1)
     Data = self.Blocks[0]
     Data.calc_freq()
     params = {'dm_deweight_time_slope' : True}
     Maker = dirty_map.DirtyMapMaker(params, feedback=0)
     n_chan = Data.dims[-1]
     Maker.n_chan = n_chan
     Maker.pols = (1, 2, 3, 4)
     Maker.pol_ind = 0
     Maker.band_centres = (Data.freq[Data.dims[-1]//2],)
     Maker.band_ind = 0
     map = sp.zeros((Data.dims[-1], 32, 15))
     map = al.make_vect(map, ('freq', 'ra', 'dec'))
     map.set_axis_info('freq', Data.freq[Data.dims[-1]//2],
                       Data.field['CRVAL1'])
     map.set_axis_info('ra', 218, 0.075)
     map.set_axis_info('dec', 2, 0.075)
     Maker.map = map
     self.Maker = Maker
     # The variances of each channel.
     self.norms = (sp.arange(1., 2., 0.25)[:,None]
                   * (sp.arange(1., 2., 1./n_chan)[None,:]))
     for Data in self.Blocks:
         Data.data[...] = random.randn(*Data.data.shape)
         Data.data *= sp.sqrt(self.norms[:,None,:])
         Data.data += 50.
Example No. 3
def makesumrule(ptype,plen,ts,lagtype='centered'):
    """ This function will return the sum rule.
        Inputs
            ptype - The type of pulse.
            plen - Length of the pulse in seconds.
            ts - Sample time in seconds.
            lagtype - One of 'centered', 'forward', or 'backward'.
        Output
            sumrule - A 2 x nlags numpy array that holds the summation rule.
    """
    nlags = sp.round_(plen/ts)
    if ptype.lower()=='long':
        if lagtype=='forward':
            arback=-sp.arange(nlags,dtype=int)
            arforward = sp.zeros(nlags,dtype=int)
        elif lagtype=='backward':
            arback = sp.zeros(nlags,dtype=int)
            arforward=sp.arange(nlags,dtype=int)
        else:
            arback = -sp.ceil(sp.arange(0,nlags/2.0,0.5)).astype(int)
            arforward = sp.floor(sp.arange(0,nlags/2.0,0.5)).astype(int)
        sumrule = sp.array([arback,arforward])
    elif ptype.lower()=='barker':
        sumrule = sp.array([[0],[0]])
    return sumrule
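A short usage sketch, assuming scipy is imported as sp (as the function requires) and using illustrative pulse parameters:

plen = 280e-6                                   # 280 us pulse (illustrative)
ts = 20e-6                                      # 20 us sample time
sumrule = makesumrule('long', plen, ts, lagtype='centered')
print(sumrule.shape)                            # (2, 14): back/forward offsets for 14 lags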
Example No. 4
 def range_query_geno_local(self, idx_start=None, idx_end=None, chrom=None,pos_start=None, pos_end=None,windowsize=0):
     """
     return an index for a range query on the genotypes
     """
     if idx_start is None and idx_end is None and pos_start is None and pos_end is None and chrom is None:
         return  sp.arange(0,self.num_snps)
     elif idx_start is not None or idx_end is not None:
         if idx_start is None:
             idx_start = 0
         if idx_end is None:
             idx_end = self.num_snps
         res =  sp.arange(idx_start,idx_end)
         return res
     elif chrom is not None:
         res = self.geno_pos["chrom"]==chrom
     elif pos_start is not None or pos_end is not None:
         if pos_start is not None and pos_end is not None:
             assert pos_start[0] == pos_end[0], "chromosomes have to match"
         
         if pos_start is None:
             idx_larger =  sp.ones(self.num_snps,dtype=bool)
         else:
             idx_larger = (self.geno_pos["pos"]>=(pos_start[1]-windowsize)) & (self.geno_pos["chrom"]==pos_start[0])
         if pos_end is None:
             idx_smaller =  sp.ones(self.num_snps,dtype=bool)
         else:
             idx_smaller = (self.geno_pos["pos"]<=(pos_end[1]+windowsize)) & (self.geno_pos["chrom"]==pos_end[0])
         res = idx_smaller & idx_larger
     else:
         raise Exception("This should not be triggered")#res =  sp.ones(self.geno_pos.shape,dtype=bool)
     return  sp.where(res)[0]
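The pos_start/pos_end arguments are (chromosome, position) pairs, as the pos_start[0]/pos_start[1] indexing above implies. Illustrative calls follow; the object geno holding num_snps and geno_pos is hypothetical.

idx = geno.range_query_geno_local(idx_start=100, idx_end=200)    # SNP indices 100..199
idx = geno.range_query_geno_local(chrom=3)                       # all SNPs on chromosome 3
idx = geno.range_query_geno_local(pos_start=(3, 1.5e6),
                                  pos_end=(3, 2.5e6),
                                  windowsize=10000)              # base-pair window on chromosome 3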
Example No. 5
    def test_covariate_shift(self):
        n_sample = 100
        # Biased training
        var_bias = .5**2
        mean_bias = .7
        x_train = SP.random.randn(n_sample)*SP.sqrt(var_bias) + mean_bias
        y_train = self.complete_sample(x_train)

        # Unbiased test set
        var = .3**2
        mean = 0

        x_test = SP.random.randn(n_sample)*SP.sqrt(var) + mean
        x_complete = SP.hstack((x_train, x_test))

        kernel = utils.getQuadraticKernel(x_complete, d=1) +\
            10 * SP.dot(x_complete.reshape(-1, 1), x_complete.reshape(1, -1))
        kernel = utils.scale_K(kernel)
        kernel_train = kernel[SP.ix_(SP.arange(x_train.size),
                                     SP.arange(x_train.size))]
        kernel_test = kernel[SP.ix_(SP.arange(x_train.size, x_complete.size),
                             SP.arange(x_train.size))]

        mf = MF(n_estimators=100, kernel=kernel_train, min_depth=0,
                subsampling=False)
        mf.fit(x_train.reshape(-1, 1), y_train.reshape(-1, 1))
        response_gp = mf.predict(x_test.reshape(-1, 1), kernel_test, depth=0)
        self.assertTrue(((response_gp - self.polynom(x_test))**2).sum() < 2.4)
 def test_correlate(self) :
     Data = self.blocks[0]
     Data.calc_freq()
     map = self.map
     gain = 3.45
     const = 2.14
     # Set all data = gain*(cos(time_ind)).
     Data.data[:,:,:,:] = gain*sp.cos(sp.arange(1,11)
                                 [:,sp.newaxis,sp.newaxis,sp.newaxis])
     # Explicitly set time mean to something known.
     Data.data -= ma.mean(Data.data, 0)
     Data.data += gain*const*Data.freq/800.0e6
     # Now the Map.
     map[:,:,:] = 0.0
     # Set 10 pixels to match cos part of data.
     map[:, range(10), range(10)] = (
                 sp.cos(sp.arange(1,11)[None, :]))
     map[:, range(10), range(10)] -= ma.mean(
         map[:, range(10), range(10)], 1)[:, None]
     # Give Map a mean to test things out. Should really have no effect.
     map[...] += 0.352*map.get_axis('freq')[:, None, None]/800.0e6
     # Rig the pointing to point to those 10 pixels.
     def rigged_pointing() :
         Data.ra = map.get_axis('ra')[range(10)]
         Data.dec = map.get_axis('dec')[range(10)]
     Data.calc_pointing = rigged_pointing
     solved_gains = smd.sub_map(Data, map, correlate=True)
     # Now the data should just be gain*const*f, within machine precision.
     Data.data /= gain*Data.freq/800.0e6
     self.assertTrue(sp.allclose(Data.data[:,:,:,:], const))
     self.assertTrue(sp.allclose(solved_gains, gain))
    def audio_cb(self, data):
        rospy.loginfo("Callback received!")
        print "<Previous y_data len: " + str(len(self.y_data))
        self.y_data.extend(data.data)
        #self.y_data = data.data
        print ">After y_data len: " + str(len(self.y_data))
        
        print "--------------"
        print "len of y_data: " + str(len(self.y_data))
        excess_of_data = None
        if len(self.y_data) > self.MAX_DATA:
            print "excess of data: " + str(len(self.y_data) - self.MAX_DATA)
            excess_of_data = len(self.y_data) - self.MAX_DATA
#             self.x_data = arange(self.x_data[excess_of_data], (len(self.x_data) - 1) * 0.01, 0.01).tolist()
            self.y_data = self.y_data[excess_of_data:]
            
        print "<Previous x_data len: " + str(len(self.x_data))
        if excess_of_data:
            new_times = arange(self.x_data[-1], len(self.y_data) * 0.01 + self.x_data[-1], 0.01).tolist()
            print "Initial time: " + str(self.x_data[-1])
            self.x_data = new_times[:len(self.y_data)]
            print "Final time: "+ str(self.x_data[-1])
            
        else: # if we are just adding data to the array
            new_times = arange(self.x_data[-1], len(data.data) * 0.01 + self.x_data[-1], 0.01).tolist()
            self.x_data.extend(new_times[:len(self.y_data)])
        print ">After x_data len: " + str(len(self.x_data))
        
        if len(self.x_data) != len(self.y_data):
            rospy.logerr("Error, not same size")
            exit(0)
def plotSpectrum(y,Fs,image_name):
    """
    Plots a Single-Sided Amplitude Spectrum of y(t)
    """
    n = len(y) # length of the signal
    subplot(2,1,1)
    
    plot(arange(0,n),y)
    xlabel('Time')
    ylabel('Amplitude')
    subplot(2,1,2)
    k = arange(n)
    T = n/Fs
    frq = k/T # two sides frequency range
    frq = frq[range(n//2)] # one side frequency range

    Y = fft(y)/n # fft computing and normalization
    Y = Y[range(n//2)]

    plot(frq,abs(Y),'r') # plotting the spectrum
    xlabel('Freq (Hz)')
    ylabel('|Y(freq)|')
    print "here"
    #show()
    savefig(image_name,dpi=110)
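A usage sketch for the function above. The example's own imports are not shown, so this assumes the pylab-style names it uses (arange, fft, plot, subplot, xlabel, ylabel, savefig) are available in the module, e.g. via from pylab import *.

Fs = 100.0                          # sampling frequency in Hz (illustrative)
t = arange(0, 1, 1/Fs)
y = sin(2*pi*5*t)                   # 5 Hz test tone
plotSpectrum(y, Fs, 'spectrum_5hz.png')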
 def test_rigged_pointing(self) :
     Data = self.blocks[0]
     Data.calc_freq()
     map = self.map
     # Set all data = (f + cal_ind)*time_ind
     Data.data[:,:,:,:] = (sp.arange(-4.5, 5)
                           [:,sp.newaxis,sp.newaxis,sp.newaxis]
                           *(Data.freq/100e6))
     Data.data[...] -= sp.mean(Data.data, 0)
     Data.data[...] += (sp.arange(6,8).reshape((1,1,2,1)) * (Data.freq/100e6) 
                        * sp.arange(-4.5, 5).reshape((10, 1, 1, 1)))
     map[:,:,:] = 0.0
     # Set 10 pixels to match data (except for cal_ind part).
     map[:, range(10), range(10)] = (sp.arange(-4.5, 5)[None,:]
                                     * map.get_axis('freq')[:,None]/100e6)
     # We should be completely insensitive to the map mean.  The following
     # should have no effect.
     map[...] += 0.352*map.get_axis('freq')[:, None, None]/800.0e7
     # Rig the pointing to point to those 10 pixels.
     def rigged_pointing() :
         Data.ra = map.get_axis('ra')[range(10)]
         Data.dec = map.get_axis('dec')[range(10)]
     Data.calc_pointing = rigged_pointing
     smd.sub_map(Data, map)
     # Now data should be just f*time_ind*(cal_ind+6), within 2.0 MHz/2.
     Data.data /= sp.arange(-4.5, 5)[:,sp.newaxis,sp.newaxis,sp.newaxis]
     Data.data /= Data.freq/100e6
     # Relative tol of 1/700, is the frequency bin width.
     self.assertTrue(sp.allclose(Data.data[:,:,0,:], 6.0, rtol=1.0/700))
     self.assertTrue(sp.allclose(Data.data[:,:,1,:], 7.0, rtol=1.0/700))
Example No. 10
	def to_netcdf_file(self, dir=None, echo=False):
		t1 = time.time()
		if dir is None:
			dir = self.data_dir
		filename = "sweep_" + self.last_sweep_time + ".nc"
		filename = os.path.join(dir, filename)
		nc_file = netcdf.netcdf_file(filename, 'w')
		n_range = self.trimmed_sweep.shape[1]
		n_pulse = self.trimmed_sweep.shape[0]
		pulse_dim = nc_file.createDimension("pulse_number", n_pulse)
		pulse_var = nc_file.createVariable("pulse_number", int, ("pulse_number", ))
		pulse_var.units = "pulse_number"
		range_dim = nc_file.createDimension("range", n_range)
		range_var = nc_file.createVariable("range", float, ("range", ))
		range_var.units = "meters"
		amplitude = nc_file.createVariable("amplitude", self.trimmed_sweep.dtype,
			("pulse_number", "range"))
		range_var[:] = sp.arange(0, self.range_resolution * n_range, self.range_resolution)
		pulse_var[:] = sp.arange(n_pulse, dtype=int)
		amplitude[:] = self.trimmed_sweep
		amplitude.units = "ADC_counts"
		amplitude.max_counts = self.ps.getMaxValue()
		amplitude.min_counts = self.ps.getMinValue()
		nc_file.max_range = self.max_range
		nc_file.range_resolution = self.range_resolution
		nc_file.sweep_time = self.last_sweep_time
		nc_file.pulse_rate_nominal = self.pulse_rate
		nc_file.rotation_period_nominal = self.rotation_period
		nc_file.samples_per_segment = self.samples_per_segment
		nc_file.close()
		if echo:
			print filename
			print "Time to write data: ", str(time.time() - t1)
def save_plotSpectrum(y,Fs,image_name):
    """
    Plots a Single-Sided Amplitude Spectrum of y(t)
    """
    fig = Figure(linewidth=0.0)
    fig.set_size_inches(fig_width,fig_length, forward=True)
    Figure.subplots_adjust(fig, left = fig_left, right = fig_right, bottom = fig_bottom, top = fig_top, hspace = fig_hspace)
    n = len(y) # length of the signal

    _subplot = fig.add_subplot(2,1,1)        
    print "Fi"
    _subplot.plot(arange(0,n),y)
    _subplot.set_xlabel('Time')
    _subplot.set_ylabel('Amplitude')
    _subplot_2 = fig.add_subplot(2,1,2)
    k = arange(n)
    T = n/Fs
    frq = k/T # two sides frequency range
    frq = frq[range(n//2)] # one side frequency range

    Y = fft(y)/n # fft computing and normalization
    Y = Y[range(n//2)]

    _subplot_2.plot(frq,abs(Y),'r') # plotting the spectrum
    _subplot_2.set_xlabel('Freq (Hz)')
    _subplot_2.set_ylabel('|Y(freq)|')
    print "here"
    canvas = FigureCanvasAgg(fig)
    if '.eps' in image_name:
        canvas.print_eps(image_name, dpi = 110)
    if '.png' in image_name:
        canvas.print_figure(image_name, dpi = 110)
Example No. 12
 def Evolve_DE(self):
  for i in scipy.arange(self.npop1):
   # for each individual in the population
   # generate a trial vector used to perturb the current individual (index i)
   # from 3 individuals chosen at random from the population,
   # whose indices are distinct and different from i
   invalido = True
   while invalido:
    j = random_integers(0,self.npop1-1,3)
    invalido = (i in j)
    invalido = invalido or (j[0] == j[1]) 
    invalido = invalido or (j[1] == j[2]) 
    invalido = invalido or (j[2] == j[0])    
   # trial vector from the mutation of a target
   u = self.pop1[j[0]] + self.beta*(self.pop1[j[1]] - self.pop1[j[2]]) 
   # generate a candidate solution by crossover
   c = self.pop1[i].copy()  
   # select indices for the crossover,
   # guaranteeing that crossover happens
   # at least once
   j = random_integers(0,self.pop1.shape[1]-1)
   for k in scipy.arange(self.pop1.shape[1]):
    if (scipy.rand() < self.pr) or (k == j):
     c[k] = u[k]  
   ans,c = self.resolve_desafio(c)
   c_fit = self.avalia_aptidao1(ans)    
   # the individual with the better fitness goes to the next generation
   if (c_fit > self.fit1[i]):
    self.pop1[i] = c
    self.fit1[i] = c_fit
    self.ans1[i] = ans
Example No. 13
def waveGen():
	n = 4096			# samples
	freq0 = 0 	# Hz
	samp_rate = 64	# Hz
	levels = 8

	start_freq = 1	# Hz
	end_freq = 2	# Hz
	if (start_freq != end_freq):
		freq0 = arange(start_freq, end_freq, (end_freq - start_freq) / (n * 1.0))
	else:
		freq0 = start_freq


	factor0 = samp_rate / freq0
	time = arange(n)/float(samp_rate)
	wave0 = sin(2 * pi * freq0 * time)

	# errors = [random() - 0.5 for _ in range(n)]
	# wave0 += errors

	sampleList = list()
	for t in arange(len(time)):
		sample = [time[t], wave0[t]]
		sampleList.append(sample)

	return sampleList
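waveGen returns a list of [time, amplitude] pairs for a frequency-swept sine sampled at 64 Hz. A plotting sketch follows; the matplotlib import is an assumption, while waveGen itself only needs arange, sin, and pi in scope.

import matplotlib.pyplot as plt

samples = waveGen()
t = [s[0] for s in samples]
amp = [s[1] for s in samples]
plt.plot(t, amp)
plt.xlabel('time (s)')
plt.ylabel('amplitude')
plt.show()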
Example No. 14
def fit_gaussian_state(Q, P, W):
    q = Q[0,:]
    p = P[:,0]
    m, n = W.shape
    idx_to_q = interp1d(scipy.arange(n), q)
    idx_to_p = interp1d(scipy.arange(m), p)
    i_mean = find_mean(W)
    try:
        q0, p0 = idx_to_q(i_mean[0]), idx_to_p(i_mean[1])
        s0 = 1./(W.max()*sqrt(2.*pi))
        theta0 = 0.
        def twoD_Gaussian(qp, a, b, c):
            q, p = qp
            det = a*c-b**2
            if det<0:
                raise RuntimeError
            normalization = sqrt(det)/(2.*pi)
            g = normalization*exp( -1./2.* (a*((q-q0)**2) + 2*b*(q-q0)*(p-p0) + c*((p-p0)**2)))
            return g.ravel()
        initial_guess = convert_params(s0, s0, theta0)
        (a, b, c), pcov = curve_fit(twoD_Gaussian, (Q, P), W.ravel(), p0=initial_guess)
        cov = scipy.array([[c, -b], [-b, a]])/(a*c-b**2)
        dq = cov[0,0]
        cqp = cov[0,1]
        dp = cov[1,1]
    except:
        q0 = scipy.nan
        p0 = scipy.nan
        dq = scipy.nan
        cqp = scipy.nan
        dp = scipy.nan
    return scipy.array([q0, p0, dq, cqp, dp])
Example No. 15
 def setUp(self):
     # Make a positive definite noise matrix, clean map, and dirty_map.
     self.nra = 10
     self.ndec = 5
     self.nf = 20
     self.shape = (self.nf, self.nra, self.ndec)
     self.size = self.nra * self.ndec * self.nf
     # Clean map.
     clean_map = sp.empty(self.shape, dtype=float)
     clean_map = al.make_vect(clean_map, axis_names=('freq', 'ra', 'dec'))
     clean_map[...] = sp.sin(sp.arange(self.nf))[:,None,None]
     clean_map *= sp.cos(sp.arange(self.nra))[:,None]
     clean_map *= sp.cos(sp.arange(self.ndec))
     # Noise inverse matrix.
     noise_inv = sp.empty(self.shape * 2, dtype=float)
     noise_inv = al.make_mat(noise_inv, axis_names=('freq', 'ra', 'dec')*2,
                             row_axes=(0, 1, 2), col_axes=(3, 4, 5))
     rand_mat = rand.randn(*((self.size,) * 2))
     information_factor = 1.e6  # K**-2
     rand_mat = sp.dot(rand_mat, rand_mat.transpose()) * information_factor
     noise_inv.flat[...] = rand_mat.flat
     # Dirty map.
     dirty_map = al.partial_dot(noise_inv, clean_map)
     # Store in self.
     self.clean_map = clean_map
     self.noise_inv = noise_inv
     self.dirty_map = dirty_map
Example No. 16
def optdelta(UY,UX,S,ldeltanull=None,numintervals=100,ldeltamin=-10.0,ldeltamax=10.0):
    """find the optimal delta"""
    if ldeltanull is None:
        nllgrid=SP.ones(numintervals+1)*SP.inf;
        ldeltagrid=SP.arange(numintervals+1)/(numintervals*1.0)*(ldeltamax-ldeltamin)+ldeltamin;
        nllmin=SP.inf;
        for i in SP.arange(numintervals+1):
            nllgrid[i]=nLLeval(ldeltagrid[i],UY,UX,S);
            if nllgrid[i]<nllmin:
                nllmin=nllgrid[i];
                ldeltaopt_glob=ldeltagrid[i];
        foundMin=False
        for i in SP.arange(numintervals-1)+1:
            ee = 1E-8
            #carry out brent optimization within the interval
            if ((nllgrid[i-1]-nllgrid[i])>ee) and ((nllgrid[i+1]-nllgrid[i])>1E-8):
                foundMin = True
                ldeltaopt,nllopt,iter,funcalls = OPT.brent(nLLeval,(UY,UX,S),(ldeltagrid[i-1],ldeltagrid[i],ldeltagrid[i+1]),full_output=True);
                if nllopt<nllmin:
                    nllmin=nllopt;
                    ldeltaopt_glob=ldeltaopt;
    else:
        ldeltaopt_glob=ldeltanull;
    return ldeltaopt_glob;
Example No. 17
def estimateBeta(X,Y,K,C=None,addBiasTerm=False,numintervals0=100,ldeltamin0=-5.0,ldeltamax0=5.0):
    """ compute all pvalues
    If numintervalsAlt==0 use EMMA-X trick (keep delta fixed over alternative models)
    """
    n,s=X.shape;
    n_pheno=Y.shape[1];
    S,U=LA.eigh(K);
    UY=SP.dot(U.T,Y);
    UX=SP.dot(U.T,X);
    if C is None:
        Ucovariate=SP.dot(U.T,SP.ones([n,1]));
    else:
        if (addBiasTerm):
            C_=SP.concatenate((C,SP.ones([n,1])),axis=1)
            Ucovariate=SP.dot(U.T,C_);
        else:
            Ucovariate=SP.dot(U.T,C);
    n_covar=Ucovariate.shape[1];
    beta = SP.empty((n_pheno,s,n_covar+1));
    LL=SP.ones((n_pheno,s))*(-SP.inf);
    ldelta=SP.empty((n_pheno,s));
    sigg2=SP.empty((n_pheno,s));
    pval=SP.ones((n_pheno,s))*(-SP.inf);
    for phen in SP.arange(n_pheno):
        UY_=UY[:,phen];
        ldelta[phen]=optdelta(UY_,Ucovariate,S,ldeltanull=None,numintervals=numintervals0,ldeltamin=ldeltamin0,ldeltamax=ldeltamax0);
        for snp in SP.arange(s):
            UX_=SP.hstack((UX[:,snp:snp+1],Ucovariate));
            nLL_, beta_, sigg2_=nLLeval(ldelta[phen,snp],UY_,UX_,S,MLparams=True);
            beta[phen,snp,:]=beta_;
            sigg2[phen,snp]=sigg2_;
            LL[phen,snp]=-nLL_;
    return beta, ldelta
Example No. 18
 def __init__(self,fitness_func,npop = 20,w = 0.5,c1 = 2.01,c2 = 2.02,debug = False):
  seed()
  self.debug = debug
  self.c1 = c1
  self.c2 = c2
  self.w = w
  self.ns = int(npop) 
  self.fitness_func = fitness_func  
  # generate the initial population
  if os.path.isfile("dump_pso.pkl"):
   dump_fd = open("dump_pso.pkl",'r')
   self.pop = cPickle.load(dump_fd)
   self.fit = cPickle.load(dump_fd)
   self.v = cPickle.load(dump_fd)
   self.bfg = cPickle.load(dump_fd)
   self.bfg_fitness = cPickle.load(dump_fd)
   self.bfp = cPickle.load(dump_fd)
   self.bfp_fitness  = cPickle.load(dump_fd)
  else:
   self.pop = scipy.array([self.gera_individuo() for i in scipy.arange(self.ns)])
   self.fit = scipy.zeros(self.ns)
   # evaluate the fitness of the whole population
   for i in scipy.arange(self.ns):
    self.fit[i],self.pop[i] = self.avalia_aptidao(self.pop[i])  
   # initialize the particle velocities
   self.v = scipy.zeros((self.ns,Dim))
   # store the best position of each particle
   self.bfp = scipy.copy(self.pop)
   self.bfp_fitness = scipy.copy(self.fit)
   # store the global best position
   self.bfg = self.pop[self.bfp_fitness.argmin()].copy()
   self.bfg_fitness = self.bfp_fitness.min().copy()
Example No. 19
def KramersKronigFFT(ImX_A):
	'''	Hilbert transform used to calculate real part of a function from its imaginary part
	uses piecewise cubic interpolated integral kernel of the Hilbert transform
	use only if len(ImX_A)=2**m-1, uses fft from scipy.fftpack  '''
	X_A = sp.copy(ImX_A)
	N = int(len(X_A))
	## be careful with the data type, otherwise it fails for large N
	if N > 3e6: A = sp.arange(3,N+1,dtype='float64')
	else:       A = sp.arange(3,N+1)  
	X1 = 4.0*sp.log(1.5)
	X2 = 10.0*sp.log(4.0/3.0)-6.0*sp.log(1.5)
	## filling the kernel
	if N > 3e6: Kernel_A = sp.zeros(N-2,dtype='float64')
	else:       Kernel_A = sp.zeros(N-2)
	Kernel_A = (1-A**2)*((A-2)*sp.arctanh(1.0/(1-2*A))+(A+2)*sp.arctanh(1.0/(1+2*A)))\
	+((A**3-6*A**2+11*A-6)*sp.arctanh(1.0/(3-2*A))+(A+3)*(A**2+3*A+2)*sp.arctanh(1.0/(2*A+3)))/3.0
	Kernel_A = sp.concatenate([-sp.flipud(Kernel_A),sp.array([-X2,-X1,0.0,X1,X2]),Kernel_A])/sp.pi
	## zero-padding the functions for fft
	ImXExt_A = sp.concatenate([X_A[int((N-1)/2):],sp.zeros(N+2),X_A[:int((N-1)/2)]])
	KernelExt_A = sp.concatenate([Kernel_A[N:],sp.zeros(1),Kernel_A[:N]])
	## performing the fft
	ftReXExt_A = -fft(ImXExt_A)*fft(KernelExt_A)
	ReXExt_A = sp.real(ifft(ftReXExt_A))
	ReX_A = sp.concatenate([ReXExt_A[int((3*N+3)/2+1):],ReXExt_A[:int((N-1)/2+1)]])
	return ReX_A
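A quick check sketch, assuming the imports the function relies on (import scipy as sp; from scipy.fftpack import fft, ifft) and an odd length of the form 2**m - 1 as required by the docstring. The imaginary part of a Lorentzian serves as the test input; its Kramers-Kronig partner is the dispersive Lorentzian.

N = 2**12 - 1                            # length must be 2**m - 1
w = sp.linspace(-20.0, 20.0, N)          # frequency grid (illustrative)
g, w0 = 0.5, 0.0
ImX_A = -g/((w - w0)**2 + g**2)          # imaginary part of 1/(w - w0 + i*g)
ReX_A = KramersKronigFFT(ImX_A)
# compare against (w - w0)/((w - w0)**2 + g**2), up to the routine's sign convention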
Example No. 20
    def plotmap(self,fig,ax):
        """ This function will plot the map of Alaska. The data will be plotted
            over it and will use the basemap class to position everything.
            Input
                fig - The figure handle for the plots.
                ax - The axes handle that the map will be plotted over.
            Output
                m - This is the handle for the basemap object.
        """
        latlim2 = self.params['latbounds']
        lonlim2 = self.params['lonbounds']
        m = Basemap(projection='merc',lon_0=sp.mean(lonlim2),lat_0=sp.mean(latlim2),\
        lat_ts=sp.mean(latlim2),llcrnrlat=latlim2[0],urcrnrlat=latlim2[1],\
        llcrnrlon=lonlim2[0],urcrnrlon=lonlim2[1],\
        rsphere=6371200.,resolution='i',ax=ax)
        # draw coastlines, state and country boundaries, edge of map.
        #m.drawcoastlines()
    #    m.drawstates()
    #    m.drawcountries()
        m.readshapefile('st99_d00','states',drawbounds=True)

        merstep = sp.round_((lonlim2[1]-lonlim2[0])/5.)
        parstep = sp.round_((latlim2[1]-latlim2[0])/5.)
        meridians=sp.arange(lonlim2[0],lonlim2[1],merstep)
        parallels = sp.arange(latlim2[0],latlim2[1],parstep)
        m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
        m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)
        plt.hold(True)
        return m
Example No. 21
 def setData(self,x_c=None,x_r=None,x=None,gplvm_dimensions_r=None,gplvm_dimensions_c=None,y=None,**kw_args):
     #previous interfaces used x,y; now we add x_r/x_c assuming x_r=x
     self.y=y
     self.n=y.shape[0]
     self.d=y.shape[1]
     if x_r is not None:
         x = x_r
     else:
         x_r = SP.zeros([self.n,0])
     if x is not None:
         x_r = x
     #GPLVM.GPLVM.setData(self,x = x_r,**kw_args)
     #inputs for second covariance if applicable
     if x_c is not None:
         assert x_c.shape[0]==self.d, 'dimension mismatch'
     else:
         x_c = SP.zeros([self.d,0])
     self.x_c = x_c
     self.x_r = x_r
     #useful input matrix which hold the size of the entire kronecker structure
     self.xx = SP.zeros([self.x_r.shape[0]*self.x_c.shape[0],0])
     #store reshaped view of Y
     self.nd = self.n*self.d
     if gplvm_dimensions_r is None:
         gplvm_dimensions_r = SP.arange(self.x_r.shape[1])
     if gplvm_dimensions_c is None:
         gplvm_dimensions_c = SP.arange(self.x_c.shape[1])
     #store dimensions
     self.gplvm_dimensions_r = gplvm_dimensions_r
     self.gplvm_dimensions_c = gplvm_dimensions_c
     self._invalidate_cache()
Example No. 22
File: mean.py  Project: PMBio/mtSet
	def _update_indicator(self,K,L):
		""" update the indicator """
		_update = {'term': self.n_terms*SP.ones((K,L)).T.ravel(),
					'row': SP.kron(SP.arange(K)[:,SP.newaxis],SP.ones((1,L))).T.ravel(),
					'col': SP.kron(SP.ones((K,1)),SP.arange(L)[SP.newaxis,:]).T.ravel()} 
		for key in _update.keys():
			self.indicator[key] = SP.concatenate([self.indicator[key],_update[key]])
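A standalone sketch of the index pattern the kron expressions above produce (assuming import scipy as SP, as the example does): for K rows and L columns they enumerate every (row, col) pair of the K x L grid in column-major order.

import scipy as SP

K, L = 2, 3
row = SP.kron(SP.arange(K)[:, SP.newaxis], SP.ones((1, L))).T.ravel()
col = SP.kron(SP.ones((K, 1)), SP.arange(L)[SP.newaxis, :]).T.ravel()
print(row)    # [0. 1. 0. 1. 0. 1.]
print(col)    # [0. 0. 1. 1. 2. 2.]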
	def numProj(self, ang=5, sym='d7', with_mirror=False):
		csym = abs(float(sym[1:]))
		ang = abs(float(ang))
		if ang == 0.0:
			return 0
		angrad = ang*math.pi/180.0
		maxalt = math.pi/2.0 + angrad/1.99
		maxaz = 2.0*math.pi/csym
		if sym[0].lower() == 'd':
			maxaz /= 2.0
		numproj = 0
		for alt in arange(0.0, maxalt, angrad):
			if alt < 1.0e-6:
				### only one for top projection
				numproj+=1
				continue
			### calculate number of steps
			numsteps = math.floor(360.0/(ang*1.1547));
			numsteps = math.floor(numsteps * math.sin(alt) + 0.5)
			if numsteps < 1.0e-3:
				### only valid for c1, d1, c2 and d2
				numsteps = 1.0
			numsteps = csym * math.floor(numsteps/csym + 0.5) + 1.0e-6
			### calculate azimuthal step size
			azstep = 2.0*math.pi/numsteps
			if (maxaz/azstep) < 2.8:
				### if less than 2.8 steps, use 2 steps
				azstep = maxaz/2.1
			for az in arange(0.0, maxaz-azstep/4.0, azstep):
				if not with_mirror and az > math.pi-1.0e-3 and abs(alt-math.pi/2.0) < 1.0e-3:
					### ignore half of the equator
					continue
				numproj+=1

		return numproj
Example No. 24
def triples2mat(triples,shape="csr"):
    n = len(triples)
    data = arange(n)
    ij = arange(2*n).reshape(2,n)
    for k,item in enumerate(triples):
        ij[0][k],ij[1][k],data[k] = item
    return scipy.sparse.coo_matrix((data, ij)).asformat(shape)
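A usage sketch, assuming the imports the function needs (from scipy import arange; import scipy.sparse). Note that the data buffer is created with arange(n) and is therefore integer-typed, so float values in the triples would be truncated.

triples = [(0, 1, 3), (1, 2, 5), (2, 0, 7)]     # (row, col, value) triples
M = triples2mat(triples, shape="csr")
print(M.toarray())
# [[0 3 0]
#  [0 0 5]
#  [7 0 0]]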
Example No. 25
def getData(shot, tags=False,
            probesort=True, internal=True, external=False):
    if tags == False:
        print 'tags must be specified in getMDSplus.getData(tags="typetagshere")'
        return False
    if type(tags)==str:
        tags = [tags]
    data = {}
    data["Shot"] = shot
    data["Tags"] = tags
    if "RespModes" in tags  or "AppModes"in tags:
        data["gamma"] = getGamma(shot)
    if "RespModes" in tags:
        data["RespModes"] = getRespModes(shot)
        data["Mtime"] = sp.arange(0, max(data["RespModes"].shape)/512, 1/512)
    if "AppModes" in tags:
        data["AppModes"] = getRespModes(shot)
        data["Mtime"] = sp.arange(0, max(data["AppModes"].shape)/512, 1/512)
    if "Bfield" in tags:
        data["Bfield"] = getBfield(shot)
    if "Bfield" in tags or "Appfield" in tags or "RespField" in tags:
        data["Serials"] = getSerials(shot)
        data["Position"] = getPosition(shot)
        # a time field is created so that resampling can be kept track of within the dictionary
        data["Btime"] = sp.arange(0, max(data["Bfield"].shape)/512, 1/512)
        if probesort == True:
            probeSort(shot, data, internal=internal, external=external)
    if "RawData" in tags:
        data["RawData"] = getRawData(shot)
    return data
Example No. 26
    def getGenoID(self,i0=None,i1=None,pos0=None,pos1=None,chrom=None,pos_cum0=None,pos_cum1=None):
        """get genotype IDs. 
        Optionally the indices for loading subgroups the genotype IDs for all people
        can be given in one out of three ways: 
        - 0-based indexing (i0-i1)
        - position (pos0-pos1 on chrom)
        - cumulative position (pos_cum0-pos_cum1)
        If all these are None (default), then all genotypes are returned

        Args:
            i0:         genotype index based selection (start index)
            i1:         genotype index based selection (stop index)
            pos0:       position based selection (start position)
            pos1:       position based selection (stop position)
            chrom:      position based selection (chromosome)
            pos_cum0:   cumulative position based selection (start position)
            pos_cum1:   cumulative position based selection (stop position)
           
        Returns:
            ID:         scipy.array of genotype IDs (e.g. rs IDs)
        """
        #position based matching?
        if (i0 is None) and (i1 is None) and (((pos0 is not None) and (pos1 is not None) and (chrom is not None)) or ((pos_cum0 is not None) and (pos_cum1 is not None))):
            i0,i1=self.getGenoIndex(pos0=pos0,pos1=pos1,chrom=chrom,pos_cum0=pos_cum0,pos_cum1=pos_cum1)
        if "genotype_id" in list(self.geno.keys()):
            if (i0 is not None) & (i1 is not None):
                return self.geno["genotype_id"][i0:i1]
            else:
                return self.geno["genotype_id"][i0:i1]
        else:
            if (i0 is not None) & (i1 is not None):
                return SP.arange(i0,i1)
            else:
                return SP.arange(self.genoM.shape[1])
        pass
Example No. 27
def eig(A, normal = False, iter = 100):
	'''Finds eigenvalues of an nxn array A. If A is normal, QRalg.eig 
	may also return eigenvectors.
	
	Parameters
	----------
	A :  nxn array
	     May be real or complex
	normal : bool, optional
		     Set to True if A is normal and you want to calculate
		     the eigenvectors.
	iter : positive integer, optional
			
	Returns
	-------
	v : 1xn array of eigenvectors, may be real or complex
	Q : (only returned if normal = True) 
		nxn array whose columns are eigenvectors, s.t. A*Q = Q*diag(v)
		real if A is real, complex if A is complex
	
	For more on the QR algorithm, see Eigenvalue Solvers lab.
	'''
	def getSchurEig(A):
		#Find the eigenvalues of a Schur form matrix. These are the 
		#elements on the main diagonal, except where there's a 2x2 
		#block on the main diagonal. Then we have to find the 
		#eigenvalues of that block.
		D = sp.diag(A).astype(complex)
		#Find all the 2x2 blocks:
		LD = sp.diag(A,-1)
		index = sp.nonzero(abs(LD)>.01)[0] #is this a good tolerance?
		#Find the eigenvalues of those blocks:
		a = 1
		b = -D[index]-D[index+1]
		c = D[index]*D[index+1] - A[index,index+1]*LD[index]
		discr = sp.sqrt(b**2-4*a*c)
		#Fill in vector D with those eigenvalues
		D[index] = (-b + discr)/(2*a)
		D[index+1] = (-b - discr)/(2*a)
		return D

	n,n = A.shape
	I = sp.eye(n)
	A,Q = hessenberg(A,True)
	if normal == False:
		for i in sp.arange(iter):
			s = A[n-1,n-1].copy()
			Qi,R = la.qr(A-s*I)
			A = sp.dot(R,Qi) + s*I
		v = getSchurEig(A)
		return v
	
	elif normal == True:
		for i in sp.arange(iter):
			s = A[n-1,n-1].copy()
			Qi,R = la.qr(A-s*I)
			A = sp.dot(R,Qi) + s*I
			Q = sp.dot(Q,Qi)
		v = sp.diag(A)
		return v,Q
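A usage sketch, assuming the imports the body relies on (import scipy as sp; from scipy import linalg as la; from scipy.linalg import hessenberg). A symmetric matrix is normal, so the eigenvector branch applies.

A = sp.random.rand(5, 5)
A = A + A.T                                  # symmetric, hence normal
v, Q = eig(A, normal=True, iter=200)
print(sp.sort(v))                            # compare with sp.sort(la.eigvalsh(A))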
Example No. 28
def gqr(A):
	"""Finds the QR decomposition of A using Givens rotations.
	input: 	A, mxn array with m>=n
	output: Q, orthogonal mxm array
	        R, upper triangular mxn array
	        s.t QR = A
	"""
	def rotate(i,k,B):
	# create the Givens rotation matrix G to zero out the i,k entry of B
		r = sp.sqrt(B[k,k]**2 + B[i,k]**2)
		c = B[k,k]/r
		s = -B[i,k]/r
		G = sp.eye(m)
		G[i,i] = c
		G[k,k] = c
		G[k,i] = -s
		G[i,k] = s
		return G
	
	B = A.copy()	
	m,n = B.shape
	G = sp.eye(m)
	#cycle through each nonzero subdiagonal element of B, and rotate it to zero
	for k in sp.arange(n-1):
		for i in sp.arange(k+1,m):
			if B[i,k] != 0:
				H = rotate(i,k,B)
				B = sp.dot(H,B)
				G = sp.dot(H,G)
	return G.T, B
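A usage sketch (assuming import scipy as sp): Q is orthogonal and R upper triangular, with the product Q R reproducing A.

A = sp.random.rand(4, 4)
Q, R = gqr(A)
print(sp.allclose(sp.dot(Q, R), A))          # True: the factorisation reproduces A
print(sp.allclose(sp.tril(R, -1), 0.0))      # True: R is (numerically) upper triangular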
Example No. 29
def PlotLc(id=None, dir=None, quarter=None, tset=None):
    if id is not None:
        tset, status = GetLc(id=id, dir=dir, tr_out=True)
        if tset is None:
            return
    elif tset is None:
        print "no tset"
        return
    if quarter is not None:
        tset.tables[1] = tset.tables[1].where(tset.tables[1].Q == quarter)
        if len(tset.tables[1].TIME) == 0:
            print "No data for Q%d" % quarter
            return
    time = tset.tables[1].TIME
    phase, inTr = TransitPhase(tset)
    col = ["r", "g", "b", "y", "c", "m", "grey"]
    npl, nobs = inTr.shape
    pylab.figure(1)
    pylab.clf()
    pylab.plot(time, tset.tables[1].PDCSAP_FLUX, "k-")
    for ipl in scipy.arange(npl):
        in_transit = inTr[ipl, :].astype(bool)
        pylab.plot(time[in_transit], tset.tables[1].PDCSAP_FLUX[in_transit], ".", c=col[ipl])
    l = scipy.isfinite(time)
    pylab.xlim(time[l].min(), time[l].max())
    ttl = "KIC %d, P=" % (tset.tables[0].KID[0])
    for i in scipy.arange(npl):
        ttl = "%s%.5f " % (ttl, tset.tables[0].Period[i])
    if quarter is not None:
        ttl += "Q%d" % quarter
    pylab.title(ttl)

    return
Example No. 30
def plotSpectrum(y,Fs):
    """
    Plots a Single-Sided Amplitude Spectrum of y(t)
    :param y: the signal
    :param Fs: the sampling frequency
    """
    n = len(y) # length of the signal
    k = arange(n)
    T = n/Fs 
    frq = k/T # Two sides frequency range
    frq = np.squeeze(frq)

    frq = frq[range(int(n/2))] # One side frequency range

    Y = fft(y)/n # FFT computing and normalization
    Y = Y[range(int(n/2))]
    # Plot the signal in wall-clock time
    subplot(2,1,1)
    Ts = 1.0/Fs; # sampling interval
    t = arange(0, 1.0*len(y)/Fs, Ts)
    plot(t, y)
    xlabel('Time')
    ylabel('Amplitude')
    # Plot the spectrum 
    subplot(2,1,2)
    plot( frq, np.log(abs(Y)),'r') # Plotting the spectrum
    xlabel('Freq (Hz)')
    ylabel('log|Y(freq)|')
    show()      
Example No. 31
def ldpred_gibbs(beta_hats,
                 genotypes=None,
                 start_betas=None,
                 h2=None,
                 n=None,
                 ns=None,
                 ld_radius=100,
                 num_iter=60,
                 burn_in=10,
                 p=None,
                 zero_jump_prob=0.01,
                 sampl_var_shrink_factor=0.9,
                 tight_sampling=False,
                 ld_dict=None,
                 reference_ld_mats=None,
                 ld_boundaries=None,
                 verbose=False,
                 print_progress=True):
    """
    LDpred (Gibbs Sampler) 
    """
    # Set random seed to stabilize results
    sp.random.seed(42)

    t0 = time.time()
    m = len(beta_hats)

    ldpred_n, ldpred_inf_n = get_LDpred_sample_size(n, ns, verbose)

    # If no starting values for effects were given, then use the infinitesimal model starting values.
    if start_betas is None:
        if verbose:
            print('Initializing LDpred effects with posterior mean LDpred-inf effects.')
            print('Calculating LDpred-inf effects.')
        start_betas = LDpred_inf.ldpred_inf(
            beta_hats,
            genotypes=genotypes,
            reference_ld_mats=reference_ld_mats,
            h2=h2,
            n=ldpred_inf_n,
            ld_window_size=2 * ld_radius,
            verbose=False)
    curr_betas = sp.copy(start_betas)
    assert len(
        curr_betas
    ) == m, 'Betas returned by LDpred_inf do not have the same length as expected.'
    curr_post_means = sp.zeros(m)
    avg_betas = sp.zeros(m)

    # Iterating over effect estimates in sequential order
    iter_order = sp.arange(m)

    # Setting up the marginal Bayes shrink
    const_dict = prepare_constants(ldpred_n, ns, m, p, h2,
                                   sampl_var_shrink_factor)

    for k in range(num_iter):  # Big iteration
        h2_est = max(0.00001, sp.sum(curr_betas**2))
        if tight_sampling:
            # Force an alpha shrink if estimates are way off compared to heritability estimates.
            #(May improve MCMC convergence.)
            alpha = min(1.0 - zero_jump_prob, 1.0 / h2_est,
                        (h2 + 1.0 / sp.sqrt(ldpred_n)) / h2_est)
        else:
            alpha = 1.0 - zero_jump_prob

        rand_ps = sp.random.random(m)

        rand_norms = stats.norm.rvs(0.0, 1, size=m) * const_dict['rv_scalars']

        for i, snp_i in enumerate(iter_order):
            if ld_boundaries is None:
                start_i = max(0, snp_i - ld_radius)
                focal_i = min(ld_radius, snp_i)
                stop_i = min(m, snp_i + ld_radius + 1)
            else:
                start_i = ld_boundaries[snp_i][0]
                stop_i = ld_boundaries[snp_i][1]
                focal_i = snp_i - start_i

            #Figure out what sample size and constants to use
            cd = get_constants(snp_i, const_dict)

            # Local LD matrix
            D_i = ld_dict[snp_i]

            # Local (most recently updated) effect estimates
            local_betas = curr_betas[start_i:stop_i]

            # Calculate the local posterior mean, used when sampling.
            local_betas[focal_i] = 0.0
            res_beta_hat_i = beta_hats[snp_i] - sp.dot(D_i, local_betas)
            b2 = res_beta_hat_i**2

            d_const_b2_exp = cd['d_const'] * sp.exp(-b2 * cd['n'] / 2.0)
            if sp.isreal(d_const_b2_exp):
                numerator = cd['c_const'] * sp.exp(-b2 / (2.0 * cd['hdmpn']))
                if sp.isreal(numerator):
                    if numerator == 0.0:
                        postp = 0.0
                    else:
                        postp = numerator / (numerator + d_const_b2_exp)
                        assert sp.isreal(
                            postp
                        ), 'The posterior mean is not a real number?  Possibly due to problems with summary stats, LD estimates, or parameter settings.'
                else:
                    postp = 0.0
            else:
                postp = 1.0
            curr_post_means[snp_i] = cd['hdmp_hdmpn'] * postp * res_beta_hat_i

            if rand_ps[i] < postp * alpha:
                # Sample from the posterior Gaussian dist.
                proposed_beta = rand_norms[
                    snp_i] + cd['hdmp_hdmpn'] * res_beta_hat_i

            else:
                # Sample 0
                proposed_beta = 0.0

            curr_betas[snp_i] = proposed_beta  # UPDATE BETA

        if verbose and print_progress:
            sys.stdout.write('\r%0.2f%%' % (100.0 *
                                            (min(1,
                                                 float(k + 1) / num_iter))))
            sys.stdout.flush()

        if k >= burn_in:
            avg_betas += curr_post_means  # Averaging over the posterior means instead of samples.
    if verbose and print_progress:
        sys.stdout.write('\r%0.2f%%\n' % (100.0))
        sys.stdout.flush()

    avg_betas = avg_betas / float(num_iter - burn_in)
    t1 = time.time()
    t = (t1 - t0)
    if verbose:
        print('Took %d minutes and %0.2f seconds' % (t / 60, t % 60))
    return {'betas': avg_betas, 'inf_betas': start_betas}
Example No. 32
    freerun_steps = 1000
    training_sample_length = 5000
    n_training_samples = 3
    test_sample_length = 5000

    train_signals = Oger.datasets.mackey_glass(sample_len=training_sample_length, n_samples=n_training_samples)
    test_signals = Oger.datasets.mackey_glass(sample_len=test_sample_length, n_samples=1)

    reservoir = Oger.nodes.LeakyReservoirNode(output_dim=400, leak_rate=0.4, input_scaling=.05, bias_scaling=.2, reset_states=False)
    readout = Oger.nodes.RidgeRegressionNode()
    Oger.utils.enable_washout(Oger.nodes.RidgeRegressionNode, 500)

    flow = Oger.nodes.FreerunFlow([reservoir, readout], freerun_steps=freerun_steps)

    gridsearch_parameters = {readout:{'ridge_param': 10 ** scipy.arange(-4, 0, .3)}}

    # Instantiate an optimizer
    loss_function = Oger.utils.timeslice(range(training_sample_length - freerun_steps, training_sample_length), Oger.utils.nrmse)
    opt = Oger.evaluation.Optimizer(gridsearch_parameters, loss_function)

    # Do the grid search
    opt.grid_search([[], train_signals], flow, cross_validate_function=Oger.evaluation.leave_one_out)

    # Get the optimal flow and run cross-validation with it 
    opt_flow = opt.get_optimal_flow(verbose=True)

    print 'Freerun on test_signals signal with the optimal flow...'
    opt_flow.train([[], train_signals])
    freerun_output = opt_flow.execute(test_signals[0][0])
Example No. 33
#!/usr/bin/env python

from sys import argv
from scipy import array, arange
from scipy.misc import toimage
from itertools import product
from pprint import pprint

edge = 60
half = edge / 2
check = 10
argc = len(argv)
if argc >= 2:
    temp = int(argv[1])
    if 1024 >= temp >= 60:
        edge = temp
        half = edge / 2
if argc >= 3:
    temp = int(argv[2])
    if half >= temp >= 1:
        check = temp
shape = (edge, edge)
XY = list(product(arange(edge), repeat=2))
value = [255 * int((x / check) & 1 == (y / check) & 1) for (x, y) in XY]

board = array(value).reshape(shape)
image = toimage(board)
image.save('img/checkers_%04d_%02d.png' % (edge, check))
Example No. 34
print 'setting up interface'
comm = Motor_Comm()
comm.enable()
comm.standby('off')
comm.loop_mode('off')
dt = comm.dt()
num_motor = comm.num_motor()

if 1:
    print 'creating kinematics'
    T = 2.5
    num_period = 5.0
    n = int(num_period * T / dt)

    t = scipy.arange(0.0, n) * dt
    x = scipy.zeros((n, num_motor))
    for i in range(0, num_motor):
        x[:, i] = kine(t, T)

    print 'loading outscan buffer'
    comm.load_os_buffer(x)
    #comm.print_status()

    print 'moving to start'
    comm.move2start()

    print 'outscanning buffer'
    comm.start()
    comm.wait()
Example No. 35
def tdo_fft(inputfile, outputfile):
    ''' Perform Fourier transform and return frequency evolution '''
    # Input parameters
    fourier_le = 1024  # Fourier length
    time_le = 1024  # timewindow
    dfmin = 0.01  # Frequency resolution
    dt = 2e-8  # timestep of acquisition
    load_balancing = 1

    # Read the input file
    fid = open(inputfile, 'rb')
    fid.seek(512)  # Skip useless header
    V = fromfile(fid, int16, -1, '')
    fid.close()

    pstart = 1  # First timewindow
    pend = int(floor(
        (len(V) - 4 * fourier_le) / time_le)) + 1  # Last timewindow

    t = arange(0, fourier_le) * dt

    # Approximation of main frequency
    Vf = abs(real(fft(V[0:fourier_le])))
    tf = fftfreq(fourier_le, dt)
    fmax = zeros((pend + 2 - pstart, 2))
    fmax[0, 1] = tf[argmax(Vf[0:int(fourier_le / 2)])]
    fmax[0, 0] = time_le * dt / 2

    # Calculation of constants
    expon = -2j * pi * t
    deltaf0 = tf[1] / 1000
    if deltaf0 < dfmin:
        deltaf0 = 10 * dfmin

    # Start jobs
    job_server = pp.Server()
    ncpus = int(job_server.get_ncpus())

    serv_jobs = []

    # Load-balancing
    # Last processes are faster
    # If nprocess = ncpus, half of the core remains mostly idle
    if load_balancing == 1:
        nprocess = ncpus * 4
    else:
        nprocess = ncpus

    pstart_b = pstart
    for i in range(0, nprocess):
        if nprocess == 1:
            pend_b = pend
        else:
            pend_b = int(pstart_b + floor(pend / nprocess))

        print(pstart_b, pend_b)

        args_tuple = (pstart_b, pend_b+1, \
        V[pstart_b*time_le:(pend_b+1)*time_le+fourier_le], \
        dt, dfmin, deltaf0, expon, fourier_le, time_le,)

        serv_jobs.append(job_server.submit(find_freq, args_tuple, \
        (local_trapz,)))

        pstart_b = pend_b

    pstart_b = pstart
    for i in range(0, nprocess):
        if nprocess == 1:
            pend_b = pend
        else:
            pend_b = int(pstart_b + floor(pend / nprocess))

        fmax[pstart_b:pend_b, :] = serv_jobs[i]()
        pstart_b = pend_b

    # Save calculation in file
    savetxt(outputfile, fmax)
    job_server.print_stats()
Example No. 36
def find_freq(ps, pe, V, dt, dfmin, deltaf0, expon, fourier_le, time_le):
    '''Perform DFT of signal and return main frequency'''
    from scipy import zeros, real, fft, argmax, exp, arange, cos, pi, mean
    from scipy.fftpack import fftfreq

    Vf = abs(real(fft(V[0:fourier_le] - mean(V[0:fourier_le]))))
    tf = fftfreq(fourier_le, dt)
    fmax = zeros((pe - ps, 2))
    fmax[0, 1] = tf[argmax(Vf[0:int(fourier_le / 2)])]
    fmax[0, 0] = (ps * time_le + fourier_le / 2) * dt

    # Rectangular
    #window  =  ones(fourier_le)
    # Cosine
    window = arange(0, fourier_le)
    window = 1 - cos(window * 2 * pi / (fourier_le - 1))

    for i in xrange(1, pe - ps):
        # Use the previous value as the starting point
        a = fmax[i - 1, 1]
        V_temp = window * V[i * time_le:i * time_le + fourier_le]

        # Previous frequency spectral weight
        # Complex exponential time consuming
        # Need a smarter way to perform this calculations
        deltaf = deltaf0
        essaimax = abs(local_trapz(V_temp * exp(expon * a)))

        # Calculation of local derivative of Fourier transform
        # If derivative positive, then search for frequency in growing direction
        if abs(local_trapz(V_temp * exp(expon * (a + deltaf)))) > essaimax:
            while abs(deltaf) > dfmin:
                F = abs(local_trapz(V_temp * exp(expon * (a + deltaf))))
                if F > essaimax:
                    essaimax = F
                    a += deltaf
                else:
                    deltaf = -deltaf / 5
                    if (abs(deltaf) < dfmin) and (abs(deltaf) > dfmin * 4.9):
                        deltaf = deltaf / abs(deltaf) * 1.01 * dfmin

            # Store frequency
            fmax[i, 0:2] = [((i + ps) * time_le + fourier_le / 2) * dt,
                            a - 2.5 * deltaf]
        # Lower frequency otherwise
        else:
            while abs(deltaf) > dfmin:
                F = abs(local_trapz(V_temp * exp(expon * (a - deltaf))))
                if F > essaimax:
                    essaimax = F
                    a -= deltaf
                else:
                    deltaf = -deltaf / 5
                    if (abs(deltaf) < dfmin) and (abs(deltaf) > dfmin * 4.9):
                        deltaf = deltaf / abs(deltaf) * 1.01 * dfmin

            # Store frequency
            fmax[i, 0:2] = [((i + ps) * time_le + fourier_le / 2) * dt,
                            a + 2.5 * deltaf]

    return fmax[1:, :]
Example No. 37
     color='g',
     linestyle='dashed')
grid()
subplot(212)
ylabel(r'$\bar{u}^s$')
xlabel(r't')
plt.ylim(-2.1, 2.1)
plt.xlim(xmin=.49)
p1 = plot(dataPlot[4900:, 0], dataPlot[4900:, 3])
#p2 = plot(dataPlot[4900:, 0], np.sin(50*dataPlot[4900:, 0]))
#plt.legend((p1[0], p2[0]), (r'$\bar{u}^s(t)$', r'$-\rho(t)$'), ncol=2)
savefig('esmc_sigma_u_z')

u_z = dataPlot[4900:, 3]
n = len(u_z)
Y = scipy.fft(dataPlot[4900:, 3]) / n
k = arange(n)
T = n * h
frq = k / T
frq = frq[list(range(n // 2))]
Y = Y[list(range(n // 2))]
plot(frq, abs(Y), 'r')
xlabel(r'freq (Hz)')
title(r'Frequency spectrum of $\bar{u}^s$')
savefig('esmc_u_freq.png')
# TODO
# compare with the reference
#ref = getMatrix(SimpleMatrix("result.ref"))
#if (norm(dataPlot - ref[1:,:]) > 1e-12):
#    print("Warning. The result is rather different from the reference file.")
Example No. 38
def run_demo():
    LG.basicConfig(level=LG.INFO)

    #1. simulate data from a linear PCA model
    N = 25
    K = 5
    D = 200

    SP.random.seed(1)
    S = SP.random.randn(N,K)
    W = SP.random.randn(D,K)

    Y = SP.dot(W,S.T).T

    Y+= 0.5*SP.random.randn(N,D)

    #use "standard PCA"
    [Spca,Wpca] = gplvm.PCA(Y,K)

    #reconstruction
    Y_ = SP.dot(Spca,Wpca.T)

    if 1:
        #use linear kernel
        covariance = linear.LinearCFISO(n_dimensions=K)
        hyperparams = {'covar': SP.log([1.2])}
    if 0:
        #use ARD kernel
        covariance = se.SqexpCFARD(n_dimensions=K)
        hyperparams = {'covar': SP.log([1]*(K+1))}

    #initialization of X at random
    X0 = SP.random.randn(N,K)
    X0 = Spca
    hyperparams['x'] = X0
    
    #standard Gaussian noise
    likelihood = lik.GaussLikISO()
    hyperparams['lik'] = SP.log([0.1])
    g = gplvm.GPLVM(covar_func=covariance,likelihood=likelihood,x=X0,y=Y,gplvm_dimensions=SP.arange(X0.shape[1]))

    #specify optimization bounds:
    bounds = {}
    bounds['lik'] = SP.array([[-5.,5.]]*D)
    hyperparams['x'] = X0

    print "running standard gplvm"
    [opt_hyperparams,opt_lml2] = opt.opt_hyper(g,hyperparams,gradcheck=False)

    print "optimized latent X:"
    print opt_hyperparams['x']
Example No. 39
def preTrain(Y,
             terms,
             P_I,
             noise='gauss',
             nFix=None,
             priors=None,
             covariates=None):
    """Pre-train the slalom factor analysis model.

    Helper function to pre-train the slalom factor analysis model to achieve 
    faster convergence and obtain an initial update order. Called by `initFA`.

    Args:
        Y          (array_like): Matrix of normalised count values of `N` cells 
                                 and `G` variable genes in log-space.
                                 Dimension (:math:`N\\times G`).
        terms     (vector_like): Names of `K` annotated gene sets. Dimension
                                 (:math:`K\\times 0`).
        P_I        (array_like): Matrix specifying the likelihood of 
                                 whether a gene is annotated to a specific factor.
                                 Dimension (:math:`G\\times K`).
        noise              (str): Specifies the observation noise model. Should be either `'gauss'`,`'hurdle'` or `'poisson'`.
                                 Defaults to `gauss`.             
        nFix               (int): Number of terms which should be fixed and updated first. Defaults to `None`, 
                                  resulting in the number of unannotated factors being updated first.                                                                                           
    Returns:
        A vector containing the initial update order of the terms
    """

    init_params = {}
    init_params['noise'] = noise
    init_params['iLatent'] = SP.where(terms == 'hidden')[0]
    init_params['iLatentSparse'] = SP.array(
        [])  #SP.where(terms=='hiddenSparse')[0]
    if not (covariates is None):
        init_params['Known'] = covariates
    learnPi = False

    pi = P_I.copy()
    K = pi.shape[1]

    #data for sparseFA instance
    pi[pi > .8] = 1 - 1e-100  # 0.99#1-1e-100#0.9999
    pi[pi < .2] = 1e-100  #1e-8

    init = {
        'init_data': CGauss(Y),
        'Pi': pi,
        'terms': terms,
        'noise': noise,
        'covariates': covariates
    }
    sigmaOff = 1E-3
    sparsity = 'VB'

    #prior on noise level
    if priors is None:
        priors = {'Eps': {'priors': [1E-3, 1E-3]}}
    #how to initialize network?
    #initType = 'pcaRand'
    terms0 = terms
    pi0 = pi.copy()
    FA0 = slalom.CSparseFA(components=K,
                           sigmaOff=sigmaOff,
                           sigmaOn=SP.ones(pi.shape[1]) * 1.0,
                           sparsity=sparsity,
                           nIterations=50,
                           permutation_move=False,
                           priors=priors,
                           initType='pcaRand',
                           learnPi=learnPi)
    FA0.init(**init)
    if nFix is None:
        nFix = FA0.nKnown + FA0.nLatent


#Fit PCA
    pca = PCA(n_components=1)  #,svd_solver='full')
    pca.fit(FA0.Z.E1)
    X = pca.transform(FA0.Z.E1)

    #Sort by correlation to PC1
    MPC = abs(vcorrcoef(FA0.S.E1[:, SP.argsort(FA0.W.Ilabel)].T, X.T))[nFix:]
    Ipi = SP.argsort(-MPC.ravel())
    IpiRev = SP.argsort(MPC.ravel())

    mRange = list(range(FA0.components))
    mRange[nFix:] = Ipi + nFix

    mRangeRev = list(range(FA0.components))
    mRangeRev[nFix:] = IpiRev + nFix

    #Run model for 50 iterations
    pi = pi0[:, mRange]
    terms = terms0[mRange]
    init = {'init_data': CGauss(Y), 'Pi': pi, 'terms': terms, 'noise': noise}
    FA = slalom.CSparseFA(components=K,
                          sigmaOff=sigmaOff,
                          sigmaOn=SP.ones(pi.shape[1]) * 1.0,
                          sparsity=sparsity,
                          nIterations=50,
                          permutation_move=False,
                          priors=priors,
                          initType='pcaRand',
                          learnPi=learnPi)
    FA.shuffle = True
    FA.nScale = 30

    FA.init(**init)
    for j in range(50):
        FA.update()

    #Run reverse model for 50 iterations
    pi = pi0[:, mRangeRev]
    terms = terms0[mRangeRev]
    init = {'init_data': CGauss(Y), 'Pi': pi, 'terms': terms, 'noise': noise}
    FArev = slalom.CSparseFA(components=K,
                             sigmaOff=sigmaOff,
                             sigmaOn=SP.ones(pi.shape[1]) * 1.0,
                             sparsity=sparsity,
                             nIterations=50,
                             permutation_move=False,
                             priors=priors,
                             initType='pcaRand',
                             learnPi=learnPi)
    FArev.shuffle = True
    FArev.nScale = 30
    FArev.init(**init)

    #FArev.iterate(forceIterations=True, nIterations=nIterations)
    for j in range(50):
        FArev.update()

    #import pdb
    IpiM = (-(0.5 * (1. / FArev.Alpha.E1[SP.argsort(mRangeRev)][nFix:]) + .5 *
              (1. / FA.Alpha.E1[SP.argsort(mRange)][nFix:]))).argsort()

    #    IpiM = (-(0.5*(1./FArev.Alpha.E1[SP.argsort(mRangeRev)][nFix:]*FArev.S.E1[:,SP.argsort(mRangeRev)][:,nFix:].std(0))+.5*(1./FA.Alpha.E1[SP.argsort(mRange)][nFix:]*FA.S.E1[:,SP.argsort(mRange)][:,nFix:].std(0)))).argsort()
    Ilabel = SP.hstack([SP.arange(nFix), IpiM + nFix])

    return Ilabel
Example No. 40
# Create input
import scipy
import loudia

fundamental = 440.0
harmonics = 5

frameSize = 128
fftSize = 512
sampleRate = 8000

a_zeros = scipy.zeros( frameSize )
a_ones = scipy.ones( frameSize )
a_random = scipy.random.random( frameSize )
a_sine = scipy.cos(2 * scipy.pi * 440 * scipy.arange( frameSize ) / sampleRate + scipy.pi/4.0)

a_sine += (a_random - 0.5) * 1.0

# Loudia's solution # --------------------------------- #
window = loudia.Window(frameSize, loudia.Window.HAMMING)
fft = loudia.FFT(fftSize)
peaks = loudia.PeakDetectionComplex(5, 4)
peaksinterp = loudia.PeakInterpolationComplex()
trajs = loudia.PeakTracking(5, 4, 3)


r_sine_windowed = window.process(a_sine)
r_sine_mag = fft.process(r_sine_windowed)
r_sine_peakpos, r_sine_peakmag, r_sine_peakphase = peaks.process(r_sine_mag)
r_sine_peakipos, r_sine_peakimag, r_sine_peakphasei = peaksinterp.process(r_sine_mag, r_sine_peakpos, r_sine_peakmag, r_sine_peakphase)
Example No. 41
def run(P):

    import pyfits, os, scipy, pylab

    web = '/afs/slac.stanford.edu/u/ki/pkelly/'  # os.environ['sne'] + '/photoz/COSMOS_' + set + '_' + ext + '/'

    os.system('mkdir -p ' + web)

    print 'web', web
    comp = open(web + '/comp.html', 'w')

    print 'loading cosmos'
    C = pyfits.open(
        '/nfs/slac/g/ki/ki05/anja/SUBARU/COSMOS_PHOTOZ/PHOTOMETRY_W-C-IC_' +
        ext + '/cosmos_lephare.cat')['OBJECTS'].data
    print 'loading BPZ'
    U = pyfits.open(
        '/nfs/slac/g/ki/ki05/anja/SUBARU/COSMOS_PHOTOZ/PHOTOMETRY_W-C-IC_' +
        ext + '/COSMOS_PHOTOZ.APER.1.' + set +
        '.list.all.bpz.tab')['STDTAB'].data
    print 'loading PDZ'
    print 'done'

    params = {
        'backend': 'ps',
        'text.usetex': True,
        'ps.usedistiller': 'xpdf',
        'ps.distiller.res': 6000
    }
    pylab.rcParams.update(params)

    fig_width = 6
    fig_height = 6

    fig_size = [fig_width, fig_height]
    params = {
        'axes.labelsize': 16,
        'text.fontsize': 16,
        'legend.fontsize': 15,
        'xtick.labelsize': 16,
        'ytick.labelsize': 16,
        'figure.figsize': fig_size
    }
    pylab.rcParams.update(params)

    for low, high in [[0, 0.1], [0.1, 0.2], [0.2, 0.3], [0.3, 0.4], [0.4, 0.5],
                      [0.5, 0.6], [0.6, 0.7], [0.7, 0.8], [0.8, 0.9],
                      [0.9, 1.0], [1.0, 1.2], [3, 4]]:

        file = ext + '_' + str(low) + '_' + str(high) + 'interval.png'
        comp.write('<img src=' + file + '></img>\n')

        if True:
            pylab.clf()
            out_zs = (U.field('BPZ_Z_B') < high) * (
                U.field('BPZ_Z_B') > low) * (U.field('BPZ_ODDS') >
                                             0) * (C.field('zp_best') > 0)
            cosmos_zs = C.field('zp_best')[out_zs]
            multiple = 7
            bins = scipy.arange(0, 4., 0.01 * multiple)

            n, bins, patches = pylab.hist(cosmos_zs, bins=bins, histtype='bar')

            pylab.clf()
            mask = out_zs[:]
            pdz_zs_raw = P[mask].sum(axis=0) / mask.sum()
            pdz_zs = []
            sum = 0
            for i in range(len(pdz_zs_raw)):
                if (i + 1) % multiple == 0:
                    pdz_zs.append(sum)
                    sum = 0
                sum += pdz_zs_raw[i]

            print pdz_zs

            x = bins[:-1]
            y = pdz_zs[:len(bins[:-1])]
            y[0] = 0
            print x, y
            y_cosmos = n / float(len(cosmos_zs))

            print(y_cosmos)

            pylab.bar(x,
                      y_cosmos,
                      width=x[1] - x[0],
                      facecolor='red',
                      linewidth=0,
                      label='COSMOS')
            pylab.bar(x,
                      y,
                      width=x[1] - x[0],
                      facecolor='none',
                      edgecolor='black',
                      label='BPZ')

            import scipy, pylab
            zs = scipy.arange(0.01000, 4.0100, 0.0100)

            #pylab.plot([0,1],[0,1],c='red')
            #pylab.xlim([0,1])
            #pylab.ylim([0,1])
            #pylab.title('ALL')
            pylab.legend(frameon=False)
            pylab.figtext(0.62,
                          0.73,
                          '$' + str(low) + '< z_B < ' + str(high) + '$',
                          size=15)
            pylab.figtext(0.62,
                          0.68,
                          str(len(cosmos_zs)) + ' Galaxies',
                          size=15)
            pylab.figtext(0.62, 0.63, 'BVRIZ', size=15)
            pylab.ylabel('Probability Density')
            pylab.xlabel('Redshift')
            print web + file
            pylab.savefig(web + file)
            pylab.savefig(web + file.replace('.png', '.pdf'))
            pylab.savefig(web + file.replace('.png', '.ps'))
            #pdz.write('<img width=600px src=all.png></img><br>\n')
    comp.close()
Exemplo n.º 42
0
def initFA(Y, terms, I, gene_ids=None, nHidden=3, nHiddenSparse = 0,pruneGenes=True, FPR=0.99, FNR=0.001, \
            noise='gauss', minGenes=20, do_preTrain=True, nFix=None, priors=None, covariates=None, dropFactors=True, learnPi=False):
    """Initialise the slalom factor analysis model.

    Three inputs are required: first, a gene expression matrix `Y` containing normalised count values of `N` cells and `G`
    variable genes in log-space; second, a vector `terms` containing the names of all annotated gene sets (corresponding to annotated factors);
    and third, a binary indicator matrix `I` linking `G` genes to `K` terms by indicating which genes are annotated to each factor.
    A variety of options can be specified as described below.

    Args:
        Y (array_like): Matrix of normalised count values of `N` cells 
                                 and `G` variable genes in log-space.
                                 Dimension (:math:`N\\times G`).
        terms    (vector_like): Names of `K` annotated gene sets. Dimension
                                 (:math:`K\\times 0`).
        I           (array_like): Indicator matrix specifying
                                 whether a gene is annotated to a specific factor.
                                 Dimension (:math:`G\\times K`).
        gene_ids   (array_like): Gene identifiers (optional, defaults to None)
        FNR             (float): False negative rate of annotations.
                                 Defaults to 0.001
        FPR             (float): False positive rate of annotations.
                                 Defaults to 0.99                                 
        nHidden           (int): Number of unannotated dense factors. Defaults to 3.
        nHiddenSparse       (int): Number of unannotated sparse factors. Defaults to 0. 
                                 This value should be changed to e.g. 5 if the diagnostics fail.
        pruneGenes         (bool): Prune genes that are not annotated to at least one factor. This option allows fast inference and
                                   should be set to `True` either if the
                                   key objective is to rank factors or if the annotations cover all genes of interest.
                                   Defaults to `True`.
        dropFactors         (bool): Drop factors from the update schedule once they are shut off. In practice, factors that are switched off
                                   at some point during inference are usually not switched on again. Allows faster inference. Defaults to `True`.
                                   Currently only supported for the Gaussian noise model.
        noise              (str): Specifies the observation noise model. Should be either `'gauss'`,`'hurdle'` or `'poisson'`.
                                 Defaults to `gauss`.                                      
        minGenes          (int): minimum number of genes required per term to retain it  
                                 Defaults to `20`.  
        do_preTrain      (bool): Boolean switch indicating whether pre-training should be used to establish the initial 
                                update order. Can be set to `False` for very large datasets.
                                Defaults to `True` 
        priors      (dict): Dictionary containing the hyperparameters of the priors for `Alpha`, `Eps` and Pi (`PiDense` and `PiSparse`). 
                            Defaults to None; in this case default values are used.    
        covariates  (array_like): Matrix with known covariates that are controlled for when fitting the model. Defaults to `None`.
        learnPi          (bool): Learn sparsity of sparse hidden factors (defaults to False)


    Returns:
        A :class:`slalom.CSparseFA` instance.
    """

    #check for consistency of input parameters
    [num_cells, num_genes] = Y.shape
    num_terms = I.shape[1]

    assert I.shape[
        0] == num_genes, 'annotation needs to be matched to gene input dimension'

    assert noise in ['gauss', 'hurdle', 'poisson'], 'invalid noise model'
    assert 0 < FNR < 1, 'FNR is required to be between 0 and 1'
    assert 0 < FPR < 1, 'FPR is required to be between 0 and 1'
    if noise == "hurdle" and dropFactors == True:
        dropFactors = False
        print(
            "dropFactors only supported for gauss noise model. Set to False.")

    #make sure the annotation is boolean
    I = (I > .5)
    # filter annotation by min number of required genes
    Iok = I.sum(axis=0) > minGenes
    terms = terms[Iok]
    I = I[:, Iok]
    num_terms = I.shape[1]

    #create initial pi matrix, which corresponds to the effective prior probability of an annotated link
    pi = SP.zeros([num_genes, num_terms], dtype='float')
    #default FNR
    pi[:] = FNR
    #active links
    pi[I] = FPR

    #prune genes?
    if pruneGenes == True:
        idx_genes = SP.sum(I, 1) > 0
        Y = Y[:, idx_genes]
        pi = pi[idx_genes, :]
        if not (gene_ids is None):
            gene_ids = SP.array(gene_ids)[idx_genes]
    else:
        idx_genes = SP.arange(Y.shape[1])
        if Y.shape[1] > 10000:
            print(
                "For large datasets we recommend setting the pruneGenes option to True."
            )

    #center data for Gaussian observation noise
    if noise == 'gauss':
        Y -= SP.mean(Y, 0)

    #include hidden variables
    if nHiddenSparse > 0:
        piSparse = SP.ones((Y.shape[1], nHiddenSparse)) * .01
        idxVar = SP.argsort(-Y.var(0))
        for iH in range(piSparse.shape[1]):
            idxOnH = SP.random.choice(idxVar[:100], 20, replace=False)
            piSparse[idxOnH, iH] = 0.99
        pi = SP.hstack([piSparse, pi])
        thiddenSparse = SP.repeat('hiddenSparse', nHiddenSparse)
        termsHiddnSparse = [
            '%s%s' % t for t in zip(thiddenSparse, SP.arange(nHiddenSparse))
        ]
        terms = SP.hstack([termsHiddnSparse, terms])
        num_terms += nHiddenSparse

    thidden = SP.repeat('hidden', nHidden)
    termsHidden = ['%s%s' % t for t in zip(thidden, SP.arange(nHidden))]
    terms = SP.hstack([termsHidden, terms])

    pi = SP.hstack([SP.ones((Y.shape[1], nHidden)) * .99, pi])
    num_terms += nHidden

    if not (covariates is None):
        if len(covariates.shape) == 1:
            covariates = covariates[:, SP.newaxis]
        nKnown = covariates.shape[1]
        pi = SP.hstack([SP.ones((Y.shape[1], nKnown)) * .99, pi])
        num_terms += nKnown
        tcovariates = SP.repeat('covariate', nKnown)
        termsCovariates = [
            '%s%s' % t for t in zip(tcovariates, SP.arange(nKnown))
        ]
        terms = SP.hstack([termsCovariates, terms])


    # mean term for non-Gaussian noise models
    if noise != 'gauss':
        terms = SP.hstack(['bias', terms])
        pi = SP.hstack([SP.ones((Y.shape[1], 1)) * (1. - 1e-10), pi])
        num_terms += 1

    if do_preTrain == True:
        Ilabel = preTrain(Y,
                          terms,
                          pi,
                          noise=noise,
                          nFix=nFix,
                          priors=priors,
                          covariates=covariates)
        pi = pi[:, Ilabel]
        terms = terms[Ilabel]

    init = {
        'init_data': CGauss(Y),
        'Pi': pi,
        'terms': terms,
        'noise': noise,
        'covariates': covariates,
        "dropFactors": dropFactors
    }
    if not gene_ids is None:
        gene_ids = SP.array(gene_ids)

    FA = slalom.CSparseFA(components=num_terms,
                          idx_genes=idx_genes,
                          gene_ids=gene_ids,
                          priors=priors,
                          learnPi=learnPi)
    FA.saveInit = False
    FA.init(**init)

    return FA
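# Hedged usage sketch for initFA (toy inputs are hypothetical; real annotations
# would normally come from curated gene set collections). `iterate` and
# `getRelevance` are the calls used elsewhere in this file:
#
# import scipy as SP
# Y = SP.random.randn(100, 50)                    # 100 cells x 50 genes, log-space
# I = SP.random.rand(50, 4) > 0.8                 # 50 genes x 4 annotated terms
# terms = SP.array(['termA', 'termB', 'termC', 'termD'])
# FA = initFA(Y, terms, I, nHidden=3, noise='gauss', minGenes=5)
# FA.iterate(nIterations=100)
# print(FA.getRelevance())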
import scipy
import matplotlib.pyplot as plt

p = 0.5
n = 300
k = 200
victory = 10
ruin = 0
interval = victory - ruin + 1

winLose = 2 * (scipy.random.random((n, k, interval)) <= p) - 1
totals = scipy.cumsum(winLose, axis=0)

start = scipy.multiply.outer(scipy.ones((n + 1, k), dtype=int),
                             scipy.arange(ruin, victory + 1, dtype=int))
paths = scipy.zeros((n + 1, k, interval), dtype=int)
paths[1:n + 1, :, :] = totals
paths = paths + start


def match(a, b, nomatch=None):
    return b.index(a) if a in b else nomatch


# arguments: a is a scalar, b is a python list, nomatch is a scalar
# returns the position of the first match of a in b;
# if a is not in b, returns the value of nomatch
# modeled on the R function "match", but with less generality

hitVictory = scipy.apply_along_axis(
Exemplo n.º 44
0
def plotRelevance(FA,
                  Nactive=20,
                  stacked=True,
                  madFilter=0.4,
                  annotated=True,
                  unannotated=False,
                  unannotated_sparse=False):
    """Plot results of slalom

    Identified factors and corresponding gene set size ordered by relevance (white = low relevance; black = high relevance). 
    Top panel: Gene set augmentation, showing the number of genes added (red) and removed (blue) by the model for each factor.

    Args:
        FA                 (:class:`slalom.CSparseFA`): Factor analysis object, usually generated using the `initFA` function
        Nactive                                  (int): Number of terms to be plotted
        stacked                                 (bool): Boolean variable indicating whether bars should be stacked
        madFilter                              (float): Filter factors by this mean absolute deviation to exclude outliers.
                                                        For large datasets this can be set to 0.
        annotated                               (bool): Indicates whether annotated factors should be plotted. Defaults to True.
        unannotated                             (bool): Indicates whether unannotated factors should be plotted. Defaults to False.
        unannotated_sparse                      (bool): Indicates whether unannotated sparse factors should be plotted. Defaults to False.


    """

    pltparams = {
        'backend': 'pdf',
        'axes.labelsize': 12,
        'font.size': 12,
        'legend.fontsize': 13,
        'xtick.labelsize': 14,
        'ytick.labelsize': 12,
        'text.usetex': False
    }

    plt.rcParams.update(pltparams)

    pattern_hidden = re.compile('hidden*')
    pattern_bias = re.compile('bias')

    terms = FA.getTerms(annotated=annotated,
                        unannotated=unannotated,
                        unannotated_sparse=unannotated_sparse)

    i_use = list()
    if unannotated_sparse == True:
        i_use.extend(FA.iLatentSparse)
    if unannotated == True:
        i_use.extend(FA.iLatent)
    if annotated == True:
        i_use.extend(
            SP.setxor1d(
                SP.hstack([
                    SP.where(FA.terms == 'bias')[0], FA.iLatentSparse,
                    FA.iLatent
                ]), SP.arange(len(FA.terms))))
    i_use = SP.array(i_use)

    X = FA.getX()[:, i_use]
    Iprior = FA.getAnnotations()[:, i_use]
    Iposterior = FA.getZ()[:, i_use] > .5
    rel = FA.getRelevance()[i_use]

    MAD = mad(X)
    R = (MAD > madFilter) * (rel)
    terms = SP.array(terms)

    Nactive = min(SP.sum(R > 0), Nactive)

    # term changes: number of genes annotated, gained and lost
    Nprior = Iprior.sum(axis=0)
    #gains
    Ngain = (Iposterior & (~Iprior)).sum(axis=0)
    #loss
    Nloss = ((~Iposterior & (Iprior))).sum(axis=0)

    #sort terms by relevance
    Iactive = R.argsort()[::-1][0:Nactive]
    RM = R[Iactive, SP.newaxis]

    xticks_range = SP.arange(Nactive)
    terms[terms == 'hidden'] = 'Unannotated'
    terms[terms == 'hiddenSparse'] = 'Unannotated-sparse'
    xticks_text = list(terms[Iactive])

    n_gain = []
    n_loss = []
    n_prior = []
    for i in range(Nactive):
        n_gain += [Ngain[Iactive[i]]]
        n_loss += [-1.0 * Nloss[Iactive[i]]]
        n_prior += [Nprior[Iactive[i]]]

    width = 0.6
    left = SP.arange(Nactive) - 0.5 + (1. - width) / 2.

    fig = plt.figure(2, figsize=(10, 6))
    fig.subplots_adjust(bottom=0.3)

    gs = mpl.gridspec.GridSpec(2,
                               2,
                               height_ratios=[2., 1.],
                               width_ratios=[1., 0.05])
    gs.update(hspace=0.1)

    #fig.text(0.06, 0.6, 'Number of annotated genes', ha='center', va='center', rotation='vertical', fontsize=17)

    #################################################################################
    ax1 = plt.subplot(gs[1, 0])
    simpleaxis(ax1)
    ax1.set_xlabel('Active pathways', fontsize=15)
    ax1.set_ylabel('Gene set size', fontsize=13.5)
    #im = ax1.imshow(SP.append(RM.T,[[0]],axis=1),origin=[0,0],interpolation='nearest',cmap='Greys',aspect='auto')

    minima = 0
    maxima = max(RM)

    norm = mpl.colors.Normalize(vmin=minima, vmax=maxima, clip=True)

    mapper = mpl.cm.ScalarMappable(norm=norm, cmap='Greys')

    colors = []
    for v in RM.flatten():
        colors += [mapper.to_rgba(v)]

    #colors = []
    #for i in xrange(RM.shape[0]):
    #    colors += [im.cmap(im.norm(RM[i]))[0,:-1]]

    y_max = Nprior[Iactive].max() + 100.

    bar_rel_importance = ax1.bar(left=SP.arange(Nactive) - 0.5,
                                 width=1.05,
                                 height=[y_max] * len(n_prior),
                                 bottom=0,
                                 color=colors,
                                 log=True,
                                 edgecolor='none')
    bar_annotated = ax1.bar(left=left,
                            width=width,
                            height=n_prior,
                            bottom=0,
                            color='w',
                            log=True,
                            alpha=0.6,
                            edgecolor='k')

    ax1.set_ylim([10, y_max])
    ax1.set_xlim([0, Nactive])
    #ax1.set_yticks([])
    #ax1.set_yscale('log')
    plt.xticks(xticks_range, xticks_text, rotation=45, fontsize=14, ha='right')

    color_bar_ax = plt.subplot(gs[1, 1])
    mpl.colorbar.ColorbarBase(color_bar_ax,
                              cmap='Greys',
                              norm=norm,
                              orientation='vertical',
                              ticks=[minima, maxima])

    #color_bar = fig.colorbar(im, cax=color_bar_ax,ticks=[0., RM.max()])
    color_bar_ax.set_yticklabels([0, 1])
    #color_bar_ax.set_yticklabels([0,round(RM.max(),3)])
    #color_bar_ax.set_ylabel('Rel. importance')
    #color_bar.outline.set_visible(False)
    #################################################################################

    ax0 = plt.subplot(gs[0, 0], sharex=ax1)
    simpleaxis(ax0)

    if stacked:
        bar_gain = ax0.bar(left=left,
                           width=width,
                           height=n_gain,
                           bottom=0,
                           color='#861608')
        bar_loss = ax0.bar(left=left,
                           width=width,
                           height=n_loss,
                           bottom=0,
                           color='#0c09a0')
    else:
        bar_gain = ax0.bar(left=SP.arange(Nactive) - 0.5,
                           width=0.5,
                           height=n_gain,
                           bottom=0,
                           color='#861608')
        bar_loss = ax0.bar(left=SP.arange(Nactive),
                           width=0.5,
                           height=n_loss,
                           bottom=0,
                           color='#0c09a0')

    # figure out range to make ylim symmetric
    ax0.axhline(y=0, linestyle='-', color='gray')

    #ax0.set_yscale('symlog')
    gap = SP.ceil(max(max(n_gain), abs(min(n_loss))) / 4.)
    y_max = SP.ceil(max(n_gain) / gap)
    y_min = SP.floor(min(n_loss) / gap)
    yticks = SP.arange(y_min * gap, y_max * gap, gap)
    ax0.set_yticks(yticks)
    ax0.set_ylabel('Gene set augmentation', fontsize=13.5)
    ax0.legend((bar_gain[0], bar_loss[0]), ('Gain', 'Loss'),
               ncol=1,
               loc='center left',
               bbox_to_anchor=(1, 0.5),
               frameon=False,
               fontsize=15)
    plt.setp(ax0.get_xticklabels(), visible=False)
    plt.show()

    return fig
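# Hedged usage sketch: plotRelevance expects a trained CSparseFA object, e.g. one
# produced by the initFA/iterate calls shown earlier (the data are hypothetical):
#
# FA = initFA(Y, terms, I)
# FA.iterate(nIterations=1000)
# fig = plotRelevance(FA, Nactive=15, madFilter=0.4)
# fig.savefig('relevance.pdf')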
Exemplo n.º 45
0
    def __init__(self, grid, data):
        # data is vector-valued on the grid: its last axis indexes the output channel
        self.n = data.shape[-1]
        grid_aug = grid + [s.arange(data.shape[-1])]
        self.itp = RegularGridInterpolator(grid_aug, data)
Exemplo n.º 46
0
    def __init__(self, columns, tag, default=None):
        """Create a Trace object and fill it with data.

        Parameters:
        :param columns: dict of 1D data arrays. Column names can be:
                            "time" (or any other value in known_time_labels)    automatically recognized as the time vector
                            mass (int)                                          automatically recognized as data corresponding to the given mass
                            arbitrary string                                    to be manually assigned to a timetrace via self.set_timecol
        :param tag: dict of metadata
                            time_unit   unit of the time vector
                            title       data title
        :param default: dict of default values (see set_default_values for understood options)
        :return: None
        """

        self.type = "Trace"
        self.columns = {}
        self.filled = False  # Trace contains data (Boolean)
        self.deconvoluted = False  # deconvolute function has been used on the data
        self.calibrated = False  # calibrate function has been used on the data
        bloc_len = 0  # length of shortest data column
        self.echo = True

        # Load the default values for the object
        if default is None:
            self.def_val = load_default_values({})
        else:
            self.def_val = load_default_values(default)

        if type(tag) != dict:  # if tag is not provided in the correct format
            self.tag = {"raw_tag": tag}  # store it anyway
        else:
            self.tag = tag
        self.tag["init_errors"] = []  # Errors encountered in Trace.__init__
        header_int = []  # List of recorded masses, represented by integers
        lengths = {}  # Lengths of all of the columns
        for key in columns:
            # Check if all columns have the same length
            lengths[key] = len(columns[key])
            try:
                value = int(key)
                header_int.append(value)
                self.columns[value] = sp.array(columns[key])
            except ValueError:
                self.columns[key] = sp.array(columns[key])
        if len(header_int) == 0:
            self.tag["init_errors"].append(103)
        try:
            bloc_len = min(lengths.values())  # shortest column length
            for key in self.columns:
                # If a column is longer than the shortest column (bloc_len)
                # drop any datapoints beyond bloc_len
                self.columns[key] = self.columns[key][:bloc_len]
            if bloc_len == 0:
                # at least one zero-length column was included in the data
                self.tag["init_errors"].append(102)
        except ValueError:
            self.tag["init_errors"].append(101)
        # Create the column of indices of datapoints
        self.columns["index"] = sp.arange(bloc_len)
        if len(header_int) > 0 and bloc_len > 0:
            self.filled = True  # Trace contains technically OK data
            # set the time column
            time_col_candidates = list(
                set(self.columns.keys()) & set(known_time_labels))
            try:
                self.set_timecol(time_col_candidates[0])
            except:
                # if no label is recognized, set the index column as the time column
                self.set_timecol("index")

            self.header_int = sp.array(sorted(header_int))
            # Title of the Trace object
            # If 'title' is not provided in the tag, use default value
            if "title" not in self.tag:
                self.tag["title"] = self.def_val["trace_name"]
            # Initialize mass-space and cracking patterns
            # This has now been moved to RGA_base_container

            RGA_base_container.__init__(self)
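# Hedged usage sketch for Trace (column names and masses are hypothetical; assumes
# the surrounding module provides known_time_labels, load_default_values and
# RGA_base_container as referenced above):
#
# columns = {"time": [0.0, 1.0, 2.0], 18: [1e-9, 1.1e-9, 1.2e-9], 28: [5e-9, 5.1e-9, 5.2e-9]}
# tag = {"time_unit": "s", "title": "demo trace"}
# tr = Trace(columns, tag)
# print(tr.header_int)         # -> array([18, 28])
# print(tr.columns["index"])   # -> array([0, 1, 2])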
import numpy as np
import scipy as sp
from scipy import sqrt, exp, log, pi
from scipy import zeros, shape
from scipy import stats
import random
import pandas as pd
import os
from datetime import datetime as dt
from scipy.optimize import minimize
from scipy.stats import norm
import matplotlib.pyplot as plt

## 1. Normal distribution ##

# generate random numbers from the standard normal distribution
x = sp.random.standard_normal(size=10)
print(x)
x = sp.arange(-3, 3, 0.01)
y = stats.norm.pdf(x)
plt.plot(x, y)
plt.title("standard normal distribution")
plt.xlabel('x')
plt.ylabel('y')
plt.show()

# seeded random numbers
sp.random.seed(12345)
x = sp.random.normal(0, 1, 20)
print(x[:5])

# generate normally distributed random numbers
mean = 0.1
std = 0.2
Exemplo n.º 48
0
    def __call__(self, points):
        # evaluate the interpolator once per output channel and collect the results
        res = []
        for v in s.arange(self.n):
            p_aug = s.concatenate((points, s.array([v])), axis=0)
            res.append(self.itp(p_aug))
        return res
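# Self-contained sketch of the pattern used by the __init__/__call__ fragments in
# Exemplos 45 and 48: a regular-grid interpolator over vector-valued data,
# evaluated one output channel at a time. Class and variable names here are
# illustrative, not from the original module:
import scipy as s
from scipy.interpolate import RegularGridInterpolator


class VectorInterp:
    def __init__(self, grid, data):
        # data has the grid's shape plus a trailing axis of length n (the output vector)
        self.n = data.shape[-1]
        grid_aug = grid + [s.arange(data.shape[-1])]
        self.itp = RegularGridInterpolator(grid_aug, data)

    def __call__(self, points):
        # evaluate every output channel at the same input point
        res = []
        for v in s.arange(self.n):
            p_aug = s.concatenate((points, s.array([v])), axis=0)
            res.append(self.itp(p_aug))
        return res


grid = [s.linspace(0., 1., 5), s.linspace(0., 1., 7)]
data = s.random.rand(5, 7, 3)          # a 3-vector at each grid node
vi = VectorInterp(grid, data)
print(vi(s.array([0.5, 0.5])))         # three interpolated channel values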
Exemplo n.º 49
0
    def plot_cov(self):

        cov = self._co

        if False:
            ###
            plt.plot(sp.diag(cov))
            plt.grid()
            plt.show()
            ###
            plt.plot(sp.diag(cov) * self._nb)
            plt.grid()
            plt.show()
            ###
            plt.plot(sp.diag(cov) * self._rt)
            plt.grid()
            plt.show()
        if True:
            cor = utils.getCorrelationMatrix(cov)
            ###
            #tcor = cor.copy()
            #tcor[tcor==1.] = sp.nan
            #plt.imshow(tcor, interpolation='nearest')
            #plt.show()
            ###
            yMin = None
            yMax = None
            for i in range(3):
                mcor = sp.asarray([
                    sp.mean(sp.diag(cor, k=i + self._nt * k))
                    for k in sp.arange(self._np)
                ])
                plt.plot(sp.arange(mcor.size) * self._binSize,
                         mcor,
                         linewidth=2,
                         label=r"$\Delta r_{\perp} = " +
                         str(int(i * self._binSize)) + "$")

                if yMin is None:
                    yMin = mcor.min()
                else:
                    yMin = min(yMin, mcor.min())
                if yMax is None:
                    if i == 0:
                        yMax = mcor[1:].max()
                    else:
                        yMax = mcor.max()
                else:
                    if i == 0:
                        yMax = max(yMax, mcor[1:].max())
                    else:
                        yMax = max(yMax, mcor.max())
            plt.ylim([yMin, yMax])
            plt.xlabel(r"$\Delta r_{\parallel} \, [h^{-1} \, \mathrm{Mpc}]$",
                       fontsize=20)
            plt.ylabel(
                r"$\overline{Corr}(\Delta r_{\parallel},\Delta r_{\perp})$",
                fontsize=20)
            plt.legend(fontsize=20, numpoints=1, ncol=2, loc=1)
            plt.grid()
            plt.tight_layout()
            #plt.savefig(self._title)
            #plt.clf()
            plt.show()

        return
Exemplo n.º 50
0
        print(last + ' Arnorm  =  %12.4e' % (Arnorm,))
        print(last + msg[istop+1])

    if istop == 6:
        info = maxiter
    else:
        info = 0

    return (postprocess(x),info)


if __name__ == '__main__':
    from scipy import ones, arange
    from scipy.linalg import norm
    from scipy.sparse import spdiags

    n = 10

    residuals = []

    def cb(x):
        residuals.append(norm(b - A*x))

    # A = poisson((10,),format='csr')
    A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    A.psolve = M.matvec
    b = 0*ones(A.shape[0])
    x = minres(A,b,tol=1e-12,maxiter=None,callback=cb)
    # x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
Exemplo n.º 51
0
enddate = (2016, 12, 31)
#
y0 = pd.Series(ret_f('IBM', begdate, enddate))
x0 = pd.Series(ret_f('^GSPC', begdate, enddate))
#
d = quotes_historical_yahoo_ochl('^GSPC',
                                 begdate,
                                 enddate,
                                 asobject=True,
                                 adjusted=True).date[0:-1]
lag_year = d[0].strftime("%Y")
y1 = []
x1 = []
beta = []
index0 = []
for i in sp.arange(1, len(d)):
    year = d[i].strftime("%Y")
    if (year == lag_year):
        x1.append(x0[i])
        y1.append(y0[i])
    else:
        (beta, alpha, r_value, p_value, std_err) = stats.linregress(y1, x1)
        alpha = round(alpha, 8)
        beta = round(beta, 3)
        r_value = round(r_value, 3)
        p_value = round(p_value, 3)
        print(year, alpha, beta, r_value, p_value)
        x1 = []
        y1 = []
        lag_year = year
"""
  Name     : c11_03_standard_normal_dist.py
  Book     : Python for Finance (2nd ed.)
  Publisher: Packt Publishing Ltd. 
  Author   : Yuxing Yan
  Date     : 6/6/2017
  email    : [email protected]
             [email protected]
"""
import scipy as sp
import scipy.stats as stats
x = sp.arange(-3, 3, 0.01)
ret = stats.norm.pdf(x)
confidence = 0.99
position = 10000
z = stats.norm.ppf(1 - confidence)
print("z=", z)
zES = -stats.norm.pdf(z) / (1 - confidence)
print("zES=", zES)
std = sp.std(ret)
VaR = position * z * std
print("VaR=", VaR)
ES = position * zES * std
print("ES=", ES)
Exemplo n.º 53
0
# imports assumed for this excerpt (not shown in the original snippet)
from scipy import sqrt, arange, zeros, pi
import matplotlib.pyplot as plt


def func_nAg(WLs):
    ep = 3.691 - 9.1522**2 / ((1240 / WLs)**2 + 1j * 0.021 * (1240 / WLs))
    index = sqrt(ep)
    return index  # dielectric function of silver (Ag)


def func_nTiO2(WLs):
    ep = 5.193 + 0.244 / ((WLs / 1000)**2 - 0.0803)
    index = sqrt(ep)
    return index  # dielectric function of TiO2


WLmin = 300  # shortest wavelength [nm]
WLmax = 1000  # longest wavelength [nm]
WLperiod = 1  # wavelength step [nm]
WLx = arange(WLmin, WLmax + 1, WLperiod)  # wavelength array
NumWLx = int((WLmax - WLmin) / WLperiod) + 1  # number of wavelengths
k0 = 2 * pi / WLx  # wavenumber at each wavelength

nTiO2 = zeros((NumWLx), dtype=complex)  # initialize the TiO2 refractive-index array
nAg = zeros((NumWLx), dtype=complex)  # initialize the Ag refractive-index array

for i in range(NumWLx):
    nTiO2[i] = func_nTiO2(WLx[i])  # TiO2 refractive index
    nAg[i] = func_nAg(WLx[i])  # Ag refractive index

epx = 0.5 * (nTiO2**2 + nAg**2)  # effective permittivity from the EMA, x direction
epz = 2 * (nTiO2**2) * (nAg**2) / ((nTiO2**2) + (nAg**2))  # effective permittivity from the EMA, z direction

plt.figure(figsize=(8, 6))
# plot the real part of the effective-medium permittivity in the x direction
if __name__ == '__main__':
    # Load data set
    X, y = rt.get_samples_from_roi('../Data/university.tif',
                                   '../Data/university_gt.tif')
    sc = StandardScaler()
    X = sc.fit_transform(X)

    # Split the data
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.1,
                                                        random_state=0,
                                                        stratify=y)

    # Parameters
    param_grid_svm = dict(gamma=2.0**sp.arange(-4, 4),
                          C=10.0**sp.arange(0, 3))  # SVM
    param_grid_linear_svm = dict(C=10.0**sp.arange(-2, 3))  # LinearSVM
    param_grid_rf = dict(n_estimators=sp.arange(10, 150, 10))  # RF
    param_grid_fffs = dict(maxvar=20, threshold=0.001)  # FFFS
    param_grid_knn = dict(n_neighbors=sp.arange(1, 50, 5))
    F1, CT = [], []

    # Start the classification: SVM
    ts = time.time()
    F1.append(compute_SVM(X_train, y_train, X_test, y_test, param_grid_svm))
    CT.append(time.time() - ts)

    # Start the classification: RF
    ts = time.time()
    F1.append(compute_RF(X_train, y_train, X_test, y_test, param_grid_rf))
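# The compute_SVM / compute_RF helpers used above are not included in this
# excerpt. A hypothetical sketch of such a helper, built on scikit-learn's
# GridSearchCV (names and return value are assumptions, not the original code):
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import f1_score


def compute_SVM_sketch(X_train, y_train, X_test, y_test, param_grid):
    # grid-search the RBF-SVM hyperparameters, then score on the held-out set
    grid = GridSearchCV(SVC(kernel='rbf'), param_grid=param_grid, cv=5, n_jobs=-1)
    grid.fit(X_train, y_train)
    y_pred = grid.predict(X_test)
    return f1_score(y_test, y_pred, average='weighted')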
Exemplo n.º 55
0
import scipy


def f1(x):
    # generalized Rosenbrock function; its global minimum is at x = [1, 1, ..., 1]
    aux = 0
    for i in scipy.arange(x.shape[0] - 1):
        aux = aux + 100 * (x[i + 1] - x[i]**2)**2 + (x[i] - 1)**2
    return aux
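# Hedged usage sketch: minimising f1 with scipy.optimize; the starting point is
# arbitrary and the call below is an assumption, not part of the original example:
from scipy.optimize import minimize

x0 = scipy.zeros(5)
res = minimize(f1, x0, method='BFGS')
print(res.x)   # should approach the minimum at [1, 1, 1, 1, 1]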
Exemplo n.º 56
0
def compute_ext(filt, N=0.78):

    print '''COMPUTING EXTINCTION COEFFICIENT USING FITZPATRICK99 EXTINCTION LAW'''
    odonnell_ext_1_um = 1.32 # A(1 um) / E(B-V) where R_v = 3.1
    import scipy, math, os
    sed = scipy.loadtxt(os.environ['BIGMACS'] + '/munari.sed')
    ''' now stitch together with blackbody spectrum '''    
    longwave = sed[-1,0]    
    flux = sed[-1,1]

    wavelength = sed[:,0].tolist()
    source = sed[:,1].tolist()

    a = flux / longwave**-3.

    for wave in scipy.arange(11500,20000,25):
        wavelength.append(wave)
        source.append(a*wave**-3.)

    import scipy
    from scipy import interpolate
    sedSpline = interpolate.interp1d(wavelength, source, 
                                   bounds_error = True, 
                                   )


    #s_od = N*odonnell_ext_1_um*odonnell(scipy.arange(3000,20000))
    #s_od = fitzpatrick(scipy.arange(3000,20000))
    #import pylab
    #pylab.clf()
    #pylab.plot(scipy.arange(3000,20000),s_od)
    #pylab.xlim([3000,20000])
    #pylab.show()

    ''' source flux is ergs / s / Ang '''
    filt_wavelength = filt['wavelength']
    filt_response = filt['response']
    throw_out = scipy.zeros(len(filt_wavelength))

    ''' trim off zero-valued tails of response function'''
    for i in range(len(filt_wavelength)):
        if filt_response[i] == 0:
            throw_out[i]= 1.
        else: break

    for i in range(len(filt_wavelength)):
        if filt_response[len(filt_response)-1-i] == 0:
            throw_out[len(filt_response)-1-i]= 1.
        else: break

    filt_wavelength = filt_wavelength[throw_out==0.] 
    filt_response = filt_response[throw_out==0.] 

    #print scipy.array([(filt_wavelength[i]) for i in range(len(filt_wavelength[:-1]))])
    #print scipy.array([fitzpatrick(filt_wavelength[i]) for i in range(len(filt_wavelength[:-1]))])
    numerator = scipy.array([10.**(fitzpatrick(filt_wavelength[i])/-2.5)*sedSpline(filt_wavelength[i])*filt_wavelength[i]*(filt_response[i])*(filt_wavelength[i+1]-filt_wavelength[i]) for i in range(len(filt_wavelength[:-1]))])
    denom = scipy.array([source[i]*filt_wavelength[i]*(filt_response[i])*(filt_wavelength[i+1]-filt_wavelength[i]) for i in range(len(filt_wavelength[:-1]))])

    coeff = -2.5*math.log10(numerator.sum()/denom.sum()) 

    print filt['name'], coeff, 'coeff'
    return coeff
Exemplo n.º 57
0
def region_interface_areas(regions, areas, voxel_size=1, strel=None):
    r"""
    Calculates the interfacial area between all pairs of adjacent regions

    Parameters
    ----------
    regions : ND-array
        An image of the pore space partitioned into individual pore regions.
        Note that zeros in the image will not be considered for area
        calculation.
    areas : array_like
        A list containing the areas of each regions, as determined by
        ``region_surface_area``.  Note that the region number and list index
        are offset by 1, such that the area for region 1 is stored in
        ``areas[0]``.
    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed.  The
        default is 1.
    strel : array_like
        The structuring element used to blur the region.  If not provided,
        then a spherical element (or disk) with radius 1 is used.  See the
        docstring for ``mesh_region`` for more details, as this argument is
        passed to there.

    Returns
    -------
    A named-tuple containing 2 arrays. ``conns`` holds the connectivity
    information and ``area`` holds the result for each pair.  ``conns`` is a
    N-regions by 2 array with each row containing the region number of an
    adjacent pair of regions.  For instance, if ``conns[0, 0]`` is 0 and
    ``conns[0, 1]`` is 5, then row 0 of ``area`` contains the interfacial
    area shared by regions 0 and 5.

    """
    print('_' * 60)
    print('Finding interfacial areas between each region')
    from skimage.morphology import disk, square, ball, cube
    im = regions.copy()
    if im.ndim == 2:
        cube = square
        ball = disk
    # Get 'slices' into im for each region
    slices = spim.find_objects(im)
    # Initialize arrays
    Ps = sp.arange(1, sp.amax(im) + 1)
    sa = sp.zeros_like(Ps, dtype=float)
    sa_combined = []  # Difficult to preallocate since number of conns unknown
    cn = []
    # Start extracting area from im
    for i in tqdm(Ps):
        reg = i - 1
        s = extend_slice(slices[reg], im.shape)
        sub_im = im[s]
        mask_im = sub_im == i
        sa[reg] = areas[reg]
        im_w_throats = spim.binary_dilation(input=mask_im, structure=ball(1))
        im_w_throats = im_w_throats * sub_im
        Pn = sp.unique(im_w_throats)[1:] - 1
        for j in Pn:
            if j > reg:
                cn.append([reg, j])
                merged_region = im[(
                    min(slices[reg][0].start, slices[j][0].start)
                ):max(slices[reg][0].stop, slices[j][0].stop), (
                    min(slices[reg][1].start, slices[j][1].start)
                ):max(slices[reg][1].stop, slices[j][1].stop)]
                merged_region = ((merged_region == reg + 1) +
                                 (merged_region == j + 1))
                mesh = mesh_region(region=merged_region, strel=strel)
                sa_combined.append(mesh_surface_area(mesh))
    # Interfacial area calculation
    cn = sp.array(cn)
    ia = 0.5 * (sa[cn[:, 0]] + sa[cn[:, 1]] - sa_combined)
    ia[ia < 0] = 1
    result = namedtuple('interfacial_areas', ('conns', 'area'))
    result.conns = cn
    result.area = ia * voxel_size**2
    return result
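# Hedged usage sketch (inputs are hypothetical; `regions` would come from a
# watershed-style partitioning and `areas` from region_surface_area, as the
# docstring describes):
#
# result = region_interface_areas(regions=regions, areas=areas, voxel_size=2.5e-6)
# print(result.conns[:5])   # adjacent region pairs
# print(result.area[:5])    # interfacial area shared by each pair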
Exemplo n.º 58
0
def main():
    gr_estimators = {
        "simple": digital.SNR_EST_SIMPLE,
        "skew": digital.SNR_EST_SKEW,
        "m2m4": digital.SNR_EST_M2M4,
        "svr": digital.SNR_EST_SVR
    }
    py_estimators = {
        "simple": snr_est_simple,
        "skew": snr_est_skew,
        "m2m4": snr_est_m2m4,
        "svr": snr_est_svr
    }

    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option(
        "-N",
        "--nsamples",
        type="int",
        default=10000,
        help="Set the number of samples to process [default=%default]")
    parser.add_option("",
                      "--snr-min",
                      type="float",
                      default=-5,
                      help="Minimum SNR [default=%default]")
    parser.add_option("",
                      "--snr-max",
                      type="float",
                      default=20,
                      help="Maximum SNR [default=%default]")
    parser.add_option("",
                      "--snr-step",
                      type="float",
                      default=0.5,
                      help="SNR step amount [default=%default]")
    parser.add_option("-t",
                      "--type",
                      type="choice",
                      choices=gr_estimators.keys(),
                      default="simple",
                      help="Estimator type {0} [default=%default]".format(
                          gr_estimators.keys()))
    (options, args) = parser.parse_args()

    N = options.nsamples
    xx = scipy.random.randn(N)
    xy = scipy.random.randn(N)
    bits = 2 * scipy.complex64(scipy.random.randint(0, 2, N)) - 1
    #bits =(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1) + \
    #    1j*(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1)

    snr_known = list()
    snr_python = list()
    snr_gr = list()

    # when to issue an SNR tag; can be ignored in this example.
    ntag = 10000

    n_cpx = xx + 1j * xy

    py_est = py_estimators[options.type]
    gr_est = gr_estimators[options.type]

    SNR_min = options.snr_min
    SNR_max = options.snr_max
    SNR_step = options.snr_step
    SNR_dB = scipy.arange(SNR_min, SNR_max + SNR_step, SNR_step)
    for snr in SNR_dB:
        SNR = 10.0**(snr / 10.0)
        scale = scipy.sqrt(2 * SNR)
        yy = bits + n_cpx / scale
        print "SNR: ", snr

        Sknown = scipy.mean(yy**2)
        Nknown = scipy.var(n_cpx / scale)
        snr0 = Sknown / Nknown
        snr0dB = 10.0 * scipy.log10(snr0)
        snr_known.append(float(snr0dB))

        snrdB, snr = py_est(yy)
        snr_python.append(snrdB)

        gr_src = blocks.vector_source_c(bits.tolist(), False)
        gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
        gr_chn = channels.channel_model(1.0 / scale)
        gr_snk = blocks.null_sink(gr.sizeof_gr_complex)
        tb = gr.top_block()
        tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
        tb.run()

        snr_gr.append(gr_snr.snr())

    f1 = pylab.figure(1)
    s1 = f1.add_subplot(1, 1, 1)
    s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
    s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
    s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
    s1.grid(True)
    s1.set_title('SNR Estimators')
    s1.set_xlabel('SNR (dB)')
    s1.set_ylabel('Estimated SNR')
    s1.legend()

    f2 = pylab.figure(2)
    s2 = f2.add_subplot(1, 1, 1)
    s2.plot(yy.real, yy.imag, 'o')

    pylab.show()
Exemplo n.º 59
0
#  Potassium
def I_K(V, n):  return g_K * n ** 4 * (V - E_K)

#  Leak
def I_L(V):     return g_L * (V - E_L)

# External current
start=5
finish=105
def I_inj(t, voltage):
    return voltage * (t > start) - voltage * (t > finish)
    # return 10*t

# The time to integrate over
dt=0.05
t = sp.arange(0.0, 110.0, dt)

I = np.linspace(1,2.5,10)
plt.figure()
for i in range(len(I)):
    plt.plot(t, I_inj(t, I[i]))
    plt.xlabel('t (ms)')
    plt.ylabel('$I_{inj}$ ($\\mu{A}/cm^2$)')
plt.legend(I)
plt.show()

plt.figure()
for i in range(len(I)):
    def dALLdt(X, t):
        V, m, h, n = X
Exemplo n.º 60
0
def objFunction(W, R, target_ret):
    stock_mean = np.mean(R, axis=0)
    port_mean = np.dot(W, stock_mean)  # portfolio mean
    #cov=np.cov(R.T)                         # var-cov matrix
    cov = cov0
    port_var = np.dot(np.dot(W, cov), W.T)  # portfolio variance
    penalty = 2000 * abs(port_mean - target_ret)  # penalty for deviation from target_ret
    return np.sqrt(port_var) + penalty  # objective function


R0 = ret_monthly(stocks[0])  # starting from 1st stock
n_stock = len(stocks)  # number of stocks
std1 = std_f(stocks[0])
std2 = std_f(stocks[1])
for jj in sp.arange(1):
    k = 0.1 * std1 * std2
    #cov0=sp.array([[0.00266285,0.00037303],[0.00037303,0.0021296]])
    #cov0=sp.array([[std1**2,k],[k,std2**2]])
    cov0 = sp.array([[std1**2, 0.00037303], [0.00037303, std2**2]])
    for i in xrange(1, n_stock):  # merge with other stocks
        x = ret_monthly(stocks[i])
        R0 = pd.merge(R0, x, left_index=True, right_index=True)
        R = np.array(R0)

    out_mean, out_std, out_weight = [], [], []
    stockMean = np.mean(R, axis=0)
    for r in np.linspace(np.min(stockMean), np.max(stockMean), num=100):
        W = np.ones([n_stock]) / n_stock  # starting from equal weights
        b_ = [(0, 1) for i in range(n_stock)]  # bounds, here no short
        c_ = ({'type': 'eq', 'fun': lambda W: sum(W) - 1.})  #constraint