Example no. 1
	def kernel_caustic_masscalc(self,enc_r,enc_v,ENC_DIA_MASS,ENC_INF_MASS,ENC_DIA_CAU,ENC_INF_CAU,ENC_INF_NFW,enc_r200,enc_m200,enc_srad,enc_esrad,enc_vdisp,r_limit,vlimit,H0,q,k,root,beta,l=None):
	
		enc_r,enc_v = array(enc_r),array(enc_v)
		r = append(enc_r,enc_r)
		v = append(enc_v,-1*enc_v)

		xbeta,abeta = loadtxt(''+str(root)+'/nkern/Documents/MDB_milliMil_halodata/Caustic/average_betaprofile.tab',dtype='float',usecols=(0,1),unpack=True)

		print('')
		print('## Working on Ensemble #' + str(k))
		if l is not None:
			print('## Line of Sight #' + str(l))
		print('_______________________________________')
		fit = polyfit((xbeta*enc_r200)[xbeta<4],abeta[xbeta<4],6)			
		res_array = array([1])
		img_tot = 0
		img_grad_tot = 0
		img_inf_tot = 0
		for u in range(res_array.size):
			x_range,y_range,img,img_grad,img_inf = C.gaussian_kernel(r,v,enc_r200,normalization=H0,scale=q,res=200,adj=res_array[u],see=False)
			img_tot += img/npmax(img)
			img_grad_tot += img_grad/npmax(img_grad)
			img_inf_tot += img_inf/npmax(img_inf)

		## Define Beta ##
#		beta = fit[0]*x_range**6+fit[1]*x_range**5+fit[2]*x_range**4+fit[3]*x_range**3+fit[4]*x_range**2+fit[5]*x_range+fit[6]	
		beta = np.zeros(x_range.size) + beta
	
		## Caustic Surface Estimation ##
		maxv = enc_r200*H0*sqrt(200)+500

		## INFLECTION TECHNIQUE (MPHI) ##
		Anew,threesig,dens_norm,e_dens_norm,srad,e_srad = C.level_search2(r,v,r,v,r,x_range,y_range,img_tot,img_inf_tot,H0*q,enc_r200,r_limit,maxv,beta,enc_srad,enc_esrad,use_vdisp=enc_vdisp,bin=k+1)
		vdispersion = threesig/3.5

		## DIAFERIO TECHNIQUE (CAUSTIC) ##
		AnewD,threesigD,dens_normD,e_dens_normD,sradD,e_sradD = C.level_search(r,v,r,v,r,x_range,y_range,img_tot,H0*q,enc_r200,r_limit,maxv,beta,enc_srad,enc_esrad,use_vdisp=enc_vdisp,bin=k+1)

		## Mass Calculation ##
		# 1.) Diaferio technique: pick out the caustic, integrate the mass profile to find the mass, dia_caumass
		# 2.) Inflection technique: find the caustic, fit an NFW profile, find the mass via dens_norm, inf_nfwmass

		massprofile,integrand = C.masscalc(x_range,abs(C.Ar_final),enc_r200,enc_m200,vdispersion,beta=beta,conc=enc_r200/enc_srad)
		massprofileD,integrandD = C.masscalc(x_range,abs(C.Ar_finalD),enc_r200,enc_m200,vdispersion,beta=beta,conc=enc_r200/enc_srad)

		inf_nfwmass = 4*pi*dens_norm*(srad)**3*(log(1+enc_r200/srad)-enc_r200/srad/(1+enc_r200/srad))
		dia_caumass = massprofileD[where(x_range[x_range >= 0] < enc_r200)[0][-1]]

		
		ENC_DIA_MASS.append(dia_caumass)
		ENC_INF_MASS.append(inf_nfwmass)
		ENC_DIA_CAU.append(C.Ar_finalD)
		ENC_INF_CAU.append(C.Ar_final)
		ENC_INF_NFW.append(Anew)

		return x_range,ENC_DIA_MASS,ENC_INF_MASS,ENC_DIA_CAU,ENC_INF_CAU,ENC_INF_NFW
Example no. 2
	def phi_f(mu, gam):
		"""
		Interpolate, but extrapolate using the nearest value on the
		grid by clamping (mu, gam) to the grid bounds. Note: for these
		two-argument calls, npmax/npmin must be the elementwise
		numpy.maximum/numpy.minimum (numpy.max/numpy.min would treat
		the second argument as an axis).
		"""
		mu = npmax(self.mumin, mu)
		gam = npmax(self.gammin, gam)
		mu = npmin(self.mumax, mu)
		gam = npmin(self.gammax, gam)
		return interp_phi(mu, gam)
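A standalone note, not from the original source: the two-argument calls above only behave as intended with numpy's elementwise functions.

import numpy as np

print(np.maximum(0.5, np.array([0.2, 0.9])))  # elementwise -> [0.5 0.9]
# np.max(0.5, np.array([0.2, 0.9]))           # fails: the second argument is parsed as `axis`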
Example no. 3
def compare(frame):
    """Compare effects of depth isolation in histogram form"""
    small = copy(frame)
    done = __pipeline(frame)
    done = done.astype(float) / npmax(done)
    small[:5] = 0
    small = (small.astype(float) / npmax(small))
    plt.plot(done)
    plt.plot(small)
    plt.ylim(0, 1.0)
    plt.show()
Example no. 4
    def Estimate_p0(self, x, y, p_0, Deblend_Check):
        
        #INCLUDING P_0 HERE IS DIRTY BUT WE NEED IT FOR THE BATCH PROCESS LINE MEASURING
        Emission_Line = True
        
        #Check if emission or absorption
        if self.LocalMedian > npmax(y):
            Emission_Line = False
        
#         print ' the line list is', self.List_BlendedLines[1][self.Current_BlendedGroup]
        #Case of a blended line
        if (Deblend_Check is not None) and (p_0 is None):
            if Emission_Line:
                
                mu_0_List, A_0_List, Deblend_Check = self.FindMaxima(x, y, MinLevel = self.LocalMedian  + 4 * self.SigmaContinuum, ListLines=self.List_BlendedLines[1][self.Current_BlendedGroup], Deblend_Check=Deblend_Check)
                sigma_i                  = zeros(len(self.List_BlendedLines[0][self.Current_BlendedGroup]))
                             
                #Calculating the sigmas:
                Interest_Points = [self.Selections[2], self.Selections[3]]
#                 for i in range(len(A_0_List) - 1):
# 
#                     Peak_Left, Peak_Right               = where(y == A_0_List[i])[0],  where(y == A_0_List[i + 1])[0]
#                     x_BetweenMaxima, y_BetweenMaxima    = x[Peak_Left : Peak_Right], y[Peak_Left: Peak_Right]
#                     x_m = x_BetweenMaxima[argmin(y_BetweenMaxima)]            
#                     Interest_Points.append(x_m)
# 
#                 Interest_Points.sort()
#                 for i in range(len(sigma_i)):
#                     sigma_i[i] = (Interest_Points[i+1] - Interest_Points[i]) /2 / 2.335            #If the lines are very closed no point in dividing by our /2 factor.

                for i in range(len(sigma_i)):
                    sigma_i[i] = 1            #If the lines are very close there is no point in dividing by our /2 factor.
            p_0 = [A_0_List, mu_0_List, sigma_i]
            
        if (Deblend_Check is None) and (p_0 is None):
            
            self.Kmpfit_Dictionary = [{},{},{'limits':(0,3)}]
            
            if Emission_Line:
                A_0                 = npmax(y)
                mu_0                = x[where(y == A_0)]
                sigma_0             = ((absolute(mu_0 - x[0]) + absolute(x[-1] - mu_0)) / 2.0) / 2.335
            else:
                                    #SHOULD WE CHANGE THIS ZEROLEV FOR THE ONE TRULY AT A_0?
                A_0                 = 2 * self.LocalMedian - npmin(y)
                mu_0                = x[where(y == npmin(y))]
                sigma_0             = ((absolute(mu_0 - x[0]) + absolute(x[-1] - mu_0)) / 2.0) / 2.335
                
            p_0 = [A_0, mu_0, sigma_0]
        
        return Emission_Line, p_0, Deblend_Check
Example no. 5
    def plot_chiSq_Behaviour(self, Traces,  labels):
    
        n_traces = len(Traces)
        
        self.FigConf(Figtype = 'Grid', n_colors = n_traces, n_columns=4, n_rows=2, FigHeight=9, FigWidth=16)
        
        chisq_adapted   = reshape(self.pymc_database.trace('ChiSq')[:], len(self.pymc_database.trace('ChiSq')[:])) * -2
        y_lim           = 30
        min_chi_index   = argmin(chisq_adapted)
        
        for i in range(len(Traces)):
            Trace   = Traces[i]
            label   = labels[i]       

            if Trace != 'ChiSq':
                self.Axis1[i].scatter(x = self.pymc_database.trace(Trace)[:], y = chisq_adapted, color=self.ColorVector[2][i])
                x_min           = npmin(self.pymc_database.trace(Trace)[:]) 
                x_max           = npmax(self.pymc_database.trace(Trace)[:])

                self.Axis1[i].axvline(x = self.statistics_dict[Trace]['mean'], label = 'Inference value: ' + round_sig(self.statistics_dict[Trace]['mean'], 4,scien_notation=False), color='grey', linestyle = 'solid')
                self.Axis1[i].scatter(self.pymc_database.trace(Trace)[:][min_chi_index], chisq_adapted[min_chi_index], color='Black', label = r'$\chi^{2}_{min}$ value: ' + round_sig(self.pymc_database.trace(Trace)[:][min_chi_index],4,scien_notation=False))
                
                self.Axis1[i].set_ylabel(r'$\chi^{2}$',fontsize=20)
                self.Axis1[i].set_ylim(0, y_lim)
                self.Axis1[i].set_xlim(x_min, x_max)
                self.Axis1[i].set_title(label,fontsize=20)
                legend_i = self.Axis1[i].legend(loc='best', fontsize='x-large')
                legend_i.get_frame().set_facecolor('white')
Example no. 6
    def plot(self, fig=None):
        import matplotlib.pyplot as plt
        if fig is None and not isinstance(self.fig, plt.Figure):
            self.fig = plt.figure()
        else:
            self.fig = fig
        # plt.gcf(self.fig)
        plt.figure(self.fig.number)
        # plt.hold(True)  # hold() was removed in matplotlib 3.0; figures hold by default

        from numpy import max as npmax

        mode = "logpow"
        if mode == "logpow":
            gdata = self.get_logpow()
            plt.ylabel("power [db]")

        plt.plot(self.get_xdata() / 1000., gdata)

        ymax = int(npmax(gdata[1:]) / 10.) * 10 + 10
        ymin = ymax - 80

        plt.ylim(ymin, ymax)

        plt.xlabel('Frequency [kHz]')
        plt.locator_params(nbins=20, axis='x', tight=True)
        # plt.locator_params(nbins=15, axis='y', tight=True, fontsize=1)
        plt.grid()
        plt.title(self.name)
        return self
Example no. 7
 def _validate_seg(self):
     if npmin(self.segments) < 0:
         raise ValueError('Segments may only contain non-negative integers')
     if npmax(self.segments) >= len(self.vertices):
         raise ValueError('Segments reference more vertices than provided')
     self._validate_file_size('segments', self.segments)
     self._validate_file_size('vertices', self.vertices)
     return True
Example no. 8
def general_max(*args):
    if rank == 0:
        if len(args)==1:
            if isinstance(args[0],Raster) and args[0].data is not None:
                return npmax(args[0].data)
        else:
            return max(*args)
    else:
        return 0
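A hedged usage sketch: on the root process this falls through to the builtin max, elsewhere it deliberately returns 0 (`rank` and `Raster` come from the surrounding module).

print(general_max(3, 7, 5))  # -> 7 on the root rank, 0 elsewhere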
Example no. 9
 def validate(self):
     """Check if mesh content is built correctly"""
     if npmin(self.triangles) < 0:
         raise ValueError('Triangles may only contain non-negative integers')
     if npmax(self.triangles) >= len(self.vertices):
         raise ValueError('Triangles reference more vertices than provided')
     self._validate_file_size('vertices')
     self._validate_file_size('triangles')
     return True
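The same index-bounds check as a minimal standalone sketch in plain numpy (an illustration, not the library's own test):

import numpy as np
from numpy import min as npmin, max as npmax

vertices = np.zeros((4, 3))                   # 4 vertices in 3D
triangles = np.array([[0, 1, 2], [1, 2, 3]])  # each row indexes into vertices
assert npmin(triangles) >= 0                  # no negative indices
assert npmax(triangles) < len(vertices)       # every index refers to a real vertex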
Example no. 10
def normalize(X):
    """
    Normalize :math:`\mathbf{X}` to have values between 0 and 1.

    :param X: (``numpy.ndarray``) Data matrix.

    :return: (``numpy.ndarray``) Normalize matrix.
    """
    return X / npmax(X)
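A minimal usage sketch, assuming npmax is numpy's max as in the other examples:

import numpy as np
from numpy import max as npmax

X = np.array([[1.0, 2.0], [3.0, 4.0]])
print(normalize(X))  # every entry divided by 4.0 -> values in [0.25, 1.0]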
Example no. 11
def decorrelation_witer(W):
    """
    Iterative MDUM decorrelation that avoids matrix inversion.
    """
    lim = 1.0
    tol = 1.0e-05
    W = W/(W**2).sum()
    while lim > tol:
        W1 = (3.0/2.0)*W - 0.5*dot(dot(W,W.T),W)
        lim = npmax(npabs(npabs(diag(dot(W1,W.T))) - 1.0))
        W = W1
    return W
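A quick check of the invariant the iteration enforces, assuming dot/diag come from numpy and npabs/npmax are numpy.abs/numpy.max:

import numpy as np

rng = np.random.default_rng(0)
W = decorrelation_witer(rng.standard_normal((4, 4)))
print(np.allclose(W @ W.T, np.eye(4), atol=1e-3))  # rows are (near) orthonormal -> True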
Example no. 12
 def load_SpectraData(self):
     
     self.Wave, self.Int, self.ExtraData = self.get_spectra_data(self.Current_Folder + self.Current_Spec)
     
     Wmin = npmin(self.Wave)
     Wmax = npmax(self.Wave)
     
     #We erase the stored emission lines list
     del self.EM_in_plot[:]          
                                      
     for i in range(len(self.Wavelengths_Total)):
         if Wmin <= self.Wavelengths_Total[i] <= Wmax:
             self.EM_in_plot.append(i)
Example no. 13
def __pipeline(data):
    data[:11] = 0
    data[data < (npmax(data) / 10)] = 0
    data = medfilt(data, 3)
    intervals = __find_peak_intervals(data)
    local_max = __get_local_maximums(data, intervals)
    data = __remove_small_peaks(data, intervals, local_max, 10)
    data = __thinout_peaks(data, intervals, local_max, 4)
    intervals = __find_peak_intervals(data)
    data = __remove_thin_peaks(data, intervals, 20)
    data = __flat_binarize(data)

    return data
Example no. 14
def ica_par_fp(X, tolerance, g, gprime, orthog, alpha, maxIterations, Winit):
    """Parallel FastICA; orthog sets the method of unmixing vector decorrelation. This
    fucntion is not meant to be directly called; it is wrapped by fastica()."""
    n,p = X.shape
    W = orthog(Winit)
    lim = tolerance + 1
    it = 1
    while ((lim > tolerance) and (it < maxIterations)):
        wtx = dot(W,X)
        gwtx = g(wtx,alpha)
        g_wtx = gprime(wtx,alpha)
        W1 = dot(gwtx,X.T)/p - dot(diag(g_wtx.mean(axis=1)),W)
        W1 = orthog(W1)
        lim = npmax(npabs(npabs(diag(dot(W1,W.T))) - 1.0))
        W = W1
        it = it + 1
    return W
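A hedged usage sketch; the contrast functions and the symmetric-decorrelation orthog below are minimal stand-ins, not the ones the wrapping fastica() supplies:

import numpy as np

def g(wtx, alpha):
    return np.tanh(alpha * wtx)

def gprime(wtx, alpha):
    return alpha * (1.0 - np.tanh(alpha * wtx) ** 2)

def orthog(W):
    # symmetric decorrelation: W <- (W W^T)^(-1/2) W
    s, u = np.linalg.eigh(W @ W.T)
    return (u / np.sqrt(s)) @ u.T @ W

rng = np.random.default_rng(0)
X = rng.standard_normal((2, 1000))  # stand-in for two whitened signal rows
W = ica_par_fp(X, 1e-4, g, gprime, orthog, 1.0, 200, rng.standard_normal((2, 2)))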
Example no. 15
def find_force_noise_levels(force_handle, finger, filter_order=3, filter_freq=0.3, measurements=10):
    vector_filter=Filter_vector(order=filter_order, freq=filter_freq)
    #stabilizing filter
    for i in range(measurements):
        force=force_handle.get_force(finger, update=True)
        vector_filter.filter(force)
    #real measurement
    max_force=array([0.]*3)
    max_norm_force=0.
    for i in range(measurements):
        force=force_handle.get_force(finger, update=True)
        force_filtered=vector_filter.filter(force)
        max_force=npmax(array([max_force, force_filtered]), axis=0)

        norm_force=norm(force_filtered)
        max_norm_force=max(max_norm_force, norm_force)
    return(max_force, max_norm_force)
Example no. 16
    def Load_LineMesurerData(self):
        
        #SHOULD THIS FOLDER BE LOCATED ON THE LINEMESURER... NOT SURE BECAUSE IT SHOULD BE INTEGRATED WITH THE INTERFACE...
        self.Current_LinesLog           = self.Current_Folder + self.Current_Code + '_'+ self.DataType + self.LinesLogExtension_Name

        self.Wave, self.Int, self.ExtraData = self.File2Data(self.Current_Folder, self.Current_Spec)
        Wmin = npmin(self.Wave)
        Wmax = npmax(self.Wave)
        
        #We erase the stored emission lines list
        del self.EM_in_plot[:]          
                                         
        for i in range(len(self.Wavelengths_Total)):
            if Wmin <= self.Wavelengths_Total[i] <= Wmax:
                self.EM_in_plot.append(i)
                               
        if len(self.Labels_Total) < 3:
            print("WARNING: Very few emission lines found within spectrum")
        
        #Generate emission lines log
        self.CleanTableMaker(self.Current_LinesLog, self.RemakeFiles, self.ColumnHeaderVector, self.ColumnWidth)   
Example no. 17
 def Emission_Threshold(self, LineLoc, TotalWavelen, TotalInten):
     #Use this method to determine the box and location of the emission lines
     Bot = LineLoc - self.BoxSize
     Top = LineLoc + self.BoxSize
     
     indmin, indmax  = searchsorted(TotalWavelen, (Bot, Top))
             
     if indmax > (len(TotalWavelen)-1):
         indmax = len(TotalWavelen)-1
     
     PartialWavelength   = TotalWavelen[indmin:indmax]
     PartialIntensity    = TotalInten[indmin:indmax]
     
     Bot = LineLoc - 2
     Top = LineLoc + 2
     
     indmin, indmax = searchsorted(PartialWavelength, (Bot, Top))
     
     LineHeight = npmax(PartialIntensity[indmin:indmax])
     LineExpLoc = PartialWavelength[where(PartialIntensity == LineHeight)]
     
     return PartialWavelength, PartialIntensity, LineHeight, LineExpLoc   
Example no. 18
def test_callback(samples, rtlsdr_obj):
    #print(samples)
    mpl.clf()
    #dataset.extend(samples)
    x = samples
    yb1 = signal.convolve(x, filterB1)
    #mpl.psd(yb1, NFFT=1024, Fc=0, Fs=48e3)
    yn1 = downsample(yb1, 10)
    #mpl.psd(yn1, NFFT=1024, Fc=0, Fs=sdr.rs/10)
    zdis = discrim(yn1)
    #mpl.psd(zdis, NFFT=1024, Fc=0, Fs=sdr.rs/10)

    zb2 = signal.convolve(zdis, filterB2)
    zn2 = downsample(zb2, 5)
    mpl.psd(x)
    mpl.psd(zn2, NFFT=1024, Fc=0, Fs=48e3)
    mpl.plot(zdis)
    mpl.pause(0.0001)
    mpl.show(block=False)
    zn2 /= npmax(abs(zn2),axis=0)
    # complex2wav("teste.wav",48e3,zn2)
    play(zn2, fs=48e3)
Example no. 19
	def res_rule_operator(self, phi):
		"""
		The reservation rule operator
		-----------------------------
		Qphi = c0*(1-beta) + 
			   beta*integral( max{u(w'),phi(mu',gam')} * f(w'|mu,gam) )dw'
		where:
			   u(w) = (1/a) * (1 - exp(-a*w))
			   f(w'|mu, gam) = N(mu, gam + gam_w)
			   gam' = 1/(1/gam + 1/gam_w)
			   mu' = gam' * (mu/gam + w'/gam_w)

		The operator Q is a well-defined contraction mapping from 
		the complete metric space (b_kappa \Theta, rho_kappa) into 
		itself, where (b_kappa \Theta, rho_kappa) is the reweighted 
		space constructed by the weight function kappa.


		Parameters
		----------
		phi : array_like(float, ndim=1, length=len(grid_points))
			  An approximate fixed point represented as a one-dimensional
			  array.


		Returns
		-------
		new_phi : array_like(float, ndim=1, length=len(grid_points))
				  The updated fixed point.

		"""
		beta, gam_w = self.beta, self.gam_w
		N_mc, a, c0 = self.N_mc, self.a, self.c0
		draws = self.draws
		u = self.u
		interp_phi = LinearNDInterpolator(self.grid_points, phi)

		def phi_f(mu, gam):
			"""
			Interpolate, but extrapolate using the nearest value on
			the grid by clamping (mu, gam) to the grid bounds (npmax
			and npmin must be the elementwise numpy.maximum/minimum
			for these two-argument calls).
			"""
			mu = npmax(self.mumin, mu)
			gam = npmax(self.gammin, gam)
			mu = npmin(self.mumax, mu)
			gam = npmin(self.gammax, gam)
			return interp_phi(mu, gam)

		N = len(phi)
		new_phi = np.empty(N)

		for i in range(N):
			mu, gam = self.grid_points[i, :]
			
			# sample w' from f(w'|mu,gam) = N(mu, gam + gam_w)
			draws_w = mu + np.sqrt(gam + gam_w) * draws
			
			# the updated belief: mu', the update is based on
			# the next period observation w'.
			gam_prime = 1.0 / (1.0 / gam + 1.0 / gam_w) # a scalar
			b1 = gam_prime / gam
			mu_prime = b1 * mu + (1.0 - b1) * draws_w # an array with length N_mc

			# the updated belief: gam'
			gam_prime = gam_prime * np.ones(N_mc) # an array with length N_mc
			
			expected_term = npmax(u(draws_w), phi_f(mu_prime, gam_prime))
			expectation = np.mean(expected_term)
			new_phi[i] = c0 * (1.0 - beta) + beta * expectation

		return new_phi
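A hedged sketch of iterating the contraction to its fixed point; `model` is a hypothetical instance exposing grid_points and res_rule_operator:

import numpy as np

phi = np.zeros(len(model.grid_points))        # initial guess
for _ in range(100):
    phi_new = model.res_rule_operator(phi)
    if np.max(np.abs(phi_new - phi)) < 1e-6:  # sup-norm convergence check
        break
    phi = phi_new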
Example no. 20
# settings
lag = 15  # lag to be printed
ell_scale = 1.8  # ellipsoid radius coefficient
fit = 0  # no fit on marginals
dz_k_lim = [-1.99, 1.99]  # lim for the axes
orange = [.9, .4, 0]

# autocorrelation test for invariance
f = figure(figsize=(12, 6))
InvarianceTestEllipsoid(dz_k,
                        acf_sign[0, 1:],
                        lag,
                        fit,
                        ell_scale,
                        bound=dz_k_lim)
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])

# power law of autocorrelation decay
figure(figsize=(12, 6))
plot(ll, lcr[0, 1:], lw=1.5)
plot(ll, y, color=orange, lw=1.5)
plt.axis([min(ll), max(ll), min(lcr[0, 1:]), 0.95 * npmax(lcr[0, 1:])])
xlabel('ln(l)')
ylabel(
    r'$\ln( | Cr(\Delta\tilde\zeta_{\kappa},  \Delta\tilde\zeta_{\kappa-l}) | )$'
)
legend(['empirical', 'linear fit\n' + r'$\lambda$ =  % 1.3f' % -p[0]])
title('Autocorrelations decay: power law')
plt.show()
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
Example no. 21
[CM, C] = ColorCodedFP(p, None, None, arange(0, 0.81, 0.01), 0, 1, [0.6, 0.2])
figure()
# colormap(CM)
# marginal of epsi_2 (change in 3yr yield)
ax = plt.subplot2grid((4, 5), (1, 0), rowspan=3, colspan=1)
plt.barh(x2[:-1], p2[0], height=x2[1] - x2[0], facecolor=grey,
         edgecolor='k')  # histogram
plot([0, 0], [mu_HBFP[1] - sdev[1], mu_HBFP[1] + sdev[1]], color=orange,
     lw=5)  # +/- standard deviation bar
plot(0,
     epsi_out[1, -1],
     color='b',
     marker='o',
     markerfacecolor='b',
     markersize=5)  # outlier
plt.ylim([npmin(epsi_out[1]), npmax(epsi_out[1])])
plt.xticks([])
plt.yticks([])
ax.invert_xaxis()
# marginal of epsi_1 (change in 1yr yield)
ax = plt.subplot2grid((4, 5), (0, 1), rowspan=1, colspan=4)
bar(x1[:-1], p1[0], width=x1[1] - x1[0], facecolor=grey, edgecolor='k')
plt.xticks([])
# # histogram
plot([mu_HBFP[0] - sdev[0], mu_HBFP[0] + sdev[0]], [0, 0], color=orange, lw=5)
# # +/- standard deviation bar
plot(epsi_out[0, -1],
     0,
     color='b',
     marker='o',
     markerfacecolor='b',
Example no. 22
def plot_mag_series(times,
                    mags,
                    errs=None,
                    outfile=None,
                    sigclip=30.0,
                    timebin=None,
                    yrange=None):
    '''This plots a magnitude time series.

    If outfile is None, plots to a matplotlib interactive window. If outfile
    is a string denoting a filename, uses that to write a png/eps/pdf figure.

    timebin is either a float indicating binsize in seconds, or None indicating
    no time-binning is required.

    '''

    if errs is not None:

        # remove nans
        find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
        ftimes, fmags, ferrs = times[find], mags[find], errs[find]

        # get the median and stdev = 1.483 x MAD
        median_mag = npmedian(fmags)
        stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483

        # sigclip next
        if sigclip:

            sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)

            stimes = ftimes[sigind]
            smags = fmags[sigind]
            serrs = ferrs[sigind]

            LOGINFO('sigclip = %s: before = %s observations, '
                    'after = %s observations' %
                    (sigclip, len(times), len(stimes)))

        else:

            stimes = ftimes
            smags = fmags
            serrs = ferrs

    else:

        # remove nans
        find = npisfinite(times) & npisfinite(mags)
        ftimes, fmags, ferrs = times[find], mags[find], None

        # get the median and stdev = 1.483 x MAD
        median_mag = npmedian(fmags)
        stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483

        # sigclip next
        if sigclip:

            sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)

            stimes = ftimes[sigind]
            smags = fmags[sigind]
            serrs = None

            LOGINFO('sigclip = %s: before = %s observations, '
                    'after = %s observations' %
                    (sigclip, len(times), len(stimes)))

        else:

            stimes = ftimes
            smags = fmags
            serrs = None

    # now we proceed to binning
    if timebin and errs is not None:

        binned = time_bin_magseries_with_errs(stimes, smags, serrs,
                                              binsize=timebin)
        btimes, bmags, berrs = (binned['binnedtimes'],
                                binned['binnedmags'],
                                binned['binnederrs'])

    elif timebin and errs is None:

        binned = time_bin_magseries(stimes, smags,
                                    binsize=timebin)
        btimes, bmags, berrs = binned['binnedtimes'], binned['binnedmags'], None

    else:

        btimes, bmags, berrs = stimes, smags, serrs


    # finally, proceed with plotting
    fig = plt.figure()
    fig.set_size_inches(7.5,4.8)

    plt.errorbar(btimes, bmags, fmt='go', yerr=berrs,
                 markersize=2.0, markeredgewidth=0.0, ecolor='grey',
                 capsize=0)

    # make a grid
    plt.grid(color='#a9a9a9',
             alpha=0.9,
             zorder=0,
             linewidth=1.0,
             linestyle=':')

    # fix the ticks to use no offsets
    plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
    plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)

    # get the yrange
    if yrange and isinstance(yrange,list) and len(yrange) == 2:
        ymin, ymax = yrange
    else:
        ymin, ymax = plt.ylim()
    plt.ylim(ymax,ymin)

    plt.xlim(npmin(btimes) - 0.001*npmin(btimes),
             npmax(btimes) + 0.001*npmin(btimes))

    plt.xlabel('time [JD]')
    plt.ylabel('magnitude')

    if outfile and isinstance(outfile, str):

        plt.savefig(outfile,bbox_inches='tight')
        plt.close()
        return os.path.abspath(outfile)

    else:

        plt.show()
        plt.close()
        return
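A hypothetical call with synthetic data (the np* aliases, LOGINFO and the time_bin_* helpers live in the surrounding module):

import numpy as np

times = np.sort(np.random.uniform(2455000.0, 2455010.0, 500))  # JD
mags = 12.0 + 0.05 * np.random.randn(500)
plot_mag_series(times, mags, errs=None, sigclip=5.0)  # interactive window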
Example no. 23
# ## Compute the risk neutral probabilities using the Stochastic Discount Factors found at the previous steps

q_true = SDF_true.T @ np.diagflat(p) / v_tnow[0]
q_kern = SDF_kern @ np.diagflat(p) / v_tnow[0]
q_mre = SDF_mre @ np.diagflat(p) / v_tnow[0]

# ## For each instrument in the market and for each risk neutral probability found at the previous step, compute the left-hand side and the right-hand side of the fundamental theorem of asset pricing

y = v_tnow / v_tnow[0]
x = r_['-1', V_payoff @ q_true.T, V_payoff @ q_kern.T, V_payoff @ q_mre.T]

# ## Generate the figure

pick = range(50)  # pick only the first 50 points to make the figure more readable
figure()
plot([npmin(y[pick]), npmax(y[pick])], [npmin(y[pick]), npmax(y[pick])], lw=1)
scatter(np.array(y[pick]),
        np.array(x[pick, 0]),
        marker='x',
        s=50,
        color=[1, 0.3, 0],
        lw=1)
scatter(np.array(y[pick]),
        np.array(x[pick, 1]),
        marker='o',
        s=70,
        color=[0.4, 0.4, 0],
        facecolor="none")
scatter(np.array(y[pick]),
        np.array(x[pick, 2]),
        marker='.',
Example no. 24
	def self_stack_kernel_caustic_masscalc(self,ENC_R,ENC_V,ENC_CAUMASS,ENC_INFMASS,ENC_CAUSURF,ENC_INFSURF,ENC_INFNFW,ENC_R200,ENC_M200,ENC_SRAD,ENC_ESRAD,ENC_VDISP,r_limit,vlimit,H0,q,k,root,beta,l=None,samp_size=None):
		
		ENC_R,ENC_V = array(ENC_R),array(ENC_V)
		R = append(ENC_R,ENC_R)
		V = append(ENC_V,-1*ENC_V)

		xbeta,abeta = loadtxt(''+str(root)+'/nkern/Documents/MDB_milliMil_halodata/Caustic/average_betaprofile.tab',dtype='float',usecols=(0,1),unpack=True)

		print('')
		print('## Working on Cluster #' + str(k))
		if l is not None:
			print('## Line of Sight #' + str(l))
		print('_______________________________________')
		print('galaxies within r200 =', where(ENC_R < ENC_R200)[0].size)
		if samp_size is not None:
			print('sample size =', samp_size)

		### Kernel Density Estimation ###
		fit = polyfit((xbeta*ENC_R200)[xbeta<4],abeta[xbeta<4],6)			
		res_array = array([1])
		img_tot = 0
		img_grad_tot = 0
		img_inf_tot = 0
		for u in range(res_array.size):
			x_range,y_range,img,img_grad,img_inf = C.gaussian_kernel(R,V,ENC_R200,normalization=H0,scale=q,res=200,adj=res_array[u],see=False)
			img_tot += img/npmax(img)
			img_grad_tot += img_grad/npmax(img_grad)
			img_inf_tot += img_inf/npmax(img_inf)

		### Define Beta ###
#			beta = fit[0]*x_range**6+fit[1]*x_range**5+fit[2]*x_range**4+fit[3]*x_range**3+fit[4]*x_range**2+fit[5]*x_range+fit[6]	
		beta = np.zeros(x_range.size) + beta
	
		### Caustic Surface Estimation ###
		maxv = ENC_R200*H0*sqrt(200)+500

		# Inflection Technique to find Surface
		Anew,threesig,dens_norm,e_dens_norm,srad,e_srad = C.level_search2(R,V,R,V,R,x_range,y_range,img_tot,img_inf_tot,H0*q,ENC_R200,r_limit,maxv,beta,ENC_SRAD,ENC_ESRAD,use_vdisp=ENC_VDISP,bin=k+1)
		vdispersion = threesig/3.5

		# Caustic Technique to find Surface
		AnewD,threesigD,dens_normD,e_dens_normD,sradD,e_sradD = C.level_search(R,V,R,V,R,x_range,y_range,img_tot,H0*q,ENC_R200,r_limit,maxv,beta,ENC_SRAD,ENC_ESRAD,use_vdisp=ENC_VDISP,bin=k+1)

		### Mass Calculation ###
		# Normal "Diaferio 1999 Mass" with fbeta = .65 (inherent setting is fbeta = .5)
		massprofile,integrand = C.masscalc(x_range,abs(C.Ar_finalD),ENC_R200,ENC_M200,vdispersion,beta=beta,conc=ENC_R200/ENC_SRAD,dstyle=True)
		cau_diamass = (.65/.5) * massprofile[where(x_range[x_range >= 0] < ENC_R200)[0][-1]]

		# Caustic Mass with Concentration parameter, no fbeta
		massprofile2,integrand2 = C.masscalc(x_range,abs(C.Ar_finalD),ENC_R200,ENC_M200,vdispersion,beta=beta,conc=ENC_R200/ENC_SRAD)
		cau_concmass = massprofile2[where(x_range[x_range >= 0] < ENC_R200)[0][-1]]

		# Inflection Mass using NFW
		inf_nfwmass = 4*pi*dens_norm*(srad)**3*(log(1+ENC_R200/srad)-ENC_R200/srad/(1+ENC_R200/srad))

		# Labeling Caustic Mass as Diaferio 1999 fbeta mass, in future, Caustic Mass is concentration mass
		ENC_CAUMASS.append(cau_diamass)	
		ENC_INFMASS.append(inf_nfwmass)
		ENC_CAUSURF.append(C.Ar_finalD)
		ENC_INFSURF.append(C.Ar_final)
		ENC_INFNFW.append(Anew)

		return x_range,ENC_CAUMASS,ENC_INFMASS,ENC_CAUSURF,ENC_INFSURF,ENC_INFNFW
Example no. 25
# y_hor = t.pdf('tlocationscale', x_hor, m_tau, sigma_tau, 1)
y_hor = t.pdf((x_hor - m_tau) / sigma_tau, 1) / sigma_tau
# -

# ## Create figure

# +
s_ = 2  # number of plotted observation before projecting time

m = min([
    npmin(X),
    npmin(x[0, t_ - s_:t_]),
    npmin([x[0, t_ - 1], m_tau]) - 6 * sigma_tau
])
M = max([
    npmax(X),
    npmax(x[0, t_ - s_:t_]),
    npmax([x[0, t_ - 1], m_tau]) + 6 * sigma_tau
])
t = arange(-s_, tau + 1)
max_scale = tau / 4
scale = max_scale / max(y_hor)

# preliminary computations
tau_red = arange(0, tau, 0.1).reshape(1, -1)
m_red = x[0, t_ - 1] + mu * tau_red
sigma_red = sigma * tau_red
redline1 = m_red + 2 * sigma_red
redline2 = m_red - 2 * sigma_red

f = figure()
# foreign exchange rate figure
figure()

# simulated path, mean and standard deviation
plot(horiz_u[:i + 1], FX[:j_sel, :i + 1].T, color=lgrey, lw=1)
xticks(arange(0, t_end + 1, 20))
xlim([min(horiz_u), max(horiz_u) + 1])
l1 = plot(horiz_u[:i + 1], Mu_FX[:i + 1], color='g')
l2 = plot(horiz_u[:i + 1], Mu_FX[:i + 1] + Sigma_FX[:i + 1], color='r')
plot(horiz_u[:i + 1], Mu_FX[:i + 1] - Sigma_FX[:i + 1], color='r')
# histogram
option = namedtuple('option', 'n_bins')
option.n_bins = round(10 * log(j_))
y_hist, x_hist = HistogramFP(FX[:, [i]].T, flex_probs_scenarios.T, option)
scale = 1500 * Sigma_FX[i] / npmax(y_hist)
y_hist = y_hist * scale
shift_y_hist = horiz_u[i] + y_hist
# empirical pdf

emp_pdf = plt.barh(x_hist[:-1],
                   shift_y_hist[0] - horiz_u[i],
                   height=x_hist[1] - x_hist[0],
                   left=horiz_u[i],
                   facecolor=lgrey,
                   edgecolor=lgrey)
plot(shift_y_hist[0], x_hist[:-1], color=dgrey, lw=1)  # border

legend(handles=[l1[0], l2[0], emp_pdf[0]],
       labels=['mean', ' + / - st.deviation', 'horizon pdf'])
xlabel('time (days)')
Example no. 27
	def kernel_caustic_masscalc(self,ENC_R,ENC_V,ENC_M200,ENC_R200,ENC_SRAD,ENC_ESRAD,ENC_HVD,halo_num,bin_range,gal_num,H0,q,r_limit,run_num,use_mems):
		# Dummy arrays that the Caustic functions require but do not use
		potential = ones(halo_num // bin_range)
		ENC_GVD = ones(halo_num // bin_range)
	
		Rt=[]		# Double the 3D data to mimic projected data; needed for level_search2()
		Vt=[]
		for j in range(len(ENC_R)):
			Rt.append(append(ENC_R[j],ENC_R[j]))
			Vt.append(append(ENC_V[j],-ENC_V[j]))
		ENC_R = array(Rt)
		ENC_V = array(Vt)

		xbeta,abeta = loadtxt('/n/Christoq1/nkern/Documents/MDB_milliMil_halodata/Caustic/average_betaprofile.tab',dtype='float',usecols=(0,1),unpack=True)

		ENC_INF_MPROF = []
		ENC_INF_NFW = []
		ENC_INF_CAU = []

		ENC_DIA_MPROF = []
		ENC_DIA_NFW = []
		ENC_DIA_CAU = []
	
		ENC_DIA_CAUMASS = []	# Diaferio technique to produce caustic, caustic to find mass
		ENC_INF_CAUMASS = []	# Inflection technique to produce caustic, caustic to find mass
		ENC_DIA_NFWMASS = []	# Diaferio technique to produce caustic, NFW fit to find mass... check w/ dan on levelsearch() nfw fit, double parameter?
		ENC_INF_NFWMASS = []	# Inflection technique to produce caustic, NFW fit to find mass

		### Kernel Density Estimation ###
		for k in range((run_num[1] - run_num[0]) // bin_range):
			''' k refers to the halo's position in the ENC_* arrays, run_num/bin_range'''
			print('')
			print('WORKING ON ENSEMBLE CLUSTER #' + str(k))
			fit = polyfit((xbeta*ENC_R200[k])[xbeta<4],abeta[xbeta<4],6)	
			res_array = array([1])
			img_tot = 0
			img_grad_tot = 0
			img_inf_tot = 0
			for u in range(res_array.size):
				x_range,y_range,img,img_grad,img_inf = C.gaussian_kernel(ENC_R[k],ENC_V[k],ENC_R200[k],normalization=H0,scale=q,res=200,adj = res_array[u],see=False)
				img_tot += img/npmax(img)
				img_grad_tot += img_grad/npmax(img_grad)
				img_inf_tot += img_inf/npmax(img_inf)
		### Define Beta ###
			beta = fit[0]*x_range**6 + fit[1]*x_range**5 + fit[2]*x_range**4 + fit[3]*x_range**3 + fit[4]*x_range**2 + fit[5]*x_range + fit[6]

		### Caustic Surface Estimation ###
			maxv = ENC_R200[k]*H0*sqrt(200)+500
			
			## INFLECTION TECHNIQUE (M_PHI) ##
			Anew,threesig,dens_norm,e_dens_norm,srad,e_srad,Ar_final = C.level_search2(ENC_R[k],ENC_V[k],ENC_R[k],ENC_V[k],ENC_R[k],x_range,y_range,img_tot,img_inf_tot,H0*q,ENC_R200[k],r_limit,maxv,beta,potential,ENC_SRAD[k],ENC_ESRAD[k],use_vdisp=ENC_HVD[k],use_mems=use_mems,bin=k)
			vdispersion = threesig/3.5

			## DIAFERIO TECHNIQUE (CAUSTIC) ##
			Anewd,threesigd,dens_normd,e_dens_normd,sradd,e_sradd,Ar_finald = C.level_search(ENC_R[k],ENC_V[k],ENC_R[k],ENC_V[k],ENC_R[k],x_range,y_range,img_tot,H0*q,ENC_R200[k],r_limit,maxv,beta,potential,ENC_SRAD[k],ENC_ESRAD[k],use_vdisp=ENC_HVD[k],use_mems=use_mems,bin=k)
			vdispersiond = threesigd/3.5

		### Mass Calculation ###

			# Ways to calculate mass:
			#	1.) Using the inflection technique, pick out the caustic, use it to calculate mass via massprofile
			#	2.) Using the Diaferio technique, pick out the caustic, use it to calculate mass via massprofiled
			#	3.) Using the inflection caustic, fit an NFW, use it to calculate mass via dens_norm
			#	4.) Using the Diaferio caustic, fit an NFW, use it to calculate mass via dens_normd

			massprofile,integrand = C.masscalc(x_range,abs(Ar_final),ENC_R200[k],ENC_M200[k],vdispersion,beta,conc=ENC_R200[k]/ENC_SRAD[k])
			massprofiled,integrandd = C.masscalc(x_range,abs(Ar_finald),ENC_R200[k],ENC_M200[k],vdispersiond,beta,conc=ENC_R200[k]/ENC_SRAD[k])

			inf_nfwmass = 4*pi*dens_norm*(srad)**3*(np.log(1+ENC_R200[k]/srad)-ENC_R200[k]/srad/(1+ENC_R200[k]/srad))
			dia_nfwmass = 4*pi*dens_normd*(sradd)**3*(np.log(1+ENC_R200[k]/sradd)-ENC_R200[k]/sradd/(1+ENC_R200[k]/sradd))

			inf_caumass = massprofile[where(x_range[x_range >= 0] < ENC_R200[k])[0][-1]]			
			dia_caumass = massprofiled[where(x_range[x_range >= 0] < ENC_R200[k])[0][-1]]

		### Data Appending ###
			ENC_INF_NFWMASS.append(inf_nfwmass)
			ENC_INF_CAUMASS.append(inf_caumass)
			ENC_DIA_NFWMASS.append(dia_nfwmass)
			ENC_DIA_CAUMASS.append(dia_caumass)

			ENC_INF_MPROF.append(massprofile)
			ENC_INF_NFW.append(Anew)
			ENC_INF_CAU.append(Ar_final)

			ENC_DIA_MPROF.append(massprofiled)
			ENC_DIA_NFW.append(Anewd)
			ENC_DIA_CAU.append(Ar_finald)


		ENC_INF_MPROF,ENC_DIA_MPROF = array(ENC_INF_MPROF),array(ENC_DIA_MPROF)	
		ENC_INF_NFW,ENC_DIA_NFW = array(ENC_INF_NFW),array(ENC_DIA_NFW)
		ENC_INF_CAU,ENC_DIA_CAU = array(ENC_INF_CAU),array(ENC_DIA_CAU)
		

		return x_range,ENC_INF_NFWMASS,ENC_DIA_NFWMASS,ENC_INF_CAUMASS,ENC_DIA_CAUMASS,ENC_INF_MPROF,ENC_INF_NFW,ENC_INF_CAU,ENC_DIA_MPROF,ENC_DIA_NFW,ENC_DIA_CAU
Example no. 28
def _settling_flux(X, v_max, v_max_practical, X_min, rh, rp, n0):
    # Double-exponential settling velocity, clipped to the practical
    # maximum; npmax/npmin must be the elementwise numpy.maximum and
    # numpy.minimum, and npexp numpy.exp.
    X_star = npmax(X-X_min, n0)
    v = npmin(v_max_practical, v_max*(npexp(-rh*X_star) - npexp(-rp*X_star)))
    return X*npmax(v, n0)
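A minimal sketch of aliases that make the two-argument calls elementwise, with illustrative (not source-verified) parameter values:

import numpy as np
from numpy import maximum as npmax, minimum as npmin, exp as npexp

X = np.linspace(0.0, 400.0, 5)  # hypothetical solids concentrations
print(_settling_flux(X, v_max=474.0, v_max_practical=250.0,
                     X_min=20.0, rh=5.76e-4, rp=2.86e-3, n0=0.0))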
Example no. 29
 def integrand(x):
     """Integral expression on the right-hand side of the operator
     (npmax must be the elementwise numpy.maximum)."""
     return npmax(x, phi_f(q(x, pi))) * (pi*f(x) + (1 - pi)*g(x))
Example no. 30
def garch1f4(x, eps, df):
    ## Fit a GARCH(1,1) model with Student-t errors
    #  INPUTS
    #   x     : [vector] (T x 1) data generated by a GARCH(1,1) process
    #  OUTPUTS
    #   q     : [vector] (4 x 1) parameters of the GARCH(1,1) process
    #   qerr  : [vector] (4 x 1) standard error of parameter estimates
    #   hf    : [scalar] current conditional heteroskedasticity estimate
    #   hferr : [scalar] standard error on hf
    #  NOTE
    #   o Uses a conditional t-distribution with fixed degrees of freedom
    #   o Originally written by Olivier Ledoit, 4/28/1997
    #   o Difference with garch1f: errors come from the score alone
    #   o `filter` below is assumed to be scipy.signal.lfilter (it is
    #     called with the lfilter-style `zi` initial-condition argument)

    # Parameters
    gold = (1 + sqrt(5)) / 2  # step size increment
    tol1 = 1e-7  # for termination criterion
    tol2 = 1e-7  # for closeness to boundary
    big = 2  # for making the hessian negative definite
    maxiter = 50  # maximum number of iterations
    n = 30  # number of points on the grid

    # Rescale
    y = (x.flatten() - mean(x.flatten()))**2
    t = len(y)
    scale = sqrt(mean(y**2))
    y = y / scale
    s = mean(y)
    # Grid search

    [ag, bg] = meshgrid(linspace(0, 1 - eps, n), linspace(0, 1 - eps, n))
    cg = np.maximum(s * (1 - ag - bg), 0)
    likeg = -np.inf * ones((n, n))
    for i in range(n):
        for j in range(n - i):
            h = filter(array([0, ag[i, j]]), array([1, -bg[i, j]]), y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter(array([0, cg[i, j]]), array([1, -bg[i, j]]), ones(t))
            likeg[i, j] = -npsum(log(h) + (df + 1) * log(1 + y / h / df))

    maxlikeg = npmax(likeg)
    maxima = where(likeg == maxlikeg)  ##ok<MXFND>

    # Initialize optimization
    a = r_[cg[maxima], ag[maxima], bg[maxima]]
    best = 0
    da = 0
    # term   = 1
    # negdef = 0
    iter = 0

    # Begin optimization loop
    while iter < maxiter:
        iter = iter + 1

        # New parameter1
        a = a + gold**best * da

        # Conditional variance
        h = filter([0, a[1]], [1, -a[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
            + filter([0, a[0]], [1, -a[2]], ones(t))

        # Likelihood
        if (any(a < 0) or ((a[1] + a[2]) > 1 - eps)):
            like = -np.inf
        else:
            like = -npsum(log(h) + (df + 1) * log(1 + y / h / df))

        # Gradient
        GG = r_['-1',
                filter([0, 1], [1, -a[2]], ones(t))[..., newaxis],
                filter([0, 1], [1, -a[2]],
                       y * (df - 2) / df)[..., newaxis],
                filter([0, 1], [1, -a[2]], h)[..., newaxis]]
        g1 = ((df + 1) * (y / (y + df * h)) - 1) / h
        G = GG * repeat(g1.reshape(-1, 1), 3, axis=1)
        gra = npsum(G, axis=0)

        # Hessian
        GG2 = GG[:,
                 [0, 1, 2, 0, 1, 2, 0, 1, 2]] * GG[:,
                                                   [0, 0, 0, 1, 1, 1, 2, 2, 2]]
        g2 = -((df + 1) *
               (y /
                (y + df * h)) - 1) / h**2 - (df *
                                             (df + 1)) * (y /
                                                          (y + df * h)**2 / h)
        HH = zeros((t, 9))
        HH[:, 2] = filter([0, 1], [1, -a[2]], GG[:, 0])
        HH[:, 6] = HH[:, 2]
        HH[:, 5] = filter([0, 1], [1, -a[2]], GG[:, 1])
        HH[:, 7] = HH[:, 5]
        HH[:, 8] = filter([0, 2], [1, -a[2]], GG[:, 2])
        H = GG2 * repeat(g2.reshape(-1, 1), 9, axis=1) + HH * repeat(
            g1.reshape(-1, 1), 9, axis=1)
        hes = reshape(npsum(H, axis=0), (3, 3), 'F')

        # Negative definite
        d, u = eig(hes)
        # d = diagflat(d)
        if any(d > 0):
            negdef = 0
            d = np.minimum(d, max(d[d < 0]) / big)  # elementwise clamp of the positive eigenvalues
            hes = u @ diagflat(d) @ u.T
        else:
            negdef = 1

        # Direction
        da = -gra.dot(pinv(hes))

        # Termination criterion
        term = da @ gra.T
        if (term < tol1) and negdef:
            break

        # Step search
        best = 0
        newa = a + gold**(best - 1) * da
        if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
            left = -np.inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            left = -sum(log(h) + (df + 1) * log(1 + y / h / df))

        newa = a + gold**best * da
        if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
            center = -np.inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            center = -sum(log(h) + (df + 1) * log(1 + y / h / df))

        newa = a + gold**(best + 1) * da
        if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
            right = -np.inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            right = -sum(log(h) + (df + 1) * log(1 + y / h / df))

        if all(like > array([left, center, right])) or all(
                left > array([center, right])):
            while True:
                best = best - 1
                center = left
                newa = a + gold**(best - 1) * da
                if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
                    left = -np.inf
                else:
                    h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                        + filter([0, newa[0]], [1, -newa[2]], ones(t))
                    left = -sum(log(h) + (df + 1) * log(1 + y / h / df))

                if all(center >= array([like, left])):
                    break

        elif all(right > array([left, center])):
            while True:
                best = best + 1
                center = right
                newa = a + gold**(best + 1) * da
                if (any(newa < 0) or (newa[1] + newa[2]) > 1 - eps):
                    right = -np.inf
                else:
                    h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                        + filter([0, newa[0]], [1, -newa[2]], ones(t))
                    right = -npsum(log(h) + (df + 1) * log(1 + y / h / df))

                if center > right:
                    break

        # If stuck at boundary then stop
        if (center == like) and (any(a < tol2) or (a[1] + a[2]) > 1 - tol2):
            break

        # End of optimization loop

    a[a < tol2] = zeros(len(a[a < tol2]))
    if a[1] + a[2] > 1 - tol2:
        if a[1] < 1 - tol2:
            a[1] = a[1] + (1 - a[1] - a[2])
        else:
            a[2] = a[2] + (1 - a[1] - a[2])

    # Estimation error and volatility forecast
    # aerr = inv(G.T @ G), computed via the pseudo-inverse for numerical safety
    aerr = pinv(G.T @ G)
    hf = a[0] + a[1] * y[t - 1] * (df - 2) / df + a[2] * h[t - 1]
    gf = r_[1, y[t - 1], h[t - 1]] + a[2] * GG[t - 1, :]
    hferr = gf @ aerr @ gf.T
    aerr = diagflat(aerr).T

    # Revert to original scale
    a[0] = a[0] * scale
    aerr[0] = aerr[0] * scale**2
    hf = hf * scale
    hferr = hferr * scale**2

    aerr = sqrt(aerr)
    hferr = sqrt(hferr)
    q = a
    qerr = aerr

    return q, qerr, hf, hferr
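A hypothetical call on simulated data; eps bounds the stationarity constraint (a1 + a2 <= 1 - eps) and df is the fixed Student-t degrees of freedom:

import numpy as np

rng = np.random.default_rng(1)
x = rng.standard_normal((500, 1))  # stand-in for a (T x 1) return series
q, qerr, hf, hferr = garch1f4(x, eps=0.01, df=7)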
Example no. 31
def RollPrices2Prices(t_end_str, tau, dates, z_roll):
    # This function uses rolling values to compute the zero-coupon bond value
    # (i.e., discount factors) with maturities in t_end_str.
    # INPUTS
    #  t_end_str [vector]: (k_ x 1) selected maturities (as 'dd-mmm-yy' strings)
    #  tau [vector]: (n_ x 1) times to maturity corresponding to rows of z_roll
    #  Date [vector]: (1 x t_end) dates corresponding to columns of z_roll
    #  z_roll [matrix]: (n_ x t_end) rolling values
    # OUTPUTS
    #  date [cell vector]: (k_ x 1) cell date{j} contains the numerical value of
    #                              dates corresponding to columns of z{j}
    #  z [cell vector]: (k_ x 1) cell z{j} contains the evolution of zero-coupon
    #                           bond value with maturity t_end{j}
    #  t_end [vector]: (k_ x 1) contains the numerical value corresponding to
    #                          date strings in t_end_string

    # tau_int: vector of maturities for interpolation
    tauRange = ceil((dates[-1] - dates[0]) / 365)
    _, _, tauIndex = intersect(tauRange, tau)
    if tauIndex.size == 0:
        tauIndex = tau.shape[0] - 1  # fall back to the longest maturity
    tau_int = arange(tau[0], tau[tauIndex] + tau[0], tau[0])
    # declaration and preallocation of variables
    t_ = z_roll.shape[1]
    n_ = npmax(tau_int.shape)
    z_roll_int = zeros((n_, t_))
    expiry = zeros((n_, t_))
    expiry_f = zeros((n_, t_))
    k_ = t_end_str.shape[0]
    t_end = zeros((k_, 1), dtype=int)
    z = {}
    date = {}

    for t in range(t_):
        # remove zeros
        indexPolished = np.where(abs(z_roll[:, t]) > 0)[0]
        # matrix of rolling values: z_roll(i,t)=z_{t}(tau[i]+t)
        z_roll_int[:, t] = interp(tau_int, tau[indexPolished],
                                  z_roll[indexPolished, t])
        # expiries
        for i in range(n_):
            expiry[i, t] = tau_int[i] * 365 + dates[t]
            expiry_f[i, t] = floor(expiry[i, t])  # to remove HH:mm:ss

    # zero-coupon bond values (i.e., discount factors) with fixed expiry
    for j in range(k_):
        z[j] = zeros((1, t_))
        date[j] = zeros((1, t_))
        t_end[j] = datenum(t_end_str[j])
        # z[j] = np.where(expiry_f==t_end[j],z_roll_int,z[j])
        # date[j] = np.where(expiry_f==t_end[j],dates,date[j])
        for t in range(t_):
            for i in range(n_):
                if expiry_f[i, t] == t_end[j]:
                    z[j][0, t] = z_roll_int[i, t]
                    date[j][0, t] = dates[t]
        # remove zeros
        indexzeros = np.where(date[j] == 0)
        date[j][indexzeros] = np.nan
        z[j][indexzeros] = np.nan
    return date, z, t_end
Example no. 32
figure()
# simulated path, mean and standard deviation
plot(horiz_u[:index + 1], PLC_u[:j_sel, :index + 1].T, color=lgrey)
l1 = plot(horiz_u[:index + 1], PLMuC_u[0, :index + 1], color='g')
l2 = plot(horiz_u[:index + 1],
          PLMuC_u[0, :index + 1] + PLSigmaC_u[0, :index + 1],
          color='r')
plot(horiz_u[:index + 1],
     PLMuC_u[0, :index + 1] - PLSigmaC_u[0, :index + 1],
     color='r')
# histogram
option = namedtuple('option', 'n_bins')
option.n_bins = round(10 * log(j_))
y_hist, x_hist = HistogramFP(PLC_u[:, [index + 1]].T, pp_, option)
scale = 0.8 * PLSigmaC_u[0, index + 1] / npmax(y_hist)
y_hist = y_hist * scale
shift_y_hist = horiz_u[index + 1] + y_hist

emp_pdf = plt.barh(x_hist[:-1],
                   shift_y_hist[0] - horiz_u[index + 1],
                   left=horiz_u[index + 1],
                   height=x_hist[1] - x_hist[0],
                   facecolor=lgrey,
                   edgecolor=lgrey,
                   lw=2)  # empirical pdf

# first order approximation
y_hist1, x_hist1 = HistogramFP(np.atleast_2d(Taylor_first), pp_, option)
y_hist1 = y_hist1 * scale
shift_T_first = horiz_u[index + 1] + y_hist1
Example no. 33
def optimal_cmo(hic1,
                hic2,
                num_v=None,
                max_num_v=None,
                verbose=False,
                method='frobenius',
                long_nw=True,
                long_dist=True):
    """
    Calculates the optimal contact map overlap between 2 matrices

    TODO: make the selection of number of eigen vectors automatic or relying on
          the summed contribution (e.g. select the EVs that sum 80% of the info)

    .. note::

      penalty is defined as the minimum value of the pre-scoring matrix.
    
    :param hic1: first matrix to align
    :param hic2: second matrix to align
    :param None num_v: number of eigen vectors to consider, max is:
        max(min(len(hic1), len(hic2)))
    :param None max_num_v: maximum number of eigen vectors to consider.
    :param 'frobenius' method: distance function to use as alignment score. If
       'score', distance will be the result of the last value of the
       Needleman-Wunsch algorithm. If 'frobenius', a modification of the
       Frobenius distance will be used

    :returns: two lists, one per aligned matrix, plus a dict summarizing the
        goodness of the alignment with the distance between matrices, their 
        Spearman correlation Rho value and pvalue.
    """

    l_p1 = len(hic1)
    l_p2 = len(hic2)
    num_v = num_v or min(l_p1, l_p2)
    if max_num_v:
        num_v = min(max_num_v, num_v)
    if num_v > l_p1 or num_v > l_p2:
        raise Exception('\nnum_v should be at most %s\n' % (min(l_p1, l_p2)))
    val1, vec1 = eigh(hic1)
    if npsum(vec1).imag:
        raise Exception("ERROR: Hi-C data is not symmetric.\n" + '%s\n\n%s' %
                        (hic1, vec1))
    val2, vec2 = eigh(hic2)
    if npsum(vec2).imag:
        raise Exception("ERROR: Hi-C data is not symmetric.\n" + '%s\n\n%s' %
                        (hic2, vec2))
    #
    val1 = array([sqrt(abs(v)) for v in val1])
    val2 = array([sqrt(abs(v)) for v in val2])
    idx = val1.argsort()[::-1]
    val1 = val1[idx]
    vec1 = vec1[idx]
    idx = val2.argsort()[::-1]
    val2 = val2[idx]
    vec2 = vec2[idx]
    #
    vec1 = array([val1[i] * vec1[:, i] for i in range(num_v)]).transpose()
    vec2 = array([val2[i] * vec2[:, i] for i in range(num_v)]).transpose()
    nearest = float('inf')
    nw = core_nw_long if long_nw else core_nw
    dister = _get_dist_long if long_dist else _get_dist
    best_alis = []
    best_pen = None  # initialized so the final check cannot hit an unbound name
    for num in range(1, num_v + 1):
        for factors in product([1, -1], repeat=num):
            vec1p = factors * vec1[:, :num]
            vec2p = vec2[:, :num]
            p_scores = _prescoring(vec1p, vec2p, l_p1, l_p2)
            penalty = min([npmin(p_scores)] + [-npmax(p_scores)])
            align1, align2, dist = nw(p_scores, penalty, l_p1, l_p2)
            try:
                if method == 'frobenius':
                    dist = dister(align1, align2, hic1, hic2)
                else:
                    dist *= -1
                if dist < nearest:
                    if not penalty:
                        for scr in p_scores:
                            print(' '.join(
                                ['%7s' % (round(y, 2)) for y in scr]))
                    nearest = dist
                    best_alis = [align1, align2]
                    best_pen = penalty
            except IndexError as e:
                print(e)
    try:
        align1, align2 = best_alis
    except ValueError:
        pass
    if verbose:
        print('\n Alignment (score = %s):' % (nearest))
        print('TADS 1: '+'|'.join(['%4s' % (str(int(x)) \
                                            if x!='-' else '-'*3) for x in align1]))
        print('TADS 2: '+'|'.join(['%4s' % (str(int(x)) \
                                            if x!='-' else '-'*3) for x in align2]))
    rho, pval = _get_score(align1, align2, hic1, hic2)
    # print best_pen
    if not best_pen:
        print('WARNING: penalty NULL!!!\n\n')
    return align1, align2, {'dist': nearest, 'rho': rho, 'pval': pval}
Example no. 34
kappa = z.kappa
sigma_bar = z.sigma_bar
eta = z.eta
rho = z.rho
sigma_0 = z.sigma_0
sigma_heston = sigma_heston*100  # converts values to percentage
sigma_impl = sigma_impl*100  # converts values to percentage
# -

# ## Generate figures showing the evolution of the parameters and the comparison between the realized and the fitted implied volatility surfaces at some points in time

# +
# axes settings
date_tick = linspace(0,t_ - 1,4, dtype=int)
grid_k = linspace(npmin(kappa),npmax(kappa),3)
grid_sigbar = linspace(npmin(sigma_bar),npmax(sigma_bar),3)
grid_lamda = linspace(npmin(eta),npmax(eta),3)
grid_rho = [-1.1, 0, 1.1]
grid_sigma_0 = linspace(min(sigma_0),max(sigma_0),3)
grid_s = [npmin(s), 0.5*(npmin(s) + npmax(s)), npmax(s)]

# volatility fitting plot
n_fig = 1
# number of figures, representing volatility fitting, to be plotted

if n_fig == 1:
    t_fig = [0]
else:
    t_fig = linspace(0,t_-1,n_fig)
Example no. 35
 def integrand(x):
     """Integral expression on the right-hand side of the operator
     (npmax must be the elementwise numpy.maximum)."""
     return npmax(x, phi_f(q(x,pi))) * (pi*f(x) + (1 - pi)*g(x))
Example no. 36
f_hor = np.real(f_hor)

# ## Compute the normal approximation of the projected pdf

phi_hor = norm.pdf(x_hor, mu * tau, sigma * sqrt(tau))
# center around x(t_end)
x_hor = x_hor + x[t_ - 1]

# ## Create a figure

# +
s_ = 2  # number of plotted observation before projecting time

# axes settings
m = min([npmin(x[t_ - s_:t_]), mu_tau - 4.5 * sigma_tau])
M = max([npmax(x[t_ - s_:t_]), mu_tau + 5 * sigma_tau])
t = arange(-s_, tau)
max_scale = tau / 4

# preliminary computations
tau_red = arange(0, tau + 0.1, 0.1)
mu_red = x[t_ - 1] + mu * tau_red
sigma_red = sigma * sqrt(tau_red)
redline1 = mu_red + 2 * sigma_red
redline2 = mu_red - 2 * sigma_red

f = figure()

# color settings
lgrey = [0.8, 0.8, 0.8]  # light grey
dgrey = [0.2, 0.2, 0.2]  # dark grey
Example no. 37
# Generate scenarios for the invariants via historical bootstrapping
epsi_hor = SampleScenProbDistribution(epsi.reshape(1,-1), p, j_*deltat)
epsi_hor = reshape(epsi_hor, (j_, deltat),'F')

# Feed the simulated scenarios into the recursive incremental-step routine (random walk assumption)
pi_deltat = pi[-1] + cumsum(epsi_hor, 1)
# -

# ## Create a figure

# +
s_ = 2  # number of plotted observation before projecting time

# axes settings
m = min([npmin(pi[::-1]), pi[-1]-4*sigma_tau])
M = max([npmax(pi[::-1]), mu_tau + 4.5*sigma_tau])
t = arange(-s_,deltat+1)
max_scale = deltat / 4

# preliminary computations
tau_red = arange(0,deltat+0.1,0.1)
mu_red = pi[-1] + mu_1*tau_red
sigma_red = sigma_1*sqrt(tau_red)
redline1 = mu_red + 2*sigma_red
redline2 = mu_red - 2*sigma_red

from matplotlib.pyplot import xticks

f = figure()
# color settings
lgrey = [0.8, 0.8, 0.8]  # light grey
Example no. 38
# +
phi_tau = norm.pdf(epsi_tau, mu * tau, sigma * sqrt(tau))

# center around x[t_end-1]
epsi_tau = epsi_tau + x[t_ - 1]
# -

# ## Create a figure

# +
s_ = 2  # number of plotted observation before projecting time

# axes settings
m = min([npmin(x[t_ - 2:t_]), x[t_] - 4 * sigma_tau])
M = max([npmax(x[t_ - 2:t_]), x[t_] + mu_tau + 4.5 * sigma_tau])
t = arange(-s_, tau + 1)
max_scale = tau / 4
scale = max_scale / npmax(f_tau)

# preliminary computations
tau_red = arange(0, tau + 0.1, 0.1)
mu_red = x[t_ - 1] + mu * tau_red
sigma_red = sigma * sqrt(tau_red)
redline1 = mu_red + 2 * sigma_red
redline2 = mu_red - 2 * sigma_red

f = figure()
# color settings
lgrey = [0.8, 0.8, 0.8]  # light grey
dgrey = [0.2, 0.2, 0.2]  # dark grey
Example no. 39
    xlabel(r'$\epsilon_1$')
    ylabel(r'$\epsilon_2$')
    PlotTwoDimEllipsoid(mu_HFP[:, [q]], sigma2_HFP[:, :, q], 1, 0, 0, 'r', 2)

    # histogram of z^2
    options = namedtuple('option', 'n_bins')
    options.n_bins = round(30 * log(ens[0, q]))
    plt.sca(ax[0, 1])
    ax[0, 1].set_facecolor('white')
    nz, zz = HistogramFP(z_2[[q], :], P.reshape(1, -1), options)
    b = bar(zz[:-1],
            nz[0],
            width=zz[1] - zz[0],
            facecolor=[.7, .7, .7],
            edgecolor=[.3, .3, .3])
    plt.axis([-1, 15, 0, npmax(nz) + (npmax(nz) / 20)])
    yticks([])
    xlabel('$z^2$')

    plot(mu_z2[0, q],
         0,
         color='r',
         marker='o',
         markerfacecolor='r',
         markersize=4)
    MZ2 = 'HFP - mean($z^2$) =  % 3.2f' % mu_z2[0, q]
    plt.text(15,
             npmax(nz) - (npmax(nz) / 7),
             MZ2,
             color='r',
             horizontalalignment='right',
Example no. 40
    Rates.idx, tau + 1, :]  # shadow yield curve at the projection step tau

interp = interp1d(Rates.tau, ShadowRates_thor.T, fill_value='extrapolate')
Shadowy_thor = interp(
    Bonds.tau_thor
).T  # interpolate the curve to obtain the shadow rates for the relevant time to maturity
Y_thor = PerpetualAmericanCall(
    Shadowy_thor, {'eta': Rates.eta})  # from shadow yields to yields

Bonds.V_thor = zeros((Bonds.n_, Y_thor.shape[1]))
Bonds.V_mc_thor = zeros((Bonds.n_, Y_thor.shape[1]))

for n in range(Bonds.n_):
    Bonds.V_thor[n, :] = exp(-Bonds.tau_thor[n] * Y_thor[n, :])
    Bonds.V_mc_thor[n, :] = npmax(
        Bonds.I_D[n, :, :] * Bonds.recoveryrates[n] * Bonds.EAD[n, :, :],
        1).T + (1 - Bonds.I_D[n, :, -1]) * Bonds.V_thor[n, :]

# P&L's
Bonds.Pi = Bonds.V_mc_thor - tile(Bonds.v_tnow, (1, j_))
# -

# ## Pricing: Call options

# +
Options.strikes = array([1100, 1150, 1200])

# Implied volatility paths (reshaped)

implvol_idx = arange(Stocks.i_ + Bonds.i_ + 1, i_)
LogImplVol_path = reshape(
Example no. 41
    def gen_plot(self, args):
        """Generate the plot from time series and arguments
        """
        self.max_size = 16384. * 6400.  # that works on my mac
        self.yscale_factor = 1.0

        from gwpy.plotter.tex import label_to_latex
        from numpy import min as npmin
        from numpy import max as npmax

        if self.timeseries[0].size <= self.max_size:
            self.plot = self.timeseries[0].plot()
        else:
            self.plot = self.timeseries[0].plot(linestyle='None', marker='.')
        self.ymin = self.timeseries[0].min().value
        self.ymax = self.timeseries[0].max().value
        self.xmin = self.timeseries[0].times.value.min()
        self.xmax = self.timeseries[0].times.value.max()

        if len(self.timeseries) > 1:
            for idx in range(1, len(self.timeseries)):
                chname = self.timeseries[idx].channel.name
                lbl = label_to_latex(chname)
                if self.timeseries[idx].size <= self.max_size:
                    self.plot.add_timeseries(self.timeseries[idx], label=lbl)
                else:
                    self.plot.add_timeseries(self.timeseries[idx], label=lbl,
                                             linestyle='None', marker='.')
                self.ymin = min(self.ymin, self.timeseries[idx].min().value)
                self.ymax = max(self.ymax, self.timeseries[idx].max().value)
                self.xmin = min(self.xmin,
                                self.timeseries[idx].times.value.min())
                self.xmax = max(self.xmax,
                                self.timeseries[idx].times.value.max())
        # if they chose to set the range of the x-axis find the range of y
        strt = self.xmin
        stop = self.xmax
        # counter-intuitive at first: the global ymax is >= any value in
        # the requested range (and likewise ymin), so they are safe
        # starting bounds for the min/max search below
        new_ymin = self.ymax
        new_ymax = self.ymin

        if args.xmin:
            strt = float(args.xmin)
        if args.xmax:
            stop = float(args.xmax)
        if strt != self.xmin or stop != self.xmax:
            for idx in range(0, len(self.timeseries)):
                x0 = self.timeseries[idx].x0.value
                dt = self.timeseries[idx].dt.value
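                # assumption: absolute GPS times are of order 1e9 s, so
                # values below 1e8 are treated as offsets relative to x0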
                if strt < 1e8:
                    strt += x0
                if stop < 1e8:
                    stop += x0
                b = int(max(0, (strt - x0) / dt))

                e = int(min(self.xmax, (stop - x0) / dt))

                if e >= self.timeseries[idx].size:
                    e = self.timeseries[idx].size - 1
                new_ymin = min(new_ymin,
                               npmin(self.timeseries[idx].value[b:e]))
                new_ymax = max(new_ymax,
                               npmax(self.timeseries[idx].value[b:e]))
            self.ymin = new_ymin
            self.ymax = new_ymax
        if self.yscale_factor > 1:
            self.log(2, ('Scaling y-limits, original: (%f, %f)' %
                         (self.ymin, self.ymax)))
            yrange = self.ymax - self.ymin
            mid = (self.ymax + self.ymin) / 2.
            self.ymax = mid + yrange / (2 * self.yscale_factor)
            self.ymin = mid - yrange / (2 * self.yscale_factor)
            self.log(2, ('Scaling y-limits, new: (%f, %f)' %
                         (self.ymin, self.ymax)))
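
The y-limit search above boils down to taking npmin/npmax over the index slice implied by the requested x-range; a standalone numpy sketch of that pattern (synthetic data in place of the gwpy timeseries):

import numpy as np
from numpy import min as npmin, max as npmax

x0, dt = 0.0, 0.1                        # hypothetical start time and sample step
values = np.sin(np.linspace(0, 20, 200))

strt, stop = 5.0, 12.0                   # requested x-range
b = int(max(0, (strt - x0) / dt))        # first sample index in the range
e = min(int((stop - x0) / dt), values.size - 1)

ymin, ymax = npmin(values[b:e]), npmax(values[b:e])
print(ymin, ymax)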
Example n. 42
def PlotDynamicStrats(t, V_t_strat, V_t_risky, W_t_risky):
    ## This function generates two figures. The first plots the evolution of a
    # dynamic strategy of two instruments and the weights on the risky asset.
    # The second one is the scatter/histogram plot of the payoffs of the
    # strategy and the risky asset.
    #
    #  INPUTS
    #   t          :[vector] (n_bar x 1) vector of time
    #   V_t_strat  :[matrix] (n_bar x j_bar) portfolio scenarios
    #   V_t_risky  :[matrix] (n_bar x j_bar) risky instrument scenarios
    #   W_t_risky  :[matrix] (n_bar x j_bar) weight of the risky instrument

    ## Code

    # adjust V_t_risky so that it has the same initial value as the strategy
    V_t_risky = V_t_risky * V_t_strat[0, 0] / V_t_risky[0, 0]

    ## Plot the values and the weights

    j = 1  # select one scenario
    y_max = npmax([V_t_strat[:, j], V_t_risky[:, j]
                   ]) * 1.2  # maximum of the y-axis

    fig1 = figure()
    # plot the scenario
    plt.subplot(2, 1, 1)

    plot(t, V_t_strat[:, j], lw=2.5, color='b')
    plot(t, V_t_risky[:, j], lw=2, color='r')
    plt.axis([0, t[-1], 0, y_max])
    plt.grid(True)
    ylabel('value')
    title('investment (blue) vs underlying (red) value')

    # bar plot of the weights
    plt.subplot(2, 1, 2)
    bar(t, W_t_risky[:, j], width=t[1] - t[0], color='r', edgecolor='k')
    plt.axis([0, t[-1], 0, 1])
    plt.grid(True)
    xlabel('time')
    ylabel('weight')
    title('percentage of underlying in portfolio')
    plt.tight_layout()

    ## Joint scatter/histogram plot for the payoffs

    fig2 = figure()
    NumBins = int(round(10 * log(V_t_strat.shape[1])))

    ax = plt.subplot2grid((4, 4), (0, 0), rowspan=3)
    # histograms
    [n, D] = histogram(V_t_strat[-1, :], NumBins)
    barh(D[:-1], n, height=D[1] - D[0])
    xticks([])
    plt.grid(True)
    y_lim = plt.ylim()

    ax = plt.subplot2grid((4, 4), (3, 1), colspan=3)
    [n, D] = histogram(V_t_risky[-1, :], NumBins)
    bar(D[:-1], n, width=D[1] - D[0])
    yticks([])
    plt.grid(True)
    x_lim = plt.xlim()

    # scatter plot
    ax = plt.subplot2grid((4, 4), (0, 1), rowspan=3, colspan=3)
    scatter(V_t_risky[-1, :], V_t_strat[-1, :], marker='.', s=2)

    so = sort(V_t_risky[-1, :])
    plot(so, so, 'r')
    xlim(x_lim)
    ylim(y_lim)
    plt.grid(True)
    xlabel('underlying at horizon')
    ylabel('investment at horizon')
    plt.tight_layout()

    return fig1, fig2
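
A hedged usage sketch for the function above, with synthetic scenarios (the half-invested strategy and its weights are made up for illustration):

import numpy as np

# synthetic inputs: 250 time steps, 500 scenarios of a geometric random walk
n_bar, j_bar = 250, 500
t = np.linspace(0, 1, n_bar)
rets = 1 + 0.0002 + 0.01 * np.random.randn(n_bar, j_bar)
V_t_risky = 100 * np.cumprod(rets, axis=0)
V_t_strat = 0.5 * V_t_risky + 50          # a made-up half-invested strategy
W_t_risky = 0.5 * V_t_risky / V_t_strat   # implied weight of the risky asset

fig1, fig2 = PlotDynamicStrats(t, V_t_strat, V_t_risky, W_t_risky)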
Example n. 43
      empty_flag="True"
      print 'x: ', x
      print 'y: ', y
  else:
      coverage_lines=stdout.split('\n')[:-1]
      x,y=unfold_bedgraph(coverage_lines)
  
  meancov=mean(y)
  #### gaussian on the original peak interval #####
  fitgaus=fit_gaussian(y,x)
  a,b,c,best,fwhm,chisq_raw,reduced_chisq=fitgaus
  area=area_under_curve(best)
  width95CI,fit_height,ht_to_wid_ratio=get_peak_stats(fitgaus,area)
  plateau_calc=plateauiness(best)
  
  normalized_y=1000*array(y)/float(npmax(y))
  norm_fitgaus=fit_gaussian(normalized_y,x)
  norm_a,norm_b,_norm_c,norm_best,norm_fwhm,norm_chisq_raw,norm_reduced_chisq=norm_fitgaus
  norm_area=area_under_curve(norm_best)
  norm_width95CI,norm_fit_height,norm_ht_to_wid_ratio=get_peak_stats(norm_fitgaus,norm_area)
  #########
 
  ### evaluate possibility of double peaks
  double_check=eval_double_peak(x,y)
  if len(double_check)==2:
      bic1,bic2=double_check
  else:
      x_gaus1,y_gaus1,x_gaus2,y_gaus2,bic1,bic2=double_check
      meancov_1=mean(y_gaus1)
      meancov_2=mean(y_gaus2)
      ### 1st gaussian in mixture ###
Example n. 44
def optimal_cmo(hic1, hic2, num_v=None, max_num_v=None, verbose=False,
                method='frobenius', long_nw=True, long_dist=True):
    """
    Calculates the optimal contact map overlap between 2 matrices

    TODO: make the selection of number of eigen vectors automatic or relying on
          the summed contribution (e.g. select the EVs that sum 80% of the info)

    .. note::

      penalty is defined as the minimum value of the pre-scoring matrix.
    
    :param hic1: first matrix to align
    :param hic2: second matrix to align
    :param None num_v: number of eigen vectors to consider, max is:
        max(min(len(hic1), len(hic2)))
    :param None max_num_v: maximum number of eigen vectors to consider.
    :param 'frobenius' method: distance function to use as alignment score. If
       'score', the distance is the last value of the Needleman-Wunsch
       algorithm. If 'frobenius', a modification of the Frobenius distance is
       used.

    :returns: two lists, one per aligned matrix, plus a dict summarizing the
        goodness of the alignment with the distance between matrices, their 
        Spearman correlation Rho value and pvalue.
    """

    l_p1 = len(hic1)
    l_p2 = len(hic2)
    num_v = num_v or min(l_p1, l_p2)
    if max_num_v:
        num_v = min(max_num_v, num_v)
    if num_v > l_p1 or num_v > l_p2:
        raise Exception('\nnum_v should be at most %s\n' % (min(l_p1, l_p2)))
    val1, vec1 = eigh(hic1)
    if npsum(vec1).imag:
        raise Exception("ERROR: Hi-C data is not symmetric.\n" +
                        '%s\n\n%s' % (hic1, vec1))
    val2, vec2 = eigh(hic2)
    if npsum(vec2).imag:
        raise Exception("ERROR: Hi-C data is not symmetric.\n" +
                        '%s\n\n%s' % (hic2, vec2))
    #
    val1 = array([sqrt(abs(v)) for v in val1])
    val2 = array([sqrt(abs(v)) for v in val2])
    idx = val1.argsort()[::-1]
    val1 = val1[idx]
    vec1 = vec1[idx]
    idx = val2.argsort()[::-1]
    val2 = val2[idx]
    vec2 = vec2[idx]
    #
    vec1 = array([val1[i] * vec1[:, i] for i in xrange(num_v)]).transpose()
    vec2 = array([val2[i] * vec2[:, i] for i in xrange(num_v)]).transpose()
    nearest = float('inf')
    nw = core_nw_long if long_nw else core_nw
    dister = _get_dist_long if long_dist else _get_dist
    best_alis = []
    for num in xrange(1, num_v + 1):
        for factors in product([1, -1], repeat=num):
            vec1p = factors * vec1[:, :num]
            vec2p = vec2[:, :num]
            p_scores = _prescoring(vec1p, vec2p, l_p1, l_p2)
            penalty = min([npmin(p_scores)] + [-npmax(p_scores)])
            align1, align2, dist = nw(p_scores, penalty, l_p1, l_p2)
            try:
                if method == 'frobenius':
                    dist = dister(align1, align2, hic1, hic2)
                else:
                    dist *= -1
                if dist < nearest:
                    if not penalty:
                        for scr in p_scores:
                            print ' '.join(['%7s' % (round(y, 2)) for y in scr])
                    nearest = dist
                    best_alis = [align1, align2]
                    best_pen = penalty
            except IndexError as e:
                print e
    try:
        align1, align2 = best_alis
    except ValueError:
        pass
    if verbose:
        print '\n Alignment (score = %s):' % (nearest)
        print 'TADS 1: '+'|'.join(['%4s' % (str(int(x)) \
                                            if x!='-' else '-'*3) for x in align1])
        print 'TADS 2: '+'|'.join(['%4s' % (str(int(x)) \
                                            if x!='-' else '-'*3) for x in align2])
    rho, pval = _get_score(align1, align2, hic1, hic2)
    # print best_pen
    if not best_pen:
        print 'WARNING: penalty NULL!!!\n\n'
    return align1, align2, {'dist': nearest, 'rho': rho, 'pval': pval}
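
A hedged usage sketch (Python 2, like the function above) on random symmetric toy matrices; real Hi-C contact maps would replace them:

import numpy as np

# random symmetric toy matrices standing in for real Hi-C contact maps
size = 20
base = np.random.rand(size, size)
hic1 = base + base.T                   # optimal_cmo requires symmetric input
noise = 0.1 * np.random.rand(size, size)
hic2 = hic1 + noise + noise.T

ali1, ali2, stats = optimal_cmo(hic1, hic2, max_num_v=3, method='frobenius')
print stats['dist'], stats['rho'], stats['pval']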
Example n. 45
def lightcurve_flux_measures(ftimes, fmags, ferrs, magsarefluxes=False):
    '''
    This calculates percentiles of the flux.

    '''

    ndet = len(fmags)

    if ndet > 9:

        # get the fluxes
        if magsarefluxes:
            series_fluxes = fmags
        else:
            series_fluxes = 10.0**(-0.4 * fmags)

        series_flux_median = npmedian(series_fluxes)

        # get the percent_amplitude for the fluxes
        series_flux_percent_amplitude = (npmax(npabs(series_fluxes)) /
                                         series_flux_median)

        # get the flux percentiles
        series_flux_percentiles = nppercentile(
            series_fluxes,
            [5.0, 10, 17.5, 25, 32.5, 40, 60, 67.5, 75, 82.5, 90, 95])
        series_frat_595 = (series_flux_percentiles[-1] -
                           series_flux_percentiles[0])
        series_frat_1090 = (series_flux_percentiles[-2] -
                            series_flux_percentiles[1])
        series_frat_175825 = (series_flux_percentiles[-3] -
                              series_flux_percentiles[2])
        series_frat_2575 = (series_flux_percentiles[-4] -
                            series_flux_percentiles[3])
        series_frat_325675 = (series_flux_percentiles[-5] -
                              series_flux_percentiles[4])
        series_frat_4060 = (series_flux_percentiles[-6] -
                            series_flux_percentiles[5])

        # calculate the flux percentile ratios
        series_flux_percentile_ratio_mid20 = series_frat_4060 / series_frat_595
        series_flux_percentile_ratio_mid35 = series_frat_325675 / series_frat_595
        series_flux_percentile_ratio_mid50 = series_frat_2575 / series_frat_595
        series_flux_percentile_ratio_mid65 = series_frat_175825 / series_frat_595
        series_flux_percentile_ratio_mid80 = series_frat_1090 / series_frat_595

        # calculate the ratio of F595/median flux
        series_percent_difference_flux_percentile = (series_frat_595 /
                                                     series_flux_median)
        series_percentile_magdiff = -2.5 * nplog10(
            series_percent_difference_flux_percentile)

        return {
            'flux_median': series_flux_median,
            'flux_percent_amplitude': series_flux_percent_amplitude,
            'flux_percentiles': series_flux_percentiles,
            'flux_percentile_ratio_mid20': series_flux_percentile_ratio_mid20,
            'flux_percentile_ratio_mid35': series_flux_percentile_ratio_mid35,
            'flux_percentile_ratio_mid50': series_flux_percentile_ratio_mid50,
            'flux_percentile_ratio_mid65': series_flux_percentile_ratio_mid65,
            'flux_percentile_ratio_mid80': series_flux_percentile_ratio_mid80,
            'percent_difference_flux_percentile': series_percentile_magdiff,
        }

    else:

        LOGERROR('not enough detections in this magseries '
                 'to calculate flux measures')
        return None
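
The percentile ratios above reduce to differences of np.percentile outputs; a compact sketch with synthetic fluxes:

import numpy as np

fluxes = np.random.lognormal(mean=0.0, sigma=0.1, size=1000)  # synthetic fluxes

p5, p25, p40, p60, p75, p95 = np.percentile(fluxes, [5, 25, 40, 60, 75, 95])
frat_595 = p95 - p5                       # the F5-F95 flux range
ratio_mid50 = (p75 - p25) / frat_595
ratio_mid20 = (p60 - p40) / frat_595
print(ratio_mid20, ratio_mid50)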
Example n. 46
def nonperiodic_lightcurve_features(times, mags, errs, magsarefluxes=False):
    '''This calculates the following nonperiodic features of the light curve,
    listed in Richards et al. (2011):

    amplitude
    beyond1std
    flux_percentile_ratio_mid20
    flux_percentile_ratio_mid35
    flux_percentile_ratio_mid50
    flux_percentile_ratio_mid65
    flux_percentile_ratio_mid80
    linear_trend
    max_slope
    median_absolute_deviation
    median_buffer_range_percentage
    pair_slope_trend
    percent_amplitude
    percent_difference_flux_percentile
    skew
    stdev
    timelength
    mintime
    maxtime

    '''

    # remove nans first
    finiteind = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
    ftimes, fmags, ferrs = times[finiteind], mags[finiteind], errs[finiteind]

    # remove zero errors
    nzind = npnonzero(ferrs)
    ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]

    ndet = len(fmags)

    if ndet > 9:

        # calculate the moments
        moments = lightcurve_moments(ftimes, fmags, ferrs)

        # calculate the flux measures
        fluxmeasures = lightcurve_flux_measures(ftimes,
                                                fmags,
                                                ferrs,
                                                magsarefluxes=magsarefluxes)

        # calculate the point-to-point measures
        ptpmeasures = lightcurve_ptp_measures(ftimes, fmags, ferrs)

        # get the length in time
        mintime, maxtime = npmin(ftimes), npmax(ftimes)
        timelength = maxtime - mintime

        # get the amplitude
        series_amplitude = 0.5 * (npmax(fmags) - npmin(fmags))

        # calculate the linear fit to the entire mag series
        fitcoeffs = nppolyfit(ftimes, fmags, 1, w=1.0 / (ferrs * ferrs))
        series_linear_slope = fitcoeffs[0]  # numpy.polyfit returns the highest power first

        # roll fmags by 1
        rolled_fmags = nproll(fmags, 1)

        # calculate the magnitude ratio (from the WISE paper)
        series_magratio = ((npmax(fmags) - moments['median']) /
                           (npmax(fmags) - npmin(fmags)))

        # this is the dictionary returned containing all the measures
        measures = {
            'ndet': fmags.size,
            'mintime': mintime,
            'maxtime': maxtime,
            'timelength': timelength,
            'amplitude': series_amplitude,
            'ndetobslength_ratio': ndet / timelength,
            'linear_fit_slope': series_linear_slope,
            'magnitude_ratio': series_magratio,
        }
        if moments:
            measures.update(moments)
        if ptpmeasures:
            measures.update(ptpmeasures)
        if fluxmeasures:
            measures.update(fluxmeasures)

        return measures

    else:

        LOGERROR('not enough detections in this magseries '
                 'to calculate non-periodic features')
        return None
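
A hedged usage sketch, assuming the helpers this function calls (lightcurve_moments, lightcurve_ptp_measures, LOGERROR) are importable from the same module:

import numpy as np

times = np.sort(np.random.uniform(0, 100, 500))
mags = 12.0 + 0.05 * np.sin(2 * np.pi * times / 3.7) + 0.01 * np.random.randn(500)
errs = np.full(500, 0.01)

features = nonperiodic_lightcurve_features(times, mags, errs)
print(features['amplitude'], features['timelength'])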
Example n. 47
    def find_compartments(self,
                          crms=None,
                          savefig=None,
                          savedata=None,
                          show=False,
                          **kwargs):
        """
        Search for A/B compartments in each chromosome of the Hi-C matrix.
        The Hi-C matrix is normalized by the number of interactions expected
        at a given distance, and by visibility (one iteration of ICE). A
        correlation matrix is then calculated from this normalized matrix,
        and its first eigenvector is used to identify compartments: changes
        in sign mark boundaries between compartments.
        The result is stored as a dictionary of compartment boundaries, keys
        being chromosome names.
        
        :param 99 perc_zero: to filter bad columns
        :param 0.05 signal_to_noise: to calculate expected interaction counts,
           if not enough reads are observed at a given distance the observations
           of the distance+1 are summed. a signal to noise ratio of < 0.05
           corresponds to > 400 reads.
        :param None crms: only runs these given list of chromosomes
        :param None savefig: path to a directory to store matrices with
           compartment predictions, one image per chromosome, stored under
           'chromosome-name.png'.
        :param False show: show the plot
        :param None savedata: path to a new file to store compartment
           predictions, one file only.
        :param -1 vmin: for the color scale of the plotted map
        :param 1 vmax: for the color scale of the plotted map

        TODO: this is really slow...

        Notes: building the distance matrix using the amount of interactions
               instead of the mean correlation, gives generally worse results.
        
        """
        if not self.bads:
            if kwargs.get('verbose', True):
                print 'Filtering out columns with more than %d%% zeros' % kwargs.get('perc_zero', 99)
            self.filter_columns(perc_zero=kwargs.get('perc_zero', 99),
                                by_mean=False,
                                silent=True)
        if not self.expected:
            if kwargs.get('verbose', True):
                print 'Normalizing by expected values'
            self.expected = expected(self, bads=self.bads, **kwargs)
        if not self.bias:
            if kwargs.get('verbose', True):
                print 'Normalizing by ICE (1 round)'
            self.normalize_hic(iterations=0)
        if savefig:
            mkdir(savefig)

        cmprts = {}
        for sec in self.section_pos:
            if crms and sec not in crms:
                continue
            if kwargs.get('verbose', False):
                print 'Processing chromosome', sec
                warn('Processing chromosome %s' % (sec))
            matrix = [[(float(self[i, j]) / self.expected[abs(j - i)] /
                        self.bias[i] / self.bias[j])
                       for i in xrange(*self.section_pos[sec])
                       if not i in self.bads]
                      for j in xrange(*self.section_pos[sec])
                      if not j in self.bads]
            if not matrix:  # the MT chromosome typically ends up here
                warn('Chromosome %s is probably MT :)' % (sec))
                cmprts[sec] = []
                continue
            for i in xrange(len(matrix)):
                for j in xrange(i + 1, len(matrix)):
                    matrix[i][j] = matrix[j][i]
            matrix = [list(m) for m in corrcoef(matrix)]
            try:
                # eigsh is very fast when only one eigenvector is requested
                _, evect = eigsh(array(matrix), k=1)
            except LinAlgError:
                warn('Chromosome %s too small to compute PC1' % (sec))
                cmprts[sec] = []  # Y chromosome, or so...
                continue
            first = list(evect[:, -1])
            beg, end = self.section_pos[sec]
            bads = [k - beg for k in self.bads if beg <= k <= end]
            _ = [first.insert(b, 0) for b in bads]
            _ = [
                matrix.insert(b, [float('nan')] * len(matrix[0])) for b in bads
            ]
            _ = [
                matrix[i].insert(b, float('nan')) for b in bads
                for i in xrange(len(first))
            ]
            breaks = [0] + [
                i for i, (a, b) in enumerate(zip(first[1:], first[:-1]))
                if a * b < 0
            ] + [len(first)]
            breaks = [{
                'start': b,
                'end': breaks[i + 1]
            } for i, b in enumerate(breaks[:-1])]
            cmprts[sec] = breaks

            # calculate compartment internal density
            for k, cmprt in enumerate(cmprts[sec]):
                beg = self.section_pos[sec][0]
                beg1, end1 = cmprt['start'] + beg, cmprt['end'] + beg
                sec_matrix = [(self[i, j] / self.expected[abs(j - i)] /
                               self.bias[i] / self.bias[j])
                              for i in xrange(beg1, end1) if not i in self.bads
                              for j in xrange(i, end1) if not j in self.bads]
                try:
                    cmprt['dens'] = sum(sec_matrix) / len(sec_matrix)
                except ZeroDivisionError:
                    cmprt['dens'] = 0.
            try:
                meanh = sum([cmprt['dens']
                             for cmprt in cmprts[sec]]) / len(cmprts[sec])
            except ZeroDivisionError:
                meanh = 1.
            for cmprt in cmprts[sec]:
                try:
                    cmprt['dens'] /= meanh
                except ZeroDivisionError:
                    cmprt['dens'] = 1.
            gammas = {}
            for gamma in range(101):
                gammas[gamma] = _find_ab_compartments(float(gamma) / 100,
                                                      matrix,
                                                      breaks,
                                                      cmprts[sec],
                                                      save=False)
                # print gamma, gammas[gamma]
            gamma = min(gammas.keys(), key=lambda k: gammas[k][0])
            _ = _find_ab_compartments(float(gamma) / 100,
                                      matrix,
                                      breaks,
                                      cmprts[sec],
                                      save=True)
            if savefig or show:
                vmin = kwargs.get('vmin', -1)
                vmax = kwargs.get('vmax', 1)
                if vmin == 'auto' == vmax:
                    vmax = max([abs(npmin(matrix)), abs(npmax(matrix))])
                    vmin = -vmax
                plot_compartments(sec,
                                  first,
                                  cmprts,
                                  matrix,
                                  show,
                                  savefig + '/chr' + sec + '.pdf',
                                  vmin=vmin,
                                  vmax=vmax)
                plot_compartments_summary(sec, cmprts, show,
                                          savefig + '/chr' + sec + '_summ.pdf')

        self.compartments = cmprts
        if savedata:
            self.write_compartments(savedata)
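
The boundary detection above needs only the sign changes of the first eigenvector; a minimal standalone sketch of that step with hypothetical PC1 values:

import numpy as np

first = np.array([0.3, 0.1, -0.2, -0.4, 0.5, 0.2, -0.1])  # hypothetical PC1

# a sign change between consecutive entries marks a compartment boundary
breaks = [0] + [i for i in range(1, len(first))
                if first[i] * first[i - 1] < 0] + [len(first)]
compartments = [{'start': b, 'end': breaks[k + 1]}
                for k, b in enumerate(breaks[:-1])]
print(compartments)  # [{'start': 0, 'end': 2}, {'start': 2, 'end': 4}, ...]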
Example n. 48
def plot_phased_mag_series(times,
                           mags,
                           period,
                           errs=None,
                           epoch='min',
                           outfile=None,
                           sigclip=30.0,
                           phasewrap=True,
                           phasesort=True,
                           phasebin=None,
                           plotphaselim=[-0.8,0.8],
                           yrange=None):
    '''This plots a phased magnitude time series using the period provided.

    If epoch is None, uses the min(times) as the epoch.

    If epoch is a string 'min', then fits a cubic spline to the phased light
    curve using min(times), finds the magnitude minimum from the fitted light
    curve, then uses the corresponding time value as the epoch.

    If epoch is a float, then uses that directly to phase the light curve and as
    the epoch of the phased mag series plot.

    If outfile is None, then plots to the matplotlib interactive window. If outfile
    is a string denoting a filename, uses that to write a png/eps/pdf figure.

    '''

    if errs is not None:

        # remove nans
        find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
        ftimes, fmags, ferrs = times[find], mags[find], errs[find]

        # get the median and stdev = 1.483 x MAD
        median_mag = npmedian(fmags)
        stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483

        # sigclip next
        if sigclip:

            sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)

            stimes = ftimes[sigind]
            smags = fmags[sigind]
            serrs = ferrs[sigind]

            LOGINFO('sigclip = %s: before = %s observations, '
                    'after = %s observations' %
                    (sigclip, len(times), len(stimes)))

        else:

            stimes = ftimes
            smags = fmags
            serrs = ferrs

    else:

        # remove nans
        find = npisfinite(times) & npisfinite(mags)
        ftimes, fmags, ferrs = times[find], mags[find], None

        # get the median and stdev = 1.483 x MAD
        median_mag = npmedian(fmags)
        stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483

        # sigclip next
        if sigclip:

            sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)

            stimes = ftimes[sigind]
            smags = fmags[sigind]
            serrs = None

            LOGINFO('sigclip = %s: before = %s observations, '
                    'after = %s observations' %
                    (sigclip, len(times), len(stimes)))

        else:

            stimes = ftimes
            smags = fmags
            serrs = None


    # figure out the epoch, if it's None, use the min of the time
    if epoch is None:

        epoch = npmin(stimes)

    # if the epoch is 'min', then fit a spline to the light curve phased
    # using the min of the time, find the fit mag minimum and use the time for
    # that as the epoch
    elif isinstance(epoch,str) and epoch == 'min':

        spfit = spline_fit_magseries(stimes, smags, serrs, period)
        epoch = spfit['fitepoch']


    # now phase (and optionally, phase bin the light curve)
    if errs is not None:

        # phase the magseries
        phasedlc = phase_magseries_with_errs(stimes,
                                             smags,
                                             serrs,
                                             period,
                                             epoch,
                                             wrap=phasewrap,
                                             sort=phasesort)
        plotphase = phasedlc['phase']
        plotmags = phasedlc['mags']
        ploterrs = phasedlc['errs']

        # if we're supposed to bin the phases, do so
        if phasebin:

            binphasedlc = phase_bin_magseries_with_errs(plotphase,
                                                        plotmags,
                                                        ploterrs,
                                                        binsize=phasebin)
            plotphase = binphasedlc['binnedphases']
            plotmags = binphasedlc['binnedmags']
            ploterrs = binphasedlc['binnederrs']

    else:

        # phase the magseries
        phasedlc = phase_magseries(stimes,
                                   smags,
                                   period,
                                   epoch,
                                   wrap=phasewrap,
                                   sort=phasesort)
        plotphase = phasedlc['phase']
        plotmags = phasedlc['mags']
        ploterrs = None

        # if we're supposed to bin the phases, do so
        if phasebin:

            binphasedlc = phase_bin_magseries(plotphase,
                                              plotmags,
                                              binsize=phasebin)
            plotphase = binphasedlc['binnedphases']
            plotmags = binphasedlc['binnedmags']
            ploterrs = None


    # finally, make the plots

    # initialize the plot
    fig = plt.figure()
    fig.set_size_inches(7.5,4.8)

    plt.errorbar(plotphase, plotmags, fmt='bo', yerr=ploterrs,
                 markersize=2.0, markeredgewidth=0.0, ecolor='#B2BEB5',
                 capsize=0)

    # make a grid
    plt.grid(color='#a9a9a9',
             alpha=0.9,
             zorder=0,
             linewidth=1.0,
             linestyle=':')

    # make lines for phase 0.0, 0.5, and -0.5
    plt.axvline(0.0,alpha=0.9,linestyle='dashed',color='g')
    plt.axvline(-0.5,alpha=0.9,linestyle='dashed',color='g')
    plt.axvline(0.5,alpha=0.9,linestyle='dashed',color='g')

    # fix the ticks to use no offsets
    plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
    plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)

    # get the yrange
    if yrange and isinstance(yrange,list) and len(yrange) == 2:
        ymin, ymax = yrange
    else:
        ymin, ymax = plt.ylim()
    plt.ylim(ymax,ymin)

    # set the x axis limit
    if not plotphaselim:
        plt.xlim((npmin(plotphase)-0.1,
                  npmax(plotphase)+0.1))
    else:
        plt.xlim((plotphaselim[0],plotphaselim[1]))

    # set up the labels
    plt.xlabel('phase')
    plt.ylabel('magnitude')
    plt.title('using period: %.6f d and epoch: %.6f' % (period, epoch))

    # make the figure
    if outfile and isinstance(outfile, str):

        plt.savefig(outfile,bbox_inches='tight')
        plt.close()
        return os.path.abspath(outfile)

    else:

        plt.show()
        plt.close()
        return
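
The phasing itself comes down to a single formula; a minimal sketch of phase folding with an optional wrap to [-0.5, 0.5) (the phase_magseries helpers used above add sorting and error propagation on top of this):

import numpy as np

def fold(times, period, epoch, wrap=True):
    """Fold times into phases in [0, 1), optionally wrapping to [-0.5, 0.5)."""
    phase = ((times - epoch) / period) % 1.0
    if wrap:
        phase = np.where(phase > 0.5, phase - 1.0, phase)
    return phase

t = np.array([0.0, 1.3, 2.9, 4.1])
print(fold(t, period=2.0, epoch=0.0))  # [ 0.   -0.35  0.45  0.05]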
Example n. 49
def floor_sig(sig, sig_min=1e-8):
    r"""Floor an array of variances
    """
    sig_floor = npmin([norm(sig), sig_min])
    return list(map(lambda s: npmax([s, sig_floor]), sig))
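
A short usage example for floor_sig, with the imports the snippet above assumes (npmin, npmax and norm from numpy):

from numpy import min as npmin, max as npmax  # aliases assumed by floor_sig
from numpy.linalg import norm

sig = [0.5, 1e-12, 2.0]
print(floor_sig(sig))  # the tiny variance is raised to the floor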
Example n. 51
from itertools import izip  # Python 2; on Python 3 use the built-in zip

def __remove_small_peaks(data, intervals, lmax, factor=10):
    small = (lmax < (npmax(data) / factor))
    for is_small, (min, max) in izip(small, intervals):
        if is_small:
            data[min:max] = 0
    return data
Example n. 52
def plateauiness(y_vector, prop_max=0.80):
    """Count the points within prop_max of the maximum (plateau statistic)."""
    plat_stat = sum(i >= npmax(y_vector) * prop_max for i in y_vector)
    
    return plat_stat
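
A quick demonstration of the plateau statistic: a sharp spike has few points near its maximum, a flat top has many.

from numpy import max as npmax  # alias assumed by plateauiness

sharp = [0, 1, 2, 10, 2, 1, 0]    # single spike: one point near the max
flat = [0, 9, 10, 10, 10, 9, 0]   # plateau: five points near the max

print(plateauiness(sharp))  # 1
print(plateauiness(flat))   # 5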
Example n. 53
# input parameters
t_ = 500  # number of observations
nu = 3  # degrees of freedom
mu = 0  # location parameter
sigma2 = 2  # square dispersion parameter
sigma = sqrt(sigma2)
threshold = 1e-4
last = 1
# -

# ## Generate the observations of the Student t with 3 degrees of freedom, location parameter 0 and dispersion parameter 2

Epsi_std = t.rvs(nu, size=(1, t_))
Epsi = mu + sigma * Epsi_std  # Affine equivariance property
x = linspace(npmin(Epsi_std), npmax(Epsi_std), t_ + 1)

# ## Compute the Maximum Likelihood location and dispersion parameters

p = (1 / t_) * ones((1, t_))  # probabilities
mu_ML, sigma2_ML, _ = MaxLikelihoodFPLocDispT(Epsi, p, nu, threshold, last)

# ## Compute the Maximum Likelihood pdf

sigma_ML = sqrt(sigma2_ML)
fML_eps = t.pdf((x - mu_ML) / sigma_ML, nu)

# ## Compute the Maximum Likelihood cdf

FML_eps = t.cdf((x - mu_ML) / sigma_ML, nu)
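
# As a hedged cross-check of the fixed-point routine above, scipy's generic
# maximum-likelihood fit can estimate location and dispersion with the
# degrees of freedom pinned (f0 fixes the shape parameter):

from scipy.stats import t

nu_fit, mu_fit, sigma_fit = t.fit(Epsi.flatten(), f0=nu)
# mu_fit and sigma_fit should be close to mu_ML and sigma_ML computed above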