Code Example #1
File: correlation.py  Project: JonathanUlm/qutip
def _correlation_es_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op):
    """
    Internal function for calculating the three-operator two-time
    correlation function:
    <A(t)B(t+tau)C(t)>
    using an exponential series solver.
    """

    # the solvers only work for positive time differences and the correlators
    # require positive tau
    if state0 is None:
        rho0 = steadystate(H, c_ops)
        tlist = [0]
    elif isket(state0):
        rho0 = ket2dm(state0)
    else:
        rho0 = state0

    if debug:
        print(inspect.stack()[0][3])

    # construct the Liouvillian
    L = liouvillian(H, c_ops)

    corr_mat = np.zeros([np.size(tlist), np.size(taulist)], dtype=complex)
    solES_t = ode2es(L, rho0)

    # evaluate the correlation function
    for t_idx in range(len(tlist)):
        rho_t = esval(solES_t, [tlist[t_idx]])
        solES_tau = ode2es(L, c_op * rho_t * a_op)
        corr_mat[t_idx, :] = esval(expect(b_op, solES_tau), taulist)

    return corr_mat
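For reference, QuTiP exposes this calculation through a public wrapper; a minimal usage sketch (assuming QuTiP 4.x, where correlation_3op_2t is available at the top level):

import numpy as np
from qutip import destroy, correlation_3op_2t

N = 10                              # Fock-space truncation
a = destroy(N)
H = 2 * np.pi * a.dag() * a         # cavity Hamiltonian
c_ops = [np.sqrt(0.25) * a]         # photon loss
taulist = np.linspace(0.0, 5.0, 100)
# <a+(t) n(t+tau) a(t)> from the steady state (state0=None, tlist=[0])
corr = correlation_3op_2t(H, None, [0], taulist, c_ops,
                          a.dag(), a.dag() * a, a)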
Code Example #2
File: test_nose.py  Project: lmfit/lmfit-py
    def test_emcee_PT_output(self):
        # test mcmc output when using parallel tempering
        if not HAS_EMCEE:
            return True
        try:
            from pandas import DataFrame
        except ImportError:
            return True
        out = self.mini.emcee(ntemps=6, nwalkers=10, steps=20, burn=5, thin=2)
        assert_(isinstance(out, MinimizerResult))
        assert_(isinstance(out.flatchain, DataFrame))

        # check that we can access the chains via parameter name
        assert_(out.flatchain['amp'].shape[0] == 80)
        assert out.errorbars
        assert_(np.isfinite(out.params['amp'].correl['period']))

        # the lnprob array should be the same as the chain size
        assert_(np.size(out.chain)//out.nvarys == np.size(out.lnprob))

        # test chain output shapes
        assert_(out.lnprob.shape == (6, 10, (20-5+1)/2))
        assert_(out.chain.shape == (6, 10, (20-5+1)/2, out.nvarys))
        # Only the 0th temperature is returned
        assert_(out.flatchain.shape == (10*(20-5+1)/2, out.nvarys))
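The expected chain length per walker follows from the chain[burn::thin] slice; a quick standalone check of that arithmetic (not part of the original test):

# steps=20, burn=5, thin=2 keeps indices 5, 7, ..., 19: 8 samples per walker
assert len(range(20)[5::2]) == (20 - 5 + 1) // 2 == 8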
Code Example #3
    def getStripStatistics(self, yKey='vPhi', nMin=10):

        """For each of the strips, get the strip statistics"""

        if np.size(self.stripsFeH) < 1:
            self.buildStripsFeH()

        # may as well loop through!!

        # View of what we're using for our vertical quantity
        x = self.tSim['FeHObs']
        y = self.tSim[yKey]

        nStrips = np.size(self.stripsFeH) - 1
        self.stripCounts = np.zeros(nStrips, dtype='int')
        self.stripMeans = np.zeros(nStrips)
        self.stripMedns = np.zeros(nStrips)
        self.stripStdds = np.zeros(nStrips)
        self.stripFeHs = np.zeros(nStrips) # central point for sample

        for iStrip in range(nStrips):
            xLo = self.stripsFeH[iStrip]
            xHi = self.stripsFeH[iStrip+1]

            bStrip = (self.bSel) & (x >= xLo) & (x < xHi)

            self.stripCounts[iStrip] = np.sum(bStrip)
            if self.stripCounts[iStrip] < nMin:
                continue
            
            self.stripMeans[iStrip] = np.mean(y[bStrip])
            self.stripMedns[iStrip] = np.median(y[bStrip])
            self.stripStdds[iStrip] = np.std(y[bStrip])
            self.stripFeHs[iStrip] = np.median(x[bStrip])
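The per-strip selection and statistics can be cross-checked with np.digitize on synthetic data (a sketch, independent of the class and its attributes):

import numpy as np

edges = np.array([-1.0, -0.5, 0.0, 0.5])            # strip boundaries
x = np.random.default_rng(0).uniform(-1, 0.5, 200)  # synthetic [Fe/H]
y = 2.0 * x + 0.1                                   # synthetic vertical quantity
idx = np.digitize(x, edges) - 1                     # strip index per sample
for i in range(edges.size - 1):
    sel = idx == i
    if np.sum(sel) >= 10:                           # same nMin guard as above
        print(i, np.sum(sel), np.mean(y[sel]), np.median(x[sel]))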
Code Example #4
    def set_feature(self, feature, fid):
        """
        self.set_feature(feature,fid):
        Add a feature to the feature list of the structure
        
        Parameters
        ----------
        feature: array of shape(self.nb_subj,self.k,fdim),
                 where fdim is the feature dimension
        fid, string, the feature id
        """
        # 1. test that the feature does not exist yet
        i = np.array([fid == f for f in self.fids])
        i = np.nonzero(i)
        i = np.reshape(i, np.size(i))
        if np.size(i) > 0:
            raise ValueError("Existing feature id")

        # 2. if no, add the new one
        if np.size(feature) == self.nb_subj * self.k:
            feature = np.reshape(feature, (self.nb_subj, self.k))

        if (feature.shape[0]) == self.nb_subj:
            if (feature.shape[1]) == self.k:
                self.features.append(feature)
                self.fids.append(fid)
            else:
                raise ValueError("incoherent size")
        else:
            raise ValueError("incoherent size")
Code Example #5
File: mroi.py  Project: Naereen/nipy
def subdomain_from_balls(domain, positions, radii):
    """Create discrete ROIs as a set of balls within a certain
    coordinate systems.

    Parameters
    ----------
    domain: StructuredDomain instance,
        the description of a discrete domain
    positions: array of shape(k, dim):
        the positions of the balls
    radii: array of shape(k):
        the sphere radii

    """
    # checks
    if np.size(positions) == positions.shape[0]:
        positions = np.reshape(positions, (positions.size, 1))
    if positions.shape[1] != domain.em_dim:
        raise ValueError('incompatible dimensions for domain and positions')
    if positions.shape[0] != np.size(radii):
        raise ValueError('incompatible positions and radii provided')

    label = - np.ones(domain.size)

    for k in range(radii.size):
        supp = np.sum((domain.coord - positions[k]) ** 2, 1) < radii[k] ** 2
        label[supp] = k

    return SubDomains(domain, label)
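The geometric labelling step can be exercised with plain NumPy alone (a sketch; grid points stand in for domain.coord, and overlapping balls resolve to the later index, as in the loop above):

import numpy as np

coord = np.stack(np.meshgrid(np.arange(5.), np.arange(5.)), -1).reshape(-1, 2)
positions = np.array([[1., 1.], [3., 3.]])
radii = np.array([1.2, 1.0])
label = -np.ones(coord.shape[0])
for k in range(radii.size):
    supp = np.sum((coord - positions[k]) ** 2, 1) < radii[k] ** 2
    label[supp] = k
print(label.reshape(5, 5))   # -1 outside, 0/1 inside the two balls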
Code Example #6
File: psdepthmodel.py  Project: iceseismic/SeisSuite
    def __init__(self, vs, dz, ratio_vp_vs, ratio_rho_vs, name='',
                 store_vg_at_periods=None):
        """
        Initializes model with layers' Vs (vs), layers' thickness (dz),
        and layers' ratio Vp/Vs and rho/Vs (ratio_vp_vs, ratio_rho_vs).
        """
        # checking shapes
        nlayers = np.size(vs)
        if np.size(dz) != nlayers - 1:
            raise Exception("Size of dz should be nb of layers minus 1")
        if np.size(ratio_vp_vs) not in (1, nlayers):
            raise Exception("Size of ratio_vp_vs should be nb of layers or 1")
        if np.size(ratio_rho_vs) not in (1, nlayers):
            raise Exception("Size of ratio_rho_vs should be nb of layers or 1")

        self.name = name
        self.vs = np.array(vs)
        self.dz = np.array(dz)
        self.ratio_vp_vs = np.array(ratio_vp_vs)
        self.ratio_rho_vs = np.array(ratio_rho_vs)

        # storing vg model at selected periods if required
        self.stored_vgperiods = store_vg_at_periods
        if store_vg_at_periods is not None:
            self.stored_vg = self.vg_model(store_vg_at_periods)
        else:
            self.stored_vg = None
Code Example #7
File: tritools.py  Project: 4over7/matplotlib
    def _total_to_compress_renum(mask, n=None):
        """
        Parameters
        ----------
        mask : 1d boolean array or None
            mask
        n : integer
            length of the mask. Useful only if mask can be None

        Returns
        -------
        renum : integer array
            array so that (`valid_array` being a compressed array
            based on a `masked_array` with mask *mask*) :

                  - For all i such that mask[i] = False:
                    valid_array[renum[i]] = masked_array[i]
                  - For all i such that mask[i] = True:
                    renum[i] = -1 (invalid value)

        """
        if n is None:
            n = np.size(mask)
        if mask is not None:
            renum = -np.ones(n, dtype=np.int32)  # Default num is -1
            valid = np.arange(n, dtype=np.int32).compress(~mask, axis=0)
            renum[valid] = np.arange(np.size(valid, 0), dtype=np.int32)
            return renum
        else:
            return np.arange(n, dtype=np.int32)
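The renumbering contract in the docstring is easy to verify standalone (plain NumPy, independent of matplotlib):

import numpy as np

mask = np.array([False, True, False, False, True])
n = np.size(mask)
renum = -np.ones(n, dtype=np.int32)
valid = np.arange(n, dtype=np.int32).compress(~mask, axis=0)
renum[valid] = np.arange(np.size(valid, 0), dtype=np.int32)
print(renum)   # [ 0 -1  1  2 -1]: compressed indices, -1 where masked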
Code Example #8
def arraySlidingWindow(result_array, sliding_window_size, filter_ratio):
	array_length = np.size(result_array)
	buffer_array = np.zeros(1, dtype=int)

	for index in range(0, array_length - sliding_window_size):
		window_score = np.sum(result_array[index: index + sliding_window_size])
		if window_score > (sliding_window_size * filter_ratio):
			buffer_array = np.append(buffer_array, 1)
		else:
			buffer_array = np.append(buffer_array, 0)

	buffer_array = np.delete(buffer_array, 0)
	# print(buffer_array)
	length = np.size(buffer_array)
	flag_array = np.zeros(length, dtype=int)
	pre_value = 0
	for buffer_index, value in enumerate(buffer_array):
		if (pre_value - value) == -1:
			flag_array[buffer_index] = 1
		elif(pre_value - value) == 1:
			flag_array[buffer_index] = -1
		else:
			pass
		pre_value = value
	return flag_array
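A quick demonstration on a toy detection vector (assuming the function above is in scope); the returned flags mark a rising edge with 1 and a falling edge with -1:

import numpy as np

detections = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0])
print(arraySlidingWindow(detections, 3, 0.5))
# -> [ 0  1  0  0  0 -1  0]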
Code Example #9
File: randmeanfor.py  Project: pytutor/python-tutor
def randmeanfor(R):

    mean_p = 0
    count_p = 0
    mean_n = 0
    count_n = 0
    shape = np.shape(R)
    R = np.reshape(R, np.size(R))

    for k in np.arange(np.size(R)):
        if R[k] > 0:
            mean_p = mean_p + R[k]
            count_p = count_p + 1
        elif R[k] < 0:
            mean_n = mean_n + R[k]
            count_n = count_n + 1

    mean_p = mean_p / count_p
    mean_n = mean_n / count_n

    for k in np.arange(np.size(R)):
        if R[k] > 0:
            R[k] = mean_p
        elif R[k] < 0:
            R[k] = mean_n
    R = np.reshape(R, shape)

    return R
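For comparison, the same replacement can be written without explicit loops; a vectorized sketch (randmeanfor_vec is a name introduced here, not part of the original project):

import numpy as np

def randmeanfor_vec(R):
    R = np.asarray(R, dtype=float).copy()
    pos, neg = R > 0, R < 0
    if pos.any():
        R[pos] = R[pos].mean()    # every positive entry -> mean of positives
    if neg.any():
        R[neg] = R[neg].mean()    # every negative entry -> mean of negatives
    return R

print(randmeanfor_vec([[1.0, -2.0], [3.0, 0.0]]))   # [[ 2. -2.] [ 2.  0.]]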
Code Example #10
File: boston_housing.py  Project: gokulvanan/mlfun
def explore_city_data(city_data):
    """Calculate the Boston housing statistics."""

    # Get the labels and features from the housing data
    housing_prices = city_data.target
    housing_features = city_data.data

    ###################################
    ### Step 1. YOUR CODE GOES HERE ###
    ###################################

    # Please calculate the following values using the Numpy library
    print "Size of data (number of houses)"
    print np.size(housing_prices)
    print "Number of features"
    print np.size(housing_features, 1)
    print "Minimum price"
    print np.min(housing_prices)
    print "Maximum price"
    print np.max(housing_prices)
    print "Calculate mean price"
    print np.mean(housing_prices)
    print "Calculate median price"
    print np.median(housing_prices)
    print "Calculate standard deviation"
    print np.std(housing_prices)
Code Example #11
File: Regress.py  Project: kvas7andy/diploma
def LeaveOneOut(assets, fund, lam, dynamicmodel, band=1):
    n = np.size(assets,1)
    T = np.size(assets,0)
    outPoint = np.zeros([T,1])
    arrR2 = np.zeros([T,1])
    fund_est = np.zeros([T,1])

    for t in range(band,T-band):
  #      if t == 26 or t == 39 or t== 41 or t == 47 or t==49:
  #        continue
       # Model
       # import ipdb; ipdb.set_trace()

        outPoint = np.zeros([T,1])
        outPoint[t] = 1
        assets_t = assets[t,:]
        beta, fund_est = nonstatRegress(assets, fund, lam, outPoint, dynamicmodel)
        beta_t = beta[t,:]
        fund_t = fund[t,0]
      #  import ipdb; ipdb.set_trace()
        fund_t_est = np.dot(beta_t,assets_t.T)
        fund_est[t,0] = fund_t_est
        arrR2[t,0] = (fund_t-fund_t_est)*(fund_t-fund_t_est)



    r2 = np.mean(arrR2)
    return arrR2,r2,fund_est
Code Example #12
def get_list_of_stimulus_name(Working_Directory):
    ## To find num z planes in each trial directory
    num_z_planes = []
    Name_stimulus = list()
    
    Stimulus_Directories = [f for f in os.listdir(Working_Directory) if os.path.isdir(os.path.join(Working_Directory, f)) and f.find('Figures')<0]

    for ii in range(0, size(Stimulus_Directories, axis = 0)):
        Trial_Directories = [f for f in os.listdir(os.path.join(Working_Directory, Stimulus_Directories[ii]))\
        if os.path.isdir(os.path.join(Working_Directory, Stimulus_Directories[ii], f)) and f.find('Figures')<0] #Get only directories        
        temp_num_z_planes = zeros((size(Trial_Directories)), dtype=int)    
        
        for jj in range(0, size(Trial_Directories, axis = 0)):
            Image_Directory = os.path.join(Working_Directory, Stimulus_Directories[ii], Trial_Directories[jj], 'C=1')+filesep    
            tif = TIFF.open(Image_Directory +'T=1.tif', mode='r') #Open multitiff 
            count = 1        
            for image in tif.iter_images():
                temp_num_z_planes[jj] = count
                count = count+1
        
        num_z_planes.append(temp_num_z_planes)
                  
    for ii in range(0, size(Stimulus_Directories, axis = 0)):
        Trial_Directories = [f for f in os.listdir(os.path.join(Working_Directory, Stimulus_Directories[ii]))\
        if os.path.isdir(os.path.join(Working_Directory, Stimulus_Directories[ii], f)) and f.find('Figures')<0] #Get only directories
        for jj in range(0, size(Trial_Directories, axis = 0)):
            for kk in range(0, num_z_planes[ii][jj]):
                name_for_saving_figures = Stimulus_Directories[ii] + ' ' + Trial_Directories[jj] + ' Z=' + str(kk+1)       
                Name_stimulus.append(name_for_saving_figures)
    
    return Name_stimulus
Code Example #13
def get_pixels_OFF(matched_pixels_kmeans, newclrs_rgb_OFF, unique_clrs_kmeans):
    matched_pixels_OFF = zeros((size(newclrs_rgb_OFF), size(matched_pixels_kmeans,1)))
    for ii in range(0, size(newclrs_rgb_OFF, 0)):
        index = unique_clrs_kmeans.index(newclrs_rgb_OFF[ii])
        print(index, newclrs_rgb_OFF[ii])
        matched_pixels_OFF[ii,:] = matched_pixels_kmeans[index,:]
    return matched_pixels_OFF
Code Example #14
File: ENPC.py  Project: dvro/ml
 def reprodution( self ):
     
     i, j, soma, sizeRoleta, classe = 0, 0, 0, 0, None
     qtdPrototipos = np.size( self.R, 0 )
     
     while( i < qtdPrototipos ):
         
         sizeRoleta, j = 0, 0
         while( j < self.qtdClasses ):
             sizeRoleta = sizeRoleta + np.size( self.V[i][j] )            
             j = j + 1
             
         roleta = rd.randrange(0, sizeRoleta)
         
         j, soma = 0, 0
         while( j < self.qtdClasses ):
             soma = soma + np.size( self.V[i][j] )
             if( roleta < soma ):
                 classe = j + 1
                 break
             j = j + 1
         
         j = classe - 1
         
         if( classe != self.Rclasses[i] ):
             newPrototipo = self.centroide( self.V[i][j] )
             self.R = np.concatenate( (self.R, [newPrototipo]), 0 )
             self.Rclasses = np.concatenate( ( self.Rclasses, [classe] ), 0 )
         
         i = i + 1
Code Example #15
    def show(self, x):
        """
        visualization of the mixture model based on the empirical histogram of x

        Parameters
        ----------
        x: array of shape(nbitems): the data to be processed
        """
        step = 3.5*np.std(x)/np.exp(np.log(np.size(x))/3)
        bins = max(10,int((x.max()-x.min())/step))
        h,c = np.histogram(x, bins)
        h = h.astype(float)/np.size(x)
        p = self.mixt
        
        dc = c[1]-c[0]
        y = (1-p)*_gaus_dens(self.mean,self.var,c)*dc
        z = np.zeros(np.size(c))
        i = np.ravel(np.nonzero(c>0))
        z = _gam_dens(self.shape,self.scale,c)*p*dc
        
        import matplotlib.pylab as mp
        mp.figure()
        mp.plot(0.5 *(c[1:] + c[:-1]),h)
        mp.plot(c,y,'r')
        mp.plot(c,z,'g')
        mp.plot(c,z+y,'k')
        mp.title('Fit of the density with a Gamma-Gaussians mixture')
        mp.legend(('data', 'gaussian component', 'gamma component',
                   'mixture distribution'))
Code Example #16
File: jac.py  Project: liubenyuan/pyEIT
    def __init__(self, mesh, elPos,
                 exMtx=None, step=1, perm=1., parser='et3',
                 p=0.20, lamb=0.001, method='kotre'):
        """
        JAC, default file parser is 'std'

        Parameters
        ----------
        mesh : dict
            mesh structure
        elPos : array_like
            position (numbering) of electrodes
        exMtx : array_like, optional
            2D array, each row is one excitation pattern
        step : int, optional
            measurement method
        perm : array_like, optional
            initial permittivities in generating Jacobian
        parser : str, optional
            parsing file format
        p,lamb : float
            JAC parameters
        method : str
            regularization methods
        """
        # store configuration values
        self.no2xy = mesh['node']
        self.el2no = mesh['element']
        self.elPos = elPos

        # generate excitation patterns
        if exMtx is None:
            self.exMtx = eit_scan_lines(16, 8)
        else:
            self.exMtx = exMtx
        self.step = step

        # background (init, x0) perm
        n_e = np.size(self.el2no, 0)
        if np.size(perm) == n_e:
            perm_init = perm
        else:
            perm_init = perm * np.ones(n_e)

        # generate Jacobian
        self.fwd = forward(mesh, elPos)
        fs = self.fwd.solve(exMtx=self.exMtx, step=self.step,
                            perm=perm_init, parser=parser)
        self.Jac = fs.Jac
        self.v = fs.v
        self.normv = la.norm(self.v)
        self.x0 = perm_init
        self.parser = parser

        # pre-compute H0 for dynamical imaging
        # H = (J.T*J + R)^(-1) * J.T
        self.H = h_matrix(self.Jac, p, lamb, method)
        self.p = p
        self.lamb = lamb
        self.method = method
Code Example #17
File: ENPC.py  Project: dvro/ml
 def mutation( self ):
     
     i, j, qtd, qtdMax = 0, 0, 0, 0
     
     classe = -1
     
     qtdPrototipos = np.size( self.R, 0 )
     
     while( i < qtdPrototipos ):
         
         j, qtdMax = 0, -float("inf")
         while( j < self.qtdClasses ):
             
             qtd = np.size( self.V[i][j] )
             
             if( qtd > qtdMax ):
                 classe = j+1
                 qtdMax = qtd
                 
             j = j + 1
         
         if( qtdMax != np.size( self.V[i][self.Rclasses[i]-1], 0 ) ):
             self.Rclasses[i] = classe
             
         i = i + 1
Code Example #18
File: series.py  Project: thunder-project/thunder
    def convolve(self, signal, mode='full'):
        """
        Convolve series data against another signal.

        Parameters
        ----------
        signal : array
            Signal to convolve with (must be 1D)

        mode : str, optional, default='full'
            Mode of convolution, options are 'full', 'same', and 'valid'
        """

        from numpy import convolve

        s = asarray(signal)

        n = size(self.index)
        m = size(s)

        # use expected lengths to make a new index
        if mode == 'same':
            newmax = max(n, m)
        elif mode == 'valid':
            newmax = max(m, n) - min(m, n) + 1
        else:
            newmax = n+m-1
        newindex = arange(0, newmax)

        return self.map(lambda x: convolve(x, signal, mode), index=newindex)
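The three newmax branches mirror the output lengths of numpy.convolve; a standalone check of that arithmetic:

import numpy as np

n, m = 8, 3
assert np.convolve(np.ones(n), np.ones(m), 'full').size == n + m - 1
assert np.convolve(np.ones(n), np.ones(m), 'same').size == max(n, m)
assert np.convolve(np.ones(n), np.ones(m), 'valid').size == max(m, n) - min(m, n) + 1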
Code Example #19
def trace_fracture_through_grid(m, start_yx, spacing):
    """Create a 2D fracture in a grid.

    Creates a "fracture" in a 2D grid, m, by setting cell values to unity along
    the trace of the fracture (i.e., "drawing" a line through the grid).

    Parameters
    ----------
    m : 2D Numpy array
        Array that represents the grid
    start_yx : tuple of int
        Starting grid coordinates for fracture
    spacing : tuple of float
        Step sizes in y and x directions

    Returns
    -------
    None, but changes contents of m
    """
    y0, x0 = start_yx
    dy, dx = spacing

    x = x0
    y = y0
    
    while round(x) < size(m, 1) and round(y) < size(m, 0) \
            and round(x) >= 0 and round(y) >= 0:
        m[int(y + 0.5)][int(x + 0.5)] = 1
        x += dx
        y += dy
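A minimal demonstration (assuming from numpy import size at module level, as the unqualified size() calls above imply):

import numpy as np
from numpy import size

grid = np.zeros((5, 8), dtype=int)
trace_fracture_through_grid(grid, (0, 0), (0.5, 1.0))
print(grid)   # a diagonal line of 1s running from the top-left corner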
Code Example #20
File: stripe.py  Project: JStuckner/tomopy
def _ringb(sino, m, n, step):
    mysino = np.transpose(sino)
    R = np.size(mysino, 0)
    N = np.size(mysino, 1)

    # Remove NaN.
    pos = np.where(np.isnan(mysino))
    mysino[pos] = 0

    # Kernel & regularization parameter.
    h = _kernel(m, n)

    # Mathematical correction by blocks.
    nblock = int(N / step)
    new = np.ones((R, N))
    for k in range(0, nblock):
        sino_block = mysino[:, k * step:(k + 1) * step]
        alpha = _get_parameter(sino_block)
        pp = sino_block.mean(1)

        f = -_ringMatXvec(h, pp)
        q = _ringCGM(h, alpha, f)

        # Update sinogram.
        q.shape = (R, 1)
        K = np.kron(q, np.ones((1, step)))
        new[:, k * step:(k + 1) * step] = np.add(sino_block, K)
    newsino = new.astype(np.float32)
    return np.transpose(newsino)
Code Example #21
File: series.py  Project: thunder-project/thunder
    def correlate(self, signal):
        """
        Correlate records against one or many one-dimensional arrays.

        Parameters
        ----------
        signal : array-like
            One or more signals to correlate against.
        """
        s = asarray(signal)

        if s.ndim == 1:
            if size(s) != self.shape[-1]:
                raise ValueError("Length of signal '%g' does not match record length '%g'"
                                 % (size(s), self.shape[-1]))

            return self.map(lambda x: corrcoef(x, s)[0, 1], index=[1])

        elif s.ndim == 2:
            if s.shape[1] != self.shape[-1]:
                raise ValueError("Length of signal '%g' does not match record length '%g'"
                                 % (s.shape[1], self.shape[-1]))
            newindex = arange(0, s.shape[0])
            return self.map(lambda x: array([corrcoef(x, y)[0, 1] for y in s]), index=newindex)

        else:
            raise Exception('Signal to correlate with must have 1 or 2 dimensions')
Code Example #22
def _exclusion_map(i, ref, target, targeti):
    """Ancillary function to determin admissible values of some position
    within some predefined values

    Parameters
    ----------
    i (int): index of the structure under consideration
    ref: Field that represent the topological structure of parcels
         and their standard position
    target= array of shape (ref.V,3): current posistion of the parcels
    targeti array of shape (n,3): possible new positions for the ith item

    Results
    -------
    emap: aray of shape (n): a potential that yields the fitness
          of the proposed positions given the current configuration
    rmin (double): ancillary parameter
    """
    xyz = ref.field
    fd = target.shape[1]
    ln = ref.list_of_neighbors()
    j = ln[i]
    j = np.reshape(j, np.size(j))
    rmin = 0
    if np.size(j) > 0:
        dx = np.reshape(xyz[j] - xyz[i], (np.size(j), fd))
        rmin = np.mean(np.sum(dx ** 2, 1)) / 4
        u0 = xyz[i] + np.mean(target[j] - xyz[j], 0)
        emap = rmin - np.sum((targeti - u0) ** 2, 1)
        for k in j:
            amap = np.sum((targeti - target[k]) ** 2, 1) - rmin / 4
            emap[amap < 0] = amap[amap < 0]
    else:
        emap = np.zeros(targeti.shape[0])
    return emap, rmin
Code Example #23
File: xtc.py  Project: andreubp/htmd
def XTCwrite(coords, box, filename, time=None, step=None):
    nframes = np.size(coords, 2)
    if np.size(time) != nframes:
        time = np.zeros(nframes)
    if np.size(step) != nframes:
        step = np.zeros(nframes, dtype=int)

    if os.path.isfile(filename):
        os.unlink(filename)

    lib = xtc_lib()
    bbox = (c_float * 3)()
    natoms = c_int(coords.shape[0])
    cstep = c_int()
    # print(coords.shape)
    for f in range(coords.shape[2]):
        cstep = c_int(step[f])
        ctime = c_float(time[f])  # TODO FIXME
        # print ( step )
        # print ( time )
        bbox[0] = box[0, f] * 0.1
        bbox[1] = box[1, f] * 0.1
        bbox[2] = box[2, f] * 0.1

        data = coords[:, :, f].astype(np.float32) * 0.1  # Convert from A to nm
        pos = data.ctypes.data_as(POINTER(c_float))
        lib['libxtc'].xtc_write(
            c_char_p(filename.encode("ascii")),
            natoms,
            cstep,
            ctime,
            pos,
            bbox)
    pass
Code Example #24
File: sub.py  Project: prayash22/p3dthon
def roll_run(CR,sx=None):
    """ Roll every variable in a simulation (CR)
        in the x direction by length in indexspace (sx)
    """
    klst = ['rho','jx','jy','jz','bx','by','bz',
            'ex','ey','ez','ne','jex','jey','jez',
            'pexx','peyy','pezz','pexy','peyz','pexz',
            'ni','jix','jiy','jiz','vix','viy','viz',
            'vex','vey','vez','dene','deni',
            'pixx','piyy','pizz','pixy','piyz','pixz',
            'tepar','teperp1','tipar','tiperp1']

    kavlst = [k+'av' for k in klst]
    if sx is None:
        if CR['yy'][0] < 1.0:
            print('Gonna roll RIGHT!!!!!!!!!!!!')
            sx = -1*np.size(CR['xx'])//4
        else:
            print('Gonna roll LEFT!!!!!!!!!!!!')
            sx = np.size(CR['xx'])//4
    
    for key in CR.keys():
# Old way not super smart
#        if key.rfind('av') == len(key)-2 and len(key) > 2:
#            print 'Rolling ',key
#            CR[key] = np.roll(CR[key],sx,axis=1)
# New way a little bit smarter
        if key in klst or key in kavlst:
            print('Rolling ', key)
            CR[key] = np.roll(CR[key],sx,axis=1)
Code Example #25
def get_heatmap(data_mat, name_for_saving_files, pp, stimulus_on_time, stimulus_off_time, delta_ff, f0_start, f0_end):
    
    #Plot heatmap for validation 
    A1 = np.reshape(data_mat, (np.size(data_mat,0)*np.size(data_mat,1), np.size(data_mat,2)))
    if delta_ff == 1:
        delta_ff_A1 = np.zeros(np.shape(A1))
        for ii in range(0, np.size(A1, 0)):
            delta_ff_A1[ii,:] = (A1[ii,:]-np.mean(A1[ii,f0_start:f0_end]))/(np.std(A1[ii,f0_start:f0_end])+0.1)
        B = np.argsort(np.mean(delta_ff_A1, axis=1))
        print(np.max(delta_ff_A1))
    else:
        B = np.argsort(np.mean(A1, axis=1))
        print(np.max(A1))

    with sns.axes_style("white"):
        C = A1[B,:][-2000:,:]

        fig2 = plt.imshow(C,aspect='auto', cmap='jet', vmin = np.min(C), vmax = np.max(C))
        
        plot_vertical_lines_onset(stimulus_on_time)
        plot_vertical_lines_offset(stimulus_off_time)
        plt.title(name_for_saving_files)
        plt.colorbar()
        fig2 = plt.gcf()
        pp.savefig(fig2)
        plt.close()
Code Example #26
    def write_to_csv(self, csv_file, session="0"):
        """ Write the paradigm to a csv file

        Parameters
        ----------
        csv_file: string, path of the csv file
        session: string, optional, session identifier
        """
        import csv

        with open4csv(csv_file, "w") as fid:
            writer = csv.writer(fid, delimiter=" ")
            n_pres = np.size(self.con_id)
            sess = np.repeat(session, n_pres)
            pdata = np.vstack((sess, self.con_id, self.onset)).T

            # add the duration information
            if self.type == "event":
                duration = np.zeros(np.size(self.con_id))
            else:
                duration = self.duration
            pdata = np.hstack((pdata, np.reshape(duration, (n_pres, 1))))

            # add the amplitude information
            if self.amplitude is not None:
                amplitude = np.reshape(self.amplitude, (n_pres, 1))
                pdata = np.hstack((pdata, amplitude))

            # write pdata
            for row in pdata:
                writer.writerow(row)
Code Example #27
File: zdataset.py  Project: muqiao0626/znn-release
    def get_candidate_loc( self, low, high ):
        """
        find the candidate location of subvolume

        Parameters
        ----------
        low  : vector with length of 3, low value of deviation range
        high : vector with length of 3, high value of deviation range

        Returns:
        --------
        ret : a tuple, the coordinate of nonzero elements,
              format is the same with return of numpy.nonzero.
        """
        if np.size(self.msk) == 0:
            mask = np.ones(self.data.shape[1:4], dtype=self.data.dtype)
        else:
            mask = np.copy(self.msk[0,:,:,:])
        # erase outside region of deviation range.
        ct = self.center
        mask[:ct[0]+low[0], :, : ] = 0
        mask[:, :ct[1]+low[1], : ] = 0
        mask[:, :, :ct[2]+low[2] ] = 0

        mask[ct[0]+high[0]+1:, :, :] = 0
        mask[:, ct[1]+high[1]+1:, :] = 0
        mask[:, :, ct[2]+high[2]+1:] = 0

        locs = np.nonzero(mask)

        if np.size(locs[0])==0:
            raise NameError('no candidate location!')

        return locs
Code Example #28
def test_ward_clustering():
    """
    Check that we obtain the correct number of clusters with Ward clustering.
    """
    rnd = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=bool)
    X = rnd.randn(100, 50)
    connectivity = grid_to_graph(*mask.shape)
    clustering = Ward(n_clusters=10, connectivity=connectivity)
    clustering.fit(X)
    # test caching
    clustering = Ward(n_clusters=10, connectivity=connectivity,
                      memory=mkdtemp())
    clustering.fit(X)
    labels = clustering.labels_
    assert_true(np.size(np.unique(labels)) == 10)
    # Turn caching off now
    clustering = Ward(n_clusters=10, connectivity=connectivity)
    # Check that we obtain the same solution with early-stopping of the
    # tree building
    clustering.compute_full_tree = False
    clustering.fit(X)
    np.testing.assert_array_equal(clustering.labels_, labels)
    clustering.connectivity = None
    clustering.fit(X)
    assert_true(np.size(np.unique(clustering.labels_)) == 10)
    # Check that we raise a TypeError on dense matrices
    clustering = Ward(n_clusters=10,
                      connectivity=connectivity.todense())
    assert_raises(TypeError, clustering.fit, X)
    clustering = Ward(n_clusters=10,
                      connectivity=sparse.lil_matrix(
                          connectivity.todense()[:10, :10]))
    assert_raises(ValueError, clustering.fit, X)
Code Example #29
    def __init__(self, k, ijk, label, group_labels=None, referential=None, subjects=[]):
        """
        Constructor
        """
        self.k = k
        self.ijk = ijk.astype(int)
        self.nbvox = ijk.shape[0]
        if np.size(ijk) == self.nbvox:
            ijk = np.reshape(ijk, (self.nbvox, 1))

        self.anatdim = ijk.shape[1]
        self.label = label.astype(int)
        if np.size(label) == self.nbvox:
            label = np.reshape(label, (self.nbvox, 1))

        self.nb_subj = label.shape[1]

        if group_labels is None:
            self.group_labels = np.zeros(self.nbvox).astype(int)
        else:
            self.group_labels = group_labels

        if subjects == []:
            self.subjects = range(self.nb_subj)
        else:
            self.subjects = subjects

        self.referential = referential

        self.features = []
        self.fids = []
        self.check()
Code Example #30
File: color_plots.py  Project: sasdelli/lc_predictor
def create_class_vec(new_name):
    cfa_dir=PLS_code_dir+"data/cfaspec_snIa/"
    SNe_data=np.loadtxt(cfa_dir+'/cfasnIa_param_mod.dat', dtype={'names': ('SN_name', 'zhel', 'tMaxB', 'err_tMaxB', 'ref', 'Dm15', 'err_Dm15', 'ref2', 'M_B', 'err_M_B', "BmV", "err_BmV", "BmmVm", "err_BmmVm", "Phot_ref"),'formats': ('S15', "f8", "f8","f8", "S15", "f8", "f8","S15","f8" , "f8","f8", "f8","f8", "f8","S15")})
    spectra_data=np.loadtxt(cfa_dir+'/cfasnIa_mjdspec.dat', dtype={'names': ('spectrum_name', 'time'),'formats': ('S40', "f8")})
    SNe_BranchWang_class=np.loadtxt(cfa_dir+'/branchwangclass_mod.dat', dtype={'names': ('SN_name', 'pEW5972', 'pEW6355', 'vabs6355', 'phase', 'Branch', 'Wang'),'formats': ('S15', "f8", "f8","f8",  "f8","S15","S15")})
    name_regex = re.compile(r'(.+)\-\d+\.\d+')
    name_vector=[]
    for spectrum_name in enumerate(spectra_data["spectrum_name"]):
        name_vector.append(name_regex.search(spectrum_name[1]).group(1))
    #It creates the vectors of the classification of Branch and Wang
    #SN_name_vec=[]
    pEW5972_vec=[]
    pEW6355_vec=[]
    vabs6355_vec=[]
    Branch_vec=[]
    Wang_vec=[]
    for i, supernova in enumerate(new_name):
        pEW5972_tmp=np.nan
        pEW6355_tmp=np.nan
        vabs6355_tmp=np.nan
        Branch_tmp=np.nan
        Wang_tmp=np.nan
        for name_sn in enumerate(SNe_BranchWang_class["SN_name"]):
            if name_sn[1] ==  supernova:
                SN_name_tmp, pEW5972_tmp, pEW6355_tmp, vabs6355_tmp, phase_tmp, Branch_tmp, Wang_tmp= SNe_BranchWang_class[name_sn[0]]
        #SN_name_vec.append(SN_name_tmp)
        pEW5972_vec.append(pEW5972_tmp)
        pEW6355_vec.append(pEW6355_tmp)
        vabs6355_vec.append(vabs6355_tmp)
        Branch_vec.append(Branch_tmp)
        Wang_vec.append(Wang_tmp)
    #color plot for Branch 
    color_plot_Branch=[]
    for i in  range(0,np.size(new_name)):
        if Branch_vec[i]=="CN":
            color_plot_Branch.append('r')
        elif  Branch_vec[i]=="SS":
            color_plot_Branch.append('g')
        elif  Branch_vec[i]=="BL":
            color_plot_Branch.append('b')
        elif  Branch_vec[i]=="CL":
            color_plot_Branch.append('y')
        else:
            color_plot_Branch.append('w')
    #color plot for Wang 
    color_plot_Wang=[]
    for i in  range(0,np.size(new_name)):
        if Wang_vec[i]=="91T":
            color_plot_Wang.append('r')
        elif  Wang_vec[i]=="N" :
            color_plot_Wang.append('g')
        elif  Wang_vec[i]=="pec":
            color_plot_Wang.append('b')
        elif  Wang_vec[i]=="HV":
            color_plot_Wang.append('y')
        elif  Wang_vec[i]=="91bg":
            color_plot_Wang.append('c')
        else:
            color_plot_Wang.append('w')
    return color_plot_Wang, color_plot_Branch
Code Example #31
File: pre.py  Project: lxy1991/CRI-PHASE-RETRIEVE
def kkrelation(ref_spectral_data,
               cri_spectral_data,
               phase_offset=0.0,
               norm_by_ref_flag=True):
    """
    Retrieve the real and imaginary components of a CRI spectra(um) via
    the Kramers-Kronig (KK) relation.

    Parameters
    ----------
    ref_spectral_data : ndarray
        NRB reference spectra(um) array that can be one-, two-,
        or three-dimensional
    cri_spectral_data : ndarray
        CRI spectra(um) array that can be one-, two-, or three-dimensional
    (phase_offset) : int, float, or ndarray, optional
        Global phase offset applied to the KK, which effectively controls
        the real-to-imaginary components relationship
    (norm_by_ref_flag) : bool
        Should the output be normalized by the square-root of the
        reference NRB spectrum(a)

    Returns
    -------
    out : complex ndarray
        The real and imaginary components of KK.

    Note
    ----
    (1) The imaginary component provides the spontaneous Raman-like
    spectra(um).

    (2) This module assumes the spectra are oriented as such that the
    frequency (wavenumber) increases with increasing index.  If this is
    not the case for your spectra(um), apply a phase_offset of _np.pi

    (3) This is the first attempt at converting MATLAB (Mathworks, Inc)
    scripts into Python code; thus, there will be bugs, the efficiency
    will be low(-ish), and I appreciate any useful suggestions or
    bug-finds.

    References
    ----------
    Y. Liu, Y. J. Lee, and M. T. Cicerone, "Broadband CARS spectral
    phase retrieval using a time-domain Kramers-Kronig transform,"
    Opt. Lett. 34, 1363-1365 (2009).

    C. H. Camp Jr, Y. J. Lee, and M. T. Cicerone, "Quantitative,
    Comparable Coherent Anti-Stokes Raman Scattering (CARS)
    Spectroscopy: Correcting Errors in Phase Retrieval"

    ===================================
    Original Python branch: Feb 16 2015

    @author: ("Charles H Camp Jr")\n
    @email: ("*****@*****.**")\n
    @date: ("Jun 28 2015")\n
    @version: ("0.1.1")\n
    """
    #import numpy as np

    # Ensure the shape of phase_offset is compatible
    # with cri_spectral_data
    if _np.size(phase_offset) == 1 or \
    phase_offset.shape == cri_spectral_data.shape:
        pass
    else:
        phase_offset = _matchsize(cri_spectral_data, phase_offset)

    # Ensure the shape of ref_spectral_data is compatible
    # with cri_spectral_data
    if ref_spectral_data.shape == cri_spectral_data.shape:
        pass
    else:
        ref_spectral_data = _matchsize(cri_spectral_data, ref_spectral_data)

    # Return the complex KK relation using the Hilbert transform.
    if norm_by_ref_flag is True:  # Norm the Amp by ref_spectral_data
        return _np.sqrt(cri_spectral_data/ref_spectral_data)*\
        _np.exp(1j*phase_offset+1j*\
        _np.imag(hilbertfft(0.5*_np.log(cri_spectral_data/\
        ref_spectral_data))))
    else:  # Do NOT norm the Amp by ref_spectral_data
        return _np.sqrt(cri_spectral_data)*_np.exp(1j*phase_offset+1j*\
        _np.imag(hilbertfft(0.5*_np.log(cri_spectral_data/\
        ref_spectral_data))))
Code Example #32
File: twoeddy_match.py  Project: liu-ran/Doubleeddy
    if lon_eddy[i]>=357.5 and lon_eddy[i]<=360:
        it_near = (np.where( ( lat_eddy > lat_eddy[i]-2.5 ) & ( lat_eddy < lat_eddy[i]+2.5 ) 
                    & ( lon_eddy < lon_eddy[i]+2.5-360 ) & ( lon_eddy > lon_eddy[i]-2.5 ) & (j1 == j1[i]) )[0])
    elif lon_eddy[i]>=0 and lon_eddy[i]<=2.5:
        it_near = (np.where( ( lat_eddy > lat_eddy[i]-2.5 ) & ( lat_eddy < lat_eddy[i]+2.5 ) 
                    & ( lon_eddy < lon_eddy[i]+2.5 ) & ( lon_eddy > lon_eddy[i]+360-2.5 ) & (j1 == j1[i]) )[0])     
    else:
        it_near = (np.where( ( lat_eddy > lat_eddy[i]-2.5)  &  (lat_eddy < lat_eddy[i]+2.5 ) 
                    & ( lon_eddy < lon_eddy[i]+2.5 )  & ( lon_eddy > lon_eddy[i]-2.5 )  & (j1 == j1[i]) )[0] )
    it_self = np.where(it_near == i)[0]
    it_near_noself = np.delete(it_near,it_self)

    xx = lat_eddy[it_near_noself]
    yy = lon_eddy[it_near_noself]
    it_usenum = np.size(it_near_noself)

    match_use_cache = (points2dist.points2dist(it_usenum, lat_eddy[i], xx, lon_eddy[i], yy , R[i], R[it_near_noself] ))

    it_useful = np.where(match_use_cache>0)[0]
    if len(it_useful)>0:                                               # a match was found
        match_cache = match_num[it_near_noself[it_useful]]             # pull out the matched sequence
        it_match_never = np.where(match_cache<0)[0]                    # matches not yet paired
        it_match_ever = np.where(match_cache>=0)[0]                    # matches already paired

        if match_num[i]<0 and len(it_match_never)==len(match_cache):   # self unpaired & no match paired yet
            match_num[i] = i
            match_num[it_near_noself[it_useful]] = i

        elif match_num[i]<0 and len(it_match_never)<len(match_cache):  # self unpaired & matches partly or fully paired
            min_match_num = min(match_cache[it_match_ever])
Code Example #33
 def functionStabilityPolynomial(self, A, b, c):
     s = np.size(b)
     e = np.ones((s, ))
     R = lambda z: 1 + z * np.dot(b,
                                  np.linalg.inv(np.eye(s) - z * A).dot(e))
     return np.vectorize(R)
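A standalone check of the formula (the method does not use self, so the tableau can be tested directly): for the classic RK4 coefficients, R(z) = 1 + z b (I - zA)^{-1} e must equal the degree-4 Taylor polynomial of exp(z).

import numpy as np

A = np.array([[0., 0., 0., 0.],
              [.5, 0., 0., 0.],
              [0., .5, 0., 0.],
              [0., 0., 1., 0.]])
b = np.array([1/6, 1/3, 1/3, 1/6])
s = np.size(b)
e = np.ones((s, ))
R = lambda z: 1 + z * np.dot(b, np.linalg.inv(np.eye(s) - z * A).dot(e))
z = 0.3
assert np.isclose(R(z), 1 + z + z**2/2 + z**3/6 + z**4/24)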
Code Example #34
  def process_mann_turb_spectra(self, casedir):
    ncbox={}
    with nc.Dataset(casedir+'/turbulence.nc') as d:
      #print(d.variables)
      ncbox['ndim'] = d.dimensions['ndim'].size
      ncbox['nx']   = d.dimensions['nx'].size
      ncbox['ny']   = d.dimensions['ny'].size
      ncbox['nz']   = d.dimensions['nz'].size
      ncbox['L']    = d.variables['box_lengths'][:]
      ncbox['dx']   = d.variables['dx'][:]
      ncbox['uvel'] = d.variables['uvel'][:,:,:]
      ncbox['vvel'] = d.variables['vvel'][:,:,:]
      ncbox['wvel'] = d.variables['wvel'][:,:,:]

    # Get the timei vector
    Uavg = 10.0
    tvec = ncbox['dx'][0]/Uavg*np.arange(ncbox['nx'])

    # Set the scaling factors
    ds=5.0
    eps=2.5 #2*ds
    gaussScale = np.sqrt(1.0/(eps*np.sqrt(np.pi)*ds))

    # Average over the entire inlet plane of turbulence
    Suu = []
    Svv = []
    Sww = []
    itotal = 0
    print("Working...")
    for i in range(ncbox['ny']):
        sys.stdout.write("\r%d%%" % int(i*100.0/ncbox['ny']))
        sys.stdout.flush()
        for j in range(ncbox['nz']):
            f, tSuu = windspectra.getWindSpectra(tvec, ncbox['uvel'][:,i,j])
            f, tSvv = windspectra.getWindSpectra(tvec, ncbox['vvel'][:,i,j])
            f, tSww = windspectra.getWindSpectra(tvec, ncbox['wvel'][:,i,j])
            Suu = tSuu if len(Suu) == 0 else Suu + tSuu
            Svv = tSvv if len(Svv) == 0 else Svv + tSvv
            Sww = tSww if len(Sww) == 0 else Sww + tSww
            itotal += 1
    Suu = Suu/float(itotal)
    Svv = Svv/float(itotal)
    Sww = Sww/float(itotal)
    print("")
    print("Done.")

    # Octave band average
    Nband=3
    Mannf, MannSuu = windspectra.NarrowToOctaveBand(f, Suu, Nband)
    Mannf, MannSvv = windspectra.NarrowToOctaveBand(f, Svv, Nband)
    Mannf, MannSww = windspectra.NarrowToOctaveBand(f, Sww, Nband)

    with nc.Dataset(casedir+'/avg_spectra.nc',mode='w',format='NETCDF3_CLASSIC') as d:
      d.createDimension("nfreq",np.size(Mannf))
      nc_f = d.createVariable("f","f8",("nfreq"))
      nc_suu = d.createVariable("suu","f8",("nfreq"))
      nc_svv = d.createVariable("svv","f8",("nfreq"))
      nc_sww = d.createVariable("sww","f8",("nfreq"))
      nc_f[:] = Mannf
      nc_suu[:] = MannSuu
      nc_svv[:] = MannSvv
      nc_sww[:] = MannSww
Code Example #35
    N_ell_P_LA = np.array([
        N_ell_P_27, N_ell_P_39, N_ell_P_93, N_ell_P_145, N_ell_P_225,
        N_ell_P_280, N_ell_P_27x39, N_ell_P_93x145, N_ell_P_225x280
    ])

    ####################################################################
    return (ell, N_ell_T_LA, N_ell_P_LA, Map_white_noise_levels)


####################################################################
####################################################################
##                   demonstration of the code
####################################################################
print("band centers: ", Simons_Observatory_V3_LA_bands(), "[GHz]")
print("beam sizes: ", Simons_Observatory_V3_LA_beams(), "[arcmin]")
N_bands = np.size(Simons_Observatory_V3_LA_bands())
beams = Simons_Observatory_V3_LA_beams()
beams_sigma_rad = beams / np.sqrt(8. * np.log(2)) / 60. * np.pi / 180.

## run the code to generate noise curves
fsky = 0.4
N_LF = 1.
N_MF = 1.
N_UHF = 1.
ellmax = 1e4
survey_time = 1.

mode = 1  # baseline sensitivity
ell, N_ell_LA_T, N_ell_LA_Pol, WN_levels = Simons_Observatory_V3_LA_noise(
    mode, fsky, ellmax, 1, N_LF, N_MF, N_UHF, survey_time)
np.save('LAT_pertube_peryear_T_noise_baseline.npy', [ell, N_ell_LA_T])
Code Example #36
import numpy as np
import scipy as sp
import pandas
import matplotlib.pyplot as plt
import csv as csv

csv_file_object = csv.reader(open('train.csv',newline=''))
header = csv_file_object.__next__()

data = []
for row in csv_file_object:
    data.append(row)
data = np.array(data)
# print(data[0])

number_passengers = np.size(data[0::, 1].astype(int))
number_survived = np.sum(data[0::, 1].astype(int))
proportion_survivors = number_survived / number_passengers
print("Proportion Survived: ", proportion_survivors)

women_only_stats = data[0::, 4] == "female"
men_only_stats = data[0::, 4] != "female"

women_onboard = data[women_only_stats, 1].astype(float)
men_onboard = data[men_only_stats, 1].astype(float)

num_women = len(women_onboard)
num_men = len(men_onboard)

# print("women_only_stats: ", women_only_stats)
# print("women_onboard: ", women_onboard)
Code Example #37
File: Q2.py  Project: taldan147/opt3
import numpy as np
from scipy.sparse import spdiags
import matplotlib.pyplot as plt
import numpy.linalg as LA

x = np.arange(0,5, 0.01)
n = np.size(x)
one = int(n / 5)
f = np.zeros(x.shape)
f[0:one] = 0.0 + 0.5*x[0:one]
f[one:2 * one] = 0.8 - 0.2 * np.log(x[one:2 * one])
f[(2*one):3*one] = 0.7 - 0.2*x[(2*one):3*one]
f[(3*one):4*one] = 0.3
f[(4*one):(5*one)] = 0.5 - 0.1*x[(4*one):(5*one)]

G = spdiags([-np.ones(n), np.ones(n)], np.array([0,1]), n-1,n).toarray()
etta = 0.1*np.random.randn(np.size(x))
y = f + etta
# plt.figure(); plt.plot(x,y, label = "noisy"); plt.plot(x,f, label = "clean"); plt.legend() ;plt.show()

# -------------------------------(a)-------------------------------------------------

# argmin ||x - y||^2 + (lambda/2)||Gx||^2  --->  x = (I + (lambda/2) G^T G)^{-1} y

lam = 100
GTG = np.transpose(G) @ G
M = np.eye(n) + lam/2 * GTG
x_min = LA.inv(M) @ y

plt.figure(); plt.plot(x, x_min, label = r'recovered using $\ell_2$ norm, $\lambda=100$'); plt.plot(x,f, label = "clean"); plt.legend() ;plt.show()
Code Example #38
File: test2.py  Project: nagyist/vigra-ilastik-05
def checkAboutSame(i1,i2):
    assert(i1.shape==i2.shape)
    difference=np.sum(np.abs(i1-i2))/float(np.size(i1))
    assert(difference<5)
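Usage sketch (images whose mean absolute difference stays below the threshold of 5 pass silently):

import numpy as np

img = np.zeros((8, 8))
checkAboutSame(img, img + 3)   # mean |difference| is 3 < 5, so no assertion fires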
Code Example #39
File: suntracer_cp.py  Project: mvtea/sgl
j1, j2 = np.mgrid[0:img_px,0:img_px]
x1 = -extent_img + j2 * xs
x2 = -extent_img + j1 * xs

y1, y2 = aux.pt_lens(x1, x2, xlens+0.1, ylens, mlens)
i2 = np.round((y1 + extent_src) / ys)
i1 = np.round((y2 + extent_src) / ys)

ind = (i1 >= 0) & (i1 < src_px) & (i2 >= 0) & (i2 < src_px)

i1n = i1[ind]
i2n = i2[ind]
j1n = j1[ind]
j2n = j2[ind]

for i in np.arange(np.size(i1n)):
    b[int(j1n[i]),int(j2n[i])] = src[int(i1n[i]),int(i2n[i])]
    c += 1


fov = float(c / (src_px * src_px))*100.0


### PLOT ###

fig = plt.figure(1)
ax = plt.subplot(121)
ax.imshow(aux.cgs(src_px,rpix,jpos,ipos), extent = (-extent_src,extent_src,-extent_src,extent_src),cmap='hot')
ax.set_title('Mag = 0')

ax = plt.subplot(122)
Code Example #40
dcn2 = np.convolve(normalized_cases, np.ones(days2) / days2, mode='same')

plt.plot(date, dco, color='blue')
plt.plot(date, dcn, color='red')

plt.bar(date, cases, width=1, alpha=0.5, color='blue', label="cases")
plt.bar(date,
        normalized_cases,
        width=0.5,
        alpha=0.5,
        color='red',
        label="Normalized for 300 000 tests")
plt.bar(free_test_date,
        free_test_case,
        width=1,
        alpha=0.8,
        color="green",
        label="Start of free testing")

xt = ax.get_xticks()

ax.set_xticks(np.arange(xt[0], xt[np.size(xt) - 1], 30))
plt.ylim(0, 10000)
plt.legend()
plt.grid()
plt.xlabel("Date")
plt.ylabel("number of positive tests")
plt.title("Number of positive corona tests in Sweden")
plt.savefig("cases.png")
plt.show()
Code Example #41
File: analise.py  Project: thatiany/radar_parlamentar
def partidos_expressivos(N=1,
                         data_inicial='2011-01-01',
                         data_final='2011-12-31',
                         tipos_proposicao=[]):
    """Retorna uma lista com os partidos com pelo menos N deputados diferentes que tenham vindo em votações entre as datas data_inicial e data_final. Consideram-se as proposições em tipos_proposição, ou todas se tipos_proposicao=[]."""
    # Build a dictionary with the party ids:
    con = lite.connect(Analise.db)
    tabela_partidos = con.execute(
        'select numero,nome from partidos').fetchall()
    idPartido = {}
    for tp in tabela_partidos:
        idPartido[tp[1]] = tp[0]

    # Build a list of all parties:
    lista_todos_partidos = con.execute('SELECT nome FROM PARTIDOS').fetchall()
    con.close()
    # Fetch votes from the db:
    a = Analise(data_inicial, data_final, tipos_proposicao)
    votacoes = a._buscaVotacoes()
    # Initialize variables
    tamanho_partidos = [0] * len(lista_todos_partidos)
    vetores_tamanho = numpy.zeros((len(lista_todos_partidos), a.num_votacoes))
    #Turn the list of tuples into a list of strings:
    i = 0
    for lp in lista_todos_partidos:
        lista_todos_partidos[i] = lp[0]
        i += 1
    # Compute the size of each party:
    ip = -1
    for p in lista_todos_partidos:
        ip += 1
        num_deputados = set(
        )  # Number of distinct deputies of a party that appeared in at least one vote in the period.
        iv = -1
        for v in votacoes:
            iv += 1
            # Count the deputies present:
            deps_presentes_list = [
                list(
                    numpy.array(eval(v[3]))[numpy.where(
                        numpy.array(eval(v[3])) // 100000 == idPartido[p])]) +
                list(
                    numpy.array(eval(v[4]))[numpy.where(
                        numpy.array(eval(v[4])) // 100000 == idPartido[p])]) +
                list(
                    numpy.array(eval(v[5]))[numpy.where(
                        numpy.array(eval(v[5])) // 100000 == idPartido[p])]) +
                list(
                    numpy.array(eval(v[6]))[numpy.where(
                        numpy.array(eval(v[6])) // 100000 == idPartido[p])])
            ]
            vetores_tamanho[ip][iv] = numpy.size(deps_presentes_list)
            for d in deps_presentes_list[0]:
                num_deputados.add(
                    d
                )  # duplicates do not enter the set twice, which lets tamanho_partidos be computed.
        tamanho_partidos[ip] = len(num_deputados)
    # Build the list of parties with at least N deputies:
    expressivos = []
    ip = -1
    for p in lista_todos_partidos:
        ip += 1
        if tamanho_partidos[ip] >= N:
            expressivos.append(p)
    return expressivos
Code Example #42
def plot_solution(sol, var, vp, x_n, y_n, x_m, y_m, n, m, r, s, I, K, T, L,
                  fig_name, problem):
    x_val = sol.get_value_dict(var["x"])
    y_val = sol.get_value_dict(var["y"])
    w_val = sol.get_value_dict(var["w"])
    if problem == "dynamic":
        v_val = sol.get_value_dict(var["v"])
    elif problem == "static":
        v_val = vp
    else:
        print("Error: problem must be static or dynamic")
        sys.exit(1)

    plt.figure(figsize=(10, 5))
    # breakpoint()
    plt.scatter(x_n[1:], y_n[1:], c='b', marker='s', label="user")
    plt.scatter(x_m[1:], y_m[1:], c='r', marker='s', label="CS")
    for i in K:
        plt.annotate('$%d$' % i, (x_m[i] - 0.02, y_m[i] + 0.025))

    plt.legend(loc="best")

    plt.axis('equal')

    for i in I:
        v_it = np.zeros((2, s + 1))
        for l in L:
            vi = np.zeros((m + 1, r + 1))
            wi = np.zeros((m + 1, r + 1))
            for k in K:
                for t in T:
                    vi[k, t] = v_val[i, k, t, l]
                    wi[k, t] = w_val[i, k, t, l]
            print(i)
            print(vi[1:, 1:])
            print(wi[1:, 1:])
            tm = np.nonzero(wi)
            print(tm)
            if np.size(tm) != 0:
                v_it[0, l] = tm[0]
                v_it[1, l] = tm[1]
            else:
                v_it[0, l] = 0
                v_it[1, l] = 0

            # print(v_it)

        plt.annotate('$%d$' % i, (x_n[i] - .02, y_n[i] + 0.025))
        v1 = v_it[0, 1:]
        v2 = v_it[1, 1:]
        # plt.annotate(str(list(v1.astype(np.int))), (x_n[i] + 0.02, y_n[i] - 0.01))
        plt.annotate(
            str(list(v1.astype(int))) + '\n' + str(list(v2.astype(int))),
            (x_n[i] + 0.02, y_n[i] - 0.01))

    [
        plt.annotate("x_{0}_{1}_{2}".format(i, k, t),
                     (x_n[i] - 0.002, y_n[i] - 0.04)) for i in I for k in K
        for t in T if x_val[i, k, t] == 1
    ]
    [
        plt.annotate("y_{0} = %d".format(k) % y_val[k],
                     (x_m[k] - 0.002, y_m[k] - 0.04),
                     c='r') for k in K if y_val[k] != 0
    ]
    # plt.axis('equal')
    plt.savefig(fig_name, bbox_inches='tight')
    plt.show(block=False)
    plt.close(fig_name)
Code Example #43
File: finder.py  Project: MxEntropic/lipidsaxs
def finder(file_name,lower_limit,upper_limit, Ganesha=False,DLS=False,plot=False,savefig=False,savedir=os.path.dirname(os.path.realpath(__file__))):
    
    try:
        if Ganesha==True:
            delim_str=','
            ht_threshold=0.0001
            
        if DLS==True:
            delim_str='\t'
            ht_threshold=0.1
            
        #get the data from the file
        table=np.genfromtxt(file_name,delimiter=delim_str,skip_header=10)
        
        #cut out the x and y data defined by the q range.
        x_data=table[np.intersect1d(np.where(table[0:,0]>lower_limit),np.where(table[0:,0]<upper_limit)),0]
        y_data=table[np.intersect1d(np.where(table[0:,0]>lower_limit),np.where(table[0:,0]<upper_limit)),1]
    
        #the number of data points to trial fits across
        fitting_range=10
        
        #attempt to fit the data across a moving window of the q range of interest. This will find peaks multiple times over.
        peaks=np.zeros(0)
        for i in range(0,(np.where(table[0:,0]<upper_limit)[-1][-1]-np.where(table[0:,0]>lower_limit)[0][0]-fitting_range)):
            x=x_data[i:(i+fitting_range)]
            y=y_data[i:(i+fitting_range)]
            
            result=fitting(x,y,np.mean(x),height_threshold=ht_threshold)
            
            if result != 0:
                peaks=np.append(peaks, result[0])
        
        if len(peaks)>0:    
            #define the minimum separation between peaks - otherwise the binning of the data will put separate peaks into one bin.
            #bin the peaks found during the fitting procedure
            hist, bin_edges=np.histogram(peaks,bins=np.arange(min(peaks), max(peaks) + 0.005, 0.005))
            inds=np.digitize(peaks,bin_edges)
            
            returning_peaks=np.zeros(0)
            for i in range(0, np.size(np.arange(min(peaks), max(peaks) + 0.005, 0.005))):
                try:
                    #look forwards and backwards to catch each bin in case the values have leaked between boundaries
                    previous_bin=peaks[np.where(inds==(i-1))]
                    this_bin=peaks[np.where(inds==i)]
                    next_bin=peaks[np.where(inds==(i+1))]
                    
                    #if two bins are next to each other, group them together and average those values to return
                    if len(this_bin)>0 and len(previous_bin)>0 and len(next_bin)==0:
                        conc_bin=np.concatenate((this_bin,previous_bin))
                        returning_peaks=np.append(returning_peaks,np.mean(conc_bin))
                    
                    #otherwise just average the bin and return it as the peak.
                    elif len(this_bin)>0 and len(previous_bin)==0 and len(next_bin)==0:
                        returning_peaks=np.append(returning_peaks,np.mean(this_bin))
                        
                except IndexError:
                    pass
                
            if plot==True:
                plt.plot(x_data,y_data)
                for i in returning_peaks:
                    plt.axvline(i,c='r')
                plt.xlabel('$q$ (Å$^{-1}$)')
                plt.ylabel('Intensity (A.U.)')
                if savefig==True:
                    name=file_name.split('\\')[-1][:-4]
                    plt.savefig(savedir+'/'+name+'.png',dpi=200)
                plt.show()
                plt.clf()
                
            return returning_peaks
        else:
            return 0
    except UnboundLocalError:
        print('Error! You must tell the programme where the data was collected in order to use the peak finder.')
Code Example #44
 def __init__(self, dataset, normalizer, theta_generator):
     self.__dataset = dataset
     self.__normalizer = normalizer
     self.__theta_generator = theta_generator
     self.__theta = np.zeros((np.size(self.__dataset, 1), 1))
Code Example #45
def Cost(parameters,
         Y_desired,
         A,
         X_init,
         Q,
         var_Q,
         alpha=1.0,
         specified_time=None,
         beta=5.0,
         gamma=1.0,
         nu=1.0,
         mode=QUADRATIC_EXACT,
         margin=None):
    # Sanity checks.
    assert alpha >= 0.0 and beta >= 0.0
    assert (alpha == 0.0) == (specified_time is not None)
    assert (beta == 0.0) == (nu is None)
    assert (mode in (QUADRATIC_EXACT, ABSOLUTE_EXACT)) == (margin is None)

    # Prepare variable depending on whether t part of the parameters.
    num_nodes = A.shape[0]
    num_species = X_init.shape[1]
    num_traits = Q.shape[1]
    if specified_time is None:
        t = parameters[-1]
        num_parameters_i = int((np.size(parameters) - 1) / num_species)
        grad_all = np.zeros(np.size(parameters))
    else:
        t = specified_time
        num_parameters_i = int(np.size(parameters) / num_species)
        grad_all = np.zeros(np.size(parameters))

    # Reshape adjacency matrix to make sure.
    Adj = A.astype(float).reshape((num_nodes, num_nodes))
    Adj_flatten = Adj.flatten().astype(bool)  # Flatten boolean version.

    # Loop through the species to compute the cost value.
    # At the same time, prepare the different matrices.
    Ks = []  # K_s
    Z_0 = []  # Z^{(s)}(0)
    eigenvalues = []  # w
    eigenvectors = []  # V.T
    eigenvectors_inverse = []  # U.T
    exponential_wt = []  # exp(eigenvalues * t).
    x_matrix = []  # Pre-computed X matrices.
    x0s = []  # Avoids reshaping.
    qs = []  # Avoids reshaping.
    xts = []  # Keeps x_s(t).
    inside_norm = np.zeros(
        (num_nodes,
         num_traits))  # Will hold the value prior to using the norm.
    for s in range(num_species):
        x0 = X_init[:, s].reshape((num_nodes, 1))
        z_0 = np.zeros(X_init.shape)
        z_0[:, s] = x0.reshape(num_nodes)
        Z_0.append(z_0)
        q = Q[s, :].reshape((1, num_traits))
        x0s.append(x0)
        qs.append(q)
        k_ij = parameters[s * num_parameters_i:(s + 1) * num_parameters_i]
        # Create K from individual k_{ij}.
        K = np.zeros(Adj_flatten.shape)
        K[Adj_flatten] = k_ij
        K = K.reshape((num_nodes, num_nodes))
        np.fill_diagonal(K, -np.sum(K, axis=0))
        # Store K.
        Ks.append(K)
        # Perform eigen-decomposition to compute matrix exponential.
        w, V = scipy.linalg.eig(K, right=True)
        U = scipy.linalg.inv(V)
        wt = w * t
        exp_wt = np.exp(wt)
        xt = Expm(V, exp_wt, U).dot(x0)
        inside_norm += xt.dot(q)
        # Store the transpose of these matrices for later use.
        eigenvalues.append(w)
        eigenvectors.append(V.T)
        eigenvectors_inverse.append(U.T)
        exponential_wt.append(exp_wt)
        xts.append(xt)
        # Pre-build X matrix.
        with warnings.catch_warnings():
            warnings.simplefilter(
                'ignore',
                RuntimeWarning)  # We don't care about 0/0 on the diagonal.
            X = np.subtract.outer(exp_wt,
                                  exp_wt) / (np.subtract.outer(wt, wt) + 1e-10)
        np.fill_diagonal(X, exp_wt)
        x_matrix.append(X)
    inside_norm -= Y_desired

    # Compute the trait mismatch cost depending on mode.
    derivative_outer_norm = None  # Holds the derivative of inside_norm (except the multiplication by (x0 * q)^T).
    if mode == ABSOLUTE_AT_LEAST:
        derivative_outer_norm = -inside_norm + margin
        value = np.sum(np.maximum(derivative_outer_norm, 0))
        derivative_outer_norm = -(derivative_outer_norm > 0).astype(
            float)  # Keep only 1s for when it's larger than margin.
    elif mode == ABSOLUTE_EXACT:
        abs_inside_norm = np.abs(inside_norm)
        index_zeros = abs_inside_norm < 1e-10
        value = np.sum(np.abs(inside_norm))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore',
                                  RuntimeWarning)  # We don't care about 0/0.
            derivative_outer_norm = inside_norm / abs_inside_norm  # Keep only 1s for when it's larger than 0 and -1s for when it's lower.
        derivative_outer_norm[index_zeros] = 0  # Make sure we set 0/0 to 0.
    elif mode == QUADRATIC_AT_LEAST:
        derivative_outer_norm = -inside_norm + margin
        value = np.sum(np.square(np.maximum(derivative_outer_norm, 0)))
        index_negatives = derivative_outer_norm < 0
        derivative_outer_norm *= -2.0
        derivative_outer_norm[
            index_negatives] = 0  # Don't propagate gradient on negative values.
    elif mode == QUADRATIC_EXACT:
        value = np.sum(np.square(inside_norm))
        derivative_outer_norm = 2.0 * inside_norm

    # compute cost to minimize time (if desired)
    value += alpha * (t**2)

    # Calculate gradient w.r.t. the transition matrix for each species
    for s in range(num_species):
        # Build gradient w.r.t. inside_norm of cost.
        top_grad = np.dot(derivative_outer_norm, np.dot(x0s[s], qs[s]).T)
        # Build gradient w.r.t. Exp(K * t).
        middle_grad = eigenvectors_inverse[s].dot(
            eigenvectors[s].dot(top_grad).dot(eigenvectors_inverse[s]) *
            x_matrix[s]).dot(eigenvectors[s])
        # Build gradient w.r.t. K
        bottom_grad = middle_grad * t
        # Finally, propagate gradient to individual k_ij.
        grad = bottom_grad - np.diag(bottom_grad)
        grad = grad.flatten()[Adj_flatten]  # Reshape.
        grad_all[s * num_parameters_i:(s + 1) * num_parameters_i] += np.array(
            np.real(grad))
        # Build gradient w.r.t. t (if desired)
        if specified_time is None:
            grad_all[-1] += np.real(np.sum(Ks[s] * middle_grad))

    # Gradient of alpha * t^2 w.r.t. t
    if specified_time is None:
        grad_all[-1] += 2.0 * t * alpha

    # Forcing the steady state.
    # We add a cost for keeping X(t) and X(t + nu) the same. We use the quadratic norm for this sub-cost.
    # The larger beta and the larger nu, the closer to steady state.
    if beta > 0.0:
        for s in range(num_species):
            # Compute exp of the eigenvalues of K * (t + nu).
            wtdt = eigenvalues[s] * (t + nu)
            exp_wtdt = np.exp(wtdt)
            # Compute x_s(t) - x_s(t + nu) for that species.
            # Note that since we store V.T and U.T, we do (U.T * D * V.T).T == V * D * U
            inside_norm = xts[s] - Expm(eigenvectors_inverse[s], exp_wtdt,
                                        eigenvectors[s]).T.dot(x0s[s])
            # Increment value.
            value += beta * np.sum(np.square(inside_norm))

            # Compute gradient on the first part of the cost: e^{Kt} x0 (we use the same chain rule as before).
            top_grad = 2.0 * beta * np.dot(inside_norm, x0s[s].T)
            store_inner_product = eigenvectors[s].dot(top_grad).dot(
                eigenvectors_inverse[s])  # Store to re-use.
            middle_grad = eigenvectors_inverse[s].dot(
                store_inner_product * x_matrix[s]).dot(eigenvectors[s])
            bottom_grad = middle_grad * t
            grad = bottom_grad - np.diag(bottom_grad)
            grad = grad.flatten()[Adj_flatten]  # Reshape.
            grad_all[s * num_parameters_i:(s + 1) *
                     num_parameters_i] += np.array(np.real(grad))
            if specified_time is None:
                grad_all[-1] += np.real(np.sum(Ks[s] * middle_grad))

            # Compute gradient on the second part of the cost: e^{K(t + nu)} x0 (we use the same chain rule as before).
            # Compute X for e^{K(t + nu)}.
            with warnings.catch_warnings():
                warnings.simplefilter(
                    'ignore',
                    RuntimeWarning)  # We don't care about 0/0 on the diagonal.
                X = np.subtract.outer(exp_wtdt, exp_wtdt) / (
                    np.subtract.outer(wtdt, wtdt) + 1e-10)
            np.fill_diagonal(X, exp_wtdt)
            # top_grad = 2.0 * beta * np.dot(inside_norm, x0s[s].T) [same as before but needs to be negated].
            middle_grad = -eigenvectors_inverse[s].dot(
                store_inner_product * X).dot(eigenvectors[s])
            bottom_grad = middle_grad * (t + nu)
            grad = bottom_grad - np.diag(bottom_grad)
            grad = grad.flatten()[Adj_flatten]  # Reshape.
            grad_all[s * num_parameters_i:(s + 1) *
                     num_parameters_i] += np.array(np.real(grad))
            if specified_time is None:
                grad_all[-1] += np.real(np.sum(Ks[s] * middle_grad))

    # Minimize variance
    if gamma > 0.0:
        # Compute the variance cost
        X_t = np.squeeze(np.asarray(xts)).T  # convert the list to an array
        var_Y = np.dot(np.multiply(X_t, X_t), var_Q)  # compute Var(Y)
        value += gamma * np.sum(
            np.square(var_Y)
        )  # add the square of the Frobenius norm of Var(Y) to the cost
        # derivative_outer_norm = 2.0 * gamma * var_Y

        # Calculate gradient of third part of the cost w.r.t. the transition matrix for each species
        for s in range(num_species):
            # Build gradient w.r.t. inside_norm of variance.
            # top_grad = 2.0 * np.dot(derivative_outer_norm, np.dot(np.multiply(Z_0[s], X_t), var_Q).T)
            top_grad = 4 * gamma * np.dot(
                np.multiply(np.dot(var_Y, var_Q.T), X_t), Z_0[s].T)
            # Build gradient w.r.t. Exp(K * t).
            middle_grad = eigenvectors_inverse[s].dot(
                eigenvectors[s].dot(top_grad).dot(eigenvectors_inverse[s]) *
                x_matrix[s]).dot(eigenvectors[s])
            # Build gradient w.r.t. K
            bottom_grad = middle_grad * t
            # Finally, propagate gradient to individual k_ij.
            grad = bottom_grad - np.diag(bottom_grad)
            grad = grad.flatten()[Adj_flatten]  # Reshape.
            grad_all[s * num_parameters_i:(s + 1) *
                     num_parameters_i] += np.array(np.real(grad))
            # Build gradient w.r.t. t (if desired)
            if specified_time is None:
                grad_all[-1] += np.real(np.sum(Ks[s] * middle_grad))
        """
        # BEGIN OLD CODE 
        
        grad_xts_t = np.zeros(np.shape(X_t))
        for s in range(num_species):
            # Compute gradients of variance cost w.r.t. each transition rate
            wt = eigenvalues[s].real * t
            exp_wt = np.exp(wt)
            mat_1 = np.diag(exp_wt) * t
            temp_mat_2 = np.tile(exp_wt, [num_nodes, 1])
            temp_mat_3 = np.tile(eigenvalues[s].real, [num_nodes, 1])
            mat_2 = temp_mat_2.T - temp_mat_2
            mat_3 = (temp_mat_3.T - temp_mat_3)
            np.fill_diagonal(mat_3, np.ones(num_nodes))

            temp_idx = mat_3[mat_3 == 0]
            if temp_idx.size:
                flag = 1

            mat_3 = 1 / mat_3
            np.fill_diagonal(mat_3, np.zeros(num_nodes))

            grad_norm_varY_K = np.zeros((num_nodes, num_nodes))
            for i in range(num_nodes):
                for j in range(num_nodes):
                    if Adj[i][j]:
                        G_ij = np.outer(eigenvectors_inverse[s][i].real, eigenvectors[s].T[j].real)
                        V_ij = np.multiply(G_ij, (mat_1 + np.multiply(mat_2, mat_3)))
                        grad_xts_K_ij = np.dot(np.dot(np.dot(eigenvectors[s].T.real, V_ij),  
                                                eigenvectors_inverse[s].T.real),   Z_0[s])
                        grad_varY_K_ij = 2 * np.dot(np.multiply(grad_xts_K_ij, X_t), var_Q)
                        grad_norm_varY_K[i][j] = 2 * gamma * np.trace(np.dot(var_Y.T, grad_varY_K_ij)).real

            grad_norm_varY_K = grad_norm_varY_K.flatten()[Adj_flatten]  # Reshape.
            grad_all[s * num_parameters_i:(s + 1) * num_parameters_i] += np.array(np.real(grad_norm_varY_K))

            # Compute the gradient each species' distribution of w.r.t. time
            if specified_time is None:
                grad_xts_t += np.dot(np.multiply(Expm(eigenvectors_inverse[s].real, exp_wt, eigenvectors[s].real).T, 
                                                Ks[s]), Z_0[s])

        # Compute gradients of variance cost w.r.t. time
        if specified_time is None:
            grad_varY_t = 2 * np.dot(np.multiply(grad_xts_t, X_t), var_Q)
            grad_all[-1] += 2 * gamma * np.trace(np.dot(var_Y.T, grad_varY_t))
        
        # END OLD CODE
        """

    return [value, grad_all]
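
# --- Added usage sketch (not in the original): since Cost returns
# [value, grad_all], it plugs into a gradient-based optimizer via jac=True.
# Sizes and data below are illustrative stand-ins, and Expm plus the mode
# constants are assumed to be defined earlier in the module.
import numpy as np
import scipy.optimize

num_nodes, num_species, num_traits = 4, 2, 3
A = np.ones((num_nodes, num_nodes)) - np.eye(num_nodes)  # fully connected graph
X_init = np.random.rand(num_nodes, num_species)
Q = np.random.rand(num_species, num_traits)
var_Q = np.random.rand(num_species, num_traits)
Y_desired = X_init.dot(Q)

num_edges = int(np.sum(A))  # one k_ij per directed edge, per species
p0 = np.concatenate([np.random.rand(num_edges * num_species), [1.0]])  # last entry is t

def fun(p):
    value, grad = Cost(p, Y_desired, A, X_init, Q, var_Q)
    return np.real(value), grad  # the eigen-decomposition can leave a tiny imaginary residue

res = scipy.optimize.minimize(fun, p0, jac=True, method='L-BFGS-B',
                              bounds=[(0.0, 10.0)] * (num_edges * num_species) + [(1e-3, 100.0)])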
Code example #46
File: analise.py  Project: thatiany/radar_parlamentar
    def _inicializa_vetores(self):
        """Cria os 'vetores' e 'quadrivetores' votação agregados por partido. Aproveita para calcular o tamanho dos partidos, presença dos deputados, etc.
        O 'vetor' usa um número entre -1 (não) e 1 (sim) para representar a posição global do partido em cada votação, sendo o vetor em si um de dimensão N formado pelas N votações.
        O 'quadrivetor' usa uma tupla de 4 inteiros para representar a posição do partido em cada votação, os inteiros são o número de deputados que votaram sim, não, abstenção e obstrução. O quadrivetor em si é um vetor com N destas tuplas."""
        # Fetch the votes from the DB:
        votacoes = self._buscaVotacoes()
        # Build a dictionary with the party ids
        con = lite.connect(Analise.db)
        tabela_partidos = con.execute(
            'select numero,nome from partidos').fetchall()
        idPartido = {}
        for tp in tabela_partidos:
            idPartido[tp[1]] = tp[0]

        self.vetores_votacao = numpy.zeros(
            (len(self.lista_partidos), self.num_votacoes))
        self.quadrivet_vot = numpy.empty(
            (len(self.lista_partidos), self.num_votacoes), dtype=object)
        self.vetores_tamanho = numpy.zeros(
            (len(self.lista_partidos), self.num_votacoes))
        self.vetores_presenca = numpy.zeros(
            (len(self.lista_partidos), self.num_votacoes))
        self.tamanho_partidos = [0] * len(self.lista_partidos)
        ip = -1
        for p in self.lista_partidos:
            ip += 1
            num_deputados = set(
            )  # Number of distinct deputies of a party who appeared in at least one vote in the period.
            iv = -1
            for v in votacoes:
                iv += 1
                nsim = numpy.where((numpy.array(eval(v[3])) /
                                    100000) == idPartido[p])[0].size
                nnao = numpy.where((numpy.array(eval(v[4])) /
                                    100000) == idPartido[p])[0].size
                nabs = numpy.where((numpy.array(eval(v[5])) /
                                    100000) == idPartido[p])[0].size
                nobs = numpy.where((numpy.array(eval(v[6])) /
                                    100000) == idPartido[p])[0].size
                ntot = nsim + nnao + nabs + nobs
                self.quadrivet_vot[ip][iv] = (nsim, nnao, nabs, nobs)
                if ntot != 0:
                    self.vetores_votacao[ip][iv] = (float(nsim) -
                                                    float(nnao)) / float(ntot)
                else:
                    self.vetores_votacao[ip][iv] = 0

                # Count the deputies present:
                deps_presentes_list = [
                    list(
                        numpy.array(eval(v[3]))[numpy.where(
                            numpy.array(eval(v[3])) / 100000 == idPartido[p])])
                    + list(
                        numpy.array(eval(v[4]))[numpy.where(
                            numpy.array(eval(v[4])) / 100000 == idPartido[p])])
                    + list(
                        numpy.array(eval(v[5]))[numpy.where(
                            numpy.array(eval(v[5])) / 100000 == idPartido[p])])
                    + list(
                        numpy.array(eval(v[6]))[numpy.where(
                            numpy.array(eval(v[6])) / 100000 == idPartido[p])])
                ]
                #                deps_presentes_list = numpy.reshape(deps_presentes_list,(1,numpy.size(deps_presentes_list)))
                self.vetores_tamanho[ip][iv] = numpy.size(deps_presentes_list)
                for d in deps_presentes_list[0]:
                    num_deputados.add(
                        d
                    )  # duplicates will not enter the set twice, which lets us compute tamanho_partidos.
            self.tamanho_partidos[ip] = len(num_deputados)
            # Compute vetores_presenca:
            ivv = -1
            for v in votacoes:
                ivv += 1
                self.vetores_presenca[ip][ivv] = self.vetores_tamanho[ip][
                    ivv] / self.tamanho_partidos[ip]
        return
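
# --- Added worked example (not in the original): reading one 'vector' entry.
# A party whose deputies split 10 yes / 5 no / 2 abstention / 3 obstruction on
# a single roll call gets:
nsim, nnao, nabs, nobs = 10, 5, 2, 3
ntot = nsim + nnao + nabs + nobs
print((float(nsim) - float(nnao)) / float(ntot))  # 0.25, i.e. mildly in favour
# and the matching 'quad-vector' entry is simply the tuple (10, 5, 2, 3)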
Code example #47
import numpy as np
from scipy import integrate
from scipy.special import kv  # modified Bessel function of the second kind

font_size = 25

#weight_photon = gamma
# the histogram of the data
#E_s=4.1e5
#gg = 100.

eta_list = np.logspace(-5, 1, 100)
#chi_min  = np.logspace(-19,-7,100)
chi_min = np.logspace(np.log10(5e-18), np.log10(5e-7), 100)
print(chi_min)

f_qed = open('ksi_sokolov.qed', 'w')
f_qed.write('100\t 100\t -5\t 1\n')  # python will convert \n to os.linesep

for i in range(np.size(eta_list)):
    eta = eta_list[i]
    #    bin_chi  = np.logspace(np.log10(1e-8*eta),np.log10(0.499999999*eta),2000)
    #    grid_chi = np.logspace(np.log10(1e-8*eta),np.log10(0.499999999*eta),2001)
    chi_min_i = chi_min[i]
    bin_chi = np.logspace(np.log10(chi_min_i), np.log10(0.499999999 * eta),
                          2000)
    grid_chi = np.logspace(np.log10(chi_min_i), np.log10(0.499999999 * eta),
                           2001)
    chi = bin_chi
    y = 4 * chi / (3 * eta * (eta - 2 * chi))
    F_chi = np.zeros_like(chi)
    for j in range(np.size(chi)):
        result = integrate.quad(lambda x: kv(5. / 3., x), y[j], 1e3 * y[j])
        F_chi[j] = 4 * chi[j]**2 / eta**2 * y[j] * kv(
            2. / 3., y[j]) + (1 - 2 * chi[j] / eta) * y[j] * result[0]
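
# --- Added sanity check (not in the original snippet): the quad call above
# truncates the K_{5/3} integral at 1e3*y; because kv decays exponentially,
# the neglected tail is tiny unless y itself is very small.
y0 = 1e-3
truncated = integrate.quad(lambda x: kv(5. / 3., x), y0, 1e3 * y0)[0]
full = integrate.quad(lambda x: kv(5. / 3., x), y0, np.inf)[0]
print(truncated, full, 1 - truncated / full)  # relative tail; shrinks rapidly as y0 grows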
Code example #48
File: input_loader.py  Project: mdelsole/NEXUS
import numpy as np


horizontal = 10*[0.0] + 5*[1.0] + 10*[0.0]
vertical   = 5*[0.0, 0.0, 1.0, 0.0, 0.0]
leftdiag   = [1.0, 0.0, 0.0, 0.0, 0.0,
              0.0, 1.0, 0.0, 0.0, 0.0,
              0.0, 0.0, 1.0, 0.0, 0.0,
              0.0, 0.0, 0.0, 1.0, 0.0,
              0.0, 0.0, 0.0, 0.0, 1.0]
rightdiag  = [0.0, 0.0, 0.0, 0.0, 1.0,
              0.0, 0.0, 0.0, 1.0, 0.0,
              0.0, 0.0, 1.0, 0.0, 0.0,
              0.0, 1.0, 0.0, 0.0, 0.0,
              1.0, 0.0, 0.0, 0.0, 0.0]

horizontal_2d = np.reshape(horizontal,(-1,int(np.size(horizontal)**(1/2))))
vertical_2d = np.reshape(vertical,(-1,int(np.size(vertical)**(1/2))))
leftdiag_2d = np.reshape(leftdiag,(-1,int(np.size(leftdiag)**(1/2))))
rightdiag_2d = np.reshape(rightdiag,(-1,int(np.size(rightdiag)**(1/2))))


Code example #49
def getCost(X, y, weights):
    m = np.size(X,0)
    hx = X.dot(weights)
    cost = (1/(2*m)) * np.sum((hx[:,0] - y)**2)
    return cost
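
# --- Added usage sketch (not in the original; data and learning rate are
# illustrative). getCost expects X to carry a leading bias column, and pairs
# naturally with a batch gradient-descent update on the same cost:
import numpy as np

X = np.column_stack([np.ones(4), np.array([1.0, 2.0, 3.0, 4.0])])  # bias + one feature
y = np.array([2.0, 4.0, 6.0, 8.0])
weights = np.zeros((2, 1))
print(getCost(X, y, weights))  # 15.0 for this toy set

m = np.size(X, 0)
alpha = 0.1  # learning rate (illustrative)
grad = (1 / m) * X.T.dot(X.dot(weights)[:, 0] - y).reshape(-1, 1)
weights -= alpha * grad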
Code example #50
def cost_without_grad(parameters,
                      Y_desired,
                      A,
                      X_init,
                      Q,
                      var_Q,
                      alpha=1.0,
                      specified_time=None,
                      beta=5.0,
                      gamma=1.0,
                      nu=1.0,
                      mode=QUADRATIC_EXACT,
                      margin=None):
    # Sanity checks.
    assert alpha >= 0.0 and beta >= 0.0
    assert (alpha == 0.0) == (specified_time is not None)
    assert (beta == 0.0) == (nu is None)
    assert (mode in (QUADRATIC_EXACT, ABSOLUTE_EXACT)) == (margin is None)

    # Prepare variable depending on whether t part of the parameters.
    num_nodes = A.shape[0]
    num_species = X_init.shape[1]
    num_traits = Q.shape[1]
    if specified_time is None:
        t = parameters[-1]
        num_parameters_i = int((np.size(parameters) - 1) / num_species)
        grad_all = np.zeros(np.size(parameters))
    else:
        t = specified_time
        num_parameters_i = int(np.size(parameters) / num_species)
        grad_all = np.zeros(np.size(parameters))

    # Reshape adjacency matrix to make sure.
    Adj = A.astype(float).reshape((num_nodes, num_nodes))
    Adj_flatten = Adj.flatten().astype(bool)  # Flatten boolean version.

    # Loop through the species to compute the cost value.
    # At the same time, prepare the different matrices.
    Ks = []  # K_s
    Z_0 = []  # Z^{(s)}(0)
    eigenvalues = []  # w
    eigenvectors = []  # V.T
    eigenvectors_inverse = []  # U.T
    exponential_wt = []  # exp(eigenvalues * t).
    x_matrix = []  # Pre-computed X matrices.
    x0s = []  # Avoids reshaping.
    qs = []  # Avoids reshaping.
    xts = []  # Keeps x_s(t).
    inside_norm = np.zeros(
        (num_nodes,
         num_traits))  # Will hold the value prior to using the norm.
    for s in range(num_species):
        x0 = X_init[:, s].reshape((num_nodes, 1))
        z_0 = np.zeros(X_init.shape)
        z_0[:, s] = x0.reshape(num_nodes)
        Z_0.append(z_0)
        q = Q[s, :].reshape((1, num_traits))
        x0s.append(x0)
        qs.append(q)
        k_ij = parameters[s * num_parameters_i:(s + 1) * num_parameters_i]
        # Create K from individual k_{ij}.
        K = np.zeros(Adj_flatten.shape)
        K[Adj_flatten] = k_ij
        K = K.reshape((num_nodes, num_nodes))
        np.fill_diagonal(K, -np.sum(K, axis=0))
        # Store K.
        Ks.append(K)
        # Perform eigen-decomposition to compute matrix exponential.
        w, V = scipy.linalg.eig(K, right=True)
        U = scipy.linalg.inv(V)
        wt = w * t
        exp_wt = np.exp(wt)
        xt = Expm(V, exp_wt, U).dot(x0)
        inside_norm += xt.dot(q)
        # Store the transpose of these matrices for later use.
        eigenvalues.append(w)
        eigenvectors.append(V.T)
        eigenvectors_inverse.append(U.T)
        exponential_wt.append(exp_wt)
        xts.append(xt)
        # Pre-build X matrix.
        with warnings.catch_warnings():
            warnings.simplefilter(
                'ignore',
                RuntimeWarning)  # We don't care about 0/0 on the diagonal.
            X = np.subtract.outer(exp_wt,
                                  exp_wt) / (np.subtract.outer(wt, wt) + 1e-10)
        np.fill_diagonal(X, exp_wt)
        x_matrix.append(X)
    inside_norm -= Y_desired

    # Compute the final cost value depending on mode.
    derivative_outer_norm = None  # Holds the derivative of inside_norm (except the multiplication by (x0 * q)^T).
    if mode == ABSOLUTE_AT_LEAST:
        derivative_outer_norm = -inside_norm + margin
        value = np.sum(np.maximum(derivative_outer_norm, 0))
        derivative_outer_norm = -(derivative_outer_norm > 0).astype(
            float)  # Keep only 1s for when it's larger than margin.
    elif mode == ABSOLUTE_EXACT:
        abs_inside_norm = np.abs(inside_norm)
        index_zeros = abs_inside_norm < 1e-10
        value = np.sum(np.abs(inside_norm))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore',
                                  RuntimeWarning)  # We don't care about 0/0.
            derivative_outer_norm = inside_norm / abs_inside_norm  # Keep only 1s for when it's larger than 0 and -1s for when it's lower.
        derivative_outer_norm[index_zeros] = 0  # Make sure we set 0/0 to 0.
    elif mode == QUADRATIC_AT_LEAST:
        derivative_outer_norm = -inside_norm + margin
        value = np.sum(np.square(np.maximum(derivative_outer_norm, 0)))
        index_negatives = derivative_outer_norm < 0
        derivative_outer_norm *= -2.0
        derivative_outer_norm[
            index_negatives] = 0  # Don't propagate gradient on negative values.
    elif mode == QUADRATIC_EXACT:
        value = np.sum(np.square(inside_norm))
        derivative_outer_norm = 2.0 * inside_norm
    value += alpha * (t**2)

    # Forcing the steady state.
    # We add a cost for keeping X(t) and X(t + nu) the same. We use the quadratic norm for this sub-cost.
    # The larger beta and the larger nu, the closer to steady state.
    if beta > 0.0:
        for s in range(num_species):
            # Compute exp of the eigenvalues of K * (t + nu).
            wtdt = eigenvalues[s] * (t + nu)
            exp_wtdt = np.exp(wtdt)
            # Compute x_s(t) - x_s(t + nu) for that species.
            # Note that since we store V.T and U.T, we do (U.T * D * V.T).T == V * D * U
            inside_norm = xts[s] - Expm(eigenvectors_inverse[s], exp_wtdt,
                                        eigenvectors[s]).T.dot(x0s[s])
            # Increment value.
            value += beta * np.sum(np.square(inside_norm))

    # Minimize the variance of trait distribution
    if gamma > 0.0:
        # Compute the cost
        X_t = np.squeeze(np.asarray(xts)).T  # convert the list to an array
        var_Y = np.dot(np.multiply(X_t, X_t), var_Q)  # compute Var(Y)
        value += gamma * np.sum(
            np.square(var_Y)
        )  # add the square of the Frobenius norm of Var(Y) to the cost

    return value
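
# --- Added sketch (not in the original): cost_without_grad returns the same
# scalar as Cost but skips the gradient work, so it lends itself to a
# central-difference check of Cost's analytic gradient.
import numpy as np

def check_gradient(p, args, eps=1e-6):
    """Return the max discrepancy between Cost's gradient and finite differences."""
    _, grad = Cost(p, *args)
    num = np.zeros_like(p)
    for i in range(p.size):
        dp = np.zeros_like(p)
        dp[i] = eps
        num[i] = (np.real(cost_without_grad(p + dp, *args)) -
                  np.real(cost_without_grad(p - dp, *args))) / (2 * eps)
    return np.max(np.abs(grad - num))

# e.g. check_gradient(p0, (Y_desired, A, X_init, Q, var_Q)) with the stand-in
# setup from the Cost sketch above; the discrepancy should be tiny.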
Code example #51
#Need to make it so that it runs over a certain array of sizes:
#Length of the edge of a box
#(float dtype: the original int64 cast would truncate 2.5, 1.25 and 0.625 to 2, 1 and 0)
length_variable = np.array([10, 5, 2.5, 1.25, 0.625])
#Total Number of Sub regions
array_split_size_sub = [1, 4, 16, 64, 256]

#Loop over indices: iterating over the values themselves makes poor array indices,
#and rebinding array_split_size_sub inside the loop would break later iterations.
for n in range(len(length_variable)):

    array_split_size = length_variable[n] * 25.6  #Value for blocks
    n_sub = array_split_size_sub[n]
    array_split_size_sub_tot = array_split_size // n_sub
    arr_reshape = blockshaped(arr, array_split_size, array_split_size)

    #Callable Sizing Value
    array_size = np.size(arr_reshape[0])
    #Plane Fitting Process

    #Creating x,y coordinates for blocks of data
    xvalues = np.arange(0, data_length)
    yvalues = np.arange(0, data_length)

    xx, yy = np.meshgrid(xvalues, yvalues)

    xx = blockshaped(xx, array_split_size, array_split_size)
    yy = blockshaped(yy, array_split_size, array_split_size)

    #Importing Plane Fit Script here.
    from plane_fit import plane_fit

    #Making z of zeros for plane fit script
Code example #52
p = np.array([a, r, b])
dt = 0.01  # Time step for the integration of the Lorenz model
x0 = np.array([
    8.0, 0.0, 30.0
])  # Initial conditions for the nature-run spin-up (do not change)
numtrans = 600  # Spin-up time to generate the nature run (do not change)

#------------------------------------------------------------
# Configuration of the assimilation system
#------------------------------------------------------------
dx0 = 1.0 * np.array([5.0, 5.0, 5.0])  # Initial error of the estimate.
R0 = 8.0  # Variance of the observation error.
nvars = 3
EnsSize = 30  # Number of ensemble members.

nobs = np.size(forward_operator(np.array([0, 0, 0])))

# Define the observation-error matrix
R = R0 * np.identity(nobs)  # In this formulation we assume that the errors
# in different observations are all equal and uncorrelated.
P0 = 10.0 * np.array([[0.6, 0.5, 0.0], [0.5, 0.6, 0.0], [0.0, 0.0, 1.0]])

lam = 40.0

x = np.copy(x0)
for i in range(numtrans):
    x = model.forward_model(x, p, dt)

# Integrate the truth simulation
# The result is stored in a numpy array "state" with dimension (numstep, 3)
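
# --- Added sketch (not in the original script): one way to draw the initial
# ensemble consistent with the settings above, perturbing the spun-up truth x
# by dx0 and sampling the spread from P0.
rng = np.random.default_rng(0)
ensemble = rng.multivariate_normal(x + dx0, P0, size=EnsSize)  # shape (EnsSize, nvars)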
Code example #53
File: extinction.py  Project: k-gilbert/beast
    def function(self, lamb, Av=1, Rv=2.74, Alambda=True,
                 draine_extend=False,  **kwargs):
        """
        Gordon03_SMCBar extinction law

        Parameters
        ----------
        lamb: float or ndarray(dtype=float)
            wavelength [in Angstroms] at which evaluate the law.

        Av: float
            desired A(V) (default 1.0)

        Rv: float
            desired R(V) (default 2.74)

        Alambda: bool
            if set returns +2.5*1./log(10.)*tau, tau otherwise

        Returns
        -------
        r: float or ndarray(dtype=float)
            attenuation as a function of wavelength
            depending on Alambda option +2.5*1./log(10.)*tau,  or tau
        """
        # ensure the units are in angstrom
        _lamb = units.Quantity(lamb, units.angstrom).value
        #_lamb = val_in_unit('lamb', lamb, 'angstrom').magnitude

        if isinstance(_lamb, float) or isinstance(_lamb, np.float_):
            _lamb = np.asarray([_lamb])
        else:
            _lamb = np.asarray(_lamb)

        # set Rv explicitly to the fixed value
        Rv = self.Rv
            
        c1 = -4.959 / Rv
        c2 = 2.264 / Rv
        c3 = 0.389 / Rv
        c4 = 0.461 / Rv
        x0 = 4.6
        gamma = 1.0

        x = 1.e4 / _lamb
        k = np.zeros(np.size(x))

        # UV part
        xcutuv = 10000.0 / 2700.
        xspluv = 10000.0 / np.array([2700., 2600.])

        ind = np.where(x >= xcutuv)
        if np.size(ind) > 0:
            k[ind] = 1.0 + c1 + (c2 * x[ind]) + c3 * ((x[ind]) ** 2) / \
                     ( ((x[ind]) ** 2 - (x0 ** 2)) ** 2 + (gamma ** 2) *
                       ((x[ind]) ** 2 ))
            yspluv = 1.0 + c1 + (c2 * xspluv) + c3 * ((xspluv) ** 2) / \
                     ( ((xspluv) ** 2 - (x0 ** 2)) ** 2 + (gamma ** 2) *
                       ((xspluv) ** 2 ))
            
        # FUV portion  
        ind = np.where(x >= 5.9)
        if np.size(ind) > 0:
            if draine_extend:
                dfname = libdir+'SMC_Rv2.74_norm.txt'
                l_draine, k_draine = np.loadtxt(dfname,usecols=(0,1),unpack=True)
                dind = np.where((1./l_draine) >= 5.9)
                k[ind] = interp(x[ind],1./l_draine[dind][::-1],
                                k_draine[dind][::-1])
            else:
                k[ind] += c4 * (0.5392 * ((x[ind] - 5.9) ** 2) +
                                0.05644 * ((x[ind] - 5.9) ** 3))

        # Opt/NIR part
        ind = np.where(x < xcutuv)
        if np.size(ind) > 0:
            xsplopir = np.zeros(9)
            xsplopir[0] = 0.0
            xsplopir[1: 10] = 1.0 / np.array([2.198, 1.65, 1.25, 0.81,
                                              0.65, 0.55, 0.44, 0.37])

            # Values directly from Gordon et al. (2003)
            #ysplopir =  np.array([0.0,0.016,0.169,0.131,0.567,0.801,
            #                      1.00,1.374,1.672])
            # K & J values adjusted to provide a smooth,
            #      non-negative cubic spline interpolation
            ysplopir = np.array([0.0, 0.11, 0.169, 0.25, 0.567,
                                 0.801, 1.00, 1.374, 1.672])

            tck = interpolate.splrep(np.hstack([xsplopir, xspluv]),
                                     np.hstack([ysplopir, yspluv]), k=3)
            k[ind] = interpolate.splev(x[ind], tck)

        if (Alambda):
            return(k * Av)
        else:
            return(k * Av * (np.log(10.) * 0.4 ))
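
# --- Added usage sketch (not in the original; the class name Gordon03_SMCBar
# is assumed from the docstring above):
import numpy as np

law = Gordon03_SMCBar()
lamb = np.logspace(np.log10(1150.), np.log10(30000.), 50)  # wavelengths in Angstroms
alav = law.function(lamb, Av=1.0)  # with Av=1 this is A(lambda)/A(V)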
Code example #54
File: hunter.py  Project: chsahit/hgt_workshop
# Create barrier certificates to avoid collision
si_barrier_cert = create_single_integrator_barrier_certificate(N)

# define x initially
x = r.get_poses()
#print(x)
r.step()

# While the number of robots at the required poses is less
# than N...
robot1_goal = np.array([[0.0], [0.0], [0.0]])
robot2_goal = np.array([[-0.9], [0.9], [0.0]])
goal_points = np.concatenate((robot1_goal, robot2_goal), axis=1)

while(np.size(at_pose(x, goal_points, rotation_error=5)) != N):

    # Get poses of agents
    x = r.get_poses()
    x_si = x[:2, :]

    # Create single-integrator control inputs
    dxi = single_integrator_position_controller(x_si, goal_points[:2, :], magnitude_limit=0.08)

    # Create safe control inputs (i.e., no collisions)
    dxi = si_barrier_cert(dxi, x_si)

    # Set the velocities by mapping the single-integrator inputs to unicycle inputs
    r.set_velocities(np.arange(N), single_integrator_to_unicycle2(dxi, x))
    # Iterate the simulation
    r.step()
Code example #55
File: extinction.py  Project: k-gilbert/beast
    def function(self, lamb, Av=1., Rv=3.1, Alambda=True, **kwargs):
        """
        Cardelli89 extinction Law

        Parameters
        ----------
        lamb: float or ndarray(dtype=float)
            wavelength [in Angstroms] at which evaluate the law.

        Av: float
            desired A(V) (default: 1.0)

        Rv: float
            desired R(V) (default: 3.1)

        Alambda: bool
            if set returns +2.5*1./log(10.)*tau, tau otherwise

        Returns
        -------
        r: float or ndarray(dtype=float)
            attenuation as a function of wavelength
            depending on Alambda option +2.5*1./log(10.)*tau,  or tau
        """
        # ensure the units are in angstrom
        _lamb = units.Quantity(lamb, units.angstrom).value
        #_lamb = val_in_unit('lamb', lamb, 'angstrom').magnitude

        if isinstance(_lamb, float) or isinstance(_lamb, np.float_):
            _lamb = np.asarray([_lamb])
        else:
            _lamb = np.asarray(_lamb)

        # init variables
        x = 1.e4 / _lamb  # wavenumber in um^-1
        a = np.zeros(np.size(x))
        b = np.zeros(np.size(x))
        # Infrared (Eq 2a,2b)
        ind = np.where((x >= 0.3) & (x < 1.1))
        a[ind] =  0.574 * x[ind] ** 1.61
        b[ind] = -0.527 * x[ind] ** 1.61
        # Optical & Near IR
        # Eq 3a, 3b
        ind = np.where((x >= 1.1) & (x < 3.3))
        y = x[ind] - 1.82
        a[ind] = 1. + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \
                 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.77530 * y ** 6 + \
                 0.32999 * y ** 7
        b[ind] = 1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - \
                 5.38434 * y ** 4 - 0.62251 * y ** 5 + 5.30260 * y ** 6 - \
                 2.09002 * y ** 7
        # UV
        # Eq 4a, 4b
        ind = np.where((x >= 3.3) & (x <= 8.0))
        a[ind] =  1.752 - 0.316 * x[ind] - 0.104 / ((x[ind] - 4.67) ** 2 + 0.341)
        b[ind] = -3.090 + 1.825 * x[ind] + 1.206 / ((x[ind] - 4.62) ** 2 + 0.263)

        ind = np.where((x >= 5.9) & (x <= 8.0))
        y = x[ind] - 5.9
        Fa     = -0.04473 * y ** 2 - 0.009779 * y ** 3
        Fb     =  0.21300 * y ** 2 + 0.120700 * y ** 3
        a[ind] = a[ind] + Fa
        b[ind] = b[ind] + Fb
        # Far UV
        # Eq 5a, 5b
        ind = np.where((x > 8.0) & (x <= 10.0))
        # Fa = Fb = 0
        y = x[ind] - 8.
        a[ind] = -1.073 - 0.628 * y + 0.137 * y ** 2 - 0.070 * y ** 3
        b[ind] = 13.670 + 4.257 * y - 0.420 * y ** 2 + 0.374 * y ** 3

        # Case of x values out of range [0.3, 10.0]
        ind = np.where((x > 10.0) | (x < 0.3))
        a[ind] = 0.0
        b[ind] = 0.0

        # Return Extinction vector
        # Eq 1
        if (Alambda):
            return( ( a + b / Rv ) * Av)
        else:
            # return( 1./(2.5 * 1. / np.log(10.)) * ( a + b / Rv ) * Av)
            return( 0.4 * np.log(10.) * ( a + b / Rv ) * Av)
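
# --- Added spot check (not in the original; the class name Cardelli89 is
# assumed from the docstring above). At the V band, 5500 Angstroms, a + b/Rv
# is ~1 by construction, so A(lambda) should come out ~Av:
import numpy as np

law = Cardelli89()
print(law.function(np.array([5500.]), Av=1.0, Rv=3.1))  # ~1.0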
Code example #56
def make_full_table(caption, label, source_table, stacking=np.array([]), units=None):
    # Preamble
    Output = """\\begin{table}
    \\centering
    \\caption{""" + caption + """}
    \\label{""" + label + """}
    \\sisetup{parse-numbers=false}
    \\begin{tabular}{\n"""

    # Core work: read source_table and process it, i.e. find the digits before and after the decimal point
    counter_columns = 0
    counter_lines = 0
    with open(source_table, 'r') as f:
        Text = f.read()
        for buchstabe in Text:
            if (buchstabe == '&'):
                counter_columns += 1
            elif (buchstabe == '\\'):
                counter_lines += 1

    NumberOfLines = counter_lines // 2
    NumberOfColumns = counter_columns * 2 // counter_lines + 1
    counter_digits_preDot = np.zeros((NumberOfLines, NumberOfColumns), dtype=int)
    counter_digits_postDot = np.zeros((NumberOfLines, NumberOfColumns), dtype=int)
    dot_reached = False
    counter_columns = 0
    counter_lines = 0
    with open(source_table, 'r') as f:
        Text = f.read()
    # 'finding the digits before and after the decimal point' starts here
        for buchstabe in Text:
            if (buchstabe == '&'):
                counter_columns += 1
                dot_reached = False
            elif (buchstabe == '.'):
                dot_reached = True
            elif (buchstabe == '\\'):
                counter_lines += 1
                counter_columns = counter_columns % (NumberOfColumns - 1)
                dot_reached = False
            elif (buchstabe != ' ') & (buchstabe != '\n'):
                if (counter_lines // 2 <= (NumberOfLines - 1)):
                    if dot_reached == False:
                        counter_digits_preDot[counter_lines // 2][counter_columns] += 1
                    else:
                        counter_digits_postDot[counter_lines // 2][counter_columns] += 1
    # now find the maximum number of digits and store them in MaxDigitsPreDot and MaxDigitsPostDot
    MaxDigitsPreDot = []
    counter_digits_preDot_np = np.array(counter_digits_preDot)
    for x in counter_digits_preDot_np.T:
        MaxDigitsPreDot.append(max(x))
    MaxDigitsPostDot = []
    counter_digits_postDot_np = np.array(counter_digits_postDot)
    for x in counter_digits_postDot_np.T:
        MaxDigitsPostDot.append(max(x))
    # -------------------- end of the digit search

    # Convert the stacking list into an adjusted array holding the columns actually affected
    stacking_list = np.array(stacking)
    i = 0
    for x in stacking_list:
        stacking_list[i] += i
        i += 1

    # Write the table column formatting
    if np.size(stacking) == 0:
        for digits_preDot, digits_postDot in zip(MaxDigitsPreDot, MaxDigitsPostDot):
            Output += '\tS[table-format=' + str(digits_preDot) + '.' + str(digits_postDot) + ']\n'
    else:   # error-bearing values were passed, so a +- must go between the corresponding columns
        i = 0
        for digits_preDot, digits_postDot in zip(MaxDigitsPreDot, MaxDigitsPostDot):
            if i in stacking_list:
                Output += '\tS[table-format=' + str(digits_preDot) + '.' + str(digits_postDot) + ']\n'
                Output += '\t@{${}\\pm{}$}\n'
            elif i - 1 in stacking_list:
                Output += '\tS[table-format=' + str(digits_preDot) + '.' + str(digits_postDot) + ', table-number-alignment = left]\n'      # we want left-aligned numbers here
            else:
                Output += '\tS[table-format=' + str(digits_preDot) + '.' + str(digits_postDot) + ']\n'
            i += 1

    # Interlude
    Output += '\t}\n\t\\toprule\n\t'

    # Units row
    i = 0
    stacking_list = np.array(stacking)
    for Spaltenkopf in units:
        if i in stacking_list:
            Output += '\\multicolumn{2}{c}'
        Output += '{' + str(Spaltenkopf) + '}\t\t'
        i += 1
        if i == np.size(units):
            Output += '\\\\ \n\t'
        elif i % 2 == 0:
            Output += '& \n\t'
        else:
            Output += '& '

    # Closing
    Output += """\\midrule
    \\input{""" + source_table + """}
    \\bottomrule
    \\end{tabular}
    \\end{table}"""
    return Output
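
# --- Added usage sketch (not in the original; paths and column headers are
# illustrative). Given a plain LaTeX body table of numeric rows, e.g.
# build/values.tex containing lines like
#   1.23 & 4.5 \\
#   10.50 & 0.7 \\
# the wrapper emits the complete siunitx-formatted table environment:
table_tex = make_full_table(
    caption='Measured values.',
    label='tab:values',
    source_table='build/values.tex',
    units=[r'$U \:/\: \si{\volt}$', r'$I \:/\: \si{\ampere}$'])
with open('build/table_values.tex', 'w') as f:
    f.write(table_tex)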
Code example #57
File: date2dec.py  Project: julemai/EEE-DA
def date2dec(calendar = 'standard', units=None,
             excelerr = True, yr=1,
             mo=1, dy=1, hr=0, mi=0, sc=0,
             ascii=None, en=None, eng=None):
    """
    Convert scalar and array_like with calendar dates into decimal
    dates. Supported calendar formats are standard, gregorian, julian,
    proleptic_gregorian, excel1900, excel1904, 365_day, noleap, 366_day,
    all_leap, 360_day, decimal, or decimal360.

    Input is year, month day, hour, minute, second or a combination of them.
    Input as date string is possible.

    Output is decimal date with day as unit.

    Parameters
    ----------
    yr : array_like, optional
        years (default: 1)
    mo : array_like, optional
        month (default: 1)
    dy : array_like, optional
        days (default: 1)
    hr : array_like, optional
        hours (default: 0)
    mi : array_like, optional
        minutes (default: 0)
    sc : array_like, optional
        seconds (default: 0)
    ascii : array_like, optional
        strings of the format 'dd.mm.yyyy hh:mm:ss'.
        Missing hour, minutes and/or seconds are set
        to their default values (0).

        `ascii` overwrites all other keywords.

        `ascii` and `eng` are mutually exclusive.
    en : array_like, optional
        strings of the format 'yyyy-mm-dd hh:mm:ss'.
        Missing hour, minutes and/or seconds are set
        to their default values (0).

        `en` overwrites all other keywords.

        `en` and `ascii` are mutually exclusive.
    eng : array_like, optional
        Same as en: obsolete.
    calendar : str, optional
        Calendar of output dates (default: 'standard').

        Possible values are:

        'standard', 'gregorian' = julian calendar from
        01.01.-4712 12:00:00 (BC) until 05.03.1583 00:00:00 and
        gregorian calendar from 15.03.1583 00:00:00 until now.
        The missing 10 days do not exist.

        'julian' = julian calendar from 01.01.-4712 12:00:00 (BC)
         until now.

        'proleptic_gregorian' = gregorian calendar from
        01.01.0001 00:00:00 until now.

        'excel1900' = Excel dates with origin at
        01.01.1900 00:00:00.

        'excel1904' = Excel 1904 (Lotus) format.
        Same as excel1900 but with origin at
        01.01.1904 00:00:00.

        '365_day', 'noleap' = 365 days format,
        i.e. common years only (no leap years)
        with origin at 01.01.0001 00:00:00.

        '366_day', 'all_leap' = 366 days format,
        i.e. leap years only (no common years)
        with origin at 01.01.0001 00:00:00.

        '360_day' = 360 days format,
        i.e. years with only 360 days (30 days per month)
        with origin at 01.01.0001 00:00:00.

        'decimal' = decimal year instead of decimal days.

        'decimal360' = decimal year with a year of 360 days, i.e. 12 month with 30 days each.
    units : str, optional
        User set units of output dates. Must be a
        string in the format 'days since yyyy-mm-dd hh:mm:ss'.
        Default values are set automatically depending on `calendar`.
    excelerr : bool, optional
       In Excel, the year 1900 is normally considered a leap year,
       which it was not. By default, this error is taken into account
       if `calendar=='excel1900'` (default: True).

       1900 is not considered a leap year if `excelerr==False`.

    Returns
    -------
    array_like
       array_like with decimal dates. The type of output will be the same as the input type.

    Notes
    -----
    Most versions of `datetime` do not support negative years,
    i.e. Julian days < 1721423.5 = 01.01.0001 00:00:00.

    There is an issue in `netcdftime` version < 0.9.5 in proleptic_gregorian for dates before year 301:
    dec2date(date2dec(ascii='01.01.0300 00:00:00', calendar='proleptic_gregorian'), calendar='proleptic_gregorian')
    [300, 1, 2, 0, 0, 0]
    dec2date(date2dec(ascii='01.01.0301 00:00:00', calendar='proleptic_gregorian'), calendar='proleptic_gregorian')
    [301, 1, 1, 0, 0, 0]

    List input is only supported up to 2 dimensions.

    Requires `netcdftime.py` from module `netcdftime` available at:
    http://netcdf4-python.googlecode.com

    Examples
    --------
    # Some implementations of datetime do not support negative years
    >>> import datetime
    >>> if datetime.MINYEAR > 0:
    ...     print('The minimum year in your datetime implementation is ', datetime.MINYEAR)
    ...     print('i.e. it does not support negative years (BC).')
    The minimum year in your datetime implementation is  1
    i.e. it does not support negative years (BC).

    >>> if datetime.MINYEAR > 0:
    ...     year   = np.array([2000, 1810, 1630, 1510, 1271, 619, 2, 1])
    ... else:
    ...     year   = np.array([2000, 1810, 1630, 1510, 1271, 619, -1579, -4712])
    >>> month  = np.array([1, 4, 7, 9, 3, 8, 8, 1])
    >>> day    = np.array([5, 24, 15, 20, 18, 27, 23, 1])
    >>> hour   = np.array([12, 16, 10, 14, 19, 11, 20, 12])
    >>> minute = np.array([30, 15, 20, 35, 41, 8, 3, 0])
    >>> second = np.array([15, 10, 40, 50, 34, 37, 41, 0])
    >>> decimal = date2dec(calendar = 'standard', yr=year, mo=month, dy=day, hr=hour, mi=minute, sc=second)
    >>> nn = year.size
    >>> print('{:.14e} {:.14e} {:.14e} {:.14e}'.format(*decimal[:nn//2]))
    2.45154902100694e+06 2.38226217719907e+06 2.31660093101852e+06 2.27284810821759e+06
    >>> print('{:.14e} {:.14e}'.format(*decimal[nn//2:nn-2]))
    2.18536732053241e+06 1.94738596431713e+06
    >>> decimal = date2dec(calendar='standard', yr=year, mo=6, dy=15, hr=12, mi=minute, sc=second)
    >>> print('{:.14e} {:.14e} {:.14e} {:.14e}'.format(*decimal[:nn//2]))
    2.45171102100694e+06 2.38231401053241e+06 2.31657101435185e+06 2.27275102488426e+06
    >>> print('{:.14e} {:.14e}'.format(*decimal[nn//2:nn-2]))
    2.18545602886574e+06 1.94731300598380e+06

    # ascii input
    >>> if datetime.MINYEAR > 0:
    ...     a = np.array(['05.01.2000 12:30:15', '24.04.1810 16:15:10', '15.07.1630 10:20:40', '20.09.1510 14:35:50',
    ...                   '18.03.1271 19:41:34', '27.08. 619 11:08:37', '23.08.0002 20:03:41', '01.01.0001 12:00:00'])
    ... else:
    ...     a = np.array(['05.01.2000 12:30:15', '24.04.1810 16:15:10', '15.07.1630 10:20:40',  '20.09.1510 14:35:50',
    ...                   '18.03.1271 19:41:34', '27.08. 619 11:08:37', '23.08.-1579 20:03:41', '01.01.-4712 12:00:00'])
    >>> decimal = date2dec(calendar='standard', ascii=a)
    >>> nn = a.size
    >>> print('{:.14e} {:.14e} {:.14e} {:.14e}'.format(*decimal[:nn//2]))
    2.45154902100694e+06 2.38226217719907e+06 2.31660093101852e+06 2.27284810821759e+06
    >>> print('{:.14e} {:.14e}'.format(*decimal[nn//2:nn-2]))
    2.18536732053241e+06 1.94738596431713e+06

    # calendar = 'julian'
    >>> decimal = date2dec(calendar='julian', ascii=a)
    >>> print('{:.14e} {:.14e} {:.14e} {:.14e}'.format(*decimal[:nn//2]))
    2.45156202100694e+06 2.38227417719907e+06 2.31661093101852e+06 2.27284810821759e+06
    >>> print('{:.14e} {:.14e}'.format(*decimal[nn//2:nn-2]))
    2.18536732053241e+06 1.94738596431713e+06

    # calendar = 'proleptic_gregorian'
    >>> decimal = date2dec(calendar='proleptic_gregorian', ascii=a)
    >>> print('{:.7f} {:.7f} {:.7f} {:.7f}'.format(*decimal[:nn//2]))
    730123.5210069 660836.6771991 595175.4310185 551412.6082176
    >>> print('{:.7f} {:.7f}'.format(*decimal[nn//2:nn-2]))
    463934.8205324 225957.4643171

    # calendar = 'excel1900' WITH excelerr=True -> 1900 considered as leap year
    >>> d = np.array(['05.01.2000 12:30:15', '27.05.1950 16:25:10', '13.08.1910 10:40:55',
    ...               '01.03.1900 00:00:00', '29.02.1900 00:00:00', '28.02.1900 00:00:00',
    ...               '01.01.1900 00:00:00'])
    >>> decimal = date2dec(calendar='excel1900', ascii=d)
    >>> nn = d.size
    >>> print('{:.7f} {:.7f} {:.7f}'.format(*decimal[:nn//2]))
    36530.5210069 18410.6841435 3878.4450810
    >>> print('{:.1f} {:.1f} {:.1f} {:.1f}'.format(*decimal[nn//2:]))
    61.0 60.0 59.0 1.0

    # calendar = 'excel1900' WITH excelerr = False -> 1900 is NO leap year
    >>> decimal = date2dec(calendar='excel1900', ascii=d, excelerr=False)
    >>> print('{:.7f} {:.7f} {:.7f}'.format(*decimal[:nn//2]))
    36529.5210069 18409.6841435 3877.4450810
    >>> print('{:.1f} {:.1f} {:.1f} {:.1f}'.format(*decimal[nn//2:]))
    60.0 60.0 59.0 1.0

    # calendar = 'excel1904'
    >>> decimal = date2dec(calendar='excel1904', ascii=d[:nn//2])
    >>> print('{:.7f} {:.7f} {:.7f}'.format(*decimal[:nn//2]))
    35069.5210069 16949.6841435 2417.4450810

    # calendar = '365_day'
    >>> g = np.array(['18.08.1972 12:30:15', '25.10.0986 12:30:15', '28.11.0493 22:20:40', '01.01.0001 00:00:00'])
    >>> decimal = date2dec(calendar='365_day', ascii=g)
    >>> nn = g.size
    >>> print('{:.7f} {:.7f} {:.7f} {:.7f}'.format(*decimal[:nn]))
    719644.5210069 359822.5210069 179911.9310185 0.0000000

    # calendar = '366_day'
    >>> decimal = date2dec(calendar='366_day', ascii=g)
    >>> print('{:.7f} {:.7f} {:.7f} {:.7f}'.format(*decimal[:nn]))
    721616.5210069 360808.5210069 180404.9310185 0.0000000

    # 360_day does not work with netcdftime.py version equal or below 0.9.2
    # calendar = '360_day'
    >>> decimal = date2dec(calendar='360_day', ascii=g)
    >>> print('{:.7f} {:.7f} {:.7f} {:.7f}'.format(*decimal[:nn]))
    709787.5210069 354894.5210069 177447.9310185 0.0000000

    >>> print('{:.7f}'.format(date2dec(yr=1992, mo=1, dy=26, hr=2, mi=0, sc=0, calendar='decimal')))
    1992.0685337
    >>> print('{:.7f}'.format(date2dec(ascii='26.01.1992 02:00', calendar='decimal360')))
    1992.0696759
    >>> print('{:.7f} {:.7f}'.format(*date2dec(ascii=['26.01.1992 02:00','26.01.1992 02:00'], calendar='decimal360')))
    1992.0696759 1992.0696759
    >>> print('{:.7f} {:.7f}'.format(*date2dec(yr=[1992,1992], mo=1, dy=26, hr=2, mi=0, sc=0, calendar='decimal360')))
    1992.0696759 1992.0696759
    >>> print('{:.7f} {:.7f}'.format(*date2dec(yr=np.array([1992,1992]), mo=1, dy=26, hr=2, mi=0, sc=0, calendar='decimal360')))
    1992.0696759 1992.0696759
    >>> decimal = date2dec(ascii=[['26.01.1992 02:00','26.01.1992 02:00'],
    ...                           ['26.01.1992 02:00','26.01.1992 02:00'],
    ...                           ['26.01.1992 02:00','26.01.1992 02:00']],
    ...                    calendar='decimal360')
    >>> print('{:.7f} {:.7f}'.format(*decimal[0]))
    1992.0696759 1992.0696759
    >>> print('{:.7f} {:.7f}'.format(*decimal[2]))
    1992.0696759 1992.0696759
    >>> print((date2dec(ascii='01.03.2003 00:00:00') - date2dec(ascii='01.03.2003')) == 0.)
    True

    # en
    >>> decimal = date2dec(en='1992-01-26 02:00', calendar='decimal360')
    >>> print('{:.7f}'.format(decimal))
    1992.0696759
    >>> decimal = date2dec(eng='1992-01-26 02:00', calendar='decimal360')
    >>> print('{:.7f}'.format(decimal))
    1992.0696759

    History
    -------
    Written  Arndt Piayda, Jun 2010
    Modified Matthias Cuntz, Feb 2012 - All input can be scalar, list or array, also a mix
                                        - Changed checks for easier extension
                                        - decimal, decimal360
             Matthias Cuntz, Dec 2012 - change unit of proleptic_gregorian
                                        from 'days since 0001-01-01 00:00:00'
                                        to   'days since 0001-01-00 00:00:00'
             Matthias Cuntz, Feb 2013 - solved Excel leap year problem.
             Matthias Cuntz, Feb 2013 - ported to Python 3
             Matthias Cuntz, Jul 2013 - ascii/eng without time defaults to 00:00:00
             Matthias Cuntz, Oct 2013 - Excel starts at 1 not at 0
             Matthias Cuntz, Oct 2013 - units bugs, e.g. 01.01.0001 was subtracted if Julian calendar even with units
             Matthias Cuntz, Nov 2013 - removed remnant of time treatment before time check in eng keyword
             Matthias Cuntz, Jun 2015 - adapted to new netCDF4/netcdftime (>= v1.0) and datetime (>= Python v2.7.9)
             Matthias Cuntz, Oct 2015 - call date2num with list instead of single netCDF4.datetime objects
             Matthias Cuntz, Oct 2016 - netcdftime provided even with netCDF4 > 1.0.0; make mo for months always integer
             Matthias Cuntz, Nov 2016 - 00, 01, etc. for integers not accepted by Python3
             Matthias Cuntz, May 2020 - numpy docstring format
             Matthias Cuntz, Jul 2020 - en for eng
             Matthias Cuntz, Jul 2020 - use proleptic_gregorian for Excel dates
    """
    #
    # Checks
    calendars = ['standard', 'gregorian', 'julian', 'proleptic_gregorian',
                 'excel1900', 'excel1904', '365_day', 'noleap', '366_day',
                 'all_leap', '360_day', 'decimal', 'decimal360']
    import netCDF4 as nt
    try:
        tst = nt.date2num
        tst = nt.datetime
    except:
        try:
            import netcdftime as nt
            if ((nt.__version__ <= '0.9.2') & (calendar == '360_day')):
                raise ValueError("date2dec error: Your version of netcdftime.py is equal"
                                 " or below 0.9.2. The 360_day calendar does not work with"
                                 " arrays here. Please download a newer one.")
        except:
            import cftime as nt
    #
    calendar = calendar.lower()
    if (calendar not in calendars):
        raise ValueError("date2dec error: Wrong calendar!"
                    " Choose: "+''.join([i+' ' for i in calendars]))
    # obsolete eng
    if (eng is not None):
        if (en is not None):
            raise ValueError("date2dec error: 'eng' was succeeded by 'en'. Only one can be given.")
        else:
            en = eng
    # if ascii input is given by user, other input will be neglected
    # calculation of input size and shape
    if (ascii is not None) and (en is not None):
        raise ValueError("date2dec error: 'ascii' and 'en' mutually exclusive")
    if (ascii is not None):
        islist = type(ascii) != type(np.array(ascii))
        isarr = np.ndim(ascii)
        if (islist & (isarr > 2)):
            raise ValueError("date2dec error: ascii input is list > 2D; Use array input")
        if isarr == 0:
            ascii = np.array([ascii])
        else:
            ascii = np.array(ascii)
        insize   = ascii.size
        outsize  = insize
        outshape = ascii.shape
        asciifl  = ascii.flatten()
        timeobj  = np.zeros(insize, dtype=object)
        # slicing of ascii strings to feed into the datetime object. Missing
        # times will be set to 0.
        yr = np.zeros(insize, dtype=int)
        mo = np.zeros(insize, dtype=int)
        dy = np.zeros(insize, dtype=int)
        hr = np.zeros(insize, dtype=int)
        mi = np.zeros(insize, dtype=int)
        sc = np.zeros(insize, dtype=int)
        for i in range(insize):
            aa      = asciifl[i].split('.')
            dy[i]   = int(aa[0])
            mo[i]   = int(aa[1])
            tail    = aa[2].split()
            yr[i]   = int(tail[0])
            if len(tail) > 1:
                tim     = tail[1].split(':')
                hr[i]   = int(tim[0])
                if len(tim) > 1:
                    mi[i] = int(tim[1])
                else:
                    mi[i] = 0
                if len(tim) > 2:
                    sc[i] = int(tim[2])
                else:
                    sc[i] = 0
            else:
                hr[i] = 0
                mi[i] = 0
                sc[i] = 0
            if ((yr[i]==1900) & (mo[i]==2) & (dy[i]==29)):
                timeobj[i] = nt.datetime(yr[i], 3, 1, hr[i], mi[i], sc[i])
            else:
                timeobj[i] = nt.datetime(yr[i], mo[i], dy[i], hr[i], mi[i], sc[i])
    if (en is not None):
        islist = type(en) != type(np.array(en))
        isarr  = np.ndim(en)
        if isarr == 0:
            en = np.array([en])
        else:
            en = np.array(en)
        if (islist & (isarr > 2)):
            raise ValueError("date2dec error: en input is list > 2D; Use array input")
        insize   = en.size
        outsize  = insize
        outshape = en.shape
        enfl     = en.flatten()
        timeobj  = np.zeros(insize, dtype=object)
        # slicing of en strings to feed into the datetime object. Missing
        # times will be set to 0.
        yr = np.zeros(insize, dtype=int)
        mo = np.zeros(insize, dtype=int)
        dy = np.zeros(insize, dtype=int)
        hr = np.zeros(insize, dtype=int)
        mi = np.zeros(insize, dtype=int)
        sc = np.zeros(insize, dtype=int)
        for i in range(insize):
            ee      = enfl[i].split('-')
            yr[i]   = int(ee[0])
            mo[i]   = int(ee[1])
            tail    = ee[2].split()
            dy[i]   = int(tail[0])
            if len(tail) > 1:
                tim     = tail[1].split(':')
                hr[i]   = int(tim[0])
                if len(tim) > 1:
                    mi[i] = int(tim[1])
                else:
                    mi[i] = 0
                if len(tim) > 2:
                    sc[i] = int(tim[2])
                else:
                    sc[i] = 0
            else:
                hr[i] = 0
                mi[i] = 0
                sc[i] = 0
            if ((yr[i]==1900) & (mo[i]==2) & (dy[i]==29)):
                timeobj[i] = nt.datetime(yr[i], 3, 1, hr[i], mi[i], sc[i])
            else:
                timeobj[i] = nt.datetime(yr[i], mo[i], dy[i], hr[i], mi[i], sc[i])
    # if no ascii input, the other inputs will be considered
    # calculation of input sizes, shapes and number of axis
    if (ascii is None) and (en is None):
        isnlist1 = type(yr) == type(np.array(yr))
        isarr1   = np.ndim(yr)
        if isarr1 == 0: yr = np.array([yr])
        else: yr = np.array(yr)
        isnlist2 = type(mo) == type(np.array(mo))
        isarr2   = np.ndim(mo)
        if isarr2 == 0: mo = np.array([mo], dtype=int)
        else: mo = np.array(mo, dtype=int)
        isnlist3 = type(dy) == type(np.array(dy))
        isarr3   = np.ndim(dy)
        if isarr3 == 0: dy = np.array([dy])
        else: dy = np.array(dy)
        isnlist4 = type(hr) == type(np.array(hr))
        isarr4   = np.ndim(hr)
        if isarr4 == 0: hr = np.array([hr])
        else: hr = np.array(hr)
        isnlist5 = type(mi) == type(np.array(mi))
        isarr5   = np.ndim(mi)
        if isarr5 == 0: mi = np.array([mi])
        else: mi = np.array(mi)
        isnlist6 = type(sc) == type(np.array(sc))
        isarr6   = np.ndim(sc)
        if isarr6 == 0: sc = np.array([sc])
        else: sc = np.array(sc)
        islist = not (isnlist1 | isnlist2 | isnlist3 | isnlist4 | isnlist5 | isnlist6)
        isarr  = isarr1 + isarr2 + isarr3 + isarr4 + isarr5 + isarr6
        shapes = [np.shape(yr), np.shape(mo), np.shape(dy), np.shape(hr), np.shape(mi), np.shape(sc)]
        nyr    = np.size(yr)
        nmo    = np.size(mo)
        ndy    = np.size(dy)
        nhr    = np.size(hr)
        nmi    = np.size(mi)
        nsc    = np.size(sc)
        sizes  = [nyr,nmo,ndy,nhr,nmi,nsc]
        nmax   = np.amax(sizes)
        ii     = np.argmax(sizes)
        outshape = shapes[ii]
        if (islist & (np.size(outshape) > 2)):
            raise ValueError("date2dec error: input is list > 2D; use array input.")
        if nyr < nmax:
            if nyr == 1: yr  = np.ones(outshape)*yr
            else: raise ValueError("date2dec error: size of yr != max input or 1.")
        if nmo < nmax:
            if nmo == 1: mo  = np.ones(outshape, dtype=int)*mo
            else: raise ValueError("date2dec error: size of mo != max input or 1.")
        if ndy < nmax:
            if ndy == 1: dy  = np.ones(outshape)*dy
            else: raise ValueError("date2dec error: size of dy != max input or 1.")
        if nhr < nmax:
            if nhr == 1: hr  = np.ones(outshape)*hr
            else: raise ValueError("date2dec error: size of hr != max input or 1.")
        if nmi < nmax:
            if nmi == 1: mi  = np.ones(outshape)*mi
            else: raise ValueError("date2dec error: size of mi != max input or 1.")
        if nsc < nmax:
            if nsc == 1: sc  = np.ones(outshape)*sc
            else: raise ValueError("date2dec error: size of sc != max input or 1.")
        indate  = [yr, mo, dy, hr, mi, sc]
        insize  = [np.size(i) for i in indate]
        inshape = [np.shape(i) for i in indate]
        dimnum  = [len(i) for i in inshape]
        # calculation of maximum input size and maximum number of axis for
        # reshaping the output
        indate  = [i.flatten() for i in indate]
        outsize = max(insize)
        timeobj = np.zeros(outsize, dtype=object)
        # datetime object is constructed
        for i in range(outsize):
            iyr = int(indate[0][i])
            imo = int(indate[1][i])
            idy = int(indate[2][i])
            ihr = int(indate[3][i])
            imi = int(indate[4][i])
            isc = int(indate[5][i])

            if ((iyr==1900) & (imo==2) & (idy==29)):
                timeobj[i] = nt.datetime(iyr, 3, 1, ihr, imi, isc)
            else:
                timeobj[i] = nt.datetime(iyr, imo, idy, ihr, imi, isc)
    # the decimal date is calculated according to the chosen calendar and
    # the optionally given time units
    output = np.zeros(outsize)
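    # the dates 1582-10-05 through 1582-10-14 were skipped in the switch from
    # the Julian to the Gregorian calendar; is121 flags inputs spanning this
    # gap, which netcdftime/cftime < 1.2.2 can only convert element-wise below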
    t0    = nt.datetime(1582, 10, 5, 0, 0, 0)
    t1    = nt.datetime(1582, 10, 15, 0, 0, 0)
    is121 = (min(timeobj) < t0) and (max(timeobj) >= t1)
    if (calendar == 'standard') or (calendar == 'gregorian'):
        if not units:
            units = 'days since 0001-01-01 12:00:00'
            dec0 = 1721424
        else:
            dec0 = 0
        if is121 and (nt.__version__ < '1.2.2'):
            for ii, tt in enumerate(timeobj): output[ii] = nt.date2num(tt, units, calendar='gregorian')+dec0
        else:
            output = nt.date2num(timeobj, units, calendar='gregorian')+dec0
    elif calendar == 'julian':
        if not units:
            units = 'days since 0001-01-01 12:00:00'
            dec0 = 1721424
        else:
            dec0 = 0
        if is121 and (nt.__version__ < '1.2.2'):
            for ii, tt in enumerate(timeobj): output[ii] = nt.date2num(tt, units, calendar='julian')+dec0
        else:
            output = nt.date2num(timeobj, units, calendar='julian')+dec0
    elif calendar == 'proleptic_gregorian':
        if not units: units = 'days since 0001-01-01 00:00:00'
        if is121 and (nt.__version__ < '1.2.2'):
            for ii, tt in enumerate(timeobj): output[ii] = nt.date2num(tt, units, calendar='proleptic_gregorian')
        else:
            output = nt.date2num(timeobj, units, calendar='proleptic_gregorian')
    elif calendar == 'excel1900':
        doerr = False
        if not units:
            units = 'days since 1899-12-31 00:00:00'
            if excelerr: doerr = True
        if is121 and (nt.__version__ < '1.2.2'):
            for ii, tt in enumerate(timeobj): output[ii] = nt.date2num(tt, units, calendar='proleptic_gregorian')
        else:
            output = nt.date2num(timeobj, units, calendar='proleptic_gregorian')
        if doerr:
            output = np.where(output >= 60., output+1., output)
            # date2num treats 29.02.1900 as 01.03.1900, i.e. both give the same decimal number
            if np.any((output >= 61.) & (output < 62.)):
                for i in range(outsize):
                    # if (timeobj[i].year==1900) & (timeobj[i].month==2) & (timeobj[i].day==29):
                    #     output[i] -= 1.
                    if (yr[i]==1900) & (mo[i]==2) & (dy[i]==29):
                        output[i] -= 1.
    elif calendar == 'excel1904':
        if not units: units = 'days since 1903-12-31 00:00:00'
        if is121 and (nt.__version__ < '1.2.2'):
            for ii, tt in enumerate(timeobj): output[ii] = nt.date2num(tt, units, calendar='proleptic_gregorian')
        else:
            output = nt.date2num(timeobj, units, calendar='proleptic_gregorian')
    elif (calendar == '365_day') or (calendar == 'noleap'):
        if not units: units = 'days since 0001-01-01 00:00:00'
        if is121 and (nt.__version__ < '1.2.2'):
            for ii, tt in enumerate(timeobj): output[ii] = nt.date2num(tt, units, calendar='365_day')
        else:
            output = nt.date2num(timeobj, units, calendar='365_day')
    elif (calendar == '366_day') or (calendar == 'all_leap'):
        if not units: units = 'days since 0001-01-01 00:00:00'
        if is121 and (nt.__version__ < '1.2.2'):
            for ii, tt in enumerate(timeobj): output[ii] = nt.date2num(tt, units, calendar='366_day')
        else:
            output = nt.date2num(timeobj, units, calendar='366_day')
    elif calendar == '360_day':
        if not units: units = 'days since 0001-01-01 00:00:00'
        if is121 and (nt.__version__ < '1.2.2'):
            for ii, tt in enumerate(timeobj): output[ii] = nt.date2num(tt, units, calendar='360_day')
        else:
            output = nt.date2num(timeobj, units, calendar='360_day')
    elif calendar == 'decimal':
        ntime = np.size(yr)
        leap  = np.array((((yr%4)==0) & ((yr%100)!=0)) | ((yr%400)==0)).astype(int)
        tdy   = np.array(dy, dtype=float)
        diy   = np.array([ [-9,0, 31, 59, 90,120,151,181,212,243,273,304,334,365],
                           [-9,0, 31, 60, 91,121,152,182,213,244,274,305,335,366] ])
        for i in range(ntime):
            tdy[i] = tdy[i] + np.array(diy[leap[i],mo[i]], dtype=float)
        days_year = 365.
        output    = ( np.array(yr, dtype=float) +
                      ((tdy-1.)*24. + np.array(hr, dtype=float) +
                       np.array(mi, dtype=float)/60. +
                       np.array(sc, dtype=float)/3600.) /
                       ((days_year+np.array(leap, dtype=float))*24.) )
        # small offset for numerical stability in back-and-forth transforms
        output += 1e-08 # about 1/3 second
    elif calendar == 'decimal360':
        ntime = np.size(yr)
        tdy   = np.array(dy, dtype=float)
        diy   = np.array([-9,  0, 30, 60, 90,120,150,180,210,240,270,300,330,360])
        for i in range(ntime):
            tdy[i] = tdy[i] + np.array(diy[mo[i]], dtype=float)
        days_year = 360.
        output    = ( np.array(yr, dtype=float) +
                      ((tdy-1.)*24. + np.array(hr, dtype=float) +
                       np.array(mi, dtype=float)/60. +
                       np.array(sc, dtype=float)/3600.) /
                       (days_year*24.) )
        # small offset for numerical stability in back-and-forth transforms
        output += 1e-08 # about 1/3 second
    else:
        raise ValueError("date2dec error: calendar not implemented; should have been catched before.")


    # return of reshaped output
    output = np.reshape(output, outshape)
    if isarr == 0:
        output = float(output)
    else:
        if islist:
            ns = np.size(outshape)
            if ns == 1:
                output = [i for i in output]
            else:
                loutput = [ i for i in output[:,0]]
                for i in range(np.size(output[:,0])):
                    loutput[i] = list(np.squeeze(output[i,:]))
                output = loutput

    return output
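
A minimal standalone sketch of the 'decimal' calendar branch above, for a single date (the helper name decimal_year and the example date are illustrative, not from the source):

def decimal_year(yr, mo, dy, hr=0, mi=0, sc=0):
    """Reproduce the 'decimal' branch of date2dec for one date (illustrative)."""
    leap = int(((yr % 4 == 0) and (yr % 100 != 0)) or (yr % 400 == 0))
    # cumulative days in the year before each month; index 0 is an unused placeholder
    diy = [[-9, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
           [-9, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366]]
    tdy = dy + diy[leap][mo]  # day of year
    return yr + ((tdy - 1.) * 24. + hr + mi / 60. + sc / 3600.) / ((365. + leap) * 24.)

print(decimal_year(2000, 7, 1, 12))  # ~2000.4986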
Code example #58
File: extinction.py Project: k-gilbert/beast
    def function(self, lamb, Av=1, Rv=3.1, Alambda=True,
                 draine_extend=False, **kwargs):
        """
        Fitzpatrick99 extinction Law

        Parameters
        ----------
        lamb: float or ndarray(dtype=float)
            wavelength [in Angstroms] at which to evaluate the law.

        Av: float
            desired A(V) (default 1.0)

        Rv: float
            desired R(V) (default 3.1)

        Alambda: bool
            if True, returns A(lambda) = +2.5*1./log(10.)*tau; returns tau otherwise

        draine_extend: bool
            if set extends the extinction curve to below 912 A

        Returns
        -------
        r: float or ndarray(dtype=float)
            attenuation as a function of wavelength
            depending on Alambda option +2.5*1./log(10.)*tau,  or tau
        """
        # ensure the units are in angstrom
        _lamb = units.Quantity(lamb, units.angstrom).value
        #_lamb = val_in_unit('lamb', lamb, 'angstrom').magnitude

        if isinstance(_lamb, (float, np.floating)):
            _lamb = np.asarray([_lamb])
        else:
            _lamb = _lamb[:]

        c2 = -0.824 + 4.717 / Rv
        c1 = 2.030 - 3.007 * c2
        c3 = 3.23
        c4 = 0.41
        x0 = 4.596
        gamma = 0.99

        x = 1.e4 / _lamb
        k = np.zeros(np.size(x))

        # compute the UV portion of A(lambda)/E(B-V)
        xcutuv = 10000.0 / 2700.
        xspluv = 10000.0 / np.array([2700., 2600.])
        ind = np.where(x >= xcutuv)

        if np.size(ind) > 0:
            k[ind] = c1 + (c2 * x[ind]) + \
                     c3 * ((x[ind]) ** 2) / ( ((x[ind]) ** 2 -
                                               (x0 ** 2)) ** 2 +
                                              (gamma ** 2) * ((x[ind]) ** 2 ))
            yspluv = c1 + (c2 * xspluv) + c3 * ((xspluv) ** 2) / \
                     ( ((xspluv) ** 2 - (x0 ** 2)) ** 2 +
                       (gamma ** 2) * ((xspluv) ** 2 ))

            # FUV portion
            if not draine_extend:
                fuvind = np.where(x >= 5.9)
                k[fuvind] += c4 * (0.5392 * ((x[fuvind] - 5.9) ** 2) +
                                   0.05644 * ((x[fuvind] - 5.9) ** 3))

            k[ind] += Rv
            yspluv += Rv

        # Optical/NIR portion

        ind = np.where(x < xcutuv)
        if np.size(ind) > 0:
            xsplopir = np.zeros(7)
            xsplopir[0] = 0.0
            xsplopir[1: 7] = 10000.0 / np.array([26500.0, 12200.0, 6000.0,
                                                 5470.0, 4670.0, 4110.0])

            ysplopir = np.zeros(7)
            ysplopir[0: 3] = np.array([0.0, 0.26469, 0.82925]) * Rv / 3.1

            ysplopir[3: 7] = np.array([np.poly1d([2.13572e-04, 1.00270,
                                                  -4.22809e-01])(Rv),
                                       np.poly1d([-7.35778e-05, 1.00216,
                                                  -5.13540e-02])(Rv),
                                       np.poly1d([-3.32598e-05, 1.00184,
                                                  7.00127e-01])(Rv),
                                       np.poly1d([ 1.19456, 1.01707,
                                                   -5.46959e-03, 7.97809e-04,
                                       -4.45636e-05][::-1])(Rv)])

            tck = interpolate.splrep(np.hstack([xsplopir, xspluv]),
                                     np.hstack([ysplopir, yspluv]), k=3)
            k[ind] = interpolate.splev(x[ind], tck)

        # convert from A(lambda)/E(B-V) to A(lambda)/A(V)
        k /= Rv

        # FUV portion from Draine curves
        if draine_extend:
            fuvind = np.where(x >= 5.9)
            tmprvs = np.arange(2.,6.1,0.1)
            diffRv = Rv - tmprvs
            if min(abs(diffRv)) < 1e-8:
                dfname = libdir+'MW_Rv%s_ext.txt' % ("{0:.1f}".format(Rv))
                l_draine, k_draine = np.loadtxt(dfname, usecols=(0,1),
                                                unpack=True)
            else: 
                add, = np.where(diffRv < 0.)
                Rv1 = tmprvs[add[0]-1]
                Rv2 = tmprvs[add[0]]
                dfname = libdir+'MW_Rv%s_ext.txt' % ("{0:.1f}".format(Rv1))
                l_draine, k_draine1 = np.loadtxt(dfname, usecols=(0,1),
                                                 unpack=True)
                dfname = libdir+'MW_Rv%s_ext.txt' % ("{0:.1f}".format(Rv2))
                l_draine, k_draine2 = np.loadtxt(dfname, usecols=(0,1),
                                                 unpack=True)
                frac = diffRv[add[0]-1]/(Rv2-Rv1) 
                k_draine = (1. - frac)*k_draine1 + frac*k_draine2
            
            dind = np.where((1./l_draine) >= 5.9)
            k[fuvind] = interp(x[fuvind],1./l_draine[dind][::-1],
                               k_draine[dind][::-1])

        # set up the output
        if Alambda:
            return k * Av
        else:
            return k * Av * (np.log(10.) * 0.4)
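
For reference, a scalar walk-through of the UV segment above (constants copied from the method; the standalone form and the example wavelength are ours, not the source's):

# Evaluate the Fitzpatrick (1999) UV segment at 2200 A for Rv = 3.1
Rv = 3.1
c2 = -0.824 + 4.717 / Rv
c1 = 2.030 - 3.007 * c2
c3, c4, x0, gamma = 3.23, 0.41, 4.596, 0.99

x = 1.e4 / 2200.0  # inverse microns
k = c1 + c2 * x + c3 * x**2 / ((x**2 - x0**2)**2 + gamma**2 * x**2)
if x >= 5.9:       # FUV correction term (not triggered at 2200 A)
    k += c4 * (0.5392 * (x - 5.9)**2 + 0.05644 * (x - 5.9)**3)
k = (k + Rv) / Rv  # A(lambda)/E(B-V) -> A(lambda)/A(V)
print(k)           # ~3.05 for Av = 1, near the 2175 A bump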
Code example #59
import numpy as np

def compute_bit_error_rate(source, received):
    ''' Calculate the bit error rate between two equal-length bit sequences '''
    number_of_bits = np.size(source)
    return np.sum(np.bitwise_xor(source, received)) / number_of_bits
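
A quick self-check of the function (illustrative; assumes equal-length 0/1 integer arrays):

rng = np.random.default_rng(0)
tx = rng.integers(0, 2, size=1000)             # transmitted bits
flips = (rng.random(1000) < 0.05).astype(int)  # flip roughly 5% of them
rx = np.bitwise_xor(tx, flips)                 # received bits
print(compute_bit_error_rate(tx, rx))          # close to 0.05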
Code example #60
File: hmm.py Project: jklzpg/chord-bounty
(PI,A,B) = initialize(chroma, templates, nested_cof)
(path, states) = viterbi(PI,A,B)

#normalize path
for i in range(nFrames):
	path[:,i] /= sum(path[:,i])

#choose most likely chord - with max value in 'path'
final_chords = []
indices = np.argmax(path,axis=0)
final_states = np.zeros(nFrames)


#find no chord zone
set_zero = np.where(np.max(path,axis=0) < 0.3*np.max(path))[0]
if np.size(set_zero) != 0:
	indices[set_zero] = -1

#identify chords
for i in range(nFrames):
	if indices[i] == -1:
		final_chords.append('NC')
	else:
		final_states[i] = states[indices[i],i]
		final_chords.append(chords[int(final_states[i])])

print('Time(s)', 'Chords')
for i in range(nFrames):
	print(timestamp[i], final_chords[i])
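
A toy check of the 'no chord' rule used above: frames whose peak probability falls below 30% of the global peak get index -1 and are labelled 'NC' (the matrix values here are made up and unnormalized, unlike the real per-frame-normalized path):

import numpy as np

path = np.array([[0.70, 0.05, 0.60],
                 [0.20, 0.10, 0.30],
                 [0.10, 0.05, 0.10]])
indices = np.argmax(path, axis=0)
set_zero = np.where(np.max(path, axis=0) < 0.3 * np.max(path))[0]
if np.size(set_zero) != 0:
    indices[set_zero] = -1
print(indices)  # [ 0 -1  0]: the middle frame becomes 'NC'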