Example #1
def estCumPos(pos,chrom,offset = 20000000):
    '''
    Compute the cumulative position of each variant given the position and the chromosome.
    Also return the starting cumulative position of each chromosome.

    Args:
        pos:        scipy.array of basepair positions (on the chromosome)
        chrom:      scipy.array of chromosomes
        offset:     offset between chromosomes for cumulative position (default 20000000 bp)

    Returns:
        cum_pos:    scipy.array of cumulative positions
        chrom_pos:  scipy.array of starting cumulative positions for each chromosome
    '''
    chromvals = SP.unique(chrom)#SP.unique is always sorted
    chrom_pos=SP.zeros_like(chromvals)#get the starting position of each Chrom
    cum_pos = SP.zeros_like(pos)#get the cum_pos of each variant.
    maxpos_cum=0
    for i,mychrom in enumerate(chromvals):
        chrom_pos[i] = maxpos_cum
        i_chr=chrom==mychrom
        maxpos = pos[i_chr].max()+offset
        maxpos_cum+=maxpos
        cum_pos[i_chr]=chrom_pos[i]+pos[i_chr]
    return cum_pos,chrom_pos
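
A minimal usage sketch (hypothetical values; assumes the legacy scipy-as-SP import used by the function body):

import scipy as SP
pos = SP.array([100, 500, 200, 800])
chrom = SP.array([1, 1, 2, 2])
cum_pos, chrom_pos = estCumPos(pos, chrom, offset=1000)
# chromosome 2 starts at 500 + 1000 = 1500, so cum_pos == [100, 500, 1700, 2300]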
Example #2
def calc_BB_Y_2s_ham_3s(A_m1, A_p2, C, C_m1, Vlh, Vrh_p1, l_m2, r_p2, l_s_m1, l_si_m1, r_s_p1, r_si_p1):
    Vr_p1 = sp.transpose(Vrh_p1, axes=(0, 2, 1)).conj()
    
    Vrri_p1 = sp.zeros_like(Vr_p1)
    try:
        # r_si_p1 may be a special matrix object exposing dot_left()...
        for s in xrange(Vrri_p1.shape[0]):
            Vrri_p1[s] = r_si_p1.dot_left(Vr_p1[s])
    except AttributeError:
        # ...otherwise fall back to plain right-multiplication by r_si_p1.
        for s in xrange(Vrri_p1.shape[0]):
            Vrri_p1[s] = Vr_p1[s].dot(r_si_p1)
    
    Vl = sp.transpose(Vlh, axes=(0, 2, 1)).conj()        
    liVl = sp.zeros_like(Vl)            
    for s in xrange(liVl.shape[0]):
        liVl[s] = l_si_m1.dot(Vl[s])

    Y = sp.zeros((Vlh.shape[1], Vrh_p1.shape[2]), dtype=Vrh_p1.dtype)
    if A_p2 is not None:
        for s in xrange(C.shape[0]):
            Y += Vlh[s].dot(l_s_m1.dot(eps_r_op_2s_C12(r_p2, C[s], Vrri_p1, A_p2)))
    if A_m1 is not None:
        for u in xrange(C_m1.shape[2]):
            Y += eps_l_op_2s_A1_A2_C34(l_m2, A_m1, liVl, C_m1[:, :, u]).dot(r_s_p1.dot(Vrh_p1[u]))

    etaBB_sq = mm.adot(Y, Y)
    
    return Y, etaBB_sq
Example #3
def estCumPos(position,offset=0,chrom_len=None):
    '''
    Compute the cumulative position of each variant given the position and the chromosome.
    Also return the starting cumulative position of each chromosome.

    Args:
        position:   pandas DataFrame of basepair positions (key='pos') and chromosome values (key='chrom')
                    The DataFrame will be updated with field 'pos_cum'
        offset:     offset between chromosomes for cumulative position (default 0 bp)
        chrom_len:  vector with predefined chromosome lengths

    Returns:
        position:   augmented position DataFrame where cumulative positions are defined
        chrom_pos:  numpy.array of starting cumulative positions for each chromosome
    '''
    RV = position.copy()
    chromvals = sp.unique(position['chrom'])# sp.unique is always sorted
    chrom_pos_cum = sp.zeros_like(chromvals)#get the starting position of each Chrom
    if 'pos_cum' not in position:
        RV["pos_cum"] = sp.zeros_like(position['pos'])#get the cum_pos of each variant.
    pos_cum = RV['pos_cum'].values
    maxpos_cum=0
    for i,mychrom in enumerate(chromvals):
        chrom_pos_cum[i] = maxpos_cum
        i_chr=position['chrom']==mychrom
        if chrom_len is None:
            maxpos = position['pos'][i_chr].max()+offset
        else:
            maxpos = chrom_len[i]+offset
        pos_cum[i_chr.values]=maxpos_cum+position.loc[i_chr,'pos']
        maxpos_cum+=maxpos      
    
    return RV,chrom_pos_cum
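
A hypothetical usage sketch for this pandas variant, with the 'chrom' and 'pos' columns the docstring requires:

import pandas as pd
position = pd.DataFrame({'chrom': [1, 1, 2], 'pos': [100, 500, 200]})
position, chrom_pos = estCumPos(position, offset=1000)
# position['pos_cum'] is now [100, 500, 1700] and chrom_pos is [0, 1500]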
Example #4
 def fitKronApprox(a):
     # S, K and Gamma come from the enclosing scope; a holds the weights
     # for the S terms followed by the weights for the K terms.
     Sbg = SP.zeros_like(S[0])
     Kbg = SP.zeros_like(K[0])
     for i in range(len(S)):  Sbg += a[i]*S[i]
     for i in range(len(K)):  Kbg += a[i+len(S)]*K[i]
     Gamma1 = SP.kron(Sbg,Kbg)
     return ((Gamma-Gamma1)**2).sum()
Example #5
def makeinputh5(Iono,basedir):
    """This will make a h5 file for the IonoContainer that can be used as starting
    points for the fitter. The ionocontainer taken will be average over the x and y dimensions
    of space to make an average value of the parameters for each altitude.
    Inputs
    Iono - An instance of the Ionocontainer class that will be averaged over so it can
    be used for fitter starting points.
    basdir - A string that holds the directory that the file will be saved to.
    """
    # Get the parameters from the original data
    Param_List = Iono.Param_List
    dataloc = Iono.Cart_Coords
    times = Iono.Time_Vector
    velocity = Iono.Velocity
    zlist,idx = sp.unique(dataloc[:,2],return_inverse=True)
    siz = list(Param_List.shape[1:])
    vsiz = list(velocity.shape[1:])

    datalocsave = sp.column_stack((sp.zeros_like(zlist),sp.zeros_like(zlist),zlist))
    outdata = sp.zeros([len(zlist)]+siz)
    outvel = sp.zeros([len(zlist)]+vsiz)
    #  Do the averaging across space
    for izn,iz in enumerate(zlist):
        arr = sp.argwhere(idx==izn)
        outdata[izn] = sp.mean(Param_List[arr],axis=0)
        outvel[izn] = sp.mean(velocity[arr],axis=0)

    Ionoout = IonoContainer(datalocsave,outdata,times,Iono.Sensor_loc,ver=0,
                            paramnames=Iono.Param_Names, species=Iono.Species,velocity=outvel)
    Ionoout.saveh5(basedir/'startdata.h5')  # note: requires basedir to be a pathlib.Path, not a str
Example #6
def makeinputh5(Iono,basedir):
    basedir = Path(basedir).expanduser()

    Param_List = Iono.Param_List
    dataloc = Iono.Cart_Coords
    times = Iono.Time_Vector
    velocity = Iono.Velocity
    zlist,idx = sp.unique(dataloc[:,2],return_inverse=True)
    siz = list(Param_List.shape[1:])
    vsiz = list(velocity.shape[1:])

    datalocsave = sp.column_stack((sp.zeros_like(zlist),sp.zeros_like(zlist),zlist))
    outdata = sp.zeros([len(zlist)]+siz)
    outvel = sp.zeros([len(zlist)]+vsiz)

    for izn,iz in enumerate(zlist):
        arr = sp.argwhere(idx==izn)
        outdata[izn]=sp.mean(Param_List[arr],axis=0)
        outvel[izn]=sp.mean(velocity[arr],axis=0)

    Ionoout = IonoContainer(datalocsave,outdata,times,Iono.Sensor_loc,ver=0,
                            paramnames=Iono.Param_Names, species=Iono.Species,velocity=outvel)


    ofn = basedir/'startdata.h5'
    print('writing {}'.format(ofn))
    Ionoout.saveh5(str(ofn))
Example #7
def rankStandardizeNormal(X):
	"""
	Gaussianize X: [samples x phenotypes]
	- each phenotype is converted to ranks and transformed back to normal using the inverse CDF
	"""
	RV = SP.zeros_like(X)
	for i in xrange(X.shape[1]):
		x = X[:,i]
		i_nan = SP.isnan(x)
		rank = st.rankdata(x[~i_nan])
		#divide by (N+1), which yields uniform [0,1]
		rank /= ((~i_nan).sum()+1)
		#apply inverse gaussian cdf
		RV[~i_nan,i] = SP.sqrt(2) * special.erfinv(2*rank-1)
		RV[i_nan,i] = x[i_nan]
	return RV
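
A minimal sketch of calling rankStandardizeNormal, assuming scipy is imported as SP, scipy.stats as st and scipy.special as special (as the function body requires):

import scipy as SP
Y = SP.rand(100, 3)   # 100 samples x 3 phenotypes
Y[0, 0] = SP.nan      # NaN entries are passed through untouched
Y_gauss = rankStandardizeNormal(Y)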
Example #8
 def plot_drainage_curve(self,
                         pore_volume='volume',
                         throat_volume='volume',pore_label='all',throat_label='all'):
       r"""
       Plot drainage capillary pressure curve
       """
       try:
         PcPoints = sp.unique(self['pore.inv_Pc'])
       except KeyError:
         raise Exception('Cannot plot drainage curve: ordinary percolation simulation has not been run')
       pores=self._net.pores(labels=pore_label)
       throats = self._net.throats(labels=throat_label)
       Snwp_t = sp.zeros_like(PcPoints)
       Snwp_p = sp.zeros_like(PcPoints)
       Pvol = self._net['pore.'+pore_volume]
       Tvol = self._net['throat.'+throat_volume]
       Pvol_tot = sum(Pvol)
       Tvol_tot = sum(Tvol)
       for i in range(0,sp.size(PcPoints)):
           Pc = PcPoints[i]
           Snwp_p[i] = sum(Pvol[self._p_inv[pores]<=Pc])/Pvol_tot
           Snwp_t[i] = sum(Tvol[self._t_inv[throats]<=Pc])/Tvol_tot
       if sp.mean(self._phase_inv["pore.contact_angle"]) < 90:
           Snwp_p = 1 - Snwp_p
           Snwp_t = 1 - Snwp_t
           PcPoints *= -1
       plt.plot(PcPoints,Snwp_p,'r.-')
       plt.plot(PcPoints,Snwp_t,'b.-')
       r'''
       TODO: Add legend to distinguish the pore and throat curves
       '''
       #plt.xlim(xmin=0)
       plt.show()
Example #9
 def _get_indices(self,element,labels,return_indices,mode):
     r'''
     This is the actual method for getting indices, but should not be called
     directly.  
     '''
     if mode == 'union':
         union = sp.zeros_like(self._get_info(element=element,label='all'),dtype=bool)
         for item in labels: #iterate over labels list and collect all indices
              union = union + self._get_info(element=element,label=item)
         ind = union
     elif mode == 'intersection':
         intersect = sp.ones_like(self._get_info(element=element,label='all'),dtype=bool)
         for item in labels: #iterate over labels list and collect all indices
              intersect = intersect*self._get_info(element=element,label=item)
         ind = intersect
     elif mode == 'not_intersection':
         not_intersect = sp.zeros_like(self._get_info(element=element,label='all'),dtype=int)
         for item in labels: #iterate over labels list and collect all indices
             info = self._get_info(element=element,label=item)
             not_intersect = not_intersect + sp.int8(info)
         ind = (not_intersect == 1)
     elif mode == 'none':
         none = sp.zeros_like(self._get_info(element=element,label='all'),dtype=int)
         for item in labels: #iterate over labels list and collect all indices
             info = self._get_info(element=element,label=item)
             none = none - sp.int8(info)
         ind = (none == 0)
      if return_indices: ind = sp.where(ind)[0]
     return ind
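
A standalone sketch of the four label-combination modes above, using plain boolean arrays in place of the label masks returned by _get_info:

import scipy as sp
a = sp.array([True, True, False, False])
b = sp.array([True, False, True, False])
union = a + b                                        # in either label
intersection = a * b                                 # in both labels
not_intersection = (sp.int8(a) + sp.int8(b)) == 1    # in exactly one label
none = (-sp.int8(a) - sp.int8(b)) == 0               # in neither label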
Example #10
def porosity_profile(network,
                      fig=None, axis=2):

    r'''
    Compute and plot the porosity profile in all three dimensions

    Parameters
    ----------
    network : OpenPNM Network object
    axis : integer; 0 for x-axis, 1 for y-axis, 2 for z-axis (default)

    Notes
    -----
    the area of the porous medium at any position is calculated from the
    maximum pore coordinates in each direction

    '''
    if fig is None:
        fig = _plt.figure()
    L_x = _sp.amax(network['pore.coords'][:,0]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    L_y = _sp.amax(network['pore.coords'][:,1]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    L_z = _sp.amax(network['pore.coords'][:,2]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    if axis == 0:
        xlab = 'x-direction'
        area = L_y*L_z
    elif axis == 1:
        xlab = 'y-direction'
        area = L_x*L_z
    else:
        axis = 2
        xlab = 'z-direction'
        area = L_x*L_y
    n_max = _sp.amax(network['pore.coords'][:,axis]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    steps = _sp.linspace(0,n_max,100,endpoint=True)
    vals = _sp.zeros_like(steps)
    p_area = _sp.zeros_like(steps)
    t_area = _sp.zeros_like(steps)

    rp = ((21/88.0)*network['pore.volume'])**(1/3.0)  # pore radius from volume: r = (3V/(4*pi))**(1/3), with pi ~ 22/7
    p_upper = network['pore.coords'][:,axis] + rp
    p_lower = network['pore.coords'][:,axis] - rp
    TC1 = network['throat.conns'][:,0]
    TC2 = network['throat.conns'][:,1]
    t_upper = network['pore.coords'][:,axis][TC1]
    t_lower = network['pore.coords'][:,axis][TC2]

    for i in range(0,len(steps)):
        p_temp = (p_upper > steps[i])*(p_lower < steps[i])
        t_temp = (t_upper > steps[i])*(t_lower < steps[i])
        p_area[i] = sum((22/7.0)*(rp[p_temp]**2 - (network['pore.coords'][:,axis][p_temp]-steps[i])**2))
        t_area[i] = sum(network['throat.area'][t_temp])
        vals[i] = (p_area[i]+t_area[i])/area
    yaxis = vals
    xaxis = steps/n_max
    _plt.plot(xaxis,yaxis,'bo-')
    _plt.xlabel(xlab)
    _plt.ylabel('Porosity')
    fig.show()
Example #11
 def cut_to_stump(self):
     self.max_depth = 0
     self.node_ind = 0
     self.nodes[self.node_ind] = 0
     self.start_index[self.node_ind] = 0
     self.end_index[self.node_ind] = self.subsample.size
     self.num_nodes = 1
     self.num_leafs = 0
     self.left_child = SP.zeros_like(self.left_child)
     self.right_child = SP.zeros_like(self.right_child)
Example #12
def predict(data, coeffs):
    """
    Calculate an autoregressive linear prediction given the signal
    and the prediction coefficients.

    Parameters
    ----------
    data : numpy array
        The signal.
    coeffs : numpy array
        The prediction coefficients.

    Returns
    -------
    data : numpy array
        The predicted signal

    Notes
    -----

    * The first coefficient, 1, is assumed to be left out.

    Prediction works as follows:

          P = a1+ a2+ a3+ a4

          #   _   _   _   _
          #   #   _   _   _
          #   #   #   _   _
          # = # + # + # + _
          _   #   #   #   #
          _   _   #   #   #
          _   _   _   #   #
          _   _   _   _   #

    Where # is a number and _ is a "don't care"

    This means

     1. Create empty pred vector, padded by the number of coefficients
        at the end
     2. Pad original values by number of coefficients at both ends
     3. Crop data in each step accordingly
     4. Crop prediction

    """
    coeffs *= -1  # note: negates the caller's array in place
    pred = scipy.zeros_like(data)
    tmp = numpy.hstack((scipy.zeros_like(coeffs), data))

    for j in range(0, coeffs.size):
        offset = coeffs.size - j - 1
        pred = pred + coeffs[j] * tmp[offset:offset + len(pred)]

    return pred[:len(data)]
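
A hypothetical usage sketch; note that predict negates coeffs in place, so pass a copy if the original array is needed afterwards:

import scipy
data = scipy.array([1.0, 0.9, 0.7, 0.4, 0.1])
coeffs = scipy.array([-1.6, 0.8])   # prediction coefficients, leading 1 left out
pred = predict(data, coeffs.copy())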
Example #13
def par_fixed_effect(tc, X, oob, depth):
    import scipy as SP
    dview = tc[:]
    dview.block = True
    results = dview.apply(fixed_effect, *[X, oob, depth])
    fixed_sum = SP.zeros_like(results[0][0])
    count = SP.zeros_like(results[0][1])
    for res in results:
        fixed_sum += res[0]
        count += res[1]
    return fixed_sum, count
Example #14
def par_get_variable_scores(tc):
    import scipy as SP
    dview = tc[:]
    dview.block = True
    results = dview.apply(get_variable_scores)
    var_used = SP.zeros_like((results[0])[0])
    log_importance = SP.zeros_like(var_used)
    for result in results:
        var_used +=  result[0]
        log_importance += result[1]
    return var_used, log_importance
Example #15
def ranktrafo(data):
    X = data.values[:, None]
    RV = sp.zeros_like(X)
    for i in xrange(X.shape[1]):
        x =  X[:,i]
        rank = sp.stats.rankdata(x)
        rank /= (X.shape[0]+1)
        RV[:,i] = sp.sqrt(2) * sp.special.erfinv(2*rank-1)

    return RV.flatten()
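
A hypothetical usage sketch (ranktrafo expects an object with a .values array, e.g. a pandas Series, and assumes scipy.stats and scipy.special resolve under sp):

import pandas as pd
scores = pd.Series([0.1, 0.5, 0.3, 0.9])
gauss = ranktrafo(scores)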
Example #16
File: rnn.py Project: Yevgnen/RNN
    def bptt(self, x, t):
        """Back propagation throuth time of a sample.

        Reference: [1] Deep Learning, Ian Goodfellow, Yoshua Bengio and Aaron Courville, P385.
        """
        dU = sp.zeros_like(self.U)
        dW = sp.zeros_like(self.W)
        db = sp.zeros_like(self.b)
        dV = sp.zeros_like(self.V)
        dc = sp.zeros_like(self.c)

        tau = len(x)
        cells = self.forward_propagation(x)

        dh = sp.zeros(self.n_hiddens)
        for i in range(tau - 1, -1, -1):
            # FIXME:
            # 1. Should not use cell[i] since there maybe multiple hidden layers.
            # 2. Using exponential family as output should not be specified.
            time_input = x[i]
            one_hot_t = sp.zeros(self.n_features)
            one_hot_t[t[i]] = 1

            # Cell of time i
            cell = cells[i]
            # Hidden layer of current cell
            hidden = cell[0]
            # Output layer of current cell
            output = cell[1]
            # Hidden layer of time i + 1
            prev_hidden = cells[i - 1][0] if i - 1 >= 0 else None
            # Hidden layer of time i - 1
            next_hidden = cells[i + 1][0] if i + 1 < tau else None

            # Error of current time i
            da = hidden.backward()
            next_da = next_hidden.backward() if next_hidden is not None else sp.zeros(self.n_hiddens)
            prev_h = prev_hidden.h if prev_hidden is not None else sp.zeros(self.n_hiddens)

            # FIXME: The error function should not be specified here
            # do = sp.dot(output.backward().T, -one_hot_t / output.y)
            do = output.y - one_hot_t
            dh = sp.dot(sp.dot(self.W.T, sp.diag(next_da)), dh) + sp.dot(self.V.T, do)

            # Gradient back propagation through time
            dc += do
            db += da * dh
            dV += sp.outer(do, hidden.h)
            dW += sp.outer(da * dh, prev_h)
            dU[:, time_input] += da * dh

        return (dU, dW, db, dV, dc)
Example #17
def exp_gauss_warp(X, n, l0, *msb):
    """Length scale function which is an exponential of a sum of Gaussians.

    The centers and widths of the Gaussians are free parameters.

    The length scale function is given by

    .. math::

        l = l_0 \exp\left ( \sum_{i=1}^{N}\beta_i\exp\left ( -\frac{(x-\mu_i)^2}{2\sigma_i^2} \right ) \right )

    The number of parameters is equal to three times the number of Gaussians
    plus 1 (for :math:`l_0`). This function is inspired by what Gibbs used in
    his PhD thesis.

    Parameters
    ----------
    X : 1d or 2d array of float
        The points to evaluate the function at. If 2d, it should only have
        one column (but this is not checked to save time).
    n : int
        The derivative order to compute. Used for all `X`.
    l0 : float
        The covariance length scale at the edges of the domain.
    *msb : floats
        Means, standard deviations and weights for each Gaussian, in that order.
    """
    X = scipy.asarray(X, dtype=float)
    msb = scipy.asarray(msb, dtype=float)
    n_gauss = len(msb) // 3
    mm = msb[:n_gauss]
    ss = msb[n_gauss:2 * n_gauss]
    bb = msb[2 * n_gauss:]

    # This is done with for-loops, because trying to get fancy with
    # broadcasting was being too memory-intensive for some reason.
    if n == 0:
        l = scipy.zeros_like(X)
        for m, s, b in zip(mm, ss, bb):
            l += b * scipy.exp(-(X - m)**2.0 / (2.0 * s**2.0))
        l = l0 * scipy.exp(l)
        return l
    elif n == 1:
        l1 = scipy.zeros_like(X)
        l2 = scipy.zeros_like(X)
        for m, s, b in zip(mm, ss, bb):
            term = b * scipy.exp(-(X - m)**2.0 / (2.0 * s**2.0))
            l1 += term
            l2 += term * (X - m) / s**2.0
        l = -l0 * scipy.exp(l1) * l2
        return l
    else:
        raise NotImplementedError("Only n <= 1 is supported!")
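
A hypothetical usage sketch with a single Gaussian bump (mean 0, std 1, weight 0.5) and no derivative:

import scipy
X = scipy.linspace(-3, 3, 7)
l = exp_gauss_warp(X, 0, 1.0, 0.0, 1.0, 0.5)   # n=0, l0=1.0, then mu, sigma, beta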
Example #18
 def check_consistency(self):
     r'''
     Checks to see if the current geometry conflicts with any other geometry
     '''
     temp = sp.zeros_like(self._net.get_pore_info(label=self.name),dtype=int)
     for item in self._net._geometry:
         temp = temp + sp.array(self._net.get_pore_info(label=item.name),dtype=int)
     print('Geometry labels overlap in', sp.sum(temp>1),'pores')
     print('Geometry not yet applied to',sp.sum(temp==0),'pores')
     
     temp = sp.zeros_like(self._net.get_throat_info(label=self.name),dtype=int)
     for item in self._net._geometry:
         temp = temp + sp.array(self._net.get_throat_info(label=item.name),dtype=int)
     print('Geometry labels overlap in', sp.sum(temp>1),'throats')
     print('Geometry not yet applied to',sp.sum(temp==0),'throats')
Example #19
	def ray_update_worker(args):
		angle, p, reco, chunk, calc_wij_sum = args
		upd = sp.zeros_like(reco)
		wij_sum = sp.zeros_like(reco)
		for j in chunk:
			ray = sp.zeros_like(reco)
			ray[:,j]=1
			wij = spn.rotate(ray, angle, reshape=False)
			upd += ((p[j]-sp.sum(wij*reco))/sp.sum(wij**2.0))*wij
			if calc_wij_sum:
				wij_sum+=wij
		if calc_wij_sum:
			return upd, wij_sum
		else:
			return upd, None
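
A hypothetical driver for one chunk of rays (assumes scipy.ndimage is imported as spn, as the worker body requires):

import scipy as sp
reco = sp.ones((32, 32))   # current reconstruction estimate
p = sp.rand(32)            # measured projection at this angle
upd, wij_sum = ray_update_worker((45.0, p, reco, range(32), True))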
Example #20
 def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
     if (ni > 1).any() or (nj > 1).any():
         raise ValueError("Derivative orders greater than one are not supported!")
     wXi = scipy.zeros_like(Xi)
     wXj = scipy.zeros_like(Xj)
     for d in range(0, self.num_dim):
         wXi[:, d] = self.w(Xi[:, d], d, 0)
         wXj[:, d] = self.w(Xj[:, d], d, 0)
     out = self.k(wXi, wXj, ni, nj, hyper_deriv=hyper_deriv, symmetric=symmetric)
     for d in range(0, self.num_dim):
         first_deriv_mask_i = ni[:, d] == 1
         first_deriv_mask_j = nj[:, d] == 1
         out[first_deriv_mask_i] *= self.w(Xi[first_deriv_mask_i, d], d, 1)
         out[first_deriv_mask_j] *= self.w(Xj[first_deriv_mask_j, d], d, 1)
     return out
Example #21
def G_int(x_in):
    """Polynomial approximation to G(x) (Coulomb integral)
    Defined in Appendix A of Mares and Chuang, J. Appl. Phys. 74, 1388 (1993)

    Keyword arguments:
       x_in -- Normalized 2|ze-zh|/lambda
    """
    
    A = -8.9707E-1
    B = -2.5262E-1
    C = 2.2576E-1
    D = 3.2373E-2
    E = -4.1369E-4
    
    G = sp.zeros_like(x_in)
    G[x_in < 0] = float('nan')
    G[x_in == 0] = 1.0
    
    ind = (x_in > 0) * (x_in <= 6.8)
    x = x_in[ind]
    aux = sp.log(x/2.0)    
    G[ind] = 1.0 + A * x + (B * aux + C) * x**2 +\
                            D * x**3 + E * x**4 * aux
    ind = x_in > 6.8    
    x = x_in[ind]
    G[ind] = 1.0/x - 3.0/x**3 + 45.0/x**5 - 1575.0/x**7
    
    return G
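
A minimal usage sketch exercising all three branches (negative, zero, and positive arguments):

import scipy as sp
x = sp.array([-1.0, 0.0, 0.5, 10.0])
G = G_int(x)   # nan, 1.0, polynomial approximation, asymptotic series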
Example #22
    def _read_iop_from_file(self, file_name):
        """
        Generic IOP reader that interpolates the iop to the common wavelengths defined in the constructor

        returns: interpolated iop
        """
        lg.info('Reading :: ' + file_name + ' :: and interpolating to ' + str(self.wavelengths))

        if os.path.isfile(file_name):
            iop_reader = csv.reader(open(file_name), delimiter=',', quotechar='"')
            wave = scipy.float32(iop_reader.next())
            iop = scipy.zeros_like(wave)
            for row in iop_reader:
                iop = scipy.vstack((iop, row))

            iop = scipy.float32(iop[1:, :])  # drop the first row of zeros
        else:
            lg.exception('Problem reading file :: ' + file_name)
            raise IOError

        try:
            int_iop = scipy.zeros((iop.shape[0], self.wavelengths.shape[1]))
            for i_iter in range(0, iop.shape[0]):
                # r = scipy.interp(self.wavelengths[0, :], wave, iop[i_iter, :])
                int_iop[i_iter, :] = scipy.interp(self.wavelengths, wave, iop[i_iter, :])
            return int_iop
        except IOError:
            lg.exception('Error interpolating IOP to common wavelength')
            return -1
Example #23
 def _compute_dy_dtau(self, tau, b, r2l2):
     r"""Evaluate the derivative of the inner argument of the Matern kernel.
     
     Uses Faa di Bruno's formula to take the derivative of
     
     .. math::
     
         y = 1 + \frac{1}{2\alpha}\sum_i \frac{\tau_i^2}{l_i^2}
     
     Parameters
     ----------
     tau : :py:class:`Matrix`, (`M`, `D`)
         `M` inputs with dimension `D`.
     b : :py:class:`Array`, (`P`,)
         Block specifying derivatives to be evaluated.
     r2l2 : :py:class:`Array`, (`M`,)
         Precomputed anisotropically scaled distance.
     
     Returns
     -------
     dy_dtau : :py:class:`Array`, (`M`,)
         Specified derivative at specified locations.
     """
     if len(b) == 0:
         return self._compute_y(tau)
     elif len(b) == 1:
         return 1.0 / self.params[1] * tau[:, b[0]] / (self.params[2 + b[0]])**2.0
     elif len(b) == 2 and b[0] == b[1]:
         return 1.0 / (self.params[1] * (self.params[2 + b[0]])**2.0)
     else:
         return scipy.zeros_like(r2l2)
Example #24
    def eps_l(self, n, x, out=None):
        """Implements the left epsilon map
        
        FIXME: Ref.
        
        Parameters
        ----------
        n : int
            The site number.
        x : ndarray
            The argument matrix. For example, using l[n - 1] gives a result l[n]
        out : ndarray
            A matrix to hold the result (with the same dimensions as l[n]). May be None.
    
        Returns
        -------
        res : ndarray
            The resulting matrix.
        """
        if out is None:
            out = sp.zeros_like(self.l[n])
        else:
            out.fill(0.)

        for s in xrange(self.q[n]):
            out += m.mmul(m.H(self.A[n][s]), x, self.A[n][s])
        return out
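
The loop above computes out = sum_s A[n][s]^H . x . A[n][s]; a standalone sketch of the same contraction with plain ndarrays and hypothetical dimensions:

import scipy as sp
q, D = 2, 4
A = sp.rand(q, D, D)
x = sp.eye(D)
out = sum(A[s].conj().T.dot(x).dot(A[s]) for s in range(q))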
Example #25
 def expect_1s_diss(self,op,n):
     """Applies a single-site operator to a single site and returns
     the value after the change. In contrast to
     mps_gen.apply_op_1s, this routine does not change the state itself.
     
     Also, this does not perform self.update().
     
     Parameters
     ----------
     op : ndarray or callable
         The single-site operator. See self.expect_1s().
     n: int
         The site to apply the operator to.
     """
     if callable(op):
         op = sp.vectorize(op, otypes=[sp.complex128])
         op = sp.fromfunction(op, (self.q[n], self.q[n]))
         
     newAn = sp.zeros_like(self.A[n])
     
     for s in xrange(self.q[n]):
         for t in xrange(self.q[n]):
             newAn[s] += self.A[n][t] * op[s, t]
             
     return newAn
Example #26
 def run(self):
     #See if setup has been run
     try: capillary_pressure = self._p_cap
     except AttributeError:
         raise Exception('setup has not been run, cannot proceed!')
     #Create a pore and throat conditions list to store inv_val at which each is invaded
     self._p_inv = sp.zeros((self._net.num_pores(),))
     self._p_seq = sp.zeros_like(self._p_inv)
     self._t_inv = sp.zeros((self._net.num_throats(),))
     self._t_seq = sp.zeros_like(self._t_inv)
     #Determine the invasion pressures to apply
     self._t_cap = self._net.get_throat_data(phase=self._fluid_inv,prop=capillary_pressure)
     min_p = sp.amin(self._t_cap)*0.98  # nudge min_p down slightly
     max_p = sp.amax(self._t_cap)*1.02  # bump max_p up slightly
     self._inv_points = sp.logspace(sp.log10(min_p),sp.log10(max_p),self._npts)
     self._do_outer_iteration_stage()
Example #27
    def forces(self):
        """ get the forces between cells, as array, both from links
            and from the native force_func
        """
        pos = self.get_pos_arr(force=True)

        force_arr = sp.zeros_like(pos)

        for link in self.links:
            force = link.force
            force_arr[link.one.index] += force
            force_arr[link.two.index] -= force


        kdtree = self._get_kdtree(force=True)
        for i,j in kdtree.query_pairs(self.xi*1.0):
            
            force = self.force_func2(self.cells[i], self.cells[j] )
            #disp = self.cells[i].pos - self.cells[j].pos
            #L = norm(disp)
            #force = 2 * self.a**4 * ( 2 * self.xi**2 - 3 * self.xi * L + L**2 )/( self.xi**2 * L**6 ) * disp
            force_arr[i] += force
            force_arr[j] -= force

        return sp.nan_to_num(force_arr)
Example #28
 def apply_op_1s(self, op, n, do_update=True):
     """Applies a single-site operator to a single site.
     
     By default, this performs self.update(), which also restores
     state normalization.        
     
     Parameters
     ----------
     op : ndarray or callable
         The single-site operator. See self.expect_1s().
     n: int
         The site to apply the operator to.
     do_update : bool
         Whether to update after applying the operator.
     """
     if callable(op):
         op = sp.vectorize(op, otypes=[sp.complex128])
         op = sp.fromfunction(op, (self.q[n], self.q[n]))
         
     newAn = sp.zeros_like(self.A[n])
     
     for s in xrange(self.q[n]):
         for t in xrange(self.q[n]):
             newAn[s] += self.A[n][t] * op[s, t]
             
     self.A[n] = newAn
     
     if do_update:
         self.update()
Example #29
def coarseness(img, k):
    # compute average over all 0,..,k-1 regions of side-length 2^k-1
    A = __coarsness_average(img, k)

    # compute differences between pairs of non-overlapping neighbourhoods
    E = scipy.zeros([img.ndim] + list(A.shape), dtype=scipy.float_) # matrix holding computed differences in all directions

    for dim in range(img.ndim):
        for nbh in range(k):
            shape = img.ndim * [1]
            shape[dim] = 2 * int(math.pow(2, nbh)) + 1
            footprint = scipy.zeros(shape, dtype=scipy.bool_)
            idx = img.ndim * [0]
            footprint[tuple(idx)] = 1
            idx[dim] = -1
            footprint[tuple(idx)] = 1
            generic_filter(A[nbh], lambda x: abs(x[0]- x[1]), output=E[dim][nbh], footprint=footprint, mode='mirror')
            
    # compute for each voxel the k value that led to the highest E (regardless of direction)
    # note: the loop below assumes a 3D image
    S = scipy.zeros_like(img)

    for x in range(S.shape[0]):
        for y in range(S.shape[1]):
            for z in range(S.shape[2]):
                maxv = 0 
                maxk = 0
                for dim in range(img.ndim):
                    for nbh in range(k):
                        if E[dim][nbh][x,y,z] > maxv:
                            maxv = E[dim][nbh][x,y,z]
                            maxk = nbh
                S[x,y,z] = maxk
    
    return S
Example #30
def estimate_q_values(PV,m=None,pi=1):
    """estimate q vlaues from a list of Pvalues
    this algorithm is taken from Storey, significance testing for genomic ...
    m: number of tests, (if not len(PV)), pi: fraction of expected true null (1 is a conservative estimate)
    originally written by Oliver Stegel from MPI and edited by Vipin
    """
    if m is None:
        m = len(PV)
    lPV = len(PV)
    #1. sort pvalues
    PV = PV.squeeze()
    IPV = PV.argsort()
    PV  = PV[IPV]
    #2. estimate lambda
    if pi is None:
        lrange = sp.linspace(0.05,0.95,max(lPV//100,10))
        pil    = sp.double((PV[:,sp.newaxis]>lrange).sum(axis=0))/lPV
        pilr   = pil/(1-lrange)
        #ok, I think for SNPs this is pretty useless, pi is close to 1!
        pi =1
        #if there is something useful in there use the something close to 1
        if pilr[-1]<1:
            pi = pilr[-1]
    #3. initialise q values
    QV_ = pi * m/lPV* PV
    #4. update estimate
    for i in xrange(lPV-2,0,-1):
        QV_[i] = min(pi*m*PV[i]/(i+1),QV_[i+1])
    #5. invert sorting
    QV = sp.zeros_like(PV)
    QV[IPV] = QV_
    return QV
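
A hypothetical usage sketch; for uniform (null) p-values the q-values should come out close to 1:

import scipy as sp
PV = sp.rand(1000)
QV = estimate_q_values(PV)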
Example #31
    def findLocalOptimum(self,
                         fast=False,
                         scales0=None,
                         fixed0=None,
                         init_method=None,
                         termx=0,
                         n_times=10,
                         perturb=True,
                         pertSize=1e-3,
                         verbose=True,
                         lambd=None):
        """
        Train the model using the specified initialization strategy
        
        Args:
            fast:		    if true, fast gp is initialized
            scales0:        if not None init_method is set to manual
            fixed0:         initial fixed effects
            init_method:    initialization method \in {random,diagonal,manual} 
            termx:			term for diagonal diagonalisation
            n_times:        number of times the initialization
            perturb:        if true, the initial point is perturbed with gaussian noise
            perturbSize:    size of the perturbation
            verbose:        print if convergence is achieved
        """

        if init_method is None:
            if self.P == 1: init_method = 'random'
            else: init_method = 'diagonal'

        if not self.init: self.initGP(fast=fast)

        if scales0 is not None and not perturb: init_method = 'manual'

        if init_method == 'diagonal':
            scales0 = self._getScalesDiag(termx=termx)

        if init_method == 'diagonal' or init_method == 'manual':
            if not perturb: n_times = 1

        if fixed0 is None:
            fixed0 = SP.zeros_like(self.gp.getParams()['dataTerm'])

        for i in range(n_times):
            if init_method == 'random':
                scales1 = self._getScalesRand()
                fixed1 = pertSize * SP.randn(fixed0.shape[0], fixed0.shape[1])
            elif perturb:
                scales1 = scales0 + pertSize * self._perturbation()
                fixed1 = fixed0 + pertSize * SP.randn(fixed0.shape[0],
                                                      fixed0.shape[1])
            else:
                scales1 = scales0
                fixed1 = fixed0
            conv = self.trainGP(scales0=scales1, fixed0=fixed1, lambd=lambd)
            if conv: break

        if verbose:
            if not conv:
                print 'No local minimum found for the tested initialization points'
            else:
                print 'Local minimum found at iteration %d' % i

        return conv
Example #32
def pairedpvalsplot(dirin,
                    filepattern='*.txt',
                    pnames=pnames(),
                    rownames=rownames(),
                    minpval=1e-30,
                    newplot=True,
                    heatmap=False,
                    extent=None,
                    ms=5,
                    fs=8,
                    fliporder=False):
    '''
    Paired p-value plot in -log_10 space among all pairs of files in the directory that match the filepattern.
    Reorders all p-values so that rownames match up between files.
    Returns list of p-value lists, one per file
    '''
    import os.path
    import glob
    filepattern = dirin + r'/' + filepattern
    myfilestmp = glob.glob(filepattern)
    myfiles = []
    for f in myfilestmp:
        keep = True
        for strn in excludefiles():
            if strn in f: keep = False
        if keep:
            myfiles.append(f)
            print("keeping: " + f)
        else:
            print("not including: " + f)
    indrange = sp.arange(0, len(myfiles))
    if len(myfiles) == 0: raise Exception("no files found")
    pv = {}
    pvorig = {}
    lambdas = {}
    rowids = {}
    label = {}
    for j in indrange:
        pv[j], rowids[j], dummy1, dummy2 = extractpvals(myfiles[j],
                                                        pnames,
                                                        rownames,
                                                        sort=True)
        pvorig[j] = pv[j]
        lambdas[j] = estimate_lambda(pv[j])
        #pv[j]=-sp.log10(pv[j])
        #pv[j][pv[j]<minpval]=minpval
        label[j] = os.path.basename(
            myfiles[j]) + ", $\lambda$=%1.3f" % lambdas[j]
        if j == 0:
            idorder = rowids[j]
        else:
            if not all(rowids[j] == idorder):
                raise Exception("ids do not match up across file:" + label[j])

    import pylab as pl
    pl.ion()
    #these loops are to find out which are "notokany"
    notokany = sp.zeros_like(pv[0], dtype=bool)
    for j1 in indrange:
        for j2 in sp.arange(j1 + 1, len(myfiles)):
            assert len(pv[j1]) == len(
                pv[j2]), "different # of pvals in each file"
            imag = (pv[j1] <= 0.0) | (pv[j2] <= 0.0)
            one = (pv[j1] > 1.0) | (pv[j2] > 1.0)
            iok = (~imag) & (~one)
            notokany = notokany | (~iok)

    #these are the standard plotting loops
    for j1 in indrange:
        for j2 in sp.arange(j1 + 1, len(myfiles)):
            if fliporder:
                j1, j2 = j2, j1
            assert len(pv[j1]) == len(
                pv[j2]), "different # of pvals in each file"
            if newplot: pl.figure()
            print("%i, %i" % (j1, j2))
            imag1 = (pv[j1] <= 0.0)
            imag2 = (pv[j2] <= 0.0)
            imag = imag1 | imag2
            one = (pv[j1] > 1.0) | (pv[j2] > 1.0)

            #print pv[j1][imag]
            #print pv[j2][imag]
            iok = (~imag) & (~one)

            if not heatmap:
                tmpj1 = sp.copy(pv[j1])
                tmpj2 = sp.copy(pv[j2])
                minpval1 = min(tmpj1[~imag1])
                minpval2 = min(tmpj2[~imag2])
                tmpj1[imag1] = minpval1
                tmpj2[imag2] = minpval2

                pl.plot(-sp.log10(tmpj1[iok]),
                        -sp.log10(tmpj2[iok]),
                        '.k',
                        markersize=ms)
                #pl.plot(-sp.log10(tmpj1[notokany]),-sp.log10(tmpj2[notokany]),'b.',markersize=ms)
                #maxval=max(pl.xlim()[1],pl.ylim()[1])
                pl.plot(-sp.log10(tmpj1[imag1]),
                        -sp.log10(tmpj2[imag1]),
                        'g.',
                        markersize=ms)
                pl.plot(-sp.log10(tmpj1[imag2]),
                        -sp.log10(tmpj2[imag2]),
                        'r.',
                        markersize=ms)

                #pl.plot(-sp.log10(pv[j1][~iok]),-sp.log10(pv[j2][~iok]),'g.')
                maxval = max(pl.xlim()[1], pl.ylim()[1])
                pl.plot([0, maxval + 1], [0, maxval + 1], 'b--', linewidth=1)
                #fix_axes()
                pl.xlim([0, maxval + 1])
                pl.ylim([0, maxval + 1])
                pl.xlabel(label[j1], fontsize=fs)
                pl.ylabel(label[j2], fontsize=fs)
            else:
                H, xedges, yedges = np.histogram2d(-sp.log10(pv[j1]),
                                                   -sp.log10(pv[j2]),
                                                   bins=100)
                if extent is None:
                    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
                pl.imshow(sp.log(H + 1), extent=extent, cmap=pl.cm.Greys)
                pl.colorbar()
    return pvorig
Example #33
    def optimize(self,
                 fast=None,
                 scales0=None,
                 fixed0=None,
                 init_method=None,
                 termx=0,
                 n_times=10,
                 perturb=True,
                 pertSize=1e-3,
                 verbose=None,
                 lambd=None,
                 lambd_g=None,
                 lambd_n=None):
        """
        Train the model using the specified initialization strategy

        Args:
            fast:           if true, fast gp is considered; if None (default), fast inference is considered if possible
            scales0:        if not None, init_method is set to manual
            fixed0:         initial weights for fixed effects
            init_method:    initialization strategy:
                                'random': variance component parameters (scales) are sampled from a normal distribution with mean 0 and std 1,
                                'diagonal': uses a two-random-effect single trait model to initialize the parameters,
                                'manual': the starting point is set manually,
            termx:          term used for initialization in the diagonal strategy
            n_times:        number of restarts to converge
            perturb:        if true, the initial point (set manually or through the single-trait model) is perturbed with gaussian noise
            pertSize:       std of the gaussian noise used to perturb the initial point
            verbose:        print if convergence is achieved and how many restarts were needed
        """
        verbose = dlimix_legacy.getVerbose(verbose)

        if init_method is None:
            if self.P == 1: init_method = 'random'
            else: init_method = 'diagonal'

        if not self.init: self._initGP(fast=fast)

        if scales0 is not None and not perturb: init_method = 'manual'

        if init_method == 'diagonal':
            scales0 = self._getScalesDiag(termx=termx)

        if init_method == 'pairwise':
            assert self.n_randEffs == 2, 'VarianceDecomposition:: pairwise initialization possible only with 2 terms'
            assert self.P > 1, 'VarianceDecomposition:: pairwise initialization possible only with P>1'
            i = (self.trait_covar_type[0]
                 == 'freeform') * (self.trait_covar_type[1] == 'freeform')
            assert i, 'VarianceDecomposition:: pairwise initialization possible only with freeform matrices'
            scales0 = self._getScalesPairwise(verbose=verbose)

        if init_method in ['diagonal', 'manual', 'pairwise']:
            if not perturb: n_times = 1

        if fixed0 is None:
            fixed0 = sp.zeros_like(self.gp.getParams()['dataTerm'])

        for i in range(n_times):
            if init_method == 'random':
                scales1 = self._getScalesRand()
                fixed1 = pertSize * sp.randn(fixed0.shape[0], fixed0.shape[1])
            elif perturb:
                scales1 = scales0 + pertSize * self._perturbation()
                fixed1 = fixed0 + pertSize * sp.randn(fixed0.shape[0],
                                                      fixed0.shape[1])
            else:
                scales1 = scales0
                fixed1 = fixed0

            conv = self.trainGP(scales0=scales1,
                                fixed0=fixed1,
                                lambd=lambd,
                                lambd_g=lambd_g,
                                lambd_n=lambd_n)
            if conv: break

        if verbose:
            if not conv:
                print('No local minimum found for the tested initialization points')
            else:
                print('Local minimum found at iteration %d' % i)

        return conv
Example #34
    xL = -1.7
    xR = 1.7
    grid = scipy.arange(xL+dx/2.,xR,dx)
    rho = scipy.ones_like(grid)/(xR-xL)
    print "rho: ", rho

    # Fokker Planck for PDE
    a = 1
    alpha = 0.99
    lambd = scipy.array([a,D,alpha])

    param=pde.FokkerPlanck.getDefaultParameters()
    param['Dt'] = Dt
    precond_param=precond.Precond.getDefaultParameters()
    precond_param['Dstar']=1./2.
    precond_param['sigma']=scipy.zeros_like(grid)
    # precond_param['kappa']=pde.doublewell
    precond_param['kappa']=scipy.zeros_like(grid)
    param['precond']=precond.Precond(precond_param)

    fp_pde = pde.FokkerPlanck(rho,grid,pde.doublewell,lambd,param)
    print fp_pde.param,fp_pde.Dt
    
    # CREATING LINEAR SOLVER
    gmres_param = GMRES.GMRESLinearSolver.getDefaultParameters()
    gmres_param['tol']=1e-8
    gmres_param['print']='short'
    gmres_param['builtin']=True
    linsolv = GMRES.GMRESLinearSolver(gmres_param)

    # CREATING NEWTON SOLVER
Example #35
def best_split_full_model(X, Uy, C, S, U, noderange, delta):
    mBest = -1
    sBest = -float('inf')
    score_best = -float('inf')
    left_mean = None
    right_mean = None
    ldelta = SP.log(delta)
    levels = list(map(SP.unique, X[noderange].T))
    feature_map = []
    s = []
    UXt = []
    cnt = 0
    for i in range(X.shape[1]):
        lev = levels[i]
        for j in range(lev.size - 1):
            split_point = SP.median(lev[j:j + 2])
            x = SP.int_(X[noderange, i] > split_point)
            UXt.append(SP.dot(U.T[:, noderange], x))
            feature_map.append(i)
            s.append(split_point)
            cnt += 1
    UXt = SP.array(UXt).T
    if UXt.size == 0:  #predictors are homogeneous
        return mBest, sBest, left_mean, right_mean, score_best
    else:
        #print UXt
        #         print X[noderange]
        #         print ''
        #         print ''
        # test all transformed predictors
        scores = -NP.ones(cnt) * float('inf')
        UC = SP.dot(U.T, C)
        ########################
        #finding the best split#
        ########################
        score_0 = lmm_fast.nLLeval(ldelta, Uy[:, 0], UC, S)
        for snp_cnt in SP.arange(cnt):
            UX = SP.hstack((UXt[:, snp_cnt:snp_cnt + 1], UC))
            scores[snp_cnt] = -lmm_fast.nLLeval(ldelta, Uy[:, 0], UX, S)
            scores[snp_cnt] += score_0
        ############################
        ###evaluate the new means###
        ############################
        kBest = SP.argmax(scores)
        score_best = scores[kBest]
        sBest = s[kBest]
        if score_best > 0:
            UX = SP.hstack((UXt[:, kBest:kBest + 1], UC))
            _, beta, _ = lmm_fast.nLLeval(ldelta,
                                          Uy[:, 0],
                                          UX,
                                          S,
                                          MLparams=True)
            mBest = feature_map[kBest]
            CX = SP.zeros_like(Uy)
            CX[noderange] = SP.int_(X[noderange, mBest:mBest + 1] > sBest)
            C_new = SP.hstack((CX, C))
            mean = SP.dot(C_new,
                          beta.reshape(beta.size,
                                       -1))  #TODO:is this the correct way?
            left_mean = ((mean[noderange])[CX[noderange] == 0])[0]
            right_mean = ((mean[noderange])[CX[noderange] == 1])[0]
        return mBest, sBest, left_mean, right_mean, score_best
Example #36
def ipArgand(instruct):
    # Whether to save the plot images.
    doSave = False

    plotThis = 'zPhase'

    pklFolder = instruct.pklFolder
    fileStart = cs.lastName(pklFolder)

    # Processed file choice.
    loadThis = 'zAnyF'

    # Channels plotted.
    ch = 2

    # Whether to identify and plot target files.
    targetBool = True

    # Whether to manually select which file numbers to plot.
    manualTargetBool = True
    manualTargetArra = instruct.fileNums

    # Whether to plot all the files together without erasing at all.
    fileTogether = True

    # Whether to omit 60 Hz data from the plots.
    omit60Hz = True

    # Frequencies plotted.
    maskChoice = 'oddHUp2'

    isYAxStartedFromZero = False

    # Whether to subtract baseline phase results from separate files.
    subtract1 = False
    if subtract1:
        # Pick a standard packet range to use.
        pktRang = range(17)

    # Whether to include the minor note in the legend entries rather than the
    # title.
    minorLegBool = False

    # Whether to include the minor note anywhere at all.
    minorBool = True

    # Whether to swap the description and minor note text for display purposes.
    swapDescriptMinor = False

    legOutside = False
    loc = 'center right'

    stdBool = False

    # Whether to plot only one packet's results instead of an average over all
    # the packets in the file.
    onePkt = False

    # File number from which the plot title is taken. inf if it doesn't matter.
    titleFileNum = sp.inf

    # Let colors be selected automatically if files are plotted together.
    if fileTogether:
        color = None
        stdColor = None

    # Loading the data:
    fileName = fileStart + '_' + loadThis + '.pkl'
    filePath = os.path.join(pklFolder, fileName)
    with open(filePath, 'rb') as f:  # Python 3: open(..., 'rb')
        a = pickle.load(f)

    # List of file numbers.
    fileNumArra = sp.zeros(len(a))
    for t in range(len(a)):
        fileNumArra[t] = a[t].fileNum
        if swapDescriptMinor:
            descript = a[t].descript
            a[t].descript = a[t].minor
            a[t].minor = descript

    if targetBool:
        tarList = []
        lowFilesA = sp.zeros(len(a), dtype=int)
        colorsA = len(a) * ['']
        linestylesA = len(a) * ['']
        legFilterA = sp.zeros(len(a), dtype=bool)
        for t in range(len(a)):
            if manualTargetBool:
                if any(a[t].fileNum == manualTargetArra):
                    tarList.append(t)
                    # Set up the low frequency normalization files for each file.
                    manualIdx = cs.find(manualTargetArra, a[t].fileNum)
                    lowFileNum = instruct.lowFiles[manualIdx]
                    lowFilesA[t] = cs.find(fileNumArra, lowFileNum)
                    colorsA[t] = instruct.colors[manualIdx]
                    linestylesA[t] = instruct.linestyles[manualIdx]
                    legFilterA[t] = instruct.legFilter[manualIdx]
            else:
                # Identify target files (They aren't baselines or tests).
                if a[t].descript != 'baseline' and a[t].descript != 'test':
                    if t < (len(a) - 1):
                        tarList.append(t)

    # Discover the maximum file number.
    maxFile = -sp.inf
    for t in range(len(a)):
        if a[t].fileNum > maxFile:
            maxFile = a[t].fileNum

    # Pick out the desired result data to be plotted as x and y values.
    res = []

    for t in range(len(a)):
        # Result class for each possible file to be read.
        res.append(cs.emptyClass())

    for t in tarList:
        if not subtract1 or not any(t == sp.array(tarList)):
            # (radian)
            phaseDiff = a[t].phaseDiff[ch, ...] / 1000
            zMag = a[t].zMag[ch, ...]
        else:
            # Subtract the baseline phase differences. (radian)
            fileOff = 3
            phaseDiff = (a[t].phaseDiff[ch, pktRang, :] -
                         a[t - fileOff].phaseDiff[ch, pktRang, :]) / 1000
            # Magnitudes.
            zMag = a[t].zMag[ch, pktRang, :]

        res[t].xVal = zMag * sp.cos(phaseDiff)
        res[t].yVal = zMag * sp.sin(phaseDiff)

        # Average over packets.
        if not onePkt:
            res[t].xVal = sp.mean(res[t].xVal, axis=0)
            res[t].yVal = sp.mean(res[t].yVal, axis=0)
        else:
            meanRang = instruct.pkt + sp.array([-1, 0, 1])
            res[t].xVal = res[t].xVal[meanRang, :]
            res[t].yVal = res[t].yVal[meanRang, :]
            # Average over the chosen packet and those on either side.
            res[t].xVal = sp.mean(res[t].xVal, axis=0)
            res[t].yVal = sp.mean(res[t].yVal, axis=0)

        # Mask out unwanted frequencies.
        mask = sp.zeros_like(a[t].freq, dtype=bool)
        if maskChoice == 'oddHUp2':
            mask[4:len(mask):8] = True
            # Number of frequencies included in the plot.
            freqCount = 17
            mask[(1 + 4 + 8 * (freqCount - 1)):] = False
        # Mask out 60 Hz, if requested.
        if omit60Hz:
            mask[a[t].freq == 60] = False
        res[t].xVal = res[t].xVal[mask]
        res[t].yVal = res[t].yVal[mask]

        # Result X and Y data normalized by the low freq. fundamental real
        # component of impedance.
        if t == lowFilesA[t]:
            maxReal1 = res[lowFilesA[t]].xVal[0]
        res[t].xVal /= maxReal1
        res[t].yVal /= maxReal1

        # If the phase difference baselines were subtracted, divide the
        # magnitudes.
        if subtract1 and any(t == sp.array(tarList)):
            baseMags = sp.sqrt(res[t - fileOff].xVal**2 +
                               res[t - fileOff].yVal**2)
            res[t].xVal /= baseMags
            res[t].yVal /= baseMags
            # Renormalize.
            if t == lowFilesA[t]:
                maxReal2 = res[lowFilesA[t]].xVal[0]
            res[t].xVal /= maxReal2
            res[t].yVal /= maxReal2

    # Initialize plot settings.
    ps = cs.emptyClass()
    # Figure with axes.
    ps.color = color
    ps.stdColor = stdColor
    ps.markerSize = 5
    ps.marker = 'o'
    ps.linestyle = '-'
    ps.markerSize = 4
    ps.titleWrap = 83
    ps.legOutside = legOutside
    ps.omit60Hz = omit60Hz
    ps.isYAxStartedFromZero = isYAxStartedFromZero
    ps.xLabel = 'REAL'
    ps.yLabel = 'IMAG'
    ps.stdBool = stdBool
    ps.normMag = False
    ps.loc = loc

    # List of file indices plotted.
    if targetBool:
        tList = tarList
    else:
        tList = range(0, len(a))

    # List adjacent files with each target file, if there are three per plot.
    tarList = tList

    # Plot, and save if needed.
    for idx in range(len(tList)):
        t = tList[idx]
        tar = tarList[idx]
        ps.ch = ch
        ps.color = colorsA[t]
        ps.linestyle = linestylesA[t]
        #        ps.titleStr = ('%s Ch %d (%s). xmitFund = %.0f Hz. %s'
        #                       % (a[t].fileDateStr, ch, a[t].measStr[ch],
        #                          a[t].xmitFund, a[t].major))
        ps.titleStr = ('%s Ch %d (%s). xmitFund = %.0f Hz.' %
                       (a[t].fileDateStr, ch, a[t].measStr[ch], a[t].xmitFund))
        #        ps.titleStr = ('Phase differences for artificial signals.')
        if onePkt:
            ps.titleStr += (' Results averaged over three packets centered '
                            'on each packet listed in the legend.')

        if subtract1:
            ps.titleStr += (
                ' Baseline sand phase angles have been subtracted, ' +
                'and normalized magnitudes have been divided by ' +
                'baseline normalized magnitudes before normalizing ' +
                'to a low frequency real value of 1 again.')
        # Legend text.
        if legFilterA[t]:
            # With legend.
            ps.legStr = 'File %d. Ch %d. %s.' % (a[t].fileNum, ch,
                                                 a[t].descript)
            if onePkt:
                ps.legStr = '(pkt %d) ' % (a[t].pkt[instruct.pkt]) + ps.legStr
        else:
            # Without legend.
            ps.legStr = '_nolegend_'
        if minorBool and (ps.legStr != '_nolegend_'):
            if not minorLegBool:
                if a[t].minor != 'None.':
                    ps.titleStr += ' %s.' % (a[t].minor)
            else:
                ps.legStr += ' %s.' % (a[t].minor)
        # The plot title is taken from the target file.
        ps.titleBool = False
        if t == tar:
            if (titleFileNum == sp.inf) or (titleFileNum == a[t].fileNum):
                ps.titleBool = True

        if plotThis == 'argand':
            ps.titleStr += ' Normalized Apparent Impedance Argand.'
            ps.xVal = res[t].xVal
            ps.yVal = res[t].yVal
        elif plotThis == 'zMag':
            ps.xVal = a[t].freq[mask]
            ps.yVal = sp.sqrt(res[t].xVal**2 + res[t].yVal**2)
            if subtract1:
                ps.titleStr += (
                    ' Normalized magnitudes have been divided by ' +
                    'baseline normalized magnitudes before normalizing ' +
                    'to a low frequency real value of 1 again.')
            ps.xLabel = 'Frequency (Hz)'
            ps.yLabel = 'Impedance Magnitude (Normalized)'
        elif plotThis == 'zPhase':
            ps.xVal = a[t].freq[mask]
            # Milliradian
            ps.yVal = 1000 * sp.arctan2(res[t].yVal, res[t].xVal)
            ps.xLabel = 'Frequency (Hz)'
            ps.yLabel = 'Impedance Phase (mrad)'
        pw.basePlot(ps)

        if (t == tar):
            if ((not fileTogether)
                    or (fileTogether and idx == len(tList) - 1)):
                if doSave:
                    pass
                if not (t == tList[-1]):
                    plt.clf()

    ax = plt.gca()
    if plotThis == 'argand':
        ax.set_aspect('equal', 'box')
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    if ps.isYAxStartedFromZero:
        if ymax > 0:
            ax.axis([xmin, xmax, 0, ymax])
        else:
            ax.axis([0, xmax, ymin, 0])
Example #37
File: RCWA.py Project: seghil/EMpy
    def solve(self, wls):
        """Anisotropic solver.
    
        INPUT
        wls = wavelengths to scan (any asarray-able object).
        
        OUTPUT
        self.DEO1, self.DEE1, self.DEO3, self.DEE3 = power reflected
        and transmitted.
        """

        self.wls = S.atleast_1d(wls)

        LAMBDA = self.LAMBDA
        n = self.n
        multilayer = self.multilayer
        alpha = self.alpha
        delta = self.delta
        psi = self.psi
        phi = self.phi

        nlayers = len(multilayer)
        i = S.arange(-n, n + 1)
        nood = 2 * n + 1
        hmax = nood - 1

        DEO1 = S.zeros((nood, self.wls.size))
        DEO3 = S.zeros_like(DEO1)
        DEE1 = S.zeros_like(DEO1)
        DEE3 = S.zeros_like(DEO1)

        c1 = S.array([1., 0., 0.])
        c3 = S.array([1., 0., 0.])
        K = 2 * pi / LAMBDA * S.array(
            [S.sin(phi), 0., S.cos(phi)],
            dtype=complex)  # grating on the xy plane
        dirk1 = S.array([S.sin(alpha) * S.cos(delta), \
                         S.sin(alpha) * S.sin(delta), \
                         S.cos(alpha)])

        # D polarization vector
        u = S.array([ S.cos(psi) * S.cos(alpha) * S.cos(delta) - S.sin(psi) * S.sin(delta), \
                      S.cos(psi) * S.cos(alpha) * S.sin(delta) + S.sin(psi) * S.cos(delta), \
                      -S.cos(psi) * S.sin(alpha)])

        kO1i = S.zeros((3, i.size), dtype=complex)
        kE1i = S.zeros_like(kO1i)
        kO3i = S.zeros_like(kO1i)
        kE3i = S.zeros_like(kO1i)

        Mp = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex)
        M = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex)

        dlt = (i == 0).astype(int)

        for iwl, wl in enumerate(self.wls):

            nO1 = nE1 = multilayer[0].mat.n(wl).item()
            nO3 = nE3 = multilayer[-1].mat.n(wl).item()

            # wavevectors
            k = 2 * pi / wl

            eps1 = S.diag(S.asarray([nE1, nO1, nO1])**2)
            eps3 = S.diag(S.asarray([nE3, nO3, nO3])**2)

            # ordinary wave
            abskO1 = k * nO1
            #abskO3 = k * nO3
            # extraordinary wave
            #abskE1 = k * nO1 *nE1 / S.sqrt(nO1**2 + (nE1**2 - nO1**2) * S.dot(-c1, dirk1)**2)
            #abskE3 = k * nO3 *nE3 / S.sqrt(nO3**2 + (nE3**2 - nO3**2) * S.dot(-c3, dirk1)**2)

            k1 = abskO1 * dirk1

            kO1i[0, :] = k1[0] - i * K[0]
            kO1i[1, :] = k1[1] * S.ones_like(i)
            kO1i[2, :] = -dispersion_relation_ordinary(kO1i[0, :], kO1i[1, :],
                                                       k, nO1)

            kE1i[0, :] = kO1i[0, :]
            kE1i[1, :] = kO1i[1, :]
            kE1i[2, :] = -dispersion_relation_extraordinary(
                kE1i[0, :], kE1i[1, :], k, nO1, nE1, c1)

            kO3i[0, :] = kO1i[0, :]
            kO3i[1, :] = kO1i[1, :]
            kO3i[2, :] = dispersion_relation_ordinary(kO3i[0, :], kO3i[1, :],
                                                      k, nO3)

            kE3i[0, :] = kO1i[0, :]
            kE3i[1, :] = kO1i[1, :]
            kE3i[2, :] = dispersion_relation_extraordinary(
                kE3i[0, :], kE3i[1, :], k, nO3, nE3, c3)

            #k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [k1[2] - i * K[2]]]
            k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [-i * K[2]]]

            # aliases for constant wavevectors
            kx = kO1i[0, :]  # or kE1i[0, :], it is the same
            ky = k1[1]

            # matrices
            I = S.eye(nood, dtype=complex)
            ZERO = S.zeros((nood, nood), dtype=complex)
            Kx = S.diag(kx / k)
            Ky = ky / k * I
            Kz = S.diag(k2i[2, :] / k)
            KO1z = S.diag(kO1i[2, :] / k)
            KE1z = S.diag(kE1i[2, :] / k)
            KO3z = S.diag(kO3i[2, :] / k)
            KE3z = S.diag(kE3i[2, :] / k)

            ARO = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KO1z * eps1[2, 0]
            BRO = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KO1z * eps1[2, 1]
            CRO_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KO1z * eps1[2, 2])

            ARE = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KE1z * eps1[2, 0]
            BRE = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KE1z * eps1[2, 1]
            CRE_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KE1z * eps1[2, 2])

            ATO = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KO3z * eps3[2, 0]
            BTO = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KO3z * eps3[2, 1]
            CTO_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KO3z * eps3[2, 2])

            ATE = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KE3z * eps3[2, 0]
            BTE = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KE3z * eps3[2, 1]
            CTE_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KE3z * eps3[2, 2])

            DRE = c1[1] * KE1z - c1[2] * Ky
            ERE = c1[2] * Kx - c1[0] * KE1z
            FRE = c1[0] * Ky - c1[1] * Kx

            DTE = c3[1] * KE3z - c3[2] * Ky
            ETE = c3[2] * Kx - c3[0] * KE3z
            FTE = c3[0] * Ky - c3[1] * Kx

            b = S.r_[u[0] * dlt, u[1] * dlt,
                     (k1[1] / k * u[2] - k1[2] / k * u[1]) * dlt,
                     (k1[2] / k * u[0] - k1[0] / k * u[2]) * dlt]
            Ky_CRO_1 = ky / k * CRO_1
            Ky_CRE_1 = ky / k * CRE_1
            Kx_CRO_1 = kx[:, S.newaxis] / k * CRO_1
            Kx_CRE_1 = kx[:, S.newaxis] / k * CRE_1
            MR31 = -S.dot(Ky_CRO_1, ARO)
            MR32 = -S.dot(Ky_CRO_1, BRO) - KO1z
            MR33 = -S.dot(Ky_CRE_1, ARE)
            MR34 = -S.dot(Ky_CRE_1, BRE) - KE1z
            MR41 = S.dot(Kx_CRO_1, ARO) + KO1z
            MR42 = S.dot(Kx_CRO_1, BRO)
            MR43 = S.dot(Kx_CRE_1, ARE) + KE1z
            MR44 = S.dot(Kx_CRE_1, BRE)
            MR = S.asarray(S.bmat([[I, ZERO, I, ZERO], \
                                   [ZERO, I, ZERO, I], \
                                   [MR31, MR32, MR33, MR34], \
                                   [MR41, MR42, MR43, MR44]]))

            Ky_CTO_1 = ky / k * CTO_1
            Ky_CTE_1 = ky / k * CTE_1
            Kx_CTO_1 = kx[:, S.newaxis] / k * CTO_1
            Kx_CTE_1 = kx[:, S.newaxis] / k * CTE_1
            MT31 = -S.dot(Ky_CTO_1, ATO)
            MT32 = -S.dot(Ky_CTO_1, BTO) - KO3z
            MT33 = -S.dot(Ky_CTE_1, ATE)
            MT34 = -S.dot(Ky_CTE_1, BTE) - KE3z
            MT41 = S.dot(Kx_CTO_1, ATO) + KO3z
            MT42 = S.dot(Kx_CTO_1, BTO)
            MT43 = S.dot(Kx_CTE_1, ATE) + KE3z
            MT44 = S.dot(Kx_CTE_1, BTE)
            MT = S.asarray(S.bmat([[I, ZERO, I, ZERO], \
                                   [ZERO, I, ZERO, I], \
                                   [MT31, MT32, MT33, MT34], \
                                   [MT41, MT42, MT43, MT44]]))

            Mp.fill(0.0)
            M.fill(0.0)

            for nlayer in xrange(nlayers - 2, 0, -1):  # internal layers

                layer = multilayer[nlayer]
                thickness = layer.thickness

                EPS2, EPS21 = layer.getEPSFourierCoeffs(wl,
                                                        n,
                                                        anisotropic=True)

                Exx = S.squeeze(EPS2[0, 0, :])
                Exx = toeplitz(S.flipud(Exx[0:hmax + 1]), Exx[hmax:])
                Exy = S.squeeze(EPS2[0, 1, :])
                Exy = toeplitz(S.flipud(Exy[0:hmax + 1]), Exy[hmax:])
                Exz = S.squeeze(EPS2[0, 2, :])
                Exz = toeplitz(S.flipud(Exz[0:hmax + 1]), Exz[hmax:])

                Eyx = S.squeeze(EPS2[1, 0, :])
                Eyx = toeplitz(S.flipud(Eyx[0:hmax + 1]), Eyx[hmax:])
                Eyy = S.squeeze(EPS2[1, 1, :])
                Eyy = toeplitz(S.flipud(Eyy[0:hmax + 1]), Eyy[hmax:])
                Eyz = S.squeeze(EPS2[1, 2, :])
                Eyz = toeplitz(S.flipud(Eyz[0:hmax + 1]), Eyz[hmax:])

                Ezx = S.squeeze(EPS2[2, 0, :])
                Ezx = toeplitz(S.flipud(Ezx[0:hmax + 1]), Ezx[hmax:])
                Ezy = S.squeeze(EPS2[2, 1, :])
                Ezy = toeplitz(S.flipud(Ezy[0:hmax + 1]), Ezy[hmax:])
                Ezz = S.squeeze(EPS2[2, 2, :])
                Ezz = toeplitz(S.flipud(Ezz[0:hmax + 1]), Ezz[hmax:])

                Exx_1 = S.squeeze(EPS21[0, 0, :])
                Exx_1 = toeplitz(S.flipud(Exx_1[0:hmax + 1]), Exx_1[hmax:])
                Exx_1_1 = inv(Exx_1)

                # lalanne
                Ezz_1 = inv(Ezz)
                Ky_Ezz_1 = ky / k * Ezz_1
                Kx_Ezz_1 = kx[:, S.newaxis] / k * Ezz_1
                Exz_Ezz_1 = S.dot(Exz, Ezz_1)
                Eyz_Ezz_1 = S.dot(Eyz, Ezz_1)
                H11 = 1j * S.dot(Ky_Ezz_1, Ezy)
                H12 = 1j * S.dot(Ky_Ezz_1, Ezx)
                H13 = S.dot(Ky_Ezz_1, Kx)
                H14 = I - S.dot(Ky_Ezz_1, Ky)
                H21 = 1j * S.dot(Kx_Ezz_1, Ezy)
                H22 = 1j * S.dot(Kx_Ezz_1, Ezx)
                H23 = S.dot(Kx_Ezz_1, Kx) - I
                H24 = -S.dot(Kx_Ezz_1, Ky)
                H31 = S.dot(Kx, Ky) + Exy - S.dot(Exz_Ezz_1, Ezy)
                H32 = Exx_1_1 - S.dot(Ky, Ky) - S.dot(Exz_Ezz_1, Ezx)
                H33 = 1j * S.dot(Exz_Ezz_1, Kx)
                H34 = -1j * S.dot(Exz_Ezz_1, Ky)
                H41 = S.dot(Kx, Kx) - Eyy + S.dot(Eyz_Ezz_1, Ezy)
                H42 = -S.dot(Kx, Ky) - Eyx + S.dot(Eyz_Ezz_1, Ezx)
                H43 = -1j * S.dot(Eyz_Ezz_1, Kx)
                H44 = 1j * S.dot(Eyz_Ezz_1, Ky)
                H = 1j * S.diag(S.repeat(S.diag(Kz),4)) + \
                    S.asarray(S.bmat([[H11, H12, H13, H14], \
                                      [H21, H22, H23, H24], \
                                      [H31, H32, H33, H34],\
                                      [H41, H42, H43, H44]]))

                q, W = eig(H)
                W1, W2, W3, W4 = S.split(W, 4)

                #
                # boundary conditions
                #
                # x = [R T]
                # R = [ROx ROy REx REy]
                # T = [TOx TOy TEx TEy]
                # b + MR.R = M1p.c
                # M1.c = M2p.c
                # ...
                # ML.c = MT.T
                # therefore: b + MR.R = (M1p.M1^-1.M2p.M2^-1. ...).MT.T
                # missing equations from (46)..(49) in glytsis_rigorous
                # [b] = [-MR Mtot.MT] [R]
                # [0]   [...........] [T]

                z = S.zeros_like(q)
                z[S.where(q.real > 0)] = -thickness
                D = S.exp(k * q * z)
                Sy0 = W1 * D[S.newaxis, :]
                Sx0 = W2 * D[S.newaxis, :]
                Uy0 = W3 * D[S.newaxis, :]
                Ux0 = W4 * D[S.newaxis, :]

                z = thickness * S.ones_like(q)
                z[S.where(q.real > 0)] = 0
                D = S.exp(k * q * z)
                D1 = S.exp(-1j * k2i[2, :] * thickness)
                Syd = D1[:, S.newaxis] * W1 * D[S.newaxis, :]
                Sxd = D1[:, S.newaxis] * W2 * D[S.newaxis, :]
                Uyd = D1[:, S.newaxis] * W3 * D[S.newaxis, :]
                Uxd = D1[:, S.newaxis] * W4 * D[S.newaxis, :]

                Mp[:, :, nlayer] = S.r_[Sx0, Sy0, -1j * Ux0, -1j * Uy0]
                M[:, :, nlayer] = S.r_[Sxd, Syd, -1j * Uxd, -1j * Uyd]

            Mtot = S.eye(4 * nood, dtype=complex)
            for nlayer in xrange(1, nlayers - 1):
                Mtot = S.dot(S.dot(Mtot, Mp[:, :, nlayer]), inv(M[:, :,
                                                                  nlayer]))

            BC_b = S.r_[b, S.zeros_like(b)]
            # BC_A1 = S.asarray(S.bmat([-MR, S.dot(Mtot, MT)]))
            BC_A1 = S.c_[-MR, S.dot(Mtot, MT)]
            BC_A2 = S.asarray(S.bmat( \
                [[(c1[0]*I - c1[2] * S.dot(CRO_1,ARO)), (c1[1]*I - c1[2] * S.dot(CRO_1, BRO)), ZERO, ZERO, ZERO, ZERO, ZERO, ZERO], \
                 [ZERO, ZERO, (DRE - S.dot(S.dot(FRE,CRE_1),ARE)), (ERE - S.dot(S.dot(FRE,CRE_1),BRE)), ZERO, ZERO, ZERO, ZERO], \
                 [ZERO, ZERO, ZERO, ZERO, (c3[0]*I - c3[2] * S.dot(CTO_1,ATO)), (c3[1]*I - c3[2] * S.dot(CTO_1,BTO)), ZERO, ZERO], \
                 [ZERO, ZERO, ZERO, ZERO, ZERO, ZERO, (DTE - S.dot(S.dot(FTE,CTE_1),ATE)), (ETE - S.dot(S.dot(FTE,CTE_1),BTE))]]))

            BC_A = S.r_[BC_A1, BC_A2]

            x = linsolve(BC_A, BC_b)

            ROx, ROy, REx, REy, TOx, TOy, TEx, TEy = S.split(x, 8)

            ROz = -S.dot(CRO_1, (S.dot(ARO, ROx) + S.dot(BRO, ROy)))
            REz = -S.dot(CRE_1, (S.dot(ARE, REx) + S.dot(BRE, REy)))
            TOz = -S.dot(CTO_1, (S.dot(ATO, TOx) + S.dot(BTO, TOy)))
            TEz = -S.dot(CTE_1, (S.dot(ATE, TEx) + S.dot(BTE, TEy)))

            denom = (k1[2] - S.dot(u, k1) * u[2]).real
            DEO1[:,iwl] = -((S.absolute(ROx)**2 + S.absolute(ROy)**2 + S.absolute(ROz)**2) * S.conj(kO1i[2,:]) - \
                            (ROx*kO1i[0,:] + ROy*kO1i[1,:] + ROz*kO1i[2,:]) * S.conj(ROz)).real / denom
            DEE1[:,iwl] = -((S.absolute(REx)**2 + S.absolute(REy)**2 + S.absolute(REz)**2) * S.conj(kE1i[2,:]) - \
                            (REx*kE1i[0,:] + REy*kE1i[1,:] + REz*kE1i[2,:]) * S.conj(REz)).real / denom
            DEO3[:,iwl] =  ((S.absolute(TOx)**2 + S.absolute(TOy)**2 + S.absolute(TOz)**2) * S.conj(kO3i[2,:]) - \
                            (TOx*kO3i[0,:] + TOy*kO3i[1,:] + TOz*kO3i[2,:]) * S.conj(TOz)).real / denom
            DEE3[:,iwl] =  ((S.absolute(TEx)**2 + S.absolute(TEy)**2 + S.absolute(TEz)**2) * S.conj(kE3i[2,:]) - \
                            (TEx*kE3i[0,:] + TEy*kE3i[1,:] + TEz*kE3i[2,:]) * S.conj(TEz)).real / denom

        # save the results
        self.DEO1 = DEO1
        self.DEE1 = DEE1
        self.DEO3 = DEO3
        self.DEE3 = DEE3

        return self
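The boundary-condition bookkeeping in the comments above reduces, per wavelength, to one square linear system. In the code's notation (R, T the stacked reflected/transmitted amplitudes, MR/MT the boundary matrices, Mp/M the per-layer matrices), the comments assert

    b + M_R R = M_{tot} M_T T,  where  M_{tot} = \prod_{\ell} M'_\ell M_\ell^{-1},

which, together with the four polarization constraints collected in BC_A2, is exactly the system BC_A . [R; T] = BC_b handed to linsolve.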
Example #38
0
def test1():
    zs = np.linspace(0.01, 1, 21)
    r = np.array([[0, 0, zz] for zz in zs])
    thetas = np.linspace(0, np.pi * 0.25, 10)
    print "thetas: ", thetas
    k_dir = np.vstack([np.sin(thetas), np.zeros_like(thetas), np.cos(thetas)])
    k_dir = k_dir.transpose()

    wavelength = 10
    k0 = np.pi * 2 / wavelength

    a1 = np.array([1, 0, 0])
    a2 = np.array([0, 1, 0])

    class gratinglobes(object):
        def check(self):
            dx = np.sqrt(np.sum(a1 * a1, axis=-1))
            dy = np.sqrt(np.sum(a2 * a2, axis=-1))
            threshold_d = wavelength / (1 + np.sin(thetas))
            temp = np.array([dx < threshold_d, dy < threshold_d])
            return np.sum(temp, axis=0) == temp.shape[0]

    checker = gratinglobes().check()
    print "grating lobe condition: ", checker

    result_direct = PGF_Direct(k0, a1, a2, 100, 100).pgf(k_dir, r)
    result_poisson = PGF_Poisson(k0, a1, a2, 1, 1).pgf(k_dir, r)
    result_ewald = PGF_EWALD(k0, a1, a2, 20, 20).pgf(k_dir, r)
    import matplotlib.pylab as plt
    plt.figure()

    class Method(object):
        def __init__(self, result, marker, line):
            self.result = result
            self.marker = marker
            self.line = line

        def angle(self, it):
            plt.plot(zs,\
                     np.angle(self.result)[it]/np.pi*180,\
                     self.line,\
                     label='angle %d %s'%(it,self.marker))

        def absolute(self, it):
            plt.plot(zs,\
                     np.absolute(self.result)[it],\
                     self.line,\
                     label='abs %d %s'%(it,self.marker))

    map(Method(result_poisson, 'pois', "s").angle, xrange(k_dir.shape[0]))
    map(Method(result_direct, 'dir', "-").angle, xrange(k_dir.shape[0]))
    map(Method(result_ewald, 'ewald', "+").angle, xrange(k_dir.shape[0]))
    plt.ylabel("angle (degree)")
    plt.xlabel("zs")
    #plt.legend()
    plt.show()

    plt.figure()
    map(Method(result_poisson, 'pois', "s").absolute, xrange(k_dir.shape[0]))
    map(Method(result_direct, 'dir', "-").absolute, xrange(k_dir.shape[0]))
    map(Method(result_ewald, 'ewald', "+").absolute, xrange(k_dir.shape[0]))
    plt.xlabel("zs")
    plt.ylabel("log10(abs) ")
    #plt.legend()
    plt.show()

    plt.figure()
    #map(Method((result_direct-result_poisson)/result_direct,'dir_pois',"-s").absolute,xrange(k_dir.shape[0]))
    map(
        Method((result_direct - result_ewald) / result_direct, 'dir_ewald',
               "-d").absolute, xrange(k_dir.shape[0]))
    #map(Method((result_poisson-result_ewald)/result_ewald,'pois_ewald',"-o").absolute,xrange(k_dir.shape[0]))
    plt.xlabel("zs")
    plt.ylabel("log10(abs) ")
    #plt.legend()
    plt.show()
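The gratinglobes.check helper above encodes the standard grating-lobe criterion for a periodic array scanned to angle theta: lobes stay suppressed only while both lattice spacings satisfy

    d < \lambda / (1 + \sin\theta),

which is the threshold_d comparison applied to |a1| and |a2| in the code.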
Example #39
0
    def findLocalOptima(self,
                        fast=False,
                        verbose=True,
                        n_times=10,
                        lambd=None):
        """
        Train the model repeatedly, up to a number of restarts specified by the user, and
        return a list of all local minima that have been found.
        
        Args:
            fast:       Boolean. If set to True, initialize kronSumGP.
            verbose:    Boolean. If set to True, verbose output is produced. (default True)
            n_times:    number of re-starts of the optimization. (default 10)
        """
        if not self.init: self.initGP(fast)

        opt_list = []

        fixed0 = SP.zeros_like(self.gp.getParams()['dataTerm'])

        # minimises n_times
        for i in range(n_times):

            scales1 = self._getScalesRand()
            fixed1 = 1e-1 * SP.randn(fixed0.shape[0], fixed0.shape[1])
            conv = self.trainGP(fast=fast,
                                scales0=scales1,
                                fixed0=fixed1,
                                lambd=lambd)

            if conv:
                # compare with previous minima
                temp = 1
                for j in range(len(opt_list)):
                    if SP.allclose(abs(self.getScales()),
                                   abs(opt_list[j]['scales'])):
                        temp = 0
                        opt_list[j]['counter'] += 1
                        break
                if temp == 1:
                    opt = {}
                    opt['counter'] = 1
                    opt['LML'] = self.getLML()
                    opt['scales'] = self.getScales()
                    opt_list.append(opt)

        # sort by LML
        LML = SP.array([opt_list[i]['LML'] for i in range(len(opt_list))])
        index = LML.argsort()[::-1]
        out = []
        if verbose:
            print "\nLocal minima\n"
            print "n_times\t\tLML"
            print "------------------------------------"
        for i in range(len(opt_list)):
            out.append(opt_list[index[i]])
            if verbose:
                print "%d\t\t%f" % (opt_list[index[i]]['counter'],
                                    opt_list[index[i]]['LML'])
        if verbose:
            print ""

        return out
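The restart-and-deduplicate pattern in findLocalOptima is independent of the GP machinery. A minimal sketch of the same idea against a plain objective, using scipy.optimize (the helper name, objective, and tolerance below are illustrative, not part of the original API):

import scipy as SP
import scipy.optimize

def find_local_optima(f, n_dim, n_times=10):
    opt_list = []
    for _ in range(n_times):
        x0 = SP.randn(n_dim)  # random restart
        res = scipy.optimize.minimize(f, x0)
        if not res.success:
            continue
        for opt in opt_list:  # merge restarts that found the same minimum
            if SP.allclose(res.x, opt['x'], atol=1e-4):
                opt['counter'] += 1
                break
        else:
            opt_list.append({'x': res.x, 'fun': res.fun, 'counter': 1})
    # sort by objective value, best first
    return sorted(opt_list, key=lambda o: o['fun'])

# e.g. find_local_optima(lambda x: ((x**2 - 1)**2).sum(), n_dim=2)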
Example #40
0
#import scipy as np
# In[]
if __name__ == '__main__':
    '''
    test1()
    '''
    thetas = np.linspace(0, np.pi * 0.3, 7)
    #    thetas = np.array([np.pi*0.3])
    print "thetas: ", thetas
    zmin = 1
    zmax = 5
    zs = np.linspace(zmin, zmax, 100)
    r = np.array([[0.1, 0.12, zz] for zz in zs])

    k_dir = np.vstack([np.sin(thetas), np.zeros_like(thetas), np.cos(thetas)])
    k_dir = k_dir.transpose()
    #    print k_dir

    wavelength = 10
    k0 = np.pi * 2 / wavelength

    a1 = np.array([1, 0, 0])
    a2 = np.array([0, 1, 0])

    class Gratinglobes(object):
        def check(self):
            dx = np.sqrt(np.sum(a1 * a1, axis=-1))
            dy = np.sqrt(np.sum(a2 * a2, axis=-1))
            threshold_d = wavelength / (1 + np.sin(thetas))
            temp = np.array([dx < threshold_d, dy < threshold_d])
Example #41
0
def estimate(pv, m=None, verbose=False, lowmem=False, pi0=None):
    """
    Estimates q-values from p-values

    Args
    =====

    m: number of tests. If not specified m = pv.size
    verbose: print verbose messages? (default False)
    lowmem: use memory-efficient in-place algorithm
    pi0: if None, it's estimated as suggested in Storey and Tibshirani, 2003. 
         For most GWAS this is not necessary, since pi0 is extremely likely to be
         1

    """

    assert (pv.min() >= 0
            and pv.max() <= 1), "p-values should be between 0 and 1"

    original_shape = pv.shape
    pv = pv.ravel()  # flatten the array; ravel() returns a view when possible,
                     # avoiding the copy that flatten() always makes

    if m is None:
        m = float(len(pv))
    else:
        # the user has supplied an m
        m *= 1.0

    # if the number of hypotheses is small, just set pi0 to 1
    if len(pv) < 100 and pi0 is None:
        pi0 = 1.0
    elif pi0 is not None:
        pi0 = pi0
    else:
        # evaluate pi0 for different lambdas
        pi0 = []
        lam = sp.arange(0, 0.90, 0.01)
    counts = sp.array([(pv > l).sum() for l in lam])

        for l in range(len(lam)):
            pi0.append(counts[l] / (m * (1 - lam[l])))

        pi0 = sp.array(pi0)

        # fit natural cubic spline
        tck = sp.interpolate.splrep(lam, pi0, k=3)
        pi0 = sp.interpolate.splev(lam[-1], tck)

        if pi0 > 1:
            if verbose:
                print(
                    "got pi0 > 1 (%.3f) while estimating qvalues, setting it to 1"
                    % pi0)

            pi0 = 1.0

    assert (pi0 >= 0 and pi0 <= 1), "pi0 is not between 0 and 1: %f" % pi0

    if lowmem:
        # low-memory version: keeps only one pv and one qv array in memory
        qv = sp.zeros((len(pv), ))
        last_pv = pv.argmax()
        qv[last_pv] = (pi0 * pv[last_pv] * m) / float(m)
        pv[last_pv] = -sp.inf
        prev_qv = qv[last_pv]
        for i in xrange(int(len(pv)) - 2, -1, -1):
            cur_max = pv.argmax()
            qv_i = (pi0 * m * pv[cur_max] / float(i + 1))
            pv[cur_max] = -sp.inf
            qv_i1 = prev_qv
            qv[cur_max] = min(qv_i, qv_i1)
            prev_qv = qv[cur_max]

    else:
        p_ordered = sp.argsort(pv)
        pv = pv[p_ordered]
        qv = pi0 * m / len(pv) * pv
        qv[-1] = min(qv[-1], 1.0)

        for i in xrange(len(pv) - 2, -1, -1):
            qv[i] = min(pi0 * m * pv[i] / (i + 1.0), qv[i + 1])

        # reorder qvalues
        qv_temp = qv.copy()
        qv = sp.zeros_like(qv)
        qv[p_ordered] = qv_temp

    # reshape qvalues (applies to both branches)
    qv = qv.reshape(original_shape)

    return qv
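A minimal usage sketch for estimate; the p-values below are synthetic and the planted signals are purely illustrative:

import scipy as sp
import scipy.interpolate  # needed by estimate's pi0 spline fit

pv = sp.random.uniform(0.0, 1.0, 10000)        # null p-values
pv[:100] = sp.random.uniform(0.0, 1e-5, 100)   # plant a few true signals
qv = estimate(pv, verbose=True)
print "hits at q < 0.05:", (qv < 0.05).sum()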
Example #42
0
def main():
    global W, WI, U, L, V, L2, X, Y, Z
    q = 200
    sigma = 0.09
    tau = 0.1

    # boedecker's definition
    target_later = True
    use_input = False

    # smins = np.linspace(0, 1.0, 40)
    # smaxs = np.linspace(0, 2.0, 40)
    smins = np.linspace(0.8, 1.0, 40)
    smaxs = np.linspace(0.9, 1.3, 40)

    X, Y = meshgrid(smins, smaxs)
    Z = zeros(X.shape)

    W = sp.random.normal(0, sigma, [q, q])
    WI = sp.random.uniform(-tau, tau, q)

    U, L, V = svd(W)
    smin_old = L[-1]
    smax_old = L[0]
    L2 = sp.zeros_like(L)

    #smins =  sp.linspace(smin, 0.4, 40)
    mcs = sp.zeros_like(smins)

    for ri, (sminr, smaxr) in enumerate(zip(X, Y)):
        for si, (smin, smax) in enumerate(zip(sminr, smaxr)):
            L2 = stretch_vec(L, smin, smax, smin_old, smax_old)
            W2 = sp.dot(U, sp.dot(sp.diag(L2), V))
            mc = memory_capacity(W2,
                                 WI,
                                 memory_max=2 * q,
                                 iterations_coef_measure=1000,
                                 iterations=1000,
                                 use_input=use_input,
                                 target_later=target_later)
            mcv = sum(mc)
            if np.isnan(mcv):
                Z[ri, si] = -1
            else:
                Z[ri, si] = mcv
            #Z[si, ri] = random.random()
            print('.', end='')
        print(ri, 'of', len(X))
        #mcs[si] = sum(mc)

    #cmap = plt.get_cmap('PiYG')
    cmap = plt.get_cmap('nipy_spectral')

    c = plt.pcolormesh(X, Y, Z, cmap=cmap)
    plt.colorbar()
    #plt.ylim([0, 2])
    #plt.xlim([0, 2])
    plt.xlabel("smin")
    plt.ylabel("smax")
    try_save_fig()
    try_save_fig(ext="pdf")
    plt.show()
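stretch_vec and memory_capacity come from the surrounding project; the spectral manipulation itself is small. A sketch of what the stretching step plausibly does, namely an affine remap of the singular values onto [smin, smax] as the names suggest (this is an assumption, not the project's verified definition):

import scipy as sp
from scipy.linalg import svd

def stretch_singular_values(W, smin, smax):
    # Map the singular spectrum of W affinely onto [smin, smax].
    # Assumes a non-degenerate spectrum (L.max() != L.min()).
    U, L, Vh = svd(W)
    L2 = smin + (L - L.min()) * (smax - smin) / (L.max() - L.min())
    return sp.dot(U, sp.dot(sp.diag(L2), Vh))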
Example #43
0
    def solve(self, wls):
        """Isotropic solver.

        INPUT
        wls = wavelengths to scan (any asarray-able object).

        OUTPUT
        self.Rs, self.Ts, self.Rp, self.Tp = power reflected and
        transmitted on s and p polarizations.
        """

        self.wls = S.asarray(wls)

        multilayer = self.multilayer
        theta_inc = self.theta_inc

        nlayers = len(multilayer)
        d = S.array([l.thickness for l in multilayer]).ravel()

        Rs = S.zeros_like(self.wls)
        Ts = S.zeros_like(self.wls)
        Rp = S.zeros_like(self.wls)
        Tp = S.zeros_like(self.wls)

        Dp = S.zeros((2, 2), dtype=complex)
        Ds = S.zeros((2, 2), dtype=complex)
        P = S.zeros((2, 2), dtype=complex)
        Ms = S.zeros((2, 2), dtype=complex)
        Mp = S.zeros((2, 2), dtype=complex)
        k = S.zeros((nlayers, 2), dtype=complex)

        ntot = S.zeros((self.wls.size, nlayers), dtype=complex)
        for i, l in enumerate(multilayer):
            #            ntot[:,i] = l.mat.n(self.wls,l.mat.T0)
            ntot[:, i] = l.mat.n(self.wls, l.mat.toc.T0)

        for iwl, wl in enumerate(self.wls):

            n = ntot[iwl, :]
            theta = snell(theta_inc, n)

            k[:, 0] = 2 * S.pi * n / wl * S.cos(theta)
            k[:, 1] = 2 * S.pi * n / wl * S.sin(theta)

            Ds = [[1., 1.], [n[0] * S.cos(theta[0]), -n[0] * S.cos(theta[0])]]
            Dp = [[S.cos(theta[0]), S.cos(theta[0])], [n[0], -n[0]]]
            Ms = inv(Ds)
            Mp = inv(Dp)

            for nn, dd, tt, kk in zip(n[1:-1], d[1:-1], theta[1:-1], k[1:-1,
                                                                       0]):

                Ds = [[1., 1.], [nn * S.cos(tt), -nn * S.cos(tt)]]
                Dp = [[S.cos(tt), S.cos(tt)], [nn, -nn]]
                phi = kk * dd
                P = [[S.exp(1j * phi), 0], [0, S.exp(-1j * phi)]]
                Ms = S.dot(Ms, S.dot(Ds, S.dot(P, inv(Ds))))
                Mp = S.dot(Mp, S.dot(Dp, S.dot(P, inv(Dp))))

            Ds = [[1., 1.],
                  [n[-1] * S.cos(theta[-1]), -n[-1] * S.cos(theta[-1])]]
            Dp = [[S.cos(theta[-1]), S.cos(theta[-1])], [n[-1], -n[-1]]]
            Ms = S.dot(Ms, Ds)
            Mp = S.dot(Mp, Dp)

            rs = Ms[1, 0] / Ms[0, 0]
            ts = 1. / Ms[0, 0]

            rp = Mp[1, 0] / Mp[0, 0]
            tp = 1. / Mp[0, 0]

            Rs[iwl] = S.absolute(rs)**2
            Ts[iwl] = S.absolute((n[-1] * S.cos(theta[-1])) /
                                 (n[0] * S.cos(theta[0]))) * S.absolute(ts)**2
            Rp[iwl] = S.absolute(rp)**2
            Tp[iwl] = S.absolute((n[-1] * S.cos(theta[-1])) /
                                 (n[0] * S.cos(theta[0]))) * S.absolute(tp)**2

        self.Rs = Rs
        self.Ts = Ts
        self.Rp = Rp
        self.Tp = Tp
        return self
Example #44
0
def herm_fac_with_inv(A, lower=False, zero_tol=1E-15, return_rank=False, 
                      calc_inv=True, force_evd=False, 
                      sanity_checks=False, sc_data=''):
    """Factorizes a Hermitian matrix using either Cholesky or eigenvalue decomposition.
    
    Decomposes a Hermitian A as A = X*X or, if lower == True, A = XX*.
    
    Tries Cholesky first by default, then falls back to EVD if the matrix is 
    not positive-definite. If Cholesky decomposition is used, X is upper (or lower)
    triangular. For the EVD decomposition, the inverse becomes a pseudo-inverse
    and all eigenvalues below the zero-tolerance are set to zero.
    
    Parameters
    ----------
    A : ndarray
        The Hermitian matrix to be factorized.
    lower : bool
        Refers to Cholesky factorization. If True, factorize as A = XX*, otherwise as A = X*X
    zero_tol : float
        Tolerance for detection of zeros in EVD case.
    return_rank : bool
        Whether to return the rank of A. The detected rank is affected by zero_tol.
    calc_inv : bool
        Whether to calculate (and return) the inverse of the factor.
    force_evd : bool
        Whether to force eigenvalue instead of Cholesky decomposition.
    sanity_checks : bool
        Whether to perform some basic sanity checks.
    """    
    if not force_evd:
        try:
            x = la.cholesky(A, lower=lower)
            if calc_inv:
                xi = mm.invtr(x, lower=lower)
            else:
                xi = None
            
            nonzeros = A.shape[0]
        except sp.linalg.LinAlgError: #this usually means a is not pos. def.
            force_evd = True
            
    if force_evd:
        ev, EV = la.eigh(A, turbo=True) #wraps lapack routines, which return eigenvalues in ascending order
        
        if sanity_checks:
            assert np.all(ev == np.sort(ev)), "Sanity fail in herm_fac_with_inv(): Unexpected eigenvalue ordering"
            
            if ev.min() < -zero_tol:
                log.warning("Sanity fail in herm_fac_with_inv(): Discarding negative eigenvalues! %s %s",
                            ev.min(), sc_data)
        
        nonzeros = np.count_nonzero(ev > zero_tol) 

        ev_sq = sp.zeros_like(ev, dtype=A.dtype)
        ev_sq[-nonzeros:] = sp.sqrt(ev[-nonzeros:])
        ev_sq = mm.simple_diag_matrix(ev_sq, dtype=A.dtype)
        
        if calc_inv:
            #Replace almost-zero values with zero and perform a pseudo-inverse
            ev_sq_i = sp.zeros_like(ev, dtype=A.dtype)
            ev_sq_i[-nonzeros:] = 1. / sp.sqrt(ev[-nonzeros:])
            
            ev_sq_i = mm.simple_diag_matrix(ev_sq_i, dtype=A.dtype)        
                   
        xi = None
        if lower:
            x = ev_sq.dot_left(EV)
            if calc_inv:
                xi = ev_sq_i.dot(EV.conj().T)
        else:
            x = ev_sq.dot(EV.conj().T)
            if calc_inv:
                xi = ev_sq_i.dot_left(EV)
            
    if sanity_checks:
        if not sp.allclose(A, A.conj().T, atol=1E-13, rtol=1E-13):
            log.warning("Sanity fail in herm_fac_with_inv(): A is not Hermitian! %s %s",
                        la.norm(A - A.conj().T), sc_data)
        
        eye = sp.zeros((A.shape[0]), dtype=A.dtype)
        eye[-nonzeros:] = 1
        eye = mm.simple_diag_matrix(eye)
        
        if lower:
            if calc_inv:
                if not sp.allclose(xi.dot(x), eye, atol=1E-13, rtol=1E-13):
                    log.warning("Sanity fail in herm_fac_with_inv(): Bad left inverse! %s %s",
                                la.norm(xi.dot(x) - eye), sc_data)
                                
                if not sp.allclose(xi.dot(A).dot(xi.conj().T), eye, atol=1E-13, rtol=1E-13):
                    log.warning("Sanity fail in herm_fac_with_inv(): Bad A inverse! %s %s",
                                la.norm(xi.conj().T.dot(A).dot(xi) - eye), sc_data)
    
            if not sp.allclose(x.dot(x.conj().T), A, atol=1E-13, rtol=1E-13):
                log.warning("Sanity fail in herm_fac_with_inv(): Bad decomp! %s %s",
                            la.norm(x.dot(x.conj().T) - A), sc_data)
        else:
            if calc_inv:
                if not sp.allclose(x.dot(xi), eye, atol=1E-13, rtol=1E-13):
                    log.warning("Sanity fail in herm_fac_with_inv(): Bad right inverse! %s %s",
                                la.norm(x.dot(xi) - eye), sc_data)
                if not sp.allclose(xi.conj().T.dot(A).dot(xi), eye, atol=1E-13, rtol=1E-13):
                    log.warning("Sanity fail in herm_fac_with_inv(): Bad A inverse! %s %s",
                                la.norm(xi.conj().T.dot(A).dot(xi) - eye), sc_data)

    
            if not sp.allclose(x.conj().T.dot(x), A, atol=1E-13, rtol=1E-13):
                log.warning("Sanity fail in herm_fac_with_inv(): Bad decomp! %s %s",
                            la.norm(x.conj().T.dot(x) - A), sc_data)
                    
    if calc_inv:
        if return_rank:
            return x, xi, nonzeros
        else:
            return x, xi
    else:
        if return_rank:
            return x, nonzeros
        else:
            return x
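A quick, self-contained check of the Cholesky branch's contract (A = X*X when lower is False), using scipy.linalg directly in place of the mm helpers; the matrix size and seed are arbitrary:

import numpy as np
import scipy.linalg as la

rng = np.random.RandomState(0)
B = rng.randn(5, 5) + 1j * rng.randn(5, 5)
A = B.conj().T.dot(B)                       # Hermitian and positive-definite

x = la.cholesky(A, lower=False)             # upper-triangular factor X
assert np.allclose(x.conj().T.dot(x), A)    # A = X* X

xi = la.inv(x)                              # stand-in for mm.invtr(x, lower=False)
assert np.allclose(x.dot(xi), np.eye(5))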
Example #45
0
    def _update_canvas(self):
        """
        Update the figure when the user changes an input value.
        :return:
        """
        # Get the parameters from the form
        range_center = float(self.range_center.text())
        x_target = self.x_target.text().split(',')
        y_target = self.y_target.text().split(',')
        rcs = self.rcs.text().split(',')
        xt = []
        yt = []
        rt = []
        for x, y, r in zip(x_target, y_target, rcs):
            xt.append(float(x))
            yt.append(float(y))
            rt.append(float(r))

        x_span = float(self.x_span.text())
        y_span = float(self.y_span.text())

        nx_ny = self.nx_ny.text().split(',')
        nx = int(nx_ny[0])
        ny = int(nx_ny[1])

        start_frequency = float(self.start_frequency.text())
        bandwidth = float(self.bandwidth.text())

        az_start_end = self.az_start_end.text().split(',')
        az_start = float(az_start_end[0])
        az_end = float(az_start_end[1])

        # Set up the azimuth space
        r = sqrt(x_span**2 + y_span**2)
        da = c / (2.0 * r * start_frequency)
        na = int((az_end - az_start) / da)
        az = linspace(az_start, az_end, na)

        # Set up the frequency space
        df = c / (2.0 * r)
        nf = int(bandwidth / df)
        frequency = linspace(start_frequency, start_frequency + bandwidth, nf)

        # Set the length of the FFT
        fft_length = 8 * next_fast_len(nf)

        # Set up the aperture positions
        sensor_x = range_center * cos(radians(az))
        sensor_y = range_center * sin(radians(az))
        sensor_z = zeros_like(sensor_x)

        # Set up the image space
        self.xi = linspace(-0.5 * x_span, 0.5 * x_span, nx)
        self.yi = linspace(-0.5 * y_span, 0.5 * y_span, ny)
        x_image, y_image = meshgrid(self.xi, self.yi)
        z_image = zeros_like(x_image)

        # Calculate the signal (k space)
        signal = zeros([nf, na], dtype=complex)

        index = 0
        for a in az:
            r_los = [cos(radians(a)), sin(radians(a))]

            for x, y, r in zip(xt, yt, rt):
                r_target = -dot(r_los, [x, y])
                signal[:, index] += r * exp(
                    -1j * 4.0 * pi * frequency / c * r_target)
            index += 1

        # Get the selected window from the form
        window_type = self.window_type.currentText()

        if window_type == 'Hanning':
            h1 = hanning(nf, True)
            h2 = hanning(na, True)
            coefficients = sqrt(outer(h1, h2))
        elif window_type == 'Hamming':
            h1 = hamming(nf, True)
            h2 = hamming(na, True)
            coefficients = sqrt(outer(h1, h2))
        elif window_type == 'Rectangular':
            coefficients = ones([nf, na])

        # Apply the selected window
        signal *= coefficients

        # Reconstruct the image
        self.bp_image = backprojection.reconstruct(signal, sensor_x, sensor_y,
                                                   sensor_z, range_center,
                                                   x_image, y_image, z_image,
                                                   frequency, fft_length)

        # Update the image
        self._update_image_only()
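The azimuth and frequency sampling above follow standard backprojection rules of thumb: for a scene of diagonal extent r, the angular step is da = c / (2 r f0) and the frequency step is df = c / (2 r). A worked instance (all values illustrative):

from numpy import sqrt
from scipy.constants import c

x_span, y_span = 10.0, 10.0              # scene extent (m)
start_frequency = 1e9                    # Hz
bandwidth = 500e6                        # Hz

r = sqrt(x_span**2 + y_span**2)          # ~14.14 m
da = c / (2.0 * r * start_frequency)     # ~0.0106 rad per azimuth sample
df = c / (2.0 * r)                       # ~10.6 MHz per frequency sample
na = int(0.5 / da)                       # ~47 samples for a 0.5 rad aperture
nf = int(bandwidth / df)                 # ~47 frequency samples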
Example #46
0
def spike_trains(spiketrains,
                 spiketrains2=None,
                 alignment=None,
                 marker_width=3,
                 samples_per_second=None,
                 label1=None,
                 label2=None,
                 colours=None):
    """plot one set of spike trains or two sets of spike trains with their inter-spike alignment

    :type spiketrains: dict
    :param spiketrains: Dict of 1d ndarray, holding the spike times.
    :type spiketrains2: dict
    :param spiketrains2: Dict of 1d ndarray, holding the spike times. If this is given, an
        inter-spike alignment plot is created.
    :type alignment: list
    :param alignment: List of lists of tuples containing the pairwise spike alignments.
    :type marker_width: int
    :param marker_width: Fancy parameter for the plot.
    :type samples_per_second: int
    :param samples_per_second: Scale parameter for the axis.
    :type label1: list
    :param label1: Label list for interspike alignment set 1.
    :type label2: list
    :param label2: Label list for interspike alignment set 2.
    :rtype: matplotlib.figure.Figure
    """

    # init and checks
    col_lst = colours or COLOURS
    fig = gen_fig()
    ax = fig.add_subplot(111)
    if not len(spiketrains):
        raise Exception('Provide at least one spiketrain in set 1!')
    nneuron = len(spiketrains)
    srate = float(samples_per_second) if samples_per_second else 1.0
    offset = 0
    if spiketrains2 is not None:
        nneuron += len(spiketrains2)
        offset = 1
    labels = []
    idx = 0

    # plot the spike trains
    my_max_timesample = 0
    my_min_timesample = spiketrains[spiketrains.keys()[0]][0]
    for unit in sorted(spiketrains.keys()):
        col = col_lst[idx % len(col_lst)]
        ax.plot(spiketrains[unit] / srate,
                sp.zeros_like(spiketrains[unit]) + nneuron - 1 - idx,
                marker='|',
                mec=col,
                mfc=col,
                mew=marker_width,
                ls='None',
                ms=13)
        labels.append('Unit %s' % unit)
        idx += 1
        my_max_timesample = max(my_max_timesample, spiketrains[unit].max())
        my_min_timesample = min(my_min_timesample, spiketrains[unit].min())

    if spiketrains2 is not None:
        labels.append('')
        ax.axhline(y=nneuron - 1 - idx, xmin=0, xmax=1)
        for unit in sorted(spiketrains2.keys()):
            col = col_lst[idx % len(col_lst)]
            ax.plot(spiketrains2[unit] / srate,
                    sp.zeros_like(spiketrains2[unit]) + nneuron - 1 - idx -
                    offset,
                    marker='|',
                    mec=col,
                    mfc=col,
                    mew=marker_width,
                    ls='None',
                    ms=13)
            labels.append('Unit %s' % unit)
            idx += 1
            my_max_timesample = max(my_max_timesample,
                                    spiketrains2[unit].max())
            my_min_timesample = min(my_min_timesample,
                                    spiketrains2[unit].min())

    # plot alignment if provided
    if alignment is not None:
        if spiketrains2 is None:
            skeys = sorted(spiketrains.keys())

            for idx1 in xrange(len(skeys)):
                unit1 = skeys[idx1]
                for idx2 in xrange(idx1 + 1, len(skeys)):
                    unit2 = skeys[idx2]
                    for i in xrange(len(alignment[(unit1, unit2)])):
                        start = spiketrains[unit1][alignment[
                            (unit1, unit2)][i][0]] / srate
                        end = spiketrains[unit2][alignment[
                            (unit1, unit2)][i][1]] / srate
                        ax.plot((start, end),
                                (nneuron - idx1 - 1, nneuron - idx2 - 1),
                                c=(0, 0, 0),
                                ls=":")
        else:
            skeys1 = sorted(spiketrains.keys())
            skeys2 = sorted(spiketrains2.keys())

            for idx1 in xrange(len(skeys1)):
                unit1 = skeys1[idx1]
                for idx2 in xrange(len(skeys2)):
                    unit2 = skeys2[idx2]
                    for i in xrange(len(alignment[(unit1, unit2)])):
                        start = spiketrains[unit1][alignment[
                            (unit1, unit2)][i][0]] / srate
                        end = spiketrains2[unit2][alignment[
                            (unit1, unit2)][i][1]] / srate
                        ax.plot((start, end),
                                (nneuron - idx1 - 1,
                                 nneuron - len(skeys1) - idx2 - 1 - offset),
                                c=(0, 0, 0),
                                ls=":")

    # plot spike labels if provided
    labelList = ['TP', 'TPO', 'FP', 'FPA', 'FPAO', 'FN', 'FNO']
    if label1 is not None:
        idx = 0
        for unit in sorted(spiketrains.keys()):
            for i in xrange(len(label1[unit])):
                if label1[unit][i] - 1 > 1:
                    stri = labelList[label1[unit][i] - 1]
                    ax.text(spiketrains[unit][i] / srate, nneuron - 1 - idx,
                            stri)
            idx += 1
    if label2 is not None and spiketrains2 is not None:
        for unit in sorted(spiketrains2.keys()):
            for i in xrange(len(label2[unit])):
                if label2[unit][i] - 1 > 1:
                    stri = labelList[label2[unit][i] - 1]
                    ax.text(spiketrains2[unit][i] / srate,
                            nneuron - 1 - idx - offset, stri)
            idx += 1

    # beautify the figure
    #fig_ax.set_title('spiketrains all units')

    if srate == 1.0:
        ax.set_xlabel('time in samples')
    else:
        ax.set_xlabel('time in seconds')
        #ax.set_ylabel('')
    ax.set_yticks(sp.arange(nneuron + offset) - offset)
    ax.set_yticklabels(labels[::-1])
    ax.set_ylim((-0.5 - offset, nneuron - .5))
    #ax.set_xlim((my_min_timesample-1, my_min_timesample+1))

    # return
    return fig
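A minimal call of spike_trains with synthetic spike times (in samples); this assumes the module-level gen_fig and COLOURS used above are importable:

import scipy as sp

trains1 = {1: sp.array([100, 550, 1200]), 2: sp.array([300, 800])}
trains2 = {1: sp.array([120, 540, 1180])}
fig = spike_trains(trains1, spiketrains2=trains2,
                   samples_per_second=1000)
fig.savefig('spiketrains.png')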
Example #47
0
File: RCWA.py Project: seghil/EMpy
    def solve(self, wls):
        """Isotropic solver.
    
        INPUT
        wls = wavelengths to scan (any asarray-able object).
        
        OUTPUT
        self.DE1, self.DE3 = power reflected and transmitted.
        
        NOTE
        see:
        Moharam, "Formulation for stable and efficient implementation
        of the rigorous coupled-wave analysis of binary gratings",
        JOSA A, 12(5), 1995
        Lalanne, "Highly improved convergence of the coupled-wave
        method for TM polarization", JOSA A, 13(4), 1996
        Moharam, "Stable implementation of the rigorous coupled-wave
        analysis for surface-relief gratings: enhanced transmittance
        matrix approach", JOSA A, 12(5), 1995
        """

        self.wls = S.atleast_1d(wls)

        LAMBDA = self.LAMBDA
        n = self.n
        multilayer = self.multilayer
        alpha = self.alpha
        delta = self.delta
        psi = self.psi
        phi = self.phi

        nlayers = len(multilayer)
        i = S.arange(-n, n + 1)
        nood = 2 * n + 1
        hmax = nood - 1

        # grating vector (on the xz plane)
        K = 2 * pi / LAMBDA * S.array(
            [S.sin(phi), 0., S.cos(phi)],
            dtype=complex)  # grating on the xy plane

        DE1 = S.zeros((nood, self.wls.size))
        DE3 = S.zeros_like(DE1)

        dirk1 = S.array([S.sin(alpha) * S.cos(delta), \
                         S.sin(alpha) * S.sin(delta), \
                         S.cos(alpha)])

        # useful matrices
        I = S.eye(i.size)
        I2 = S.eye(i.size * 2)
        ZERO = S.zeros_like(I)

        X = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
        MTp1 = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
        MTp2 = S.zeros_like(MTp1)

        EPS2 = S.zeros(2 * hmax + 1, dtype=complex)
        EPS21 = S.zeros_like(EPS2)

        dlt = (i == 0).astype(int)

        for iwl, wl in enumerate(self.wls):

            # free space wavevector
            k = 2 * pi / wl

            n1 = multilayer[0].mat.n(wl).item()
            n3 = multilayer[-1].mat.n(wl).item()

            # incident plane wave wavevector
            k1 = k * n1 * dirk1

            # all the other wavevectors
            tmp_x = k1[0] - i * K[0]
            tmp_y = k1[1] * S.ones_like(i)
            tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n1)
            k1i = S.r_[[tmp_x], [tmp_y], [tmp_z]]

            #k2i = S.r_[[k1[0] - i*K[0]], [k1[1] - i * K[1]], [-i * K[2]]]

            tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n3)
            k3i = S.r_[[k1i[0, :]], [k1i[1, :]], [tmp_z]]

            # aliases for constant wavevectors
            kx = k1i[0, :]
            ky = k1[1]

            # angles of reflection
            # phi_i = S.arctan2(ky,kx)
            phi_i = S.arctan2(ky, kx.real)  # NB: kx may be complex; use its real part

            Kx = S.diag(kx / k)
            Ky = ky / k * I
            Z1 = S.diag(k1i[2, :] / (k * n1**2))
            Y1 = S.diag(k1i[2, :] / k)
            Z3 = S.diag(k3i[2, :] / (k * n3**2))
            Y3 = S.diag(k3i[2, :] / k)
            # Fc = S.diag(S.cos(phi_i))
            fc = S.cos(phi_i)
            # Fs = S.diag(S.sin(phi_i))
            fs = S.sin(phi_i)

            MR = S.asarray(S.bmat([[I, ZERO], \
                                   [-1j*Y1, ZERO], \
                                   [ZERO, I], \
                                   [ZERO, -1j*Z1]]))

            MT = S.asarray(S.bmat([[I, ZERO], \
                                   [1j*Y3, ZERO], \
                                   [ZERO, I], \
                                   [ZERO, 1j*Z3]]))

            # internal layers (grating or layer)
            X.fill(0.0)
            MTp1.fill(0.0)
            MTp2.fill(0.0)
            for nlayer in xrange(nlayers - 2, 0, -1):  # internal layers

                layer = multilayer[nlayer]
                d = layer.thickness

                EPS2, EPS21 = layer.getEPSFourierCoeffs(wl,
                                                        n,
                                                        anisotropic=False)

                E = toeplitz(EPS2[hmax::-1], EPS2[hmax:])
                E1 = toeplitz(EPS21[hmax::-1], EPS21[hmax:])
                E11 = inv(E1)
                # B = S.dot(Kx, linsolve(E,Kx)) - I
                B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                # A = S.dot(Kx, Kx) - E
                A = S.diag((kx / k)**2) - E

                # NB: workaround for an ill-conditioning bug (alfredo)
                # randomly perturb Kx until cond(A) is small (<1e10)
                # dirty fix... :-(
                # for certain kx the Helmholtz operator has 2 null eigenvalues and A, B
                # are not invertible --> perturb kx slightly... these cases should
                # really be treated separately (analytically) instead
                if cond(A) > 1e10:
                    warning('BAD CONDITIONING: randomization of kx')
                    while cond(A) > 1e10:
                        Kx = Kx * (1 + 1e-9 * S.rand())
                        # B = S.dot(Kx, linsolve(E, Kx)) - I
                        B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                        # A = S.dot(Kx, Kx) - E
                        A = S.diag((kx / k)**2) - E

                if S.absolute(K[2] / k) > 1e-10:

                    raise ValueError(
                        'First Order Helmholtz Operator not implemented, yet!')

                elif ky == 0 or S.allclose(S.diag(Ky / ky * k), 1):

                    # lalanne
                    # H_U_reduced = S.dot(Ky, Ky) + A
                    H_U_reduced = (ky / k)**2 * I + A
                    # H_S_reduced = S.dot(Ky, Ky) + S.dot(Kx, linsolve(E, S.dot(Kx, E11))) - E11
                    H_S_reduced = (ky /
                                   k)**2 * I + kx[:, S.newaxis] / k * linsolve(
                                       E, kx[:, S.newaxis] / k * E11) - E11

                    q1, W1 = eig(H_U_reduced)
                    q1 = S.sqrt(q1)
                    q2, W2 = eig(H_S_reduced)
                    q2 = S.sqrt(q2)

                    # boundary conditions

                    # V11 = S.dot(linsolve(A, W1), S.diag(q1))
                    V11 = linsolve(A, W1) * q1[S.newaxis, :]
                    V12 = (ky / k) * S.dot(linsolve(A, Kx), W2)
                    V21 = (ky / k) * S.dot(linsolve(B, Kx), linsolve(E, W1))
                    # V22 = S.dot(linsolve(B, W2), S.diag(q2))
                    V22 = linsolve(B, W2) * q2[S.newaxis, :]

                    # Vss = S.dot(Fc, V11)
                    Vss = fc[:, S.newaxis] * V11
                    # Wss = S.dot(Fc, W1)  + S.dot(Fs, V21)
                    Wss = fc[:, S.newaxis] * W1 + fs[:, S.newaxis] * V21
                    # Vsp = S.dot(Fc, V12) - S.dot(Fs, W2)
                    Vsp = fc[:, S.newaxis] * V12 - fs[:, S.newaxis] * W2
                    # Wsp = S.dot(Fs, V22)
                    Wsp = fs[:, S.newaxis] * V22
                    # Wpp = S.dot(Fc, V22)
                    Wpp = fc[:, S.newaxis] * V22
                    # Vpp = S.dot(Fc, W2)  + S.dot(Fs, V12)
                    Vpp = fc[:, S.newaxis] * W2 + fs[:, S.newaxis] * V12
                    # Wps = S.dot(Fc, V21) - S.dot(Fs, W1)
                    Wps = fc[:, S.newaxis] * V21 - fs[:, S.newaxis] * W1
                    # Vps = S.dot(Fs, V11)
                    Vps = fs[:, S.newaxis] * V11

                    Mc2bar = S.asarray(S.bmat([[Vss, Vsp,  Vss,  Vsp], \
                                               [Wss, Wsp, -Wss, -Wsp], \
                                               [Wps, Wpp, -Wps, -Wpp], \
                                               [Vps, Vpp,  Vps,  Vpp]]))

                    x = S.r_[S.exp(-k * q1 * d), S.exp(-k * q2 * d)]

                    # Mc1 = S.dot(Mc2bar, S.diag(S.r_[S.ones_like(x), x]))
                    xx = S.r_[S.ones_like(x), x]
                    Mc1 = Mc2bar * xx[S.newaxis, :]

                    X[:, :, nlayer] = S.diag(x)

                    MTp = linsolve(Mc2bar, MT)
                    MTp1[:, :, nlayer] = MTp[0:2 * nood, :]
                    MTp2 = MTp[2 * nood:, :]

                    MT = S.dot(
                        Mc1, S.
                        r_[I2,
                           S.dot(MTp2,
                                 linsolve(MTp1[:, :, nlayer], X[:, :,
                                                                nlayer]))])

                else:

                    raise ValueError(
                        'Second Order Helmholtz Operator not implemented, yet!'
                    )

            #M = S.asarray(S.bmat([-MR, MT]))
            M = S.c_[-MR, MT]
            b = S.r_[S.sin(psi) * dlt, \
                     1j * S.sin(psi) * n1 * S.cos(alpha) * dlt, \
                     -1j * S.cos(psi) * n1 * dlt, \
                     S.cos(psi) * S.cos(alpha) * dlt]

            x = linsolve(M, b)
            R, T = S.split(x, 2)
            Rs, Rp = S.split(R, 2)
            for ii in xrange(1, nlayers - 1):
                T = S.dot(linsolve(MTp1[:, :, ii], X[:, :, ii]), T)
            Ts, Tp = S.split(T, 2)

            DE1[:,iwl] = (k1i[2,:]/(k1[2])).real       * S.absolute(Rs)**2 + \
                         (k1i[2,:]/(k1[2]*n1**2)).real * S.absolute(Rp)**2
            DE3[:,iwl] = (k3i[2,:]/(k1[2])).real       * S.absolute(Ts)**2 + \
                         (k3i[2,:]/(k1[2]*n3**2)).real * S.absolute(Tp)**2

        # save the results
        self.DE1 = DE1
        self.DE3 = DE3

        return self
Example #48
0
def get_stack_data(f):
    #Set up variables.
    deltas = {}
    T_stack = sp.zeros(nstack)
    n_stack = sp.zeros(nstack)

    h = fitsio.FITS(f)
    thid = h['METADATA']['MOCKID'][:]
    if sp.in1d(thid, zcat_thid).sum() == 0:
        h.close()
        return None
    ra = h['METADATA']['RA'][:].astype(sp.float64) * sp.pi / 180.
    dec = h['METADATA']['DEC'][:].astype(sp.float64) * sp.pi / 180.
    z = h['METADATA']['Z'][:]
    ll = sp.log10(h['WAVELENGTH'].read())
    trans_names = []
    try:
        trans = h['F_LYA'].read()
    except KeyError:
        try:
            trans = h['F'].read()
        except KeyError:
            try:
                trans = h['TRANSMISSION'].read()
            except KeyError:
                raise KeyError('Transmission not found; check file format.')
    if args.add_Lyb:
        try:
            trans_Lyb = h['F_LYB'].read()
            trans *= trans_Lyb
        except KeyError:
            raise KeyError(
                'Lyb transmission not found; only \'final\' format supported currently.'
            )
    if args.add_metals:
        try:
            trans_metals = h['F_METALS'].read()
            trans *= trans_metals
        except KeyError:
            raise KeyError(
                'Metals transmission not found; only \'final\' format supported currently.'
            )
    nObj = z.size
    pixnum = f.split('-')[-1].split('.')[0]

    if trans.shape[0] != nObj:
        trans = trans.transpose()

    bins = sp.floor((ll - lmin) / dll + 0.5).astype(int)
    tll = lmin + bins * dll
    lObs = (10**tll) * sp.ones(nObj)[:, None]
    lRF = (10**tll) / (1. + z[:, None])
    w = sp.zeros_like(trans).astype(int)
    w[(lObs >= lObs_min) & (lObs < lObs_max) & (lRF > lRF_min) &
      (lRF < lRF_max)] = 1
    nbPixel = sp.sum(w, axis=1)
    cut = nbPixel >= 50
    cut &= sp.in1d(thid, zcat_thid)
    if cut.sum() == 0:
        h.close()
        return None

    ra = ra[cut]
    dec = dec[cut]
    z = z[cut]
    thid = thid[cut]
    trans = trans[cut, :]
    w = w[cut, :]
    nObj = z.size
    h.close()

    deltas[pixnum] = []
    for i in range(nObj):
        tll = ll[w[i, :] > 0]
        ttrans = trans[i, :][w[i, :] > 0]

        bins = sp.floor((tll - lmin) / dll + 0.5).astype(int)
        cll = lmin + sp.arange(nstack) * dll
        cfl = sp.bincount(bins, weights=ttrans, minlength=nstack)
        civ = sp.bincount(bins, minlength=nstack).astype(float)

        ww = civ > 0.
        if ww.sum() < 50: continue
        T_stack += cfl
        n_stack += civ
        cll = cll[ww]
        cfl = cfl[ww] / civ[ww]
        civ = civ[ww]
        deltas[pixnum].append(
            delta(thid[i], ra[i], dec[i], z[i], thid[i], thid[i], thid[i], cll,
                  civ, None, cfl, 1, None, None, None, None, None, None))

    return (n_stack, T_stack, deltas)
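The core of the stack above is just two bincounts over a common log10-wavelength grid. A stripped-down sketch; lmin, dll and nstack are module-level constants in the original, and the values and helper name here are illustrative:

import scipy as sp

lmin, dll, nstack = 3.55, 3e-4, 1000     # log10(lambda) grid (illustrative)

def stack_one(ll, trans, T_stack, n_stack):
    # Accumulate one transmission spectrum onto the common grid.
    # Assumes ll lies within the grid so that 0 <= bins < nstack.
    bins = sp.floor((ll - lmin) / dll + 0.5).astype(int)
    T_stack += sp.bincount(bins, weights=trans, minlength=nstack)
    n_stack += sp.bincount(bins, minlength=nstack).astype(float)

# Mean stacked transmission, where covered: T_stack[n_stack > 0] / n_stack[n_stack > 0]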
Example #49
0
def qvalues(pv,
            m=None,
            return_pi0=False,
            lowmem=False,
            pi0=None,
            fix_lambda=None):

    original_shape = pv.shape

    assert (pv.min() >= 0 and pv.max() <= 1)

    pv = pv.ravel()  # flatten the array; ravel() returns a view when possible,
                     # avoiding the copy that flatten() always makes

    if m is None:
        m = float(len(pv))
    else:
        # the user has supplied an m, let's use it
        m *= 1.0

    # if the number of hypotheses is small, just set pi0 to 1
    if len(pv) < 100:
        pi0 = 1.0
    elif pi0 is not None:
        pi0 = pi0
    else:
        # evaluate pi0 for different lambdas
        pi0 = []
        lam = sp.arange(0, 0.90, 0.01)
        counts = sp.array([(pv > i).sum() for i in sp.arange(0, 0.9, 0.01)])

        if fix_lambda is not None:
            interv_count = (pv > fix_lambda - 0.01).sum()
            uniform_sim = sp.array([
                (pv > fix_lambda - 0.01).sum() * (i + 1)
                for i in sp.arange(0, len(sp.arange(0, 0.90, 0.01)))
            ][::-1])
            counts += uniform_sim

        for l in range(len(lam)):
            pi0.append(counts[l] / (m * (1 - lam[l])))

        pi0 = sp.array(pi0)

        # fit natural cubic spline
        tck = sp.interpolate.splrep(lam, pi0, k=3)
        pi0 = sp.interpolate.splev(lam[-1], tck)
        if pi0 > 1:
            LG.warning(
                "got pi0 > 1 (%.3f) while estimating qvalues, setting it to 1"
                % pi0)
            pi0 = 1.0

        assert (pi0 >= 0 and pi0 <= 1), "%f" % pi0

    if lowmem:
        # low-memory version: keeps only one pv and one qv array in memory
        qv = sp.zeros((len(pv), ))
        last_pv = pv.argmax()
        qv[last_pv] = (pi0 * pv[last_pv] * m) / float(m)
        pv[last_pv] = -sp.inf
        prev_qv = qv[last_pv]
        for i in xrange(int(len(pv)) - 2, -1, -1):
            cur_max = pv.argmax()
            qv_i = (pi0 * m * pv[cur_max] / float(i + 1))
            pv[cur_max] = -sp.inf
            qv_i1 = prev_qv
            qv[cur_max] = min(qv_i, qv_i1)
            prev_qv = qv[cur_max]

    else:
        p_ordered = sp.argsort(pv)
        pv = pv[p_ordered]
        # estimate qvalues
        # 	qv = pi0*m*pv/(sp.arange(len(pv))+1.0)

        # 	for i in xrange(int(len(qv))-2, 0, -1):
        # 	    qv[i] = min([qv[i], qv[i+1]])

        qv = pi0 * m / len(pv) * pv
        qv[-1] = min(qv[-1], 1.0)

        for i in xrange(len(pv) - 2, -1, -1):
            qv[i] = min(pi0 * m * pv[i] / (i + 1.0), qv[i + 1])

        # reorder qvalues
        qv_temp = qv.copy()
        qv = sp.zeros_like(qv)
        qv[p_ordered] = qv_temp

    # reshape qvalues
    qv = qv.reshape(original_shape)

    if return_pi0:
        return qv, pi0
    else:
        return qv
Example #50
0
File: lstm.py Project: Yevgnen/LSTM
    def bptt(self, x, t, cells):
        """Back propagation throuth time of a sample.

        Reference: [1] LSTM: A Search Space Odyssey, Klaus Greff, Rupesh Kumar Srivastava, Jan Koutník,
                       Bas R. Steunebrink, Jürgen Schmidhuber, http://arxiv.org/abs/1503.04069
        """
        dWz = sp.zeros_like(self.Wz)
        dWi = sp.zeros_like(self.Wi)
        dWf = sp.zeros_like(self.Wf)
        dWo = sp.zeros_like(self.Wo)

        dRz = sp.zeros_like(self.Rz)
        dRi = sp.zeros_like(self.Ri)
        dRf = sp.zeros_like(self.Rf)
        dRo = sp.zeros_like(self.Ro)

        dpi = sp.zeros_like(self.pi)
        dpf = sp.zeros_like(self.pf)
        dpo = sp.zeros_like(self.po)

        dbz = sp.zeros_like(self.bz)
        dbi = sp.zeros_like(self.bi)
        dbf = sp.zeros_like(self.bf)
        dbo = sp.zeros_like(self.bo)

        dV = sp.zeros_like(self.V)
        dc = sp.zeros_like(self.c)

        tau = len(x)

        dcbar = sp.zeros(self.hidden_size)
        next_dzbar = sp.zeros(self.hidden_size)
        next_dibar = sp.zeros(self.hidden_size)
        next_dfbar = sp.zeros(self.hidden_size)
        next_dobar = sp.zeros(self.hidden_size)

        for i in range(tau - 1, -1, -1):
            # FIXME:
            # 1. Should not use cells[i] directly, since there may be multiple hidden layers.
            # 2. The exponential-family output assumption should not be hard-coded here.
            ix = x[i]
            one_hot_t = sp.zeros(self.vocab_size)
            one_hot_t[t[i]] = 1

            # Cell of time i
            cell = cells[i]
            # Hidden layer of current cell
            hidden = cell[0]
            # Output layer of current cell
            output = cell[-1]
            # Hidden layer of time i - 1
            prev_hidden = cells[i - 1][0] if i - 1 >= 0 else None
            # Hidden layer of time i + 1
            next_hidden = cells[i + 1][0] if i + 1 < tau else None

            # Error of current time i
            (gzbar, gibar, gfbar, gobar, gc) = hidden.backward()
            # Error or information of time i + 1 or i - 1
            prev_c = prev_hidden.c if prev_hidden is not None else sp.zeros(
                self.hidden_size)
            next_f = next_hidden.f if next_hidden is not None else sp.zeros(
                self.hidden_size)

            # FIXME: The error function should not be hard-coded here.
            # The exact but slower expression for `da` is: da = sp.dot(output.backward().T, -one_hot_t / output.y)
            # The order in which the deltas are evaluated is IMPORTANT!
            output_da = output.y - one_hot_t
            dh = sp.dot(
                self.V.T, output_da) + sp.dot(self.Rz.T, next_dzbar) + sp.dot(
                    self.Ri.T, next_dibar) + sp.dot(
                        self.Rf.T, next_dfbar) + sp.dot(self.Ro.T, next_dobar)
            dobar = dh * hidden.a * gobar
            dcbar = dh * hidden.o * gc + self.po * dobar + self.pi * next_dibar + self.pf * next_dfbar + dcbar * next_f
            dfbar = dcbar * prev_c * gfbar
            dibar = dcbar * hidden.z * gibar
            dzbar = dcbar * hidden.i * gzbar

            # Gradient back propagation through time
            dbz += dzbar
            dbi += dibar
            dbf += dfbar
            dbo += dobar

            dpi += hidden.c * next_dibar
            dpf += hidden.c * next_dfbar
            dpo += hidden.c * dobar

            dWz[:, ix] += dzbar
            dWi[:, ix] += dibar
            dWf[:, ix] += dfbar
            dWo[:, ix] += dobar

            dRz += sp.outer(next_dzbar, hidden.h)
            dRi += sp.outer(next_dibar, hidden.h)
            dRf += sp.outer(next_dfbar, hidden.h)
            dRo += sp.outer(next_dobar, hidden.h)

            dV += sp.outer(output_da, hidden.h)
            dc += output_da

            # Save current information for time i - 1
            next_dzbar = dzbar
            next_dibar = dibar
            next_dfbar = dfbar
            next_dobar = dobar

            grads = (dWz, dWi, dWf, dWo, dRz, dRi, dRf, dRo, dpi, dpf, dpo,
                     dbz, dbi, dbf, dbo, dV, dc)
            for grad in grads:
                sp.clip(grad, -self.clip, self.clip, out=grad)

        return grads
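
Hand-derived BPTT gradients like the ones accumulated above are easy to get subtly wrong, so a central-difference spot check is worth keeping nearby. A minimal, framework-agnostic sketch (all names hypothetical; loss_fn is assumed to rerun the forward pass on the same sample, and param must be a contiguous array so that ravel() returns a view):

import numpy as np

def grad_check(loss_fn, param, analytic_grad, eps=1e-5, n_checks=5, seed=0):
    # Compare a few randomly chosen entries of analytic_grad against
    # central finite differences of the loss.
    rng = np.random.default_rng(seed)
    flat = param.ravel()  # view into param (contiguous arrays only)
    for idx in rng.choice(flat.size, size=min(n_checks, flat.size), replace=False):
        orig = flat[idx]
        flat[idx] = orig + eps
        loss_plus = loss_fn()
        flat[idx] = orig - eps
        loss_minus = loss_fn()
        flat[idx] = orig
        numeric = (loss_plus - loss_minus) / (2.0 * eps)
        analytic = analytic_grad.ravel()[idx]
        rel_err = abs(numeric - analytic) / max(abs(numeric) + abs(analytic), 1e-12)
        print(idx, numeric, analytic, rel_err)
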
Example #51
0
    def _update_canvas(self):
        """
        Update the figure when the user changes an input value.
        :return:
        """
        # Get the parameters from the form
        time = self.time.text().split(',')
        start = float(time[0])
        end = float(time[1])
        step = float(time[2])
        number_of_updates = round((end - start) / step)
        t, dt = linspace(start, end, number_of_updates, retstep=True)

        # Adaptive parameters
        threshold = float(self.threshold.text())
        scale = float(self.scale.text())

        # Maneuver parameters
        maneuver_time = float(self.maneuver_time.text())
        vm_xyz = self.maneuver_velocity.text().split(',')
        vmx = float(vm_xyz[0])
        vmy = float(vm_xyz[1])
        vmz = float(vm_xyz[2])

        # Initial position
        p_xyz = self.initial_position.text().split(',')
        px = float(p_xyz[0])
        py = float(p_xyz[1])
        pz = float(p_xyz[2])

        # Initial velocity
        v_xyz = self.initial_velocity.text().split(',')
        vx = float(v_xyz[0])
        vy = float(v_xyz[1])
        vz = float(v_xyz[2])

        # Measurement and process noise variance
        measurement_noise_variance = float(self.measurement_noise_variance.text())
        process_noise_variance = float(self.process_noise_variance.text())

        # Create target trajectory
        x_true = zeros([6, number_of_updates])

        pre_index = [n for n, e in enumerate(t) if e < maneuver_time]
        post_index = [n for n, e in enumerate(t) if e >= maneuver_time]

        x = px + vx * t[pre_index]
        xm = x[-1] + vmx * (t[post_index] - maneuver_time)

        y = py + vy * t[pre_index]
        ym = y[-1] + vmy * (t[post_index] - maneuver_time)

        z = pz + vz * t[pre_index]
        zm = z[-1] + vmz * (t[post_index] - maneuver_time)

        x_true[0] = [*x, *xm]
        x_true[1] = [*(vx * ones_like(t[pre_index])), *(vmx * ones_like(t[post_index]))]
        x_true[2] = [*y, *ym]
        x_true[3] = [*(vy * ones_like(t[pre_index])), *(vmy * ones_like(t[post_index]))]
        x_true[4] = [*z, *zm]
        x_true[5] = [*(vz * ones_like(t[pre_index])), *(vmz * ones_like(t[post_index]))]

        # Measurement noise
        v = sqrt(measurement_noise_variance) * (random.rand(number_of_updates) - 0.5)

        # Initialize state and input control vector
        x = zeros(6)
        u = zeros_like(x)

        # Initialize the covariance and control matrix
        P = 1.0e3 * eye(6)
        B = zeros_like(P)

        # Initialize measurement and process noise variance
        R = measurement_noise_variance * eye(3)
        Q = process_noise_variance * eye(6)

        # State transition and measurement transition
        A = eye(6)
        A[0, 1] = dt
        A[2, 3] = dt
        A[4, 5] = dt

        # Measurement transition matrix
        H = zeros([3, 6])
        H[0, 0] = 1
        H[1, 2] = 1
        H[2, 4] = 1

        # Initialize the Kalman filter
        kf = kalman.Kalman(x, u, P, A, B, Q, H, R)

        # Generate the measurements
        z = [matmul(H, x_true[:, i]) + v[i] for i in range(number_of_updates)]

        # Update the filter for each measurement
        kf.filter_epsilon(z, threshold, scale)

        # Clear the axes for the updated plot
        self.axes1.clear()

        # Get the selected plot from the form
        plot_type = self.plot_type.currentText()

        # Display the results
        if plot_type == 'Position - X':
            self.axes1.plot(t, x_true[0, :], '', label='True')
            self.axes1.plot(t, [zi[0] for zi in z], ':', label='Measurement')
            self.axes1.plot(t, [xi[0] for xi in kf.state], '--', label='Filtered')
            self.axes1.set_ylabel('Position - X (m)', size=12)
            self.axes1.legend(loc='best', prop={'size': 10})
        elif plot_type == 'Position - Y':
            self.axes1.plot(t, x_true[2, :], '', label='True')
            self.axes1.plot(t, [zi[1] for zi in z], ':', label='Measurement')
            self.axes1.plot(t, [xi[2] for xi in kf.state], '--', label='Filtered')
            self.axes1.set_ylabel('Position - Y (m)', size=12)
            self.axes1.legend(loc='best', prop={'size': 10})
        elif plot_type == 'Position - Z':
            self.axes1.plot(t, x_true[4, :], '', label='True')
            self.axes1.plot(t, [zi[2] for zi in z], ':', label='Measurement')
            self.axes1.plot(t, [xi[4] for xi in kf.state], '--', label='Filtered')
            self.axes1.set_ylabel('Position - Z (m)', size=12)
            self.axes1.legend(loc='best', prop={'size': 10})
        elif plot_type == 'Velocity - X':
            self.axes1.plot(t, x_true[1, :], '', label='True')
            self.axes1.plot(t, [xi[1] for xi in kf.state], '--', label='Filtered')
            self.axes1.set_ylabel('Velocity - X (m/s)', size=12)
            self.axes1.legend(loc='best', prop={'size': 10})
        elif plot_type == 'Velocity - Y':
            self.axes1.plot(t, x_true[3, :], '', label='True')
            self.axes1.plot(t, [xi[3] for xi in kf.state], '--', label='Filtered')
            self.axes1.set_ylabel('Velocity - Y (m/s)', size=12)
            self.axes1.legend(loc='best', prop={'size': 10})
        elif plot_type == 'Velocity - Z':
            self.axes1.plot(t, x_true[5, :], '', label='True')
            self.axes1.plot(t, [xi[5] for xi in kf.state], '--', label='Filtered')
            self.axes1.set_ylabel('Velocity - Z (m/s)', size=12)
            self.axes1.legend(loc='best', prop={'size': 10})
        elif plot_type == 'Residual':
            self.axes1.plot(t, kf.residual, '')
            self.axes1.set_ylabel('Residual (m)', size=12)

        # Set the plot title and labels
        self.axes1.set_title(r'Adaptive Kalman Filter - $\epsilon_k$ Method', size=14)
        self.axes1.set_xlabel('Time (s)', size=12)

        # Set the tick label size
        self.axes1.tick_params(labelsize=12)

        # Turn on the grid
        self.axes1.grid(linestyle=':', linewidth=0.5)

        # Update the canvas
        self.my_canvas.draw()
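
The kalman.Kalman class and its filter_epsilon method are project-specific. For orientation, one predict/update cycle of a plain (non-adaptive) linear Kalman filter, with the same roles for A, H, Q and R as above, looks roughly like this (a sketch, not the project's API):

import numpy as np

def kalman_step(x, P, z, A, H, Q, R):
    # Predict.
    x_pred = A @ x
    P_pred = A @ P @ A.T + Q
    # Update with measurement z.
    S = H @ P_pred @ H.T + R              # innovation covariance
    K = P_pred @ H.T @ np.linalg.inv(S)   # Kalman gain
    residual = z - H @ x_pred
    x_new = x_pred + K @ residual
    P_new = (np.eye(len(x)) - K @ H) @ P_pred
    return x_new, P_new, residual
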
    def crossValidation(self,
                        seed=0,
                        n_folds=10,
                        fullVector=True,
                        verbose=None,
                        D=None,
                        **keywords):
        """
        Split the dataset in n folds, predict each fold after training the model on all the others

        Args:
            seed:        random seed used for the fold assignment
            n_folds:     number of folds used for cross-validation
            fullVector:  Boolean indicator; if True, stop if no convergence is observed for one of the folds, otherwise continue and return a phenotype matrix with missing values
            verbose:     if True, print the fold that is being used for predictions
            **keywords:  params to pass to the function optimize
        Returns:
            Matrix of phenotype predictions [N,P]
        """
        verbose = dlimix_legacy.getVerbose(verbose)

        # split samples into training and test
        sp.random.seed(seed)
        r = sp.random.permutation(self.Y.shape[0])
        nfolds = n_folds  # the hard-coded 10 here silently ignored the n_folds argument
        Icv = sp.floor(((sp.ones(
            (self.Y.shape[0])) * nfolds) * r) / self.Y.shape[0])

        RV = {}
        if self.P == 1: RV['var'] = sp.zeros((nfolds, self.n_randEffs))
        else: RV['var'] = sp.zeros((nfolds, self.P, self.n_randEffs))

        Ystar = sp.zeros_like(self.Y)

        for fold_j in range(n_folds):

            if verbose:
                print((".. predict fold %d" % fold_j))

            Itrain = Icv != fold_j
            Itest = Icv == fold_j
            Ytrain = self.Y[Itrain, :]
            Ytest = self.Y[Itest, :]
            vc = VarianceDecomposition(Ytrain)
            vc.setTestSampleSize(Itest.sum())
            for term_i in range(self.n_fixedEffs):
                F = self.vd.getFixed(term_i)
                Ftest = F[Itest, :]
                Ftrain = F[Itrain, :]
                if self.P > 1: A = self.vd.getDesign(term_i)
                else: A = None
                vc.addFixedEffect(F=Ftrain, Ftest=Ftest, A=A)
            for term_i in range(self.n_randEffs):
                if self.P > 1:
                    tct = self.trait_covar_type[term_i]
                    rank = self.rank[term_i]
                    ftc = self.fixed_tc[term_i]
                    jitt = self.jitter[term_i]
                    if tct == 'lowrank_diag1' or tct == 'freeform1':
                        d = D[fold_j, :, term_i]
                    else:
                        d = None
                else:
                    tct = None
                    rank = None
                    ftc = None
                    jitt = None
                    d = None
                if term_i == self.noisPos:
                    vc.addRandomEffect(is_noise=True,
                                       trait_covar_type=tct,
                                       rank=rank,
                                       jitter=jitt,
                                       fixed_trait_covar=ftc,
                                       d=d)
                else:
                    R = self.vd.getTerm(term_i).getK()
                    Rtrain = R[Itrain, :][:, Itrain]
                    Rcross = R[Itrain, :][:, Itest]
                    vc.addRandomEffect(K=Rtrain,
                                       Kcross=Rcross,
                                       trait_covar_type=tct,
                                       rank=rank,
                                       jitter=jitt,
                                       fixed_trait_covar=ftc,
                                       d=d)
            conv = vc.optimize(verbose=False, **keywords)
            if self.P == 1:
                RV['var'][fold_j, :] = vc.getVarianceComps()[0, :]
            else:
                RV['var'][fold_j, :, :] = vc.getVarianceComps()

            if fullVector:
                assert conv, 'VarianceDecompositon:: not converged for fold %d. Stopped here' % fold_j
            if conv:
                Ystar[Itest, :] = vc.predictPhenos()
            else:
                warnings.warn('not converged for fold %d' % fold_j)
                Ystar[Itest, :] = sp.nan

        return Ystar, RV
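
The Icv expression above packs the fold assignment into a single line; written out, it is a random permutation bucketed into n_folds near-equal groups. An equivalent sketch (hypothetical helper, numpy):

import numpy as np

def fold_assignment(n_samples, n_folds, seed=0):
    # Each sample gets a fold label in [0, n_folds), balanced to within one.
    rng = np.random.default_rng(seed)
    r = rng.permutation(n_samples)
    return np.floor(n_folds * r / float(n_samples)).astype(int)

# e.g. fold_assignment(10, 3) gives a shuffled mix of the labels 0, 1 and 2
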
def plotStrip(at, ps, crop):
    """
    Plot a strip of colored polygons along a trace of GPS coordinates (deg).
    Extension of the strip outward from the line to either side is specified
    by ps.sideRange, in meters.

    Parameters
    ----------
    at.fix : float (deg), (pktCount)x2 array
      [longitude, latitude] coordinates of ship, rows are packets in order.
    at.depth :  float (m), array length pktCount
      Water depth beneath the ship at each fix. Used with extended lead-in
      length of cable to estimate sensor position by layback calculation.
    at.leadin : float (m)
    at.color : float, array length pktCount
      List of numbers for each position indicating the color to plot
      representing IP data results.
    """
    # Start by transforming the fix points into a local azimuthal equidistant
    # reference system. Units along x and y are meters.
    ptList = [point(tuple(row)) for row in at.fix]
    dfPt = gpd.GeoDataFrame({'geometry': ptList})
    # Assign the WGS84 latitude-longitude Coordinate Reference System (CRS).
    dfPt.crs = ps.crsWGS84

    # Transform to the azimuthal equidistant reference.
    dfPt = dfPt.to_crs(ps.crsAzEq)
    # Extract the transformed coordinates into an array.
    flatFix = sp.zeros_like(at.fix, dtype=float)
    for p in range(len(flatFix)):
        flatFix[p, :] = sp.array(dfPt.geometry[p].coords)  # (m)

    # Track vectors between each pair of consecutive GPS fixes.
    vParSeg = flatFix[1:, :] - flatFix[0:-1, :]
    # Length of each track vector.
    segLen = sp.sqrt(vParSeg[:, 0]**2 + vParSeg[:, 1]**2)  # (m)
    # Cumulative sum along the track line.
    sumLen = sp.hstack((0, sp.cumsum(segLen)))

    # Interpolate a laidback fix location on the track line.
    # Layback the extra length at the start of the line according to
    # the boat's heading for the first few meters twice the length of
    # the cable lead in.
    newFix = sp.zeros_like(flatFix, dtype=float)
    linLoc = 2 * at.leadin
    closeIdx = sp.argmin(abs(sumLen - linLoc))
    if linLoc >= sumLen[closeIdx]:
        idx1 = closeIdx
        idx2 = closeIdx + 1
    else:
        idx1 = closeIdx - 1
        idx2 = closeIdx
    l1 = sumLen[idx1]
    l2 = sumLen[idx2]
    startHeadingFix = flatFix[idx1, :] + (
        flatFix[idx2, :] - flatFix[idx1, :]) * (linLoc - l1) / (l2 - l1)
    startHeadingVec = mm.unit(startHeadingFix - flatFix[0, :])
    for p in range(len(flatFix)):
        linLoc = sumLen[p] - mm.cableRange(at.leadin, at.depth[p])
        if linLoc >= 0:
            closeIdx = sp.argmin(abs(sumLen - linLoc))
            if linLoc >= sumLen[closeIdx]:
                idx1 = closeIdx
                idx2 = closeIdx + 1
            else:
                idx1 = closeIdx - 1
                idx2 = closeIdx
            l1 = sumLen[idx1]
            l2 = sumLen[idx2]
            newFix[p, :] = flatFix[idx1, :] + (flatFix[idx2, :] - flatFix[
                idx1, :]) * (linLoc - l1) / (l2 - l1)
        else:
            newFix[p, :] = flatFix[0, :] + linLoc * startHeadingVec
    # Overwrite.
    flatFix = newFix

    # Reevaluate track vectors between each pair of consecutive GPS fixes.
    vParSeg = flatFix[1:, :] - flatFix[0:-1, :]
    # Track vectors at each point, found from points before and after.
    vParPt = flatFix[2:, :] - flatFix[0:-2, :]
    # Include segment parallels for the boundary fix points.
    vParPt = sp.vstack((vParSeg[0, :], vParPt, vParSeg[-1, :]))
    # Midpoints along the sequence of GPS fixes.
    midPts = (flatFix[1:, :] + flatFix[0:-1, :]) / 2

    # Perpendicular vectors at each segment and fix point.
    # Vector lengths are set to sideRange.
    vPerpSeg = ps.sideRange * mm.unit(mm.perp(vParSeg))  # (m)
    vPerpPt = ps.sideRange * mm.unit(mm.perp(vParPt))  # (m)

    # Include each segment between the fix coordinates as its own line object.
    lineList = []
    for p in range(len(flatFix) - 1):
        endPts = [tuple(row) for row in flatFix[p:p + 2, :]]
        lineList.append(lineStr(endPts))
    dfLine = gpd.GeoDataFrame({'geometry': lineList})
    dfLine.crs = ps.crsAzEq

    polyList = []
    # If cropping, only include fix points where asked.
    plottedPkts = sp.array(range(len(at.pkt)))
    if crop:
        plottedPkts = plottedPkts[at.cropLogic]
    # Polygon patches for each packet.
    for p in plottedPkts:
        # Perpendicular displacement, length sideRange, at the first midpoint.
        if p != 0:
            vert01 = sp.vstack((midPts[p - 1, :] - vPerpSeg[p - 1, :],
                                midPts[p - 1, :] + vPerpSeg[p - 1, :]))
        else:
            vert01 = sp.zeros((0, 2))
        # Polygon points offset from the flat fix points themselves.
        vert2 = flatFix[p, :] + vPerpPt[p, :]
        vert5 = flatFix[p, :] - vPerpPt[p, :]
        if p != len(flatFix) - 1:
            # Perpendicular displacement at the second midpoint.
            vert34 = sp.vstack(
                (midPts[p, :] + vPerpSeg[p, :], midPts[p, :] - vPerpSeg[p, :]))
        else:
            vert34 = sp.zeros((0, 2))
        # Polygon vertices.
        verts = sp.vstack((vert01, vert2, vert34, vert5))
        # Vertices as tuples in a list.
        vertList = [tuple(row) for row in verts]
        # Append the latest polygon vertices to the list of polygons.
        polyList.append(polygon(vertList))

    # Geopandas data frame object containing each polygon in the list, along
    # with colors.
    dfPoly = gpd.GeoDataFrame({
        'geometry': polyList,
        'color': at.color[plottedPkts]
    })
    dfPoly.crs = ps.crsAzEq
    # Transform back to (longi,lat), if requested.
    if ps.plotWGS84:
        dfPt = dfPt.to_crs(ps.crsWGS84)
        dfLine = dfLine.to_crs(ps.crsWGS84)
        dfPoly = dfPoly.to_crs(ps.crsWGS84)

    dfPoly.plot(ax=ps.ax,
                column='color',
                cmap=ps.cmap,
                vmin=ps.colMin,
                vmax=ps.colMax)

    if ps.showLines:
        dfLine.plot(ax=ps.ax, color=ps.lineCol)
    if ps.showPts:
        dfPt.plot(ax=ps.ax)

    # Transform to (longi,lat) for the text output, if not already done.
    # (A bitwise ~ on a Python bool is truthy for both values; `not` is intended.)
    if not ps.plotWGS84:
        dfPt = dfPt.to_crs(ps.crsWGS84)
        dfLine = dfLine.to_crs(ps.crsWGS84)
        dfPoly = dfPoly.to_crs(ps.crsWGS84)

    if ps.saveTxt:
        # Pseudocolor plots.
        txtName = 'ch%d_H%d_%s_%s_%d.txt' % (
            ps.ch,
            ps.h,
            ps.plotThis,
            at.fileDateStr,
            at.fileNum,
        )
        txtPath = os.path.join(ps.folderPath, 'plotData', ps.plotThis, txtName)
        with open(txtPath, 'w') as f:
            for p in range(at.pktCount):
                # longi (deg), lat (deg), color (?)
                wStr = (str(dfPt.geometry[p].x) + ',' +
                        str(dfPt.geometry[p].y) + ',' + str(at.color[p]) +
                        '\n')
                f.write(wStr)
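
For the in-domain case (linLoc >= 0) with a constant layback, the per-point interpolation above is equivalent to a coordinate-wise np.interp over the cumulative track length. A sketch under that simplifying assumption (the real code lets the layback vary with water depth via mm.cableRange and extrapolates along the start heading before the line begins, whereas np.interp clamps at the boundaries):

import numpy as np

def layback_positions(flatFix, sumLen, layback):
    # Interpolate a position `layback` meters behind each fix along the
    # track; sumLen must be non-decreasing for np.interp to apply.
    linLoc = sumLen - layback
    x = np.interp(linLoc, sumLen, flatFix[:, 0])
    y = np.interp(linLoc, sumLen, flatFix[:, 1])
    return np.column_stack((x, y))
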
    def optimize_with_repeates(self,
                               fast=None,
                               verbose=None,
                               n_times=10,
                               lambd=None,
                               lambd_g=None,
                               lambd_n=None):
        """
        Train the model repeatedly, up to the number of restarts specified by the user, and
        return a list of all relative minima that have been found. This list is sorted by LML.
        Each list entry is a dictionary with keys "counter", "LML", and "scales".

        After running this function, the vc object will be set at the last iteration. Thus, if you
        wish to get the vc object of one of the repeats, then set the scales. For example:

        vc.setScales(scales=optimize_with_repeates_output[0]["scales"])

        Args:
            fast:       Boolean. If set to True, initialize kronSumGP.
            verbose:    Boolean. If set to True, verbose output is produced. (default True)
            n_times:    number of re-starts of the optimization. (default 10)
        """
        verbose = dlimix_legacy.getVerbose(verbose)

        if not self.init: self._initGP(fast)

        opt_list = []

        fixed0 = sp.zeros_like(self.gp.getParams()['dataTerm'])

        # minimize n_times
        for i in range(n_times):

            scales1 = self._getScalesRand()
            fixed1 = 1e-1 * sp.randn(fixed0.shape[0], fixed0.shape[1])
            conv = self.trainGP(fast=fast,
                                scales0=scales1,
                                fixed0=fixed1,
                                lambd=lambd,
                                lambd_g=lambd_g,
                                lambd_n=lambd_n)

            if conv:
                # compare with previous minima
                temp = 1
                for j in range(len(opt_list)):
                    if sp.allclose(abs(self.getScales()),
                                   abs(opt_list[j]['scales'])):
                        temp = 0
                        opt_list[j]['counter'] += 1
                        break
                if temp == 1:
                    opt = {}
                    opt['counter'] = 1
                    opt['LML'] = self.getLML()
                    opt['scales'] = self.getScales()
                    opt_list.append(opt)

        # sort by LML
        LML = sp.array([opt_list[i]['LML'] for i in range(len(opt_list))])
        index = LML.argsort()[::-1]
        out = []
        if verbose:
            print("\nLocal mimima\n")
            print("n_times\t\tLML")
            print("------------------------------------")

        for i in range(len(opt_list)):
            out.append(opt_list[index[i]])
            if verbose:
                print(("%d\t\t%f" % (opt_list[index[i]]['counter'],
                                     opt_list[index[i]]['LML'])))
                print("")

        return out
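
The restart bookkeeping above (dedupe by scales, count repeated hits, sort by LML) is a generic pattern. Stripped of the GP machinery it looks like this (a sketch, all names hypothetical):

import numpy as np

def collect_restarts(optimize_fn, init_fn, score_fn, n_times=10):
    # Gather distinct local optima across random restarts.
    found = []
    for _ in range(n_times):
        x = optimize_fn(init_fn())
        for entry in found:
            if np.allclose(np.abs(x), np.abs(entry['x'])):
                entry['counter'] += 1  # same optimum reached again
                break
        else:
            found.append({'x': x, 'score': score_fn(x), 'counter': 1})
    # Sort by score, mirroring the LML sort above.
    return sorted(found, key=lambda e: e['score'], reverse=True)
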
Example #55
0
def run_inter3(afferent,
               p=_DEFAULT_PARAMETERS,
               axis=-3,
               maxiter=_DEFAULT_MAXITER,
               h=_DEFAULT_STEPSIZE,
               keeptime=_DEFAULT_KEEPTIME,
               verbose=_DEFAULT_VERBOSE):
    """ Integrate with Forward Euler method with integration step size h
    """

    ######################################
    # re-arrange array into canonical form
    ######################################
    axis = axis % afferent.ndim
    O, initsz, nunits = to4(afferent, axis=axis)
    I, O_t, I_t = O.copy(), [], []
    Z = sp.zeros_like(I)
    if keeptime:
        O_t.append(O)
        I_t.append(I)

    ############
    # parameters
    ############
    p = _DEFAULT_PARAMETERS if p is None else p
    sigma, tau = p['sigma'], p['tau']
    epsilon, eta = p['epsilon'], p['eta']
    ssc, sss = p['ssc'], p['sss']
    gamma, alpha, mu = p['gamma'], p['alpha'], p['mu']
    delta, beta, nu = p['delta'], p['beta'], p['nu']
    xi, zeta, omega = p['xi'], p['zeta'], p['omega']

    ##############################################
    # make sure pool sizes, input sizes make sense
    ##############################################
    assert sss < afferent.shape[-2]
    assert sss < afferent.shape[-1]
    assert ssc < sss
    assert sss % 2 == 1
    assert ssc % 2 == 1

    tuned_pooling_method = 'mean'  # 'max'
    untuned_pooling_method = 'mean'  # 'max'

    #################################
    # tuned summation: center pooling
    #################################
    zeta = 1.0  # because here, unlike the GPU implementation, we exclude the center,
    # and the default parameters were pulled from the GPU implementation
    pool_P = {
        'type': 'pool',
        'mode': tuned_pooling_method,
        'size': (1, 1, ssc, ssc),
        'padding': 'reflect',
        'stride_size': None,
        'keepdims': True,
        'exclude_center': (1, 1),
        'subpool': {
            'type': None
        },
    }

    ####################################################
    # untuned suppression: reduction across feature axis
    ####################################################
    pool_U = {
        'type': 'pool',
        'mode': untuned_pooling_method,
        'size': (1, -1, 1, 1),
        'padding': 'reflect',
        'stride_size': None,
        'keepdims': True,
        'exclude_center': None,
    }

    #####################################
    # tuned suppression: surround pooling
    #####################################
    pool_T = {
        'type': 'pool',
        'mode': tuned_pooling_method,
        'size': (1, 1, sss, sss),
        'padding': 'reflect',
        'stride_size': None,
        'keepdims': True,
        'exclude_center': None,  #(ssc, ssc),
        'subpool': {
            'type': None
        },
    }

    ########################
    # untuned summation: cRF
    ########################
    V = sp.linspace(0.0, 1.0, nunits)
    W = stats.norm.pdf(V, loc=V[nunits // 2], scale=omega)
    W /= W.sum()
    pool_Q = {
        'type': 'conv',
        'fb': W,
        'padding': 'wrap',
        'im_dims': 'ndhw',
        'fb_dims': 'd',
        'corr': False
    }

    ###################
    # pooling functions
    ###################
    untuned_suppression = lambda arr: recursive_pool(
        arr, params=pool_U, keyname='subpool', verbose=False)
    tuned_suppression = lambda arr: recursive_pool(
        arr, params=pool_T, keyname='subpool', verbose=False)
    tuned_summation = lambda arr: recursive_pool(
        arr, params=pool_P, keyname='subpool', verbose=False)
    untuned_summation = lambda arr: recursive_pool(
        arr, params=pool_Q, keyname='subpool', verbose=False)

    relu = lambda x: hwrectify(x, '+')
    # relu = lambda x: softplus(x, 10.0)

    ###################################################
    # iterate lateral connections and store time frames
    ###################################################

    a = sigma
    b = sp.sqrt(1)
    c = eta / 5.

    if verbose: pbar = pb(maxiter, 'Integrating [HOST]')
    for i in range(maxiter):
        U = untuned_suppression(O)
        T = tuned_suppression(O)
        P = tuned_summation(I)
        Q = untuned_summation(I)

        Z_summand = relu((alpha * I + mu) * U + (beta * I + nu) * T - b**2)
        Z = (1. - a**2 * h / c) * Z + h / c * Z_summand

        I_summand = relu(xi * afferent - Z)
        I = (1. - epsilon**2 * h / eta) * I + h / eta * I_summand

        O_summand = relu(zeta * I + gamma * P + delta * Q)
        O = (1. - sigma**2 * h / tau) * O + h / tau * O_summand

        if keeptime:
            I_t.append(I)
            O_t.append(O)
        if verbose: pbar.update(i)
    if verbose: pbar.finish()

    ################
    # postprocessing
    ################
    out_I = from4(I_t if keeptime else I,
                  axis=axis,
                  keeptime=keeptime,
                  size=initsz)
    out_O = from4(O_t if keeptime else O,
                  axis=axis,
                  keeptime=keeptime,
                  size=initsz)
    afferent.shape = initsz

    return out_I, out_O
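
All three state updates in the loop share one pattern: a forward-Euler step of a leaky integrator, tau * dX/dt = -leak * X + drive. Pulled out on its own (a sketch; leak is a**2, epsilon**2 and sigma**2 for Z, I and O respectively):

def euler_leak_step(X, drive, leak, tau, h):
    # One forward-Euler step of tau * dX/dt = -leak * X + drive.
    # The decay factor 1 - leak * h / tau should stay in (-1, 1) for stability.
    return (1.0 - leak * h / tau) * X + (h / tau) * drive
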
Example #56
0
    def fitNull(self,
                cache=False,
                out_dir='./cache',
                fname=None,
                rewrite=False,
                seed=None,
                factr=1e3,
                n_times=10,
                init_method=None,
                verbose=False):
        r"""
        Fit null model

        Args:
            cache (bool, optional):
                If False (default), the null model is fitted and
                the results are not cached.
                If True, the cache is activated.
                The cache file dir and name can be specified using
                ``out_dir`` and ``fname``.
                When ``cache=True``, we distinguish the following cases:

                - if the specified file does not exist,
                  the output of the null model fitting is cached in the file.
                - if the specified file exists and ``rewrite=True``,
                  the cache file is overwritten.
                - if the specified file exists and ``rewrite=False``,
                  the results from the cache file are imported
                  (the null model is not re-fitted).

            out_dir (str, optional):
                output dir of the cache file.
                The default value is "./cache".
            fname (str, optional):
                Name of the cache hdf5 file.
                It must be specified if ``cache=True``.
            rewrite (bool, optional):
                It has an effect only if ``cache=True``.
                In this case, if ``True``,
                the cache file is overwritten in case it exists.
                The default value is ``False``.
            factr (float, optional):
                optimization parameter that determines the accuracy
                of the solution.
                By default it is 1000.
                (see scipy.optimize.fmin_l_bfgs_b for more details).
            verbose (bool, optional):
                verbose flag.

        Returns:
            (dict): dictionary containing:
                - **B** (*ndarray*): estimated effect sizes (null);
                - **Cg** (*ndarray*): estimated relatedness trait covariance (null);
                - **Cn** (*ndarray*): estimated genetic noise covariance (null);
                - **conv** (*bool*): convergence indicator;
                - **NLL0** (*ndarray*): negative loglikelihood (NLL) of the null model;
                - **LMLgrad** (*ndarray*): norm of the gradient of the NLL.
                - **time** (*time*): elapsed time (in seconds).
        """
        from limix_core.gp import GP2KronSum
        from limix_core.gp import GP2KronSumLR
        from limix_core.gp import GP3KronSumLR
        from limix_core.covar import FreeFormCov
        if seed is not None:
            sp.random.seed(seed)

        read_from_file = False
        if cache:
            assert fname is not None, 'MultiTraitSetTest:: specify fname'
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            out_file = os.path.join(out_dir, fname)
            read_from_file = os.path.exists(out_file) and not rewrite

        RV = {}
        if read_from_file:
            f = h5py.File(out_file, 'r')
            for key in list(f.keys()):
                RV[key] = f[key][:]
            f.close()
            self.setNull(RV)
        else:
            start = TIME.time()
            if self.bgRE:
                self._gpNull = GP2KronSum(Y=self.Y,
                                          F=None,
                                          A=None,
                                          Cg=self.Cg,
                                          Cn=self.Cn,
                                          R=None,
                                          S_R=self.S_R,
                                          U_R=self.U_R)
            else:
                self._gpNull = GP2KronSumLR(self.Y,
                                            self.Cn,
                                            G=sp.ones((self.N, 1)),
                                            F=self.F,
                                            A=self.A)
                # freezes Cg to 0
                n_params = self._gpNull.covar.Cr.getNumberParams()
                self._gpNull.covar.Cr.setParams(1e-9 * sp.ones(n_params))
                self._gpNull.covar.act_Cr = False
            for i in range(n_times):
                params0 = self._initParams(init_method=init_method)
                self._gpNull.setParams(params0)
                conv, info = self._gpNull.optimize(verbose=verbose,
                                                   factr=factr)
                if conv:
                    break
            if not conv:
                warnings.warn("not converged")
            LMLgrad = (self._gpNull.LML_grad()['covar']**2).mean()
            LML = self._gpNull.LML()
            if self._gpNull.mean.n_terms == 1:
                RV['B'] = self._gpNull.mean.B[0]
            elif self._gpNull.mean.n_terms > 1:
                warnings.warn('generalize to more than 1 fixed effect term')
            if self.bgRE:
                RV['params0_g'] = self.Cg.getParams()
            else:
                RV['params0_g'] = sp.zeros_like(self.Cn.getParams())
            RV['params0_n'] = self.Cn.getParams()
            if self.bgRE:
                RV['Cg'] = self.Cg.K()
            else:
                RV['Cg'] = sp.zeros_like(self.Cn.K())
            RV['Cn'] = self.Cn.K()
            RV['conv'] = sp.array([conv])
            RV['time'] = sp.array([TIME.time() - start])
            RV['NLL0'] = sp.array([LML])
            RV['LMLgrad'] = sp.array([LMLgrad])
            RV['nit'] = sp.array([info['nit']])
            RV['funcalls'] = sp.array([info['funcalls']])
            self.null = RV
            from limix.util.util_functions import smartDumpDictHdf5
            if cache:
                f = h5py.File(out_file, 'w')
                smartDumpDictHdf5(RV, f)
                f.close()
        return RV
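
fitNull's caching logic is a read-or-compute pattern around an HDF5 file. Stripped of the model fitting it is roughly the following (a sketch; smartDumpDictHdf5 is replaced here by a plain loop over a dict of arrays):

import os
import h5py
import numpy as np

def cached_fit(fit_fn, out_file, rewrite=False):
    # Load a dict of arrays from HDF5 if present, else fit and store it.
    if os.path.exists(out_file) and not rewrite:
        with h5py.File(out_file, 'r') as f:
            return {key: f[key][:] for key in f.keys()}
    RV = fit_fn()  # assumed to return a dict of array-like values
    with h5py.File(out_file, 'w') as f:
        for key, val in RV.items():
            f.create_dataset(key, data=np.asarray(val))
    return RV
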
def plotStrip(bp, at, ps, crop):
    """
    Plot a strip of colored polygons along a trace of GPS coordinates (deg).
    Extension of the strip outward from the line to either side is specified
    by ps.sideRange, in meters.

    Parameters
    ----------
    bp.polyList: master list of polygons, all lines included
    bp.colorList: master list of colors for each polygon
    bp.lineList: master list of survey lines
    at.fix : float (deg), (pktCount)x2 array
      [longitude, latitude] coordinates of ship, rows are packets in order.
    at.depth :  float (m), array length pktCount
      Water depth beneath the ship at each fix. Used with extended lead-in
      length of cable to estimate sensor position by layback calculation.
    at.leadin : float (m)
    at.color : float, array length pktCount
      List of numbers for each position indicating the color to plot
      representing IP data results.
    """
    # Start by transforming the fix points into a local azimuthal equidistant
    # reference system. Units along x and y are meters.
    ptList = [point(tuple(row)) for row in at.fix]
    dfPt = gpd.GeoDataFrame({'geometry': ptList})
    # Assign the WGS84 latitude-longitude Coordinate Reference System (CRS).
    dfPt.crs = ps.crsWGS84

    # Transform to the azimuthal equidistant reference.
    dfPt = dfPt.to_crs(ps.crsAzEq)
    # Extract the transformed coordinates into an array.
    flatFix = sp.zeros_like(at.fix, dtype=float)
    for p in range(len(flatFix)):
        flatFix[p, :] = sp.array(dfPt.geometry[p].coords)  # (m)

    # Track vectors between each pair of consecutive GPS fixes.
    vParSeg = flatFix[1:, :] - flatFix[0:-1, :]
    # Length of each track vector.
    segLen = sp.sqrt(vParSeg[:, 0]**2 + vParSeg[:, 1]**2)  # (m)
    # Cumulative sum along the track line.
    sumLen = sp.hstack((0, sp.cumsum(segLen)))
    
    # Print the total line length (m).
#    print('%.1f m along line.' % (sumLen[-1]))
    # Distance between start and endpoints.
    startFinDist = mm.norm(flatFix[0, :] - flatFix[-1, :])
#    print('%.1f m distance from start point to finish point.' % startFinDist)
    # Time elapsed on the line.
    lineTime = (at.cpuDT[-1] - at.cpuDT[0]).total_seconds()
#    print('%.0f s elapsed.' % lineTime)
    lineSpeed = startFinDist/lineTime  # (m/s)
    lineSpeed *= 1.94384  # (kt)
#    print('%.1f kt average speed' % lineSpeed)

    # Interpolate a laidback fix location on the track line.
    # Layback the extra length at the start of the line according to
    # the boat's heading for the first few meters twice the length of
    # the cable lead in.
    newFix = sp.zeros_like(flatFix, dtype=float)
    linLoc = 2*at.leadin
    closeIdx = sp.argmin(abs(sumLen - linLoc))
    # If the line is at least as long as twice the lead in.
    if sumLen[-1] > linLoc:
        if linLoc >= sumLen[closeIdx]:
            idx1 = closeIdx
            idx2 = closeIdx + 1
        else:
            idx1 = closeIdx - 1
            idx2 = closeIdx
        l1 = sumLen[idx1]
        l2 = sumLen[idx2]
        startHeadingFix = flatFix[idx1, :] + (flatFix[idx2, :] -
              flatFix[idx1, :])*(linLoc - l1)/(l2 - l1)
    else:
        # Else just use the heading of the whole line.
        startHeadingFix = flatFix[-1, :]
    startHeadingVec = mm.unit(startHeadingFix - flatFix[0, :])
    for p in range(len(flatFix)):
        linLoc = sumLen[p] - mm.cableRange(at.leadin, at.depth[p])
        if linLoc >= 0:
            closeIdx = sp.argmin(abs(sumLen - linLoc))
            if linLoc >= sumLen[closeIdx]:
                idx1 = closeIdx
                idx2 = closeIdx + 1
            else:
                idx1 = closeIdx - 1
                idx2 = closeIdx
            l1 = sumLen[idx1]
            l2 = sumLen[idx2]
            if l1 != l2:
                newFix[p, :] = flatFix[idx1, :] + (flatFix[idx2, :] -
                      flatFix[idx1, :])*(linLoc - l1)/(l2 - l1)
            else:
                # Case of interpolation between two repeated locations.
                newFix[p, :] = flatFix[idx1, :]
        else:
            newFix[p, :] = flatFix[0, :] + linLoc*startHeadingVec
    # Overwrite.
    flatFix = newFix

    # Reevaluate track vectors between each pair of consecutive GPS fixes.
    vParSeg = flatFix[1:, :] - flatFix[0:-1, :]
    # Track vectors at each point, found from points before and after.
    vParPt = flatFix[2:, :] - flatFix[0:-2, :]
    # Include segment parallels for the boundary fix points.
    vParPt = sp.vstack((vParSeg[0, :], vParPt, vParSeg[-1, :]))
    # Midpoints along the sequence of GPS fixes.
    midPts = (flatFix[1:, :] + flatFix[0:-1, :])/2

    # Perpendicular vectors at each segment and fix point.
    # Vector lengths are set to sideRange.
    vPerpSeg = ps.sideRange*mm.unit(mm.perp(vParSeg))  # (m)
    vPerpPt = ps.sideRange*mm.unit(mm.perp(vParPt))  # (m)

    # If cropping, only include fix points where asked.
    plottedPkts = sp.array(range(len(at.pkt)))
    if crop and ps.plotThis != 'crop':
        plottedPkts = plottedPkts[at.cropLogic]
    lastGoodVerts = sp.zeros((4, 2))
    # Polygon patches for each packet.
    for p in plottedPkts:
        # Perpendicular displacement, length sideRange, at the first midpoint.
        if p != 0:
            # Identify a trailing midpoint which is different from the 
            # present fix location. (Not between duplicate fixes.)
            pPrior = p - 1
            while pPrior >= 0 and all(midPts[pPrior, :] == flatFix[p, :]):
                pPrior -= 1
            vert01 = sp.vstack((midPts[pPrior, :] - vPerpSeg[pPrior, :],
                                midPts[pPrior, :] + vPerpSeg[pPrior, :]))
        else:
            vert01 = sp.zeros((0, 2))
        # Polygon points offset from the flat fix points themselves.
        vert2 = flatFix[p, :] + vPerpPt[p, :]
        vert5 = flatFix[p, :] - vPerpPt[p, :]
        if p != len(flatFix) - 1:
            # Perpendicular displacement at the second midpoint.
            vert34 = sp.vstack((midPts[p, :] + vPerpSeg[p, :],
                                midPts[p, :] - vPerpSeg[p, :]))
        else:
            vert34 = sp.zeros((0, 2))
        # Polygon vertices.
        verts = sp.vstack((vert01, vert2, vert34, vert5))
        # In the case where IP packets come in at a higher rate than the GPS
        # fixes are updated, consecutive packets have the same position at
        # times. In this case, reuse the last useable polygon. This will plot
        # on top of the reused position.
        if sp.isnan(verts).any():
            verts = lastGoodVerts.copy()
        else:
            lastGoodVerts = verts.copy()
        # Vertices as tuples in a list.
        vertList = [tuple(row) for row in verts]
        # Append the latest polygon vertices to the list of polygons.
        bp.polyList.append(polygon(vertList))

    bp.colorList = sp.hstack((bp.colorList, at.color[plottedPkts]))

    # Include each segment between the fix coordinates as its own line object.
    for p in plottedPkts:
        if p < len(flatFix) - 1:
            endPts = [tuple(row) for row in flatFix[p:p+2, :]]
            if at.xmitFund == 8:
                bp.lineList.append(lineStr(endPts))

    if ps.saveTxt:
        # Pseudocolor plots.
        txtName = 'ch%d_H%d_%s_%s_%d.txt' % (ps.ch, ps.h, ps.plotThis,
                                             at.fileDateStr, at.fileNum,)
        txtPath = os.path.join(ps.folderPath, 'plotData', ps.plotThis, txtName)
        with open(txtPath, 'w') as f:
            for p in range(at.pktCount):
                # longi (deg), lat (deg), color (?)
                wStr = (str(dfPt.geometry[p].x) + ',' +
                        str(dfPt.geometry[p].y) + ',' +
                        str(at.color[p]) + '\n')
                f.write(wStr)
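
mm.unit and mm.perp come from the project's math module; minimal 2-D row-vector versions consistent with how they are used here would look like this (a sketch, not the actual module):

import numpy as np

def unit(v):
    # Rows of v scaled to unit length; zero-length rows are left at zero.
    v = np.atleast_2d(v).astype(float)
    n = np.linalg.norm(v, axis=1, keepdims=True)
    return v / np.where(n == 0, 1.0, n)

def perp(v):
    # Rows of v rotated 90 degrees counterclockwise: (x, y) -> (-y, x).
    v = np.atleast_2d(v)
    return np.column_stack((-v[:, 1], v[:, 0]))
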
Example #58
0
def spev(t_int, C, deg, x, cov_C=None, M_spline=False, I_spline=False, n=0):
    """Evaluate a B-, M- or I-spline with the specified internal knots, order and coefficients.
    
    `deg` boundary knots are appended at both sides of the domain.
    
    The zeroth order basis functions are modified to ensure continuity at the
    right-hand boundary.
    
    Note that the I-splines include the :math:`i=0` case in order to have a "DC
    offset". This way your functions do not have to start at zero. If you do not
    want to include this, simply set the first coefficient in `C` to zero.
    
    Parameters
    ----------
    t_int : array of float, (`M`,)
        The internal knot locations. Must be in increasing order (this is
        checked; a ValueError is raised otherwise).
    C : array of float, (`M + deg - 1`,)
        The coefficients applied to the basis functions.
    deg : nonnegative int
        The polynomial degree to use.
    x : array of float, (`N`,)
        The locations to evaluate the spline at.
    cov_C : array of float, (`M + deg - 1`,) or (`M + deg - 1`, `M + deg - 1`), optional
        The covariance matrix of the coefficients. If a 1d array is passed, this
        is treated as the variance. If None, then the uncertainty is not
        computed.
    M_spline : bool, optional
        If True, compute the M-spline instead of the B-spline. M-splines are
        normalized to integrate to unity, as opposed to B-splines which sum to
        unity at all points. Default is False (compute B-spline).
    I_spline : bool, optional
        If True, compute the I-spline instead of the B-spline. Note that this
        will override `M_spline`. I-splines are the integrals of the M-splines,
        and hence ensure curves are monotonic if all coefficients are of the
        same sign. Note that the I-splines returned will be of polynomial degree
        `deg` (i.e., the integral of what is returned from calling the function
        with `deg=deg-1` and `M_spline=True`). Default is False (compute B-spline
        or M-spline).
    n : int, optional
        The derivative order to compute. Default is 0. If `n > deg`, all zeros
        are returned (i.e., the discontinuities are not included).
    
    Returns
    -------
    `y` or (`y`, `cov_y`): The values (and possibly uncertainties) of the spline
    at the specified locations.
    """
    C = scipy.asarray(C, dtype=float)
    t_int = scipy.asarray(t_int, dtype=float)
    if (t_int != scipy.sort(t_int)).any():
        raise ValueError("Knots must be in increasing order!")
    # if len(scipy.unique(t_int)) != len(t_int):
    #     raise ValueError("Knots must be unique!")
    if n > deg:
        return scipy.zeros_like(x, dtype=float)
    if I_spline:
        # I_{i,k} = int_L^x M_{i,k}(u)du, so just take the derivative of the
        # underlying M-spline. Discarding the first coefficient dumps the "DC
        # offset" term.
        if cov_C is not None:
            cov_C = scipy.asarray(cov_C)
            if cov_C.ndim == 1:
                cov_C = cov_C[1:]
            elif cov_C.ndim == 2:
                cov_C = cov_C[1:, 1:]
        if n > 0:
            return spev(t_int,
                        C[1:],
                        deg - 1,
                        x,
                        cov_C=cov_C,
                        M_spline=True,
                        I_spline=False,
                        n=n - 1)
        M_spline = True
    if n > 0:
        if M_spline:
            t = scipy.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
            C = (deg + 1.0) * (
                C[1:] /
                (t[deg + 2:len(t_int) + 2 * deg] - t[1:len(t_int) + deg - 1]) -
                C[:-1] / (t[deg + 1:len(t_int) + 2 * deg - 1] -
                          t[:len(t_int) + deg - 2]))
        else:
            C = C[1:] - C[:-1]
        return spev(t_int,
                    C,
                    deg - 1,
                    x,
                    cov_C=cov_C,
                    M_spline=True,
                    I_spline=False,
                    n=n - 1)

    if len(C) != len(t_int) + deg - 1:
        raise ValueError("Length of C must be equal to M + deg - 1!")

    # Append the external knots directly at the boundary:
    t = scipy.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))

    # Compute the different orders:
    B = scipy.zeros((deg + 1, len(t) - 1, len(x)))

    # NOTE: The first dimension is indexed by deg, and is zero-indexed.

    # Zeroth order: constant function
    d = 0
    for i in xrange(deg, deg + len(t_int) - 2 + 1):
        # The second condition contains a hack to make the basis functions
        # continuous at the right-hand edge.
        mask = (t[i] <= x) & ((x < t[i + 1]) | ((i == deg + len(t_int) - 2) &
                                                (x == t[-1])))
        B[d, i, mask] = 1.0 / (t[i + 1] - t[i]) if M_spline else 1.0

    # Loop over other orders:
    for d in xrange(1, deg + 1):
        for i in xrange(deg - d, deg + len(t_int) - 2 + 1):
            if t[i + d] != t[i]:
                v = (x - t[i]) * B[d - 1, i, :]
                if not M_spline:
                    v /= t[i + d] - t[i]
                B[d, i, :] += v
            if t[i + d + 1] != t[i + 1]:
                v = (t[i + d + 1] - x) * B[d - 1, i + 1, :]
                if not M_spline:
                    v /= t[i + d + 1] - t[i + 1]
                B[d, i, :] += v
            if M_spline and ((t[i + d] != t[i]) or (t[i + d + 1] != t[i + 1])):
                B[d, i, :] *= (d + 1) / (d * (t[i + d + 1] - t[i]))

    B = B[deg, 0:len(C), :].T

    # Now compute the I-splines, if needed:
    if I_spline:
        I = scipy.zeros_like(B)
        for i in xrange(0, len(C)):
            for m in xrange(i, len(C)):
                I[:, i] += (t[m + deg + 1] - t[m]) * B[:, m] / (deg + 1.0)
        B = I

    y = B.dot(C)
    if cov_C is not None:
        cov_C = scipy.asarray(cov_C)
        # If there are no covariances, promote cov_C to a diagonal matrix
        if cov_C.ndim == 1:
            cov_C = scipy.diag(cov_C)
        cov_y = B.dot(cov_C).dot(B.T)
        return (y, cov_y)
    else:
        return y
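
With the knot convention above (deg repeated boundary knots on each side), the plain B-spline branch can be cross-checked against scipy.interpolate.BSpline. A usage sketch (assuming spev is importable as defined above; evaluation stays inside the half-open support to avoid right-edge convention differences):

import numpy as np
from scipy.interpolate import BSpline

t_int = np.array([0.0, 0.5, 1.0, 2.0])
deg = 3
C = np.arange(len(t_int) + deg - 1, dtype=float)  # M + deg - 1 coefficients
t = np.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
x = np.linspace(t_int[0], t_int[-1], 50, endpoint=False)
y_ref = BSpline(t, C, deg, extrapolate=False)(x)
# y = spev(t_int, C, deg, x)   # should match y_ref on this domain
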
Example #59
0
    print 'Start solving pde'

    #rlist= [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]
    dxlist = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1]
    for dx_i in range(0, len(dxlist)):
        dx = dxlist[dx_i]
        print 'dx= ', dx
        r = (2. * D * dt) / (dx)**2
        print 'r= ', r
        if r > 1:
            print 'Stability condition not fulfilled, because r=', r  #(see https://en.wikipedia.org/wiki/FTCS_scheme)
            #sys.exit("Ending Script. Make sure that r<1")

        grid = scipy.arange(xL + dx / 2., xR, dx)
        print len(grid), " discretisation steps"
        rho = scipy.ones_like(grid)
        rho = rho / (sum(rho) * dx)

        v = scipy.zeros_like(grid)
        for j in range(len(grid)):
            v[j] = np.sin(j * 2 * np.pi / len(grid))
        plt.plot(v)

        fp_pde = pde.FokkerPlanck(rho, grid, pde.doublewell, lambd, param)
        Jv_pde = fp_pde.applyJacobian(v)
        rho_Dt_fp = fp_pde.u_Dt
        np.savetxt('rho_dx=%.3f.out' % dx, rho_Dt_fp)
        np.savetxt('Jv_dx=%.3f.out' % dx, Jv_pde)

    t1 = time.time()
    print "Simulation time for solving pde: ", t1 - t0, " seconds"
Example #60
0
    def _compute_dk_dy(self, y, n):
        r"""Evaluate the derivative of the outer form of the Matern kernel.
        
        Uses the general Leibniz rule to compute the n-th derivative of:
        
        .. math::
        
            f(y) = \frac{2^{1-\nu}}{\Gamma(\nu)} y^\nu K_\nu(y)
        
        Notice that this is very poorly-behaved at :math:`x=0`. There, the
        value is approximated using :py:func:`mpmath.diff` with the `singular`
        keyword. This is rather slow, so if you require a fixed value of `nu`
        you may wish to consider implementing the appropriate kernel separately.
        
        Parameters
        ----------
        y : :py:class:`Array`, (`M`,)
            `M` inputs to evaluate at.
        n : non-negative scalar int.
            Order of derivative to compute.
        
        Returns
        -------
        dk_dy : :py:class:`Array`, (`M`,)
            Specified derivative at specified locations.
        """
        warnings.warn(
            "The Matern kernel has not been verified for derivatives. Consider using MaternKernelArb."
        )

        dk_dy = scipy.zeros_like(y, dtype=float)
        non_zero_idxs = (y != 0)
        for k in xrange(0, n + 1):
            dk_dy[non_zero_idxs] += (
                scipy.special.binom(n, k) *
                scipy.special.poch(1 - k + self.nu, k) *
                (y[non_zero_idxs])**(-k + self.nu) *
                scipy.special.kvp(self.nu, y[non_zero_idxs], n=n - k))

        # Handle the cases at y=0.
        # Compute the appropriate value using mpmath's arbitrary precision
        # arithmetic. This is potentially slow, but seems to behave pretty
        # well. In cases where the value should be infinite, very large
        # (but still finite) floats are returned with the appropriate sign.
        if n >= 2 * self.nu:
            warnings.warn("n >= 2*nu can yield inaccurate results.",
                          RuntimeWarning)

        # Use John Wright's expression for n < 2 * nu:
        if n < 2.0 * self.nu:
            if n % 2 == 1:
                dk_dy[~non_zero_idxs] = 0.0
            else:
                m = n / 2.0
                dk_dy[~non_zero_idxs] = ((-1.0)**m * 2.0**(self.nu - 1.0 - n) *
                                         scipy.special.gamma(self.nu - m) *
                                         scipy.misc.factorial(n) /
                                         scipy.misc.factorial(m))
        else:
            # Fall back to mpmath to handle n >= 2 * nu:
            core_expr = lambda x: x**self.nu * mpmath.besselk(self.nu, x)
            deriv = mpmath.chop(
                mpmath.diff(core_expr, 0, n=n, singular=True, direction=1))
            dk_dy[~non_zero_idxs] = deriv

        dk_dy *= 2.0**(1 - self.nu) / (scipy.special.gamma(self.nu))

        return dk_dy
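
For the n = 0 case, the outer form being differentiated above can be evaluated directly. A small sketch using scipy.special, with the standard limiting value f(0) = 1 for nu > 0:

import numpy as np
from scipy.special import gamma, kv

def matern_outer(y, nu):
    # f(y) = 2**(1 - nu) / Gamma(nu) * y**nu * K_nu(y), with f(0) = 1.
    y = np.asarray(y, dtype=float)
    out = np.ones_like(y)  # limiting value at y = 0
    nz = y != 0
    out[nz] = 2.0**(1 - nu) / gamma(nu) * y[nz]**nu * kv(nu, y[nz])
    return out
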