Example no. 1
    def _init_arrays(self):
        self.D = sp.repeat(self.u_gnd_l.D, self.N + 2)
        self.q = sp.repeat(self.u_gnd_l.q, self.N + 2)

        #Make indices correspond to the thesis
        #Deliberately add a None to the end to catch [-1] indexing!
        self.K = sp.empty((self.N + 3), dtype=sp.ndarray) #Elements 1..N
        self.C = sp.empty((self.N + 2), dtype=sp.ndarray) #Elements 1..N-1
        self.A = sp.empty((self.N + 3), dtype=sp.ndarray) #Elements 1..N

        self.r = sp.empty((self.N + 3), dtype=sp.ndarray) #Elements 0..N
        self.l = sp.empty((self.N + 3), dtype=sp.ndarray)

        self.eta = sp.zeros((self.N + 1), dtype=self.typ)

        if (self.D.ndim != 1) or (self.q.ndim != 1):
            raise NameError('D and q must be 1-dimensional!')

        #Don't do anything pointless
        self.D[0] = self.u_gnd_l.D
        self.D[self.N + 1] = self.u_gnd_l.D


        self.l[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)
        self.r[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)
        self.K[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)
        self.C[0] = sp.empty((self.q[0], self.q[1], self.D[0], self.D[1]), dtype=self.typ, order=self.odr)
        self.A[0] = sp.empty((self.q[0], self.D[0], self.D[0]), dtype=self.typ, order=self.odr)
        for n in xrange(1, self.N + 2):
            self.K[n] = sp.zeros((self.D[n-1], self.D[n-1]), dtype=self.typ, order=self.odr)
            self.r[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
            self.l[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
            self.A[n] = sp.empty((self.q[n], self.D[n-1], self.D[n]), dtype=self.typ, order=self.odr)
            if n < self.N + 1:
                self.C[n] = sp.empty((self.q[n], self.q[n+1], self.D[n-1], self.D[n+1]), dtype=self.typ, order=self.odr)
def plot_disc_policy():
    #First compute policy function...==========================================
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)
    
    m = 20
    v = 200
    f = discretelognorm(w,m,v)
    
    VEprime = sp.zeros((N,1))
    VUprime    = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    psiprime = sp.ones((N,1))
    gamma = 0.1
    beta = 0.9
    
    m = 15
    tol = 10**-9
    delta = 1+tol
    it = 0
    while (delta >= tol):
        it += 1
        
        psi = psiprime.copy()
        arg1 = sp.repeat(sp.transpose(VEprime),N,0)
        arg2 = sp.repeat(EVUprime,N,1)
        arg = sp.array([arg2,arg1])
        psiprime = sp.argmax(arg,axis = 0) 
        
        for j in sp.arange(0,m):
            VE = VEprime.copy()
            VU = VUprime.copy()
            EVU = EVUprime.copy()
            VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
            arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
            arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
            arg = arg1+arg2
            VUprime = alpha_util_grid + beta*arg
            EVUprime = sp.dot(VUprime,f)  
    
        
    
        delta = sp.linalg.norm(psiprime -psi) 

    wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
    wr = w[wr_ind]
    print w[250],wr[250]
        
    #Then plot=================================================================
    plt.plot(w,psiprime[250,:]) 
    plt.ylim([-.5,1.5])      
    plt.xlabel(r'$w\prime$')
    plt.yticks([0,1])
    plt.savefig('disc_policy.pdf')
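
The reservation wage above is read off the 0/1 policy with sp.diff and sp.argmax; a toy illustration of that lookup (numpy used in place of the sp alias, invented policy row):

# Toy illustration of the reservation-wage lookup used above: argmax of the
# first difference finds the index where the 0/1 policy switches to "accept".
import numpy as np

psi_row = np.array([0, 0, 0, 1, 1])
switch_ind = np.argmax(np.diff(psi_row))   # 2 -> policy jumps between index 2 and 3
w = np.linspace(0, 100, 5)
wr = w[switch_ind]                         # reservation wage on this toy grid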
Example no. 3
def Problem6Real():
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)
    
    m = 20
    v = 200
    f = discretelognorm(w,m,v)
    
    VEprime = sp.zeros((N,1))
    VUprime    = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    psiprime = sp.ones((N,1))
    gamma = 0.1
    beta = 0.9
    
    m = 15
    tol = 10**-9
    delta = 1+tol
    it = 0
    while (delta >= tol):
        it += 1
        
        psi = psiprime.copy()
        arg1 = sp.repeat(sp.transpose(VEprime),N,0)
        arg2 = sp.repeat(EVUprime,N,1)
        arg = sp.array([arg2,arg1])
        psiprime = sp.argmax(arg,axis = 0)    
        
        for j in sp.arange(0,m):
            VE = VEprime.copy()
            VU = VUprime.copy()
            EVU = EVUprime.copy()
            VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
            arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
            arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
            arg = arg1+arg2
            VUprime = alpha_util_grid + beta*arg
            EVUprime = sp.dot(VUprime,f)  
    
        
    
        delta = sp.linalg.norm(psiprime -psi)
        #print(delta)    
        
    wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
    wr = w[wr_ind]
    plt.plot(w,wr)
    plt.show()
    return wr
Example no. 4
    def __init__(self, N, uni_ground):
        self.odr = 'C'
        self.typ = sp.complex128

        self.zero_tol = sp.finfo(self.typ).resolution
        """Tolerance for detecting zeros. This is used when (pseudo-) inverting 
           l and r."""

        self._sanity_checks = False

        self.N = N
        """The number of sites. Do not change after initializing."""
        
        self.N_centre = N / 2
        """The 'centre' site. This affects the gauge-fixing and canonical
           form. It is the site between the left-gauge parts and the 
           right-gauge parts."""
           
        self.D = sp.repeat(uni_ground.D, self.N + 2)
        """Vector containing the bond-dimensions. A[n] is a 
           q[n] x D[n - 1] x D[n] tensor."""
           
        self.q = sp.repeat(uni_ground.q, self.N + 2)
        """Vector containing the site Hilbert space dimensions. A[n] is a 
           q[n] x D[n - 1] x D[n] tensor."""
           
        self.S_hc = sp.repeat(sp.NaN, self.N + 1)
        """Vector containing the von Neumann entropy S_hc[n] corresponding to 
           splitting the state between sites n and n + 1. Available only
           after performing update(restore_CF=True) or restore_CF()."""   

        self.uni_l = copy.deepcopy(uni_ground)
        self.uni_l.symm_gauge = False
        self.uni_l.sanity_checks = self.sanity_checks
        self.uni_l.update()

        self.uni_r = copy.deepcopy(uni_ground)
        self.uni_r.sanity_checks = self.sanity_checks
        self.uni_r.symm_gauge = False
        self.uni_r.update()

        self.grown_left = 0
        self.grown_right = 0
        self.shrunk_left = 0
        self.shrunk_right = 0

        self._init_arrays()

        for n in xrange(self.N + 2):
            self.A[n][:] = self.uni_l.A

        self.r[self.N] = self.uni_r.r
        self.r[self.N + 1] = self.r[self.N]
        self.l[0] = self.uni_l.l
Example no. 5
def adj_loglikelihood_gradient(xVec, lenSampleRibo, lenSampleRna, X, y, mu, sign):

    disp = sp.hstack([sp.repeat(xVec[0], lenSampleRibo), sp.repeat(xVec[1], lenSampleRna)])
    Gradient = sp.zeros_like(xVec)

    for i in range(len(xVec)):
        f1 = (digamma((1 / xVec[i])) - digamma( (1 / xVec[i]) + y[i])) / (xVec[i] ** 2)
        f2 = -((xVec[i] * mu[i] + (1 + xVec[i] * mu[i]) * sp.log(1 / (1 + xVec[i] * mu[i]))) / ((xVec[i] ** 2) * (1 + xVec[i] * mu[i])))
        f3 = y[i] / (xVec[i] + (xVec[i] ** 2) * mu[i])
        f4 = 0.5 * X.shape[1] * (mu[i] / (1 + sp.dot(mu.transpose(), disp)))
        Gradient[i] = f1 + f2 + f3 + f4

    return Gradient
Example no. 6
File: poly.py Project: laevar/mapy
def ausw_poly2(a,x):
    """ ausw_poly berechnet den Funktionswert von 
    p(x)=a_1 +a_2 x + a_3 x^2+ ... +a^n x^(n-1)
    INPUT:  a Vektor der Koeffizienten 
            x Vektor der auszuwertenden Punkte
    OUTPUT: y Vektor der Funktionswerte (y=p(x))"""

    n = len(a)
    k = len(x)
    xm = np.array([x])
    A = sp.repeat(xm.T, n,1)
    B = sp.repeat(np.array([range(0,n)]), k,0)
    return dot(A**B,a)
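
A minimal usage sketch (invented coefficients; numpy used in place of the np/sp/dot names assumed by the snippet) that reproduces the same Vandermonde-style construction and checks it against numpy.polyval:

# Hypothetical check of the ausw_poly2 construction against numpy.polyval
# (coefficients reversed because polyval expects the highest order first).
import numpy as np

a = np.array([1.0, 2.0, 3.0])          # p(x) = 1 + 2x + 3x^2
x = np.array([0.0, 1.0, 2.0])

# Same construction as ausw_poly2: A[i, j] = x[i], B[i, j] = j, so A**B is the
# Vandermonde matrix and (A**B) . a evaluates the polynomial at every point.
A = np.repeat(x[:, None], len(a), axis=1)
B = np.repeat(np.arange(len(a))[None, :], len(x), axis=0)
y = (A ** B).dot(a)

assert np.allclose(y, np.polyval(a[::-1], x))   # [1., 6., 17.]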
Example no. 7
    def __init__(self, N, uni_ground):
        self.odr = 'C'
        self.typ = sp.complex128

        self.zero_tol = sp.finfo(self.typ).resolution
        """Tolerance for detecting zeros. This is used when (pseudo-) inverting 
           l and r."""

        self._sanity_checks = False

        self.N = N
        """The number of sites. Do not change after initializing."""
        
        self.N_centre = N / 2
        """The 'centre' site. This affects the gauge-fixing and canonical
           form. It is the site between the left-gauge parts and the 
           right-gauge parts."""
           
        self.D = sp.repeat(uni_ground.D, self.N + 2)
        """Vector containing the bond-dimensions. A[n] is a 
           q[n] x D[n - 1] x D[n] tensor."""
           
        self.q = sp.repeat(uni_ground.q, self.N + 2)
        """Vector containing the site Hilbert space dimensions. A[n] is a 
           q[n] x D[n - 1] x D[n] tensor."""
          
        self.uni_l = copy.deepcopy(uni_ground)
        self.uni_l.symm_gauge = True
        self.uni_l.sanity_checks = self.sanity_checks
        self.uni_l.update()
        
        if not N % self.uni_l.L == 0:
            print "Warning: Length of nonuniform window is not a multiple of the uniform block size."

        self.uni_r = copy.deepcopy(self.uni_l)

        self.grown_left = 0
        self.grown_right = 0
        self.shrunk_left = 0
        self.shrunk_right = 0

        self._init_arrays()

        for n in xrange(1, self.N + 1):
            self.A[n][:] = self.uni_l.A[(n - 1) % self.uni_l.L]
        
        for n in xrange(self.N + 2):
            self.r[n][:] = sp.asarray(self.uni_l.r[(n - 1) % self.uni_l.L])
            self.l[n][:] = sp.asarray(self.uni_l.l[(n - 1) % self.uni_l.L])
Example no. 8
def ecdf(data,weighted=False,alpha=0.05):
  """
  Given an array and an alpha, returns 
     (x,p,a_n)
  x and p are arrays, where x are values in the sample space and p 
  is the corresponding cdf value.
  a_n is the margin of error according to the DKWM theorem using
  the supplied value of alpha. Interpreted, this means:
  
    P( {|cdf(x) - ecdf(x)| > a_n} ) <= alpha

  If weighted=True, then data should be a N-by-2 matrix,
  where each row contains 
    (data_pt, weight)
  where weight is as defined in the Horvitz-Thompson estimate.
  """

  cdf = {}

  if not weighted:
    # give all elements weight of 1
    data = concatenate( (data.reshape(len(data),1),ones((len(data),1))), axis=1 )

  def helper((x,w)):
    cdf[x] = cdf.get(x,0.0) + w

  print "  Uniqueifying..."
  map(helper,data)

  # data now has unique values
  print "  Arraying..."
  data = array(cdf.items())

  print "  Weighting..."
  w_total = data[:,1].sum()

  print "  Sorting..."
  sort_order = data[:,0].argsort()
  sorted = data[sort_order]
  
  print "  Summing..."
  ret_x = repeat(sorted[:,0],2)
  ret_p = concatenate(( [0.0],
                        repeat(1./w_total * cumsum(sorted[:-1,1]),2),
                        [1.0] ))
  a_n = sqrt( 1./(2*w_total) * log(2./alpha) )

  return ret_x,ret_p,a_n
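
A rough standalone sketch of the unweighted case with plain numpy (function and variable names here are illustrative, not from the original project): it builds the same duplicated step-function arrays and the DKWM margin a_n = sqrt(log(2/alpha) / (2n)).

# Standalone numpy sketch of the same idea: step-function ECDF plus DKWM band.
import numpy as np

def ecdf_sketch(data, alpha=0.05):
    data = np.sort(np.asarray(data, dtype=float))
    n = len(data)
    x = np.repeat(data, 2)                                        # duplicate x for the step plot
    p = np.concatenate(([0.0], np.repeat(np.arange(1, n) / float(n), 2), [1.0]))
    a_n = np.sqrt(np.log(2.0 / alpha) / (2.0 * n))
    return x, p, a_n

x, p, a_n = ecdf_sketch([3.0, 1.0, 2.0, 5.0])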
Example no. 9
File: dp.py Project: jrnold/psc585
def ddpsimul(pstar, s, N, x):
    """ Monte-Carlo simulation of discrete-state/action controlled Markov process

    Parameters
    -------------
    pstar : array, shape (n, n) or (n, n, T)
      Optimal state transition matrix. Usually returned by one of the methods of
      `Dpsolve`. The array has shape (n, n) for infinite horizon processes,
      and (n, n, T) for finite horizon processes.
    s : array, shape (k, )
      Initial states
    N : int
      Number of simulations
    x : array, shape (n, ) or (n, T)
      Optimal controls

    Returns
    ---------
    spath : array, shape (k, N + 1)
       Simulated states
    
    """
    infinite = (len(pstar.shape) == 2)
    n = pstar.shape[1]
    k = len(s)
    spath = sp.zeros((k, N+1), int)
    if infinite:
        ## Row cumulative sum
        cp = pstar.cumsum(1)
        spath[:, 0] = s
        for t in range(1, N + 1):
            ## Draws the column from a categorical distribution
            rdraw = random.rand(k, 1)
            s = (sp.repeat(rdraw, n, 1) > cp[s, ]).sum(1)
            spath[:, t] = s
    else:
        T = pstar.shape[2]
        if N > T:
            print("Simulations greater than the time horizon are ignored.")
        N = min(N, T)
        spath[:, 0] = s
        for t in range(N + 1):
            cp = pstar[...,t].cumsum(1)
            rdraw = random.rand(k, 1)
            s = (sp.repeat(rdraw, n, 1) > cp[s, ]).sum(1)

    xpath = x[spath]
    return (spath, xpath)
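
The state update inside the loop is a vectorised categorical draw; a minimal numpy sketch with an invented 2-state transition matrix (names are illustrative, numpy used for the sp/random aliases):

# Minimal numpy sketch of the categorical draw used inside ddpsimul: a uniform
# draw is compared against the row-wise cumulative transition probabilities,
# and the count of exceeded thresholds is the index of the next state.
import numpy as np

np.random.seed(0)
pstar = np.array([[0.9, 0.1],          # toy 2-state transition matrix (invented)
                  [0.2, 0.8]])
cp = pstar.cumsum(axis=1)              # row cumulative sums
s = np.array([0, 0, 1, 1])             # current states of k = 4 simulated chains

rdraw = np.random.rand(len(s), 1)
s_next = (np.repeat(rdraw, pstar.shape[1], axis=1) > cp[s, :]).sum(axis=1)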
Example no. 10
def MakeTestIonoclass(testv=False,testtemp=False,N_0=1e11,z_0=250.0,H_0=50.0,coords=None,times =sp.array([[0,1e6]])):
    """ This function will create a test ionoclass with an electron density that
    follows a chapman function"""
    if coords is None:
        xvec = sp.arange(-250.0,250.0,20.0)
        yvec = sp.arange(-250.0,250.0,20.0)
        zvec = sp.arange(50.0,900.0,2.0)
        # Mesh grid is set up in this way to allow for use in MATLAB with a simple reshape command
        xx,zz,yy = sp.meshgrid(xvec,zvec,yvec)
        coords = sp.zeros((xx.size,3))
        coords[:,0] = xx.flatten()
        coords[:,1] = yy.flatten()
        coords[:,2] = zz.flatten()
        zzf=zz.flatten()
    else:
        zzf = coords[:,2]
#    H_0 = 50.0 #km scale height
#    z_0 = 250.0 #km
#    N_0 = 10**11

    # Make electron density
    Ne_profile = Chapmanfunc(zzf,H_0,z_0,N_0)
    # Make temperature background
    if testtemp:
        (Te,Ti)= TempProfile(zzf)
    else:
        Te = np.ones_like(zzf)*2000.0
        Ti = np.ones_like(zzf)*2000.0

    # set up the velocity
    (Nlocs,ndims) = coords.shape
    Ntime= len(times)
    vel = sp.zeros((Nlocs,Ntime,ndims))

    if testv:
        vel[:,:,2] = sp.repeat(zzf[:,sp.newaxis],Ntime,axis=1)/5.0
    species=['O+','e-']
    # put the parameters in order
    params = sp.zeros((Ne_profile.size,len(times),2,2))
    params[:,:,0,1] = sp.repeat(Ti[:,sp.newaxis],Ntime,axis=1)
    params[:,:,1,1] = sp.repeat(Te[:,sp.newaxis],Ntime,axis=1)
    params[:,:,0,0] = sp.repeat(Ne_profile[:,sp.newaxis],Ntime,axis=1)
    params[:,:,1,0] = sp.repeat(Ne_profile[:,sp.newaxis],Ntime,axis=1)


    Icont1 = IonoContainer(coordlist=coords,paramlist=params,times = times,sensor_loc = sp.zeros(3),ver =0,coordvecs =
        ['x','y','z'],paramnames=None,species=species,velocity=vel)
    return Icont1
Example no. 11
def spindens(self,lrgm_out):
    from scipy import split,pi,complex128,zeros,repeat
    number_of_lattice_points = self.canvas.shape[0]*self.canvas.shape[1]
    number_of_nodes = len(self.tuple_canvas_coordinates)
    if number_of_nodes  == number_of_lattice_points:
        if self.order == 'even':
            Gup, Gdown = split(lrgm_out.reshape(self.canvas.shape[0],self.canvas.shape[1]*2),2,axis=1)
        elif self.order == 'odd':
            Gup, Gdown =  split(lrgm_out.reshape(-1,2),2,axis=1)
            Gup, Gdown =  Gup.reshape(self.canvas.shape), Gdown.reshape(self.canvas.shape)
        else:
            print "Please specify order of Nodes, i.e 'even' for allspinup-allspindown per sclice or odd for spinup-spindown-spinup-..."
        Sz = self.p.upar.hbar/(4*pi*1j*self.p.upar.a**2)*(Gup-Gdown)
    elif number_of_nodes < number_of_lattice_points:
            Sz= zeros(self.canvas.shape,dtype=complex128)
            print 'calculating spin density for sparse structure'
            lrgm_out = self.p.hbar/(4*pi*1j*self.p.upar.a**2)*lrgm_out
            expanded_array_of_coords = repeat(self.tuple_canvas_coordinates,2,axis=0)
            for index,node_name in enumerate(self.nodelist):
                if node_name % 2 == 0:
                    sign = 1
                else:
                    sign = -1
                Sz[tuple(expanded_array_of_coords[node_name])] += sign * lrgm_out[index]
    else:
        print 'Number of nodes larger than canvas, something is wrong!'
    print 'max Spin Split: ', Sz.real.max()-Sz.real.min()
#The real part seems more plausible; the imaginary part oscillates wildly
    return Sz.real
Example no. 12
 def getBrownianIncrement(self,N):
     K = self.param['nb_grid']
     M = N/K
     dW = scipy.zeros((1,M))
     dW[0,:]=self.rand.normal(loc=0.,scale=1.,size=M)
     dWW = scipy.repeat(dW,K,axis=0)
     return dWW.flatten()
Example no. 13
def globs(globs):
    # setup mock urllib2 module to avoid downloading from mldata.org
    mock_datasets = {
        'mnist-original': {
            'data': sp.empty((70000, 784)),
            'label': sp.repeat(sp.arange(10, dtype='d'), 7000),
        },
        'iris': {
            'data': sp.empty((150, 4)),
        },
        'datasets-uci-iris': {
            'double0': sp.empty((150, 4)),
            'class': sp.empty((150,)),
        },
    }

    global custom_data_home
    custom_data_home = tempfile.mkdtemp()
    makedirs(join(custom_data_home, 'mldata'))
    globs['custom_data_home'] = custom_data_home

    global _urllib2_ref
    _urllib2_ref = datasets.mldata.urllib2
    globs['_urllib2_ref'] = _urllib2_ref
    globs['mock_urllib2'] = mock_urllib2(mock_datasets)
    return globs
Example no. 14
def make_batches(data, labels=None, batch_size=100):
    if labels is not None:
        num_labels = labels.shape[1]
        cls_data = [data[find(labels[:,i] == 1)] for i in range(num_labels)]
        cls_sizes = [d.shape[0] for d in cls_data]
        cls_sels = [permutation(range(s)) for s in cls_sizes]
        n = min(cls_sizes) * len(cls_sizes)
        batch_size = min(n, batch_size)
        lpb = batch_size / num_labels
        new_dat = []
        for i in range(n/batch_size):
            for sel, cd in zip(cls_sels, cls_data):
                new_dat.append(cd[sel[i*lpb:(i+1)*lpb]])
        if sparse.issparse(data):
            data = sparse.vstack(new_dat).tocsr()
        else:
            data = np.vstack(new_dat)
        labels = np.tile(np.repeat(np.eye(num_labels),lpb,0), (n/batch_size,1))
        n = len(labels)
        perm = range(n)
    else:
        n = data.shape[0]
        perm = permutation(range(n))
    i = 0
    while i < n:
        batch = perm[i:i+batch_size]
        i += batch_size
        yield (data[batch], None) if labels is None else (data[batch], labels[batch])
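
The label matrix rebuilt near the end interleaves lpb one-hot rows per class; a tiny illustration with invented sizes:

# Illustration of the balanced-label construction: 2 rows per class, 3 classes,
# repeated for 2 batches -> a (12, 3) one-hot matrix ordered class by class
# within each batch.
import numpy as np

num_labels, lpb, num_batches = 3, 2, 2
labels = np.tile(np.repeat(np.eye(num_labels), lpb, axis=0), (num_batches, 1))
print(labels.shape)        # (12, 3)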
Example no. 15
File: ps3.py Project: jrnold/psc585
def make_y0(model):
    """ Make y0 """
    def mu_ij(i, j):
        return -sp.sqrt(uij[j, i] + (model.c / (1 - model.p[j]))
                        - (1 - model.d) * ubar[j]
                        - model.d * v0[j])

    # \bar{u} : status quo payoffs
    ubar = -(model.ideals ** 2).sum(1) + model.K
    # TODO: where did plus 10 come from?
    uij = (-(model.ideals[:, 0] - model.ideals[:, 0][:, sp.newaxis])**2 +
           -(model.ideals[:, 1] - model.ideals[:, 1][:, sp.newaxis])**2 + model.K)
    # v_0
    v0 = (uij * model.p[:, sp.newaxis]).sum(1) + model.c
        ## \lambda_0
    lam0 = sp.ones((5, 6)) * -sp.sqrt(model.c)
    # if m_i = i
    lam0[sp.r_[0:5], sp.r_[0:5]] = 1
    lam0 = sp.reshape(lam0, (lam0.size, ))
    # x_0
    x0 = sp.reshape(sp.repeat(model.ideals, 6, axis=0), (60, ))
    # \mu_0
    mu0 = sp.zeros((5, 6, 2))
    # For players
    for i in range(5):
        # For coalitions
        for mi in range(6):
            # for each other player in the coalition
            ii = i * 6 + mi
            mu0[i, mi, 0] = mu_ij(i, model.part1[ii])
            mu0[i, mi, 1] = mu_ij(i, model.part2[ii])
    mu0 = sp.ravel(mu0)
    # y_0
    y0 = sp.concatenate((v0, lam0, x0, mu0))
    return y0
 def __init__(self, covars, names=[], *args, **kw_args):
     #1. check that all covars are covariance functions
     #2. get number of params
     super(SumCF, self).__init__(*args, **kw_args)
     self.n_params_list = []
     self.covars = []
     self.covars_theta_I = []
     self.covars_covar_I = []
     
     self.covars = covars
     if names and len(names) == len(self.covars):
         self.names = names
     elif names:
         self.names = []
         print "names provided, but shapes not matching (names:{}) (covars:{})".format(len(names), len(covars))
     else:
         self.names = []
     
     i = 0
     
     for nc in xrange(len(covars)):
         covar = covars[nc]
         assert isinstance(covar, CovarianceFunction), 'SumCF: SumCF is constructed from a list of covariance functions'
         Nparam = covar.get_number_of_parameters()
         self.n_params_list.append(Nparam)
         self.covars_theta_I.append(sp.arange(i, i + covar.get_number_of_parameters()))
         self.covars_covar_I.extend(sp.repeat(nc, Nparam))
         i += covar.get_number_of_parameters()
     
     self.n_params_list = sp.array(self.n_params_list)
     self.n_hyperparameters = self.n_params_list.sum()
Example no. 17
    def ground_motion_sample(self, log_mean, log_sigma):
        """
        Like .sample_for_eqrm() but adds spawn and recurrence_model dimensions.

        log_mean, log_sigma: ndarray[gmm, site, event, period]. These
        represent the mean and standard deviation of the predicted
        spectral accelerations (indexed by period) at a site due to an
        event, as calculated by the attenuation model indexed by
        gmm. See the ground_motion_interface module for more
        information.

        Returns: ndarray[spawn, GMmodel, rec_model, site, event,
        period] spectral accelerations, measured in G.
        
        """
        assert log_mean.ndim == 4
        
        if self.var_method == SPAWN:
            s = self._spawn(log_mean, log_sigma)
        else:  
            s = self.sample_for_eqrm(log_mean, log_sigma, self.var_in_last_axis)[newaxis, ...]

        # monte_carlo has added and populated the recurrence model dimension
        if self.var_method == RANDOM_SAMPLING:
            return s

        # Add the recurrence model dimension and "manually" broadcast
        # it so that our caller doesn't have to treat this as a
        # special case.
        return repeat(s[:, :, newaxis, :, :, :],
                      self.n_recurrence_models,
                      2)
Example no. 18
def Euclidean_DML(feat, M, query=None,
                  is_sparse=False, is_trans=False):
    """ Euclidean distance with DML.
    """
    (N, D) = feat.shape
    dotprod = feat.dot(M).dot(feat.T)
    l2norm = sp.repeat(dotprod.diagonal().reshape(1, -1), N, 0)
    return l2norm + l2norm.T - 2 * dotprod
Example no. 19
def adj_loglikelihood(xVec, lenSampleRibo, lenSampleRna, X, y, mu, sign):

    disp = sp.hstack([sp.repeat(xVec[0], lenSampleRibo), sp.repeat(xVec[1], lenSampleRna)])
    n = 1 / disp
    p = n / (n + mu)
    loglik = sum(nbinom.logpmf(y, n, p))

    diagVec = mu / (1 + sp.dot(mu.transpose(), disp))
    diagWM = sp.diagflat(diagVec)
    xtwx = sp.dot(sp.dot(sp.transpose(X), diagWM), X)

    coxreid = 0.5 * sp.log(sp.linalg.det(xtwx))
    ret = (loglik - coxreid) * sign
    #print "return value is " + str(ret)
    if isinstance(ret, complex):
        raise complexException()
    return ret
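
The parametrisation used here, n = 1/disp and p = n/(n + mu), corresponds to a negative binomial with mean mu and variance mu + disp*mu**2; a quick standalone check with invented values:

# Quick check of the NB parametrisation used above: n = 1/disp, p = n/(n + mu)
# yields mean mu and variance mu + disp * mu**2.
import numpy as np
from scipy.stats import nbinom

mu, disp = 5.0, 0.2
n = 1.0 / disp
p = n / (n + mu)
mean, var = nbinom.stats(n, p, moments='mv')
assert np.isclose(mean, mu)
assert np.isclose(var, mu + disp * mu ** 2)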
Example no. 20
def Problem5Real():
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)
    
    m = 20
    v = 200
    f = discretelognorm(w,m,v)
    
    VEprime = sp.zeros((N,1))
    VUprime    = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    gamma = 0.1
    beta = 0.9
    
    tol = 10**-9
    delta1 = 1+tol
    delta2 = 1+tol
    it = 0
    while ((delta1 >= tol) or (delta2 >= tol)):
        it += 1
        VE = VEprime.copy()
        VU = VUprime.copy()
        EVU = EVUprime.copy()
        
        VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
        arg1 = sp.repeat(sp.transpose(VE),N,0)
        arg2 = sp.repeat(EVU,N,1)
        arg = sp.array([arg2,arg1])
        VUprime = alpha_util_grid + beta*sp.amax(arg,axis = 0)
        psi = sp.argmax(arg,axis = 0)
        EVUprime = sp.dot(VUprime,f)
    
        delta1 = sp.linalg.norm(VEprime - VE)
        delta2 = sp.linalg.norm(VUprime - VU)
        #print(delta1)
        
    wr_ind = sp.argmax(sp.diff(psi), axis = 1)
    wr = w[wr_ind]
    return wr
Example no. 21
def connect_pores(network, pores1, pores2, labels=[], add_conns=True):
    r'''
    Returns the possible connections between two group of pores, and optionally
    makes the connections.

    Parameters
    ----------
    network : OpenPNM Network Object

    pores1 : array_like
        The first group of pores on the network

    pores2 : array_like
        The second group of pores on the network

    labels : list of strings
        The labels to apply to the new throats.  This argument is only needed
        if ``add_conns`` is True.

    add_conns : bool
        Indicates whether the connections should be added to the supplied
        network (default is True).  Otherwise, the connections are returned
        as an Nt x 2 array that can be passed directly to ``extend``.

    Notes
    -----
    It creates the connections in a format which is acceptable by
    the default OpenPNM connection ('throat.conns') and either adds them to
    the network or returns them.

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> pn.Nt
    300
    >>> pn.connect_pores(pores1=[22, 32], pores2=[16, 80, 68])
    >>> pn.Nt
    306
    >>> pn['throat.conns'][300:306]
    array([[16, 22],
           [22, 80],
           [22, 68],
           [16, 32],
           [32, 80],
           [32, 68]])

    '''
    size1 = _sp.size(pores1)
    size2 = _sp.size(pores2)
    array1 = _sp.repeat(pores1, size2)
    array2 = _sp.tile(pores2, size1)
    conns = _sp.vstack([array1, array2]).T
    if add_conns:
        extend(network=network, throat_conns=conns, labels=labels)
    else:
        return conns
Example no. 22
def Bdist(*args):
    '''binary distance matrix:
    dist(X)    -  return matrix of size (len(X),len(X)) all True!
    dist(X1,X2)-  return matrix of size (len(X1),len(X2)) with (xi==xj)'''
    
    
    if(len(args)==1):
        #return true
        X  = args[0]
        Y  = args[0]
    elif(len(args)>=2):
        X  = args[0]
        Y  = args[1]
    A = SP.repeat(X,1,len(Y))
    B = SP.repeat(Y.T,len(X),1)    
    rv = (A&B)
   
    return rv
Example no. 23
    def __init__(self,ionoin,configfile,timein=None,mattype='matrix'):
        """ This will create the RadarSpaceTimeOperator object.
            Inputs
                ionoin - The input ionocontainer. This can be either a string naming an ionocontainer file,
                    a list of ionocontainer objects or a list of strings naming ionocontainer files.
                config  - The ini file used to set up the simulation.
                timein - A Ntx2 numpy array of times.
                RSTOPinv - The inverse operator object.
                invmat - The inverse matrix to the original operator.
        """
        mattype=mattype.lower()
        accepttypes=['matrix','sim','real']
        if not mattype in accepttypes:
            raise ValueError('Matrix type can only be {0}'.format(', '.join(accepttypes)))
        d2r = sp.pi/180.0
        (sensdict,simparams) = readconfigfile(configfile)
        # determine if the input ionocontainer is a string, a list of strings or a list of ionocontainers.
        ionoin=makeionocombined(ionoin)
        #Input location
        self.Cart_Coords_In = ionoin.Cart_Coords
        self.Sphere_Coords_In = ionoin.Sphere_Coords

        # Set the input times
        if timein is None:
            self.Time_In = ionoin.Time_Vector
        else:
            self.Time_In = timein

        #Create an array of output location based off of the inputs
        rng_vec2 = simparams['Rangegatesfinal']
        nrgout = len(rng_vec2)

        angles = simparams['angles']
        nang =len(angles)

        ang_data = sp.array([[iout[0],iout[1]] for iout in angles])
        rng_all = sp.repeat(rng_vec2,(nang),axis=0)
        ang_all = sp.tile(ang_data,(nrgout,1))
        self.Sphere_Coords_Out = sp.column_stack((rng_all,ang_all))
        (R_vec,Az_vec,El_vec) = (self.Sphere_Coords_Out[:,0],self.Sphere_Coords_Out[:,1],
            self.Sphere_Coords_Out[:,2])
        xvecmult = sp.sin(Az_vec*d2r)*sp.cos(El_vec*d2r)
        yvecmult = sp.cos(Az_vec*d2r)*sp.cos(El_vec*d2r)
        zvecmult = sp.sin(El_vec*d2r)
        X_vec = R_vec*xvecmult
        Y_vec = R_vec*yvecmult
        Z_vec = R_vec*zvecmult

        self.Cart_Coords_Out = sp.column_stack((X_vec,Y_vec,Z_vec))
        self.Time_Out = sp.column_stack((simparams['Timevec'],simparams['Timevec']+simparams['Tint']))+self.Time_In[0,0]
        self.simparams=simparams
        self.sensdict=sensdict
        self.lagmat = self.simparams['amb_dict']['WttMatrix']
        self.mattype=mattype
        # create the matrix
        (self.RSTMat,self.overlaps,self.blocklocs) = makematPA(ionoin.Sphere_Coords,ionoin.Cart_Coords,ionoin.Time_Vector,configfile,ionoin.Velocity,mattype)
Example no. 24
def Euclidean(feat, query=None,
              is_sparse=False, is_trans=False):
    """ Euclidean distance.
    """
    if query is None:
        (N, D) = feat.shape
        dotprod = feat.dot(feat.T)
        featl2norm = sp.repeat(dotprod.diagonal().reshape(1, -1), N, 0)
        qryl2norm = featl2norm.T
    else:
        (nQ, D) = query.shape
        (N, D) = feat.shape
        dotprod = query.dot(feat.T)
        qryl2norm = \
            sp.repeat(np.multiply(query, query).sum(1).reshape(-1, 1), N,  1)
        featl2norm = \
            sp.repeat(np.multiply(feat, feat).sum(1).reshape(1, -1), nQ, 0)

    return qryl2norm + featl2norm - 2 * dotprod
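
Both branches rely on the identity ||q_i - f_j||^2 = ||q_i||^2 + ||f_j||^2 - 2 q_i . f_j; a small self-contained check on invented data:

# Self-contained check of the squared-distance identity used above.
import numpy as np

np.random.seed(1)
query = np.random.randn(3, 5)
feat = np.random.randn(4, 5)

dotprod = query.dot(feat.T)
qryl2norm = np.repeat(np.multiply(query, query).sum(1).reshape(-1, 1), feat.shape[0], 1)
featl2norm = np.repeat(np.multiply(feat, feat).sum(1).reshape(1, -1), query.shape[0], 0)
dist2 = qryl2norm + featl2norm - 2 * dotprod

# Same result element by element from the definition.
brute = ((query[:, None, :] - feat[None, :, :]) ** 2).sum(-1)
assert np.allclose(dist2, brute)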
Example no. 25
    def __init__(self,ionoin,configfile):
        r2d = 180.0/sp.pi
        d2r = sp.pi/180.0
        (sensdict,simparams) = readconfigfile(configfile)
        nt = ionoin.Time_Vector.shape[0]
        nloc = ionoin.Sphere_Coords.shape[0]

        #Input location
        self.Cart_Coords_in = ionoin.Cart_Coords
        self.Sphere_Coords_In = ionoin.Sphere_Coords
        self.Time_In = ionoin.Time_Vector
        self.Cart_Coords_In_Rep = sp.tile(ionoin.Cart_Coords,(nt,1))
        self.Sphere_Coords_In_Rep = sp.tile(ionoin.Sphere_Coords,(nt,1))
        self.Time_In_Rep  = sp.repeat(ionoin.Time_Vector,nloc,axis=0)

        #output locations
        rng_vec2 = simparams['Rangegatesfinal']
        nrgout = len(rng_vec2)

        angles = simparams['angles']
        nang =len(angles)

        ang_data = sp.array([[iout[0],iout[1]] for iout in angles])
        rng_all = sp.tile(rng_vec2,(nang))
        ang_all = sp.repeat(ang_data,nrgout,axis=0)
        nlocout = nang*nrgout

        ntout = len(simparams['Timevec'])
        self.Sphere_Coords_Out = sp.column_stack((rng_all,ang_all))
        (R_vec,Az_vec,El_vec) = (self.Sphere_Coords_Out[:,0],self.Sphere_Coords_Out[:,1],self.Sphere_Coords_Out[:,2])
        xvecmult = sp.cos(Az_vec*d2r)*sp.cos(El_vec*d2r)
        yvecmult = sp.sin(Az_vec*d2r)*sp.cos(El_vec*d2r)
        zvecmult = sp.sin(El_vec*d2r)
        X_vec = R_vec*xvecmult
        Y_vec = R_vec*yvecmult
        Z_vec = R_vec*zvecmult

        self.Cart_Coords_Out = sp.column_stack((X_vec,Y_vec,Z_vec))
        self.Time_Out = simparams['Timevec']
        self.Time_Out_Rep =sp.repeat(simparams['Timevec'],nlocout,axis=0)
        self.Sphere_Coords_Out_Rep =sp.tile(self.Sphere_Coords_Out,(ntout,1))
        self.RSTMat = makematPA(ionoin.Sphere_Coords,ionoin.Time_Vector)
Example no. 26
def tmp():

    import scipy as sp
    import fastlmm.util.stats.plotp as pt    

    pall=[]
    for j in sp.arange(0,100):
        p=sp.rand(1,1000)
        p100=sp.repeat(p,100)
        pall.append(p100)
    qqplotavg(pall)
Example no. 27
def runinversion(basedir,configfile,acfdir='ACF',invtype='tik'):
    """ """
    costdir = os.path.join(basedir,'Cost')
    
    pname=os.path.join(costdir,'cost{0}-{1}.pickle'.format(acfdir,invtype))
    pickleFile = open(pname, 'rb')
    alpha_arr=pickle.load(pickleFile)[-1]
    pickleFile.close()
    
    ionoinfname=os.path.join(basedir,acfdir,'00lags.h5')
    ionoin=IonoContainer.readh5(ionoinfname)
    
    dirio = ('Spectrums','Mat','ACFMat')
    inputdir = os.path.join(basedir,dirio[0])
    
    dirlist = glob.glob(os.path.join(inputdir,'*.h5'))
    (listorder,timevector,filenumbering,timebeg,time_s) = IonoContainer.gettimes(dirlist)
    Ionolist = [dirlist[ikey] for ikey in listorder]
    if acfdir.lower()=='acf':
        ionosigname=os.path.join(basedir,acfdir,'00sigs.h5') 
        ionosigin=IonoContainer.readh5(ionosigname)
        nl,nt,np1,np2=ionosigin.Param_List.shape
        sigs=ionosigin.Param_List.reshape((nl*nt,np1,np2))
        sigsmean=sp.nanmean(sigs,axis=0)
        sigdiag=sp.diag(sigsmean)
        sigsout=sp.power(sigdiag/sigdiag[0],.5).real
        alpha_arr=sp.ones_like(alpha_arr)*alpha_arr[0]
    
        acfloc='ACFInv'
    elif acfdir.lower()=='acfmat':
        mattype='matrix'
        acfloc='ACFMatInv'
    mattype='sim'
    RSTO = RadarSpaceTimeOperator(Ionolist,configfile,timevector,mattype=mattype)  
    if 'perryplane' in basedir.lower() or 'simpdata' in basedir.lower():
        rbounds=[-500,500]
    else:
        rbounds=[0,500]
    
    ionoout=invertRSTO(RSTO,ionoin,alpha_list=alpha_arr,invtype=invtype,rbounds=rbounds)[0]
    outfile=os.path.join(basedir,acfloc,'00lags{0}.h5'.format(invtype))
    ionoout.saveh5(outfile)
    if acfdir=='ACF':
        lagsDatasum=ionoout.Param_List
        # !!! This is done to speed up development 
        lagsNoisesum=sp.zeros_like(lagsDatasum)
        Nlags=lagsDatasum.shape[-1]
        pulses_s=RSTO.simparams['Tint']/RSTO.simparams['IPP']
        Ctt=makeCovmat(lagsDatasum,lagsNoisesum,pulses_s,Nlags)
        outfile=os.path.join(basedir,acfloc,'00sigs{0}.h5'.format(invtype))
        ionoout.Param_List=Ctt
        ionoout.Param_Names=sp.repeat(ionoout.Param_Names[:,sp.newaxis],Nlags,axis=1)
        ionoout.saveh5(outfile)
Example no. 28
def startvalfunc(Ne_init, loc, time, exinputs):
    """ """

    Ionoin = IonoContainer.readh5(exinputs[0])

    numel = sp.prod(Ionoin.Param_List.shape[-2:]) + 1

    xarray = sp.zeros((loc.shape[0], len(time), numel))
    for ilocn, iloc in enumerate(loc):
        (datast, vel) = Ionoin.getclosest(iloc, time)[:2]
        datast[:, -1, 0] = Ne_init[ilocn, :]
        ionoden = datast[:, :-1, 0]
        ionodensum = sp.repeat(sp.sum(ionoden, axis=-1)[:, sp.newaxis], ionoden.shape[-1], axis=-1)
        ionoden = sp.repeat(Ne_init[ilocn, :, sp.newaxis], ionoden.shape[-1], axis=-1) * ionoden / ionodensum
        datast[:, :-1, 0] = ionoden
        xarray[ilocn, :, :-1] = sp.reshape(datast, (len(time), numel - 1))
        locmag = sp.sqrt(sp.sum(iloc * iloc))
        ilocmat = sp.repeat(iloc[sp.newaxis, :], len(time), axis=0)
        xarray[ilocn, :, -1] = sp.sum(vel * ilocmat) / locmag

    return xarray
Example no. 29
def startvalfunc(Ne_init, loc,time,inputs):
    """ This is a method to determine the start values for the fitter.
    Inputs
        Ne_init - A nloc x nt numpy array of the initial estimate of electron density. Basically
        the zeroth lag of the ACF.
        loc - A nloc x 3 numpy array of Cartesian coordinates.
        time - A nt x 2 numpy array of times in seconds.
        inputs - A list of extra inputs allowed for by the fitter class. It only
            has one element and it is the name of the ionocontainer file holding the
            rest of the start parameters.
    Outputs
        xarray - This is a numpy array of starting values for the fitter parameters."""
    if isinstance(inputs,str):
        if os.path.splitext(inputs)[-1]=='.h5':
            Ionoin = IonoContainer.readh5(inputs)
        elif os.path.splitext(inputs)[-1]=='.mat':
            Ionoin = IonoContainer.readmat(inputs)
        elif os.path.splitext(inputs)[-1]=='':
            Ionoin = IonoContainer.makeionocombined(inputs)
    elif isinstance(inputs,list):
        Ionoin = IonoContainer.makeionocombined(inputs)
    else:
        Ionoin = inputs

    numel =sp.prod(Ionoin.Param_List.shape[-2:]) +1

    xarray = sp.zeros((loc.shape[0],len(time),numel))
    for ilocn, iloc in enumerate(loc):
        (datast,vel)=Ionoin.getclosest(iloc,time)[:2]
        datast[:,-1,0] = Ne_init[ilocn,:]
        ionoden =datast[:,:-1,0]
        ionodensum = sp.repeat(sp.sum(ionoden,axis=-1)[:,sp.newaxis],ionoden.shape[-1],axis=-1)
        ionoden = sp.repeat(Ne_init[ilocn,:,sp.newaxis],ionoden.shape[-1],axis=-1)*ionoden/ionodensum
        datast[:,:-1,0] = ionoden
        xarray[ilocn,:,:-1]=sp.reshape(datast,(len(time),numel-1))
        locmag = sp.sqrt(sp.sum(iloc*iloc))
        ilocmat = sp.repeat(iloc[sp.newaxis,:],len(time),axis=0)
        xarray[ilocn,:,-1] = sp.sum(vel*ilocmat)/locmag
    return xarray
Example no. 30
 def lift(self,rho,grid,N):
     print "HIER !"
     K = self.param['nb_grid']
     M = N/K
     cum,edges = self.rho2cum(rho,grid)
     print K
     y = 1./K*scipy.arange(0.5,K,1.)
     x = self.inv_cum(y,cum,edges)
     print y
     print x
     xx = scipy.repeat(x,M)
     print K,M
     return xx
Example no. 31
passociation = [['d' + str(i[0]), 'a' + str(i[1])] for i in passociation]

##################################################################

G = nx.Graph()
G.add_edges_from(passociation)

drugset, adrset = nx.algorithms.bipartite.sets(G)
drugG = nx.algorithms.bipartite.projected_graph(G, drugset)
adrG = nx.algorithms.bipartite.projected_graph(G, adrset)

adjmat = nx.adjacency_matrix(G)

##################################################################

identity = scipy.diag(scipy.repeat(1, 1563))

a = linalg.eig(adjmat)
eigdrug = scipy.amax(a[0])

# drugpath=nx.katz_centrality(drugG,eigdrug)
beta1 = 0.7 * 1 / eigdrug.real

katz = linalg.inv(identity - beta1 * adjmat) - identity

drugsim2 = scipy.zeros((746, 746))
adrsim2 = scipy.zeros((817, 817))

for index1, i in enumerate(G.nodes()):
    for index2, j in enumerate(G.nodes()):
        if i[0] == "d" and j[0] == 'd':
Example no. 32
def SmeanField(cluster,
               coocMat,
               meanFieldPriorLmbda=0.,
               numSamples=None,
               indTerm=True,
               alternateEnt=False,
               useRegularizedEq=True):
    """
    meanFieldPriorLmbda (0.): 3.23.2014
    indTerm (True)          : As of 2.19.2014, I'm not
                              sure whether this term should
                              be included, but I think so
    alternateEnt (False)    : Explicitly calculate entropy
                              using the full partition function
    useRegularizedEq (True) : Use regularized form of equation
                              even when meanFieldPriorLmbda = 0.
    """

    coocMatCluster = coocCluster(coocMat, cluster)
    # in case we're given an upper-triangular coocMat:
    coocMatCluster = symmetrizeUsingUpper(coocMatCluster)

    outer = scipy.outer
    N = len(cluster)

    freqs = scipy.diag(coocMatCluster)
    c = coocMatCluster - outer(freqs, freqs)

    Mdenom = scipy.sqrt(outer(freqs * (1. - freqs), freqs * (1 - freqs)))
    M = c / Mdenom

    if indTerm:
        Sinds = -freqs*scipy.log(freqs)             \
            -(1.-freqs)*scipy.log(1.-freqs)
        Sind = scipy.sum(Sinds)
    else:
        Sind = 0.

    # calculate off-diagonal (J) parameters
    if (meanFieldPriorLmbda != 0.) or useRegularizedEq:
        # 3.22.2014
        if meanFieldPriorLmbda != 0.:
            gamma = meanFieldPriorLmbda / numSamples
        else:
            gamma = 0.
        mq, vq = scipy.linalg.eig(M)
        mqhat = 0.5*( mq-gamma +                        \
                scipy.sqrt((mq-gamma)**2 + 4.*gamma) )
        jq = 1. / mqhat  #1. - 1./mqhat
        Jprime = scipy.real_if_close(                   \
                dot( vq , dot(scipy.diag(jq),vq.T) ) )
        JMF = zeroDiag(Jprime / Mdenom)

        ent = scipy.real_if_close(                      \
                Sind + 0.5*scipy.sum( scipy.log(mqhat)  \
                + 1. - mqhat ) )
    else:
        # use non-regularized equations
        Minv = scipy.linalg.inv(M)
        JMF = zeroDiag(Minv / Mdenom)

        logMvals = scipy.log(scipy.linalg.svdvals(M))
        ent = Sind + 0.5 * scipy.sum(logMvals)

    # calculate diagonal (h) parameters
    piFactor = scipy.repeat([(freqs - 0.5) / (freqs * (1. - freqs))],
                            N,
                            axis=0).T
    pjFactor = scipy.repeat([freqs], N, axis=0)
    factor2 = c * piFactor - pjFactor
    hMF = scipy.diag(scipy.dot(JMF, factor2.T)).copy()
    if indTerm:
        hMF -= scipy.log(freqs / (1. - freqs))

    J = replaceDiag(0.5 * JMF, hMF)

    if alternateEnt:
        ent = analyticEntropy(J)

    # make 'full' version of J (of size NfullxNfull)
    Nfull = len(coocMat)
    Jfull = JfullFromCluster(J, cluster, Nfull)

    return ent, Jfull
Example no. 33
    def __init__(self, ionoin, configfile, timein=None, mattype='matrix'):
        """ This will create the RadarSpaceTimeOperator object.
            Inputs
                ionoin - The input ionocontainer. This can be either a string naming an ionocontainer file,
                    a list of ionocontainer objects or a list of strings naming ionocontainer files.
                config  - The ini file used to set up the simulation.
                timein - A Ntx2 numpy array of times.
                RSTOPinv - The inverse operator object.
                invmat - The inverse matrix to the original operator.
        """
        mattype = mattype.lower()
        accepttypes = ['matrix', 'sim', 'real']
        if not mattype in accepttypes:
            raise ValueError('Matrix type can only be {0}'.format(
                ', '.join(accepttypes)))
        d2r = sp.pi / 180.0
        (sensdict, simparams) = readconfigfile(configfile)
        # determine if the input ionocontainer is a string, a list of strings or a list of ionocontainers.
        ionoin = makeionocombined(ionoin)
        #Input location
        self.Cart_Coords_In = ionoin.Cart_Coords
        self.Sphere_Coords_In = ionoin.Sphere_Coords

        # Set the input times
        if timein is None:
            self.Time_In = ionoin.Time_Vector
        else:
            self.Time_In = timein

        #Create an array of output location based off of the inputs
        rng_vec2 = simparams['Rangegatesfinal']
        nrgout = len(rng_vec2)

        angles = simparams['angles']
        nang = len(angles)

        ang_data = sp.array([[iout[0], iout[1]] for iout in angles])
        rng_all = sp.repeat(rng_vec2, (nang), axis=0)
        ang_all = sp.tile(ang_data, (nrgout, 1))
        self.Sphere_Coords_Out = sp.column_stack((rng_all, ang_all))
        (R_vec, Az_vec,
         El_vec) = (self.Sphere_Coords_Out[:, 0], self.Sphere_Coords_Out[:, 1],
                    self.Sphere_Coords_Out[:, 2])
        xvecmult = sp.sin(Az_vec * d2r) * sp.cos(El_vec * d2r)
        yvecmult = sp.cos(Az_vec * d2r) * sp.cos(El_vec * d2r)
        zvecmult = sp.sin(El_vec * d2r)
        X_vec = R_vec * xvecmult
        Y_vec = R_vec * yvecmult
        Z_vec = R_vec * zvecmult

        self.Cart_Coords_Out = sp.column_stack((X_vec, Y_vec, Z_vec))
        self.Time_Out = sp.column_stack(
            (simparams['Timevec'],
             simparams['Timevec'] + simparams['Tint'])) + self.Time_In[0, 0]
        self.simparams = simparams
        self.sensdict = sensdict
        self.lagmat = self.simparams['amb_dict']['WttMatrix']
        self.mattype = mattype
        # create the matrix
        (self.RSTMat, self.overlaps,
         self.blocklocs) = makematPA(ionoin.Sphere_Coords, ionoin.Cart_Coords,
                                     ionoin.Time_Vector, configfile,
                                     ionoin.Velocity, mattype)
Example no. 34
    def initTau(self, groups, pa=1e-3, pb=1e-3, qa=1., qb=1., qE=None):
        """Method to initialise the precision of the noise

        PARAMETERS
        ----------
        pa: float
            'a' parameter of the prior distribution
        pb: float
            'b' parameter of the prior distribution
        qa: float
            initialisation of the 'a' parameter of the variational distribution
        qb: float
            initialisation of the 'b' parameter of the variational distribution
        qE: float
            initial expectation of the variational distribution
        """

        # Sanity checks
        assert len(
            groups
        ) == self.N, 'sample group labels do not match the number of samples'

        tau_list = [None] * self.M

        # convert groups into integers from 0 to n_groups
        tmp = np.unique(groups, return_inverse=True)
        groups_ix = tmp[1]

        n_group = len(np.unique(groups_ix))

        for m in range(self.M):

            # Poisson noise model for count data
            if self.lik[m] == "poisson":
                tmp = 0.25 + 0.17 * s.nanmax(self.data[m], axis=0)
                tmp = s.repeat(tmp[None, :], self.N, axis=0)
                tau_list[m] = Tau_Seeger(dim=(self.N, self.D[m]), value=tmp)

            # Bernoulli noise model for binary data
            elif self.lik[m] == "bernoulli":
                # tau_list[m] = Constant_Node(dim=(self.D[m],), value=s.ones(self.D[m])*0.25)
                # tau_list[m] = Tau_Jaakkola(dim=(self.D[m],), value=0.25)
                tau_list[m] = Tau_Jaakkola(dim=((self.N, self.D[m])), value=1.)

            elif self.lik[m] == "zero_inflated":
                # contains parameters to initialise both normal and jaakola tau
                tau_list[m] = Zero_Inflated_Tau_Jaakkola(dim=((n_group,
                                                               self.D[m])),
                                                         value=1.,
                                                         pa=pa,
                                                         pb=pb,
                                                         qa=qa,
                                                         qb=qb,
                                                         groups=groups_ix,
                                                         qE=qE)

            # Gaussian noise model for continuous data
            elif self.lik[m] == "gaussian":
                tau_list[m] = TauD_Node(dim=(n_group, self.D[m]),
                                        pa=pa,
                                        pb=pb,
                                        qa=qa,
                                        qb=qb,
                                        groups=groups_ix,
                                        qE=qE)

        self.nodes["Tau"] = Multiview_Mixed_Node(self.M, *tau_list)
Example no. 35
data_file_f = open(data_file, 'rb')
data = cPickle.load(data_file_f)

#for name, probe in data.iteritems():
for name in ['CATMA3A12810', 'CATMA3A22550' , 'CATMA4A36120', 'CATMA3A19900']:    
#    if not name == 'CATMA3A12810': #CATMA3A22550 , CATMA4A36120, CATMA3A19900
#        continue
    probe = data[name]
    
    C = probe['C']
    T = probe['T']

    replicates = 4

    x1 = C[0]
    x1_rep = SP.array([SP.repeat(i, len(x)) for i,x in enumerate(x1)])
    #x1 = SP.concatenate((x1, x1_rep), axis=1)
    x2 = T[0]
    x2_rep = SP.array([SP.repeat(i, len(x)) for i,x in enumerate(x2)])
    #x2 = SP.concatenate((x2, x2_rep), axis=1)

    x = SP.concatenate((x1, x2), axis=0)
    y = SP.concatenate((C[1], T[1]), axis=0)
    
    #predictions:
    X = SP.linspace(2, x2.max(), 100)[:, SP.newaxis]
    X_g1 = SP.repeat(0, len(X)).reshape(-1, 1)
    X_g2 = SP.repeat(1, len(X)).reshape(-1, 1)

    #hyperparamters
    dim = 1
Example no. 36
    c = sc.sqrt(R**2 + 1)
    x = sc.sqrt(A / (sc.pi * c))
    # Derive the values of c and x from geometric equations (see dissertation method).

    abundance_per_m2 = b_density * M**-0.75  # Calculate the number of individuals in 1 m^2.
    S_r = sc.arange(nrows)
    T_r, T_theta = nrows, ncols
    cell_areas = (sc.pi * c * ((((S_r + 1) * x) / T_r)**2 -
                               ((S_r * x) / T_r)**2)) / T_theta
    cell_abundances = sc.around(abundance_per_m2 * cell_areas,
                                0).astype(sc.int64, copy=False)
    # Using c, x, nrows, and ncols, calculate area and # individuals per cell.

    if area_n_inds_fixed == True:
        cell_areas = sc.repeat(cell_areas[14], nrows)
        cell_abundances = sc.repeat(cell_abundances[14], nrows)

    sample_size = cell_abundances[15]  # *
    # area = # inds?

    ## Per band, measure alpha diversity (mean # spp per cell)
    tmp = sc.zeros((nrows, ncols))
    for k in range(ncols):
        #~ pdb.set_trace()
        for j in range(nrows):

            #~ if (j == 0) & (M == 1000):
            #~ continue
            # Skip the top band for the biggest body mass - it has fewer individuals (30) than the sample size.
Example no. 37
    def pagerank_scipy_patched(self, G, alpha=0.85, personalization=None,
                       max_iter=100, tol=1.0e-6, weight='weight',
                       dangling=None):
        """Return the PageRank of the nodes in the graph.

        PageRank computes a ranking of the nodes in the graph G based on
        the structure of the incoming links. It was originally designed as
        an algorithm to rank web pages.

        Parameters
        ----------
        G : graph
          A NetworkX graph.  Undirected graphs will be converted to a directed
          graph with two directed edges for each undirected edge.

        alpha : float, optional
          Damping parameter for PageRank, default=0.85.

        personalization: dict, optional
           The "personalization vector" consisting of a dictionary with a
           key for every graph node and nonzero personalization value for each
           node. By default, a uniform distribution is used.

        max_iter : integer, optional
          Maximum number of iterations in power method eigenvalue solver.

        tol : float, optional
          Error tolerance used to check convergence in power method solver.

        weight : key, optional
          Edge data key to use as weight.  If None weights are set to 1.

        dangling: dict, optional
          The outedges to be assigned to any "dangling" nodes, i.e., nodes without
          any outedges. The dict key is the node the outedge points to and the dict
          value is the weight of that outedge. By default, dangling nodes are given
          outedges according to the personalization vector (uniform if not
          specified) This must be selected to result in an irreducible transition
          matrix (see notes under google_matrix). It may be common to have the
          dangling dict to be the same as the personalization dict.

        Returns
        -------
        pagerank : dictionary
           Dictionary of nodes with PageRank as value

        Examples
        --------
        >>> G = nx.DiGraph(nx.path_graph(4))
        >>> pr = nx.pagerank_scipy(G, alpha=0.9)

        Notes
        -----
        The eigenvector calculation uses power iteration with a SciPy
        sparse matrix representation.

        This implementation works with Multi(Di)Graphs. For multigraphs the
        weight between two nodes is set to be the sum of all edge weights
        between those nodes.

        See Also
        --------
        pagerank, pagerank_numpy, google_matrix

        References
        ----------
        .. [1] A. Langville and C. Meyer,
           "A survey of eigenvector methods of web information retrieval."
           http://citeseer.ist.psu.edu/713792.html
        .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
           The PageRank citation ranking: Bringing order to the Web. 1999
           http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
        """
        import scipy.sparse

        N = len(G)
        if N == 0:
            return {}

        nodelist = G.nodes()
        M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
                                      dtype=float)
        S = scipy.array(M.sum(axis=1)).flatten()
        S[S != 0] = 1.0 / S[S != 0]
        Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')
        M = Q * M

        # initial vector
        x = scipy.repeat(1.0 / N, N)

        # Personalization vector
        if personalization is None:
            p = scipy.repeat(1.0 / N, N)
        else:
            p = scipy.array([personalization.get(n,0) for n in nodelist],
                            dtype=float)
            p = p / p.sum()

        # Dangling nodes
        if dangling is None:
            dangling_weights = p
        else:
            missing = set(nodelist) - set(dangling)
            if missing:
                raise nx.NetworkXError('Dangling node dictionary must have a value for every node. '
                                       'Missing nodes %s' % missing)
            # Convert the dangling dictionary into an array in nodelist order
            dangling_weights = scipy.array([dangling[n] for n in nodelist],
                                           dtype=float)
            dangling_weights /= dangling_weights.sum()
        is_dangling = scipy.where(S == 0)[0]

        # power iteration: make up to max_iter iterations
        for _ in range(max_iter):
            xlast = x
            x = alpha * (x * M + sum(x[is_dangling]) * dangling_weights) + \
                (1 - alpha) * p
            # check convergence, l1 norm
            err = scipy.absolute(x - xlast).sum()
            if err < N * tol:
                return dict(zip(nodelist, map(float, x)))
        print(err)
        raise nx.NetworkXError('pagerank_scipy: power iteration failed to converge in %d iterations.' % max_iter)
Example no. 38
Position1L_firstSecond_val = sp.delete(
    Position1L_t_val, [t for t in range(2,
                                        sp.shape(Velocity1L_t_val)[1])], 1)
Position2L_firstSecond_val = sp.delete(
    Position2L_t_val, [t for t in range(2,
                                        sp.shape(Velocity1L_t_val)[1])], 1)
Velocity1L_firstSecond_val = sp.delete(
    Velocity1L_t_val, [t for t in range(2,
                                        sp.shape(Velocity1L_t_val)[1])], 1)
Velocity2L_firstSecond_val = sp.delete(
    Velocity2L_t_val, [t for t in range(2,
                                        sp.shape(Velocity1L_t_val)[1])], 1)

dt_Arr_val = sp.delete(
    sp.repeat(d_test['arr_17'][:numSamples, sp.newaxis],
              sp.shape(Velocity1L_t_val)[1],
              axis=1), [t for t in range(2,
                                         sp.shape(Velocity1L_t_val)[1])], 1)

initial_state = sp.dstack(
    (Position1L_firstSecond_val, Velocity1L_firstSecond_val))[:, 0, :]

plotThisX = [initial_state[0, 0]]
plotThisY = [initial_state[0, 1]]
statesCovered = [initial_state]
for i in range(100):
    predictFromThis = sp.array(
        [sp.append(statesCovered[i], dt_Arr_val[0, 0]) for j in range(1)])
    prediction = model.predict(predictFromThis)
    tempXArray = prediction[0, 0]
    tempYArray = prediction[0, 1]
    #
    # Lab Frame Collision Time Series
    #
    #===========================================
    print("Beginning Path Phase: ")
    print(str(time.time() - start_time)+" seconds")

    timeStepOfCollision = 2+1
    timeSteps = 2*(timeStepOfCollision) - 1
    # Dimension (timeSteps)
    t = sp.linspace(0, sp.multiply(2,t_col), timeSteps)

    # Rework dimensions of initial position and velocity matrices during this step

    # Dimension (2(the # of SpatialDimensions), numTrials, timeSteps)
    Position1L_t_pre = sp.repeat(sp.transpose(InitialPosition1L)[:,:,sp.newaxis], timeSteps, axis=2)\
                   + sp.multiply( sp.repeat(sp.transpose(v1L)[:, :, sp.newaxis], timeSteps, axis=2), sp.transpose(t) )
    Position2L_t_pre = sp.repeat(sp.transpose(InitialPosition2L)[:,:,sp.newaxis], timeSteps, axis=2) \
                   + sp.multiply( sp.repeat(sp.transpose(v2L)[:, :, sp.newaxis], timeSteps, axis=2), sp.transpose(t) )
    Position1L_t_post = sp.repeat(Position1L_t_pre[:,:,timeStepOfCollision-1][:,:,sp.newaxis], timeSteps, axis=2) \
                   + sp.multiply( sp.repeat(sp.transpose(v1L_f)[:, :, sp.newaxis], timeSteps, axis=2), sp.transpose(t) )
    Position2L_t_post = sp.repeat(Position2L_t_pre[:,:,timeStepOfCollision-1][:,:,sp.newaxis], timeSteps, axis=2) \
                   + sp.multiply( sp.repeat(sp.transpose(v2L_f)[:, :, sp.newaxis], timeSteps, axis=2), sp.transpose(t) )
    Position1L_t = sp.concatenate((Position1L_t_pre[:,:,:timeStepOfCollision],Position1L_t_post[:,:,1:timeStepOfCollision]),2)
    Position2L_t = sp.concatenate((Position2L_t_pre[:,:,:timeStepOfCollision],Position2L_t_post[:,:,1:timeStepOfCollision]),2)
    Velocity1L_t_pre = sp.repeat(sp.transpose(v1L)[:, :, sp.newaxis], timeSteps, axis=2)
    Velocity2L_t_pre = sp.repeat(sp.transpose(v2L)[:, :, sp.newaxis], timeSteps, axis=2)
    Velocity1L_t_post = sp.repeat(sp.transpose(v1L_f)[:, :, sp.newaxis], timeSteps, axis=2)
    Velocity2L_t_post = sp.repeat(sp.transpose(v2L_f)[:, :, sp.newaxis], timeSteps, axis=2)
    Velocity1L_t = sp.concatenate((Velocity1L_t_pre[:,:,:timeStepOfCollision],Velocity1L_t_post[:,:,timeStepOfCollision:]),2)
    Velocity2L_t = sp.concatenate((Velocity2L_t_pre[:,:,:timeStepOfCollision],Velocity2L_t_post[:,:,timeStepOfCollision:]),2)
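
The pre-collision position arrays above are just straight-line motion, x(t) = x0 + v*t, vectorised over trials with repeat and broadcasting. A stand-alone sketch of that pattern, with made-up shapes and values (numpy only):

import numpy as np

timeSteps = 5
t = np.linspace(0.0, 1.0, timeSteps)         # (timeSteps,)
x0 = np.array([[0.0, 0.0], [1.0, -1.0]]).T   # (2 spatial dims, 2 trials)
v = np.array([[1.0, 2.0], [0.5, 0.0]]).T     # (2 spatial dims, 2 trials)

# Repeat initial positions and velocities along a new time axis, then add v*t by broadcasting.
pos_t = np.repeat(x0[:, :, np.newaxis], timeSteps, axis=2) \
      + np.repeat(v[:, :, np.newaxis], timeSteps, axis=2) * t
print(pos_t.shape)  # (2, 2, timeSteps)
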
Esempio n. 40
0
#	var_filtered = var[var_info['conv']] # filter out genes for which vd has not converged


	# get corrected expression levels
	Ycorr = sclvm.getCorrectedExpression()
	
	# fit lmm without correction
	pv0,beta0,info0 = sclvm.fitLMM(K=None,i0=i0,i1=i1,verbose=True)
	# fit lmm with correction
	pv,beta,info = sclvm.fitLMM(K=Kcc,i0=i0,i1=i1,verbose=True)
	
	#write to file
	count = 0
	for i in xrange(i0,i1):
		gene_id = 'gene_%d' % (i)
		out_group = fout.create_group(gene_id)
		RV = {}
		RV['pv0'] = pv0[count,:]
		RV['pv'] = pv[count,:]
		RV['beta'] = beta[count,:]
		RV['beta0'] = beta0[count,:]
		RV['vars'] = var[count,:]
		RV['varsnorm'] = var[count,:]
		RV['is_converged']=SP.repeat(var_info['conv'][count]*1,5)
		RV['Ycorr'] = Ycorr[:,count]
		dumpDictHdf5(RV,out_group)
		count+=1
	fout.close()
		 
	
Esempio n. 41
0
def initialise_community(nrows, ncols, b_density, M, c, x, fix_abundance=False, species_richness=1, max_diversity=False):
	"""
	Sets up:
	- the simulated community (a 3D array)
	- its initial species richness (number of unique integers)
	- abundance of positions (number of individuals per position - the length of the array's 3rd dimension).
	"""
	#~ pdb.set_trace()
	
	abundance_per_m2 = b_density * M**-0.75
	# Calculate the number of individuals in 1 m^2.
	
	S_r = sc.arange(nrows)
	T_r, T_theta = nrows, ncols
	cell_areas = (sc.pi * c * ((((S_r + 1) * x) / T_r)**2 - ((S_r * x) / T_r)**2)) / T_theta
	cell_abundances = sc.around(abundance_per_m2 * cell_areas, 0).astype(sc.int64, copy=False)
	# For each altitudinal band, calculate the number of individuals in, and area of, a cell.
	# Amount of individuals corresponds to array size, so round abundance to the nearest whole number.
	# A mountain base covers more area than the top. I use a cone's surface as a model of a mountain, but, in silico, I represent the surface as a square array. Each row in the array is an altitudinal band. Going up the mountain, each [row, column] position in the array represents an increasingly narrow area.
	
	if fix_abundance == True:
		community = sc.ones((nrows, ncols, cell_abundances[14]), dtype=sc.int64)
		# fix abundances - use that of middle altitudinal band
	else:
		max_cell_abundance = sc.amax(cell_abundances)
		community = sc.zeros((nrows, max_cell_abundance), dtype=sc.int64)
		for i in range(nrows):
			community[i, :cell_abundances[i]] = 1
		community = sc.repeat(community, ncols, axis=0).reshape(nrows, ncols, max_cell_abundance)
		# Make a 2D array of zeros. Each row is an altitudinal band. The 2nd dimension's length is the max cell abundance.
		# For each row, change the first x items to 1; x is the cell abundance in the band.
		# Replicate each row; the number of replicates is the number of positions along a band (ncols).
		# `sc.repeat(a, repeats, axis)` repeats array elements. `repeats` - number of repeats per element. `axis` - axis along which to repeat values. Returns a flat array.
		# -`sc.reshape` reshapes an array.
	
	#~ pdb.set_trace()
	community_size = sc.flatnonzero(community).size
	# Count non-zero items - individuals. (The total differs slightly from `density`, as the function rounds cell abundances.)
	# `sc.flatnonzero(a)` returns indices of non-zero items (in the flattened version of a).
	# `sc.size` returns the number of elements.
	
	if species_richness > community_size:
		sys.exit("The number of species (`species_richness`) cannot exceed number of individuals (the system's size, which is `nrows` * `ncols` * `density`).")
		# Exit the program and print an error message.
	if max_diversity == True:
		community.ravel()[sc.flatnonzero(community)] = sc.arange(community_size) + 1
		# Generate an initial state with the max number of species for the community size.
		# `sc.flatnonzero` - don't change the value of zeros - these aren't individuals.
		# '+ 1' as 0 is not a species identity.
	elif species_richness > 1:
		species = sc.arange(species_richness) + 1
		# Make a 1D array of species identities (integers).
		
		community.ravel()[sc.flatnonzero(community)[:species_richness]] = species
		# There must be at least one individual per species.
		
		community.ravel()[sc.flatnonzero(community)[species_richness:]] = sc.random.choice(species, size=community_size - species_richness, replace=True)
		# The remaining individuals can take any species identity. I.e., each species has random abundance.
	
	community.ravel()[sc.flatnonzero(community)] = sc.random.permutation(community.ravel()[sc.flatnonzero(community)])
	# Randomly permute the non-zero items of `community` (i.e. keep zeros in place), so an individual is equally likely to take any species identity.
	# Without this line, the first `species_richness` individuals always will be different species.
	
	return community, cell_areas, cell_abundances
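
A small stand-alone illustration of the zero-padding / repeat / reshape idiom used above to build the community array; the band abundances here are made up, and only numpy is assumed:

import numpy as np

nrows, ncols = 4, 3
cell_abundances = np.array([5, 4, 2, 1])   # individuals per cell in each band (illustrative)
max_cell_abundance = cell_abundances.max()

band = np.zeros((nrows, max_cell_abundance), dtype=np.int64)
for i in range(nrows):
    band[i, :cell_abundances[i]] = 1       # first x entries of each row are individuals

# Replicate every band ncols times, then fold the copies into a middle axis.
community = np.repeat(band, ncols, axis=0).reshape(nrows, ncols, max_cell_abundance)
print(community.shape)                                # (4, 3, 5)
print(community[0, 0].sum(), community[3, 2].sum())   # 5 and 1 individuals respectively
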
Esempio n. 42
0
def make_nested_dispersal_map(destination, shape, M, T, B_0, alpha, E, max_revolutions, x, c, birth_map, fix_band_radius=False):
	"""Calculates the probability of dispersing to a given cell, from every cell in the simulated landscape. Factors in each position's birth rate."""
	#~ pdb.set_trace()
	
	nrows, ncols = shape
	indices_axis0 = sc.arange(nrows).reshape(-1, 1)
	indices_axis1 = sc.arange(ncols)
	# A 2D array has two axes: axis 0 runs vertically across rows, axis 1 runs horizontally across columns.
	# `reshape(-1, 1)`
		# So the program can use broadcasting to vectorise operations like outer product, it occasionally swaps the axes of 1D arrays.
		# (Items in these arrays are often data, such as temperature, about rows in the simulated system).
		# `-1` - the dimension's length is inferred from the array's size and other dimensions (i.e., it is as long as needed).
	
	y = metabolic_scaling(M, T, B_0, alpha, E)
	# no `reshape(-1, 1)`/`[:, sc.newaxis]` as done for temperatures
	
	## Theta displacements (horizontally across columns)
	radii = (sc.arange(nrows) + 0.5).reshape(-1, 1)
	if fix_band_radius == True:
		radii = sc.repeat(radii[14], nrows).reshape(-1, 1)
	# To remove the effect of area on dispersal, fix the radius of altitudinal bands (the cone becomes a cylinder).
	# use radius of middle band
	# middle of 30 rows is index 14 (middle of indices 0-29) - must change, if change # rows
	
	mean_radial_position = (radii + radii[destination[0]]) / 2 # (S_r + 0.5 + E_r + 0.5) / 2
	
	# Displacements along a band wrap around the cone, so count the steps to the destination column in both directions, modulo the number of columns.
	displacements_negative_direction = (indices_axis1 - destination[1]) % ncols # going left
	displacements_positive_direction = (destination[1] - indices_axis1) % ncols # right
	
	distance_of_x_revolutions = sc.arange(max_revolutions + 1).reshape(-1, 1) * ncols
	distances_neg_dir = displacements_negative_direction + distance_of_x_revolutions # broadcasting - result is 2D
	distances_pos_dir = displacements_positive_direction + distance_of_x_revolutions
	
	probabilities_theta_displacements = sc.zeros((nrows, ncols))
	for i in range(nrows):
		probabilities_neg_displacements = probability_of_theta_distance(-distances_neg_dir, mean_radial_position[i], x, nrows, ncols, y[i]).sum(axis=0)
		# Note '-'.
		# `distances_neg_dir` and output are 2D arrays -> sum along axis 0 -> 1D
		
		probabilities_pos_displacements = probability_of_theta_distance(distances_pos_dir, mean_radial_position[i], x, nrows, ncols, y[i]).sum(axis=0)
		probabilities_theta_displacements[i] = probabilities_neg_displacements + probabilities_pos_displacements
	
	## r displacements (vertically across rows)
	#~ pdb.set_trace()
	n_r = indices_axis0 - destination[0] # r displacements
	T_r = nrows
	variate = (n_r * c * x) / (T_r * y)
	probabilities_r_displacements = stats.norm.pdf(variate)
	
	
	nested_dispersal_map = probabilities_theta_displacements * probabilities_r_displacements
	nested_dispersal_map_birth = nested_dispersal_map * birth_map
	# Multiply arrays element-wise.
	
	nested_dispersal_map_birth = nested_dispersal_map_birth / nested_dispersal_map_birth.sum()
	nested_dispersal_map = nested_dispersal_map / nested_dispersal_map.sum()
	# Re-normalise so probabilities sum to 1.
	# `sc.sum` sums array elements.
	
	return nested_dispersal_map_birth.ravel(), nested_dispersal_map.ravel()
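
The wrap-around displacement bookkeeping above can be checked in isolation. A minimal sketch, with toy values and only numpy assumed, of the modulo displacements plus the extra-revolution distances obtained by broadcasting against a reshape(-1, 1) column:

import numpy as np

ncols, max_revolutions = 8, 2
destination_col = 2
cols = np.arange(ncols)

disp_neg = (cols - destination_col) % ncols   # steps going one way around the band
disp_pos = (destination_col - cols) % ncols   # steps going the other way

# Column vector of full-revolution offsets; broadcasting yields one row per revolution count.
revs = np.arange(max_revolutions + 1).reshape(-1, 1) * ncols
distances_neg = disp_neg + revs               # shape (max_revolutions + 1, ncols)
print(distances_neg)
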
Esempio n. 43
0
def lagdict2ionocont(DataLags, NoiseLags, sensdict, simparams, time_vec):
    """This function will take the data and noise lags and create an instance of the
    Ionocontanier class. This function will also apply the summation rule to the lags.
    Inputs
    DataLags - A dictionary holding the data lag estimates ('ACF') and pulse counts ('Pulses').
    NoiseLags - A dictionary with the same structure for the noise-only lags.
    sensdict - Dictionary of sensor parameters.
    simparams - Dictionary of simulation parameters.
    time_vec - Vector of times for the measurements."""
    # Pull in Location Data
    angles = simparams['angles']
    ang_data = sp.array([[iout[0], iout[1]] for iout in angles])
    rng_vec = simparams['Rangegates']
    n_samps = len(rng_vec)
    # pull in other data
    pulse = simparams['Pulse']
    p_samps = len(pulse)
    pulsewidth = p_samps * sensdict['t_s']
    txpower = sensdict['Pt']
    if sensdict['Name'].lower() in ['risr', 'pfisr', 'risr-n']:
        Ksysvec = sensdict['Ksys']
    else:

        beamlistlist = sp.array(simparams['outangles']).astype(int)
        inplist = sp.array([i[0] for i in beamlistlist])
        Ksysvec = sensdict['Ksys'][inplist]
        ang_data_temp = ang_data.copy()
        ang_data = sp.array(
            [ang_data_temp[i].mean(axis=0) for i in beamlistlist])

    sumrule = simparams['SUMRULE']
    rng_vec2 = simparams['Rangegatesfinal']
    Nrng2 = len(rng_vec2)
    minrg = p_samps - 1 + sumrule[0].min()
    maxrg = Nrng2 + minrg

    # Copy the lags
    lagsData = DataLags['ACF'].copy()
    # Set up the constants for the lags so they are now
    # in terms of density fluctuations.
    angtile = sp.tile(ang_data, (Nrng2, 1))
    rng_rep = sp.repeat(rng_vec2, ang_data.shape[0], axis=0)
    coordlist = sp.zeros((len(rng_rep), 3))
    [coordlist[:, 0], coordlist[:, 1:]] = [rng_rep, angtile]
    (Nt, Nbeams, Nrng, Nlags) = lagsData.shape

    # make a range average to equalize out the contributions from each gate

    plen2 = int(sp.floor(float(p_samps - 1) / 2))
    samps = sp.arange(0, p_samps, dtype=int)

    rng_ave = sp.zeros((Nrng, p_samps))

    for isamp in range(plen2, Nrng + plen2):
        for ilag in range(p_samps):
            toplag = int(sp.floor(float(ilag) / 2))
            blag = int(sp.ceil(float(ilag) / 2))
            if toplag == 0:
                sampsred = samps[blag:]
            else:
                sampsred = samps[blag:-toplag]
            cursamps = isamp - sampsred

            keepsamps = sp.logical_and(cursamps >= 0, cursamps < Nrng)
            cursamps = cursamps[keepsamps]
            rng_samps = rng_vec[cursamps]**2 * 1e6
            keepsamps2 = rng_samps > 0
            if keepsamps2.sum() == 0:
                continue
            rng_samps = rng_samps[keepsamps2]
            rng_ave[isamp - plen2, ilag] = 1. / (sp.mean(1. / (rng_samps)))
    rng_ave_temp = rng_ave.copy()
    if simparams['Pulsetype'].lower() == 'barker':
        rng_ave_temp = rng_ave[:, 0][:, sp.newaxis]
    # rng_ave = rng_ave[int(sp.floor(plen2)):-int(sp.ceil(plen2))]
    # rng_ave = rng_ave[minrg:maxrg]
    rng3d = sp.tile(rng_ave_temp[sp.newaxis, sp.newaxis, :, :],
                    (Nt, Nbeams, 1, 1))
    ksys3d = sp.tile(Ksysvec[sp.newaxis, :, sp.newaxis, sp.newaxis],
                     (Nt, 1, Nrng, Nlags))

    # rng3d = sp.tile(rng_ave[:, sp.newaxis, sp.newaxis, sp.newaxis], (1, Nlags, Nt, Nbeams))
    # ksys3d = sp.tile(Ksysvec[sp.newaxis, sp.newaxis, sp.newaxis, :], (Nrng2, Nlags, Nt, 1))
    radar2acfmult = rng3d / (pulsewidth * txpower * ksys3d)
    pulses = sp.tile(DataLags['Pulses'][:, :, sp.newaxis, sp.newaxis],
                     (1, 1, Nrng, Nlags))
    time_vec = time_vec[:Nt]
    # Divide lags by the number of pulses
    lagsData = lagsData / pulses
    # Set up the noise lags and divide out the noise.
    lagsNoise = NoiseLags['ACF'].copy()
    lagsNoise = sp.mean(lagsNoise, axis=2)
    pulsesnoise = sp.tile(NoiseLags['Pulses'][:, :, sp.newaxis], (1, 1, Nlags))
    lagsNoise = lagsNoise / pulsesnoise
    lagsNoise = sp.tile(lagsNoise[:, :, sp.newaxis, :], (1, 1, Nrng, 1))

    # multiply the data and the sigma by inverse of the scaling from the radar
    lagsData = lagsData * radar2acfmult
    lagsNoise = lagsNoise * radar2acfmult
    # Apply summation rule
    # lags transposed from (time,beams,range,lag)to (range,lag,time,beams)
    lagsData = sp.transpose(lagsData, axes=(2, 3, 0, 1))
    lagsNoise = sp.transpose(lagsNoise, axes=(2, 3, 0, 1))
    lagsDatasum = sp.zeros((Nrng2, Nlags, Nt, Nbeams), dtype=lagsData.dtype)
    lagsNoisesum = sp.zeros((Nrng2, Nlags, Nt, Nbeams), dtype=lagsNoise.dtype)

    for irngnew, irng in enumerate(sp.arange(minrg, maxrg)):
        for ilag in range(Nlags):
            lsumtemp = lagsData[irng + sumrule[0, ilag]:irng +
                                sumrule[1, ilag] + 1, ilag].sum(axis=0)
            lagsDatasum[irngnew, ilag] = lsumtemp
            nsumtemp = lagsNoise[irng + sumrule[0, ilag]:irng +
                                 sumrule[1, ilag] + 1, ilag].sum(axis=0)
            lagsNoisesum[irngnew, ilag] = nsumtemp
    # subtract out noise lags
    lagsDatasum = lagsDatasum - lagsNoisesum

    # Put everything in a parameter list
    Paramdata = sp.zeros((Nbeams * Nrng2, Nt, Nlags), dtype=lagsData.dtype)
    # Put everything in a parameter list
    # transpose from (range,lag,time,beams) to (beams,range,time,lag)
    # lagsDatasum = lagsDatasum*radar2acfmult
    # lagsNoisesum = lagsNoisesum*radar2acfmult
    lagsDatasum = sp.transpose(lagsDatasum, axes=(3, 0, 2, 1))
    lagsNoisesum = sp.transpose(lagsNoisesum, axes=(3, 0, 2, 1))

    # multiply the data and the sigma by inverse of the scaling from the radar
    # lagsDatasum = lagsDatasum*radar2acfmult
    # lagsNoisesum = lagsNoisesum*radar2acfmult

    # Calculate a variance using equation 2 from Hysell's 2008 paper. Don't use the full covariance matrix because it is assumed to be nearly diagonal.
    # Get the covariance matrix
    pulses_s = sp.transpose(pulses, axes=(1, 2, 0, 3))[:, :Nrng2]
    Cttout = makeCovmat(lagsDatasum, lagsNoisesum, pulses_s, Nlags)

    Paramdatasig = sp.zeros((Nbeams * Nrng2, Nt, Nlags, Nlags),
                            dtype=Cttout.dtype)

    curloc = 0
    for irng in range(Nrng2):
        for ibeam in range(Nbeams):
            Paramdata[curloc] = lagsDatasum[ibeam, irng].copy()
            Paramdatasig[curloc] = Cttout[ibeam, irng].copy()
            curloc += 1
    ionodata = IonoContainer(coordlist,
                             Paramdata,
                             times=time_vec,
                             ver=1,
                             paramnames=sp.arange(Nlags) * sensdict['t_s'])
    ionosigs = IonoContainer(
        coordlist,
        Paramdatasig,
        times=time_vec,
        ver=1,
        paramnames=sp.arange(Nlags**2).reshape(Nlags, Nlags) * sensdict['t_s'])
    return (ionodata, ionosigs)
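
For reference, the coordinate list near the top of this function pairs every final range gate with every beam direction through tile and repeat. A toy, self-contained version of that pattern (numpy only, made-up values):

import numpy as np

rng_vec2 = np.array([100.0, 150.0, 200.0])        # final range gates (illustrative)
ang_data = np.array([[0.0, 30.0], [10.0, 45.0]])  # (azimuth, elevation) per beam (illustrative)

angtile = np.tile(ang_data, (len(rng_vec2), 1))           # beam angles repeated for every range
rng_rep = np.repeat(rng_vec2, ang_data.shape[0], axis=0)  # each range repeated once per beam

coordlist = np.zeros((len(rng_rep), 3))
coordlist[:, 0] = rng_rep
coordlist[:, 1:] = angtile
print(coordlist)  # one (range, az, el) row per range-gate/beam pair
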
#                    [t for t in range(1,sp.shape(Velocity1L_t)[1])],1)
# radius_Arr= sp.delete( sp.repeat(d_test['arr_19'][:numSamples,sp.newaxis],sp.shape(Velocity1L_t)[1],axis=1),
#                    [t for t in range(1,sp.shape(Velocity1L_t)[1])],1)
Position1L_first = sp.delete(Position1L_t,
                             [t for t in range(1,
                                               sp.shape(Position1L_t)[1])], 1)
Position2L_first = sp.delete(Position2L_t,
                             [t for t in range(1,
                                               sp.shape(Position2L_t)[1])], 1)
Velocity1L_first = sp.delete(Velocity1L_t,
                             [t for t in range(1,
                                               sp.shape(Velocity1L_t)[1])], 1)
Velocity2L_first = sp.delete(Velocity2L_t,
                             [t for t in range(1,
                                               sp.shape(Velocity2L_t)[1])], 1)
a = sp.array(sp.repeat(d_test['arr_15'][:numSamples, sp.newaxis], T, axis=1))
# m1_Arr= sp.delete( sp.repeat(d_test['arr_15'][:numSamples,sp.newaxis],sp.shape(Velocity1L_t)[1],axis=1),
#                    [t for t in range(1,sp.shape(Velocity1L_t)[1]-1)],1)
# m2_Arr= sp.delete( sp.repeat(d_test['arr_16'][:numSamples,sp.newaxis],sp.shape(Velocity1L_t)[1],axis=1),
#                    [t for t in range(1,sp.shape(Velocity1L_t)[1]-1)],1)
# dt_Arr=sp.repeat(d_train['arr_17'][:numSamples,sp.newaxis],T,axis=1)
# E_i=d_train['arr_4'][:numSamples]
# E_f=d_train['arr_5'][:numSamples]
# p_x_i=d_train['arr_6'][:numSamples]
# p_x_f=d_train['arr_7'][:numSamples]
# p_y_i=d_train['arr_8'][:numSamples]
# p_y_f=d_train['arr_9'][:numSamples]

Position1L_t_val = sp.transpose(d_test['arr_11'][:, :numSamples, :],
                                (1, 2, 0))  # (samples, timesteps, features)
Position2L_t_val = sp.transpose(d_test['arr_12'][:, :numSamples, :], (1, 2, 0))
Esempio n. 45
0
    def fit_update(self, param):
        """
        This function computes the parsimonious HDDA model from the empirical estimates obtained with fit_init
        """
        ## Get parameters
        C = len(self.ni)
        n, d = sp.sum(self.ni).astype(int), self.mean[0].size
        th = param['th']

        ## Estimation of the signal subspace
        if self.model in ('M2', 'M4', 'M6',
                          'M8'):  # For common size subspace models
            L = linalg.eigh(
                self.W, eigvals_only=True
            )  # Compute intrinsic dimension on the whole data set
            idx = L.argsort()[::-1]
            L = L[idx]
            L[L < EPS] = EPS  # Check for numerical errors
            dL, p = sp.absolute(
                sp.diff(L)
            ), 1  # To take into account python broadcasting a[:p] = a[0]...a[p-1]
            dL /= dL.max()
            while sp.any(dL[p:] > th):
                p += 1
            minDim = int(min(min(self.ni), d))
            # Check if p >= ni-1 or d-1
            if p < minDim - 1:
                self.pi = [p for c in xrange(C)]
            else:
                self.pi = [(minDim - 2) for c in xrange(C)]

        elif self.model in ('M1', 'M3', 'M5',
                            'M7'):  # For specific size subspace models
            for c in xrange(C):
                # Scree test
                dL, pi = sp.absolute(sp.diff(self.L[c])), 1
                dL /= dL.max()
                while sp.any(dL[pi:] > th):
                    pi += 1
                self.pi.append(pi)
            # Check if pi >= ni-1 or d-1
            self.pi = [
                sPI if sPI < int(min(sNI, d) - 1) else int(min(sNI, d) - 2)
                for sPI, sNI in zip(self.pi, self.ni)
            ]

        ## Estim signal part
        self.a = [sL[:sPI] for sL, sPI in zip(self.L, self.pi)]
        if self.model in ('M5', 'M6', 'M7', 'M8'):
            self.a = [sp.repeat(sA[:].mean(), sA.size) for sA in self.a]

        ## Estim noise term
        if self.model in ('M1', 'M2', 'M5', 'M6'):  # Noise free
            self.b = [(sT - sA.sum()) / (d - sPI)
                      for sT, sA, sPI in zip(self.trace, self.a, self.pi)]
            # Check for very small value of b
            self.b = [b if b > EPS else EPS for b in self.b]

        elif self.model in ('M3', 'M4', 'M7', 'M8'):  # Noise common
            # Estimation of b
            denom = d - sp.sum(
                [sPR * sPI for sPR, sPI in zip(self.prop, self.pi)])
            num = sp.sum([
                sPR * (sT - sA.sum())
                for sPR, sT, sA in zip(self.prop, self.trace, self.a)
            ])

            # Check for very small values
            if num < EPS:
                self.b = [EPS for i in xrange(C)]
            elif denom < EPS:
                self.b = [1 / EPS for i in xrange(C)]
            else:
                self.b = [num / denom for i in xrange(C)]

        ## Compute remaining parameters
        # Precompute logdet
        self.logdet = [(sp.log(sA).sum() + (d - sPI) * sp.log(sB))
                       for sA, sPI, sB in zip(self.a, self.pi, self.b)]
        # Update the Q matrices
        if n >= d:
            self.Q = [sQ[:, :sPI] for sQ, sPI in zip(self.Q, self.pi)]
        else:
            self.Q = [
                sp.dot(sX.T, sQ[:, :sPI]) / sp.sqrt(sL[:sPI])
                for sX, sQ, sPI, sL in zip(self.X, self.Q, self.pi, self.L)
            ]

        ## Compute the number of parameters of the model
        self.q = C * d + (C - 1) + sum(
            [sPI * (d - (sPI + 1) / 2) for sPI in self.pi])
        if self.model in ('M1', 'M3', 'M5', 'M7'):  # Number of noise subspaces
            self.q += C
        elif self.model in ('M2', 'M4', 'M6', 'M8'):
            self.q += 1
        if self.model in ('M1', 'M2'):  # Size of signal subspaces
            self.q += sum(self.pi) + C
        elif self.model in ('M3', 'M4'):
            self.q += sum(self.pi) + 1
        elif self.model in ('M5', 'M6'):
            self.q += 2 * C
        elif self.model in ('M7', 'M8'):
            self.q += C + 1
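
The scree test used above (normalised gaps between consecutive eigenvalues compared against a threshold) can be run in a few stand-alone lines. A sketch with made-up eigenvalues, assuming only numpy:

import numpy as np

L = np.array([9.0, 5.0, 2.5, 0.4, 0.35, 0.3])   # eigenvalues sorted in decreasing order (illustrative)
th = 0.1                                        # plays the role of param['th'] above

dL, p = np.absolute(np.diff(L)), 1              # gaps between consecutive eigenvalues
dL /= dL.max()
while np.any(dL[p:] > th):                      # grow the subspace until the remaining gaps are small
    p += 1
print(p)  # estimated signal-subspace dimension (3 for these values)
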
Esempio n. 46
0
def initFA(Y, terms, I, gene_ids=None, nHidden=3, nHiddenSparse = 0,pruneGenes=True, FPR=0.99, FNR=0.001, \
            noise='gauss', minGenes=20, do_preTrain=True, nFix=None):
    """Initialise the f-scLVM factor analysis model.

    Three inputs are required: first, a gene expression matrix `Y` containing normalised count values of `N` cells and `G`
    variable genes in log-space; second, a vector `terms` containing the names of all annotated gene sets (corresponding to annotated factors);
    and third, a binary indicator matrix `I` linking `G` genes to `K` terms by indicating which genes are annotated to each factor.
    A variety of options can be specified as described below.

    Args:
        Y (array_like): Matrix of normalised count values of `N` cells 
                                 and `G` variable genes in log-space.
                                 Dimension (:math:`N\\times G`).
        terms    (vector_like): Names of `K` annotated gene sets. Dimension
                                 (:math:`K\\times 0`).
        I           (array_like): Indicator matrix specifying
                                 whether a gene is annotated to a specific factor.
                                 Dimension (:math:`G\\times K`).
        gene_ids   (array_like): Gene identifiers (optional, defaults to None)
        FNR             (float): False negative rate of annotations.
                                 Defaults to 0.001
        FPR             (float): False positive rate of annotations.
                                 Defaults to 0.99                                 
        nHidden           (int): Number of unannotated dense factors. Defaults to 3.
        nHiddenSparse       (int): Number of unannotated sparse factors. Defaults to 0.
                                 This value should be changed to e.g. 5 if the diagnostics fail.
        pruneGenes         (bool): prune genes that are not annotated to at least one factor. This option allows fast inference and
                                   should be set to `True` either if the 
                                   key objective is to rank factors or if the annotations cover all genes of interest.  
                                   Defaults to `True`.
        noise              (str): Specifies the observation noise model. Should be either `'gauss'`,`'hurdle'` or `'poisson'`.
                                 Defaults to `gauss`.                                      
        minGenes          (int): minimum number of genes required per term to retain it  
                                 Defaults to `20`.  
        do_preTrain      (bool): Boolean switch indicating whether pre-training should be used to establish the initial 
                                update order. Can be set to `False` for very large datasets.
                                Defaults to `True`                                                                 

    Returns:
        A :class:`fscLVM.CSparseFA` instance.
    """

    #check for consistency of input parameters
    [num_cells, num_genes] = Y.shape
    num_terms = I.shape[1]

    assert I.shape[
        0] == num_genes, 'annotation needs to be matched to gene input dimension'

    assert noise in ['gauss', 'hurdle', 'poisson'], 'invalid noise model'
    assert 0 < FNR < 1, 'FNR is required to be between 0 and 1'
    assert 0 < FPR < 1, 'FPR is required to be between 0 and 1'

    #make sure the annotation is boolean
    I = (I > .5)
    #. filter annotation by min number of required genes
    Iok = I.sum(axis=0) > minGenes
    terms = terms[Iok]
    I = I[:, Iok]
    num_terms = I.shape[1]

    #create initial pi matrix, which corresponds to the effective prior probability of an annotated link
    pi = SP.zeros([num_genes, num_terms], dtype='float')
    #default FNR
    pi[:] = FNR
    #active links
    pi[I] = FPR

    #prune genes?
    if pruneGenes == True:
        idx_genes = SP.sum(I, 1) > 0
        Y = Y[:, idx_genes]
        pi = pi[idx_genes, :]
        if not (gene_ids is None):
            gene_ids = SP.array(gene_ids)[idx_genes]
    else:
        idx_genes = SP.arange(Y.shape[1])

    #center data for Gaussian observation noise
    if noise == 'gauss':
        Y -= SP.mean(Y, 0)

    #include hidden variables
    if nHiddenSparse > 0:
        piSparse = SP.ones((Y.shape[1], nHiddenSparse)) * .01
        idxVar = SP.argsort(-Y.var(0))
        for iH in range(piSparse.shape[1]):
            idxOnH = SP.random.choice(idxVar[:100], 20, replace=False)
            piSparse[idxOnH, iH] = 0.99
        pi = SP.hstack([piSparse, pi])
        thiddenSparse = SP.repeat('hiddenSparse', nHiddenSparse)
        termsHiddnSparse = [
            '%s%s' % t for t in zip(thiddenSparse, SP.arange(nHiddenSparse))
        ]
        terms = SP.hstack([termsHiddnSparse, terms])
        num_terms += nHiddenSparse

    thidden = SP.repeat('hidden', nHidden)
    termsHidden = ['%s%s' % t for t in zip(thidden, SP.arange(nHidden))]
    terms = SP.hstack([termsHidden, terms])

    pi = SP.hstack([SP.ones((Y.shape[1], nHidden)) * .99, pi])
    num_terms += nHidden

    #mean term for non-Gaussian noise models
    if noise != 'gauss':
        terms = SP.hstack(['bias', terms])
        pi = SP.hstack([SP.ones((Y.shape[1], 1)) * (1. - 1e-10), pi])
        num_terms += 1

    if do_preTrain == True:
        Ilabel = preTrain(Y, terms, pi, noise=noise, nFix=nFix)
        pi = pi[:, Ilabel]
        terms = terms[Ilabel]

    init = {'init_data': CGauss(Y), 'Pi': pi, 'terms': terms, 'noise': noise}
    if not gene_ids is None:
        gene_ids = SP.array(gene_ids)
    FA = fscLVM.CSparseFA(components=num_terms,
                          idx_genes=idx_genes,
                          gene_ids=gene_ids)
    FA.saveInit = True

    FA.init(**init)

    return FA
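
The prior matrix `pi` assembled above is simply the boolean annotation matrix mapped to two probabilities. A tiny stand-alone sketch of that step, with a made-up annotation and only numpy assumed:

import numpy as np

FPR, FNR = 0.99, 0.001
I = np.array([[1, 0],
              [1, 1],
              [0, 0],
              [0, 1]]) > 0.5   # 4 genes x 2 terms, boolean annotation (illustrative)

pi = np.zeros(I.shape, dtype='float')
pi[:] = FNR    # unannotated gene/term pairs get the low prior probability
pi[I] = FPR    # annotated links get the high prior probability
print(pi)
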
Esempio n. 47
0
# Output is a sinusoid varying between 1 and 2
# with the given initial condition x0.
#
# x = 3/2 + 1/2 sin( 4 pi t + arcsin(2x0 - 3) ) + N(0,0.1)
#

import scipy

numSamples = 100
noiseStd = 0.1

scipy.random.seed(10000)
times = scipy.random.rand(numSamples)
noises = scipy.random.normal(scale=noiseStd, size=numSamples)
x0s = 1. + scipy.random.rand(numSamples)
xs = 1.5 + 0.5 * scipy.sin( 4.*scipy.pi*times \
                            + scipy.arcsin(2.*x0s - 3.) )
xsNoisy = xs + noises

# in the output file,
# column 1: initial condition
# column 2: measurement time
# column 3: measurement value
# column 4: measurement uncertainty (standard deviation)

noiseStds = scipy.repeat(noiseStd, numSamples)

data = scipy.column_stack((x0s, times, xsNoisy, noiseStds))  # one row per sample

scipy.savetxt('simpleExample_data.txt', data)
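
To check the four-column layout described in the comments above, the file can be read back column by column; a short sketch, assuming the script above has already written simpleExample_data.txt (numpy's loadtxt is used here, which accepts the whitespace-delimited format that savetxt writes):

import numpy as np

x0s_in, times_in, values_in, stds_in = np.loadtxt('simpleExample_data.txt', unpack=True)
print(x0s_in.shape, stds_in[0])  # numSamples rows; the last column is the constant noise std
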
Esempio n. 48
0
    def solve(self, wls):
        """Anisotropic solver.

        INPUT
        wls = wavelengths to scan (any asarray-able object).

        OUTPUT
        self.DEO1, self.DEE1, self.DEO3, self.DEE3 = power reflected
        and transmitted.
        """

        self.wls = S.atleast_1d(wls)

        LAMBDA = self.LAMBDA
        n = self.n
        multilayer = self.multilayer
        alpha = self.alpha
        delta = self.delta
        psi = self.psi
        phi = self.phi

        nlayers = len(multilayer)
        i = S.arange(-n, n + 1)
        nood = 2 * n + 1
        hmax = nood - 1

        DEO1 = S.zeros((nood, self.wls.size))
        DEO3 = S.zeros_like(DEO1)
        DEE1 = S.zeros_like(DEO1)
        DEE3 = S.zeros_like(DEO1)

        c1 = S.array([1., 0., 0.])
        c3 = S.array([1., 0., 0.])
        # grating on the xy plane
        K = 2 * pi / LAMBDA * \
            S.array([S.sin(phi), 0., S.cos(phi)], dtype=complex)
        dirk1 = S.array([S.sin(alpha) * S.cos(delta),
                         S.sin(alpha) * S.sin(delta),
                         S.cos(alpha)])

        # D polarization vector
        u = S.array([S.cos(psi) * S.cos(alpha) * S.cos(delta) - S.sin(psi) * S.sin(delta),
                     S.cos(psi) * S.cos(alpha) * S.sin(delta) +
                     S.sin(psi) * S.cos(delta),
                     -S.cos(psi) * S.sin(alpha)])

        kO1i = S.zeros((3, i.size), dtype=complex)
        kE1i = S.zeros_like(kO1i)
        kO3i = S.zeros_like(kO1i)
        kE3i = S.zeros_like(kO1i)

        Mp = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex)
        M = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex)

        dlt = (i == 0).astype(int)

        for iwl, wl in enumerate(self.wls):

            nO1 = nE1 = multilayer[0].mat.n(wl).item()
            nO3 = nE3 = multilayer[-1].mat.n(wl).item()

            # wavevectors
            k = 2 * pi / wl

            eps1 = S.diag(S.asarray([nE1, nO1, nO1]) ** 2)
            eps3 = S.diag(S.asarray([nE3, nO3, nO3]) ** 2)

            # ordinary wave
            abskO1 = k * nO1
            # abskO3 = k * nO3
            # extraordinary wave
            # abskE1 = k * nO1 *nE1 / S.sqrt(nO1**2 + (nE1**2 - nO1**2) * S.dot(-c1, dirk1)**2)
            # abskE3 = k * nO3 *nE3 / S.sqrt(nO3**2 + (nE3**2 - nO3**2) * S.dot(-c3, dirk1)**2)

            k1 = abskO1 * dirk1

            kO1i[0, :] = k1[0] - i * K[0]
            kO1i[1, :] = k1[1] * S.ones_like(i)
            kO1i[2, :] = - \
                dispersion_relation_ordinary(kO1i[0, :], kO1i[1, :], k, nO1)

            kE1i[0, :] = kO1i[0, :]
            kE1i[1, :] = kO1i[1, :]
            kE1i[2, :] = -dispersion_relation_extraordinary(
                kE1i[0, :], kE1i[1, :], k, nO1, nE1, c1)

            kO3i[0, :] = kO1i[0, :]
            kO3i[1, :] = kO1i[1, :]
            kO3i[2, :] = dispersion_relation_ordinary(
                kO3i[0, :], kO3i[1, :], k, nO3)

            kE3i[0, :] = kO1i[0, :]
            kE3i[1, :] = kO1i[1, :]
            kE3i[2, :] = dispersion_relation_extraordinary(
                kE3i[0, :], kE3i[1, :], k, nO3, nE3, c3)

            # k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [k1[2] - i * K[2]]]
            k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [- i * K[2]]]

            # aliases for constant wavevectors
            kx = kO1i[0, :]  # or kE1i[0, :], it's the same anyway
            ky = k1[1]

            # matrices
            I = S.eye(nood, dtype=complex)
            ZERO = S.zeros((nood, nood), dtype=complex)
            Kx = S.diag(kx / k)
            Ky = ky / k * I
            Kz = S.diag(k2i[2, :] / k)
            KO1z = S.diag(kO1i[2, :] / k)
            KE1z = S.diag(kE1i[2, :] / k)
            KO3z = S.diag(kO3i[2, :] / k)
            KE3z = S.diag(kE3i[2, :] / k)

            ARO = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KO1z * eps1[2, 0]
            BRO = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KO1z * eps1[2, 1]
            CRO_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KO1z * eps1[2, 2])

            ARE = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KE1z * eps1[2, 0]
            BRE = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KE1z * eps1[2, 1]
            CRE_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KE1z * eps1[2, 2])

            ATO = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KO3z * eps3[2, 0]
            BTO = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KO3z * eps3[2, 1]
            CTO_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KO3z * eps3[2, 2])

            ATE = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KE3z * eps3[2, 0]
            BTE = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KE3z * eps3[2, 1]
            CTE_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KE3z * eps3[2, 2])

            DRE = c1[1] * KE1z - c1[2] * Ky
            ERE = c1[2] * Kx - c1[0] * KE1z
            FRE = c1[0] * Ky - c1[1] * Kx

            DTE = c3[1] * KE3z - c3[2] * Ky
            ETE = c3[2] * Kx - c3[0] * KE3z
            FTE = c3[0] * Ky - c3[1] * Kx

            b = S.r_[u[0] * dlt, u[1] * dlt, (k1[1] / k * u[2] - k1[2] / k * u[1]) * dlt, (
                k1[2] / k * u[0] - k1[0] / k * u[2]) * dlt]
            Ky_CRO_1 = ky / k * CRO_1
            Ky_CRE_1 = ky / k * CRE_1
            Kx_CRO_1 = kx[:, S.newaxis] / k * CRO_1
            Kx_CRE_1 = kx[:, S.newaxis] / k * CRE_1
            MR31 = -S.dot(Ky_CRO_1, ARO)
            MR32 = -S.dot(Ky_CRO_1, BRO) - KO1z
            MR33 = -S.dot(Ky_CRE_1, ARE)
            MR34 = -S.dot(Ky_CRE_1, BRE) - KE1z
            MR41 = S.dot(Kx_CRO_1, ARO) + KO1z
            MR42 = S.dot(Kx_CRO_1, BRO)
            MR43 = S.dot(Kx_CRE_1, ARE) + KE1z
            MR44 = S.dot(Kx_CRE_1, BRE)
            MR = S.asarray(S.bmat([[I, ZERO, I, ZERO],
                                   [ZERO, I, ZERO, I],
                                   [MR31, MR32, MR33, MR34],
                                   [MR41, MR42, MR43, MR44]]))

            Ky_CTO_1 = ky / k * CTO_1
            Ky_CTE_1 = ky / k * CTE_1
            Kx_CTO_1 = kx[:, S.newaxis] / k * CTO_1
            Kx_CTE_1 = kx[:, S.newaxis] / k * CTE_1
            MT31 = -S.dot(Ky_CTO_1, ATO)
            MT32 = -S.dot(Ky_CTO_1, BTO) - KO3z
            MT33 = -S.dot(Ky_CTE_1, ATE)
            MT34 = -S.dot(Ky_CTE_1, BTE) - KE3z
            MT41 = S.dot(Kx_CTO_1, ATO) + KO3z
            MT42 = S.dot(Kx_CTO_1, BTO)
            MT43 = S.dot(Kx_CTE_1, ATE) + KE3z
            MT44 = S.dot(Kx_CTE_1, BTE)
            MT = S.asarray(S.bmat([[I, ZERO, I, ZERO],
                                   [ZERO, I, ZERO, I],
                                   [MT31, MT32, MT33, MT34],
                                   [MT41, MT42, MT43, MT44]]))

            Mp.fill(0.0)
            M.fill(0.0)

            for nlayer in range(nlayers - 2, 0, -1):  # internal layers

                layer = multilayer[nlayer]
                thickness = layer.thickness

                EPS2, EPS21 = layer.getEPSFourierCoeffs(
                    wl, n, anisotropic=True)

                # Exx = S.squeeze(EPS2[0, 0, :])
                # Exx = toeplitz(S.flipud(Exx[0:hmax + 1]), Exx[hmax:])
                Exy = S.squeeze(EPS2[0, 1, :])
                Exy = toeplitz(S.flipud(Exy[0:hmax + 1]), Exy[hmax:])
                Exz = S.squeeze(EPS2[0, 2, :])
                Exz = toeplitz(S.flipud(Exz[0:hmax + 1]), Exz[hmax:])

                Eyx = S.squeeze(EPS2[1, 0, :])
                Eyx = toeplitz(S.flipud(Eyx[0:hmax + 1]), Eyx[hmax:])
                Eyy = S.squeeze(EPS2[1, 1, :])
                Eyy = toeplitz(S.flipud(Eyy[0:hmax + 1]), Eyy[hmax:])
                Eyz = S.squeeze(EPS2[1, 2, :])
                Eyz = toeplitz(S.flipud(Eyz[0:hmax + 1]), Eyz[hmax:])

                Ezx = S.squeeze(EPS2[2, 0, :])
                Ezx = toeplitz(S.flipud(Ezx[0:hmax + 1]), Ezx[hmax:])
                Ezy = S.squeeze(EPS2[2, 1, :])
                Ezy = toeplitz(S.flipud(Ezy[0:hmax + 1]), Ezy[hmax:])
                Ezz = S.squeeze(EPS2[2, 2, :])
                Ezz = toeplitz(S.flipud(Ezz[0:hmax + 1]), Ezz[hmax:])

                Exx_1 = S.squeeze(EPS21[0, 0, :])
                Exx_1 = toeplitz(S.flipud(Exx_1[0:hmax + 1]), Exx_1[hmax:])
                Exx_1_1 = inv(Exx_1)

                # lalanne
                Ezz_1 = inv(Ezz)
                Ky_Ezz_1 = ky / k * Ezz_1
                Kx_Ezz_1 = kx[:, S.newaxis] / k * Ezz_1
                Exz_Ezz_1 = S.dot(Exz, Ezz_1)
                Eyz_Ezz_1 = S.dot(Eyz, Ezz_1)
                H11 = 1j * S.dot(Ky_Ezz_1, Ezy)
                H12 = 1j * S.dot(Ky_Ezz_1, Ezx)
                H13 = S.dot(Ky_Ezz_1, Kx)
                H14 = I - S.dot(Ky_Ezz_1, Ky)
                H21 = 1j * S.dot(Kx_Ezz_1, Ezy)
                H22 = 1j * S.dot(Kx_Ezz_1, Ezx)
                H23 = S.dot(Kx_Ezz_1, Kx) - I
                H24 = -S.dot(Kx_Ezz_1, Ky)
                H31 = S.dot(Kx, Ky) + Exy - S.dot(Exz_Ezz_1, Ezy)
                H32 = Exx_1_1 - S.dot(Ky, Ky) - S.dot(Exz_Ezz_1, Ezx)
                H33 = 1j * S.dot(Exz_Ezz_1, Kx)
                H34 = -1j * S.dot(Exz_Ezz_1, Ky)
                H41 = S.dot(Kx, Kx) - Eyy + S.dot(Eyz_Ezz_1, Ezy)
                H42 = -S.dot(Kx, Ky) - Eyx + S.dot(Eyz_Ezz_1, Ezx)
                H43 = -1j * S.dot(Eyz_Ezz_1, Kx)
                H44 = 1j * S.dot(Eyz_Ezz_1, Ky)
                H = 1j * S.diag(S.repeat(S.diag(Kz), 4)) + \
                    S.asarray(S.bmat([[H11, H12, H13, H14],
                                      [H21, H22, H23, H24],
                                      [H31, H32, H33, H34],
                                      [H41, H42, H43, H44]]))

                q, W = eig(H)
                W1, W2, W3, W4 = S.split(W, 4)

                #
                # boundary conditions
                #
                # x = [R T]
                # R = [ROx ROy REx REy]
                # T = [TOx TOy TEx TEy]
                # b + MR.R = M1p.c
                # M1.c = M2p.c
                # ...
                # ML.c = MT.T
                # therefore: b + MR.R = (M1p.M1^-1.M2p.M2^-1. ...).MT.T
                # missing equations from (46)..(49) in glytsis_rigorous
                # [b] = [-MR Mtot.MT] [R]
                # [0]   [...........] [T]

                z = S.zeros_like(q)
                z[S.where(q.real > 0)] = -thickness
                D = S.exp(k * q * z)
                Sy0 = W1 * D[S.newaxis, :]
                Sx0 = W2 * D[S.newaxis, :]
                Uy0 = W3 * D[S.newaxis, :]
                Ux0 = W4 * D[S.newaxis, :]

                z = thickness * S.ones_like(q)
                z[S.where(q.real > 0)] = 0
                D = S.exp(k * q * z)
                D1 = S.exp(-1j * k2i[2, :] * thickness)
                Syd = D1[:, S.newaxis] * W1 * D[S.newaxis, :]
                Sxd = D1[:, S.newaxis] * W2 * D[S.newaxis, :]
                Uyd = D1[:, S.newaxis] * W3 * D[S.newaxis, :]
                Uxd = D1[:, S.newaxis] * W4 * D[S.newaxis, :]

                Mp[:, :, nlayer] = S.r_[Sx0, Sy0, -1j * Ux0, -1j * Uy0]
                M[:, :, nlayer] = S.r_[Sxd, Syd, -1j * Uxd, -1j * Uyd]

            Mtot = S.eye(4 * nood, dtype=complex)
            for nlayer in range(1, nlayers - 1):
                Mtot = S.dot(
                    S.dot(Mtot, Mp[:, :, nlayer]), inv(M[:, :, nlayer]))

            BC_b = S.r_[b, S.zeros_like(b)]
            BC_A1 = S.c_[-MR, S.dot(Mtot, MT)]
            BC_A2 = S.asarray(S.bmat(
                [[(c1[0] * I - c1[2] * S.dot(CRO_1, ARO)), (c1[1] * I - c1[2] * S.dot(CRO_1, BRO)), ZERO, ZERO, ZERO,
                  ZERO, ZERO, ZERO],
                 [ZERO, ZERO, (DRE - S.dot(S.dot(FRE, CRE_1), ARE)), (ERE - S.dot(S.dot(FRE, CRE_1), BRE)), ZERO, ZERO,
                  ZERO, ZERO],
                 [ZERO, ZERO, ZERO, ZERO, (c3[0] * I - c3[2] * S.dot(CTO_1, ATO)),
                  (c3[1] * I - c3[2] * S.dot(CTO_1, BTO)), ZERO, ZERO],
                 [ZERO, ZERO, ZERO, ZERO, ZERO, ZERO, (DTE - S.dot(S.dot(FTE, CTE_1), ATE)),
                  (ETE - S.dot(S.dot(FTE, CTE_1), BTE))]]))

            BC_A = S.r_[BC_A1, BC_A2]

            x = linsolve(BC_A, BC_b)

            ROx, ROy, REx, REy, TOx, TOy, TEx, TEy = S.split(x, 8)

            ROz = -S.dot(CRO_1, (S.dot(ARO, ROx) + S.dot(BRO, ROy)))
            REz = -S.dot(CRE_1, (S.dot(ARE, REx) + S.dot(BRE, REy)))
            TOz = -S.dot(CTO_1, (S.dot(ATO, TOx) + S.dot(BTO, TOy)))
            TEz = -S.dot(CTE_1, (S.dot(ATE, TEx) + S.dot(BTE, TEy)))

            denom = (k1[2] - S.dot(u, k1) * u[2]).real
            DEO1[:, iwl] = -((S.absolute(ROx) ** 2 + S.absolute(ROy) ** 2 + S.absolute(ROz) ** 2) * S.conj(kO1i[2, :]) -
                             (ROx * kO1i[0, :] + ROy * kO1i[1, :] + ROz * kO1i[2, :]) * S.conj(ROz)).real / denom
            DEE1[:, iwl] = -((S.absolute(REx) ** 2 + S.absolute(REy) ** 2 + S.absolute(REz) ** 2) * S.conj(kE1i[2, :]) -
                             (REx * kE1i[0, :] + REy * kE1i[1, :] + REz * kE1i[2, :]) * S.conj(REz)).real / denom
            DEO3[:, iwl] = ((S.absolute(TOx) ** 2 + S.absolute(TOy) ** 2 + S.absolute(TOz) ** 2) * S.conj(kO3i[2, :]) -
                            (TOx * kO3i[0, :] + TOy * kO3i[1, :] + TOz * kO3i[2, :]) * S.conj(TOz)).real / denom
            DEE3[:, iwl] = ((S.absolute(TEx) ** 2 + S.absolute(TEy) ** 2 + S.absolute(TEz) ** 2) * S.conj(kE3i[2, :]) -
                            (TEx * kE3i[0, :] + TEy * kE3i[1, :] + TEz * kE3i[2, :]) * S.conj(TEz)).real / denom

        # save the results
        self.DEO1 = DEO1
        self.DEE1 = DEE1
        self.DEO3 = DEO3
        self.DEE3 = DEE3

        return self
Esempio n. 49
0
    def m_step(self, X):
        """M step of the algorithm

        This function computes the empirical estimators of the mean
        vector, the covariance matrix and the proportion of each
        class.

        """
        # Learn the model for each class
        C_ = self.C
        c_delete = []
        for c in range(self.C):
            ni = self.T[:, c].sum()
            # Check if empty
            if self.check_empty and \
               ni < self.population:
                C_ -= 1
                c_delete.append(c)
            else:
                self.ni.append(ni)
                self.prop.append(float(self.ni[-1]) / self.n)
                self.mean.append(sp.dot(self.T[:, c].T, X) / self.ni[-1])
                X_ = (X - self.mean[-1]) * (sp.sqrt(self.T[:, c])[:,
                                                                  sp.newaxis])

                # Use dsyrk to take benefit of symmetric matrices
                if self.n >= self.d:
                    cov = dsyrk(1.0 / (self.ni[-1] - 1), X_.T, trans=False)
                else:
                    cov = dsyrk(1.0 / (self.ni[-1] - 1), X_.T, trans=True)
                    self.X.append(X_)
                X_ = None

                # Only the upper part of cov is initialized -> dsyrk
                L, Q = linalg.eigh(cov, lower=False)

                # Check for numerical errors
                L[L < EPS] = EPS
                if self.check_empty and (L.max() - L.min()) < EPS:
                    # In that case all eigenvalues are equal
                    # and this does not match the model
                    C_ -= 1
                    c_delete.append(c)
                    del self.ni[-1]
                    del self.prop[-1]
                    del self.mean[-1]
                    if self.n < self.d:
                        del self.X[-1]
                else:
                    idx = L.argsort()[::-1]
                    L, Q = L[idx], Q[:, idx]

                    self.L.append(L)
                    self.Q.append(Q)
                    self.trace.append(cov.trace())

        # Update T
        if c_delete:
            self.T = sp.delete(self.T, c_delete, axis=1)

        # Update the number of clusters
        self.C_.append(C_)
        self.C = C_

        # Estimation of the signal subspace for specific size subspace models
        if self.model in ('M1', 'M3', 'M5', 'M7'):
            for c in range(self.C):
                # Scree test
                dL, pi = sp.absolute(sp.diff(self.L[c])), 1
                dL /= dL.max()
                while sp.any(dL[pi:] > self.th):
                    pi += 1
                if (pi < (min(self.ni[c], self.d) - 1)) and (pi > 0):
                    self.pi.append(pi)
                else:
                    self.pi.append(1)
        elif self.model in ('M2', 'M4', 'M6', 'M8'):
            dL, p = self.dL, 1
            while sp.any(dL[p:] > self.th):
                p += 1
            min_dim = int(min(min(self.ni), self.d))
            # Check if (p >= ni-1 or d-1) and p > 0
            if p < (min_dim - 1):
                self.pi = [p for c in range(self.C)]
            else:
                self.pi = [max((min_dim - 2), 1) for c in range(self.C)]
            del dL, p, idx

        # Estim signal part
        self.a = [sL[:sPI] for sL, sPI in zip(self.L, self.pi)]
        if self.model in ('M5', 'M6', 'M7', 'M8'):
            self.a = [sp.repeat(sA[:].mean(), sA.size) for sA in self.a]

        # Estim noise term
        if self.model in ('M1', 'M2', 'M5', 'M6'):
            # Noise free
            self.b = [(sT - sA.sum()) / (self.d - sPI)
                      for sT, sA, sPI in zip(self.trace, self.a, self.pi)]
            # Check for very small value of b
            self.b = [b if b > EPS else EPS for b in self.b]

        elif self.model in ('M3', 'M4', 'M7', 'M8'):
            # Noise common
            denom = self.d - sp.sum(
                [sPR * sPI for sPR, sPI in zip(self.prop, self.pi)])
            num = sp.sum([
                sPR * (sT - sA.sum())
                for sPR, sT, sA in zip(self.prop, self.trace, self.a)
            ])

            # Check for very small values
            if num < EPS:
                self.b = [EPS for i in range(self.C)]
            elif denom < EPS:
                self.b = [1 / EPS for i in range(self.C)]
            else:
                self.b = [num / denom for i in range(self.C)]

        # Compute remainings parameters
        # Precompute logdet
        self.logdet = [(sp.log(sA).sum() + (self.d - sPI) * sp.log(sB))
                       for sA, sPI, sB in zip(self.a, self.pi, self.b)]

        # Update the Q matrices
        if self.n >= self.d:
            self.Q = [sQ[:, :sPI] for sQ, sPI in zip(self.Q, self.pi)]
        else:
            self.Q = [
                sp.dot(sX.T, sQ[:, :sPI]) / sp.sqrt(sL[:sPI])
                for sX, sQ, sPI, sL in zip(self.X, self.Q, self.pi, self.L)
            ]

        # Compute the number of parameters of the model
        self.q = self.C * self.d + (self.C - 1) + sum(
            [sPI * (self.d - (sPI + 1) / 2) for sPI in self.pi])
        # Number of noise subspaces
        if self.model in ('M1', 'M3', 'M5', 'M7'):
            self.q += self.C
        elif self.model in ('M2', 'M4', 'M6', 'M8'):
            self.q += 1
        # Size of signal subspaces
        if self.model in ('M1', 'M2'):
            self.q += sum(self.pi) + self.C
        elif self.model in ('M3', 'M4'):
            self.q += sum(self.pi) + 1
        elif self.model in ('M5', 'M6'):
            self.q += 2 * self.C
        elif self.model in ('M7', 'M8'):
            self.q += self.C + 1
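
A compact check of the dsyrk shortcut used in the covariance step above: only one triangle of the output is filled, and it matches the usual empirical covariance. Toy data, numpy plus the scipy BLAS wrapper:

import numpy as np
from scipy.linalg.blas import dsyrk

rng = np.random.RandomState(0)
X = rng.randn(50, 4)              # 50 samples, 4 features (illustrative)
Xc = X - X.mean(axis=0)           # centre the data

cov_syrk = dsyrk(1.0 / (X.shape[0] - 1), Xc.T, trans=False)  # upper triangle of Xc.T @ Xc / (n - 1)
cov_full = np.cov(X, rowvar=False)

iu = np.triu_indices(4)           # dsyrk fills only one triangle, so compare the upper parts
print(np.allclose(cov_syrk[iu], cov_full[iu]))  # True
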
Esempio n. 50
0
d_test = sp.load("LabValuesIntermediate.npz")

numSamples = 200
T = 199  # number of timesteps in matrix
batchSize = T - 1
numEpochs = 10

Position1L_t = d_train['arr_11'][:, :numSamples, :].reshape(
    (2, numSamples * T))
Position2L_t = d_train['arr_12'][:, :numSamples, :].reshape(
    (2, numSamples * T))
Velocity1L_t = d_train['arr_13'][:, :numSamples, :].reshape(
    (2, numSamples * T))
Velocity2L_t = d_train['arr_14'][:, :numSamples, :].reshape(
    (2, numSamples * T))
m1_Arr = sp.repeat(d_train['arr_15'][:numSamples], T)
m2_Arr = sp.repeat(d_train['arr_16'][:numSamples], T)
dt_Arr = sp.repeat(d_train['arr_17'][:numSamples], T)
E_i = d_train['arr_4'][:numSamples]
E_f = d_train['arr_5'][:numSamples]
p_x_i = d_train['arr_6'][:numSamples]
p_x_f = d_train['arr_7'][:numSamples]
p_y_i = d_train['arr_8'][:numSamples]
p_y_f = d_train['arr_9'][:numSamples]
print(sp.shape(dt_Arr))

Position1L_t_test = d_test['arr_11'][:, :numSamples, :].reshape(
    (2, numSamples * T))
Position2L_t_test = d_test['arr_12'][:, :numSamples, :].reshape(
    (2, numSamples * T))
Velocity1L_t_test = d_test['arr_13'][:, :numSamples, :].reshape(
Esempio n. 51
0
    def get_estimates(self,
                      eig_L,
                      K=None,
                      xs=None,
                      ngrids=50,
                      llim=-10,
                      ulim=10,
                      esp=1e-6,
                      return_pvalue=False,
                      return_f_stat=False,
                      method='REML',
                      dtype='double',
                      eig_R=None):
        """
        Get ML/REML estimates for the effect sizes, as well as the random effect contributions.
        Using the EMMA algorithm (Kang et al., Genetics, 2008).
        Methods available are 'REML', and 'ML'
        """

        if xs is None:
            X = self.X
        else:
            X = np.hstack([self.X, xs])

        if eig_R is None or xs is None:
            eig_R = self._get_eigen_R_(X=X, K=K)

        q = X.shape[1]  # number of fixed effects
        n = self.n  # number of individuals
        p = n - q
        m = ngrids + 1
        print("eigen_L_V ", eig_L['values'])
        print("eigen_R_V ", eig_R['values'])

        print("eigen_L ", eig_L['vectors'])
        print("eigen_R ", eig_R['vectors'])
        print("self.Y ", self.Y)
        etas = np.array(eig_R['vectors'] * self.Y, dtype=dtype)
        print("etas ", etas)
        sq_etas = etas * etas
        print("sq_etas ", sq_etas)
        log_deltas = (np.arange(m, dtype=dtype) / ngrids) * (
            ulim - llim) + llim  # a list of deltas to search
        deltas = np.exp(log_deltas)
        eig_vals = np.array(eig_R['values'], dtype=dtype)
        lambdas = np.reshape(np.repeat(eig_vals, m),
                             (p, m)) + np.reshape(np.repeat(deltas, p),
                                                  (m, p)).T
        s1 = np.sum(sq_etas / lambdas, axis=0)
        print("line204 ", s1)
        if method == 'REML':
            s2 = np.sum(np.log(lambdas), axis=0)
            print("line284 ", s2)
            lls = 0.5 * (p * (sp.log(
                (p) / (2.0 * sp.pi)) - 1 - sp.log(s1)) - s2
                         )  # log likelihoods (eq. 7 from paper)
            print("line288 ", lls)
            s3 = np.sum(sq_etas / (lambdas * lambdas), axis=0)
            print("s3", s3)
            s4 = np.sum(1 / lambdas, axis=0)
            dlls = 0.5 * (
                p * s3 / s1 - s4
            )  # this is the derivation of log likelihood (eq. 9 from paper)
            print("s4", s4)
            print("dlls", dlls)
        elif method == 'ML':  # this part is the function emma.MLE in R emma
            eig_vals_L = sp.array(eig_L['values'], dtype=dtype)
            xis = sp.reshape(sp.repeat(eig_vals_L, m), (n, m)) + \
                  sp.reshape(sp.repeat(deltas, n), (m, n)).T
            s2 = np.sum(np.log(xis), axis=0)
            lls = 0.5 * (n * (np.log(
                (n) / (2.0 * np.pi)) - 1 - np.log(s1)) - s2
                         )  # log likelihoods (eq. 6 from paper)
            s3 = sp.sum(sq_etas / (lambdas * lambdas), axis=0)
            s4 = sp.sum(1 / xis, axis=0)
            dlls = 0.5 * (
                n * s3 / s1 - s4
            )  # this is the derivation of log likelihood (eq. 8 from paper)

        max_ll_i = sp.argmax(lls)
        max_ll = lls[max_ll_i]

        last_dll = dlls[0]
        last_ll = lls[0]

        print("max_ll_i ", max_ll_i)
        print("max_ll ", max_ll)
        print("last_dll ", last_dll)
        print("last_ll ", last_ll)

        zero_intervals = []
        for i in range(1, len(dlls)):
            if dlls[i] < 0 and last_dll > 0:
                zero_intervals.append(
                    ((lls[i] + last_ll) * 0.5,
                     i))  # There is a likelihood peak in
                # this interval; search this interval for the maximum likelihood
            last_ll = lls[i]
            last_dll = dlls[i]

        if len(zero_intervals) > 0:
            opt_ll, opt_i = max(zero_intervals)  # what does this max mean?
            opt_delta = 0.5 * (deltas[opt_i - 1] + deltas[opt_i])
            # Newton-Raphson
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    if method == 'REML':
                        new_opt_delta = optimize.newton(self._redll_,
                                                        opt_delta,
                                                        args=(eig_vals,
                                                              sq_etas),
                                                        tol=esp,
                                                        maxiter=100)
                    elif method == 'ML':
                        new_opt_delta = optimize.newton(self._dll_,
                                                        opt_delta,
                                                        args=(eig_vals,
                                                              eig_vals_L,
                                                              sq_etas),
                                                        tol=esp,
                                                        maxiter=100)
            except Exception:
                new_opt_delta = opt_delta
            # Validating the delta
            if opt_i > 1 and deltas[
                    opt_i - 1] - esp < new_opt_delta < deltas[opt_i] + esp:
                opt_delta = new_opt_delta
                opt_ll = self._rell_(opt_delta, eig_vals, sq_etas)
            # Checking lower boundary
            elif opt_i == 1 and 0.0 < new_opt_delta < deltas[opt_i] + esp:
                opt_delta = new_opt_delta
                opt_ll = self._rell_(opt_delta, eig_vals, sq_etas)
            # Checking upper boundary
            elif opt_i == len(deltas) - 1 and new_opt_delta > deltas[opt_i - 1] - esp \
                    and not np.isinf(new_opt_delta):
                opt_delta = new_opt_delta
                opt_ll = self._rell_(opt_delta, eig_vals, sq_etas)

            if method == 'REML':
                opt_ll = self._rell_(opt_delta, eig_vals, sq_etas)
            elif method == 'ML':
                opt_ll = self._ll_(opt_delta, eig_vals, eig_vals_L, sq_etas)

            if opt_ll < max_ll:
                opt_delta = deltas[max_ll_i]
        else:
            opt_delta = deltas[max_ll_i]
            opt_ll = max_ll

        #        likelyhood = self._ll_(opt_delta, eig_vals, eig_L['values'], sq_etas) # this is used for LRT
        print("line 418 opt_delta ", opt_delta)
        print("line 419 opt_ll ", opt_ll)
        print("sq_etas ", sq_etas)
        print("eig_vals ", eig_vals)
        print("opt_delta ", opt_delta)
        R = sq_etas / (eig_vals + opt_delta)
        print("R ", R)
        opt_vg = np.sum(R) / p  # vg; p = n - q, where q is the number of fixed effects.
        print("opt_vg ", opt_vg)
        # This is the REML estimation. the ML estimation is np.sum(R) / n
        opt_ve = opt_vg * opt_delta  # ves
        print("opt_ve ", opt_ve)
        # the R emma.MLE and emma.REMLE code stopped here

        # the solve of mixed model equation is mentioned in
        # http://doc.goldenhelix.com/SVS/latest/svsmanual/mixedModelMethods/overview.html#overviewofmixedlinearmodels
        H_sqrt_inv = np.mat(np.diag(
            1.0 / np.sqrt(eig_L['values'] + opt_delta)),
                            dtype=dtype) * eig_L['vectors']  # this
        print("H_shape ", np.shape(H_sqrt_inv))
        print("H_sqrt_inv ", H_sqrt_inv)
        # is the U value from R emma code
        # V = opt_vg * K + opt_ve * sp.eye(len(K))
        # H_sqrt = cholesky(V).T
        # H_sqrt_inv = H_sqrt.I
        X_t = H_sqrt_inv * X
        Y_t = H_sqrt_inv * self.Y
        (beta_est, mahalanobis_rss, rank, sigma) = linalg.lstsq(X_t,
                                                                Y_t,
                                                                rcond=-1.0)
        x_beta = X * beta_est
        print("x_beta ", x_beta)
        print("Y_t ", Y_t)
        print("Y_t - x_beta ", Y_t - x_beta)
        print("Y_t - x_beta test ",
              np.sum((Y_t - X_t * beta_est).T * (Y_t - X_t * beta_est)))
        residuals = self.Y - x_beta
        print("residuals ", residuals)
        rss = residuals.T * residuals
        print("rss ", rss)
        print("mahalanobis_rss ", mahalanobis_rss)
        # x_beta_var = sp.var(x_beta, ddof=1)
        # var_perc = x_beta_var / self.y_var

        # get the likelihood value for LRT
        opt_ll = self._ll_(opt_delta, eig_vals, eig_L['values'],
                           sq_etas)  # recalculate the likelihood
        print("opt_ll", opt_ll)
        opt_dll = self._dll_(opt_delta, eig_vals, eig_L['values'],
                             sq_etas)  # derivative of the log-likelihood
        print("opt_dll", opt_dll)
        opt_rell = self._rell_(opt_delta, eig_vals, sq_etas)
        print("opt_rell", opt_rell)
        opt_redll = self._redll_(opt_delta, eig_vals, sq_etas)
        print("opt_redll", opt_redll)

        res_dict = {
            'max_ll': opt_ll,
            'delta': opt_delta,
            'beta': beta_est,
            've': opt_ve,
            'vg': opt_vg,
            'rss': rss,
            'mahalanobis_rss': mahalanobis_rss,
            'H_sqrt_inv': H_sqrt_inv,
            'pseudo_heritability': 1.0 / (1 + opt_delta)
        }

        if (xs is not None) and return_f_stat:
            #            rss_ratio = h0_rss / rss_list
            #            var_perc = 1 - 1 / rss_ratio
            #            f_stats = (rss_ratio - 1) * n_p / float(q)

            h0_X = H_sqrt_inv * self.X
            (h0_betas, h0_rss, h0_rank, h0_s) = linalg.lstsq(h0_X,
                                                             Y_t,
                                                             rcond=-1)
            print("h0_rss ", h0_rss)
            f_stat = (h0_rss / mahalanobis_rss - 1) * p / xs.shape[1]
            res_dict['var_perc'] = 1.0 - mahalanobis_rss / h0_rss
            res_dict['f_stat'] = float(f_stat)
            print("f_stat ", f_stat)
        if return_pvalue:
            p_val = stats.f.sf(f_stat, (xs.shape[1]), p)
            res_dict['p_val'] = float(p_val)
            print("p_val ", p_val)
        return res_dict  # , lls, dlls, sp.log(deltas)
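The tail of this example turns the REML variance estimates into a GLS fit and then an F-test against the null model. Below is a minimal, self-contained sketch of just that F-test step; the sample size, degrees of freedom and RSS values are made up for illustration and do not come from the code above.

from scipy import stats

# Hypothetical numbers, purely for illustration.
n, q_fixed, q_snp = 200, 2, 1     # samples, fixed effects, tested SNP columns
p = n - q_fixed                   # the "p" used above: residual df of the null model
h0_rss = 185.0                    # Mahalanobis RSS of the null (no-SNP) model
rss = 178.5                       # Mahalanobis RSS after adding the SNP

f_stat = (h0_rss / rss - 1.0) * p / q_snp   # same form as f_stat above
p_val = stats.f.sf(f_stat, q_snp, p)        # upper tail of the F distribution
print(f_stat, p_val)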
Esempio n. 52
0
    def define_init(self, initTheta=1.):
        """ Define Initialisations of the model

        PARAMETERS
        ----------
        initTheta: float
          initialisation for theta. Default is 1. (no sparsity)
        """

        N = self.dimensionalities["N"]
        K = self.dimensionalities["K"]
        M = self.dimensionalities["M"]
        D = self.dimensionalities["D"]

        # Latent variables
        self.model_opts["initZ"] = {
            'mean': "random",
            'var': s.ones((K, )),
            'E': None,
            'E2': None
        }

        # Tau
        self.model_opts["initTau"] = {
            'a': [s.nan] * M,
            'b': [s.nan] * M,
            'E': [s.ones(D[m]) * 100 for m in range(M)]
        }

        # ARD of weights
        self.model_opts["initAlpha"] = {
            'a': [s.nan] * M,
            'b': [s.nan] * M,
            'E': [s.ones(K) * 1. for m in range(M)]
        }

        # Theta
        self.model_opts["initTheta"] = {
            'a': [s.ones(K, ) for m in range(M)],
            'b': [s.ones(K, ) for m in range(M)],
            'E': [s.nan * s.zeros(K, ) for m in range(M)]
        }
        if type(initTheta) is float:
            self.model_opts['initTheta']['E'] = [
                s.ones(K, ) * initTheta for m in range(M)
            ]
        else:
            print("Error: 'initTheta' must be a float")
            exit()

        for m in range(M):
            for k in range(K):
                if self.model_opts['sparsity'][m][k] == 0.:
                    self.model_opts["initTheta"]["a"][m][k] = s.nan
                    self.model_opts["initTheta"]["b"][m][k] = s.nan

        # Weights
        self.model_opts["initSW"] = {
            'Theta': [
                s.repeat(self.model_opts['initTheta']['E'][m][None, :],
                         self.dimensionalities["D"][m], 0) for m in range(M)
            ],
            'mean_S0': [s.zeros((D[m], K)) for m in range(M)],
            'var_S0': [s.nan * s.ones((D[m], K)) for m in range(M)],
            'mean_S1': [s.zeros((D[m], K)) for m in range(M)],
            # 'mean_S1':[stats.norm.rvs(loc=0, scale=1, size=(D[m],K)) for m in range(M)],
            'var_S1': [s.ones((D[m], K)) for m in range(M)],
            'ES': [None] * M,
            'EW_S0': [None] * M,
            'EW_S1': [None] * M  # It will be calculated from the parameters
        }
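The initSW block above sets up a spike-and-slab prior per view: Theta holds the per-feature inclusion probabilities and mean_S1/var_S1 parameterise the slab. A small sketch of how such an initialisation yields effective weights, assuming the usual product parameterisation E[SW] = E[S] * E[W | S = 1]; the variable names below are illustrative, not attributes of the model.

import numpy as np

D_m, K = 50, 10                       # features in one view, number of factors
theta = np.full((D_m, K), 0.5)        # inclusion probabilities (the Theta entry)
mean_S1 = np.random.randn(D_m, K)     # slab means (the mean_S1 entry)

ES = theta                            # E[S] for S ~ Bernoulli(theta)
EW_S1 = mean_S1                       # E[W | S = 1]
EW = ES * EW_S1                       # E[SW]: the effective (sparse) weights
print(EW.shape)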
Esempio n. 53
0
#   Import Simulation Data and Preprocess
#===============================================================================================================

# Position1L_t= sp.transpose(d_train['arr_11'][:,:numSamples,:], (1,2,0)) # (samples, timesteps, features)
# Position2L_t= sp.transpose(d_train['arr_12'][:,:numSamples,:], (1,2,0))
Velocity1L_t = sp.transpose(d_train['arr_13'][:, :numSamples, :], (1, 2, 0))
Velocity2L_t = sp.transpose(d_train['arr_14'][:, :numSamples, :], (1, 2, 0))
# Position1L_firstLast= sp.delete(Position1L_t, [t for t in range(1,sp.shape(Position1L_t)[1]-1)],1)
# Position2L_firstLast= sp.delete(Position2L_t, [t for t in range(1,sp.shape(Position2L_t)[1]-1)],1)
Velocity1L_firstLast = sp.delete(
    Velocity1L_t, [t for t in range(1,
                                    sp.shape(Velocity1L_t)[1] - 1)], 1)
Velocity2L_firstLast = sp.delete(
    Velocity2L_t, [t for t in range(1,
                                    sp.shape(Velocity2L_t)[1] - 1)], 1)
a = sp.array(sp.repeat(d_test['arr_15'][:numSamples, sp.newaxis], T, axis=1))
m1_Arr = sp.delete(
    sp.repeat(d_test['arr_15'][:numSamples, sp.newaxis],
              sp.shape(Velocity1L_t)[1],
              axis=1), [t for t in range(1,
                                         sp.shape(Velocity1L_t)[1] - 1)], 1)
m2_Arr = sp.delete(
    sp.repeat(d_test['arr_16'][:numSamples, sp.newaxis],
              sp.shape(Velocity1L_t)[1],
              axis=1), [t for t in range(1,
                                         sp.shape(Velocity1L_t)[1] - 1)], 1)
# dt_Arr=sp.repeat(d_train['arr_17'][:numSamples,sp.newaxis],T,axis=1)
# E_i=d_train['arr_4'][:numSamples]
# E_f=d_train['arr_5'][:numSamples]
# p_x_i=d_train['arr_6'][:numSamples]
# p_x_f=d_train['arr_7'][:numSamples]
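The sp.delete calls above drop every interior timestep, keeping only the first and last sample of each trajectory. A toy sketch of the same indexing trick; the array and its shape are made up.

import numpy as np

# toy (samples, timesteps, features) array
V = np.arange(2 * 5 * 3).reshape(2, 5, 3)

# delete timesteps 1 .. T-2, i.e. keep only the first and the last one
T = V.shape[1]
V_firstLast = np.delete(V, [t for t in range(1, T - 1)], axis=1)
print(V_firstLast.shape)   # (2, 2, 3)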
Esempio n. 54
0
    def pagerank_scipy(self,
                       flag,
                       alpha=0.85,
                       personalization=None,
                       max_iter=100,
                       tol=1.0e-8,
                       dangling=None):
        start = time.time()
        N = len(self.G)
        if N == 0:
            return {}

        nodelist = sorted(self.G.nodes())
        if (flag == 0):
            M = nx.to_scipy_sparse_matrix(self.G,
                                          nodelist=nodelist,
                                          weight='trueweight',
                                          dtype=float)
        else:
            M = nx.to_scipy_sparse_matrix(self.G,
                                          nodelist=nodelist,
                                          weight='totalWeight',
                                          dtype=float)

        S = scipy.array(M.sum(axis=1)).flatten()

        S[S != 0] = 1.0 / S[S != 0]
        Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')
        M = Q * M

        # initial vector
        x = scipy.repeat(1.0 / N, N)

        # Personalization vector
        if personalization is None:
            p = scipy.repeat(1.0 / N, N)
        else:
            missing = set(nodelist) - set(personalization)
            if missing:
                raise NetworkXError('Personalization vector dictionary '
                                    'must have a value for every node. '
                                    'Missing nodes %s' % missing)
            p = scipy.array([personalization[n] for n in nodelist],
                            dtype=float)
            p = p / p.sum()

        # Dangling nodes
        if dangling is None:
            dangling_weights = p
        else:
            missing = set(nodelist) - set(dangling)
            if missing:
                raise NetworkXError('Dangling node dictionary '
                                    'must have a value for every node. '
                                    'Missing nodes %s' % missing)
            # Convert the dangling dictionary into an array in nodelist order
            dangling_weights = scipy.array([dangling[n] for n in nodelist],
                                           dtype=float)
            dangling_weights /= dangling_weights.sum()
        is_dangling = scipy.where(S == 0)[0]
        end = time.time()
        self.preprocess = end - start
        #print ("preprocess ",self.preprocess,"\n")
        # power iteration: make up to max_iter iterations
        for _ in range(max_iter):
            xlast = x
            x = alpha * (x * M + sum(x[is_dangling]) * dangling_weights) + \
             (1 - alpha) * p
            # check convergence, l1 norm
            err = scipy.absolute(x - xlast).sum()
            if err < N * tol:
                return dict(zip(nodelist, map(float, x)))
        raise NetworkXError(
            'pagerank_scipy: power iteration failed to converge '
            'in %d iterations.' % max_iter)
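pagerank_scipy above is the standard personalized-PageRank power iteration on a row-normalised sparse matrix. A self-contained sketch of the same iteration on a made-up 3-node graph, without the class or networkx; the multiplication style mirrors the code above.

import numpy as np
import scipy.sparse as sparse

# Toy 3-node directed adjacency matrix (made up for illustration).
M = sparse.csr_matrix(np.array([[0., 1., 1.],
                                [0., 0., 1.],
                                [1., 0., 0.]]))
S = np.asarray(M.sum(axis=1)).flatten()
S[S != 0] = 1.0 / S[S != 0]
M = sparse.spdiags(S, 0, *M.shape, format='csr') * M   # row-normalise

alpha, N = 0.85, M.shape[0]
x = p = np.repeat(1.0 / N, N)
is_dangling = np.where(S == 0)[0]
for _ in range(100):
    xlast = x
    x = alpha * (x * M + sum(x[is_dangling]) * p) + (1 - alpha) * p
    if np.abs(x - xlast).sum() < N * 1e-8:      # l1 convergence check
        break
print(dict(enumerate(x)))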
Esempio n. 55
0
	def plot(self,fname=None,n=100,alphaMult=1,axis=None):
		"""Produce a plot of the last n SGD steps. 
		fname -- a file name where to save the plot, or show if None.
		n -- the number of points to display.
		alphaMult -- a geometric multiplier on the alpha value of the segments, 
		with the most recent one having alpha=1.
		axis -- the axis on which to plot the steps."""
		import matplotlib.pyplot as plt
		from mpl_toolkits.mplot3d import Axes3D		
		pts = scipy.array(self.x_hist)
		n = min(n,pts.shape[0])
		pts = scipy.array(self.x_hist[-n:])
		plt.clf()
		fig = plt.figure()
		alpha = 1
		
		if not axis:
			ax = plt.gca()
		else:
			ax = axis
		x = []
		y = []
		z = []
		a = []
		pairs = []
		colors = ['black','g','b','r']
		if self.x0.ndim == 1:
			self.x0 = self.x0.reshape(1,self.x0.size)
		if self.x0.shape[1] == 2:
			for k in range(self.x0.shape[0]): # num rows
				for i in range(len(pts)-1):
					xnew = pts[i][k]
					if xnew.size == 1: xnew = pts[-i]
					ynew = pts[i+1][k]
					if ynew.size == 1: ynew = pts[-(i-1)]
					pairs.append( zip(xnew,ynew) ) # next x and y after a step
					plt.cla()
					colors_array = scipy.arange(self.x0.shape[0])
					colors_array = scipy.repeat(colors_array,(len(pts)-1))
					a = 0.0
					for j in range(len(pairs)): # all x,y pairs
						if (j*self.x0.shape[0]) > ( len(pairs) - (15*self.x0.shape[0]) ): 
							a = 1.0
							a = scipy.absolute( 1 - ( len(pairs) - j ) / float(len(pairs)) )
							if j == 0: a = 1.0/(i*2+1)
							if j > 15: a = scipy.absolute( 1 - (len(pairs) - 15 - j) / 15.0 )
						if self.x0.shape[0] > 1: # vectorized x input
							a = 1.0 # temporary over-ride
						ax.plot(*pairs[j][-10:], c=colors[colors_array[j]], alpha=a )
						# a *= alphaMult
				ax.scatter(2,1,color='red',marker='o',s=40)

		if self.x0.shape[1] == 3:
			import mpl_toolkits.mplot3d
			ax = fig.gca(projection='3d')
			for i in range(len(pts)-1):
				xnew = pts[i][0]
				if xnew.size == 1: xnew = pts[-i]
				ynew = pts[i+1][0]
				if ynew.size == 1: ynew = pts[-(i-1)]
				znew = pts[i+1][0]
				if znew.size == 1: znew = pts[-(i-1)]
				pairs.append( zip(xnew,ynew,znew) )
				plt.cla()
				for j in (range( len(pairs) )):
					a = 0.0
					if j > ( len(pairs) - 15 ): 
						a = scipy.absolute( 1 - ( len(pairs) - j ) / float(len(pairs)) )
						if j == 0: a = 1.0/(i*2+1)
						if j > 15: a = scipy.absolute( 1 - (len(pairs) - 15 - j) / 15.0 )
					ax.plot(*pairs[j], c='black', alpha=a )
			ax.scatter(2,1,4,color='red',marker='o',s=40)

		if fname == None:
			plt.show()
		else:
			fig.savefig(fname)
		plt.close()
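The alphaMult argument is documented as a geometric multiplier on segment transparency, with the most recent segment fully opaque. A minimal sketch of that fading schedule on its own; the names and sizes are illustrative.

import numpy as np

n_segments = 10
alphaMult = 0.8
# newest segment (index n_segments - 1) gets alpha = 1, older ones fade geometrically
alphas = alphaMult ** np.arange(n_segments - 1, -1, -1)
print(alphas)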
Esempio n. 56
0
    def __init__(self, Ionodict, inifile, outdir, outfilelist=None):
        """
            This function will create an instance of the RadarData class.  It will
            take in the values and create the class and make raw IQ data.
            Inputs:
            sensdict - A dictionary of sensor parameters
            angles - A list of tuples which the first position is the az angle
                and the second position is the el angle.
            IPP - The interpulse period in seconds represented as a float.
            Tint - The integration time in seconds as a float.  This will be the
            integration time of all of the beams.
            time_lim - The length of time of the simulation the number of time points
                will be calculated.
            pulse - A numpy array that represents the pulse shape.
            rng_lims - A numpy array of length 2 that holds the min and max range
                that the radar will cover.
        """
        (sensdict, simparams) = readconfigfile(inifile)
        self.simparams = simparams
        N_angles = len(self.simparams['angles'])

        NNs = int(self.simparams['NNs'])
        self.sensdict = sensdict
        Npall = sp.floor(self.simparams['TimeLim'] / self.simparams['IPP'])
        Npall = int(sp.floor(Npall / N_angles) * N_angles)
        Np = Npall // N_angles

        print("All spectrums created already")
        filetimes = sorted(Ionodict.keys())
        ftimes = sp.array(filetimes)
        simdtype = self.simparams['dtype']
        pulsetimes = sp.arange(Npall) * self.simparams['IPP'] + ftimes.min()
        pulsefile = sp.array(
            [sp.where(itimes - ftimes >= 0)[0][-1] for itimes in pulsetimes])
        # differentiate between phased arrays and dish antennas
        if sensdict['Name'].lower() in ['risr', 'pfisr', 'risr-n']:

            beams = sp.tile(sp.arange(N_angles), Npall // N_angles)
        else:

            # for dish arrays
            brate = simparams['beamrate']
            beams2 = sp.repeat(sp.arange(N_angles), brate)
            beam3 = sp.concatenate((beams2, beams2[::-1]))
            ntile = Npall // len(beam3)
            leftover = int(Npall - ntile * len(beam3))
            if ntile > 0:
                beams = sp.tile(beam3, ntile)
                beams = sp.concatenate((beams, beam3[:leftover]))
            else:
                beams = beam3[:leftover]

        pulsen = sp.repeat(sp.arange(Np), N_angles)
        pt_list = []
        pb_list = []
        pn_list = []
        fname_list = []
        self.datadir = outdir
        self.maindir = outdir.parent
        self.procdir = self.maindir / 'ACF'
        Nf = len(filetimes)
        progstr = 'Data from {:d} of {:d} being processed Name: {:s}.'
        if outfilelist is None:
            print('\nData Now being created.')

            Noisepwr = v_Boltz * sensdict['Tsys'] * sensdict['BandWidth']
            self.outfilelist = []
            for ifn, ifilet in enumerate(filetimes):

                outdict = {}
                ifile = Ionodict[ifilet]
                ifilename = Path(ifile).name
                update_progress(
                    float(ifn) / Nf, progstr.format(ifn, Nf, ifilename))
                curcontainer = IonoContainer.readh5(ifile)
                if ifn == 0:
                    self.timeoffset = curcontainer.Time_Vector[0, 0]
                pnts = pulsefile == ifn
                pt = pulsetimes[pnts]
                pb = beams[pnts]
                pn = pulsen[pnts].astype(int)
                rawdata = self.__makeTime__(pt, curcontainer.Time_Vector,
                                            curcontainer.Sphere_Coords,
                                            curcontainer.Param_List, pb)
                d_shape = rawdata.shape
                n_tempr = sp.random.randn(*d_shape).astype(simdtype)
                n_tempi = 1j * sp.random.randn(*d_shape).astype(simdtype)
                noise = sp.sqrt(Noisepwr / 2) * (n_tempr + n_tempi)
                outdict['AddedNoise'] = noise
                outdict['RawData'] = rawdata + noise
                outdict['RawDatanonoise'] = rawdata
                outdict['NoiseData'] = sp.sqrt(Noisepwr / 2) * (
                    sp.random.randn(len(pn), NNs).astype(simdtype) +
                    1j * sp.random.randn(len(pn), NNs).astype(simdtype))
                outdict['Pulses'] = pn
                outdict['Beams'] = pb
                outdict['Time'] = pt
                fname = '{0:d} RawData.h5'.format(ifn)
                newfn = self.datadir / fname
                self.outfilelist.append(str(newfn))
                dict2h5(str(newfn), outdict)

                #Listing info
                pt_list.append(pt)
                pb_list.append(pb)
                pn_list.append(pn)
                fname_list.append(fname)
            infodict = {
                'Files': fname_list,
                'Time': pt_list,
                'Beams': pb_list,
                'Pulses': pn_list
            }
            dict2h5(str(outdir.joinpath('INFO.h5')), infodict)

        else:
            infodict = h52dict(str(outdir.joinpath('INFO.h5')))
            alltime = sp.hstack(infodict['Time'])
            self.timeoffset = alltime.min()
            self.outfilelist = outfilelist
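For dish antennas the constructor above builds a back-and-forth beam schedule: each beam index is repeated beamrate times, the sweep is mirrored, and the result is tiled to cover all pulses. A reduced sketch of that pattern with made-up sizes; the truncation here is simplified relative to the leftover handling above.

import numpy as np

N_angles, brate, Npall = 3, 2, 16
beams2 = np.repeat(np.arange(N_angles), brate)        # [0 0 1 1 2 2]
beam3 = np.concatenate((beams2, beams2[::-1]))        # forward then reverse sweep
ntile = int(np.ceil(Npall / len(beam3)))
beams = np.tile(beam3, ntile)[:Npall]                 # one beam index per pulse
print(beams)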
Esempio n. 57
0
    p_y_i = CalcConsd.y_momentum(m1_Arr,m2_Arr,v1L,v2L)
    p_x_f = CalcConsd.x_momentum(m1_Arr,m2_Arr,v1L_f,v2L_f)
    p_y_f = CalcConsd.y_momentum(m1_Arr,m2_Arr,v1L_f,v2L_f)

    #===========================================
    #
    # Lab Frame Misses Time Series
    #
    #===========================================
    print("Beginning Path Phase: ")
    print(str(time.time() - start_time)+" seconds")

    t = sp.linspace(0, 10, timeSteps)

    # Dimensions: (2 spatial dimensions, numTrials, timeSteps)
    Position1L_t = sp.repeat(sp.transpose(InitialPosition1L)[:,:,sp.newaxis], timeSteps, axis=2)\
                   + sp.multiply( sp.repeat(sp.transpose(v1L)[:, :, sp.newaxis], timeSteps, axis=2), sp.transpose(t) )
    Position2L_t = sp.repeat(sp.transpose(InitialPosition2L)[:,:,sp.newaxis], timeSteps, axis=2) \
                   + sp.multiply( sp.repeat(sp.transpose(v2L)[:, :, sp.newaxis], timeSteps, axis=2), sp.transpose(t) )

    Velocity1L_t = sp.repeat(sp.transpose(v1L)[:, :, sp.newaxis], timeSteps, axis=2)
    Velocity2L_t = sp.repeat(sp.transpose(v2L)[:, :, sp.newaxis], timeSteps, axis=2)
    # print(sp.shape(m1_Arr))
    print("Final X1 Array:" + str(sp.shape(Position1L_t)))
    print("Final X2 Array:" + str(sp.shape(Position2L_t)))
    print("Final V1 Array:" + str(sp.shape(Velocity1L_t)))
    print("Final V2 Array:" + str(sp.shape(Velocity2L_t)))

    # placeholders so that the output has the same format as the other file
    dt = sp.empty(2)
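The Position arrays above broadcast x(t) = x0 + v*t over all trials at once by repeating the initial positions and velocities along a new time axis and multiplying by the time grid. A reduced sketch of the same broadcast; the shapes are made up.

import numpy as np

numTrials, timeSteps = 4, 6
t = np.linspace(0, 10, timeSteps)                      # shared time grid
x0 = np.random.randn(2, numTrials)                     # (spatial dims, trials)
v = np.random.randn(2, numTrials)

# repeat along a new time axis, then x(t) = x0 + v * t
X_t = np.repeat(x0[:, :, np.newaxis], timeSteps, axis=2) \
      + np.repeat(v[:, :, np.newaxis], timeSteps, axis=2) * t
print(X_t.shape)                                       # (2, numTrials, timeSteps)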
Esempio n. 58
0
    def _emmax_permutations(self,
                            snps,
                            phenotypes,
                            num_perm,
                            K=None,
                            Z=None,
                            method='REML'):
        """
                EMMAX permutation test
                Single SNPs
                
                Returns the list of max_pvals and max_fstats 
                """
        lmm = lm.LinearMixedModel(phenotypes)
        lmm.add_random_effect(Z * K * Z.T)

        eig_L = lmm._get_eigen_L_()

        print('Getting variance estimates')
        res = lmm.get_estimates(eig_L, method=method)

        q = 1  # Single SNP is being tested
        p = len(lmm.X.T) + q
        n = lmm.n
        n_p = n - p
        H_sqrt_inv = res['H_sqrt_inv']

        Y = H_sqrt_inv * lmm.Y  #The transformed outputs.
        h0_X = H_sqrt_inv * lmm.X
        (h0_betas, h0_rss, h0_rank, h0_s) = linalg.lstsq(h0_X, Y)
        Y = Y - h0_X * h0_betas

        num_snps = len(snps)
        max_fstat_list = []
        min_pval_list = []
        chunk_size = len(Y)
        print "Working with chunk size: " + str(chunk_size)
        print "and " + str(num_snps) + " SNPs."
        Ys = sp.mat(sp.zeros((chunk_size, num_perm)))

        for perm_i in range(num_perm):
            #print 'Permutation nr. % d' % perm_i
            sp.random.shuffle(Y)
            Ys[:, perm_i] = Y

        min_rss_list = sp.repeat(h0_rss, num_perm)
        for i in range(0, num_snps,
                       chunk_size):  #Do the dot-product in chunks!
            snps_chunk = sp.matrix(snps[i:(i + chunk_size)])
            snps_chunk = snps_chunk * Z.T
            Xs = snps_chunk * (H_sqrt_inv.T)
            Xs = Xs - sp.mat(sp.mean(Xs, axis=1))
            for j in range(len(Xs)):  # for each snp
                (betas, rss_list, rank,
                 sigma) = linalg.lstsq(Xs[j].T, Ys,
                                       overwrite_a=True)  # read the lstsq lit
                for k, rss in enumerate(rss_list):
                    if not rss:
                        print('No predictability in the marker, moving on...')
                        continue
                    if min_rss_list[k] > rss:
                        min_rss_list[k] = rss
                if num_snps >= 10 and (i + j + 1) % (
                        num_snps // num_perm) == 0:  #Print dots
                    sys.stdout.write('.')
                    sys.stdout.flush()

        if num_snps >= 10:
            sys.stdout.write('\n')

        #min_rss = min(rss_list)
        max_f_stats = ((h0_rss / min_rss_list) - 1.0) * n_p / float(q)
        min_pvals = (stats.f.sf(max_f_stats, q, n_p))

        res_d = {'min_ps': min_pvals, 'max_f_stats': max_f_stats}
        print "There are: " + str(len(min_pvals))
        return res_d
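The routine above keeps, for each permutation, the smallest p-value (largest F statistic) over all SNPs; a family-wise significance threshold is then typically taken as an empirical quantile of those minima. A minimal sketch of that last step, using simulated values in place of res_d['min_ps'].

import numpy as np

rng = np.random.default_rng(0)
min_ps = rng.uniform(0, 1e-3, size=1000)     # stand-in for res_d['min_ps']

# 5% family-wise threshold: the 5th percentile of per-permutation minimum p-values
threshold = np.quantile(min_ps, 0.05)
print(threshold)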
Esempio n. 59
0
def lagdict2ionocont(DataLags, NoiseLags, sensdict, simparams, time_vec):
    """This function will take the data and noise lags and create an instance of the
    Ionocontanier class. This function will also apply the summation rule to the lags.
    Inputs
    DataLags - A dictionary """
    # Pull in Location Data
    angles = simparams['angles']
    ang_data = sp.array([[iout[0], iout[1]] for iout in angles])
    rng_vec = simparams['Rangegates']
    # pull in other data
    pulsewidth = len(simparams['Pulse']) * sensdict['t_s']
    txpower = sensdict['Pt']
    if sensdict['Name'].lower() in ['risr', 'pfisr', 'risr-n']:
        Ksysvec = sensdict['Ksys']
    else:

        beamlistlist = sp.array(simparams['outangles']).astype(int)
        inplist = sp.array([i[0] for i in beamlistlist])
        Ksysvec = sensdict['Ksys'][inplist]
        ang_data_temp = ang_data.copy()
        ang_data = sp.array(
            [ang_data_temp[i].mean(axis=0) for i in beamlistlist])

    sumrule = simparams['SUMRULE']
    rng_vec2 = simparams['Rangegatesfinal']
    minrg = -sumrule[0].min()
    maxrg = len(rng_vec) - sumrule[1].max()
    Nrng2 = len(rng_vec2)

    # Copy the lags
    lagsData = DataLags['ACF'].copy()
    # Set up the constants for the lags so they are now
    # in terms of density fluctuations.
    angtile = sp.tile(ang_data, (Nrng2, 1))
    rng_rep = sp.repeat(rng_vec2, ang_data.shape[0], axis=0)
    coordlist = sp.zeros((len(rng_rep), 3))
    [coordlist[:, 0], coordlist[:, 1:]] = [rng_rep, angtile]
    (Nt, Nbeams, Nrng, Nlags) = lagsData.shape
    rng3d = sp.tile(rng_vec[sp.newaxis, sp.newaxis, :, sp.newaxis],
                    (Nt, Nbeams, 1, Nlags)) * 1e3
    ksys3d = sp.tile(Ksysvec[sp.newaxis, :, sp.newaxis, sp.newaxis],
                     (Nt, 1, Nrng, Nlags))
    radar2acfmult = rng3d * rng3d / (pulsewidth * txpower * ksys3d)
    pulses = sp.tile(DataLags['Pulses'][:, :, sp.newaxis, sp.newaxis],
                     (1, 1, Nrng, Nlags))
    time_vec = time_vec[:Nt]
    # Divide the lags by the number of pulses
    lagsData = lagsData / pulses
    # Set up the noise lags and divide out the noise.
    lagsNoise = NoiseLags['ACF'].copy()
    lagsNoise = sp.mean(lagsNoise, axis=2)
    pulsesnoise = sp.tile(NoiseLags['Pulses'][:, :, sp.newaxis], (1, 1, Nlags))
    lagsNoise = lagsNoise / pulsesnoise
    lagsNoise = sp.tile(lagsNoise[:, :, sp.newaxis, :], (1, 1, Nrng, 1))

    # subtract out noise lags
    lagsData = lagsData - lagsNoise
    # Calculate a variance using equation 2 from Hysell's 2008 paper. Don't use the full covariance matrix because it is assumed to be nearly diagonal.

    # multiply the data and the sigma by inverse of the scaling from the radar
    lagsData = lagsData * radar2acfmult
    lagsNoise = lagsNoise * radar2acfmult

    # Apply summation rule
    # lags transposed from (time,beams,range,lag)to (range,lag,time,beams)
    lagsData = sp.transpose(lagsData, axes=(2, 3, 0, 1))
    lagsNoise = sp.transpose(lagsNoise, axes=(2, 3, 0, 1))
    lagsDatasum = sp.zeros((Nrng2, Nlags, Nt, Nbeams), dtype=lagsData.dtype)
    lagsNoisesum = sp.zeros((Nrng2, Nlags, Nt, Nbeams), dtype=lagsNoise.dtype)
    for irngnew, irng in enumerate(sp.arange(minrg, maxrg)):
        for ilag in range(Nlags):
            lagsDatasum[irngnew,
                        ilag] = lagsData[irng + sumrule[0, ilag]:irng +
                                         sumrule[1, ilag] + 1,
                                         ilag].sum(axis=0)
            lagsNoisesum[irngnew,
                         ilag] = lagsNoise[irng + sumrule[0, ilag]:irng +
                                           sumrule[1, ilag] + 1,
                                           ilag].sum(axis=0)
    # Put everything in a parameter list
    Paramdata = sp.zeros((Nbeams * Nrng2, Nt, Nlags), dtype=lagsData.dtype)
    # transpose from (range,lag,time,beams) to (beams,range,time,lag)
    lagsDatasum = sp.transpose(lagsDatasum, axes=(3, 0, 2, 1))
    lagsNoisesum = sp.transpose(lagsNoisesum, axes=(3, 0, 2, 1))
    # Get the covariance matrix
    pulses_s = sp.transpose(pulses, axes=(1, 2, 0, 3))[:, :Nrng2]
    Cttout = makeCovmat(lagsDatasum, lagsNoisesum, pulses_s, Nlags)

    Paramdatasig = sp.zeros((Nbeams * Nrng2, Nt, Nlags, Nlags),
                            dtype=Cttout.dtype)

    curloc = 0
    for irng in range(Nrng2):
        for ibeam in range(Nbeams):
            Paramdata[curloc] = lagsDatasum[ibeam, irng].copy()
            Paramdatasig[curloc] = Cttout[ibeam, irng].copy()
            curloc += 1
    ionodata = IonoContainer(coordlist,
                             Paramdata,
                             times=time_vec,
                             ver=1,
                             paramnames=sp.arange(Nlags) * sensdict['t_s'])
    ionosigs = IonoContainer(
        coordlist,
        Paramdatasig,
        times=time_vec,
        ver=1,
        paramnames=sp.arange(Nlags * Nlags).reshape(Nlags, Nlags) *
        sensdict['t_s'])
    return (ionodata, ionosigs)
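The summation rule above sums each lag over a lag-dependent window of range gates centred on the current gate. A toy sketch of the same indexing on a small 2-D array; the window widths in sumrule are made up.

import numpy as np

Nrng, Nlags = 12, 3
lags = np.arange(Nrng * Nlags, dtype=float).reshape(Nrng, Nlags)
# sumrule[0] / sumrule[1]: offsets below / above the current gate, per lag
sumrule = np.array([[-1, -1, -2],
                    [ 1,  1,  2]])

minrg = -sumrule[0].min()
maxrg = Nrng - sumrule[1].max()
out = np.zeros((maxrg - minrg, Nlags))
for inew, irng in enumerate(range(minrg, maxrg)):
    for ilag in range(Nlags):
        out[inew, ilag] = lags[irng + sumrule[0, ilag]:
                               irng + sumrule[1, ilag] + 1, ilag].sum()
print(out.shape)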
Esempio n. 60
0
 def sorted_csr(csr, only_topk=None):
     assert isinstance(csr, smat.csr_matrix)
     row_idx = sp.repeat(sp.arange(csr.shape[0], dtype=sp.uint32),
                         csr.indptr[1:] - csr.indptr[:-1])
     return smat_util.sorted_csr_from_coo(csr.shape, row_idx, csr.indices,
                                          csr.data, only_topk)
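The sp.repeat call above expands the CSR indptr into one explicit row index per stored value, i.e. COO-style row coordinates in CSR storage order. A tiny sketch of just that expansion on a made-up matrix; smat_util.sorted_csr_from_coo itself is not reproduced here.

import numpy as np
from scipy import sparse

csr = sparse.csr_matrix(np.array([[0, 3, 0],
                                  [2, 0, 1],
                                  [0, 0, 4]]))
# one row index per nonzero, in CSR storage order
row_idx = np.repeat(np.arange(csr.shape[0], dtype=np.uint32),
                    np.diff(csr.indptr))
print(row_idx)          # [0 1 1 2]
print(csr.indices)      # matching column indices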