Example #1
 def bin_data(self, ruv=None, binsize=10, nbins=30, ignore_wt=False):
     """
     Bin UV data; both the vector and scalar averages are calculated,
     creating BinVec and BinSca objects on self (and on self.Model when present).
     
     needs : uvdist_klam
             re, im, wt, amp
     TODO: move core calcs to separate function for reuse
     """
  
     if 'Model' in self.__dict__:
         if ruv is not None:
             uvdist = ruv
         else:
             uvdist = self.Model.uvdist_klam
         mre = self.Model.re
         mim = self.Model.im
         if ignore_wt:
             mwt = _sp.ones_like(self.Model.re)
         else:
             mwt = self.Model.wt
         self.Model.BinVec = uv_bin_vector(uvdist, mre, mim, mwt, binsize=binsize, nbins=nbins)
         self.Model.BinSca = uv_bin_scalar(uvdist, mre, mim, mwt, binsize=binsize, nbins=nbins)
     if ruv is not None:
         uvdist = ruv
     else:
         uvdist = self.uvdist_klam
     if ignore_wt:
         wt = _sp.ones_like(self.re)
     else:
         wt = self.wt
     self.BinVec = uv_bin_vector(uvdist, self.re, self.im, wt, binsize=binsize, nbins=nbins)
     self.BinSca = uv_bin_scalar(uvdist, self.re, self.im, wt, binsize=binsize, nbins=nbins)
Example #2
    def setUp(self):
        SP.random.seed(1)
        self.n_dimensions=2
        self.n_samples = 100
        self.setXy()
        #self.X = SP.rand(self.n_samples,self.n_dimensions)
        covar  = limix.CCovSqexpARD(self.n_dimensions)
        ll  = limix.CLikNormalIso()
        covar_params = SP.array([1,1,1])
        lik_params   = SP.array([0.5])
        hyperparams0 = limix.CGPHyperParams()
        hyperparams0['covar'] = covar_params
        hyperparams0['lik'] = lik_params
        self.constrainU = limix.CGPHyperParams()
        self.constrainL = limix.CGPHyperParams()
        self.constrainU['covar'] = +10*SP.ones_like(covar_params)
        self.constrainL['covar'] = 0*SP.ones_like(covar_params)
        self.constrainU['lik'] = +5*SP.ones_like(lik_params)
        self.constrainL['lik'] = 0*SP.ones_like(lik_params)

        self.gp=limix.CGPbase(covar,ll)
        self.gp.setX(self.X)
        self.gp.setParams(hyperparams0)
        #self.genY()
        self.gp.setY(self.y)
Example #3
def far_fields(radius, frequency, r, theta, phi):
    """
    Calculate the electric and magnetic fields in the far field of the aperture.
    :param r: The range to the field point (m).
    :param theta: The theta angle to the field point (rad).
    :param phi: The phi angle to the field point (rad).
    :param radius: The radius of the aperture (m).
    :param frequency: The operating frequency (Hz).
    :return: The electric and magnetic fields radiated by the aperture (V/m), (A/m).
    """
    # Calculate the wavenumber
    k = 2.0 * pi * frequency / c

    # Calculate the wave impedance
    eta = sqrt(mu_0 / epsilon_0)

    # Calculate the argument for the Bessel function
    z = k * radius * sin(theta)

    # Calculate the Bessel-function term J1(z)/z (its limit as z -> 0 is 0.5)
    bessel_term1 = 0.5 * ones_like(z)
    index = z != 0.0
    bessel_term1[index] = jv(1, z[index]) / z[index]

    # Calculate the Bessel-function derivative term (jnpz is the first zero of J1')
    jnpz = 1.84118378134065
    denominator = (1.0 - (z / jnpz)**2)
    index = denominator != 0.0
    bessel_term2 = 0.377 * ones_like(z)
    bessel_term2[index] = jvp(1, z[index]) / denominator[index]

    # Define the radial-component of the electric far field (V/m)
    e_r = 0.0

    # Define the theta-component of the electric far field (V/m)
    e_theta = 1j * k * radius * jv(1, jnpz) * exp(
        -1j * k * r) / r * sin(phi) * bessel_term1

    # Define the phi-component of the electric far field (V/m)
    e_phi = 1j * k * radius * jv(1, jnpz) * exp(
        -1j * k * r) / r * cos(theta) * cos(phi) * bessel_term2

    # Define the radial-component of the magnetic far field (A/m)
    h_r = 0.0

    # Define the theta-component of the magnetic far field (A/m)
    h_theta = 1j * k * radius * jv(1, jnpz) * exp(
        -1j * k * r) / r * -cos(theta) * cos(phi) / eta * bessel_term2

    # Define the phi-component of the magnetic far field (A/m)
    h_phi = 1j * k * radius * jv(1, jnpz) * exp(
        -1j * k * r) / r * sin(phi) / eta * bessel_term1

    # Return all six components of the far field
    return e_r, e_theta, e_phi, h_r, h_theta, h_phi
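A minimal driver for the function above might look like this sketch. It assumes the bare names in the body (pi, sin, cos, exp, sqrt, ones_like, jv, jvp, c, mu_0, epsilon_0) come from numpy, scipy.special, and scipy.constants, as the usage suggests; the aperture radius, frequency, and range are placeholder values.

import numpy as np
from scipy.constants import pi

# Evaluate a theta cut at phi = 90 deg, r = 1 km, for a 10 cm aperture at 10 GHz
theta = np.linspace(0.0, 0.5 * pi, 181)
phi = 0.5 * pi * np.ones_like(theta)
e_r, e_theta, e_phi, h_r, h_theta, h_phi = far_fields(
    radius=0.1, frequency=10e9, r=1e3, theta=theta, phi=phi)
print(abs(e_theta).max())  # peak co-polarized field on this cut (V/m)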
Example #4
    def bin_data(self, ruv=None, binsize=10, nbins=30, ignore_wt=False):
        """
        Bin UV data; both the vector and scalar averages are calculated,
        creating BinVec and BinSca objects on self (and on self.Model when present).
        
        needs : uvdist_klam
                re, im, wt, amp
        TODO: move core calcs to separate function for reuse
        """

        if 'Model' in self.__dict__:
            if ruv is not None:
                uvdist = ruv
            else:
                uvdist = self.Model.uvdist_klam
            mre = self.Model.re
            mim = self.Model.im
            if ignore_wt:
                mwt = _sp.ones_like(self.Model.re)
            else:
                mwt = self.Model.wt
            self.Model.BinVec = uv_bin_vector(uvdist,
                                              mre,
                                              mim,
                                              mwt,
                                              binsize=binsize,
                                              nbins=nbins)
            self.Model.BinSca = uv_bin_scalar(uvdist,
                                              mre,
                                              mim,
                                              mwt,
                                              binsize=binsize,
                                              nbins=nbins)
        if ruv is not None:
            uvdist = ruv
        else:
            uvdist = self.uvdist_klam
        if ignore_wt:
            wt = _sp.ones_like(self.re)
        else:
            wt = self.wt
        self.BinVec = uv_bin_vector(uvdist,
                                    self.re,
                                    self.im,
                                    wt,
                                    binsize=binsize,
                                    nbins=nbins)
        self.BinSca = uv_bin_scalar(uvdist,
                                    self.re,
                                    self.im,
                                    wt,
                                    binsize=binsize,
                                    nbins=nbins)
Example #5
 def run(self,phase,inlets,throat_prop='throat.capillary_pressure'):
     r'''
     Perform the algorithm
     
     Parameters
     ----------
     phase : OpenPNM Phase object
         The phase to be injected into the Network.  The Phase must have the
         capillary entry pressure values for the system.
         
     inlets : array_like
         The list of inlet pores from which the Phase can enter the Network
         
     throat_prop : string
         The name of the throat property containing the capillary entry
         pressure.  The default is 'throat.capillary_pressure'.
     
     '''
     import heapq as hq
     queue = []
     hq.heapify(queue)
     self._phase = phase
     net = self._net
     # Setup arrays and info
     t_entry = phase[throat_prop]
     t_sorted = sp.argsort(t_entry,axis=0)  # Indices into t_entry giving a sorted list
     t_order = sp.zeros_like(t_sorted)
     t_order[t_sorted] = sp.arange(0,net.Nt)  # Location in sorted list
     t_inv = -sp.ones_like(net.Ts)  # List for tracking throat invasion order
     p_inv = -sp.ones_like(net.Ps)  # List for tracking pore invasion order
     p_inv[inlets] = 0  # Set inlet pores to invaded
     # Perform initial analysis on input pores
     Ts = net.find_neighbor_throats(pores=inlets)
     [hq.heappush(queue,T) for T in t_order[Ts]]  # Push the new throats to the heap
     tcount = 1
     while len(queue) > 0:
         t = hq.heappop(queue)  # Find throat at the top of the queue
         t_next = t_sorted[t]  # Extract actual throat number
         t_inv[t_next] = tcount  # Note invasion sequence
         while (len(queue)>0) and (queue[0] == t):  # If throat is duplicated
             t = hq.heappop(queue)  # Note: Preventing duplicate entries below might save some time here
         Ps = net['throat.conns'][t_next]  # Find pores connected to newly invaded throat
         Ps = Ps[p_inv[Ps]<0]  # Remove already invaded pores from Ps
         if len(Ps)>0:
             p_inv[Ps] = tcount  # Note invasion sequence
             Ts = net.find_neighbor_throats(pores=Ps)  # Find connected throats
             Ts = Ts[t_inv[Ts]<0]  # Remove already invaded throats from Ts
             [hq.heappush(queue,T) for T in t_order[Ts]]  # Add new throats to queue
         tcount += 1
     self['throat.invasion_sequence'] = t_inv
     self['pore.invasion_sequence'] = p_inv
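The queue logic above pushes throats by their rank in a globally sorted entry-pressure list, so heappop always returns the cheapest uninvaded throat even when the same throat was pushed several times. A standalone sketch of that heap-with-duplicates idiom, on made-up data:

import heapq as hq
import numpy as np

t_entry = np.array([0.7, 0.2, 0.9, 0.4])     # hypothetical entry pressures
t_sorted = np.argsort(t_entry)               # throat ids, cheapest first
t_order = np.zeros_like(t_sorted)
t_order[t_sorted] = np.arange(len(t_entry))  # rank of each throat in that list

queue = []
for T in [0, 1, 1, 3]:                       # duplicates may be pushed, as above
    hq.heappush(queue, t_order[T])
while queue:
    t = hq.heappop(queue)                    # cheapest remaining rank
    while queue and queue[0] == t:           # discard duplicate entries
        hq.heappop(queue)
    print('invade throat', t_sorted[t], 'at pressure', t_entry[t_sorted[t]])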
Example #6
 def test_different_windows(self) :
     window1 = sp.ones_like(self.wave1)
     window1 += sp.sin(sp.arange(self.n)/40.0)
     window2 = 2*sp.ones_like(self.wave1)
     window2 += 2*sp.sin(sp.arange(self.n)/62.0)
     window2[window2<0.5] = 0
     wave1 = self.wave1*window1
     wave2 = self.wave1*window2
     power = np.windowed_power(wave1, window1, wave2, window2)
     self.assertAlmostEqual(power[self.mode1]/self.amp1**2/self.n*4, 1.0, 3)
     self.assertTrue(sp.allclose(power[:self.mode1], 0, 
                                 atol=self.amp1**2*self.n/1e3))
     self.assertTrue(sp.allclose(power[self.mode1+1:], 0, 
                                 atol=self.amp1**2*self.n/1e3))
Example #7
    def _set_info(self,element='',label='',locations='',mode='merge'):
        r'''
        This is the actual info setter method, but it should not be called directly.  
        Wrapper methods have been created.  Use set_pore_info and get_pore_info.
        
        See Also
        --------
        set_pore_info, set_throat_info
        '''
        if type(locations)==list: 
            try: locations = getattr(self,'get_'+element+'_indices')(locations)
            except: locations = sp.array(locations,ndmin=1)
        elif type(locations)==sp.ndarray:
            try: locations = getattr(self,'get_'+element+'_indices')(locations)
            except : pass 
        if locations!='':
            
            try: 
                locations = locations.name
                label = locations
            except: pass
            if type(locations)==str: locations = getattr(self,'get_'+element+'_indices')([locations])           
            locations=sp.array(locations,ndmin=1)
            if label:
                if label=='all':
                    try: 
                        old_label = getattr(self,'_'+element+'_info')[label]
                        if sp.shape(old_label)[0]<sp.shape(locations)[0]:
                            getattr(self,'_'+element+'_info')[label] = sp.ones_like(locations,dtype=bool)
                            self._logger.info('label=all for '+element+' has been updated to a bigger size!')
                            for info_labels in getattr(self,'_'+element+'_info').keys():
                                if info_labels!=label:
                                    temp = sp.zeros((getattr(self,'num_'+element+'s')(),),dtype=bool)
                                    temp[old_label] = getattr(self,'_'+element+'_info')[info_labels]
                                    getattr(self,'_'+element+'_info')[info_labels] = temp
                        elif sp.shape(old_label)[0]>sp.shape(locations)[0]: 
                            self._logger.error('To apply a new numbering label (label=all) to '+element+'s, size of the locations cannot be less than total number of '+element+'s!!')
                    except: getattr(self,'_'+element+'_info')[label] = sp.ones_like(locations,dtype=bool)
                else:    
                    try: getattr(self,'_'+element+'_info')[label]
                    except: getattr(self,'_'+element+'_info')[label] = sp.zeros((getattr(self,'num_'+element+'s')(),),dtype=bool)
                    if mode=='overwrite':
                        getattr(self,'_'+element+'_info')[label] = sp.zeros((getattr(self,'num_'+element+'s')(),),dtype=bool)
                        getattr(self,'_'+element+'_info')[label][locations] = True
                    elif mode=='remove':  getattr(self,'_'+element+'_info')[label][locations] = False                           
                    elif mode=='merge':  getattr(self,'_'+element+'_info')[label][locations] = True
            else: self._logger.error('No label has been defined for these locations')                

        elif mode=='remove':  del getattr(self,'_'+element+'_info')[label]
        else:  getattr(self,'_'+element+'_info')[label] = sp.zeros((getattr(self,'num_'+element+'s')(),),dtype=bool)
Example #8
def readMahalih5(filename,des_site):
    """ This function will read the mahali GPS data into a GeoData data structure.
        The user only has to give a filename and name of the desired site.
        Input
            filename - A string that holds the file name.
            des_site - The site name. Should be listed in the h5 file in the
                table sites.
    """
    h5fn = Path(filename).expanduser()

    with h5py.File(str(h5fn), "r", libver='latest') as f:

        despnts = sp.where(f['data']['site']==des_site)[0]
        # TODO: hard coded for now
        doy = f['data']['time'][despnts]
        year = 2015*sp.ones_like(doy,dtype=int)

        TEC = f['data']['los_tec'][despnts]

        nTEC = f['data']['err_los_tec'][despnts]

        vTEC = f['data']['vtec'][despnts]
        az2sat = f['data']['az'][despnts]
        el2sat = f['data']['az'][despnts]

        piercelat = f['data']['pplat'][despnts]
        piercelong = f['data']['pplon'][despnts]
        satnum= f['data']['prn'][despnts]
        recBias = f['data']['rec_bias'][despnts]
        nrecBias = f['data']['err_rec_bias'][despnts]

    # Make the integration time on the order of 15 seconds.
    if (year==year[1]).all():
        unixyear =(datetime(year[0],1,1,0,0,0,tzinfo=UTC) - EPOCH).total_seconds()
        uttime = unixyear + sp.round_(24*3600*sp.column_stack((doy,doy+15./24./3600.)))  # make each record a 15 s integration window
    else:
        (y_u,y_iv) = np.unique(year,return_inverse=True)
        unixyearu = sp.array([(datetime(iy,1,1,0,0,0,tzinfo=UTC) - EPOCH).total_seconds() for iy in y_u])
        unixyear = unixyearu[y_iv]
        uttime = unixyear + 24*3600*sp.column_stack((doy,doy+15./24./3600.))


    data = {'TEC':TEC,'nTEC':nTEC,'vTEC':vTEC,'recBias':recBias,'nrecBias':nrecBias,'satnum':satnum,'az2sat':az2sat,'el2sat':el2sat}
    coordnames = 'WGS84'
    sensorloc = sp.nan*sp.ones(3)
    dataloc = sp.column_stack((piercelat,piercelong, 350e3*sp.ones_like(piercelat)))

    return (data,coordnames,dataloc,sensorloc,uttime)
Example #10
def objectiveFunction(parameters, targetKnots, statisticsByLabelBeforeMotion,
                      samplesByLabelInMotion):

    gyroMisalignmentsAndScales = sp.reshape(parameters[0:9], (3, 3))

    correctedMeasures = sp.transpose(
        utils.extract_and_correct_gyro_motion(gyroMisalignmentsAndScales,
                                              statisticsByLabelBeforeMotion,
                                              samplesByLabelInMotion))

    imuSamplesCount = len(samplesByLabelInMotion['timestamps'])
    correctedMeasures = correctedMeasures[0:imuSamplesCount:3]

    skew, shift = parameters[9], parameters[10]
    imuTimestamps = sp.array(
        samplesByLabelInMotion['timestamps'])[0:imuSamplesCount:3] * skew
    imuTimestamps += sp.ones_like(imuTimestamps) * shift

    targetVelocities = computeTargetVelocities(targetKnots, imuTimestamps)

    residualVectors = targetVelocities - correctedMeasures
    residuals = residualVectors * residualVectors
    residuals = sp.sqrt(residuals[:, 0] + residuals[:, 1] + residuals[:, 2])

    print(sp.dot(residuals, residuals))

    return sp.sqrt(sp.dot(residuals, residuals))
Example #11
 def test_convolve_normalization(self) :
     window = sp.ones_like(self.wave1)
     power = npow.calculate_power(self.wave1)
     window_power = npow.calculate_power(window)
     # Convolving with the window function should do nothing for all ones.
     convolved_power = npow.convolve_power(power, window_power)
     self.assertTrue(sp.allclose(power, convolved_power))
Example #12
def I(k, r, theta, phi, horn_effective_length, horn_height, guide_width):
    """
    Calculate the integral used in the far field calculations.
    :param k: The wavenumber (rad/m).
    :param r: The distance to the field point (m).
    :param theta: The theta angle to the field point (rad).
    :param phi: The phi angle to the field point (rad).
    :param horn_effective_length: The horn effective length (m).
    :param horn_height: The horn height (m).
    :param guide_width: The width of the waveguide feed (m).
    :return: The result of the integral for far field calculations.
    """
    # Calculate the x and y components of the wavenumber
    kx = k * sin(theta) * cos(phi)
    ky = k * sin(theta) * sin(phi)

    # Two separate terms for the Fresnel integrals
    t1 = sqrt(1.0 / (pi * k * horn_effective_length)) * (-k * horn_height * 0.5 - ky * horn_effective_length)
    t2 = sqrt(1.0 / (pi * k * horn_effective_length)) * ( k * horn_height * 0.5 - ky * horn_effective_length)

    # Get the Fresnel sin and cos integrals
    s1, c1 = fresnel(t1)
    s2, c2 = fresnel(t2)

    index = (kx * guide_width != pi) & (kx * guide_width != -pi)
    term2 = -ones_like(kx) / pi
    term2[index] = cos(kx[index] * guide_width * 0.5) / ((kx[index] * guide_width * 0.5) ** 2 - (pi * 0.5) ** 2)

    return exp(1j * ky ** 2 * horn_effective_length / (2.0 * k)) * -1j * guide_width * \
           sqrt(pi * k * horn_effective_length) / (8.0 * r) * exp(-1j * k * r) * term2 * ((c2 - c1) - 1j * (s2 - s1))
Example #13
 def _additionalInit(self):
     # default setting
     if self.slow_constant is None:
         self.slow_constant = max(1, int(self.paramdim / 10.))
     
     # get a few initial samples to work with
     tmp = self.batch_size
     self.batch_size = self.init_samples * tmp
     self._collectGradients()
     self._num_updates += self.init_samples
     self.batch_size = tmp
     
     # mean gradient vector
     self._gbar = mean(self._last_gradients, axis=0)
     # mean squared gradient
     self._vbar = (mean(self._last_gradients ** 2, axis=0) + self.epsilon) * self.slow_constant
     self._vpart = self._gbar ** 2 / self._vbar
     
     # mean diagonal Hessian
     #hs = clip(mean(self._last_diaghessians, axis=0), 1, 1 / self.epsilon)
     #self._hbar = hs * self.slow_constant
     self._hbar = mean(self._last_diaghessians, axis=0)
     
     # time constants
     self._taus = (ones_like(self.parameters) + self.epsilon) * 2#* self.init_samples
     
     # for debugging
     self._print_quantities = [('p', self.parameters),
                               ('tau', self._taus),
                               ('g', self._gbar),
                               ('v', self._vbar),
                               ('vpa', self._vpart),
                               ('h', self._hbar),
                               ]
Example #14
def steepest_descent(X,Y, step=.001, tol=1e-5, maxiter=5000, bounds=[-5,5], res=.1):
    w = betahat(X,Y)
    a = sp.exp(X.dot(w))
    yhat = (a.T/sp.sum(a, axis=1)).T
    grad = X.T.dot(yhat-Y)
    while la.norm(grad)>tol and maxiter > 0:
        w = w - grad*step
        a=sp.exp(X.dot(w))
        yhat = (a.T/sp.sum(a,axis=1)).T
        grad = X.T.dot(yhat-Y)
        maxiter -= 1

    rang = sp.arange(bounds[0],bounds[1]+res, res)
    Xg, Yg = sp.meshgrid(rang, rang)
    
    Xm = sp.c_[sp.ones_like(Xg.flatten()), Xg.flatten(), Yg.flatten()]
    Xmdot = Xm.dot(w)
    
    types = Xmdot.argmax(axis=1)
    print(Xm.shape)
    #plot the regions
    c = ['b','r','g']
    for i in range(Xmdot.shape[1]):
        plot_on = types==i
        plt.plot(Xm[plot_on,1], Xm[plot_on,2], c[i]+'.', X[Y[:,i]==1,1], X[Y[:,i]==1,2], c[i]+'o')

    #plot the data segmented
    #tmp = sp.where(Ydat[:,0]==True)[0]
    #plt.plot(Xdat[tmp,1], Xdat[tmp,2], 'o', Xdat[~tmp,1], Xdat[~tmp,2], 'o')
    plt.show()
Example #15
	def shotnoise(self,kmin=5e-4,kmax=10.):
		ones = scipy.ones_like(self.k)
		mask = self.k>kmax
		ones[mask] *= scipy.exp(-(self.k[mask]/kmax-1)**2)
		mask = self.k<kmin
		ones[mask] *= scipy.exp(-(kmin/self.k[mask]-1)**2)
		return ones
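For reference, the same taper written against a bare array (hypothetical grid; numpy stands in for the scipy alias used above). The weight is exactly 1 inside [kmin, kmax] and rolls off as a Gaussian outside:

import numpy as np

k = np.logspace(-4, 1.5, 200)
kmin, kmax = 5e-4, 10.0
ones = np.ones_like(k)
mask = k > kmax
ones[mask] *= np.exp(-(k[mask] / kmax - 1) ** 2)
mask = k < kmin
ones[mask] *= np.exp(-(kmin / k[mask] - 1) ** 2)
print(ones.min(), ones.max())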
Example #16
def nano_henshin_hou(thetas, kplanets, tags, fixed_values, anticoor):
    t2 = sp.ones_like(thetas)
    for i in range(kplanets):
        if tags[i][0]:
            t2[i * 5] = sp.exp(thetas[i * 5])
            print('changed period! (devs note)')
        if tags[i][1]:
            Ask = thetas[i * 5 + 1]
            Ack = thetas[i * 5 + 2]
            Ak = Ask**2 + Ack**2
            Phasek = sp.where(Ask >= 0, sp.arccos(Ack / (Ak**0.5)),
                              2 * sp.pi - sp.arccos(Ack / (Ak**0.5)))
            t2[i * 5 + 1] = Ak
            t2[i * 5 + 2] = Phasek
            print('changed amplitude! (devs note)')
        if tags[i][2]:
            Sk = thetas[i * 5 + 3]
            Ck = thetas[i * 5 + 4]
            ecck = Sk**2 + Ck**2
            wk = sp.where(Sk >= 0, sp.arccos(Ck / (ecck**0.5)),
                          2 * sp.pi - sp.arccos(Ck / (ecck**0.5)))
            t2[i * 5 + 3] = ecck
            t2[i * 5 + 4] = wk
            print('changed eccentricity! (devs note)')
    for i in range(len(thetas))[5 * kplanets:]:
        t2[i] = thetas[i]
    return t2
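The (Ask, Ack) branch above is a phase recovery: it is equivalent to arctan2(Ask, Ack) wrapped to [0, 2*pi). A quick numpy check of that equivalence:

import numpy as np

Ask = np.array([0.3, -0.3])
Ack = np.array([0.4, 0.4])
Ak = Ask**2 + Ack**2
Phasek = np.where(Ask >= 0, np.arccos(Ack / Ak**0.5),
                  2 * np.pi - np.arccos(Ack / Ak**0.5))
assert np.allclose(Phasek, np.mod(np.arctan2(Ask, Ack), 2 * np.pi))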
Example #17
	def derived_parameters(self,values,errors={}):
		sigma8 = self.model_sp.sigma8*values.get('rsigma8',1.)
		dvalues = {par:1.*val for par,val in values.items() if par in self.fitargs}
		def mults8(key):
			for par in ['f','b']:
				if key.startswith(par) and not key.startswith('beta') and 'sigma8' not in key: return True
			return False
		for key in dvalues.keys():
			if mults8(key): dvalues['{}sigma8'.format(key)] = values[key]*sigma8
		derrors = {par:1.*val for par,val in errors.items() if par in self.fitargs}
		for key in derrors.keys():
			if key in self.vary: derrors[key] = scipy.sqrt(self.scale_parameter_covariance[key])*errors[key]
			if mults8(key): derrors['{}sigma8'.format(key)] = derrors[key]*sigma8
		dvalues['sigma8'] = sigma8
		tmp = list(values.values())[0]
		if not scipy.isscalar(tmp): dvalues['sigma8'] = scipy.ones_like(tmp)*dvalues['sigma8'] # to get the correct shape
		derrors['sigma8'] = 0.*dvalues['sigma8']
		dlatex = {par:val for par,val in self.latex.items()}
		for key in dlatex.keys():
			if mults8(key): dlatex['{}sigma8'.format(key)] = '{}\\sigma_{{8}}'.format(self.latex[key])
		dlatex['sigma8'] = '\\sigma_{{8}}'
		dsorted = pyspectrum.utils.sorted_parameters(dlatex.keys())
		params = {key:val for key,val in self.params.items()}
		params['scalecov'] = self.scale_parameter_covariance
		params['scalemockcov'] = self.scale_mock_parameter_covariance

		return dvalues,derrors,dlatex,dsorted,params
Example #18
def fast_xcf(z1, r1, rdm1, w1, d1, z2, r2, rdm2, w2, ang):
    if ang_correlation:
        rp = r1[:, None] / r2
        rt = ang * sp.ones_like(rp)
    else:
        rp = (r1[:, None] - r2) * sp.cos(ang / 2)
        rt = (rdm1[:, None] + rdm2) * sp.sin(ang / 2)
    z = (z1[:, None] + z2) / 2

    we = w1[:, None] * w2
    wde = (w1 * d1)[:, None] * w2

    w = (rp > rp_min) & (rp < rp_max) & (rt < rt_max)
    rp = rp[w]
    rt = rt[w]
    z = z[w]
    we = we[w]
    wde = wde[w]

    bp = ((rp - rp_min) / (rp_max - rp_min) * np).astype(int)
    bt = (rt / rt_max * nt).astype(int)
    bins = bt + nt * bp

    cd = sp.bincount(bins, weights=wde)
    cw = sp.bincount(bins, weights=we)
    crp = sp.bincount(bins, weights=rp * we)
    crt = sp.bincount(bins, weights=rt * we)
    cz = sp.bincount(bins, weights=z * we)
    cnb = sp.bincount(bins, weights=(we > 0.))

    return cw, cd, crp, crt, cz, cnb
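The line bins = bt + nt * bp folds the 2D (rp, rt) grid into flat indices so that each weighted histogram becomes a single bincount call (np and nt here appear to be module-level bin counts, not the numpy alias). A toy illustration with the bin counts renamed:

import numpy as np

n_p, n_t = 4, 3                      # number of rp and rt bins
bp = np.array([0, 0, 2, 3])          # rp bin of each pair
bt = np.array([1, 1, 0, 2])          # rt bin of each pair
we = np.array([1.0, 0.5, 2.0, 1.0])  # weights

bins = bt + n_t * bp                 # flatten (bp, bt) into one index
cw = np.bincount(bins, weights=we, minlength=n_p * n_t)
print(cw.reshape(n_p, n_t))          # recover the 2D weighted histogram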
Example #19
    def getGPR(self, M, opt=True):
        """create a GP object, optimize hyperparameters for M[0]: x , M[1]: multiple time series y"""

        [x, y] = self.M2GPxy(M)

        logtheta = self.logtheta0

        if self.robust:
            gpr = GPREP.GPEP(covar=self.covar,
                             Nep=3,
                             likelihood=self.likelihood,
                             Smean=True,
                             x=x,
                             y=y)
            pass
        else:
            gpr = GPR.GP(self.covar, Smean=True, x=x, y=y)
        if opt:
            Ifilter = S.ones_like(logtheta)
            LG.debug("opt")
            LG.debug('priors: %s' % str(self.priors))
            logtheta = GPR.optHyper(gpr,
                                    logtheta,
                                    Ifilter=Ifilter,
                                    priors=self.priors,
                                    maxiter=self.maxiter)

        gpr.logtheta = logtheta
        gpr.priors = self.priors
        return gpr
Example #20
def incidence_matrices(relation):
    results = {}
    with tqdm(total=len(relation)) as pbar:
        for i, (subreddit, group) in enumerate(relation.groupby('subreddit')):
            links = sp.array(sorted(sp.concatenate(group.link_ids.tolist())))
            authors = sp.array(sorted(group.author))

            rs, cs = [], []
            for _, row in group.iterrows():
                r = sp.searchsorted(authors, row.author)
                c = sp.searchsorted(links, row.link_ids)

                rs.append(sp.full_like(c, r))
                cs.append(c)
            rs, cs = sp.concatenate(rs), sp.concatenate(cs)
            vals = sp.ones_like(rs)

            incidence = sp.sparse.csr_matrix((vals, (rs, cs)),
                                             (len(authors), len(links)))
            results[subreddit] = {
                'incidence': incidence,
                'authors': authors,
                'links': links
            }

            pbar.update(len(group))

    return results
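The core trick here is searchsorted against a sorted vocabulary to turn names into row and column indices for a sparse incidence matrix. A self-contained toy version:

import numpy as np
from scipy import sparse

authors = np.array(['ann', 'bob', 'eve'])        # sorted author names
links = np.array(['L1', 'L2', 'L3', 'L4'])       # sorted link ids
rows = np.searchsorted(authors, ['bob', 'ann'])  # author of each comment
cols = np.searchsorted(links, ['L2', 'L4'])      # link of each comment
vals = np.ones_like(rows)
incidence = sparse.csr_matrix((vals, (rows, cols)),
                              shape=(len(authors), len(links)))
print(incidence.toarray())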
Example #21
 def _get_indices(self,element,labels,return_indices,mode):
     r'''
     This is the actual method for getting indices, but should not be called
     directly.  
     '''
     if mode == 'union':
         union = sp.zeros_like(self._get_info(element=element,label='all'),dtype=bool)
         for item in labels: #iterate over labels list and collect all indices
             union = union + self._get_info(element=element,label=item)
         ind = union
     elif mode == 'intersection':
         intersect = sp.ones_like(self._get_info(element=element,label='all'),dtype=bool)
         for item in labels: #iterate over labels list and collect all indices
             intersect = intersect*self._get_info(element=element,label=item)
         ind = intersect
     elif mode == 'not_intersection':
         not_intersect = sp.zeros_like(self._get_info(element=element,label='all'),dtype=int)
         for item in labels: #iterate over labels list and collect all indices
             info = self._get_info(element=element,label=item)
             not_intersect = not_intersect + sp.int8(info)
         ind = (not_intersect == 1)
     elif mode == 'none':
         none = sp.zeros_like(self._get_info(element=element,label='all'),dtype=int)
         for item in labels: #iterate over labels list and collect all indices
             info = self._get_info(element=element,label=item)
             none = none - sp.int8(info)
         ind = (none == 0)
     if return_indices: ind = sp.where(ind==True)[0]
     return ind
Example #22
def compactness(geometry, throat_perimeter='throat.perimeter',
                throat_area='throat.area', **kwargs):
    r"""
    Mortensen et al. have shown that the Hagen-Poiseuille hydraulic resistance is
    linearly dependent on the compactness, defined as perimeter^2/area.
    The dependence is not universal: shapes with sharp corners provide more
    resistance than more elliptical ones, so the number of vertices is counted
    and the appropriate correction applied.
    """
    # Only apply to throats with an area
    ts = geometry.throats()[geometry[throat_area] > 0]
    P = geometry[throat_perimeter]
    A = geometry[throat_area]
    C = _sp.ones(geometry.num_throats())
    C[ts] = P[ts]**2/A[ts]
    verts = geometry['throat.offset_vertices']
    alpha = _sp.ones_like(C)
    for i in ts:
        if len(verts[i]) == 3:
            # Triangular Correction
            alpha[i] = C[i]*(25/17) + (40*_sp.sqrt(3)/17)
        elif len(verts[i]) == 4:
            # Rectangular Correction
            alpha[i] = C[i]*(22/7) - (65/3)
        elif len(verts[i]) > 4:
            # Approximate Elliptical Correction
            alpha[i] = C[i]*(8/3) - (8*_sp.pi/3)
    # For a perfect circle alpha = 8*pi so normalize by this
    alpha /= 8*_sp.pi
    # Very small throats could have values less than one
    alpha[alpha < 1.0] = 1.0

    return alpha
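As a sanity check on the corrections above: a circle has C = P^2/A = 4*pi, and the elliptical correction then gives alpha = 4*pi*(8/3) - 8*pi/3 = 8*pi, so after normalization a circular throat comes out at exactly 1:

import numpy as np

C_circle = (2 * np.pi) ** 2 / np.pi                   # P^2/A for a unit circle = 4*pi
alpha = C_circle * (8.0 / 3.0) - (8.0 * np.pi / 3.0)  # elliptical correction
print(alpha / (8.0 * np.pi))                          # ~1.0: the circular baseline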
Example #23
def plotGyroFits(gyroDataSet,
                 misalignmentsAndScales,
                 skew,
                 shift,
                 targetKnots=None):

    figureNumber = 0
    statisticsByLabelBeforeMotion, samplesByLabelInMotion = gyroDataSet
    correctedMeasures = utils.extract_and_correct_gyro_motion(
        misalignmentsAndScales, statisticsByLabelBeforeMotion,
        samplesByLabelInMotion)
    timestamps = sp.array(gyroDataSet[1]['timestamps']) * skew
    timestamps += sp.ones_like(timestamps) * shift

    if targetKnots is not None:
        plots.plot_targets(
            regression_gyro.computeTargetVelocities(targetKnots, timestamps),
            timestamps, figureNumber, (timestamps[0], timestamps[-1]), math.pi)

    plots.plot_motion(correctedMeasures, timestamps,
                      "Gyro measurements vs CNC targets", figureNumber,
                      (timestamps[0], timestamps[-1]), math.pi)

    figureNumber += 1

    plots.show_figures()
Example #24
def compactness(target, throat_perimeter='throat.perimeter',
                throat_area='throat.area'):
    r"""
    Mortensen et al. have shown that the Hagen-Poiseuille hydraulic resistance
    is linearly dependent on the compactness, defined as perimeter^2/area.
    The dependence is not universal: shapes with sharp corners provide more
    resistance than more elliptical ones, so the number of vertices is counted
    and the appropriate correction applied.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.

    throat_perimeter : string
        The dictionary key of the array containing the throat perimeter values.

    throat_area : string
        The dictionary key of the array containing the throat area values.

    Returns
    -------
    alpha : NumPy ndarray
        Array containing throat compactness values.

    References
    ----------
    Mortensen N.A, Okkels F., and Bruus H. Reexamination of Hagen-Poiseuille
    flow: Shape dependence of the hydraulic resistance in microchannels.
    Physical Review E, v.71, pp.057301 (2005).

    """
    # Only apply to throats with an area
    ts = target.throats()[target[throat_area] > 0]
    P = target[throat_perimeter]
    A = target[throat_area]
    C = _sp.ones(target.num_throats())
    C[ts] = P[ts]**2/A[ts]
    alpha = _sp.ones_like(C)*8*_sp.pi
    if 'throat.offset_vertices' in target.props():
        verts = target['throat.offset_vertices']
        for i in ts:
            if ~_sp.any(_sp.isnan(verts[i])):
                if len(verts[i]) == 3:
                    # Triangular Correction
                    alpha[i] = C[i]*(25/17) + (40*_sp.sqrt(3)/17)
                elif len(verts[i]) == 4:
                    # Rectangular Correction
                    alpha[i] = C[i]*(22/7) - (65/3)
                elif len(verts[i]) > 4:
                    # Approximate Elliptical Correction
                    alpha[i] = C[i]*(8/3) - (8*_sp.pi/3)
    # For a perfect circle alpha = 8*pi so normalize by this
    alpha /= 8*_sp.pi
    # Very small throats could have values less than one
    alpha[alpha < 1.0] = 1.0
    return alpha
Example #25
def plot3All(a, ps):
    '''
    Plots all file data for a target and baselines falling before and after.

    Parameters
    ----------
    a  : list of fileClass objects loaded from the .pkl file.
      0-indexed list of all file data read and processed.
    ps : class containing plotting choices
      See construction in ipPlot()
    '''
    tArr = ps.tar + sp.array([-1, 1, 0], dtype=int)
    for idx in range(3):
        t = tArr[idx]
        # Marker size.
        mrkSize = 3
        # Legend text.
        legStr = ('%d %s (%d packets)' %
                  (a[t].fileNum, a[t].descript, a[t].pktCount))
        freq = a[t].freq
        mask = sp.ones_like(freq, dtype=bool)
        # Mask out 60 Hz, if requested.
        if ps.omit60Hz:
            mask[freq == 60] = False
        if ps.h135:
            mask[3:] = False
            mrkSize = 6
        freq = freq[mask]
        # Rotate through each packet in the file and plot.
        for pkt in range(a[t].pktCount):
            if ps.plotThis == 'zPhase':
                yVal = a[t].phaseDiff[ps.ch, pkt, :]  # (mrad)
            elif ps.plotThis == 'zMag':
                yVal = a[t].zMag[ps.ch, pkt, :]  # (mOhm)
            yVal = yVal[mask]
            # Plot the results.
            if pkt == 1:
                legStr = '_nolegend_'
            plt.plot(freq,
                     yVal,
                     color=ps.meanCol[idx],
                     markersize=mrkSize,
                     marker='o',
                     label=legStr)

    at = a[ps.tar]
    titleStr = ('%s Ch %d (%s). xmitFund = %.1f Hz. %s %s' %
                (at.fileDateStr, ps.ch, at.measStr[ps.ch], at.xmitFund,
                 at.major, at.minor))
    if ps.omit60Hz:
        titleStr += ' 60 Hz omitted.'
    # Wrap text at a set character length.
    titleStr = '\n'.join(wrap(titleStr, 75))
    plt.title(titleStr)
    plt.xlabel('Frequency (Hz)')
    if ps.plotThis == 'zPhase':
        plt.ylabel('Impedance Phase (mrad)')
    elif ps.plotThis == 'zMag':
        plt.ylabel(r'Impedance Amplitude (m$\Omega$)')
Example #26
 def test_find_path_with_weights(self):
     w = sp.ones_like(self.net.Ts)
     w[0] = self.net.Nt + 1
     a = misc.find_path(network=self.net,
                        pore_pairs=([0, 1]),
                        weights=w)
     assert len(a['pores'][0]) > 2
     assert len(a['throats'][0]) > 1
Example #27
 def predict(self, XTest=None, k=None, mean=None):
     # k is cross covariance KTestTrain
     self.mean = mean
     if self.mean is None:
         self.mean = SP.ones_like(self.yTrain)*self.yTrain.mean()
     if k is None:
         k = self.kernel
     Core = SP.dot(k, LA.inv((self.kernel + SP.eye(self.nTrain)*self.delta)))
     return SP.dot(Core, self.yTrain-self.mean)
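This is the standard GP posterior mean k (K + delta*I)^{-1} (y - m), except that the prior mean is not added back, so the method returns only the data-driven correction term. A toy numpy re-derivation (all values hypothetical):

import numpy as np

K = np.array([[1.0, 0.5], [0.5, 1.0]])  # train-train kernel
k = np.array([[0.8, 0.3]])              # test-train cross covariance
y = np.array([1.0, 2.0])
delta = 0.1
mean = y.mean() * np.ones_like(y)

core = k @ np.linalg.inv(K + np.eye(2) * delta)
print(core @ (y - mean))                # the mean-subtracted prediction returned above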
Example #28
def planeFromPoints(points):
    """ Produces a plane from N points using least squares, and returns of the form:
        Z = aX + bY + c
        Follows logic from http://www.velocityreviews.com/forums/t368189-re-linear-regression-in-3-dimensions.html
    """
    x, y, z = zip(*points)
    A = column_stack([x, y, ones_like(x)])
    abc, residuals, rank, s = lstsq(A,z)
    return abc
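A usage sketch, assuming column_stack, ones_like, and lstsq are the numpy functions the names suggest: points sampled from a known plane should return its coefficients.

# Points on the plane z = 2x - y + 3, recovered via least squares
points = [(0, 0, 3), (1, 0, 5), (0, 1, 2), (1, 1, 4)]
a, b, c = planeFromPoints(points)
print(a, b, c)  # -> approximately 2.0, -1.0, 3.0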
Example #29
 def test_convolve(self) :
     window = sp.ones_like(self.wave1)
     window += sp.sin(sp.arange(self.n)/50.0)
     self.wave1 *= window
     power = npow.calculate_power(self.wave1)
     window_power = npow.calculate_power(window)
     deconvolved_power = npow.deconvolve_power(power, window_power)
     reconvolved_power = npow.convolve_power(deconvolved_power, window_power)
     self.assertTrue(sp.allclose(power, reconvolved_power))
Example #30
 def test_window_with_zeros(self) :
     window = sp.ones_like(self.wave1)
     window += sp.sin(sp.arange(self.n)/50.0)
     self.wave1 *= window
     power = np.windowed_power(self.wave1, window)
     self.assertAlmostEqual(power[self.mode1]/self.amp1**2/self.n*4, 1.0, 3)
     self.assertTrue(sp.allclose(power[:self.mode1], 0, 
                                 atol=self.amp1**2*self.n/1e3))
     self.assertTrue(sp.allclose(power[self.mode1+1:], 0, 
                                 atol=self.amp1**2*self.n/1e3))
Example #31
def lower_confidence_bound(x_test, x_obs, y_obs, p=0.01, **kwargs):
    if len(x_obs) == 0:
        return sp.inf*sp.ones_like(x_test)
    
    mu_test, sigma_test = evaluate(x_test, x_obs, y_obs, **kwargs)
        
    std_test = sp.sqrt(sp.diag(sigma_test))
    lcb = mu_test.flatten() + sp.stats.norm.ppf(p)*std_test

    return lcb
Example #32
def filt(k,kcut,beta,alph):

    #SPINS default parameters: 0.6, 2.0, 20.0

    knyq = max(k)
    kxcut = kcut*knyq
    filt = np.ones_like(k)
    filt = np.exp(-alph*((np.absolute(k) - kxcut)/(knyq - kxcut))**beta)*(np.absolute(k)>kxcut) + (np.absolute(k)<=kxcut)

    return filt
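A usage sketch on a hypothetical spectral grid, with the SPINS defaults noted in the comment; inside the cutoff the filter is exactly 1:

import numpy as np

k = np.fft.fftfreq(128, d=1.0 / 128)        # sample wavenumbers
f = filt(k, kcut=0.6, beta=2.0, alph=20.0)
print(f[np.abs(k) <= 0.6 * k.max()].min())  # -> 1.0 inside the cutoff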
Example #33
 def controlJacobian(self,v):
     eps = self.param['eps']
     # compute weights per bin for perturbation 
     # w = 1 + eps * v/rho
     w_bin = 1. + eps * v/norm(v)/self.u
     # transform to weights per particle
     w_part = w_bin[self.bin]
     total = scipy.sum(w_part)/self.N #divide by Nlarge
     # note that the perturbed state is no longer a probability distribution
     # so norming the histograms is not entirely correct
     u_eps_Dt = total*self.restriction.restrict(self.x_Dt,self.grid,self.domain,w=w_part)
     c = self.control
     # print "self.control", c,
     # print self.lambd
      if c is None:
          print("no control variable ...")
          control = scipy.zeros_like(v)
      else:
          print("control variable ...")
         eps = self.param['eps']
         skip = self.skip
         Nlarge = self.param['Nlarge']
         Nsmall = self.param['Nsmall']
         w_part_large = scipy.ones_like(c.x)
         w_part_small = scipy.ones_like(self.x)
         w_bin_eps = 1. + eps * v/norm(v)/c.u
          w_eps_part_large = w_bin_eps[c.bin]
          w_eps_part_small = w_bin_eps[c.bin[skip//2:Nlarge:skip]]
         total_large = scipy.sum(w_part_large)/Nlarge
         total_small = scipy.sum(w_part_small)/Nsmall
         u_Dt_large = total_large*self.restriction.restrict(\
             c.x_Dt,c.grid,c.domain,w=w_part_large)
         u_eps_Dt_large = total_large*self.restriction.restrict(\
             c.x_Dt,c.grid,c.domain,w=w_eps_part_large)
         control_large = u_eps_Dt_large - u_Dt_large
          u_Dt_small = total_small*self.restriction.restrict(\
              c.x_Dt[skip//2:Nlarge:skip],c.grid,c.domain,w=w_part_small)
          u_eps_Dt_small = total_small*self.restriction.restrict(\
              c.x_Dt[skip//2:Nlarge:skip],c.grid,c.domain,w=w_eps_part_small)
         control_small = (u_eps_Dt_small - u_Dt_small)
         control = (control_small - control_large)/eps*norm(v)
     return control      
Example #34
 def test_non_zero_window(self) :
     window = sp.ones_like(self.wave1)
     window += 0.5*sp.sin(sp.arange(self.n)/15.0)
     self.wave1 *= window
     power = npow.windowed_power(self.wave1, window)
     power = npow.prune_power(power)
     self.assertAlmostEqual(power[self.mode1]/self.amp1**2/self.n*4, 1.0, 3)
     self.assertTrue(sp.allclose(power[:self.mode1], 0, 
                                 atol=self.amp1**2*self.n/1e3))
     self.assertTrue(sp.allclose(power[self.mode1+1:], 0, 
                                 atol=self.amp1**2*self.n/1e3))
Example #35
 def test_no_window(self) :
     window = sp.ones_like(self.wave1)
     power = np.windowed_power(self.wave1, window)
     self.assertAlmostEqual(power[self.mode1]/self.amp1**2/self.n*4, 1)
     self.assertTrue(sp.allclose(power[:self.mode1], 0, 
                                 atol=self.amp1**2*self.n/1e15))
     self.assertTrue(sp.allclose(power[self.mode1+1:], 0, 
                                 atol=self.amp1**2*self.n/1e15))
     # With no window, we have a quick way to the answer
     quick_power = (abs(fft.fft(self.wave1))**2)[:self.n//2]/self.n
     self.assertTrue(sp.allclose(power, quick_power))
Example #36
    def train(self,jitter=1e-4):
        """train vds module"""
        covar  = limix.CSumCF()
        covar_params = []
        for K in self.Ks:
            covar.addCovariance(limix.CFixedCF(K+jitter*sp.eye(self.N)))
            covar_params.append(sp.ones(1)/sp.sqrt(self.n_terms))
        if self.noise == 'correlated':
            _cov1 = limix.CFixedCF(sp.eye(int(self.Y.shape[0]/2)))
            self.cov_noise = limix.CFixedDiagonalCF(limix.CFreeFormCF(2),sp.ones(2))
            self.cov_noise.setParamMask(sp.array([0,1,0]))
            self.Knoise = limix.CKroneckerCF(_cov1,self.cov_noise)
            covar.addCovariance(self.Knoise)
            covar_params.append(sp.ones(1)/sp.sqrt(self.n_terms))
            covar_params.append(sp.array([1.,1e-3,1.]))
        elif self.noise != 'off':
            covar.addCovariance(limix.CFixedCF(sp.eye(self.N)))
            covar_params.append(sp.ones(1)/sp.sqrt(self.n_terms))

        hyperparams = limix.CGPHyperParams()
        covar_params = sp.concatenate(covar_params)
        hyperparams['covar'] = covar_params
        constrainU = limix.CGPHyperParams()
        constrainL = limix.CGPHyperParams()
        constrainU['covar'] = +5*sp.ones_like(covar_params)
        constrainL['covar'] = -5*sp.ones_like(covar_params)

        self.gp=limix.CGPbase(covar,limix.CLikNormalNULL())
        self.gp.setY(self.Y)
        lml0 = self.gp.LML(hyperparams)
        dlml0 = self.gp.LMLgrad(hyperparams)
        gpopt = limix.CGPopt(self.gp)
        gpopt.setOptBoundLower(constrainL)
        gpopt.setOptBoundUpper(constrainU)

        t1 = time.time()
        conv = gpopt.opt()
        t2 = time.time()

        RV = {'Converged': conv, 'time': t2-t1}
        return RV
Example #37
 def test_no_window(self) :
     window = sp.ones_like(self.wave1)
     power = npow.windowed_power(self.wave1, window)
     power = npow.prune_power(power)
     self.assertAlmostEqual(power[self.mode1]/self.amp1**2/self.n*4, 1)
     self.assertTrue(sp.allclose(power[:self.mode1], 0, 
                                 atol=self.amp1**2*self.n/1e15))
     self.assertTrue(sp.allclose(power[self.mode1+1:], 0, 
                                 atol=self.amp1**2*self.n/1e15))
     # With no window, we have a quick way to the answer
     quick_power = npow.calculate_power(self.wave1)
     self.assertTrue(sp.allclose(power, quick_power[:self.n//2]))
Example #38
def runinversion(basedir,configfile,acfdir='ACF',invtype='tik'):
    """ """
    costdir = os.path.join(basedir,'Cost')
    
    pname=os.path.join(costdir,'cost{0}-{1}.pickle'.format(acfdir,invtype))
    pickleFile = open(pname, 'rb')
    alpha_arr=pickle.load(pickleFile)[-1]
    pickleFile.close()
    
    ionoinfname=os.path.join(basedir,acfdir,'00lags.h5')
    ionoin=IonoContainer.readh5(ionoinfname)
    
    dirio = ('Spectrums','Mat','ACFMat')
    inputdir = os.path.join(basedir,dirio[0])
    
    dirlist = glob.glob(os.path.join(inputdir,'*.h5'))
    (listorder,timevector,filenumbering,timebeg,time_s) = IonoContainer.gettimes(dirlist)
    Ionolist = [dirlist[ikey] for ikey in listorder]
    if acfdir.lower()=='acf':
        ionosigname=os.path.join(basedir,acfdir,'00sigs.h5') 
        ionosigin=IonoContainer.readh5(ionosigname)
        nl,nt,np1,np2=ionosigin.Param_List.shape
        sigs=ionosigin.Param_List.reshape((nl*nt,np1,np2))
        sigsmean=sp.nanmean(sigs,axis=0)
        sigdiag=sp.diag(sigsmean)
        sigsout=sp.power(sigdiag/sigdiag[0],.5).real
        alpha_arr=sp.ones_like(alpha_arr)*alpha_arr[0]
    
        acfloc='ACFInv'
    elif acfdir.lower()=='acfmat':
        mattype='matrix'
        acfloc='ACFMatInv'
    mattype='sim'
    RSTO = RadarSpaceTimeOperator(Ionolist,configfile,timevector,mattype=mattype)  
    if 'perryplane' in basedir.lower() or 'simpdata' in basedir.lower():
        rbounds=[-500,500]
    else:
        rbounds=[0,500]
    
    ionoout=invertRSTO(RSTO,ionoin,alpha_list=alpha_arr,invtype=invtype,rbounds=rbounds)[0]
    outfile=os.path.join(basedir,acfloc,'00lags{0}.h5'.format(invtype))
    ionoout.saveh5(outfile)
    if acfdir=='ACF':
        lagsDatasum=ionoout.Param_List
        # !!! This is done to speed up development 
        lagsNoisesum=sp.zeros_like(lagsDatasum)
        Nlags=lagsDatasum.shape[-1]
        pulses_s=RSTO.simparams['Tint']/RSTO.simparams['IPP']
        Ctt=makeCovmat(lagsDatasum,lagsNoisesum,pulses_s,Nlags)
        outfile=os.path.join(basedir,acfloc,'00sigs{0}.h5'.format(invtype))
        ionoout.Param_List=Ctt
        ionoout.Param_Names=sp.repeat(ionoout.Param_Names[:,sp.newaxis],Nlags,axis=1)
        ionoout.saveh5(outfile)
Example #39
 def _additionalInit(self):
     vSGD._additionalInit(self)
     #h2s = clip(mean(self._last_diaghessians ** 2, axis=0), 1, 1 / self.epsilon)
     #self._vhbar = h2s * self.slow_constant #** 2     
     if self.init_samples > 0:
         self._vhbar = mean(self._last_diaghessians ** 2, axis=0)
     else:
         self._hbar = zeros_like(self.parameters)
         self._vhbar = ones_like(self.parameters) * self.epsilon 
     self._hpart = (self._hbar+self.epsilon) / (self._vhbar + self.epsilon)
     self._print_quantities.extend([('vh', self._vhbar),
                                    ('hpa', self._hpart),
                                    ])   
Example #40
 def _generate_pores(self):
     r"""
     Generate the pores (coordinates, numbering and types)
     """
     Nx = self._Nx
     Ny = self._Ny
     Nz = self._Nz
     Lc = self._Lc
     Np = Nx*Ny*Nz
     ind = sp.arange(0,Np)
     self['pore.all'] = sp.ones_like(ind,dtype=bool)
     pore_coords = Lc/2+Lc*sp.array(sp.unravel_index(ind, dims=(Nx, Ny, Nz), order='F'),dtype=sp.float64).T
     self['pore.coords'] = pore_coords
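The coordinate line builds lattice pore centres from flat indices via unravel_index; note that newer numpy renamed the dims keyword to shape, so this standalone sketch passes it positionally:

import numpy as np

Nx, Ny, Nz, Lc = 2, 2, 1, 1.0   # a tiny 2x2x1 lattice (toy values)
ind = np.arange(Nx * Ny * Nz)
coords = Lc / 2 + Lc * np.array(
    np.unravel_index(ind, (Nx, Ny, Nz), order='F'), dtype=np.float64).T
print(coords)  # pore centres at (0.5, 0.5, 0.5), (1.5, 0.5, 0.5), ...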
Example #41
 def _generate_pores(self):
     r"""
     Generate the pores (coordinates, numbering and types)
     """
     Nx = self._Nx
     Ny = self._Ny
     Nz = self._Nz
     Lc = self._Lc
     Np = Nx*Ny*Nz
     ind = sp.arange(0,Np)
     self.set_pore_data(prop='numbering',data=ind)
     self.set_pore_info(label='all',locations=sp.ones_like(ind))
     pore_coords = Lc/2+Lc*sp.array(sp.unravel_index(ind, dims=(Nx, Ny, Nz), order='F'),dtype=sp.float64).T
     self.set_pore_data(prop='coords',data=pore_coords)
Example #42
 def __init__(self, forest, subsample):
     '''
     Constructor
     '''
     self.forest = forest
     if self.forest is not None:
         self.verbose = self.forest.verbose
     else:
         self.verbose = 0
     self.max_depth = 0
     # Estimate the potential number of nodes
     self.subsample = subsample
     self.subsample_bin = SP.zeros(self.forest.n, dtype='bool')
     self.subsample_bin[self.subsample] = True
     self.oob = SP.arange(self.forest.n)[~self.subsample_bin]
     nr_nodes = 4*subsample.size
     self.nodes = SP.zeros(nr_nodes, dtype='int')
     self.best_predictor = SP.empty(nr_nodes, dtype='int')
     self.start_index = SP.empty(nr_nodes, dtype='int')
     self.end_index = SP.empty(nr_nodes, dtype='int')
     self.left_child = SP.zeros(nr_nodes, dtype='int')
     self.right_child = SP.zeros(nr_nodes, dtype='int')
     self.parent = SP.empty(nr_nodes, dtype='int')
     self.mean = SP.zeros(nr_nodes)
     # Initialize root node
     self.node_ind = 0
     self.nodes[self.node_ind] = 0
     self.start_index[self.node_ind] = 0
     self.end_index[self.node_ind] = subsample.size
     self.num_nodes = 1
     self.num_leafs = 0
     self.s = SP.ones_like(self.nodes)*float('inf')
     kernel = self.get_kernel()
     if not(self.forest.optimize_memory_use):
         self.X = self.forest.X[subsample]
     if self.verbose > 1:
         print('compute tree wise singular value decomposition')
     self.S, self.U = LA.eigh(kernel + SP.eye(subsample.size)*1e-8)
     self.Uy = SP.dot(self.U.T, self.forest.y[subsample])
     if self.verbose > 1:
         print('compute tree wise bias')
     self.mean[0] = SC.estimate_bias(self.Uy, self.U, self.S,
                                     SP.log(self.forest.delta))
     self.sample = SP.arange(subsample.size)
     ck = self.get_cross_kernel(self.oob, self.subsample)
     self.cross_core = SP.dot(ck, LA.inv(kernel +
                                         SP.eye(self.subsample.size) *
                                         self.forest.delta))
     if self.verbose > 1:
         print('done initializing tree')
Example #43
def params_from_clique_marginals(theta, alpha, beta, gamma, emit_probs, clq_Q, clq_Q_pairs, X, args):
    """Recompute parameters using marginal probabilities"""
    I,T,L = X.shape
    K = gamma.shape[0]
    vp = 0
    pc = 1e-10 # pseudocount
    theta[:] = pc; alpha[:] = pc; beta[:] = pc; gamma[:] = pc; emit_probs[:] = pc
    #theta[:] = 0; alpha[:] = 0; beta[:] = 0; gamma[:] = 0; emit_probs[:] = 0
    global combinations
    combinations = list(enumerate(itertools.product(range(K), repeat=I)))
    #e_sum = sp.ones(emit_probs.shape[0]) * pc
    e_sum = sp.ones_like(emit_probs) * pc
    #e_sum = sp.ones(emit_probs.shape[1]) * 0
    t = 0
    for k_to, to_val in combinations:
        for i in range(I):
            for l in range(L):
                if X[i,t,l]:
                    emit_probs[to_val[i], l] += clq_Q[t, k_to]
                #e_sum[to_val[i]] += clq_Q[t, k_to]
                e_sum[to_val[i], l] += clq_Q[t, k_to]
        gamma[to_val[vp]] += clq_Q[t,k_to]
        for i in range(1, I):
            beta[to_val[vp], to_val[i]] += clq_Q[t,k_to]
    
    #import ipdb; ipdb.set_trace()
    for t in range(1, T):
        for k_to, to_val in combinations:
            for i in range(I):
                for l in range(L):
                    if X[i,t,l]:
                        #import ipdb; ipdb.set_trace()
                        emit_probs[to_val[i], l] += clq_Q[t, k_to]
                    #e_sum[to_val[i]] += clq_Q[t, k_to]
                    e_sum[to_val[i], l] += clq_Q[t, k_to]
            for k_from, from_val in combinations:    
                alpha[from_val[vp], to_val[vp]] += clq_Q_pairs[t,k_from, k_to]
                for i in range(1, I):
                    theta[to_val[vp], from_val[i], to_val[i]] += clq_Q_pairs[t,k_from, k_to]
    #import ipdb; ipdb.set_trace()
    
    #theta, alpha, beta, gamma = theta+pc*theta.max(), alpha+pc*alpha.max(),beta+pc*beta.max(),gamma+pc*gamma.max()
    normalize_trans(theta, alpha, beta, gamma)
    #emit_probs[:] = sp.dot(sp.diag(1./e_sum), emit_probs)
    #emit_probs[:] = sp.dot(emit_probs, sp.diag(1./e_sum * L))
    emit_probs[:] = emit_probs / e_sum
    #emit_probs[:] = sp.dot(emit_probs + pc*emit_probs.max(), sp.diag(1./(e_sum + pc*emit_probs.max())))
    args.emit_sum = e_sum