Example #1
    def cut(self):
        # Trim leading/trailing samples whose magnitude is below the mean absolute amplitude.
        average = sp.sum(sp.absolute(self.data))/sp.size(self.data)
        above = sp.nonzero(sp.absolute(self.data) > average)[0]
        head, bottom = above[0], above[-1]
        self.data = self.data[head:bottom]
        self.duration_list = self.duration_list[head:bottom]
        self.duration = self.duration_list[-1] - self.duration_list[0]
    def calculateFFT(self, duration, framerate, sample):
        """
            Calculates FFT for a given sound wave.
            Considers only frequencies with the magnitudes higher than
            a given threshold.
        """

        fft_length = int(duration * framerate)

        fft_length = get_next_power_2(fft_length)
        FFT = numpy.fft.fft(sample, n=fft_length)

        ''' ADJUSTING THRESHOLD '''
        threshold = 0
        power_spectra = []
        for i in range(len(FFT) // 2):  # only the first half of the spectrum is unique
            power_spectrum = scipy.absolute(FFT[i]) * scipy.absolute(FFT[i])
            if power_spectrum > threshold:
                threshold = power_spectrum
            power_spectra.append(power_spectrum)
        threshold *= 0.1

        binResolution = float(framerate) / float(fft_length)
        frequency_power = []
        # For each bin calculate the corresponding frequency.
        for k in range(len(FFT) // 2):
            binFreq = k * binResolution

            if self.minFreqConsidered < binFreq < self.maxFreqConsidered:
                power_spectrum = power_spectra[k]
                #dB = 10*math.log10(power_spectrum)
                if power_spectrum > threshold:
                    frequency_power.append((binFreq, power_spectrum))

        return frequency_power
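
The same thresholding idea can be exercised standalone; a minimal sketch, assuming a synthetic 440 Hz tone in place of the class's sample and frequency-range attributes:

import numpy as np

# Thresholded power spectrum of a synthetic tone (invented stand-in data).
framerate = 8000
t = np.arange(0, 1.0, 1.0 / framerate)
sample = np.sin(2 * np.pi * 440 * t)

fft_length = 1 << int(np.ceil(np.log2(len(sample))))  # next power of 2
FFT = np.fft.fft(sample, n=fft_length)

power = np.absolute(FFT[:fft_length // 2]) ** 2
threshold = 0.1 * power.max()          # 10% of the highest peak, as above
bin_resolution = framerate / float(fft_length)

frequency_power = [(k * bin_resolution, p) for k, p in enumerate(power) if p > threshold]
print(frequency_power[:3])
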
Example #3
 def getAxis(self,X,Y):
     """
     return the proper axis limits for the plots
     """
     out = []
     mM = [(min(X),max(X)),(min(Y),max(Y))]
     for i,j in mM:
         #YJC: log-scale limits are undefined for negative values; return 0 and bail out
         if j < 0 or i < 0:
             return 0
         log_i = scipy.log10(i)
         d, I = scipy.modf(log_i)
         if log_i < 0:
             add = 0.5 *(scipy.absolute(d)<0.5)
         else:
             add = 0.5 *(scipy.absolute(d)>0.5)
         m = scipy.floor(log_i) + add
         out.append(10**m)
         log_j = scipy.log10(j)
         d, I = scipy.modf(log_j)
         if log_j < 0:
             add = - 0.5 *(scipy.absolute(d)>0.5)
         else:
             add = - 0.5 *(scipy.absolute(d)<0.5)
         m = scipy.ceil(log_j) + add
         out.append(10**m)
     return tuple(out)
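
A simplified sketch of the same rounding idea, dropping the half-decade adjustment (numpy stands in for the scipy aliases):

import numpy as np

# Round a strictly positive data range outward to whole powers of ten.
X = np.array([0.03, 0.7, 12.0])
lower = 10 ** np.floor(np.log10(X.min()))   # 0.01
upper = 10 ** np.ceil(np.log10(X.max()))    # 100.0
print(lower, upper)
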
Example #4
    def selectTraits(self,phenoMAF=None,corrMin=None,nUnique=False):
        """
        use only a subset of traits

        filter out all individuals that have missing values for the selected ones
        """
        self.idx_samples = SP.ones(self.n_s,dtype=bool)
        
        # filter out nan samples
        self.idx_samples[SP.isnan(self.Y[:,self.idx_traits]).any(1)] = False
        
        # filter out phenotypes that are not diverse enough
        if phenoMAF is not None:
            expr_mean = self.Y[self.idx_samples].mean(0)
            expr_std = self.Y[self.idx_samples].std(0)
            z_scores = SP.absolute(self.Y[self.idx_samples]-expr_mean)/SP.sqrt(expr_std)
            self.idx_traits[(z_scores>1.5).mean(0) < phenoMAF] = False

        # use only correlated phenotypes
        if corrMin is not None and self.Y.shape[1] > 1:
            corr = SP.corrcoef(self.Y[self.idx_samples].T)
            corr -= SP.eye(corr.shape[0])
            # use the corrMin argument rather than a hard-coded cutoff
            self.idx_traits[SP.absolute(corr).max(0) < corrMin] = False

        # filter out binary phenotypes
        if nUnique and self.Y.shape[1]>1:
            for i in range(self.Y.shape[1]):
                if len(SP.unique(self.Y[self.idx_samples][:,i]))<=nUnique:
                    self.idx_traits[i] = False

        LG.debug('number of traits (before filtering): %d' % self.n_t)
        LG.debug('number of traits (after filtering): %d' % self.idx_traits.sum())
        LG.debug('number of samples (before filtering): %d' % self.n_s)
        LG.debug('number of samples (after filtering): %d' % self.idx_samples.sum())
    def calculateFFT(self, duration, framerate, sample):
        """
            Calculates FFT for a given sound wave.
            Considers only frequencies with the magnitudes higher than
            a given threshold.
        """

        fft_length = int(duration * framerate)
        # For the FFT to work much faster take the length that is a power of 2.
        fft_length = get_next_power_2(fft_length)
        FFT = numpy.fft.fft(sample, n=fft_length)

        ''' ADJUSTING THRESHOLD - HIGHEST SPECTRAL PEAK METHOD'''
        threshold = 0
        power_spectra = []
        frequency_bin_with_max_spectrum = 0
        for i in range(len(FFT) // 2):
            power_spectrum = scipy.absolute(FFT[i]) * scipy.absolute(FFT[i])
            if power_spectrum > threshold:
                threshold = power_spectrum
                frequency_bin_with_max_spectrum = i
            power_spectra.append(power_spectrum)
        max_power_spectrum = threshold
        threshold *= 0.1

        binFrequencies = []
        magnitudes = []
        binResolution = float(framerate) / float(fft_length)
        sum_of_significant_spectra = 0
        # For each bin calculate the corresponding frequency.
        for k in range(len(FFT)):
            binFreq = k * binResolution

            # Truncating the FFT so we consider only hearable frequencies.
            if binFreq > self.maxFreqConsidered:
                FFT = FFT[:k]
                break
            elif binFreq > self.minFreqConsidered:
                # Consider only the frequencies
                # with magnitudes higher than the threshold.
                power_spectrum = power_spectra[k]
                if power_spectrum > threshold:
                    magnitudes.append(power_spectrum)
                    binFrequencies.append(binFreq)

                    # Sum all significant power spectra
                    # except the max power spectrum.
                    if power_spectrum != max_power_spectrum:
                        sum_of_significant_spectra += power_spectrum

        significant_freq = 0.0

        if max_power_spectrum > sum_of_significant_spectra:
            significant_freq = frequency_bin_with_max_spectrum * binResolution

        # Maximum frequency considered after truncating;
        # without truncation this equals the sampling rate.
        maxFreq = len(FFT) / duration

        return (FFT, binFrequencies, maxFreq, magnitudes, significant_freq)
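
The significant-frequency decision at the end can be checked in isolation; a sketch with invented spectra and an assumed bin resolution:

import numpy as np

# Report the peak frequency only if its power dominates all other significant bins combined.
power_spectra = np.array([0.2, 9.0, 0.5, 1.3, 0.1])
bin_resolution = 10.0                                # Hz per bin (assumed)

max_bin = int(np.argmax(power_spectra))
max_power = power_spectra[max_bin]
threshold = 0.1 * max_power
sum_of_significant = power_spectra[power_spectra > threshold].sum() - max_power

significant_freq = max_bin * bin_resolution if max_power > sum_of_significant else 0.0
print(significant_freq)                              # 10.0 for this data
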
Example #6
File: RCWA.py Project: LeiDai/EMpy
def dispersion_relation_extraordinary(kx, ky, k, nO, nE, c):
    """Dispersion relation for the extraordinary wave.

    NOTE
    See eq. 16 in Glytsis, "Three-dimensional (vector) rigorous
    coupled-wave analysis of anisotropic grating diffraction",
    JOSA A, 7(8), 1990. Always gives a positive real or negative
    imaginary result.
    """

    if kx.shape != ky.shape or c.size != 3:
        raise ValueError('kx and ky must have the same length and c must have 3 components')

    kz = S.empty_like(kx)

    for ii in xrange(0, kx.size):

        alpha = nE**2 - nO**2
        beta = kx[ii]/k * c[0] + ky[ii]/k * c[1]

        # coeffs
        C = S.array([nO**2 + c[2]**2 * alpha,
                     2. * c[2] * beta * alpha,
                     nO**2 * (kx[ii]**2 + ky[ii]**2) / k**2 + alpha * beta**2 - nO**2 * nE**2])

        # two solutions of type +x or -x, purely real or purely imag
        tmp_kz = k * S.roots(C)

        # get the negative imaginary part or the positive real one
        if S.any(S.isreal(tmp_kz)):
            kz[ii] = S.absolute(tmp_kz[0])
        else:
            kz[ii] = -1j * S.absolute(tmp_kz[0])

    return kz
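
The root-picking convention from the NOTE, isolated into a sketch with made-up quadratic coefficients:

import numpy as np

C = np.array([2.25, 0.0, -1.0])        # hypothetical [A, B, C] of A*g**2 + B*g + C
tmp_kz = np.roots(C)                   # two roots of type +x or -x

if np.any(np.isreal(tmp_kz)):
    kz = np.absolute(tmp_kz[0])        # positive real branch
else:
    kz = -1j * np.absolute(tmp_kz[0])  # negative imaginary branch
print(kz)
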
Example #7
def rendGauss(x,y, sx, imageBounds, pixelSize):
    fuzz = 3*scipy.median(sx)
    roiSize = int(fuzz/pixelSize)
    fuzz = pixelSize*roiSize

    X = numpy.arange(imageBounds.x0 - fuzz,imageBounds.x1 + fuzz, pixelSize)
    Y = numpy.arange(imageBounds.y0 - fuzz,imageBounds.y1 + fuzz, pixelSize)

    im = scipy.zeros((len(X), len(Y)), 'f')

    # Record our image resolution so we can plot points with a minimum size
    # equal to the resolution (to avoid missing small points).
    delX = scipy.absolute(X[1] - X[0])
    
    for i in range(len(x)):
        ix = scipy.absolute(X - x[i]).argmin()
        iy = scipy.absolute(Y - y[i]).argmin()

        sxi =  max(sx[i], delX)       
        
        imp = Gauss2D(X[(ix - roiSize):(ix + roiSize + 1)],
                      Y[(iy - roiSize):(iy + roiSize + 1)],
                      1/sxi, x[i], y[i], sxi)
        im[(ix - roiSize):(ix + roiSize + 1), (iy - roiSize):(iy + roiSize + 1)] += imp

    im = im[roiSize:-roiSize, roiSize:-roiSize]

    return im
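
A self-contained miniature of the same rendering loop, with Gauss2D inlined and the bounds bookkeeping dropped (points and sigmas are invented):

import numpy as np

# Accumulate one isotropic Gaussian per localized point onto a grid.
X = np.arange(0.0, 10.0, 0.1)
Y = np.arange(0.0, 10.0, 0.1)
im = np.zeros((len(X), len(Y)), 'f')

points = [(3.0, 4.0, 0.3), (7.5, 2.0, 0.5)]  # (x, y, sigma)
for px, py, s in points:
    g = np.exp(-((X[:, None] - px) ** 2 + (Y[None, :] - py) ** 2) / (2 * s ** 2))
    im += g / (2 * np.pi * s ** 2)
print(im.sum())
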
Example #8
 def testMomentOfInertiaRotatedEllipsoid(self):
     img = mango.zeros(shape=self.imgShape*2, mtype="tomo", origin=(0,0,0))
     img.md.setVoxelSize((1,1,1))
     img.md.setVoxelSizeUnit("mm")
     c = (sp.array(img.origin) + img.origin + img.shape-1)*0.5
     r = sp.array(img.shape-1)*0.25
     
     mango.data.fill_ellipsoid(img, centre=c, radius=r, fill=512)
     rMatrix = \
         (
             mango.image.rotation_matrix(-25, 2).dot(
             mango.image.rotation_matrix( 10, 1).dot(
             mango.image.rotation_matrix( 45, 0)
             ))
         )
     img = mango.image.affine_transform(
         img, rMatrix, offset=c-img.origin,
         interptype=mango.image.InterpolationType.CATMULL_ROM_CUBIC_SPLINE)
     #mango.io.writeDds("tomoMoiRotEllipsoid.nc", img)
     
     pmoi, pmoi_axes, com = mango.image.moment_of_inertia(img)
     rootLogger.info("rmtx = \n%s" % (rMatrix,))
     rootLogger.info("pmoi = \n%s" % (pmoi,))
     rootLogger.info("pmoi_axes = \n%s" % (pmoi_axes,))
     rootLogger.info("c = %s, com = %s" % (c, com))
     self.assertTrue(sp.all(sp.absolute(c - com) <= 1.0e-10))
     self.assertLess(pmoi[0], pmoi[1])
     self.assertLess(pmoi[1], pmoi[2])
     self.assertTrue(sp.all(sp.absolute(pmoi_axes[:,0]-rMatrix[:,2]) <= 1.0e-3))
     self.assertTrue(sp.all(sp.absolute(pmoi_axes[:,1]-rMatrix[:,1]) <= 1.0e-3))
     self.assertTrue(sp.all(sp.absolute(pmoi_axes[:,2]-rMatrix[:,0]) <= 1.0e-3))
Example #9
def extendPWMs(pwm1, pwm2, offset2=0, fillValue=.25):
    """Extend both PWMs so that they are the same length by filling in values from fillValue.
    Optionally, a positive or negative offset for motif2 can be specified and both motifs will be filled.
    fillValue may be a number, or a 4-element array with nuc frequencies
    Returns (extendedPwm1, extendedPwm2) as 2D lists
    """
    # check for errors, convert pwms to lists if necessary
    if not isinstance(fillValue, list):
        fillValue = [fillValue] * 4  # extend to 4 nucleotides
    elif len(fillValue) != 4:
        raise RuntimeError('fillValue for extendPWMs must be a single number or a 4-element list!')
    if isinstance(pwm1, scipy.ndarray):
        pwm1 = pwm1.tolist()
    if isinstance(pwm2, scipy.ndarray):
        pwm2 = pwm2.tolist()
    
    if offset2 < 0:
        # prepend filler for pwm1
        pwm1 = [fillValue] * scipy.absolute(offset2) + pwm1
    elif offset2 > 0:
        # prepend filler for pwm2
        pwm2 = [fillValue] * scipy.absolute(offset2) + pwm2
    # extend the pwms as necessary on the right side
    pwm1 = pwm1 + [fillValue] * (len(pwm2) - len(pwm1))
    pwm2 = pwm2 + [fillValue] * (len(pwm1) - len(pwm2))
    
    return pwm1, pwm2
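
A usage sketch, assuming extendPWMs above and its scipy import are in scope (the PWM rows are invented):

# Align a 2-column PWM against a 3-column PWM shifted right by one position.
pwm1 = [[0.7, 0.1, 0.1, 0.1], [0.1, 0.7, 0.1, 0.1]]
pwm2 = [[0.25, 0.25, 0.25, 0.25]] * 3

e1, e2 = extendPWMs(pwm1, pwm2, offset2=1)
print(len(e1), len(e2))   # both padded to length 4
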
Example #10
def drazin(A, tol):
    CB = A.copy()

    Bs = []
    Cs = []
    k = 1

    while not (sp.absolute(CB) < tol).all() and sp.absolute(la.det(CB)) < tol:
        U, s, Vh = la.svd(CB)
        S = sp.diag(s)
        S = S * (S > tol)
        r = sp.count_nonzero(S)
        B = sp.dot(U, sp.sqrt(S))
        C = sp.dot(sp.sqrt(S), Vh)
        B = B[:, 0:r]
        Bs.append(B)
        C = C[0:r, :]
        Cs.append(C)
        CB = sp.dot(C, B)
        k += 1

    D = sp.eye(A.shape[0])
    for B in Bs:
        D = sp.dot(D, B)
    if (sp.absolute(CB) < tol).all():
        D = sp.dot(D, CB)
    else:
        # after s factorization steps k == s+1, and A^D = B1..Bs (CsBs)^(-k) Cs..C1
        D = sp.dot(D, np.linalg.matrix_power(CB, -k))
    for C in reversed(Cs):
        D = sp.dot(D, C)
    return D
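
The three defining identities of the Drazin inverse make a convenient sanity check; a sketch assuming drazin above with its imports (sp, la, np) and an index-1 test matrix:

import numpy as np

A = np.array([[1., 3., 0.],
              [0., 1., 3.],
              [0., 0., 0.]])
D = drazin(A, tol=1e-8)
k = 1  # assumed index of A

print(np.allclose(A.dot(D), D.dot(A)))                    # A D = D A
print(np.allclose(D.dot(A).dot(D), D))                    # D A D = D
print(np.allclose(np.linalg.matrix_power(A, k + 1).dot(D),
                  np.linalg.matrix_power(A, k)))          # A^(k+1) D = A^k
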
Example #11
 def testGaussianPValue(self):
     for typePair in [(None, "float32"), ("tomo", None)]:
         mtype = typePair[0]
         dtype = typePair[1]
         mean = 32000.0
         stdd = 1000.0
         noisDds = mango.data.gaussian_noise(shape=(105,223,240), mean=mean, stdd=stdd, mtype=mtype, dtype=dtype)
         
         pvalDds = \
             mango.fmm.gaussian_pvalue(
                 noisDds,
                 mean=mean,
                 stdd=stdd,
                 sidedness=mango.fmm.PValueSidednessType.RIGHT_SIDEDNESS
             )
         
         alpha = 0.05
         count = sp.sum(sp.where(pvalDds.asarray() <= alpha, 1, 0))
         if (pvalDds.mpi.comm is not None):
             count = pvalDds.mpi.comm.allreduce(count)
         
         expCount = sp.product(noisDds.shape)*alpha
         count = float(count)
         relErr = sp.absolute(expCount-float(count))/sp.absolute(max(expCount,count))
         rootLogger.info("relErr = %s" % relErr)
         self.assertTrue(relErr < 0.10)
def process_maps(aper_map, data_map1, data_map2, args):
    r"""
    subtracts the data maps and then calculates percentiles of the result
    before outputting a final map to file.
    """
    #
    # creating resultant map from clone of aperture map
    result = aper_map.clone()
    result.data_map = data_map1 - data_map2
    result.data_vector = sp.ravel(result.data_map)
    result.infile = args.out_name
    result.outfile = args.out_name
    #
    print('Percentiles of data_map1 - data_map2')
    output_percentile_set(result, args)
    #
    # checking if data is to be normalized and/or absolute
    if args.post_abs:
        result.data_map = sp.absolute(result.data_map)
        result.data_vector = sp.absolute(result.data_vector)
    #
    if args.post_normalize:
        result.data_map = result.data_map/sp.amax(sp.absolute(result.data_map))
        result.data_vector = sp.ravel(result.data_map)
    #
    return result
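
The post-processing branch reduces to a few array operations; a sketch with invented maps and flags standing in for args:

import numpy as np

data_map1 = np.array([[1.0, -4.0], [2.0, 0.5]])
data_map2 = np.array([[0.5, 1.0], [1.0, 3.5]])
result = data_map1 - data_map2

post_abs, post_normalize = True, True   # stand-ins for args.post_abs / args.post_normalize
if post_abs:
    result = np.absolute(result)
if post_normalize:
    result = result / np.amax(np.absolute(result))
print(result)   # magnitudes scaled into [0, 1]
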
def train_srs():
    # Load train set    
    print 'Loading files'
    X, Y = dataIO.train_set(setsize)

    X = scipy.absolute(X)
    Y = scipy.absolute(Y)

    scale = np.mean(X)
    var = np.std(X)
    print("Scale: " + str(scale))
    Y = (Y - scale)/var
    X = (X - scale)/var

    # Create net
    print 'Building RNN'
    rnn = DNN.DNN(512, hidden_layer, nodes, X,
            loss=neural_network.loss_function.source_separation_loss_function, activation=activation)

    # Train net
    print 'Training'
    rnn.fit(X, Y, nb_epoch=nb_epoch, batch_size=batch_size)

    # Save net
    print 'Saving'
    rnn.save()
    return rnn
Example #14
    def _LMLgrad_lik(self,hyperparams):
        """derivative of the likelihood parameters"""

        logtheta = hyperparams['covar']
        try:
            KV = self.get_covariances(hyperparams)
        except linalg.LinAlgError:
            LG.error("exception caught (%s)" % (str(hyperparams)))
            return 1E6

        #loop through all dimensions
        #logdet term:
        Kd = 2*KV['Knoise']
        dldet = 0.5*(Kd*KV['Si']).sum(axis=0)
        #quadratic term
        y_roti = KV['y_roti']
        dlquad = -0.5 * (y_roti * Kd * y_roti).sum(axis=0)
        if VERBOSE:
            dldet_  = SP.zeros([self.d])
            dlquad_ = SP.zeros([self.d])
            for d in xrange(self.d):
                _K = KV['K'] + SP.diag(KV['Knoise'][:,d])
                _Ki = SP.linalg.inv(_K)
                dldet_[d] = 0.5* SP.dot(_Ki,SP.diag(Kd[:,d])).trace()
                dlquad_[d] = -0.5*SP.dot(self.y[:,d],SP.dot(_Ki,SP.dot(SP.diag(Kd[:,d]),SP.dot(_Ki,self.y[:,d]))))

            assert (SP.absolute(dldet-dldet_)<1E-3).all(), 'ouch'
            assert (SP.absolute(dlquad-dlquad_)<1E-3).all(), 'ouch'


        LMLgrad = dldet + dlquad
        RV = {'lik': LMLgrad}
    
        return RV
Example #15
    def domain_length(self,face_1,face_2):
        r'''
        Calculate the distance between two faces of the network

        Parameters
        ----------
        face_1 and face_2 : array_like
            Lists of pores belonging to opposite faces of the network

        Returns
        -------
        The length of the domain in the specified direction

        Notes
        -----
        - Does not yet check if input faces are perpendicular to each other
        '''
        #Ensure given points are coplanar before proceeding
        if misc.iscoplanar(self['pore.coords'][face_1]) and misc.iscoplanar(self['pore.coords'][face_2]):
            #Find distance between given faces
            x = self['pore.coords'][face_1]
            y = self['pore.coords'][face_2]
            Ds = misc.dist(x,y)
            L = sp.median(sp.amin(Ds,axis=0))
        else:
            logger.warning('The supplied pores are not coplanar. Length will be approximate.')
            f1 = self['pore.coords'][face_1]
            f2 = self['pore.coords'][face_2]
            distavg = [0,0,0]
            distavg[0] = sp.absolute(sp.average(f1[:,0]) - sp.average(f2[:,0]))
            distavg[1] = sp.absolute(sp.average(f1[:,1]) - sp.average(f2[:,1]))
            distavg[2] = sp.absolute(sp.average(f1[:,2]) - sp.average(f2[:,2]))
            L = max(distavg)
        return L
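
The non-coplanar fallback is just a comparison of mean coordinates; a sketch with made-up face coordinates:

import numpy as np

f1 = np.array([[0., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
f2 = np.array([[5., 0., 0.], [5., 1., 0.], [5., 0., 1.]])

distavg = np.absolute(f1.mean(axis=0) - f2.mean(axis=0))
print(distavg.max())   # 5.0, the approximate domain length
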
Example #16
File: rnn.py Project: Yevgnen/RNN
 def check_gradient(self, x, t):
     (dU, dW, db, dV, dc) = self.bptt(x, t)
     (ndU, ndW, ndb, ndV, ndc) = self.numerical_gradient(x, t, 1e-5)
     print('Check gradient of bptt: max|dU-ndU|={0}'.format((sp.absolute(dU - ndU)).max()))
     print('Check gradient of bptt: max|dW-ndW|={0}'.format((sp.absolute(dW - ndW)).max()))
     print('Check gradient of bptt: max|db-ndb|={0}'.format((sp.absolute(db - ndb)).max()))
     print('Check gradient of bptt: max|dV-ndV|={0}'.format((sp.absolute(dV - ndV)).max()))
     print('Check gradient of bptt: max|dc-ndc|={0}'.format((sp.absolute(dc - ndc)).max()))
    def plot_power_spectrum(self, fft):
        T = 600  # plot only the first 600 bins

        pylab.figure('Power spectrum')
        pylab.plot(scipy.absolute(fft[:T]) * scipy.absolute(fft[:T]),)
        pylab.xlabel('Frequency [Hz]')
        pylab.ylabel('Power spectrum (|X[k]|^2)')
        pylab.show()
def log_spectrum_distance(s,shat,winfunc):
    size = min(len(s),len(shat))
    window = winfunc(size)
    s = s[0:size]
    shat = shat[0:size]
    s_amp = sp.absolute(sp.fft(s*window))
    shat_amp = sp.absolute(sp.fft(shat*window))
    return sp.sqrt(sp.mean((sp.log10(s_amp / shat_amp)*10.0)**2.0))
def itakura_saito_spectrum_distance(s,shat,winfunc):
    size = min(len(s),len(shat))
    window = winfunc(size)
    s = s[0:size]
    shat = shat[0:size]
    s_amp = sp.absolute(sp.fft(s*window))
    shat_amp = sp.absolute(sp.fft(shat*window))
    return sp.mean(sp.log(s_amp / shat_amp) + (shat_amp/s_amp) - 1.0)
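Both distances share the same calling pattern; a usage sketch with synthetic data, assuming the functions above and their legacy sp alias (an older scipy where sp.fft is callable):

import numpy as np

s = np.sin(2 * np.pi * 5 * np.linspace(0, 1, 512))
shat = s + 0.01 * np.random.default_rng(0).standard_normal(512)

print(log_spectrum_distance(s, shat, np.hanning))            # dB-scale RMS distance
print(itakura_saito_spectrum_distance(s, shat, np.hanning))
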
def smape(x, y):
	m = len(x)
	total = 0.0
	for i in range(0, m):
		err = sp.absolute(y[i] - x[i]) / ((sp.absolute(y[i]) + sp.absolute(x[i])) / 2.0)
		total = total + err
	return total / m
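A worked check of smape (values invented, expected result computed by hand):

x = [100.0, 200.0]
y = [110.0, 180.0]
# per-point errors: 10/105 and 20/190; their mean is ~0.1003
print(smape(x, y))
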
def plotPowerSpectrum(FFT, binFrequencies, maxFreq):

    T = int(maxFreq)
    pylab.figure('Power spectrum')
    pylab.plot(binFrequencies[:T], scipy.absolute(FFT[:T])*scipy.absolute(FFT[:T]),)
    pylab.xlabel('Frequency (Hz)')
    pylab.ylabel('Power spectrum (|X[k]|^2)')
    pylab.show()
    def getFilteredFFT(self, FFT, duration, threshold):
        significantFreqs = []
        for i in range(len(FFT)):
            power_spectrum = scipy.absolute(FFT[i])*scipy.absolute(FFT[i])
            if power_spectrum > threshold:
                significantFreqs.append(i / duration)

        return significantFreqs
Example #23
def rendGaussProd(x,y, sx, imageBounds, pixelSize):
    fuzz = 6*scipy.median(sx)
    roiSize = int(fuzz/pixelSize)
    fuzz = pixelSize*(roiSize)

    X = numpy.arange(imageBounds.x0 - fuzz,imageBounds.x1 + fuzz, pixelSize)
    Y = numpy.arange(imageBounds.y0 - fuzz,imageBounds.y1 + fuzz, pixelSize)

    ctval = 1e-4
    
    l3 = numpy.log(ctval)
    #l3 = -10
    
    im = len(x)*l3*scipy.ones((len(X), len(Y)), 'd')
    print(im.min())
    
    fac = 1./numpy.sqrt(2*numpy.pi)

    # Record our image resolution so we can plot points with a minimum size
    # equal to the resolution (to avoid missing small points).
    delX = scipy.absolute(X[1] - X[0])
    
    for i in range(len(x)):
        ix = scipy.absolute(X - x[i]).argmin()
        iy = scipy.absolute(Y - y[i]).argmin()
        
        if (ix > (roiSize + 1)) and (ix < (im.shape[0] - roiSize - 2)) and (iy > (roiSize+1)) and (iy < (im.shape[1] - roiSize-2)):       
            sxi = max(sx[i], delX)
            Xi, Yi = X[(ix - roiSize):(ix + roiSize + 1)][:,None], Y[(iy - roiSize):(iy + roiSize + 1)][None,:]
            imp = numpy.log(fac/sxi) - ((Xi - x[i])**2 + (Yi -y[i])**2)/(2*sxi**2)
            print((imp.max(), imp.min(), l3, imp.shape))
            imp_ = numpy.maximum(imp, l3)
            im[(ix - roiSize):(ix + roiSize + 1), (iy - roiSize):(iy + roiSize + 1)] += imp_ - l3

    im = im[roiSize:-roiSize, roiSize:-roiSize]

    return im
Example #24
def test_Darcy_alg():
    #Generate Network and clean up some of boundaries
    divs = [1,50,10]
    Lc = 0.00004
    pn = OpenPNM.Network.Cubic(shape = divs, spacing = Lc)
    pn.add_boundaries()
    Ps = pn.pores(['front_boundary','back_boundary'])
    pn.trim(pores=Ps)
    #Generate Geometry objects for internal and boundary pores
    Ps = pn.pores('boundary',mode='not')
    Ts = pn.find_neighbor_throats(pores=Ps,mode='intersection',flatten=True)
    geom = OpenPNM.Geometry.Toray090(network=pn,pores=Ps,throats=Ts)
    Ps = pn.pores('boundary')
    Ts = pn.find_neighbor_throats(pores=Ps,mode='not_intersection')
    boun = OpenPNM.Geometry.Boundary(network=pn,pores=Ps,throats=Ts)
    #Create Phase object and associate with a Physics object
    air = OpenPNM.Phases.Air(network=pn)
    Ps = pn.pores()
    Ts = pn.throats()
    phys = OpenPNM.Physics.GenericPhysics(network=pn,phase=air,pores=Ps,throats=Ts)
    from OpenPNM.Physics import models as pm
    phys.add_model(propname='throat.hydraulic_conductance',
                   model=pm.hydraulic_conductance.hagen_poiseuille,
                   calc_pore_len=False)
    phys.regenerate()  # Update the conductance values
    #Setup Algorithm objects
    Darcy1 = OpenPNM.Algorithms.StokesFlow(network=pn,phase=air)
    inlets = pn.pores('bottom_boundary')
    Ps = pn.pores('top_boundary')
    outlets = Ps[pn['pore.coords'][Ps,1]<(divs[1]*Lc/2)]
    P_out = 0  # Pa
    Q_in = 0.6667*(Lc**2)*divs[1]*divs[0]  # m^3/s
    Darcy1.set_boundary_conditions(bctype='Neumann_group',bcvalue=-Q_in,pores=inlets)
    Darcy1.set_boundary_conditions(bctype='Dirichlet',bcvalue=P_out,pores=outlets)
    Darcy1.run()
    Darcy1.return_results()
    print('pore pressure for Darcy1 algorithm:')
    print(air['pore.pressure'])
    Darcy2 = OpenPNM.Algorithms.StokesFlow(network=pn,phase=air)
    inlets = pn.pores('bottom_boundary')
    outlets = pn.pores('top_boundary')
    P_out = 10  # Pa
    P_in = 1000  # Pa
    Darcy2.set_boundary_conditions(bctype='Dirichlet',bcvalue=P_in,pores=inlets)
    Darcy2.set_boundary_conditions(bctype='Dirichlet',bcvalue=P_out,pores=outlets)
    Darcy2.run()
    print('pore pressure for Darcy2 algorithm:')
    print(Darcy2['pore.'+air.name+'_pressure'])
    Q = -Darcy2.rate(inlets)
    K = Q*air['pore.viscosity'][0]*divs[2]*Lc/(divs[0]*divs[1]*Lc**2*(P_in-P_out))
    Vp = sp.sum(pn['pore.volume']) + sp.sum(pn['throat.volume'])
    Vb = sp.prod(divs)*Lc**3
    e = Vp/Vb
    print('Effective permeability: ',K,'- Porosity: ',e)

    Darcy1_rate = round(sp.absolute(Darcy1.rate(outlets))[0], 16)
    Darcy1_bcval = round(sp.absolute(sp.unique(Darcy1['pore.'+air.name+'_bcval_Neumann_group']))[0], 16)
    assert Darcy1_rate == Darcy1_bcval
    assert round(sp.absolute(Darcy2.rate(inlets))[0],16) == round(sp.absolute(Darcy2.rate(outlets))[0],16)
Example #25
	def hubs_and_authorities(self, tweets):
		self.set_users(tweets)
		print "Users Set"
		A = self.set_A()
		AT = csr_matrix.transpose(A)
		self.auth = csr_matrix(np.ones((len(self.user_to_id.keys()),1)))
		self.hub = csr_matrix(np.ones((len(self.user_to_id.keys()),1)))
		test_auth = csr_matrix(np.zeros((len(self.user_to_id.keys()),1)))
		test_hub = csr_matrix(np.zeros((len(self.user_to_id.keys()),1)))
		print "Matrices Made"
		
		while (not np.allclose(np.asarray(self.auth.todense()), np.asarray(test_auth.todense()))
				or not np.allclose(np.asarray(self.hub.todense()), np.asarray(test_hub.todense()))):
			test_auth = self.auth.copy()
			test_hub = self.hub.copy()
			self.auth = AT.dot(self.hub)
			norm = sp.sqrt(sp.sum(sp.absolute(np.asarray(self.auth.todense()))**2))
			self.auth = csr_matrix(np.asarray(self.auth.todense()) / norm)
			self.hub = A.dot(self.auth)
			norm = sp.sqrt(sp.sum(sp.absolute(np.asarray(self.hub.todense()))**2))
			self.hub = csr_matrix(np.asarray(self.hub.todense()) / norm)
		print "Hubs and Authorities Done"
		del test_auth
		del test_hub
		temp = csr_matrix.transpose(self.hub)
		hubs = {}
		count = 0
		for hub in np.asarray(temp.todense()).tolist()[0]:
			hubs[self.id_to_user[count]] = hub
			count += 1
		temp = csr_matrix.transpose(self.auth)
		auths = {}
		count = 0
		for auth in np.asarray(temp.todense()).tolist()[0]:
			auths[self.id_to_user[count]] = auth
			count += 1
		print "Hubs and Authorities Set"
		count = 0
		for row in A:
			for item in row.nonzero()[1]:
				self.graph.add_edge(self.id_to_user[count], self.id_to_user[item], weight = 1 - hubs[self.id_to_user[item]])
			count += 1
		count = 0
		for row in AT:
			for item in row.nonzero()[1]:
				if not self.graph.has_edge(self.id_to_user[count], self.id_to_user[item]):
					self.graph.add_edge(self.id_to_user[count], self.id_to_user[item], weight = 1 - auths[self.id_to_user[item]])
			count += 1
		print "Saving Graphs"
		graph_dict = {}
		for edge in self.graph.edges():
			graph_dict[str(edge)] = self.graph.get_edge_data(edge[0], edge[1])
		f = open("data.json", "w")
		for edge in graph_dict:
			temp = {edge: graph_dict[edge]}
			f.write(json.dumps(temp))
			f.write("\n")
		print "Graph Saved"
def pltFunction (cavity1,cavity2,cavity3,plotType):
  if (plotType==0):
    return sp.absolute(cavity1[:])**2,sp.absolute(cavity2[:])**2,sp.absolute(cavity3[:])**2
  elif (plotType==1):
    return sp.real(cavity1[:]),sp.real(cavity2[:]),sp.real(cavity3[:])
  elif (plotType==2):
    return sp.imag(cavity1[:]),sp.imag(cavity2[:]),sp.imag(cavity3[:])
  else: 
    return cavity1, cavity2, cavity3
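
A usage sketch for pltFunction with invented complex cavity fields (its sp alias is assumed in scope):

import numpy as np

c1 = np.array([1 + 1j, 2 + 0j])
c2 = np.array([0 + 2j, 1 + 1j])
c3 = np.array([3 + 0j, 0 + 0j])

powers = pltFunction(c1, c2, c3, plotType=0)  # |.|**2 of each cavity field
print(powers[0])                              # [2. 4.]
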
Example #27
def softThreshold(coeffs,thresh):
	new_coeffs = []
	for j in coeffs:
		new_coeffs.append(sp.copy(j))
	for j in range(1, len(new_coeffs)):
		for i in new_coeffs[j]:
			mask = sp.absolute(i) >= thresh
			i[~mask] = 0
			i[mask] -= sp.sign(i[mask]) * thresh
	return new_coeffs
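A worked check of the soft threshold (coefficient arrays invented; the first entry is left untouched by design):

import numpy as sp  # numpy provides every sp.* call the snippet uses

coeffs = [sp.array([[5.0]]), sp.array([[0.5, -2.0, 3.0]])]
out = softThreshold(coeffs, thresh=1.0)
print(out[1])  # [[ 0. -1.  2.]]: small values zeroed, large ones shrunk by thresh
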
def filt(k,kcut,beta,alph):

    #SPINS default parameters: 0.6, 2.0, 20.0

    knyq = max(k)
    kxcut = kcut*knyq
    filt = np.ones_like(k)
    filt = np.exp(-alph*((np.absolute(k) - kxcut)/(knyq - kxcut))**beta)*(np.absolute(k)>kxcut) + (np.absolute(k)<=kxcut)

    return filt
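
A sketch exercising filt with the SPINS-style defaults noted in the comment (the wavenumber grid is assumed):

import numpy as np

k = np.fft.fftfreq(64, d=0.1) * 2 * np.pi   # assumed wavenumber grid
f = filt(k, kcut=0.6, beta=2.0, alph=20.0)
print(f.min(), f.max())                     # 1.0 below the cutoff, decaying above it
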
Example #29
 def test_fit(self):
     #create optimization object
     self.gpopt = limix.CGPopt(self.gp)
     #run
     RV = self.gpopt.opt()
     RV = self.gpopt.opt()
     
     RV = RV & (SP.absolute(self.gp.LMLgrad()['X']).max()<1E-1)
     RV = RV & (SP.absolute(self.gp.LMLgrad()['covar']).max()<1E-1)
     RV = RV & (SP.absolute(self.gp.LMLgrad()['lik']).max()<1E-1)
     self.assertTrue(RV)
Example #30
 def test_fit(self):
     """ optimization test """
     self.vc.optimize(verbose=False)
     params = self.vc.getScales()
     if self.generate:
         self.D['params_true'] = params
         data.dump(self.D,self.dataset)
         self.generate=False
     params_true = self.D['params_true']
     RV = ((SP.absolute(params)-SP.absolute(params_true))**2).max()
     self.assertTrue(RV<1e-6)
Example #31
def find_nearest(array,value):
    idx=sp.absolute(array-value).argmin()
    return array[idx]
Example #32
def find_nearest_index(array,value):
    idx=sp.absolute(array-value).argmin()
    return idx
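
A quick check of both helpers, assuming the snippets' sp alias resolves to scipy/numpy:

import numpy as np

arr = np.array([0.5, 1.7, 3.2])
print(find_nearest(arr, 2.0))         # 1.7
print(find_nearest_index(arr, 2.0))   # 1
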
Example #33
    def solve(self, wls):
        """Anisotropic solver.

        INPUT
        wls = wavelengths to scan (any asarray-able object).

        OUTPUT
        self.R, self.T = power reflected and transmitted.
        """

        self.wls = S.asarray(wls)

        multilayer = self.multilayer
        theta_inc_x = self.theta_inc_x
        theta_inc_y = self.theta_inc_y

        def find_roots(wl, epsilon, alpha, beta):
            """Find roots of characteristic equation.

            Given a wavelength, a 3x3 tensor epsilon and the tangential components
            of the wavevector k = (alpha,beta,gamma_i), returns the 4 possible
            gamma_i, i = 1,2,3,4 that satisfy the boundary conditions.
            """

            omega = 2. * S.pi * c / wl
            K = omega**2 * mu0 * epsilon

            k0 = 2. * S.pi / wl
            K /= k0**2
            alpha /= k0
            beta /= k0

            alpha2 = alpha**2
            alpha3 = alpha**3
            alpha4 = alpha**4
            beta2 = beta**2
            beta3 = beta**3
            beta4 = beta**4

            # Quartic characteristic polynomial in gamma:
            # c4*g**4 + c3*g**3 + c2*g**2 + c1*g + c0 = 0
            c4 = K[2, 2]
            c3 = alpha * (K[0, 2] + K[2, 0]) + beta * (K[1, 2] + K[2, 1])
            c2 = (alpha2 * (K[0, 0] + K[2, 2]) + alpha * beta * (K[1, 0] + K[0, 1])
                  + beta2 * (K[1, 1] + K[2, 2])
                  + (K[0, 2] * K[2, 0] + K[1, 2] * K[2, 1]
                     - K[0, 0] * K[2, 2] - K[1, 1] * K[2, 2]))
            c1 = (alpha3 * (K[0, 2] + K[2, 0]) + beta3 * (K[1, 2] + K[2, 1])
                  + alpha2 * beta * (K[1, 2] + K[2, 1])
                  + alpha * beta2 * (K[0, 2] + K[2, 0])
                  + alpha * (K[0, 1] * K[1, 2] + K[1, 0] * K[2, 1]
                             - K[0, 2] * K[1, 1] - K[2, 0] * K[1, 1])
                  + beta * (K[0, 1] * K[2, 0] + K[1, 0] * K[0, 2]
                            - K[0, 0] * K[1, 2] - K[0, 0] * K[2, 1]))
            c0 = (alpha4 * K[0, 0] + beta4 * K[1, 1]
                  + alpha3 * beta * (K[0, 1] + K[1, 0])
                  + alpha * beta3 * (K[0, 1] + K[1, 0])
                  + alpha2 * beta2 * (K[0, 0] + K[1, 1])
                  + alpha2 * (K[0, 1] * K[1, 0] + K[0, 2] * K[2, 0]
                              - K[0, 0] * K[2, 2] - K[0, 0] * K[1, 1])
                  + beta2 * (K[0, 1] * K[1, 0] + K[1, 2] * K[2, 1]
                             - K[0, 0] * K[1, 1] - K[1, 1] * K[2, 2])
                  + alpha * beta * (K[0, 2] * K[2, 1] + K[2, 0] * K[1, 2]
                                    - K[0, 1] * K[2, 2] - K[1, 0] * K[2, 2])
                  + K[0, 0] * K[1, 1] * K[2, 2] - K[0, 0] * K[1, 2] * K[2, 1]
                  - K[1, 0] * K[0, 1] * K[2, 2] + K[1, 0] * K[0, 2] * K[2, 1]
                  + K[2, 0] * K[0, 1] * K[1, 2] - K[2, 0] * K[0, 2] * K[1, 1])
            coeff = [c4, c3, c2, c1, c0]

            gamma = S.roots(coeff)
            tmp = S.sort_complex(gamma)
            gamma = tmp[[3, 0, 2, 1]]  # convention

            k = k0 * S.array([alpha * S.ones(gamma.shape),
                              beta * S.ones(gamma.shape),
                              gamma]).T
            v = S.zeros((4, 3), dtype=complex)

            for i, g in enumerate(gamma):

                H = K + [[-beta2 - g**2, alpha * beta, alpha * g],
                         [alpha * beta, -alpha2 - g**2, beta * g],
                         [alpha * g, beta * g, -alpha2 - beta2]]
                v[i, :] = [
                    (K[1, 1] - alpha2 - g**2) *
                    (K[2, 2] - alpha2 - beta2) - (K[1, 2] + beta * g)**2,
                    (K[1, 2] + beta * g) * (K[2, 0] + alpha * g) -
                    (K[0, 1] + alpha * beta) * (K[2, 2] - alpha2 - beta2),
                    (K[0, 1] + alpha * beta) * (K[1, 2] + beta * g) -
                    (K[0, 2] + alpha * g) * (K[1, 1] - alpha2 - g**2)
                ]

            p3 = v[0, :]
            p3 /= norm(p3)
            p4 = v[1, :]
            p4 /= norm(p4)
            p1 = S.cross(p3, k[0, :])
            p1 /= norm(p1)
            p2 = S.cross(p4, k[1, :])
            p2 /= norm(p2)

            p = S.array([p1, p2, p3, p4])
            q = wl / (2. * S.pi * mu0 * c) * S.cross(k, p)

            return k, p, q

        nlayers = len(multilayer)
        d = S.asarray([l.thickness for l in multilayer])

        # R and T are real, because they are powers
        # r and t are complex!
        R = S.zeros((2, 2, self.wls.size))
        T = S.zeros((2, 2, self.wls.size))

        epstot = S.zeros((3, 3, self.wls.size, nlayers), dtype=complex)
        for i, l in enumerate(multilayer):
            epstot[:, :, :, i] = l.mat.epsilonTensor(self.wls)

        for iwl, wl in enumerate(self.wls):

            epsilon = epstot[:, :, iwl, :]

            kx = 2 * S.pi / wl * S.sin(theta_inc_x)
            ky = 2 * S.pi / wl * S.sin(theta_inc_y)
            x = S.array([1, 0, 0], dtype=float)
            y = S.array([0, 1, 0], dtype=float)
            z = S.array([0, 0, 1], dtype=float)
            k = S.zeros((4, 3, nlayers), dtype=complex)
            p = S.zeros((4, 3, nlayers), dtype=complex)
            q = S.zeros((4, 3, nlayers), dtype=complex)
            D = S.zeros((4, 4, nlayers), dtype=complex)
            P = S.zeros((4, 4, nlayers), dtype=complex)

            for i in range(nlayers):

                k[:, :, i], p[:, :, i], q[:, :, i] = find_roots(
                    wl, epsilon[:, :, i], kx, ky)
                D[:, :, i] = [
                    [S.dot(x, p[0, :, i]), S.dot(x, p[1, :, i]),
                     S.dot(x, p[2, :, i]), S.dot(x, p[3, :, i])],
                    [S.dot(y, q[0, :, i]), S.dot(y, q[1, :, i]),
                     S.dot(y, q[2, :, i]), S.dot(y, q[3, :, i])],
                    [S.dot(y, p[0, :, i]), S.dot(y, p[1, :, i]),
                     S.dot(y, p[2, :, i]), S.dot(y, p[3, :, i])],
                    [S.dot(x, q[0, :, i]), S.dot(x, q[1, :, i]),
                     S.dot(x, q[2, :, i]), S.dot(x, q[3, :, i])],
                ]

            for i in range(1, nlayers - 1):
                P[:, :, i] = S.diag(S.exp(1j * k[:, 2, i] * d[i]))

            M = inv(D[:, :, 0])
            for i in range(1, nlayers - 1):
                M = S.dot(
                    M, S.dot(D[:, :, i], S.dot(P[:, :, i], inv(D[:, :, i]))))
            M = S.dot(M, D[:, :, -1])

            deltaM = M[0, 0] * M[2, 2] - M[0, 2] * M[2, 0]

            # reflectance matrix (from yeh_electromagnetic)
            # r = [rss rsp; rps rpp]
            r = S.array([[M[1, 0] * M[2, 2] - M[1, 2] * M[2, 0],
                          M[3, 0] * M[2, 2] - M[3, 2] * M[2, 0]],
                         [M[0, 0] * M[1, 2] - M[1, 0] * M[0, 2],
                          M[0, 0] * M[3, 2] - M[3, 0] * M[0, 2]]],
                        dtype=complex) / deltaM

            # transmittance matrix (from yeh_electromagnetic)
            # t = [tss tsp; tps tpp]
            t = S.array([[M[2, 2], -M[2, 0]], [-M[0, 2], M[0, 0]]]) / deltaM

            # P_t/P_inc = |E_t|**2/|E_inc|**2 . k_t_z/k_inc_z
            T[:, :, iwl] = (S.absolute(t)**2 * k[0, 2, -1] / k[0, 2, 0]).real

            # P_r/P_inc = |E_r|**2/|E_inc|**2
            R[:, :, iwl] = S.absolute(r)**2

        self.R = R
        self.T = T
        return self
def sse_score(gt,predictions):
    predictions = S.array(predictions,ndmin=2)
    N,M = predictions.shape
    
    # We expect a vector; if we get a matrix, this mean is more defensive
    # (a shape mismatch would cause an error later anyway).
    errors = [S.power(S.maximum(0.0, row) - S.maximum(0.0, gt), 2.0).mean(1) for row in predictions]
    return S.absolute(S.array(errors).ravel())
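A usage sketch for sse_score (S is assumed to be the legacy scipy/numpy alias; data invented):

import numpy as np

gt = np.array([1.0, 2.0, 3.0], ndmin=2)          # ground truth, kept 2-D as the code expects
predictions = [[1.0, 2.5, 2.0], [0.0, 2.0, 3.0]]
print(sse_score(gt, predictions))                # one mean squared error per prediction row
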
Example #35
def vca(Y, R, verbose=False, snr_input=0):
    # Vertex Component Analysis
    #
    # Ae, indice, Yp = vca(Y,R,verbose = True,snr_input = 0)
    #
    # ------- Input variables -------------
    #  Y - matrix with dimensions L(channels) x N(pixels)
    #      each pixel is a linear mixture of R endmembers
    #      signatures Y = M x s, where s = gamma x alfa
    #      gamma is an illumination perturbation factor and
    #      alfa are the abundance fractions of each endmember.
    #  R - positive integer number of endmembers in the scene
    #
    # ------- Output variables -----------
    # Ae     - estimated mixing matrix (endmembers signatures)
    # indice - pixels that were chosen to be the most pure
    # Yp     - Data matrix Y projected.
    #
    # ------- Optional parameters---------
    # snr_input - (float) signal to noise ratio (dB)
    # v         - [True | False]
    # ------------------------------------
    #
    # Author: Adrien Lagrange ([email protected])
    # This code is a translation of a matlab code provided by
    # Jose Nascimento ([email protected]) and Jose Bioucas Dias ([email protected])
    # available at http://www.lx.it.pt/~bioucas/code.htm under a non-specified Copyright (c)
    # Translation of last version at 22-February-2018 (Matlab version 2.1 (7-May-2004))
    #
    # more details on:
    # Jose M. P. Nascimento and Jose M. B. Dias
    # "Vertex Component Analysis: A Fast Algorithm to Unmix Hyperspectral Data"
    # submitted to IEEE Trans. Geosci. Remote Sensing, vol. .., no. .., pp. .-., 2004
    #
    #

    #############################################
    # Initializations
    #############################################
    if len(Y.shape) != 2:
        sys.exit(
            'Input data must be of size L (number of bands i.e. channels) by N (number of pixels)'
        )

    [L, N] = Y.shape  # L number of bands (channels), N number of pixels

    R = int(R)
    if (R <= 0 or R > L):
        sys.exit('ENDMEMBER parameter must be an integer between 1 and L')

#############################################
# SNR Estimates
#############################################

    if snr_input == 0:
        y_m = sp.mean(Y, axis=1, keepdims=True)
        Y_o = Y - y_m  # data with zero-mean
        Ud = splin.svd(sp.dot(Y_o, Y_o.T) /
                       float(N))[0][:, :R]  # computes the R-projection matrix
        x_p = sp.dot(Ud.T, Y_o)  # project the zero-mean data onto p-subspace

        SNR = estimate_snr(Y, y_m, x_p)

        if verbose:
            print("SNR estimated = {}[dB]".format(SNR))
    else:
        SNR = snr_input
        if verbose:
            print("input SNR = {}[dB]\n".format(SNR))

    SNR_th = 15 + 10 * sp.log10(R)

    #############################################
    # Choosing Projective Projection or
    #          projection to p-1 subspace
    #############################################

    if SNR < SNR_th:
        if verbose:
            print("... Select proj. to R-1")

        d = R - 1
        if snr_input == 0:  # it means that the projection is already computed
            Ud = Ud[:, :d]
        else:
            y_m = sp.mean(Y, axis=1, keepdims=True)
            Y_o = Y - y_m  # data with zero-mean

            Ud = splin.svd(
                sp.dot(Y_o, Y_o.T) /
                float(N))[0][:, :d]  # computes the p-projection matrix
            x_p = sp.dot(Ud.T, Y_o)  # project the zero-mean data onto p-subspace

        Yp = sp.dot(Ud, x_p[:d, :]) + y_m  # again in dimension L

        x = x_p[:d, :]  #  x_p =  Ud.T * Y_o is on a R-dim subspace
        c = sp.amax(sp.sum(x**2, axis=0))**0.5
        y = sp.vstack((x, c * sp.ones((1, N))))
    else:
        if verbose:
            print("... Select the projective proj.")

        d = R
        Ud = splin.svd(sp.dot(Y, Y.T) /
                       float(N))[0][:, :d]  # computes the p-projection matrix

        x_p = sp.dot(Ud.T, Y)
        Yp = sp.dot(Ud, x_p[:d, :])  # again in dimension L (note that x_p has non-zero mean)

        x = sp.dot(Ud.T, Y)
        u = sp.mean(x, axis=1, keepdims=True)  #equivalent to  u = Ud.T * r_m
        y = x / sp.dot(u.T, x)

#############################################
# VCA algorithm
#############################################

    indice = sp.zeros((R), dtype=int)
    A = sp.zeros((R, R))
    A[-1, 0] = 1

    for i in range(R):
        w = sp.random.rand(R, 1)
        f = w - sp.dot(A, sp.dot(splin.pinv(A), w))
        f = f / splin.norm(f)

        v = sp.dot(f.T, y)

        indice[i] = sp.argmax(sp.absolute(v))
        A[:, i] = y[:, indice[i]]  # same as x(:,indice(i))

    Ae = Yp[:, indice]

    return Ae, indice, Yp
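
A smoke test for vca on synthetic linear mixtures, assuming the sp/splin aliases the snippet uses; snr_input is set explicitly so the estimate_snr helper is not needed (all data and sizes are invented):

import numpy as np

rng = np.random.default_rng(1)
L, N, R = 50, 500, 3
M = rng.random((L, R))                       # endmember signatures
s = rng.dirichlet(np.ones(R), size=N).T      # abundance fractions, columns sum to 1
Y = M.dot(s)

Ae, indice, Yp = vca(Y, R, verbose=True, snr_input=30)
print(Ae.shape, indice)                      # (50, 3) and the chosen pure-pixel indices
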
Example #36
 def feature(self, x):
     return lsh.normalize(
         scipy.absolute(mir.cqt(x, self.fs, lo=self.pmin, hi=self.pmax)[0]))