Code example #1
File: filter_lib.py Project: 39M/PhotoTheater
def glow(src, dst):
    graph = Image.open(src)
    size = graph.size
    width = size[0]
    height = size[1]

    source = graph.convert('RGB')
    Gauss = graph.convert('RGB')
    source = ny.double(ny.array(source))

    Gauss = Gauss.filter(MyGaussianBlur(radius=15))
    Gauss = ny.double(ny.array(Gauss))

    Result = ny.zeros([height, width, 3])
    for row in range(height):
        for col in range(width):
            for k in range(3):
                if source[row, col, k] <= 128:
                    value = Gauss[row, col, k] * source[row, col, k] / 128
                    Result[row, col, k] = min(255, max(0, value))
                else:
                    value = 255 - (255 - Gauss[row, col, k]) * (255 - source[row, col, k]) / 128
                    Result[row, col, k] = min(255, max(0, value))

    result = Image.fromarray(ny.uint8(Result)).convert('RGB')
    result.save(dst)
    return 0
Code example #2
def sig_lmc(C, A):
    '''
    This is a function that uses a lumped Markov chain to calculate
    the significance of clusters in a given community structure;
    refer to "Piccardi 2011 in PLoS ONE".
    Here we normalize the original definition of persistence by
    the size of the corresponding cluster to get a better measure.
    INPUT:
        "A" is a N-by-N weighted adjacency matrix
        "C" is a N-by-1 partition(cluster) vector
    OUTPUT:
        normalized persistence probability of all clusters
    '''
    '''
    Transition Matrix
    '''
    C = np.asarray(C)
    A = np.double(A)
    P = np.linalg.solve(np.diag(np.sum(A,axis = 1)),A)
    [eval, evec] = linalg.eigs(P.T, 1)
    if min(evec)<0:
        evec = -evec
    pi = np.double(evec.T)
    num_node = np.double(np.shape(A)[0])
    cl_label = np.double(np.unique(C))
    num_cl = len(cl_label)
    H = np.zeros((num_node, num_cl),dtype = np.double)
    for i in range(num_cl):
        H[:, i] = np.double((C==cl_label[i]))

    # Transition matrix of the lumped Markov chain

    Q = np.dot(np.dot(np.dot(np.linalg.solve(np.diag(np.dot(pi,H).flatten()),H.T),np.diag(pi.flatten())),P),H)
    persistence = np.multiply(np.divide(np.diag(Q), np.sum(H,axis = 0)),np.sum(H))
    return persistence
Code example #3
File: zorro_util.py Project: C-CINA/zorro
def apodization( name = 'butter.32', shape= [2048,2048], radius=None ):
    """ apodization( name = 'butter.32', size = [2048,2048], radius=None )
    Provides a 2-D filter or apodization window for Fourier filtering or image clamping.
        Radius = None defaults to shape/2
    
    Valid names are: 
        'hann' - von Hann cosine window on radius
        'hann_square' as above but on X-Y
        'hamming' - good for apodization, nonsense as a filter
        'butter.X' Butterworth multi-order filter where X is the order of the Lorentzian
        'butter_square.X' Butterworth in X-Y
        'gauss_trunc' - truncated gaussian, higher performance (smaller PSF) than hann filter
        'gauss' - regular gaussian
    NOTE: There are windows in scipy.signal for 1D-filtering...
    WARNING: doesn't work properly for odd image dimensions
    """
    # Make meshes
    shape = np.asarray( shape )
    if radius is None:
        radius = shape/2.0
    else:
        radius = np.asarray( radius, dtype='float' )
    # DEBUG: Doesn't work right for odd numbers
    [xmesh,ymesh] = np.meshgrid( np.arange(-shape[1]/2,shape[1]/2), np.arange(-shape[0]/2,shape[0]/2) )
    r2mesh = xmesh*xmesh/( np.double(radius[0])**2 ) + ymesh*ymesh/( np.double(radius[1])**2 )
    
    try:
        [name, order] = name.lower().split('.')
        order = np.double(order)
    except ValueError:
        order = 1
        
    if name == 'butter':
        window =  np.sqrt( 1.0 / (1.0 + r2mesh**order ) )
    elif name == 'butter_square':
        window = np.sqrt( 1.0 / (1.0 + (xmesh/radius[1])**order))*np.sqrt(1.0 / (1.0 + (ymesh/radius[0])**order) )
    elif name == 'hann':
        cropwin = ((xmesh/radius[1])**2.0 + (ymesh/radius[0])**2.0) <= 1.0
        window = cropwin.astype('float') * 0.5 * ( 1.0 + np.cos( 1.0*np.pi*np.sqrt( (xmesh/radius[1])**2.0 + (ymesh/radius[0])**2.0  )  ) )
    elif name == 'hann_square':
        window = ( (0.5 + 0.5*np.cos( np.pi*( xmesh/radius[1]) ) ) *
            (0.5 + 0.5*np.cos( np.pi*( ymesh/radius[0] )  ) ) )
    elif name == 'hamming':
        cropwin = ((xmesh/radius[1])**2.0 + (ymesh/radius[0])**2.0) <= 1.0
        window = cropwin.astype('float') *  ( 0.54 + 0.46*np.cos( 1.0*np.pi*np.sqrt( (xmesh/radius[1])**2.0 + (ymesh/radius[0])**2.0  )  ) )
    elif name == 'hamming_square':
        window = ( (0.54 + 0.46*np.cos( np.pi*( xmesh/radius[1]) ) ) *
            (0.54 + 0.46*np.cos( np.pi*( ymesh/radius[0] )  ) ) )
    elif name == 'gauss' or name == 'gaussian':
        window = np.exp( -(xmesh/radius[1])**2.0 - (ymesh/radius[0])**2.0 )
    elif name == 'gauss_trunc':
        cropwin = ((0.5*xmesh/radius[1])**2.0 + (0.5*ymesh/radius[0])**2.0) <= 1.0
        window = cropwin.astype('float') * np.exp( -(xmesh/radius[1])**2.0 - (ymesh/radius[0])**2.0 )
    elif name == 'lanczos':
        print( "TODO: Implement Lanczos window" )
        return
    else:
        print( "Error: unknown filter name passed into apodization" )
        return
    return window
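
A minimal usage sketch (not from the zorro project; the image, radius and filter order below are invented) showing how such a window might be applied as a Fourier-space low-pass filter:

import numpy as np

win = apodization(name='butter.8', shape=[256, 256], radius=[40, 40])  # 8th-order Butterworth window
img = np.random.rand(256, 256)                                         # stand-in image
# the window is centred on the array, so shift it before multiplying the FFT
filtered = np.real(np.fft.ifft2(np.fft.fft2(img) * np.fft.ifftshift(win)))
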
Code example #4
File: main.py Project: riddhishb/RandomStuff
def reconstruct(pitch, fs, coffs, syllable):
    gain = coffs[0]
    coffs[0] = 1
    x = np.double(fs)/np.double(pitch)
    num = np.ceil((pitch*3)/10.0)
    # roughly 300 ms of excitation, i.e. num pitch periods of x samples each
    if syllable != "s":

        ex_input = np.zeros(int(num*x))
        for i in range(0, int(num)):
            ex_input[int(i*x)-1] = 1

        # Filtering the signal
        out = signal.filtfilt([gain], coffs, ex_input)
        d_num = [1]
        d_den = [1, -0.9]
        out = signal.filtfilt(d_num, d_den, out)  # De-emphasis
        out = np.int16(out/np.max(np.abs(out)) * 32767)
    else:
        ex_input = np.random.normal(0, 1, int(num))
        out = signal.filtfilt([gain], coffs, ex_input)
        out = np.int16(out/np.max(np.abs(out)) * 32767)

    return out
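
A hypothetical usage sketch; the LPC-style coefficients below are invented (a stable all-pole filter), not from the original project, and numpy/scipy.signal are assumed to be imported as in the snippet:

import numpy as np
from scipy import signal

coffs = np.array([1.5, -0.9, 0.64, 0.1])   # [gain, a1, a2, a3] -- made-up coefficients
voiced = reconstruct(pitch=120, fs=8000, coffs=coffs.copy(), syllable='a')    # impulse-train excitation
unvoiced = reconstruct(pitch=120, fs=8000, coffs=coffs.copy(), syllable='s')  # white-noise excitation
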
Code example #5
File: pyspike.py Project: britodasilva/pyhfo
def openDATfile(filename,ftype,srate=25000):
    fh = open(filename,'rb')
    fh.seek(0)
    if ftype == 'amp':
        data = np.fromfile(fh, dtype=np.int16)
        fh.close()
        data = np.double(data)
        data *= 0.195 # according to Intan, amplifier data should be multiplied by 0.195 to convert to microvolts
    elif ftype == 'adc':
        data = np.fromfile(fh, dtype=np.uint16)
        fh.close()
        data = np.double(data)
        data *= 0.000050354 # Intan scale factor for board ADC channels
        data -= np.mean(data)
    
    elif ftype == 'aux':
        data = np.fromfile(fh, dtype=np.uint16)
        fh.close()
        data = np.double(data)
        data *= 0.0000748 # Intan scale factor for auxiliary input channels
        
    elif ftype == 'time':
        data = np.fromfile(fh, dtype=np.int32)
        fh.close()
        data = np.double(data)
        data /= srate # convert sample counts to seconds using the sampling rate
    return data
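
A self-contained usage sketch (file name and sample values are invented): write a fake Intan-style int16 amplifier file, then read it back in microvolts; assumes numpy as np and the function above:

import numpy as np

np.int16([100, -100, 2000]).tofile('amp-A-000.dat')       # fake amplifier samples
trace_uV = openDATfile('amp-A-000.dat', 'amp', srate=25000)
print(trace_uV)                                            # [19.5 -19.5 390.] in microvolts
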
Code example #6
File: io.py Project: OrkoHunter/stingray
def _save_hdf5_object(object, filename):
    """
    Save a class object in hdf5 format.

    Parameters
    ----------
    object: class instance
        A class object whose attributes would be saved in a dictionary format.

    filename: str
        The file name to save to
    """
    items = vars(object)
    attrs = [name for name in items]

    with h5py.File(filename, 'w') as hf:   
        for attr in attrs:
            data = items[attr]
            
            # If data is a single number, store as an attribute.
            if _isattribute(data):

                if isinstance(data, np.longdouble):
                    data = np.double(data) 
                    utils.simon("Casting data as double instead of longdouble.")
                hf.attrs[attr] = data
            
            # If data is a numpy array, create a dataset.
            else:

                if isinstance(data[0], np.longdouble):
                    data = np.double(data) 
                    utils.simon("Casting data as double instead of longdouble.")
                hf.create_dataset(attr, data=data) 
Code example #7
File: corr_r.py Project: shjoshi-dev/bss
def fastColumnWiseCorrcoef(O, P): #P = age_mtx = 1 x n, O = subjects_vertices_mtx = n x m
    n = P.size
    DO = O - (np.einsum('ij->j', O) / np.double(n))
    P -= (np.einsum('i->', P) / np.double(n))
    tmp = np.einsum('ij,ij->j', DO, DO)
    tmp *= np.einsum('i,i->', P, P)
    return np.dot(P, DO) / np.sqrt(tmp)
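
A quick sanity-check sketch with random data (not from the original project); note that the function de-means P in place, hence the copy:

import numpy as np

O = np.random.rand(50, 10)     # 50 subjects x 10 vertices
P = np.random.rand(50)         # one covariate value per subject
r = fastColumnWiseCorrcoef(O, P.copy())
ref = np.array([np.corrcoef(P, O[:, j])[0, 1] for j in range(O.shape[1])])
assert np.allclose(r, ref)     # matches column-wise Pearson correlation
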
Code example #8
File: simplot.py Project: undercoveridiot/gunfolds
def estOE(d):
    gt = d['gt']['graph']
    gt = bfu.undersample(gt, 1)
    e = gk.OCE(d['estimate'], gt)
    N = np.double(len(gk.edgelist(gt))) +\
        np.double(len(gk.bedgelist(gt)))
    return (e['directed'][0] + e['bidirected'][0]) / N
Code example #9
File: test_unit.py Project: a321bhi/MaLTPyNT
    def test_high_precision_split2(self):
        C_I, C_F, C_l, k = \
            mp.io._split_high_precision_number("C", np.double(1.01), 8)
        assert C_I == 1
        np.testing.assert_almost_equal(C_F, np.double(0.01), 6)
        assert C_l == 0
        assert k == "double"
Code example #10
File: metrics.py Project: goldsmitha/knn-classifier
def get_precision_recall_values(predictionImage, labelImage, threshLevels, doPlot):
    # normalize images
    predictionImage = predictionImage / np.double(np.max(predictionImage))
    labelImage = labelImage > 0

    precisionValues = np.zeros(threshLevels)
    recallValues = np.zeros(threshLevels)
    counter = 0

    # get sufficient number of thresholds
    threshValues = np.linspace(0.01, 0.99, threshLevels)

    for thresh in threshValues:
        # threshold the prediction image
        threshPrediction = predictionImage > thresh

        truePositives = np.double(np.sum(np.logical_and(labelImage, threshPrediction)))
        falsePositives = np.double(np.sum(np.logical_and(np.logical_not(labelImage), threshPrediction)))
        falseNegatives = np.double(np.sum(np.logical_and(labelImage, np.logical_not(threshPrediction))))

        precision = truePositives / (truePositives + falsePositives)
        recall = truePositives / (truePositives + falseNegatives)

        precisionValues[counter] = precision
        recallValues[counter] = recall
        counter += 1

    if doPlot:
        plt.figure('Precision vs Recall')
        plt.plot(precisionValues, recallValues, 'o')
        plt.xlabel("Precision")
        plt.ylabel("Recall")

    return (precisionValues, recallValues)
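
A small usage sketch with synthetic data (assumes numpy as np and matplotlib.pyplot as plt, as in the snippet):

import numpy as np

pred = np.random.rand(64, 64)           # e.g. a probability map from a classifier
label = np.random.rand(64, 64) > 0.7    # binary ground truth
prec, rec = get_precision_recall_values(pred, label, threshLevels=20, doPlot=False)
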
Code example #11
def threshold(frame, threshold = 0.5, normalized_threshold = True, threshold_type = cv2.THRESH_BINARY, debug = False):
    """
    thresholding an image: the input type has to be either np.uint8 or np.float32;
    returns the thresholded image (float32, scaled against maxval = 255)
    """

    fmin = np.double( np.nanmin(frame) )
    fmax = np.double( np.nanmax(frame) )
    
    if debug:
        print("fmin: ", fmin)
        print("fmax: ", fmax)
    
    floatframe = ( (1.0 - 0.0) / (fmax - fmin) * (np.double(frame) - fmin) ).astype(np.float32)
    
    if debug:
        print("floatframe: ", floatframe.dtype)
        print("min: ", np.nanmin(floatframe))
        print("max: ", np.nanmax(floatframe))
    
    if not normalized_threshold:
        threshold = 1.0 / (fmax - fmin) * (threshold - fmin)
        if debug:
            print("normalized threshold: ", threshold)
    
    retval, t = cv2.threshold(floatframe, thresh = threshold, maxval = 255, type = threshold_type)
    
    return t
Code example #12
File: data_utils.py Project: j-faria/exonailer
def get_fn_likelihood(residuals, sigma_w, sigma_r, gamma=1.0):
    like = 0.0
    # Arrays of zeros to be passed to the likelihood function
    aa, bb, M = Wavelets.getDWT(residuals)
    # Calculate the g(gamma) factor used in Carter & Winn...
    if(gamma == 1.0):
        g_gamma = 1.0/(2.0*np.log(2.0))  # (value assuming gamma=1)
    else:
        g_gamma = (2.0)-(2.0)**gamma
    # log-Likelihood of the approximation coefficients
    sigmasq_S = (sigma_r**2)*g_gamma+(sigma_w)**2
    tau_a = 1.0/sigmasq_S
    like += normal_like( bb[0], 0.0, tau_a )
    k = 0
    SS = range(M)
    for ii in SS:
        # log-Likelihood of the detail coefficients with m=i...
        if(ii == 0):
            sigmasq_W = (sigma_r**2)*(2.0**(-gamma*np.double(1.0)))+(sigma_w)**2
            tau = 1.0/sigmasq_W
            like += normal_like( bb[1], 0.0, tau )
        else:
            sigmasq_W = (sigma_r**2)*(2.0**(-gamma*np.double(ii+1)))+(sigma_w)**2
            tau = 1.0/sigmasq_W
            for j in range(2**ii):
                like += normal_like( aa[k], 0.0, tau )
                k = k+1
    return like
Code example #13
File: lans.py Project: nfoti/LANS
def gen_backbone(CDFmat, alpha=0.05, S=None):
    """ Returns a backbone network given a CDF matrix and significance value
        and an optional similarity matrix for weights.
        Finds all entries in the matrix s.t. 1-alpha < CDF matrix entry
    """

    # do some input checking
    if type(CDFmat) != N.matrix:
        raise TypeError('gen_backbone:  Invalid input type -- must be numpy.matrix')

    # now find the size of this matrix
    sz = CDFmat.shape

    # check for correct dimensions
    if sz[0] != sz[1]:
        raise ValueError('gen_backbone:  Invalid input -- matrix is not square')

    # now make sure the matrix is of doubles
    CDFmat = N.double(CDFmat)

    # convenience renaming
    n = sz[0]

    # now we need to find the entries 
    BBout = N.double(CDFmat > 1-alpha)

    # add weights if desired
    print(type(S))
    if S is not None:
        print('######## Adding weights to matrix...')
        BBout = N.multiply(BBout, S)
    else:
        print('######## NO weights specified...')

    return BBout
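
A minimal usage sketch with a toy CDF matrix (assumes numpy is imported as N, as in the snippet):

import numpy as N

cdf = N.matrix([[0.00, 0.99, 0.10],
                [0.97, 0.00, 0.50],
                [0.20, 0.96, 0.00]])
print(gen_backbone(cdf, alpha=0.05))   # keeps only the entries whose CDF value exceeds 1 - alpha
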
Code example #14
File: io.py Project: evandromr/stingray
def split_numbers(number):
    """
    Split high precision number(s) into doubles.
    TODO: Consider the option of using a third number to specify shift.

    Parameters
    ----------
    number: long double
        The input high precision number which is to be split

    Returns
    -------
    number_I: double
        First part of high precision number

    number_F: double
        Second part of high precision number
    """

    if isinstance(number, collections.abc.Iterable):
        mods = [math.modf(n) for n in number]
        number_F = [f for f,_ in mods]
        number_I = [i for _,i in mods] 
    else:
        number_F, number_I = math.modf(number)

    return np.double(number_I), np.double(number_F)
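
A quick usage sketch (assumes numpy as np, math and collections are imported as in the module):

i_part, f_part = split_numbers(3.75)
print(i_part, f_part)                    # 3.0 0.75
i_parts, f_parts = split_numbers([1.5, 2.25])
print(i_parts, f_parts)                  # element-wise integer and fractional parts
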
Code example #15
File: spglib.py Project: Johnson-Wang/phonopy
def get_mappings(mesh,
                 rotations,
                 is_shift=np.zeros(3, dtype='intc'),
                 is_time_reversal=True,
                 qpoints=np.double([])):
    """
    Return the k-point mapping to the irreducible k-points and the k-point grid points.

    The symmetry is searched from the input rotation matrices in real space.

    is_shift=[0, 0, 0] gives Gamma center mesh and the values 1 give
    half mesh distance shifts.
    """

    mapping = np.zeros(np.prod(mesh), dtype='intc')
    rot_mapping = np.zeros(np.prod(mesh), dtype="intc")
    mesh_points = np.zeros((np.prod(mesh), 3), dtype='intc')
    qpoints = np.double(qpoints).copy()
    if qpoints.shape == (3,):
        qpoints = np.double([qpoints])
    spg.stabilized_reciprocal_mesh(mesh_points,
                                   mapping,
                                   rot_mapping,
                                   np.intc(mesh).copy(),
                                   np.intc(is_shift),
                                   is_time_reversal * 1,
                                   np.intc(rotations).copy(),
                                   np.double(qpoints))

    return mapping,  rot_mapping
Code example #16
def best_match(dominf, key, rng):
    """compute rms, pos, s and o such that |i'*s + o - rng| is minimal
    where i' is a submatrix of domain that has shape = rng.shape and
    upper left corner at pos .
    """
    n, m = rng.shape
    N = np.double(n) * np.double(m)
    dom = dominf.doms[key]
    c = np.sum(rng ** 2) / N
    mean = np.sum(rng) / N
    b = all_dot(rng, dom) / N

    dom_a = dominf.get_mean(key, rng.shape[0])
    dom_mean = dominf.get_meansq(key, rng.shape[0])

    denom = (dom_a - dom_mean ** 2)
    denom[abs(denom) < allmost0] = allmost0
    s = (b - mean * dom_mean) / denom
    o = (mean * dom_a - b * dom_mean) / denom

    # mean square error
    ms = (dom_a * s ** 2 + o ** 2 + c +
      2 * s * o * dom_mean - 2 * s * b - 2 * o * mean)

    # penalty for too big s:
    ms[np.logical_not(abs(s) <= limit_s)] = np.inf

    # best domain
    index = np.unravel_index(np.argmin(ms), ms.shape)
    rms = np.sqrt(np.max((0.0, ms[index])))
    return (rms, index, s[index], o[index])
Code example #17
File: distance.py Project: akvankorlaar/pystyl
def jaccard_distance(u, v):
    """return jaccard distance"""
    u = numpy.asarray(u)
    v = numpy.asarray(v)
    return (numpy.double(numpy.bitwise_and((u != v),
            numpy.bitwise_or(u != 0, v != 0)).sum())
            /  numpy.double(numpy.bitwise_or(u != 0, v != 0).sum()))
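
A short usage sketch (assumes numpy is imported as in the snippet):

import numpy

u = numpy.array([1, 0, 1, 1, 0])
v = numpy.array([1, 1, 0, 1, 0])
print(jaccard_distance(u, v))   # 2 differing positions / 4 positions where either is non-zero = 0.5
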
Code example #18
def load_snotel_list(snotel_list_file):
    """Load station name/location information from a text file
    
    example file:
ID     Site Name         Latitude  Longitude   Elevation
05J04S PHANTOM VALLEY    40.38333 -105.83333     2752.4
05J05S WILD BASIN    40.20000 -105.60000     2913.9
05J06S DEADMAN HILL    40.80000 -105.76667     3115.1
    """
    with open(snotel_list_file) as f:
        next(f)  # skip the header line
        
        outputlist=[]
        for l in f:
            try:
                line=l.split()
                name=line[0].strip()
                lat=np.double(line[-3])
                lon=np.double(line[-2])
                outputlist.append(Bunch(name=name,lat=lat,lon=lon))
            except Exception as e: 
                print(e)
                print(l)
        
    return outputlist
Code example #19
    def handle_read(self):
        try:
            data = self.recv(1024)
            if not data:
                return

            datalist = data.decode().split(',')  # recv() returns bytes in Python 3
            timestamp = datalist[2]
            for i in range((len(datalist) - 3) // 4):
                if str(self.order) == datalist[3 + i*4]:
                    x = np.double(datalist[4 + i*4])
                    y = np.double(datalist[5 + i*4])

            """ create and publish tranmerc """
            gps_message = gpgga_tranmerc()
            gps_message.time = timestamp
            gps_message.northing = y/100
            gps_message.easting = x/100
            gps_message.fix = np.uint8(4)
            gps_message.sat = np.uint8(6)
            gps_message.hdop = np.double(1.0)
            self.gps_pub.publish(gps_message)
        except:
            pass
Code example #20
File: model.py Project: DragonSA/amf_research
    def price(self, Sl, Su, K, scheme=PenaltyRannacherScheme, **kwargs):
        """
        Price the payoff for prices in range [Sl, Su], and K increments, using
        the given FD scheme, and possibility using exponential increments
        (zspace) for the price range.
        """
        Sl = np.double(Sl)
        Su = np.double(Su)
        K = int(K)
        P = FDEModel.Value(self.V.T, self.N, Sl, Su, K)
        S = P.S
        ds = P.S[1] - P.S[0]
        Sl = P.S * self.dS.fde_l()

        # Terminal stock price and derivative value
        P.C[-1] = C = self.V.coupon(self.V.T)
        P.V[-1] = V = self.V.terminal(S) + C
        P.I[-1] = I = V

        # Discount price backwards
        t = P.t
        scheme = scheme(self.dS, self.dt, ds, S, **kwargs)
        for i in range(self.N - 1, -1, -1):
            # Discount previous derivative value
            P.C[i] = C = self.V.coupon(t[i])
            P.X[i] = X = self.V.default(t[i], Sl)
            V, I = scheme(t[i], V, X, C, self.V.transient)
            P.V[i] = V
            P.I[i] = I
        return P
Code example #21
File: solver.py Project: vondrejc/FFTHomPy
def CG(Afun, B, x0, par=None, callback=None):
    """
    Conjugate gradients solver.

    Parameters
    ----------
    Afun : Matrix, LinOper, or numpy.array of shape (n, n)
        it stores the matrix data of linear system and provides a matrix by
        vector multiplication
    B : VecTri or numpy.array of shape (n,)
        it stores a right-hand side of linear system
    x0 : VecTri or numpy.array of shape (n,)
        initial approximation of solution of linear system
    par : dict
        parameters of the method
    callback :

    Returns
    -------
    x : VecTri or numpy.array of shape (n,)
        resulting unknown vector
    res : dict
        results
    """
    if par is None:
        par = dict()
    if 'tol' not in list(par.keys()):
        par['tol'] = 1e-6
    if 'maxiter' not in list(par.keys()):
        par['maxiter'] = int(1e3)

    scal=get_scal(B, par)

    res = dict()
    xCG = x0
    Ax = Afun(x0)
    R = B - Ax
    P = R
    rr = scal(R,R)
    res['kit'] = 0
    res['norm_res'] = np.double(rr)**0.5 # /np.norm(E_N)
    norm_res_log = []
    norm_res_log.append(res['norm_res'])
    while (res['norm_res'] > par['tol']) and (res['kit'] < par['maxiter']):
        res['kit'] += 1 # number of iterations
        AP = Afun(P)
        alp = float(rr/scal(P,AP))
        xCG = xCG + alp*P
        R = R - alp*AP
        rrnext = scal(R,R)
        bet = rrnext/rr
        rr = rrnext
        P = R + bet*P
        res['norm_res'] = np.double(rr)**0.5
        norm_res_log.append(res['norm_res'])
        if callback is not None:
            callback(xCG)
    if res['kit'] == 0:
        res['norm_res'] = 0
    return xCG, res
Code example #22
    def apply_bc_A(self):
        """
            account for boundary conditions at x = 0 & x = L
            - additionally, the stiffness matrix [K] is modified to maintain
                 symmetry => positive definiteness
        """
        # apply BC at x = 0
        if self.BC_type[0] != 0:
            # essential BC
            self.A[0, 0:2] = np.array([1, 0])  # modifies first equation
            self.A[1, 0] = 0  # modification to maintain symmetry
        elif self.BC_type[0] == 0:
            # natural BC, Flux (Q*)
            n = -1.0  # unit outward normal
            dT = self.BC_0 / (self.kA * n)
            # modifies [K] first equation
            self.A[0, 0:2] = self.kA/np.double(self.h)**2 * np.array([1, -1])

        # apply BC at x = L
        if self.BC_type[1] != 0:
            # essential BC
            self.A[-1][-2:] = np.array([0, 1])  # modifies last equation
            self.A[-2][-1] = 0  # modification to maintain symmetry
        elif self.BC_type[1] == 0:
            # natural BC, flux (Q*)
            n = 1.0  # unit outward normal
            dT = self.BC_L / (self.kA * n)
            self.A[-1][-2:] = self.kA/np.double(self.h)**2 *\
                np.array([-1, 1])  # modifies last equation
        return self.A
Code example #23
File: validation.py Project: ALaDyn/Smilei
def matchesWithReference(data, expected_data, data_name, precision):
	# ok if exactly equal (including strings or lists of strings)
	try   :
		if expected_data == data:
			return True
	except: pass
	# If numbers:
	try:
		double_data = np.array(np.double(data), ndmin=1)
		if precision is not None:
			error = np.abs( double_data-np.array(np.double(expected_data), ndmin=1) )
			max_error_location = np.unravel_index(np.argmax(error), error.shape)
			max_error = error[max_error_location]
			if max_error < precision:
				return True
			print( "Reference quantity '"+data_name+"' does not match the data (required precision "+str(precision)+")")
			print( "Max error = "+str(max_error)+" at index "+str(max_error_location))
		else:
			if np.all(double_data == np.double(expected_data)):
				return True
			print( "Reference quantity '"+data_name+"' does not match the data")
	except Exception as e:
		print( "Reference quantity '"+data_name+"': unable to compare to data")
		print( e )
	return False
Code example #24
def genARbasis(numberFrequencyBins, sizeOfFourier, Fs, \
                formantsRange=None, \
                bwRange=None, \
                numberOfAmpsPerPole=5, \
                numberOfFreqPerPole=60, \
                maxF0 = 1000.0):
    if formantsRange is None:
        formantsRange = {}
        formantsRange[0] = [80.0, 1400.0]
        formantsRange[1] = [300.0, 4000.0]
        formantsRange[2] = [1100.0, 6000.0]
        formantsRange[3] = [6100.0, 20000.0]
        
    numberOfFormants = len(formantsRange)
    
    if bwRange is None:
        bwMin = maxF0
        bwMax = np.maximum(0.1 * Fs, bwMin)
        bwRange = np.arange(numberOfAmpsPerPole, dtype=np.double) \
                   * (bwMax - bwMin) / \
                   np.double(numberOfAmpsPerPole-1.0) + \
                   bwMin
        
    freqRanges = np.zeros([numberOfFormants, numberOfFreqPerPole])
    for n in range(numberOfFormants):
        freqRanges[n] = np.arange(numberOfFreqPerPole) \
                        * (formantsRange[n][1] - formantsRange[n][0]) / \
                        np.double(numberOfFreqPerPole-1.0) + \
                        formantsRange[n][0]
        
    totNbElements = numberOfFreqPerPole * \
                    numberOfFormants * numberOfAmpsPerPole
    poleAmp = np.zeros(totNbElements)
    poleFrq = np.zeros(totNbElements)
    WGAMMA = np.zeros([numberFrequencyBins, totNbElements])
    cplxExp = np.exp(-1j * 2.0 * np.pi * \
                     np.arange(numberFrequencyBins) / \
                     np.double(sizeOfFourier))
    
    for n in range(numberOfFormants):
        for w in range(numberOfFreqPerPole):
            for a in range(numberOfAmpsPerPole):
                elementNb = n * numberOfAmpsPerPole * numberOfFreqPerPole + \
                            w * numberOfAmpsPerPole + \
                            a
                poleAmp[elementNb] = np.exp(-bwRange[a] / np.double(Fs))
                poleFrq[elementNb] = freqRanges[n][w]
                ## pole = poleAmp[elementNb] * \
                ##        np.exp(1j * 2.0 * np.pi * \
                ##               poleFrq[elementNb] / np.double(Fs))
                WGAMMA[:,elementNb] = 1 / \
                   np.abs(1 - \
                          2.0 * \
                          poleAmp[elementNb] * \
                          np.cos(2.0 * np.pi * poleFrq[elementNb] / \
                                 np.double(Fs)) * cplxExp +
                          (poleAmp[elementNb] * cplxExp) ** 2\
                          ) ** 2
    
    return bwRange, freqRanges, poleAmp, poleFrq, WGAMMA
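
A small usage sketch (all parameter values invented) that builds the AR(2) spectral basis for a 512-point Fourier transform; assumes numpy as np and the function above:

import numpy as np

bwRange, freqRanges, poleAmp, poleFrq, WGAMMA = genARbasis(
    numberFrequencyBins=257, sizeOfFourier=512, Fs=16000.0)
print(WGAMMA.shape)   # (257, numberOfFormants * numberOfFreqPerPole * numberOfAmpsPerPole) = (257, 1200)
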
Code example #25
def data2AB(data, x0=None):
    n = data.shape[0]
    T = data.shape[1]
    YY = np.dot(data[:, 1:], data[:, 1:].T)
    XX = np.dot(data[:, :-1], data[:, :-1].T)
    YX = np.dot(data[:, 1:], data[:, :-1].T)

    model = VAR(data.T)
    r = model.fit(1)
    A = r.coefs[0,:,:]

    # A = np.ones((n,n))
    B = np.ones((n, n))
    np.fill_diagonal(B, 0)
    B[np.triu_indices(n)] = 0
    K = int(np.sum(np.abs(B)))  # abs(A)+abs(B)))

    a_idx = np.where(A != 0)
    b_idx = np.where(B != 0)
    np.fill_diagonal(B, 1)

    try:
        s = x0.shape
        x = x0
    except AttributeError:
        x = np.r_[A.flatten(), 0.1*np.random.randn(K)]
    o = optimize.fmin_bfgs(nllf2, x,
                           args=(np.double(A), np.double(B),
                                 YY, XX, YX, T, a_idx, b_idx),
                           gtol=1e-12, maxiter=500,
                           disp=False, full_output=True)
    A, B = x2M(o[0], np.double(A), np.double(B), a_idx, b_idx)
    B = B+B.T
    return  A, B
Code example #26
    def calculate_gh_at_sigma_and_temp(self):
        import anharmonic._phono3py as phono3c
        if self._is_precondition:
            out = self._collision_out[self._isigma, :, self._itemp]
            out_reverse = np.where(self._frequencies>self._cutoff_frequency, 1 / out, 0)
            self._z[self._isigma, :, self._itemp] = self._r[self._isigma, :, self._itemp] * out_reverse[..., np.newaxis]
        else:
            self._z[self._isigma, :, self._itemp] = self._r[self._isigma, :, self._itemp]

        self._z[:, np.where(np.any(np.abs(self._qpoints) > self._pp._criteria, axis=1))] = 0
        zr0 = np.zeros(3, dtype="double")
        phono3c.phonon_3_multiply_dvector_gb3_dvector_gb3(zr0,
                                                        self._z_prev[self._isigma,:,self._itemp].copy(),
                                                        self._r_prev[self._isigma,:,self._itemp].copy(),
                                                        np.intc(self._irr_index_mapping).copy(),
                                                        np.intc(self._kpoint_operations[self._rot_mappings]),
                                                        np.double(np.linalg.inv(self._primitive.get_cell())).copy())
        #Flexibly preconditioned CG: r(i+1)-r(i) instead of r(i+1)
        r = self._r[self._isigma,:,self._itemp] - self._r_prev[self._isigma,:,self._itemp]
        # r = self._r[self._isigma,:,self._itemp]
        zr1 = np.zeros(3, dtype="double")
        phono3c.phonon_3_multiply_dvector_gb3_dvector_gb3(zr1,
                                                        self._z[self._isigma,:,self._itemp].copy(),
                                                        r.copy(),
                                                        np.intc(self._irr_index_mapping).copy(),
                                                        np.intc(self._kpoint_operations[self._rot_mappings]),
                                                        np.double(np.linalg.inv(self._primitive.get_cell())).copy())
        zr1_over_zr0 = np.where(np.abs(zr0) > 0, zr1/zr0, 0)
        self._p[self._isigma,:,self._itemp] = self._z[self._isigma,:,self._itemp] +\
                                              zr1_over_zr0 * self._p_prev[self._isigma,:,self._itemp]

        self._p[:, np.where(np.any(np.abs(self._qpoints) > self._pp._criteria, axis=1))] = 0
Code example #27
File: Response_Analysis.py Project: sapresearch/Tomo
def getCDF(temp_response):
    """
    This function calculates a voting score for the response magnitude.
    The score is each entry's rank in the sorted magnitudes, normalized by the total count.

    Parameters
    ----------
    temp_response: numpy array
        Input batch response matrix
    """
    
    shape = temp_response.shape
    nrow = shape[0]
    ncol = shape[1]
    len_hist = nrow*ncol
    resp_arr = temp_response.reshape((len_hist,))
    sorted_arr = np.sort(resp_arr)
    
    cdf = np.zeros(shape, dtype=np.double)
    for r in range(nrow):
        for c in range(ncol):
            index = np.where(sorted_arr==temp_response[r,c])
            cdf[r,c] = np.double(index[0][0])/np.double(len_hist)
   
    return cdf
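
A tiny usage sketch (values invented; assumes numpy as np and getCDF above):

import numpy as np

resp = np.array([[0.2, 0.9],
                 [0.5, 0.1]])
print(getCDF(resp))   # each entry's rank in the sorted responses divided by the number of entries
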
Code example #28
    def train_perceptron(self, n_epochs):
        """Trains the parser by running the averaged perceptron algorithm for n_epochs."""
        self.weights = np.zeros(self.features.n_feats)
        total = np.zeros(self.features.n_feats)
        for epoch in range(n_epochs):
            print("Epoch {0}".format(epoch+1))
            n_mistakes = 0
            n_tokens = 0
            n_instances = 0
            for instance in self.reader.train_instances:
                feats = self.features.create_features(instance)
                scores = self.features.compute_scores(feats, self.weights)
                if self.projective:
                    heads_pred = self.decoder.parse_proj(scores)
                else:
                    heads_pred = self.decoder.parse_nonproj(scores)

                for m in range(np.size(heads_pred)):
                    if heads_pred[m] != instance.heads[m]:  # mistake
                        for f in feats[instance.heads[m]][m]:
                            if f < 0:
                                continue
                            self.weights[f] += 1.0
                        for f in feats[heads_pred[m]][m]:
                            if f < 0:
                                continue
                            self.weights[f] -= 1.0
                        n_mistakes += 1
                    n_tokens += 1
                n_instances += 1
            print("Training accuracy: {0}".format(np.double(n_tokens-n_mistakes) / np.double(n_tokens)))
            total += self.weights

        self.weights = total / np.double(n_epochs)
Code example #29
File: patterns.py Project: rueberger/hdnet
    def pattern_distance_jaccard(a, b):
        """
        Computes a distance measure for two binary patterns based on their
        Jaccard-Needham distance, defined as

        .. math::

            d_J(a,b) = 1 - J(a,b) = \\frac{|a \\cup b| - |a \\cap b|}{|a \\cup b|}.

        The distance measure takes values on the closed interval [0, 1],
        where a value of 1 is attained for disjoint, i.e. maximally dissimilar,
        patterns a and b, and a value of 0 for the case of :math:`a=b`.

        Parameters
        ----------
        a : list or array, int or bool
            Input pattern
        b : list or array, int or bool
            Input pattern

        Returns
        -------
        dist : double
            Jaccard distance between `a` and `b`.
        """
        # Note: code taken from scipy. Duplicated as only numpy references wanted for base functionality
        a = np.atleast_1d(a).astype(bool)
        b = np.atleast_1d(b).astype(bool)
        dist = (np.double(np.bitwise_and((a != b), np.bitwise_or(a != 0, b != 0)).sum())
                / np.double(np.bitwise_or(a != 0, b != 0).sum()))
        return dist
Code example #30
    def __init__(self, mean, stddev, minval, maxval):
        self.memoized_moments = [1.0]  # 0th moment
        self.mean = np.double(mean)
        self.stddev = np.double(stddev)
        # NOTE(ringwalt): The formula doesn't handle infinite values.
        self.minval = np.double(max(-10, minval))
        self.maxval = np.double(min(10, maxval))
Code example #31
def env_reset():
    global thita
    global g_drl_th_val
    global g_eql_th_val
    global g_rnd_th_val
    global thita_3db
    global P_max_macro
    global MC_power
    global slots_slots
    global sub
    global A_m
    global R_m
    global num_U
    global num_M
    global M_x
    global M_y
    global M_cell_beam
    global M_cell_associated_user_id
    global U_x
    global U_y
    global U_association_macro
    global U_neighbor
    global U_neighbor_sector
    global M_cell_region_x
    global M_cell_region_y
    global U_macro_distance
    global U_macro_power
    global M_cell_txblock_power
    global P_macro_subband
    global U_received_power_subband
    global U_throughput_subband
    global U_SINR_subband
    global U_CQI_subband
    global g_action
    global g_cntr

    global current_throughput
    global previous_throughput

    global g_counter

    global k_m_n
    global v_k_old
    global u_k_old
    global w_k_old

    global v_k_new
    global u_k_new
    global w_k_new

    k_m_n = []
    v_k_old = []
    u_k_old = []
    w_k_old = []

    v_k_new = []
    u_k_new = []
    w_k_new = []

    g_counter = 0
    current_throughput = []
    previous_throughput = 0
    g_drl_th_val = 0
    g_eql_th_val = 0
    g_rnd_th_val = 0

    M_cell_beam = []
    M_cell_associated_user_id = []
    M_cell_region_x = []
    M_cell_region_y = []
    M_cell_txblock_power = []

    U_x = []
    U_y = []
    U_association_macro = []
    U_neighbor = []
    U_neighbor_sector = []
    U_macro_distance = []
    U_macro_power = []
    U_throughputt = []

    P_macro_subband = []
    U_received_power_subband = []
    U_throughput_subband = []

    U_SINR_subband = []
    U_CQI_subband = []

    itr = 0
    for i in range(len(M_x)):
        M_cell_beam.append(random_beam())
        M_cell_associated_user_id.append([])

        for j in range(num_U):
            M_cell_associated_user_id[i].extend([itr + 1])
            U_association_macro.append(i + 1)
            itr = itr + 1

        # [x, y] = random_allocation(M_x[i], M_y[i], 360, 0.2 * R_m, R_m, num_U)
        # U_x.extend(x)
        # U_y.extend(y)

    # ***********************************************************************************************************************

    # g_data_x.append(U_x)
    # g_data_y.append(U_y)
    if g_cntr < g_data_len:
        U_x = data_set['arr_0'][g_cntr].tolist()
        # print(U_x)
        U_y = data_set['arr_1'][g_cntr].tolist()

    else:
        for i in range(len(M_x)):
            [x, y] = random_allocation(M_x[i], M_y[i], 360, 0.2 * R_m, R_m,
                                       num_U)
            U_x.extend(x)
            U_y.extend(y)

        g_data_x.append(U_x)
        g_data_y.append(U_y)

    g_cntr = g_cntr + 1
    print(g_cntr)

    # ***********************************************************************************************************************
    for i in range(len(U_x)):
        U_neighbor.append([])
        U_neighbor_sector.append([])

    for i in range(len(M_x)):
        M_cell_region_x.append([[], [], []])
        M_cell_region_y.append([[], [], []])

        for j in range(3):
            [a, b] = find_points_in_angle(M_x[i], M_y[i], U_x, U_y,
                                          M_cell_beam[i][j], i + 1, j + 1)
            M_cell_region_x[i][j].extend(a)
            M_cell_region_y[i][j].extend(b)

    thita = np.zeros((len(U_x), len(M_x)))

    for i in range(len(U_x)):
        U_macro_distance.append([])
        U_macro_power.append([])

        for j in range(len(M_x)):
            U_macro_distance[i].append(find_dis(U_x[i], U_y[i], M_x[j],
                                                M_y[j]))
            angl = M_cell_beam[j][U_neighbor_sector[i][j] - 1]

            thita[i][j] = find_angl(M_x[j], M_y[j], U_x[i], U_y[i], angl)
            a = transmit_power(thita[i][j], thita_3db, A_m,
                               power_watt_dbm(P_max_macro))
            U_macro_power[i].append(pw_m_hata(a, U_macro_distance[i][j]))

    for i in range(len(M_x)):
        M_cell_txblock_power.append(
            profile_power(sub, slots_slots, MC_power, P_max_macro))
        P_macro_subband.append(M_cell_txblock_power[i])

    generate_power_matrix_macro()

    throughput()

    temp_var = []

    for i in range(num_M * num_U):
        temp_var.extend(U_CQI_subband[i])
        temp_var.append(
            np.double(
                U_macro_distance[i][U_association_macro[i] - 1] / R_m >= 0.5))

    return np.array(temp_var)
Code example #32
from Smilei import *
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erf as erf

path = "temperature_isotropization1"

sim = Smilei(path)
density_electron = np.double(sim.namelist.Species["electron1"].charge_density)
coulomb_log = np.double(sim.namelist.Collisions[0].coulomb_log)
dt = np.double(sim.namelist.Main.timestep) / (2 * np.pi)

re_ = 2.8179403267e-15  # meters
wavelength = 1e-6  # meters
c = 3e8
coeff = (2. * np.pi / wavelength)**2 * re_ * c / (2. * np.sqrt(np.pi))

times = sim.ParticleDiagnostic(diagNumber=0).getAvailableTimesteps()
electrons0 = sim.ParticleDiagnostic(0, slice={"x": "all"}).get()
vx = electrons0["vx"]
electrons1 = sim.ParticleDiagnostic(1, slice={"x": "all"}).get()
vy = electrons1["vy"]
electrons2 = sim.ParticleDiagnostic(2, slice={"x": "all"}).get()
vz = electrons2["vz"]

e_Tpar = np.zeros(len(times))
e_Tperp = np.zeros(len(times))

fig = None
#fig = plt.figure(1)
if fig: fig.clf()
Code example #33
File: ex4.py Project: 2332256766/python_test
#  the data.

# print('\nVisualizing Neural Network... \n')
#
# displayData(Theta1[:,1:])

# input('\nProgram paused. Press enter to continue.\n')

## ================= Part 10: Implement Predict =================
#  After training the neural network, we would like to use it to predict
#  the labels. You will now implement the "predict" function to use the
#  neural network to predict the labels of the training set. This lets
#  you compute the training set accuracy.

pred = (predict(Theta1, Theta2, X) + 1) % 10

print('\nTraining Set Accuracy: %f\n' %
      (np.mean(np.double(pred == y.T)) * 100))

# rp = np.random.permutation(m)
#
# for i in range(m):
#     # Display
#     print('\nDisplaying Example Image\n')
#     t = np.array([X[rp[i]]])
#     displayData(t)
#
#     pred = predict(Theta1, Theta2, t)
#     print('\nNeural Network Prediction: %d (digit %d)\n'%(pred, (pred+1)%10))
#
#     input('Program paused. Press enter to continue.\n')
Code example #34
def preprocess():
    """ Input:
     Although this function doesn't have any input, you are required to load
     the MNIST data set from file 'mnist_all.mat'.

     Output:
     train_data: matrix of the training set. Each row of train_data contains
       the feature vector of an image
     train_label: vector of labels corresponding to each image in the training
       set
     validation_data: matrix of the validation set. Each row of validation_data
       contains the feature vector of an image
     validation_label: vector of labels corresponding to each image in the
       validation set
     test_data: matrix of the testing set. Each row of test_data contains
       the feature vector of an image
     test_label: vector of labels corresponding to each image in the testing
       set

     Some suggestions for the preprocessing step:
     - divide the original data set into training, validation and testing sets
           with corresponding labels
     - convert the original data set from integer to double by using the double()
           function
     - normalize the data to [0, 1]
     - feature selection"""

    mat = loadmat(r'D:\Machine Learning\mnist_all.mat'
                  )  #loads the MAT object as a Dictionary

    #Pick a reasonable size for validation data

    train_data = np.array([])
    test_data = np.array([])
    validation_data = np.array([])

    validation_label = np.array([])
    test_data = np.array([])
    test_label = np.array([])

    validation_label = np.zeros(shape=(1000, 1))

    trainx = mat.get('train0')
    test_data = mat.get('test0')
    a = range(trainx.shape[0])
    aperm = np.random.permutation(a)

    validation_data = trainx[aperm[0:1000], :]

    train_data = trainx[aperm[1000:], :]
    train_label = np.zeros(shape=((trainx.shape[0] - 1000), 1))

    test_label = np.zeros(shape=(test_data.shape[0], 1))

    for i in range(1, 10):
        trainx = mat.get('train' + str(i))
        testx = mat.get('test' + str(i))
        a = range(trainx.shape[0])
        aperm = np.random.permutation(a)
        validation_data = np.concatenate(
            ((validation_data, trainx[aperm[0:1000], :])), axis=0)
        b = np.zeros(shape=(1000, 1))

        b[:] = i
        validation_label = np.concatenate((validation_label, b), axis=0)

        train_data = np.concatenate((train_data, trainx[aperm[1000:], :]),
                                    axis=0)
        c = np.zeros(shape=((trainx.shape[0] - 1000), 1))
        c[:] = i
        train_label = np.concatenate((train_label, c), axis=0)

        d = np.zeros(shape=((testx.shape[0]), 1))
        d[:] = i
        test_label = np.concatenate((test_label, d), axis=0)

        test_data = np.concatenate((test_data, testx), axis=0)

    train_data = np.double(train_data)
    test_data = np.double(test_data)
    validation_data = np.double(validation_data)

    train_data /= 255.0
    test_data /= 255.0
    validation_data /= 255.0

    return train_data, train_label, validation_data, validation_label, test_data, test_label
Code example #35
def stFeatureExtraction(signal, Fs, Win, Step):
    """
    This function implements the short-term windowing process. For each short-term window a set of features is extracted.
    This results in a sequence of feature vectors, stored in a numpy matrix.

    ARGUMENTS
        signal:       the input signal samples
        Fs:           the sampling freq (in Hz)
        Win:          the short-term window size (in samples)
        Step:         the short-term window step (in samples)
    RETURNS
        stFeatures:   a numpy array (numOfFeatures x numOfShortTermWindows)
    """

    Win = int(Win)
    Step = int(Step)

    # Signal normalization
    signal = numpy.double(signal)

    signal = signal / (2.0**15)
    DC = signal.mean()
    MAX = (numpy.abs(signal)).max()
    signal = (signal - DC) / MAX

    N = len(signal)  # total number of samples
    curPos = 0
    countFrames = 0
    nFFT = int(Win / 2)

    [fbank, freqs] = mfccInitFilterBanks(
        Fs, nFFT
    )  # compute the triangular filter banks used in the mfcc calculation
    nChroma, nFreqsPerChroma = stChromaFeaturesInit(nFFT, Fs)

    numOfTimeSpectralFeatures = 8
    numOfHarmonicFeatures = 0
    nceps = 13
    numOfChromaFeatures = 13
    totalNumOfFeatures = numOfTimeSpectralFeatures + nceps + numOfHarmonicFeatures + numOfChromaFeatures
    #    totalNumOfFeatures = numOfTimeSpectralFeatures + nceps + numOfHarmonicFeatures
    stFeatures = numpy.array([], dtype=numpy.float64)

    while (curPos + Win - 1 <
           N):  # for each short-term window until the end of signal
        countFrames += 1
        x = signal[curPos:curPos + Win]  # get current window
        curPos = curPos + Step  # update window position
        X = abs(fft(x))  # get fft magnitude
        X = X[0:nFFT]  # normalize fft
        X = X / len(X)
        if countFrames == 1:
            Xprev = X.copy()  # keep previous fft mag (used in spectral flux)
        curFV = numpy.zeros((totalNumOfFeatures, 1))
        curFV[0] = stZCR(x)  # zero crossing rate
        curFV[1] = stEnergy(x)  # short-term energy
        curFV[2] = stEnergyEntropy(x)  # short-term entropy of energy
        [curFV[3], curFV[4]
         ] = stSpectralCentroidAndSpread(X, Fs)  # spectral centroid and spread
        curFV[5] = stSpectralEntropy(X)  # spectral entropy
        curFV[6] = stSpectralFlux(X, Xprev)  # spectral flux
        curFV[7] = stSpectralRollOff(X, 0.90, Fs)  # spectral rolloff
        curFV[numOfTimeSpectralFeatures:numOfTimeSpectralFeatures + nceps,
              0] = stMFCC(X, fbank, nceps).copy()  # MFCCs

        chromaNames, chromaF = stChromaFeatures(X, Fs, nChroma,
                                                nFreqsPerChroma)
        curFV[numOfTimeSpectralFeatures + nceps:numOfTimeSpectralFeatures +
              nceps + numOfChromaFeatures - 1] = chromaF
        curFV[numOfTimeSpectralFeatures + nceps + numOfChromaFeatures -
              1] = chromaF.std()
        #        curFV[numOfTimeSpectralFeatures+nceps+numOfChromaFeatures-1] = numpy.nonzero( chromaF > 2.0 * chromaF.mean() )[0].shape[0]
        #        temp = numpy.sort(chromaF[:,0])
        #        curFV[numOfTimeSpectralFeatures+numOfChromaFeatures] = temp[-1] / numpy.mean(temp[0:5])
        #        temp = numpy.sort(chromaF[:,0])
        #        if countFrames==10 or countFrames==30:
        #            A = int(temp[-1] / numpy.mean(temp[0:5]))/10
        #            for a in range(A):
        #                print("|"),
        #            print
        #        if countFrames==20:
        #            print numpy.nonzero(chromaF > 5*chromaF.mean())[0].shape[0]
        #HR, curFV[numOfTimeSpectralFeatures+nceps] = stHarmonic(x, Fs)
        # curFV[numOfTimeSpectralFeatures+nceps+1] = freq_from_autocorr(x, Fs)
        if countFrames == 1:
            stFeatures = curFV  # initialize feature matrix (if first frame)
        else:
            stFeatures = numpy.concatenate((stFeatures, curFV),
                                           1)  # update feature matrix
        Xprev = X.copy()

    return numpy.array(stFeatures)
Code example #36
def stSpectogram(signal, Fs, Win, Step, PLOT=False):
    """
    Short-term FFT magnitude for spectrogram estimation:
    Returns:
        a numpy array (nFFT x numOfShortTermWindows)
    ARGUMENTS:
        signal:      the input signal samples
        Fs:          the sampling freq (in Hz)
        Win:         the short-term window size (in samples)
        Step:        the short-term window step (in samples)
        PLOT:        flag, 1 if results are to be plotted
    RETURNS:
    """
    Win = int(Win)
    Step = int(Step)
    signal = numpy.double(signal)
    signal = signal / (2.0**15)
    DC = signal.mean()
    MAX = (numpy.abs(signal)).max()
    signal = (signal - DC) / (MAX - DC)

    N = len(signal)  # total number of signals
    curPos = 0
    countFrames = 0
    nfft = int(Win / 2)
    specgram = numpy.array([], dtype=numpy.float64)

    while (curPos + Win - 1 < N):
        countFrames += 1
        x = signal[curPos:curPos + Win]
        curPos = curPos + Step
        X = abs(fft(x))
        X = X[0:nfft]
        X = X / len(X)

        if countFrames == 1:
            specgram = X**2
        else:
            specgram = numpy.vstack((specgram, X))

    FreqAxis = [((f + 1) * Fs) / (2 * nfft) for f in range(specgram.shape[1])]
    TimeAxis = [(t * Step) / Fs for t in range(specgram.shape[0])]

    if (PLOT):
        fig, ax = plt.subplots()
        imgplot = plt.imshow(specgram.transpose()[::-1, :])
        Fstep = int(nfft / 5.0)
        FreqTicks = range(0, int(nfft) + Fstep, Fstep)
        FreqTicksLabels = [
            str(Fs / 2 - int((f * Fs) / (2 * nfft))) for f in FreqTicks
        ]
        ax.set_yticks(FreqTicks)
        ax.set_yticklabels(FreqTicksLabels)
        TStep = int(countFrames / 3)
        TimeTicks = range(0, countFrames, TStep)
        TimeTicksLabels = ['%.2f' % (float(t * Step) / Fs) for t in TimeTicks]
        ax.set_xticks(TimeTicks)
        ax.set_xticklabels(TimeTicksLabels)
        ax.set_xlabel('time (secs)')
        ax.set_ylabel('freq (Hz)')
        imgplot.set_cmap('jet')
        plt.colorbar()
        plt.show()

    return (specgram, TimeAxis, FreqAxis)
Code example #37
def stChromagram(signal, Fs, Win, Step, PLOT=False):
    """
    Short-term FFT magnitude for chromagram estimation:
    Returns:
        a numpy array (nFFT x numOfShortTermWindows)
    ARGUMENTS:
        signal:      the input signal samples
        Fs:          the sampling freq (in Hz)
        Win:         the short-term window size (in samples)
        Step:        the short-term window step (in samples)
        PLOT:        flag, 1 if results are to be plotted
    RETURNS:
    """
    Win = int(Win)
    Step = int(Step)
    signal = numpy.double(signal)
    signal = signal / (2.0**15)
    DC = signal.mean()
    MAX = (numpy.abs(signal)).max()
    signal = (signal - DC) / (MAX - DC)

    N = len(signal)  # total number of signals
    curPos = 0
    countFrames = 0
    nfft = int(Win / 2)
    nChroma, nFreqsPerChroma = stChromaFeaturesInit(nfft, Fs)
    chromaGram = numpy.array([], dtype=numpy.float64)

    while (curPos + Win - 1 < N):
        countFrames += 1
        x = signal[curPos:curPos + Win]
        curPos = curPos + Step
        X = abs(fft(x))
        X = X[0:nfft]
        X = X / len(X)
        chromaNames, C = stChromaFeatures(X, Fs, nChroma, nFreqsPerChroma)
        C = C[:, 0]
        if countFrames == 1:
            chromaGram = C.T
        else:
            chromaGram = numpy.vstack((chromaGram, C.T))
    FreqAxis = chromaNames
    TimeAxis = [(t * Step) / Fs for t in range(chromaGram.shape[0])]

    if (PLOT):
        fig, ax = plt.subplots()
        chromaGramToPlot = chromaGram.transpose()[::-1, :]
        Ratio = int(chromaGramToPlot.shape[1] / (3 * chromaGramToPlot.shape[0]))
        chromaGramToPlot = numpy.repeat(chromaGramToPlot, Ratio, axis=0)
        imgplot = plt.imshow(chromaGramToPlot)
        Fstep = int(nfft / 5.0)
        #        FreqTicks = range(0, int(nfft) + Fstep, Fstep)
        #        FreqTicksLabels = [str(Fs/2-int((f*Fs) / (2*nfft))) for f in FreqTicks]
        ax.set_yticks(range(Ratio // 2, len(FreqAxis) * Ratio, Ratio))
        ax.set_yticklabels(FreqAxis[::-1])
        TStep = int(countFrames / 3)
        TimeTicks = range(0, countFrames, TStep)
        TimeTicksLabels = ['%.2f' % (float(t * Step) / Fs) for t in TimeTicks]
        ax.set_xticks(TimeTicks)
        ax.set_xticklabels(TimeTicksLabels)
        ax.set_xlabel('time (secs)')
        imgplot.set_cmap('jet')
        plt.colorbar()
        plt.show()

    return (chromaGram, TimeAxis, FreqAxis)
Code example #38
def run_tracker(p, net_type, net_base_path, model_name):
    """
    run tracker, return bounding result and speed
    """
    # load model # IF QUANTISED
    net = SiamNet(net_type)
    net.load_state_dict(torch.load(
        os.path.join("/home/vision/orig_dp/siamtrackopt/FINN", net_base_path, model_name))['state_dict'])
    net = net.to(device)
    # evaluation mode
    net.eval()

    # load sequence
    img_list, target_position, target_size, gt_list = load_sequence(p.seq_base_path, p.video)

    # first frame
    img_uint8 = cv2.imread(img_list[0])
    img_uint8 = cv2.cvtColor(img_uint8, cv2.COLOR_BGR2RGB)
    img_double = np.double(img_uint8)    # uint8 to float

    # compute avg for padding
    avg_chans = np.mean(img_double, axis=(0, 1))

    wc_z = target_size[1] + p.context_amount * sum(target_size)
    hc_z = target_size[0] + p.context_amount * sum(target_size)
    s_z = np.sqrt(wc_z * hc_z)
    scale_z = p.examplar_size / s_z

    # crop examplar z in the first frame
    z_crop = get_subwindow_tracking(img_double, target_position, p.examplar_size, round(s_z), avg_chans)

    z_crop = np.uint8(z_crop)  # you need to convert it to uint8
    # convert image to tensor
    z_crop_tensor = 255.0 * F.to_tensor(z_crop).unsqueeze(0)

    d_search = (p.instance_size - p.examplar_size) / 2
    pad = d_search / scale_z
    s_x = s_z + 2 * pad
    # arbitrary scale saturation
    min_s_x = p.scale_min * s_x
    max_s_x = p.scale_max * s_x

    # generate cosine window
    if p.windowing == 'cosine':
        window = np.outer(np.hanning(p.score_size * p.response_UP), np.hanning(p.score_size * p.response_UP))
    elif p.windowing == 'uniform':
        window = np.ones((p.score_size * p.response_UP, p.score_size * p.response_UP))
    window = window / sum(sum(window))

    # pyramid scale search
    scales = p.scale_step**np.linspace(-np.ceil(p.num_scale/2), np.ceil(p.num_scale/2), p.num_scale)

    # extract feature for examplar z
    z_features = net.conv_features(Variable(z_crop_tensor).to(device))
    z_features = z_features.repeat(p.num_scale, 1, 1, 1)

    # do tracking
    bboxes = np.zeros((len(img_list), 4), dtype=np.double)  # save tracking result
    stats = Stats()
    recorder = Recorder(str(net_type), p.video)
    start_time = datetime.datetime.now()
    for i in tqdm(range(0, len(img_list))):
        if i > 0:
            # do detection
            # currently, we only consider RGB images for tracking
            img_uint8 = cv2.imread(img_list[i])
            img_uint8 = cv2.cvtColor(img_uint8, cv2.COLOR_BGR2RGB)
            img_double = np.double(img_uint8)  # uint8 to float

            scaled_instance = s_x * scales
            scaled_target = np.zeros((2, scales.size), dtype = np.double)
            scaled_target[0, :] = target_size[0] * scales
            scaled_target[1, :] = target_size[1] * scales

            # extract scaled crops for search region x at previous target position
            x_crops = make_scale_pyramid(img_double, target_position, scaled_instance, p.instance_size, avg_chans, p)

            # get features of search regions
            x_crops_tensor = torch.FloatTensor(x_crops.shape[3], x_crops.shape[2], x_crops.shape[1], x_crops.shape[0])
            # response_map = SiameseNet.get_response_map(z_features, x_crops)
            for k in range(x_crops.shape[3]):
                tmp_x_crop = x_crops[:, :, :, k]
                tmp_x_crop = np.uint8(tmp_x_crop)
                # numpy array to tensor
                x_crops_tensor[k, :, :, :] = 255.0 * F.to_tensor(tmp_x_crop).unsqueeze(0)

            # get features of search regions
            x_features = net.conv_features(Variable(x_crops_tensor).to(device))

            # evaluate the offline-trained network for exemplar x features
            target_position, new_scale = tracker_eval(net, round(s_x), z_features, x_features, target_position, window, p)

            # scale damping and saturation
            s_x = max(min_s_x, min(max_s_x, (1 - p.scale_LR) * s_x + p.scale_LR * scaled_instance[int(new_scale)]))
            target_size = (1 - p.scale_LR) * target_size + p.scale_LR * np.array([scaled_target[0, int(new_scale)], scaled_target[1, int(new_scale)]])

        rect_position = np.array([target_position[1]-target_size[1]/2, target_position[0]-target_size[0]/2, target_size[1], target_size[0]])

        if p.visualization:
            if not recorder.is_initialized:
                recorder.set_options(img_uint8)
            save_info = {"net_type": str(net_type),
                         "video_name": p.video,
                         "save_to_file": p.save_to_file,
                         "save_to_video": p.save_to_video}
            visualize_tracking_result(img_uint8, rect_position, 1, i, gt_list[i], save_info, stats, recorder)

        # output bbox in the original frame coordinates
        o_target_position = target_position
        o_target_size = target_size
        bboxes[i,:] = np.array([o_target_position[1]-o_target_size[1]/2, o_target_position[0]-o_target_size[0]/2, o_target_size[1], o_target_size[0]])

    if p.visualization:
        recorder.close()
        # print(stats.meanIoU())
        # print(stats.meanCenterError())
        # print(stats.meanPrecision())
    end_time = datetime.datetime.now()
    fps = len(img_list) / max(1.0, (end_time - start_time).total_seconds())

    return bboxes, fps
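
The cosine window generated above is consumed by tracker_eval, which is not shown in this excerpt. As a rough sketch of what that step typically looks like in SiamFC-style trackers, the window is blended with the upsampled response map to penalize large displacements before the peak is located; the names response_up and window_influence below are illustrative assumptions, not part of the original code.

import numpy as np

def apply_window_penalty(response_up, window, window_influence=0.176):
    # Hypothetical illustration, not the project's tracker_eval:
    # blend the upsampled score map with the cosine window, then take the argmax.
    blended = (1 - window_influence) * response_up + window_influence * window
    return np.unravel_index(np.argmax(blended), blended.shape)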
コード例 #39
0
ファイル: nsb_entropy.py プロジェクト: flavian-manea/entro-py
def _rho(beta, nxkx, N, K):
    kappa = beta * K
    rx = np.array([_rhoi(x, nxkx, beta) for x in nxkx])
    return rx.prod() / rf(kappa, np.double(N))
コード例 #40
0
    shp_gpd = gpd.read_file(shp_path)

    print("adapting to our needs..")
    lunghezza = lines_length(shp_gpd['geometry'])
    solo_remi=[1 if i=='Rio Blu' else 0 for i in shp_gpd['BARCHE_A_R']]
    # regulations: to be done
    # there is also the tonnage, but for the moment we are not interested in it (10 t)
    vel_max=shp_gpd['VEL_MAX'][:]
    vel_max_mp=shp_gpd['VEL_MAX_MP'][:]
    larghezza=shp_gpd['LARGHEZZA_']
    senso_unico=shp_gpd['ONEWAY'][:]
    orario_senso_start=gpd.GeoSeries(np.zeros([larghezza.size], dtype=np.int8))
    orario_senso_end=gpd.GeoSeries(np.ones([larghezza.size], dtype=np.int8)*24)
    orario_chiuso_start=gpd.GeoSeries(np.ones([larghezza.size], dtype=np.int8)*24)
    orario_chiuso_end=gpd.GeoSeries(np.ones([larghezza.size], dtype=np.int8)*24)
    altezza=np.double(np.ones_like(solo_remi))*1000

    lista_sensi_inversi=["DE SAN LUCA - ROSSINI", "DE PALAZZO - CANONICA", "DE LA FAVA","DE LA PIETA'  - SANT'ANTONIN","DE SAN GIUSEPPE", "DE LA TETA - SAN GIOVANNI LATERANO RAMO BASSO", "DE SAN GIACOMO DALL'ORIO","DE SAN VIO"]
    lista_limiti_sette=["GRANDE","DE CANNAREGIO"]

    lista_no_info_per_non_dimenticare=["DE LE GALEAZZE", "SCUOLA GABELLI","DE LA VERONA - MENUO" ]
    lista_limiti_laguna=["DE LA RANA", "MOLO B", "DI CAMPALTO","COA DI LATTE","MORTO - MAZZORBO","CARBONERA","ALTINO","MONTIRON","DE SANT'ANTONIO","DEL COLPO", "DI BOSSOLARO", "LA ROTTA","CAMPANA", "BOMBAE", "PORTOSECCO","DE LA CAVA","CODA REZIOL"]
    vel_laguna=10
    lista_limiti_centro=["SCUOLA GABELLI","A. CANAL","DE LA SACA DE LA MISERICORDIA","ARSENAL VECHIO"] # actually almost all of these are at the Lido
    lista_divieti_0_24 = ["ARSENAL VECHIO"]################## special case: the others have the transit ban in the correct column!
    # canals restricted for canoes and the like from 8 to 15 Monday-Friday and from 8 to 13 on Saturday
    lista_no_remetti = ["Canal Grande", "Cannaregio", "Giardini", "Greci - San Lorenzo", "- Santa Giustina - Sant’Antonin – Pietà", "Noale", "Novo", "Ca’ Foscari", "Santi Apostoli - Gesuiti"] # names still to be corrected
    vel_centro=5
    ponte_codes_list=[]
    epsilon=0.001
    for index,canal in shp_gpd.iterrows():
コード例 #41
0
    def triangulateCallback(self, p_left, p_right):
        t = p_left.header.stamp.to_time()
        if self.start == 0:
            self.start = t
        if len(p_left.polygon.points) == 0 or len(p_right.polygon.points) == 0:
            if self.tracker:
                if self.tracker.updateNotFound() >= 15:
                    rospy.loginfo("Lost!")
                    self.tracker = None
                    try:
                        data = "%.9f,%.3f,%.3f,%.3f,%d,%.3f,%.3f" % (
                            t - self.start, 0, 0, 0, 0, 0, 0)
                        self.writer.writerow(data.split(','))
                    except csv.Error as e:
                        sys.exit('File %s, line %d: %s' %
                                 (self.file_name, self.writer.line_num, e))
                else:
                    try:
                        data = "%.9f,%.3f,%.3f,%.3f,%d,%.3f,%.3f" % (
                            t - self.start, self.tracker.pos[0],
                            self.tracker.pos[1], self.tracker.pos[2],
                            self.tracker.isTracked, 0, 0)
                        self.writer.writerow(data.split(','))
                    except csv.Error as e:
                        sys.exit('File %s, line %d: %s' %
                                 (self.file_name, self.writer.line_num, e))
            else:
                try:
                    data = "%.9f,%.3f,%.3f,%.3f,%d,%.3f,%.3f" % (
                        t - self.start, 0, 0, 0, 0, 0, 0)
                    self.writer.writerow(data.split(','))
                except csv.Error as e:
                    sys.exit('File %s, line %d: %s' %
                             (self.file_name, self.writer.line_num, e))
            return
        psi_beta_list_left = []
        psi_beta_list_right = []
        for point in p_left.polygon.points:
            u = point.x
            v = point.y
            x = (u - u01) / mu1
            y = (v - v01) / mv1
            phi = np.arctan2(y, x)
            r = np.sqrt(x**2 + y**2)
            p = coeffs1[:]
            p.append(-r)
            thetas = np.roots(p)
            for theta in thetas:
                if np.imag(theta) == 0:
                    if 0 < np.real(theta) < np.pi / 2:
                        theta = np.double(np.real(theta))
                        break
            else:
                print "Unable to find theta"
                return None
            u_cam = np.array([[np.sin(theta) * np.cos(phi)],
                              [np.sin(theta) * np.sin(phi)], [np.cos(theta)]])
            rect_r = np.dot(R1, u_cam)
            psi = np.arcsin(rect_r[0, 0])
            beta = np.arctan2(rect_r[1, 0], rect_r[2, 0])
            psi_beta_list_left.append((psi, beta, point.z))

        for point in p_right.polygon.points:
            u = point.x
            v = point.y
            x = (u - u02) / mu2
            y = (v - v02) / mv2
            phi = np.arctan2(y, x)
            r = np.sqrt(x**2 + y**2)
            p = coeffs1[:]
            p.append(-r)
            thetas = np.roots(p)
            for theta in thetas:
                if np.imag(theta) == 0:
                    if 0 < np.real(theta) < np.pi / 2:
                        theta = np.double(np.real(theta))
                        break
            else:
                print "Unable to find theta"
                return None
            u_cam = np.array([[np.sin(theta) * np.cos(phi)],
                              [np.sin(theta) * np.sin(phi)], [np.cos(theta)]])
            rect_r = np.dot(R1, u_cam)
            psi = np.arcsin(rect_r[0, 0])
            beta = np.arctan2(rect_r[1, 0], rect_r[2, 0])
            psi_beta_list_right.append((psi, beta, point.z))

        psi_beta_list_left = sorted(psi_beta_list_left,
                                    key=itemgetter(2),
                                    reverse=True)
        psi_beta_list_right = sorted(psi_beta_list_right,
                                     key=itemgetter(2),
                                     reverse=True)

        i = 0
        j = 0
        found = False
        while not found:
            z1 = psi_beta_list_left[i][2]
            z2 = psi_beta_list_right[j][2]
            if z1 == 0 and z2 == 0:
                rospy.loginfo("No match")
                if self.tracker:
                    if self.tracker.updateNotFound() >= 15:
                        rospy.loginfo("Lost!")
                        self.tracker = None
                break  # a zero membership value should not be matched, so just stop
            if z1 < z2:  # right camera has better membership value
                #rospy.loginfo("j = %d, %f", j, z2)
                (psi2, beta2, z2) = psi_beta_list_right[j]
                for (psi1, beta1, z1) in psi_beta_list_left:
                    if abs(beta1 - beta2) < 0.06:  # On the same epipolar line
                        if psi1 <= psi2:
                            # In stereo vision, psi1 must be greater than psi2 for the same object
                            continue
                        rho = baseline * np.cos(psi2) / np.sin(psi1 - psi2)
                        if rho > 10:
                            # more than 10 meters from the camera, which is implausible and would be a misdetection
                            continue
                        x_out = rho * np.sin(psi1)
                        y_out = rho * np.cos(psi1) * np.sin(beta1)
                        z_out = rho * np.cos(psi1) * np.cos(beta1)

                        if not self.tracker:
                            self.tracker = Tracker(x_out, y_out, z_out)
                        else:
                            self.tracker.predict()
                            if self.tracker.distance(
                                (x_out, y_out, z_out)) > 0.36:
                                continue
                            self.tracker.updateTrack((x_out, y_out, z_out))

                        self.broadcaster.sendTransform(
                            self.tracker.pos,
                            tf.transformations.quaternion_from_euler(0, 0, 0),
                            rospy.Time.now(), '/blimp', '/world')
                        print(z1, z2)
                        found = True
                        break
                else:
                    # Arriving here means there is no match, increase index of right camera
                    j += 1
            else:  # left camera has better membership value
                #rospy.loginfo("i = %d, %f", i, z1)
                (psi1, beta1, z1) = psi_beta_list_left[i]
                for (psi2, beta2, z2) in psi_beta_list_right:
                    if abs(beta1 - beta2) < 0.06:  # On the same epipolar line
                        if psi1 <= psi2:
                            # In stereo vision, psi1 must be greater than psi2 for the same object
                            continue
                        rho = baseline * np.cos(psi2) / np.sin(psi1 - psi2)
                        if rho > 10:
                            # more than 10 meters from the camera, which is implausible and would be a misdetection
                            continue
                        x_out = rho * np.sin(psi1)
                        y_out = rho * np.cos(psi1) * np.sin(beta1)
                        z_out = rho * np.cos(psi1) * np.cos(beta1)

                        if not self.tracker:
                            self.tracker = Tracker(x_out, y_out, z_out)
                        else:
                            self.tracker.predict()
                            if self.tracker.distance(
                                (x_out, y_out, z_out)) > 0.36:
                                continue
                            self.tracker.updateTrack((x_out, y_out, z_out))

                        self.broadcaster.sendTransform(
                            self.tracker.pos,
                            tf.transformations.quaternion_from_euler(0, 0, 0),
                            rospy.Time.now(), '/blimp', '/world')
                        print(z1, z2)
                        found = True
                        break
                else:
                    # Arriving here means there is no match, increase index of left camera
                    i += 1
            if i >= len(psi_beta_list_left) or j >= len(psi_beta_list_right):
                rospy.loginfo("No match")
                if self.tracker:
                    if self.tracker.updateNotFound() >= 15:
                        rospy.loginfo("Lost!")
                        self.tracker = None
                break

        if self.tracker:
            try:
                data = "%.9f,%.3f,%.3f,%.3f,%d,%.3f,%.3f" % (
                    t - self.start, self.tracker.pos[0], self.tracker.pos[1],
                    self.tracker.pos[2], self.tracker.isTracked, z1, z2)
                self.writer.writerow(data.split(','))
            except csv.Error as e:
                sys.exit('File %s, line %d: %s' %
                         (self.file_name, self.writer.line_num, e))
        else:
            try:
                data = "%.9f,%.3f,%.3f,%.3f,%d,%.3f,%.3f" % (t - self.start, 0,
                                                             0, 0, 0, 0, 0)
                self.writer.writerow(data.split(','))
            except csv.Error as e:
                sys.exit('File %s, line %d: %s' %
                         (self.file_name, self.writer.line_num, e))
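
For reference, the triangulation carried out in the two symmetric branches above, restated as equations (this is only the code rewritten in math form, with $b$ the stereo baseline and $\psi_1, \psi_2, \beta_1$ the rectified bearing angles):

\[
\rho = \frac{b\,\cos\psi_2}{\sin(\psi_1 - \psi_2)}, \qquad
x = \rho\sin\psi_1, \quad
y = \rho\cos\psi_1\sin\beta_1, \quad
z = \rho\cos\psi_1\cos\beta_1 .
\]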
コード例 #42
0
def main():
    global lowpass_filter_b, lowpass_filter_a
    global hz_ire_scale, minn
    global f_deemp_b, f_deemp_a

    global deemp_t1, deemp_t2

    global Bcutr, Acutr

    global Inner

    global blocklen

    outfile = sys.stdout  #.buffer
    audio_mode = 0
    CAV = 0

    byte_start = 0
    byte_end = 0

    f_seconds = False

    optlist, cut_argv = getopt.getopt(sys.argv[1:], "d:D:hLCaAwSs:")

    for o, a in optlist:
        if o == "-d":
            deemp_t1 = np.double(a)
        if o == "-D":
            deemp_t2 = np.double(a)
        if o == "-a":
            audio_mode = 1
            blocklen = blocklen * 4
        if o == "-L":
            Inner = 1
        if o == "-A":
            CAV = 1
            Inner = 1
        if o == "-h":
            # use full spec deemphasis filter - will result in overshoot, but higher frequency response
            f_deemp_b = [3.778720395899611e-01, -2.442559208200777e-01]
            f_deemp_a = [1.000000000000000e+00, -8.663838812301168e-01]
        if o == "-C":
            Bcutr, Acutr = sps.butter(1,
                                      [2.50 / (freq / 2), 3.26 / (freq / 2)],
                                      btype='bandstop')
            Bcutr, Acutr = sps.butter(1,
                                      [2.68 / (freq / 2), 3.08 / (freq / 2)],
                                      btype='bandstop')
        if o == "-w":
            hz_ire_scale = (9360000 - 8100000) / 100
            minn = 8100000 + (hz_ire_scale * -60)
        if o == "-S":
            f_seconds = True
        if o == "-s":
            ia = int(a)
            if ia == 0:
                lowpass_filter_b, lowpass_filter_a = sps.butter(
                    5, (4.2 / (freq / 2)), 'low')
            if ia == 1:
                lowpass_filter_b, lowpass_filter_a = sps.butter(
                    5, (4.4 / (freq / 2)), 'low')
            if ia == 2:
                lowpass_filter_b, lowpass_filter_a = sps.butter(
                    6, (4.6 / (freq / 2)), 'low')
                lowpass_filter_b, lowpass_filter_a = sps.butter(
                    6, (4.6 / (freq / 2)), 'low')
                deemp_t1 = .825
                deemp_t2 = 2.35
            if ia == 3:
                # high frequency response - and ringing.  choose your poison ;)
                lowpass_filter_b, lowpass_filter_a = sps.butter(
                    10, (5.0 / (freq / 2)), 'low')
                lowpass_filter_b, lowpass_filter_a = sps.butter(
                    7, (5.0 / (freq / 2)), 'low')
            if ia == 4:
                lowpass_filter_b, lowpass_filter_a = sps.butter(
                    10, (5.3 / (freq / 2)), 'low')
                lowpass_filter_b, lowpass_filter_a = sps.butter(
                    7, (5.3 / (freq / 2)), 'low')

            if ia >= 30:
                lpfreq = ia / 10.0
                lowpass_filter_b, lowpass_filter_a = sps.butter(
                    5, (lpfreq / (freq / 2)), 'low')

    # set up deemp filter
    [tf_b, tf_a] = sps.zpk2tf(-deemp_t2 * (10**-8), -deemp_t1 * (10**-8),
                              deemp_t1 / deemp_t2)
    [f_deemp_b, f_deemp_a] = sps.bilinear(tf_b, tf_a, 1 / (freq_hz / 2))

    #	test()

    argc = len(cut_argv)
    if argc >= 1:
        infile = open(cut_argv[0], "rb")
    else:
        infile = sys.stdin

    byte_start = 0
    if (argc >= 2):
        byte_start = float(cut_argv[1])

    if (argc >= 3):
        byte_end = float(cut_argv[2])
        limit = 1
    else:
        limit = 0

    if f_seconds:
        byte_start *= (freq_hz * 2)
        byte_end *= (freq_hz * 2)
    else:
        byte_end += byte_start

    byte_end -= byte_start

    byte_start = int(byte_start)
    byte_end = int(byte_end)

    if (byte_start > 0):
        infile.seek(byte_start)

    if CAV and byte_start > 11454654400:
        CAV = 0
        Inner = 0

    # set up deemp filter
    [tf_b, tf_a] = sps.zpk2tf(-deemp_t2 * (10**-8), -deemp_t1 * (10**-8),
                              deemp_t1 / deemp_t2)
    [f_deemp_b, f_deemp_a] = sps.bilinear(tf_b, tf_a, 1 / (freq_hz / 2))

    total = toread = blocklen
    inbuf = infile.read(toread * 2)
    indata = np.frombuffer(inbuf, 'int16', toread) + 32768

    total = 0
    total_prevread = 0
    total_read = 0

    while (len(inbuf) > 0):
        toread = blocklen - indata.size

        if toread > 0:
            inbuf = infile.read(toread * 2)
            indata = np.append(
                indata,
                np.frombuffer(inbuf, 'int16',
                              len(inbuf) // 2) + 32768)

            if indata.size < blocklen:
                exit()

        if audio_mode:
            output, osamp = process_audio(indata)

            nread = osamp
            outfile.write(output)
        else:
            output_16 = process_video(indata)
            outfile.write(output_16)
            nread = len(output_16)

            total_prevread = total_read
            total_read += nread

            if CAV:
                if (total_read + byte_start) > 11454654400:
                    CAV = 0
                    Inner = 0

        indata = indata[nread:len(indata)]

        if limit == 1:
            byte_end -= toread
            if (byte_end < 0):
                inbuf = ""
コード例 #43
0
ファイル: star.py プロジェクト: wwangat/pyem
def main(args):
    if args.info:
        args.input.append(args.output)

    df = pd.concat(
        (star.parse_star(inp, augment=args.augment) for inp in args.input),
        join="inner")

    dfaux = None

    if args.cls is not None:
        df = star.select_classes(df, args.cls)

    if args.info:
        if star.is_particle_star(df) and star.Relion.CLASS in df.columns:
            c = df[star.Relion.CLASS].value_counts()
            print("%s particles in %d classes" %
                  ("{:,}".format(df.shape[0]), len(c)))
            print("    ".join([
                '%d: %s (%.2f %%)' % (i, "{:,}".format(s), 100. * s / c.sum())
                for i, s in iteritems(c.sort_index())
            ]))
        elif star.is_particle_star(df):
            print("%s particles" % "{:,}".format(df.shape[0]))
        if star.Relion.MICROGRAPH_NAME in df.columns:
            mgraphcnt = df[star.Relion.MICROGRAPH_NAME].value_counts()
            print(
                "%s micrographs, %s +/- %s particles per micrograph" %
                ("{:,}".format(len(mgraphcnt)), "{:,.3f}".format(
                    np.mean(mgraphcnt)), "{:,.3f}".format(np.std(mgraphcnt))))
        try:
            print("%f A/px (%sX magnification)" %
                  (star.calculate_apix(df), "{:,.0f}".format(
                      df[star.Relion.MAGNIFICATION][0])))
        except KeyError:
            pass
        if len(df.columns.intersection(star.Relion.ORIGINS3D)) > 0:
            print("Largest shift is %f pixels" % np.max(
                np.abs(df[df.columns.intersection(
                    star.Relion.ORIGINS3D)].values)))
        return 0

    if args.drop_angles:
        df.drop(star.Relion.ANGLES, axis=1, inplace=True, errors="ignore")

    if args.drop_containing is not None:
        containing_fields = [
            f for q in args.drop_containing for f in df.columns if q in f
        ]
        if args.invert:
            containing_fields = df.columns.difference(containing_fields)
        df.drop(containing_fields, axis=1, inplace=True, errors="ignore")

    if args.offset_group is not None:
        df[star.Relion.GROUPNUMBER] += args.offset_group

    if args.restack is not None:
        if not args.augment:
            star.augment_star_ucsf(df, inplace=True)
        star.set_original_fields(df, inplace=True)
        df[star.UCSF.IMAGE_PATH] = args.restack
        df[star.UCSF.IMAGE_INDEX] = np.arange(df.shape[0])

    if args.subsample_micrographs is not None:
        if args.bootstrap is not None:
            print("Only particle sampling allows bootstrapping")
            return 1
        mgraphs = df[star.Relion.MICROGRAPH_NAME].unique()
        if args.subsample_micrographs < 1:
            args.subsample_micrographs = int(
                max(np.round(args.subsample_micrographs * len(mgraphs)), 1))
        else:
            args.subsample_micrographs = int(args.subsample_micrographs)
        ind = np.random.choice(len(mgraphs),
                               size=args.subsample_micrographs,
                               replace=False)
        mask = df[star.Relion.MICROGRAPH_NAME].isin(mgraphs[ind])
        if args.auxout is not None:
            dfaux = df.loc[~mask]
        df = df.loc[mask]

    if args.subsample is not None and args.suffix == "":
        if args.subsample < 1:
            args.subsample = int(
                max(np.round(args.subsample * df.shape[0]), 1))
        else:
            args.subsample = int(args.subsample)
        ind = np.random.choice(df.shape[0], size=args.subsample, replace=False)
        mask = df.index.isin(ind)
        if args.auxout is not None:
            dfaux = df.loc[~mask]
        df = df.loc[mask]

    if args.copy_angles is not None:
        angle_star = star.parse_star(args.copy_angles, augment=args.augment)
        df = star.smart_merge(df,
                              angle_star,
                              fields=star.Relion.ANGLES,
                              key=args.merge_key)

    if args.copy_alignments is not None:
        align_star = star.parse_star(args.copy_alignments,
                                     augment=args.augment)
        df = star.smart_merge(df,
                              align_star,
                              fields=star.Relion.ALIGNMENTS,
                              key=args.merge_key)

    if args.copy_reconstruct_images is not None:
        recon_star = star.parse_star(args.copy_reconstruct_images,
                                     augment=args.augment)
        df[star.Relion.RECONSTRUCT_IMAGE_NAME] = recon_star[
            star.Relion.IMAGE_NAME]

    if args.transform is not None:
        if args.transform.count(",") == 2:
            r = geom.euler2rot(
                *np.deg2rad([np.double(s) for s in args.transform.split(",")]))
        else:
            r = np.array(json.loads(args.transform))
        df = star.transform_star(df, r, inplace=True)

    if args.invert_hand:
        df[star.Relion.ANGLEROT] = -df[star.Relion.ANGLEROT]
        df[star.Relion.ANGLETILT] = 180 - df[star.Relion.ANGLETILT]

    if args.copy_paths is not None:
        path_star = star.parse_star(args.copy_paths)
        star.set_original_fields(df, inplace=True)
        df[star.Relion.IMAGE_NAME] = path_star[star.Relion.IMAGE_NAME]

    if args.copy_ctf is not None:
        ctf_star = pd.concat((star.parse_star(inp, augment=args.augment)
                              for inp in glob.glob(args.copy_ctf)),
                             join="inner")
        df = star.smart_merge(df,
                              ctf_star,
                              star.Relion.CTF_PARAMS,
                              key=args.merge_key)

    if args.copy_micrograph_coordinates is not None:
        coord_star = pd.concat(
            (star.parse_star(inp, augment=args.augment)
             for inp in glob.glob(args.copy_micrograph_coordinates)),
            join="inner")
        df = star.smart_merge(df,
                              coord_star,
                              fields=star.Relion.MICROGRAPH_COORDS,
                              key=args.merge_key)

    if args.scale is not None:
        star.scale_coordinates(df, args.scale, inplace=True)
        star.scale_origins(df, args.scale, inplace=True)
        star.scale_magnification(df, args.scale, inplace=True)

    if args.scale_particles is not None:
        star.scale_origins(df, args.scale_particles, inplace=True)
        star.scale_magnification(df, args.scale_particles, inplace=True)

    if args.scale_coordinates is not None:
        star.scale_coordinates(df, args.scale_coordinates, inplace=True)

    if args.scale_origins is not None:
        star.scale_origins(df, args.scale_origins, inplace=True)

    if args.scale_magnification is not None:
        star.scale_magnification(df, args.scale_magnification, inplace=True)

    if args.recenter:
        df = star.recenter(df, inplace=True)

    if args.zero_origins:
        df = star.zero_origins(df, inplace=True)

    if args.pick:
        df.drop(df.columns.difference(star.Relion.PICK_PARAMS),
                axis=1,
                inplace=True,
                errors="ignore")

    if args.subsample is not None and args.suffix != "":
        if args.subsample < 1:
            print("Specific integer sample size")
            return 1
        nsamplings = args.bootstrap if args.bootstrap is not None else df.shape[
            0] // int(args.subsample)
        inds = np.random.choice(df.shape[0],
                                size=(nsamplings, int(args.subsample)),
                                replace=args.bootstrap is not None)
        for i, ind in enumerate(inds):
            star.write_star(
                os.path.join(
                    args.output,
                    os.path.basename(args.input[0])[:-5] + args.suffix +
                    "_%d" % (i + 1)), df.iloc[ind])

    if args.to_micrographs:
        gb = df.groupby(star.Relion.MICROGRAPH_NAME)
        mu = gb.mean()
        df = mu[[
            c for c in star.Relion.CTF_PARAMS + star.Relion.MICROSCOPE_PARAMS +
            [star.Relion.MICROGRAPH_NAME] if c in mu
        ]].reset_index()

    if args.micrograph_range:
        df.set_index(star.Relion.MICROGRAPH_NAME, inplace=True)
        m, n = [int(tok) for tok in args.micrograph_range.split(",")]
        mg = df.index.unique().sort_values()
        outside = list(range(0, m)) + list(range(n, len(mg)))
        dfaux = df.loc[mg[outside]].reset_index()
        df = df.loc[mg[m:n]].reset_index()

    if args.micrograph_path is not None:
        df = star.replace_micrograph_path(df,
                                          args.micrograph_path,
                                          inplace=True)

    if args.min_separation is not None:
        gb = df.groupby(star.Relion.MICROGRAPH_NAME)
        dupes = []
        for n, g in gb:
            nb = algo.query_connected(
                g[star.Relion.COORDS],
                args.min_separation / star.calculate_apix(df))
            dupes.extend(g.index[~np.isnan(nb)])
        dfaux = df.loc[dupes]
        df.drop(dupes, inplace=True)

    if args.merge_source is not None:
        if args.merge_fields is not None:
            if "," in args.merge_fields:
                args.merge_fields = args.merge_fields.split(",")
            else:
                args.merge_fields = [args.merge_fields]
        else:
            print("Merge fields must be specified using --merge-fields")
            return 1
        if args.merge_key is not None:
            if "," in args.merge_key:
                args.merge_key = args.merge_key.split(",")
        merge_star = star.parse_star(args.merge_source, augment=args.augment)
        df = star.smart_merge(df,
                              merge_star,
                              fields=args.merge_fields,
                              key=args.merge_key)

    if args.split_micrographs:
        dfs = star.split_micrographs(df)
        for mg in dfs:
            star.write_star(
                os.path.join(args.output,
                             os.path.basename(mg)[:-4]) + args.suffix, dfs[mg])
        return 0

    if args.auxout is not None and dfaux is not None:
        star.write_star(args.auxout, dfaux, simplify=args.augment_output)

    if args.output is not None:
        star.write_star(args.output, df, simplify=args.augment_output)
    return 0
コード例 #44
0
ファイル: nsb_entropy.py プロジェクト: flavian-manea/entro-py
def _rhoi(x, nxkx, beta):
    return power(rf(beta, np.double(x)), nxkx[x])
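
Read together with _rho from code example #39, these two helpers appear to evaluate the evidence term used by the NSB entropy estimator for a concentration parameter $\beta$. Writing $K_x$ = nxkx[x] (the number of bins observed exactly $x$ times), $N$ for the total count, $K$ for the alphabet size, and rf for the rising factorial, the code computes

\[
\rho(\beta) \;=\; \frac{\prod_x \big[(\beta)_x\big]^{K_x}}{(\beta K)_N},
\qquad (a)_n = a(a+1)\cdots(a+n-1).
\]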
コード例 #45
0
ファイル: scalars.py プロジェクト: santhozkumar/temp_numpy
np.short()
np.intc()
np.intp()
np.int0()
np.int_()
np.longlong()

np.ubyte()
np.ushort()
np.uintc()
np.uintp()
np.uint0()
np.uint()
np.ulonglong()

np.half()
np.single()
np.double()
np.float_()
np.longdouble()
np.longfloat()

np.csingle()
np.singlecomplex()
np.cdouble()
np.complex_()
np.cfloat()
np.clongdouble()
np.clongfloat()
np.longcomplex()
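
Many of the names exercised above are long-standing aliases rather than distinct scalar types; the mapping below is assumed from the NumPy 1.x documentation (most of the trailing-underscore and *0 aliases were removed in NumPy 2.0), so verify against your installed version.

# Assumed alias map (NumPy 1.x), for reference only:
# np.int0      -> np.intp          np.uint0       -> np.uintp
# np.float_    -> np.float64       np.longfloat   -> np.longdouble
# np.complex_, np.cfloat           -> np.cdouble (complex128)
# np.singlecomplex                 -> np.csingle  (complex64)
# np.clongfloat, np.longcomplex    -> np.clongdouble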
コード例 #46
0
ファイル: data_loader.py プロジェクト: BelovP/Waterfall
dNm = midRange[1] - midRange[0]
highRange = np.round(np.interp(highband, F, np.arange(NTmax)))
dNh = highRange[1] - highRange[0]
dN = max(dNl, dNm, dNh)
print('NTmax', NTmax)
print('NLmax', NLmax)
print('NTleft', NTleft)
print('Nread', Nread)

plt.hold(True)  # note: plt.hold was removed from modern matplotlib, where axes hold by default

lastImgLine = 0 # Remember last line, which was filled in Rrgb array.

for i in range(int(Nread)):
	if i == Nread - 1:
		R = np.double(dataset[(i) * NTmax:, 0:NLmax])
		T = np.arange((i) * NTmax, dataset.shape[0]) / fp
		F = np.linspace(0, fp, num=NTleft)
		lowRange = np.round(np.interp(lowband, F, np.arange(NTleft)))
		dNl = lowRange[1] - lowRange[0]
		midRange = np.round(np.interp(midband, F, np.arange(NTleft)))
		dNm = midRange[1] - midRange[0]
		highRange = np.round(np.interp(highband, F, np.arange(NTleft)))
		dNh = highRange[1] - highRange[0]
		dN = max(dNl, dNm, dNh)
	else:
		R = dataset[(i) * NTmax:(i + 1) * NTmax, 0:NLmax]
		T = np.arange((i) * NTmax, (i + 1) * NTmax) / fp

	S = sp.fft(R, axis=0)
	Rl = sp.ifft(S[int(lowRange[0]): int(lowRange[1]), :], n=int(dN), axis = 0)
コード例 #47
0
ファイル: main_train.py プロジェクト: yiyg510/deeplf
# Sample training estimation batch (for initial scale and batchnorm parameters)

beta0_list = []

for i, (sampler, embedder) in enumerate(zip(sampler_list, embedder_list)):

    Tp_est, Tv_est, Ap_est, Av_est = sampler.sample(
        args.est_batch_size[0],
        numpy.inf,
        args.fract_inside[0],
        xvalset='train',
        update_epoch=False)  # get all neighbors for estimation

    Tp_est = normalize_patches(args.patch_norm[0], Tp_est)
    Ap_est = normalize_patches(args.patch_norm[0], Ap_est)
    Y_est = numpy.double(Av_est == Tv_est)

    #
    # Estimate initial scale parameter

    print('estimating initial scale parameter')

    global scale_vars

    # scale for the original method
    l2n = id_net.get_l2n(Tp_est, Ap_est)
    scale_vars = (l2n, Y_est)
    beta0_list.append(
        minimize(labfus, 0.001, method='Nelder-Mead', tol=1e-6).x)

    embedder.beta0 = beta0_list[-1]  # add beta0 to model
コード例 #48
0
import numpy as np
from keras.layers import Input
from keras.layers import MaxPooling2D, UpSampling2D, Conv2D
from keras.models import Sequential
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
from keras.datasets import mnist

#load data
img_rows, img_cols = 28, 28
input_shape = [28, 28, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.double(np.reshape(x_train,
                               (len(x_train), img_rows, img_cols, 1))) / 255
x_test = np.double(np.reshape(x_test,
                              (len(x_test), img_rows, img_cols, 1))) / 255

# construct a convolutional stack autoencoder
auto_encoder = Sequential()
auto_encoder.add(
    Conv2D(15, (3, 3),
           activation='relu',
           padding='same',
           input_shape=input_shape))  # (?, 28, 28, 15)
auto_encoder.add(MaxPooling2D((2, 2), padding='same'))  # (?, 14, 14, 15)
auto_encoder.add(
    Conv2D(10, (3, 3),
           activation='relu',
           padding='same',
           input_shape=input_shape))  # (?, 14, 14, 10)
auto_encoder.add(MaxPooling2D((2, 2), padding='same'))  # (?, 7, 7, 10)
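
The listing stops after the encoder half. A minimal sketch of how the decoder, compilation and training are typically added for this kind of stacked convolutional autoencoder follows; it is an assumed continuation, not part of the original file.

# Hypothetical continuation: mirror the encoder back up to 28x28x1.
auto_encoder.add(Conv2D(10, (3, 3), activation='relu', padding='same'))    # (?, 7, 7, 10)
auto_encoder.add(UpSampling2D((2, 2)))                                     # (?, 14, 14, 10)
auto_encoder.add(Conv2D(15, (3, 3), activation='relu', padding='same'))    # (?, 14, 14, 15)
auto_encoder.add(UpSampling2D((2, 2)))                                     # (?, 28, 28, 15)
auto_encoder.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same'))  # (?, 28, 28, 1)

auto_encoder.compile(optimizer='adam', loss='binary_crossentropy')
auto_encoder.fit(x_train, x_train, epochs=10, batch_size=128,
                 validation_data=(x_test, x_test))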
コード例 #49
0
def domain2polygons(zlonc_target, zlatc_target, wk_proj="wgs84", data=None):

    nzonal_target, nmerid_target = zlonc_target[:-1, :-1].shape
    if data is not None:
        nbands = data.shape[0]

    # Reference projections
    wgs84 = osr.SpatialReference()
    wgs84.ImportFromProj4("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
    wgs84_pyproj = Proj(init="epsg:4326")

    if wk_proj == "wgs84":
        ref_proj = osr.SpatialReference()
        ref_proj.ImportFromProj4(
            "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
        ref_pyproj = Proj(init="epsg:4326")

    else:
        ref_proj = osr.SpatialReference()
        ref_proj.ImportFromProj4(wk_proj)
        ref_pyproj = Proj(wk_proj)

    # Convert lon/lat domain coordinates to reference coordinate system
    xc, yc = transform(wgs84_pyproj, ref_pyproj, zlonc_target, zlatc_target)

    # define the layer
    vector_grid_driver = ogr.GetDriverByName("MEMORY")
    vector_grid_ds = vector_grid_driver.CreateDataSource("memData")
    vector_grid_layer = vector_grid_ds.CreateLayer("grid",
                                                   ref_proj,
                                                   geom_type=ogr.wkbPolygon)

    if data is not None:
        for iband in range(nbands):
            idField = ogr.FieldDefn("field_{}".format(iband), ogr.OFTReal)
            vector_grid_layer.CreateField(idField)

    featureDefn = vector_grid_layer.GetLayerDefn()

    for i in range(nmerid_target):
        for j in range(nzonal_target):
            lon_tmp = [
                xc[j, i],
                xc[j, i + 1],
                xc[j + 1, i + 1],
                xc[j + 1, i],
                xc[j, i],
            ]

            if wk_proj == "wgs84":
                lon_tmp = np.unwrap(lon_tmp, discont=180)

            # Create ring
            ring = ogr.Geometry(ogr.wkbLinearRing)
            _ = ring.AddPoint(lon_tmp[0], yc[j, i])
            _ = ring.AddPoint(lon_tmp[1], yc[j, i + 1])
            _ = ring.AddPoint(lon_tmp[2], yc[j + 1, i + 1])
            _ = ring.AddPoint(lon_tmp[3], yc[j + 1, i])
            _ = ring.AddPoint(lon_tmp[4], yc[j, i])

            # Create polygon
            poly = ogr.Geometry(ogr.wkbPolygon)
            poly.AddGeometry(ring)

            # Add feature to layer
            feature = ogr.Feature(featureDefn)
            feature.SetGeometry(poly)

            # Add data if any
            if data is not None:
                for iband in range(nbands):
                    feature.SetField("field_{}".format(iband),
                                     np.double(data[iband, j, i]))

            vector_grid_layer.CreateFeature(feature)
            feature = None

    return vector_grid_driver, vector_grid_ds, vector_grid_layer
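
A minimal usage sketch: the function expects cell-corner coordinates, so a 3x3 mesh describes a 2x2-cell domain. The coordinate values and the expected feature count below are illustrative, not from the original project.

# Hypothetical call with a tiny 2x2-cell domain (corner coordinates are 3x3)
import numpy as np
zlonc = np.repeat(np.linspace(10.0, 12.0, 3)[None, :], 3, axis=0)
zlatc = np.repeat(np.linspace(45.0, 47.0, 3)[:, None], 3, axis=1)
drv, ds, layer = domain2polygons(zlonc, zlatc, wk_proj="wgs84")
print(layer.GetFeatureCount())  # expect 4 polygons, one per grid cell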
コード例 #50
0
ファイル: sampling.py プロジェクト: zeno129/CSAG
def maxent_edge_sampling(model, theta_g, block, l, psi, beta, x_out):
    """

    :param model: GNM and parameters
    :type model: dict

    :param theta_g: parameters for marginal distribution of network structure P(G)
    :type theta_g: matrix

    :param block: sample block from penultimate iteration of mKPGM
    :type block: matrix

    :param psi: edge types
    :type psi: list

    :param beta: fraction of edges of each type
    :type beta: list

    :param x_out: node attributes
    :type x_out: list

    :return: graphOut (output graph, contains num. of vertices and edge list)
    :rtype: tuple
    """
    # U = unique probabilities
    # T = edge locations
    # (3)
    U, T = get_unique_prob_edge_location(model, theta_g, block, psi, x_out)
    # N_e = 0
    Nus = []

    # for each unique prob. pi_u
    for pi_u in U:
        # (5) Draw num. edges to sample per unique prob.
        n_u = np.random.binomial(len(T[pi_u]), pi_u)
        Nus.append(n_u)  # (6)
        # Accum. total num. edges to be sampled
        # N_e += n_u
    N_e = sum(Nus)

    # (7) Draw num. edges per edge-type to match rho_IN
    gamma = list(np.random.multinomial(n=N_e, pvals=beta, size=1))
    # n = Number of experiments (int)
    # pvals = Probabilities of each of the p different outcomes (sequence of floats, length p)
    # size = Output shape (int or tuple of ints, optional)

    E_OUT = []
    for i, u in enumerate(U):
        # (9) Draw num. edges per edge type for pi_u
        Y = list(
            np.random.multinomial(n=Nus[i],
                                  pvals=[np.double(g) / N_e for g in gamma[0]],
                                  size=1))

        # (10 - 14)
        for j, p in enumerate(psi):
            # (11) Sampling Y_j edges at random from T_uj possible locations
            possible_edges = list(T[u][p])
            random.shuffle(possible_edges)
            edges = possible_edges[:Y[0][j]]

            E_OUT.extend(edges)  # (12)
            gamma[0][j] -= Y[0][j]  # (13)
            N_e -= Y[0][j]  # (14)

    # vertices = len(block[0])
    vertices = pow(model['b'], model['K'])

    return vertices, E_OUT
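
The inline notes above describe the numpy.random.multinomial arguments; a self-contained illustration of those semantics (the numbers are arbitrary):

import numpy as np
draw = np.random.multinomial(n=10, pvals=[0.2, 0.3, 0.5], size=1)
# draw has shape (1, 3) and its entries always sum to 10, e.g. array([[2, 4, 4]])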
コード例 #51
0
dates = years * 10000 + months * 100 + days

co2file = options.co2file
if co2file != '':
    co2reader = csv.reader(open(co2file), delimiter=',')
    co2data = []
    cnt = 0
    for row in co2reader:
        if cnt:
            year, jday, co2 = row
            d = datetime.datetime(int(year), 1,
                                  1) + datetime.timedelta(int(jday) - 1)
            co2data.append(
                [d.year * 10000 + d.month * 100 + d.day,
                 double(co2)])
        cnt += 1
    co2data = array(co2data)
    co2interp = interp(dates, co2data[:, 0],
                       co2data[:, 1])  # linear interpolate to dates
else:
    co2interp = None  # no co2 data

firstyear = years[0]
lastyear = years[-1]
yeartostop = lastyear - 30 + 1

cnt = 1
for i in range(firstyear, yeartostop + 1, 5):
    idx = indices(dates, i, i + 49)
    data = alldata[idx]
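
The dates array used above packs a calendar date into a single integer key, so 15 June 1987 becomes 19870615:

year, month, day = 1987, 6, 15
date_key = year * 10000 + month * 100 + day   # 19870615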
コード例 #52
0
def preprocess():
    """ Input:
     Although this function doesn't have any input, you are required to load
     the MNIST data set from file 'mnist_all.mat'.

     Output:
     train_data: matrix of the training set. Each row of train_data contains
       the feature vector of an image
     train_label: vector of labels corresponding to each image in the training
       set
     validation_data: matrix of the validation set. Each row of validation_data
       contains the feature vector of an image
     validation_label: vector of labels corresponding to each image in the
       validation set
     test_data: matrix of the test set. Each row of test_data contains
       the feature vector of an image
     test_label: vector of labels corresponding to each image in the test
       set

     Some suggestions for preprocessing step:
     - feature selection"""

    mat = loadmat('mnist_all.mat')  # loads the MAT object as a Dictionary

    # Pick a reasonable size for validation data

    # ------------Initialize preprocess arrays----------------------#
    train_preprocess = np.zeros(shape=(50000, 784))
    validation_preprocess = np.zeros(shape=(10000, 784))
    test_preprocess = np.zeros(shape=(10000, 784))
    train_label_preprocess = np.zeros(shape=(50000,))
    validation_label_preprocess = np.zeros(shape=(10000,))
    test_label_preprocess = np.zeros(shape=(10000,))
    # ------------Initialize flag variables----------------------#
    train_len = 0
    validation_len = 0
    test_len = 0
    train_label_len = 0
    validation_label_len = 0
    # ------------Start to split the data set into 6 arrays-----------#
    for key in mat:
        # -----------when the set is training set--------------------#
        if "train" in key:
            label = key[-1]  # record the corresponding label
            tup = mat.get(key)
            sap = range(tup.shape[0])
            tup_perm = np.random.permutation(sap)
            tup_len = len(tup)  # get the length of current training set
            tag_len = tup_len - 1000  # defines the number of examples which will be added into the training set

            # ---------------------adding data to training set-------------------------#
            train_preprocess[train_len:train_len + tag_len] = tup[tup_perm[1000:], :]
            train_len += tag_len

            train_label_preprocess[train_label_len:train_label_len + tag_len] = label
            train_label_len += tag_len

            # ---------------------adding data to validation set-------------------------#
            validation_preprocess[validation_len:validation_len + 1000] = tup[tup_perm[0:1000], :]
            validation_len += 1000

            validation_label_preprocess[validation_label_len:validation_label_len + 1000] = label
            validation_label_len += 1000

            # ---------------------adding data to test set-------------------------#
        elif "test" in key:
            label = key[-1]
            tup = mat.get(key)
            sap = range(tup.shape[0])
            tup_perm = np.random.permutation(sap)
            tup_len = len(tup)
            test_label_preprocess[test_len:test_len + tup_len] = label
            test_preprocess[test_len:test_len + tup_len] = tup[tup_perm]
            test_len += tup_len
            # ---------------------Shuffle,double and normalize-------------------------#
    train_size = range(train_preprocess.shape[0])
    train_perm = np.random.permutation(train_size)
    train_data = train_preprocess[train_perm]
    train_data = np.double(train_data)
    train_data = train_data / 255.0
    train_label = train_label_preprocess[train_perm]

    validation_size = range(validation_preprocess.shape[0])
    vali_perm = np.random.permutation(validation_size)
    validation_data = validation_preprocess[vali_perm]
    validation_data = np.double(validation_data)
    validation_data = validation_data / 255.0
    validation_label = validation_label_preprocess[vali_perm]

    test_size = range(test_preprocess.shape[0])
    test_perm = np.random.permutation(test_size)
    test_data = test_preprocess[test_perm]
    test_data = np.double(test_data)
    test_data = test_data / 255.0
    test_label = test_label_preprocess[test_perm]

    # Feature selection
    # Your code here.
    tot_data = np.vstack((train_data, validation_data, test_data))  # combine train, validation and test
    ftr_indices = np.all(tot_data == tot_data[0, :], axis=0)  # True for columns that never vary
    fltrd_data = tot_data[:, ~ftr_indices]  # drop columns that are constant across all rows
     
    tr_len = len(train_data)
    va_len = len(validation_data)
    tst_len = len(test_data)

    train_data = fltrd_data[0:tr_len,:]  # separating train data from the filtered data
    validation_data = fltrd_data[tr_len: (tr_len + va_len),:] # separating validation data from the filtered data
    test_data = fltrd_data[(tr_len + va_len): (tr_len + va_len + tst_len),:] # separating test data from the filtered data
    
    print('preprocess done')

    return train_data, train_label, validation_data, validation_label, test_data, test_label
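
The feature-selection block above keeps only pixel columns that vary across the combined data; a tiny standalone illustration of the idiom:

import numpy as np
X = np.array([[0, 3, 0, 7],
              [0, 5, 0, 7],
              [0, 1, 0, 7]])
constant_cols = np.all(X == X[0, :], axis=0)   # [ True, False,  True,  True]
X_reduced = X[:, ~constant_cols]               # only the varying column survives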
コード例 #53
0
## plt.rc('text', usetex=True)
plt.rc('image', cmap='jet')
plt.ion()

windowSizeInSamples = 2048
hopsize = 256
NFT = 2048
niter = 50

# TODO: also process these as options:
minF0 = 80
maxF0 = 500
Fs = 44100.0
maxFinFT = 8000.0
F = np.ceil(maxFinFT * NFT / np.double(Fs))

stepNotes = 16  # this is the number of F0s within one semitone
K = 10  # number of spectral shapes for the filter part
R = 0  # number of spectral shapes for the accompaniment
P = 30  # number of elements in dictionary of smooth filters

# number of chirped spectral shapes between each F0;
# this feature should be studied further before we settle
# on a good way of doing it.
chirpPerF0 = 1

# Create the harmonic combs, for each F0 between minF0 and maxF0:

F0Table, WF0 = \
         generate_WF0_chirped(minF0, maxF0, Fs, Nfft=NFT,
コード例 #54
0
ファイル: log2video.py プロジェクト: mit-aera/pyFlightGoggles
from scipy.spatial.transform import Slerp
from scipy import interpolate

curr_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(curr_path,'../'))
from flightgoggles.utils import *
from flightgoggles.env import flightgoggles_env

parser = argparse.ArgumentParser(description='FlightGoggles log2video')
parser.add_argument('-f', "--file_path", help="assign log file path", default=os.path.join(curr_path,"example_log.csv"))
parser.add_argument('-o', "--output", help="assign output video name", default=os.path.join(curr_path,"test.avi"))
args = parser.parse_args()
print("Reading log file from {}".format(args.file_path))

data = pd.read_csv(args.file_path, sep=',', header=None).values[1:,:]
data = np.double(data)
save_path = os.path.join(curr_path,"./tmp")
if not os.path.exists(save_path):
    os.makedirs(save_path)

env = flightgoggles_env(cfg_fgclient="FlightGogglesClient_debug_env.yaml")
# pos_curr = env.get_state("uav1")["position"]
# yaw_curr = env.get_state("uav1")["attitude_euler_angle"][2]

FPS_VIDEO = 60

# Interpolation
time_array = data[:,0]*1e-9
total_time = time_array[-1]
t_new = np.arange(time_array[0], total_time, 1.0/FPS_VIDEO)
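
The actual interpolation onto t_new is not shown in this excerpt. A rough sketch of the usual pattern follows, assuming the logged positions sit in data[:, 1:4] (an assumption about the log layout, not confirmed by the original file):

# Hypothetical sketch: resample logged positions onto the fixed 60 FPS timeline
f_pos = interpolate.interp1d(time_array, data[:, 1:4], axis=0)
pos_new = f_pos(t_new)   # shape (len(t_new), 3)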
コード例 #55
0
    print(debug_J)

    print('training neural network...')
    # randomly initialize the parameters
    initialTheta1 = randInitializeWeights(hidden_layer_size, input_layer_size)
    initialTheta2 = randInitializeWeights(num_labels, hidden_layer_size)
    initialParams = np.r_[initialTheta1.flatten(), initialTheta2.flatten()]

    lamb = 1
    # optimize the parameters
    tnc = minimize(costFunction,
                   initialParams,
                   args=(input_layer_size, hidden_layer_size, num_labels, X,
                         y_m, lamb),
                   jac=gradientVectorized,
                   method='TNC',
                   options={'maxiter': 500})

    print(tnc)

    nn_params = tnc.x

    Theta1 = nn_params[:initialTheta1.size].reshape(initialTheta1.shape)
    Theta2 = nn_params[initialTheta1.size:].reshape(initialTheta2.shape)

    displayData(Theta1[:, 1:])

    pre_y = predict(Theta1, Theta2, X)

    print('Train Accuracy: ', np.mean(np.double(pre_y == y)) * 100)
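
The accuracy line above averages an element-wise comparison vector; a minimal standalone version of that idiom:

import numpy as np
pre_y = np.array([1, 2, 3, 4])
y = np.array([1, 2, 0, 4])
accuracy = np.mean(np.double(pre_y == y)) * 100   # 75.0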
コード例 #56
0
i_map = 0
i_rprec = 0
i_p10 = 0

map_array = np.empty([50, 4])
rprec_array = np.empty([50, 4])
p10_array = np.empty([50, 4])

# file parsing
for line in open('measuresrun1ignore.txt', 'r'):

    # create map matrix
    if ' map ' in line:
        line = line.split(" ")
        map_array[i_map, 0] = np.double(line[23])
        i_map += 1

    # create Rprec matrix
    if 'Rprec ' in line:
        line = line.split(" ")
        rprec_array[i_rprec, 0] = np.double(line[19])
        i_rprec += 1

    # create Precision at 10 Matrix
    if ' P_10 ' in line:
        line = line.split(" ")
        p10_array[i_p10, 0] = np.double(line[22])
        i_p10 += 1

i_map = 0
コード例 #57
0
ファイル: trans_gidx.py プロジェクト: jramarohetra/psims
    lat2 = 90. - latdelta * latidx
    lon1 = -180. + londelta * (lonidx - 1)
    lon2 = -180. + londelta * lonidx

    if lat > lat1 or lat < lat2 or lon < lon1 or lon > lon2:
        raise Exception('Not in cell!')

    return latidx, lonidx

if len(sys.argv) != 6:
    print('Usage: trans_gidx.py latidx1 lonidx1 lat_delta1[,lon_delta1] lat_delta2[,lon_delta2] weathdir2')
    sys.exit(1)

latidx    = int(sys.argv[1])
lonidx    = int(sys.argv[2])
delta1    = [double(d) / 60. for d in sys.argv[3].split(',')] # arcminutes -> degrees
delta2    = [double(d) / 60. for d in sys.argv[4].split(',')]
weathdir2 = sys.argv[5]

if len(delta1) == 1:
    latdelta1 = londelta1 = delta1[0]
else:
    latdelta1, londelta1 = delta1
if len(delta2) == 1:
    latdelta2 = londelta2 = delta2[0]
else:
    latdelta2, londelta2 = delta2

latrat, lonrat = latdelta2 / latdelta1, londelta2 / londelta1

latd  = [0.5, 0, 1, 0, 1] # start at center
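
The grid deltas above arrive in arcminutes on the command line and are converted to degrees, e.g.:

arcmin = 30.0
degrees = arcmin / 60.0   # 0.5-degree grid spacing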
コード例 #58
0
    imlow = cv2.normalize(imlow.astype('float'), None, 0.0, 1.0,
                          cv2.NORM_MINMAX)
    ##    laplacian = cv2.Laplacian(imlow,cv2.CV_64F)
    ##    laplacian1 = np.absolute(laplacian)
    ##    laplacian2 = np.uint8(laplacian1)
    kernel = np.array([[-1, 0, 1]])
    dst = cv2.filter2D(imlow, -1, kernel)

    dst[dst < 0] = 0  # Where values are low
    dst[dst > 1] = 1  # Where values are high
    dst = dst * 255
    dst = np.uint8(dst)
    ret, th3 = cv2.threshold(dst, 20, 255, cv2.THRESH_BINARY)

    lines = cv2.HoughLines(th3, 1, np.pi / 180, 1)
    condition11 = 1
    condition12 = 1

    # cv2.HoughLines returns None when no lines are found, so guard before converting
    if lines is not None:
        lines = np.double(lines)
        for rho, theta in lines[:, 0, :]:

            if (theta < 80 * npi) & (theta > 0 * npi) & (condition11 == 1):
                thetaL = theta
                rhoL = rho
                condition11 = 0
            if (theta > 120 * npi) & (theta < 180 * npi) & (condition12 == 1):
                thetaR = theta
                rhoR = rho
                condition12 = 0
            if (condition11 == 0) & (condition12 == 0):
コード例 #59
0
    flag_betterspecanal = int(sys.argv[2])
    if (flag_betterspecanal == 0):
        print('Enter BlockSize: ')
        block_size = int(input())

    im = Image.open(img_name)
    print('Read ' + img_name)
    print('Image size: ', im.size)

    # Display image object by PIL.
    im.show(title='image')

    # Import Image Data into Numpy array.
    # The matrix x contains a 2-D array of 8-bit gray scale values.
    x = np.array(im)
    print('Data type: ', x.dtype)

    # Display numpy array by matplotlib.
    plt.imshow(x, cmap=plt.cm.gray)
    plt.title('Image')

    # Set colorbar location. [left, bottom, width, height].
    cax = plt.axes([0.9, 0.15, 0.04, 0.7])
    plt.colorbar(cax=cax)
    plt.show()

    x = np.double(x) / 255.0
    if flag_betterspecanal == 0:
        SpecAnal(x, 99, 99, block_size, base_name)
    else:
        BetterSpecAnal(x, base_name)
コード例 #60
0
def preprocess():
    """ Input:
     Although this function doesn't have any input, you are required to load
     the MNIST data set from file 'mnist_all.mat'.

     Output:
     train_data: matrix of the training set. Each row of train_data contains
       the feature vector of an image
     train_label: vector of labels corresponding to each image in the training
       set
     validation_data: matrix of the validation set. Each row of validation_data
       contains the feature vector of an image
     validation_label: vector of labels corresponding to each image in the
       validation set
     test_data: matrix of the test set. Each row of test_data contains
       the feature vector of an image
     test_label: vector of labels corresponding to each image in the test
       set

     Some suggestions for preprocessing step:
     - feature selection"""

    mat = loadmat('/Users/chenshihchia/PycharmProjects/MLassignment1/mnist_all.mat')  # loads the MAT object as a Dictionary

    # Pick a reasonable size for validation data

    # ------------Initialize preprocess arrays----------------------#
    train_preprocess = np.zeros(shape=(50000, 784))
    validation_preprocess = np.zeros(shape=(10000, 784))
    test_preprocess = np.zeros(shape=(10000, 784))
    train_label_preprocess = np.zeros(shape=(50000,))
    validation_label_preprocess = np.zeros(shape=(10000,))
    test_label_preprocess = np.zeros(shape=(10000,))
    # ------------Initialize flag variables----------------------#
    train_len = 0
    validation_len = 0
    test_len = 0
    train_label_len = 0
    validation_label_len = 0
    # ------------Start to split the data set into 6 arrays-----------#
    for key in mat:
        # -----------when the set is training set--------------------#
        if "train" in key:
            label = key[-1]  # record the corresponding label
            tup = mat.get(key)
            sap = range(tup.shape[0])
            tup_perm = np.random.permutation(sap)
            tup_len = len(tup)  # get the length of current training set
            tag_len = tup_len - 1000  # defines the number of examples which will be added into the training set

            # ---------------------adding data to training set-------------------------#
            train_preprocess[train_len:train_len + tag_len] = tup[tup_perm[1000:], :]
            train_len += tag_len

            train_label_preprocess[train_label_len:train_label_len + tag_len] = label
            train_label_len += tag_len

            # ---------------------adding data to validation set-------------------------#
            validation_preprocess[validation_len:validation_len + 1000] = tup[tup_perm[0:1000], :]
            validation_len += 1000

            validation_label_preprocess[validation_label_len:validation_label_len + 1000] = label
            validation_label_len += 1000

            # ---------------------adding data to test set-------------------------#
        elif "test" in key:
            label = key[-1]
            tup = mat.get(key)
            sap = range(tup.shape[0])
            tup_perm = np.random.permutation(sap)
            tup_len = len(tup)
            test_label_preprocess[test_len:test_len + tup_len] = label
            test_preprocess[test_len:test_len + tup_len] = tup[tup_perm]
            test_len += tup_len
            # ---------------------Shuffle,double and normalize-------------------------#
    train_size = range(train_preprocess.shape[0])
    train_perm = np.random.permutation(train_size)
    train_data = train_preprocess[train_perm]
    train_data = np.double(train_data)
    train_data = train_data / 255.0
    train_label = train_label_preprocess[train_perm]

    validation_size = range(validation_preprocess.shape[0])
    vali_perm = np.random.permutation(validation_size)
    validation_data = validation_preprocess[vali_perm]
    validation_data = np.double(validation_data)
    validation_data = validation_data / 255.0
    validation_label = validation_label_preprocess[vali_perm]

    test_size = range(test_preprocess.shape[0])
    test_perm = np.random.permutation(test_size)
    test_data = test_preprocess[test_perm]
    test_data = np.double(test_data)
    test_data = test_data / 255.0
    test_label = test_label_preprocess[test_perm]

    # Feature selection
    # Your code here.
    selected_features = np.zeros(shape=(784, 1))
    j = 0
    all_data  = np.concatenate((train_data, validation_data,test_data), axis=0)
    all_data_vstack = np.array(np.vstack((all_data)))
    all_data_reduced = np.all(all_data_vstack == all_data_vstack[0, :], axis=0)

    for i in range(all_data_reduced.shape[0]):
        if (bool(all_data_reduced[i])):
            selected_features[j] = i
            j = j+1

    all_data = all_data[:, ~all_data_reduced]

    train_data = all_data[0:train_data.shape[0],:]
    validation_data = all_data[train_data.shape[0]:train_data.shape[0]+validation_data.shape[0],:]
    test_data = all_data[train_data.shape[0]+validation_data.shape[0]:all_data.shape[0],:]

    print('preprocess done')
    return train_data, train_label, validation_data, validation_label, test_data, test_label, selected_features