Example No. 1
def _padarray(myarray, frame, type):
    """Used in a number of funcs to pad out array cols at start and
    end so that the original shape of the array is maintained
    following processing"""
    (div, mod) = divmod(frame,
                        2)  #pad array to keep original shape after averaging
    if mod != 0:
        pad = (frame - 1) / 2
    else:
        pad = frame / 2
    size = myarray.shape
    if type == 'av':
        start = scipy.transpose(
            scipy.resize(scipy.transpose(scipy.mean(myarray[:, 0:pad], 1)),
                         (pad, size[0])))
        end = scipy.transpose(
            scipy.resize(
                scipy.transpose(
                    scipy.mean(myarray[:, size[1] - pad:size[1]], 1)),
                (pad, size[0])))
    elif type == 'zero':
        start = end = scipy.transpose(
            scipy.resize(scipy.zeros((size[0], 1)), (pad, size[0])))
    padarray = scipy.concatenate((start, myarray, end), 1)

    return padarray, size
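A minimal usage sketch of the 'av' padding, with numpy standing in for the legacy scipy array namespace the snippet assumes (top-level `scipy.resize`/`scipy.mean` were removed from modern SciPy): with frame=3 one column equal to the edge mean is added on each side, so a 3-point moving average keeps the original width.

import numpy as np

# hypothetical demo, numpy standing in for the legacy scipy calls above
a = np.array([[1., 2., 3.],
              [4., 5., 6.]])
pad = 1  # frame = 3 -> pad = (3 - 1) / 2 = 1
start = np.transpose(np.resize(np.transpose(np.mean(a[:, 0:pad], 1)),
                               (pad, a.shape[0])))
end = np.transpose(np.resize(np.transpose(np.mean(a[:, -pad:], 1)),
                             (pad, a.shape[0])))
print(np.concatenate((start, a, end), 1))
# [[1. 1. 2. 3. 3.]
#  [4. 4. 5. 6. 6.]]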
Example No. 2
    def test_DLN_sigmas(self):
        # dimensions (2,1,3,4) = 24 elements
        dim = (2,1,3,4)
        count_up = arange(1,24,1)
        log_mean = resize(count_up*10, dim)
        log_sigma = resize(count_up, dim)
        
        var_method = 3       
        dist = Distribution_Log_Normal(var_method)
        sample_values = dist.sample_for_eqrm(log_mean,log_sigma)
        actual = exp(log_mean + 2*log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)

        var_method = 4      
        dist = Distribution_Log_Normal(var_method)
        sample_values = dist.sample_for_eqrm(log_mean,log_sigma)
        actual = exp(log_mean + log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)
        
        var_method = 5      
        dist = Distribution_Log_Normal(var_method)
        sample_values = dist.sample_for_eqrm(log_mean,log_sigma)
        actual = exp(log_mean - log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)
        
        var_method = 6    
        dist = Distribution_Log_Normal(var_method)
        sample_values = dist.sample_for_eqrm(log_mean,log_sigma)
        actual = exp(log_mean - 2*log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)       
Example No. 3
def simplicial_grid_2d(n):
    """
    Create an NxN 2d grid in the unit square
    
    The number of vertices along each axis is (N+1) for a total of (N+1)x(N+1) vertices
    
    A tuple (vertices,indices) of arrays is returned
    """
    vertices = zeros(((n + 1)**2, 2))
    vertices[:, 0] = ravel(resize(arange(n + 1), (n + 1, n + 1)))
    vertices[:, 1] = ravel(transpose(resize(arange(n + 1), (n + 1, n + 1))))
    vertices /= n

    indices = zeros((2 * (n**2), 3), scipy.int32)

    t1 = transpose(
        concatenate((matrix(arange(n)), matrix(arange(
            1, n + 1)), matrix(arange(n + 2, 2 * n + 2))),
                    axis=0))
    t2 = transpose(
        concatenate((matrix(arange(n)), matrix(arange(
            n + 2, 2 * n + 2)), matrix(arange(n + 1, 2 * n + 1))),
                    axis=0))
    first_row = concatenate((t1, t2))

    for i in xrange(n):
        indices[(2 * n * i):(2 * n * (i + 1)), :] = first_row + i * (n + 1)

    return (vertices, indices)
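For intuition, a sketch of the smallest case (n=1), assuming the Python 2 environment and the names the snippet imports: the four corners of the unit square and the two triangles that tile it.

vertices, indices = simplicial_grid_2d(1)
print(vertices)   # [[0. 0.]
                  #  [1. 0.]
                  #  [0. 1.]
                  #  [1. 1.]]
print(indices)    # [[0 1 3]
                  #  [0 3 2]]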
Example No. 4
    def test_sample_sigmas_lognormal(self):
        # Vulnerability_Function.sample is essentially a wrapper around
        # Distribution_Log_Normal.sample_for_eqrm (distribution='LN')
        # The mean and sigma can be anything so we'll ignore the setup values

        # dimensions (2,1,3,4) = 24 elements
        dim = (2, 1, 3, 4)
        count_up = arange(1, 24, 1)
        log_mean = resize(count_up * 10, dim)
        log_sigma = resize(count_up, dim)

        var_method = 3
        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='LN',
                                      var_method=var_method)
        sample_values = func.sample(log_mean, log_sigma)
        actual = exp(log_mean + 2 * log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)

        var_method = 4
        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='LN',
                                      var_method=var_method)
        sample_values = func.sample(log_mean, log_sigma)
        actual = exp(log_mean + log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)

        var_method = 5
        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='LN',
                                      var_method=var_method)
        sample_values = func.sample(log_mean, log_sigma)
        actual = exp(log_mean - log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)

        var_method = 6
        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='LN',
                                      var_method=var_method)
        sample_values = func.sample(log_mean, log_sigma)
        actual = exp(log_mean - 2 * log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)
Example No. 5
def transform_to_classic(A,b,c):
    count_vars = A.shape[1]
    addition_vars = A.shape[0]
    count_all_vars = count_vars + addition_vars
    _A = sc.resize(A, (A.shape[0], count_all_vars))
    _A[:, :count_vars] = A
    _A[:, count_vars:] = sc.eye(addition_vars)
    _c = sc.resize(c, (count_all_vars, 1))
    _c[count_vars:, :] = sc.zeros((addition_vars, 1))
    I = range(count_vars, count_vars+addition_vars)
    return _A, b, _c, I
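A small worked example (a sketch, assuming `sc` is the numpy/scipy array namespace the snippet aliases): each constraint row gains a slack variable, the slack columns form an identity block with zero cost, and the slack indices form the initial basis.

import numpy as sc  # stand-in for the snippet's `sc` alias

A = sc.array([[1., 2.],
              [3., 1.]])
b = sc.array([[4.], [5.]])
c = sc.array([[3.], [2.]])
_A, _b, _c, I = transform_to_classic(A, b, c)
print(_A)         # [[1. 2. 1. 0.]
                  #  [3. 1. 0. 1.]]   identity block = slack variables
print(_c.T)       # [[3. 2. 0. 0.]]   slacks carry zero cost
print(list(I))    # [2, 3]            initial basis: the slack columns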
Example No. 6
    def test_sample_sigmas_lognormal(self):
        # Vulnerability_Function.sample is essentially a wrapper around
        # Distribution_Log_Normal.sample_for_eqrm (distribution='LN')
        # The mean and sigma can be anything so we'll ignore the setup values
        
        # dimensions (2,1,3,4) = 24 elements
        dim = (2,1,3,4)
        count_up = arange(1,24,1)
        log_mean = resize(count_up*10, dim)
        log_sigma = resize(count_up, dim)
        
        var_method = 3
        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='LN',
                                      var_method=var_method)
        sample_values = func.sample(log_mean,log_sigma)
        actual = exp(log_mean + 2*log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)

        var_method = 4      
        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='LN',
                                      var_method=var_method)
        sample_values = func.sample(log_mean,log_sigma)
        actual = exp(log_mean + log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)
        
        var_method = 5      
        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='LN',
                                      var_method=var_method)
        sample_values = func.sample(log_mean,log_sigma)
        actual = exp(log_mean - log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)
        
        var_method = 6    
        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='LN',
                                      var_method=var_method)
        sample_values = func.sample(log_mean,log_sigma)
        actual = exp(log_mean - 2*log_sigma)
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)
Example No. 7
    def test_DLN_monte_carlo3(self):
        dim = (1,2,3)
        count_up = arange(0,6,1)
        log_mean = resize(count_up*10, dim)
        log_sigma = resize(count_up, dim)
        count_up_2 = arange(1,48,2)
        var_method = 2
        
        dist = Distribution_Log_Normal(var_method)
        sample_values = dist._monte_carlo(log_mean,log_sigma, True)

        # Cannot check the result; it's random
        self.assert_(sample_values.shape == dim)
Example No. 8
def baseline1(myarray):
    """Set first bin of each row to zero
    """
    size = myarray.shape
    take_array = scipy.transpose(
        scipy.resize(scipy.transpose(myarray[:, 0]), (size[1], size[0])))
    return myarray - take_array
Example No. 9
def create_table():
    vec = []
    for k in range(1, 56):
        vec.append((100003 - 200003 * k + 300007 * k * k) % 1000000 - 500000)
    for k in range(56, 4000001):
        vec.append((vec[k - 24] + vec[k - 55] + 1000000) % 1000000 - 500000)
    return sp.array(vec), sp.resize(vec, (2000, 2000))
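A hedged sanity check (assuming the `sp` alias used above): the first loop seeds a lagged-Fibonacci-style generator, the second extends it to 4,000,000 values, and `sp.resize` views exactly those values as a 2000x2000 table.

vec, table = create_table()
print(len(vec), table.shape)   # 4000000 (2000, 2000)
print(vec[0])                  # -299993, i.e. (100003 - 200003 + 300007) % 1000000 - 500000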
Example No. 10
def _std(a):
    """Find the standard deviation of 2D array
    along axis = 0
    """
    m = _mean(a,0)
    m = scipy.resize(m,(a.shape[0],a.shape[1]))
    return scipy.sqrt(scipy.sum((a-m)**2,0)/(a.shape[0]-1))
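As a quick check (assuming the `_mean(a, 0)` helper returns the column means), `_std` is the sample standard deviation with one degree of freedom removed, matching numpy's `std(axis=0, ddof=1)`:

import numpy as np

a = np.array([[1., 2.],
              [3., 4.],
              [5., 9.]])
print(a.std(axis=0, ddof=1))   # [2.         3.60555128] -- what _std(a) should return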
Example No. 11
def _std(a):
    """Find the standard deviation of 2D array
    along axis = 0
    """
    m = _mean(a, 0)
    m = scipy.resize(m, (a.shape[0], a.shape[1]))
    return scipy.sqrt(scipy.sum((a - m)**2, 0) / (a.shape[0] - 1))
Example No. 12
def autoscale(a):
    """Auto-scale array
    
    >>> a = array([[1,2,3,4],[0.1,0.2,-0.7,0.6],[5,1,7,9]])
    >>> a
    array([[ 1. ,  2. ,  3. ,  4. ],
           [ 0.1,  0.2, -0.7,  0.6],
           [ 5. ,  1. ,  7. ,  9. ]])
    >>> a = autoscale(a)
    >>> a
    array([[-0.39616816,  1.03490978, -0.02596746, -0.12622317],
           [-0.74121784, -0.96098765, -0.98676337, -0.93089585],
           [ 1.137386  , -0.07392213,  1.01273083,  1.05711902]])
    """
    mean_cols = scipy.resize(sum(a, 0) / a.shape[0], (a.shape))
    std_cols = scipy.resize(scipy.sqrt((sum((a - mean_cols) ** 2, 0)) / (a.shape[0] - 1)), (a.shape))
    return (a - mean_cols) / std_cols
Example No. 13
def normtot(myarray):
    """Normalises to a total of 1 for each row"""
    size_of_myarray = myarray.shape
    sum_of_cols = scipy.transpose(
        scipy.resize(scipy.sum(myarray, 1),
                     (size_of_myarray[1], size_of_myarray[0])))
    return_normal = myarray / sum_of_cols
    return return_normal
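A minimal sketch of the row normalisation, with numpy standing in for the legacy scipy calls: every row of the result sums to 1.

import numpy as np

a = np.array([[1., 3.],
              [2., 2.]])
sums = np.transpose(np.resize(np.sum(a, 1), (a.shape[1], a.shape[0])))
print(a / sums)   # [[0.25 0.75]
                  #  [0.5  0.5 ]]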
Example No. 14
def baseline2(myarray):
    """Subtract average of the first and last bin from each bin
    """
    size = myarray.shape
    take_array = scipy.transpose(
        scipy.resize(scipy.transpose((myarray[:, 0] + myarray[:, size[1] - 1]) / 2), (size[1], size[0]))
    )
    return myarray - take_array
Example No. 15
def augment_3_vector(v=array([0.0, 0.0, 0.0]), free=False):
    if free:
        freeval = 0.0
    else:
        freeval = 1.0
    assert shape(v) == (3,), "v argument must be 3-vector -- found " + str(shape(v))
    vaug = resize(v, (4,))
    vaug[3] = freeval
    return vaug
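Hypothetical calls, assuming numpy's `array`/`shape`/`resize` are the names in scope: the fourth component is the homogeneous coordinate, 1 for a point and 0 for a free vector.

import numpy as np

print(augment_3_vector(np.array([1., 2., 3.])))             # [1. 2. 3. 1.]
print(augment_3_vector(np.array([1., 2., 3.]), free=True))  # [1. 2. 3. 0.]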
Example No. 16
def autoscale(a):
    """Auto-scale array
    
    >>> a = array([[1,2,3,4],[0.1,0.2,-0.7,0.6],[5,1,7,9]])
    >>> a
    array([[ 1. ,  2. ,  3. ,  4. ],
           [ 0.1,  0.2, -0.7,  0.6],
           [ 5. ,  1. ,  7. ,  9. ]])
    >>> a = autoscale(a)
    >>> a
    array([[-0.39616816,  1.03490978, -0.02596746, -0.12622317],
           [-0.74121784, -0.96098765, -0.98676337, -0.93089585],
           [ 1.137386  , -0.07392213,  1.01273083,  1.05711902]])
    """
    mean_cols = scipy.resize(sum(a, 0) / a.shape[0], (a.shape))
    std_cols = scipy.resize(
        scipy.sqrt((sum((a - mean_cols)**2, 0)) / (a.shape[0] - 1)), (a.shape))
    return (a - mean_cols) / std_cols
Example No. 17
def baseline2(myarray):
    """Subtract average of the first and last bin from each bin
    """
    size = myarray.shape
    take_array = scipy.transpose(
        scipy.resize(
            scipy.transpose((myarray[:, 0] + myarray[:, size[1] - 1]) / 2),
            (size[1], size[0])))
    return myarray - take_array
Example No. 18
def augment_3_vector(v=array([0., 0., 0.]), free=False):
    if free:
        freeval = 0.
    else:
        freeval = 1.
    assert shape(v) == (
        3, ), "v argument must be 3-vector -- found " + str(shape(v))
    vaug = resize(v, (4, ))
    vaug[3] = freeval
    return vaug
Example No. 19
def _padarray(myarray, frame, type):
    """Used in a number of funcs to pad out array cols at start and
    end so that the original shape of the array is maintained
    following processing"""
    (div, mod) = divmod(frame, 2)  # pad array to keep original shape after averaging
    if mod != 0:
        pad = (frame - 1) / 2
    else:
        pad = frame / 2
    size = myarray.shape
    if type == "av":
        start = scipy.transpose(scipy.resize(scipy.transpose(scipy.mean(myarray[:, 0:pad], 1)), (pad, size[0])))
        end = scipy.transpose(
            scipy.resize(scipy.transpose(scipy.mean(myarray[:, size[1] - pad : size[1]], 1)), (pad, size[0]))
        )
    elif type == "zero":
        start = end = scipy.transpose(scipy.resize(scipy.zeros((size[0], 1)), (pad, size[0])))
    padarray = scipy.concatenate((start, myarray, end), 1)

    return padarray, size
Example No. 20
def ProcessQuarterMatrix(p):
    """
    this is used if parameters define just upper right and lower left
    quadrants of antisymmetric matrix.
    """
    n = int(scipy.sqrt(scipy.size(p)))  # p must supply n*n entries; shape must be an int
    Mu = scipy.resize(p, (n, n))
    Mfull = scipy.bmat([[scipy.zeros((n, n)), Mu],
                        [-scipy.transpose(Mu),
                         scipy.zeros((n, n))]])
    return scipy.linalg.expm(Mfull)
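A hedged sanity check (the function itself relies on the legacy top-level `scipy` namespace): `Mfull` is antisymmetric, so its matrix exponential should be orthogonal.

import numpy as np

p = np.array([0.1, -0.2, 0.3, 0.4])   # n*n = 4 entries -> n = 2
R = ProcessQuarterMatrix(p)
print(np.allclose(R @ R.T, np.eye(4)))   # True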
Example No. 21
    def test_DLN_monte_carlo2(self):
        # dimensions (2,1,3,4) = 24 elements
        dim = (2, 1, 3, 4)
        count_up = arange(1, 24, 1)
        log_mean = resize(count_up * 10, dim)
        log_sigma = resize(count_up, dim)
        count_up_2 = arange(1, 48, 2)
        variate = resize(count_up_2, dim)
        var_method = 2

        dist = Distribution_Log_Normal(var_method)
        # Provide a predictable sample
        dist.rvs = lambda size: count_up_2
        sample_values = dist._monte_carlo(log_mean, log_sigma, True)

        oldsettings = seterr(over='ignore')
        actual = exp(log_mean + variate * log_sigma)
        seterr(**oldsettings)
        self.assert_(allclose(sample_values, actual))
        self.assert_(sample_values.shape == dim)
Example No. 22
def grid(start, stop, stepSize):
    """Create an array which can be used as an abscissa for interpolation.
        Returns an array of regular values, increasing by 'stepSize', which go from
        'start' to 'stop' inclusively.
        """
    r = scipy.arange(start, stop + stepSize, stepSize)
    while r[-1] > stop:
        r = r[:-1]
    if r[-1] < stop:
        r = scipy.resize(r, (r.shape[0] + 1, ))
        r[-1] = stop + stepSize / 2
    return r
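Two hedged examples of the branches (again assuming the legacy `scipy.arange`/`scipy.resize`): an evenly divided range yields a plain inclusive grid; otherwise the last point is pushed half a step past 'stop'.

print(grid(0.0, 1.0, 0.25))   # [0.   0.25 0.5  0.75 1.  ]
print(grid(0.0, 1.0, 0.3))    # [0.   0.3  0.6  0.9  1.15]  (approximately)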
Example No. 23
    def test_sample_no_variability_normal(self):
        # Vulnerability_Function.sample is essentially a wrapper around
        # Distribution_Normal.sample_for_eqrm (distribution='N')
        # The mean and sigma can be anything so we'll ignore the setup values

        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='N',
                                      var_method=None)

        # dimensions (2,1,3,4) = 24 elements
        dim = (2, 1, 3, 4)
        count_up = arange(1, 24, 1)
        mean = resize(count_up * 10, dim)
        sigma = resize(count_up, dim)

        sample_values = func.sample(mean, sigma)

        actual = mean
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)
Example No. 24
def augment_3x3_matrix(M=mat(identity(3, "f")), disp=array([0.0, 0.0, 0.0]), free=False):
    if free:
        freeval = 0.0
    else:
        freeval = 1.0
    assert shape(M) == (3, 3), "M argument must be 3 x 3 matrix"
    assert shape(disp) == (3,), "displacement argument must be 1 x 3 array"
    # resize works properly only on rows ... therefore,
    # use transpose twice. (resize on columns messes up matrix contents)
    Mxr = mat(resize(M.array, (4, 3)))
    Mxr[3, :] = [0.0, 0.0, 0.0]
    MxrcT = mat(resize((Mxr.T).array, (4, 4)))
    try:
        # array: ndarray.resize works in place and returns None
        dcol = copy.copy(disp)
        dcol.resize(4)
        dcol[3] = freeval
    except AttributeError:
        # list
        dcol = copy.copy(disp)
        dcol.append(freeval)
    MxrcT[3, :] = dcol
    return MxrcT.T
Example No. 25
    def test_sample_no_variability_normal(self):
        # Vulnerability_Function.sample is essentially a wrapper around
        # Distribution_Normal.sample_for_eqrm (distribution='N')
        # The mean and sigma can be anything so we'll ignore the setup values
        
        func = Vulnerability_Function('test',
                                      self.mean_values,
                                      self.cv_values,
                                      distribution='N',
                                      var_method=None)
        
        # dimensions (2,1,3,4) = 24 elements
        dim = (2,1,3,4)
        count_up = arange(1,24,1)
        mean = resize(count_up*10, dim)
        sigma = resize(count_up, dim)
        
        sample_values = func.sample(mean,sigma)

        actual = mean
        self.assert_(allclose(sample_values, actual))
        self.assert_(actual.shape == dim)
Example No. 26
    def add_general(verb, newWeights):
        ## a more general add for both rows and columns
        a, b = ((self.weights.shape[0] + 1, self.weights.shape[1]) if verb
                else (self.weights.shape[0], self.weights.shape[1] + 1))
        # note: scipy.resize refills row-major, so growing the row count
        # preserves existing entries while growing the column count shifts them
        self.weights = scipy.resize(self.weights, (a, b))

        if newWeights is None:
            # a new row needs b entries, a new column needs a entries
            newWeights = scipy.ones(b) if verb else scipy.ones(a)
        if verb:
            self.weights[a - 1, :] = newWeights
        else:
            self.weights[:, b - 1] = newWeights
        return self.weights
Example No. 27
def output(contours, shape=(126, 126), outputfile='signatures.csv'):
    """
    Take the set of all contours that we have identified as possible signatures
    and resize them all into a canonical shape (the best shape and the best
    method for doing so have yet to be determined) so we can train a classifier
    on the pixels.  We want to do unsupervised clustering to separate the
    signatures from non-signatures
    """
    from scipy import resize
    with open(outputfile, 'a') as f:
        for c in contours:
            newc = map(int, resize(c, shape).flatten())
            f.write('\t'.join(map(str, newc)) + '\n')
Example No. 28
    def add_general(verb, newWeights):
        ## a more general add for both rows and columns
        a, b = ((self.weights.shape[0] + 1, self.weights.shape[1]) if verb
                else (self.weights.shape[0], self.weights.shape[1] + 1))
        # note: scipy.resize refills row-major, so growing the row count
        # preserves existing entries while growing the column count shifts them
        self.weights = scipy.resize(self.weights, (a, b))

        if newWeights is None:
            # a new row needs b entries, a new column needs a entries
            newWeights = scipy.ones(b) if verb else scipy.ones(a)
        if verb:
            self.weights[a - 1, :] = newWeights
        else:
            self.weights[:, b - 1] = newWeights
        return self.weights
Example No. 29
def output(contours,shape=(126,126),outputfile='signatures.csv'):
    """
    Take the set of all contours that we have identified as possible signatures
    and resize them all into a canonical shape (the best shape and the best
    method for doing so have yet to be determined) so we can train a classifier
    on the pixels.  We want to do unsupervised clustering to separate the
    signatures from non-signatures
    """
    from scipy import resize
    with open(outputfile,'a') as f:
        for c in contours:
            newc = map(int, resize(c, shape).flatten())
            f.write('\t'.join(map(str, newc))+'\n')
Example No. 30
def artificial_basis_method(A, b, c, eps):
    count_vars = A.shape[1]
    addition_vars = A.shape[0]
    count_all_vars = count_vars + addition_vars
    _A = sc.resize(A, (A.shape[0], count_all_vars))
    _A[:, :count_vars] = A
    _A[:, count_vars:] = sc.eye(addition_vars)
    _c = sc.resize(c, (count_all_vars, 1))
    _c[:count_vars, :] = sc.zeros((count_vars, 1))
    _c[count_vars:, :] = sc.full((addition_vars, 1), -1)
    # if I is None:
    I = range(count_vars, count_vars+addition_vars)
    # pprint.pprint((_A, b, _c ,I))
    Res = simplex_method(_A, b, _c, I, eps)
    if Res[2] < -eps:
        return None, None, None
    Real_I = [i for i in range(count_vars) if i not in Res[1]]

    for i in range(len(Res[1])):
        if Res[1][i] >= count_vars:
            Res[1][i] = Real_I.pop(0)

    return Res
Example No. 31
def augment_3x3_matrix(
        M=mat(identity(3, 'f')), disp=array([0., 0., 0.]), free=False):
    if free:
        freeval = 0.
    else:
        freeval = 1.
    assert shape(M) == (3, 3), "M argument must be 3 x 3 matrix"
    assert shape(disp) == (3, ), "displacement argument must be 1 x 3 array"
    # resize works properly only on rows ... therefore,
    # use transpose twice. (resize on columns messes up matrix contents)
    Mxr = mat(resize(M.array, (4, 3)))
    Mxr[3, :] = [0., 0., 0.]
    MxrcT = mat(resize((Mxr.T).array, (4, 4)))
    try:
        # array: ndarray.resize works in place and returns None
        dcol = copy.copy(disp)
        dcol.resize(4)
        dcol[3] = freeval
    except AttributeError:
        # list
        dcol = copy.copy(disp)
        dcol.append(freeval)
    MxrcT[3, :] = dcol
    return MxrcT.T
Example No. 32
def simplicial_grid_2d(n):
    """
    Create an NxN 2d grid in the unit square
    
    The number of vertices along each axis is (N+1) for a total of (N+1)x(N+1) vertices
    
    A tuple (vertices,indices) of arrays is returned
    """
    vertices = zeros(((n+1)**2,2))
    vertices[:,0] = ravel(resize(arange(n+1),(n+1,n+1)))
    vertices[:,1] = ravel(transpose(resize(arange(n+1),(n+1,n+1))))
    vertices /= n
    
    indices = zeros((2*(n**2),3),scipy.int32)

    
    t1 = transpose(concatenate((matrix(arange(n)),matrix(arange(1,n+1)),matrix(arange(n+2,2*n+2))),axis=0))
    t2 = transpose(concatenate((matrix(arange(n)),matrix(arange(n+2,2*n+2)),matrix(arange(n+1,2*n+1))),axis=0))
    first_row = concatenate((t1,t2))
    
    for i in xrange(n):       
        indices[(2*n*i):(2*n*(i+1)),:] = first_row + i*(n+1)
    
    return (vertices,indices)
Example No. 33
def dtw(seq1, seq2):
    """
    DTW
    
    縦/横ずれペナルティあり、不一致ペナルティが均一のオーソドックスなもの。
    コストに距離関数を入れるよりも挿入誤りには頑健なので
    PDF-MusicXMLマッチングにはこちらのほうが向いていると思われる
    """
    seq1 = sp.array(seq1)
    seq2 = sp.array(seq2)

    seq1_mat = sp.resize(seq1, (len(seq2), len(seq1))).T
    seq2_mat = sp.resize(seq2, (len(seq1), len(seq2)))

    mismatch_mat = sp.absolute(seq1_mat - seq2_mat)
    mismatch_idx = sp.where(mismatch_mat != 0)
    mismatch_mat[mismatch_idx[0], mismatch_idx[1]] = 1

    cost_mat = sp.zeros((len(seq1), len(seq2)))
    cost_mat[0, 0] = mismatch_mat[0, 0]
    for i in range(1, len(seq1)):
        cost_mat[i, 0] = cost_mat[i - 1, 0] + mismatch_mat[i, 0]
    for j in range(1, len(seq2)):
        cost_mat[0, j] = cost_mat[0, j - 1] + mismatch_mat[0, j]
    for i in range(1, len(seq1)):
        for j in range(1, len(seq2)):
            cost_s = cost_mat[i - 1, j - 1] + mismatch_mat[i, j]
            cost_h = cost_mat[i, j - 1] + mismatch_mat[i, j]
            cost_v = cost_mat[i - 1, j] + mismatch_mat[i, j]
            cost_mat[i, j] = min(cost_s, cost_h, cost_v)

    path_mat = _trace_optimal_path(cost_mat)

    confidence = 1.0 / (cost_mat[-1, -1] + 1e-5)

    return cost_mat, path_mat, confidence
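A minimal usage sketch (it assumes the `_trace_optimal_path` helper from the same module): the extra '2' in the second sequence is absorbed by a horizontal step, so the alignment cost stays zero and confidence is high.

cost_mat, path_mat, confidence = dtw([1, 2, 3], [1, 2, 2, 3])
print(cost_mat[-1, -1])   # 0.0
print(confidence)         # ~1e5, i.e. 1.0 / (0.0 + 1e-5)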
Example No. 34
def lintrend(myarray):
    """Subtract a linearly increasing baseline between first and last bins
    """
    size, t = myarray.shape, 0
    sub = scipy.zeros((size[0], size[1]), "d")
    while t < size[0]:
        a = myarray[t, 0]
        b = myarray[t, size[1] - 1]
        div = (b - a) / size[1]
        if div == 0:
            div = 1
        ar = scipy.arange(a, b, div, "d")
        sub[t, :] = scipy.resize(ar, (size[1],))
        t = t + 1
    return myarray - sub
Example No. 35
def baseline_linear(case):
	"""Baseline correction that subtracts a linearly increasing baseline between
	the first and last independent variable."""
	size, t = case.shape, 0
	subtract = scipy.zeros((size[0],size[1]), 'd')
	while t < size[0]:
		a = case[t,0]
		b = case[t,size[1]-1]
		div = (b-a)/size[1]
		if div == 0:
			div = 1
		arr = scipy.arange(a,b,div,'d')
		subtract[t,:] = scipy.resize(arr,(size[1],))
		t = t+1
	return case-subtract
Example No. 36
def lintrend(myarray):
    """Subtract a linearly increasing baseline between first and last bins
    """
    size, t = myarray.shape, 0
    sub = scipy.zeros((size[0], size[1]), 'd')
    while t < size[0]:
        a = myarray[t, 0]
        b = myarray[t, size[1] - 1]
        div = (b - a) / size[1]
        if div == 0:
            div = 1
        ar = scipy.arange(a, b, div, 'd')
        sub[t, :] = scipy.resize(ar, (size[1], ))
        t = t + 1
    return myarray - sub
Example No. 37
    def test_DN_monte_carlo4(self):
        # Check no randomness in the last dimension
        dim = (1,1,3)
        count_up = arange(1,4)
        log_mean = zeros(dim)
        log_sigma = resize(count_up, dim)
        var_method = 2
        
        dist = Distribution_Normal(var_method)
        sample_values = dist._monte_carlo(log_mean,log_sigma, False)

        actual = sample_values[0,0,0] * log_sigma
        #print "actual", actual
        #print "sample_values", sample_values 
        self.assert_(allclose(sample_values, actual))
Example No. 38
def filterbank_compute(samples):
    v = samples
    x = scipy.resize(v, (gain.shape[0], v.shape[0]))
    
    if zi.shape[0] != gain.shape[0]:
        zi.resize((gain.shape[0], 4, 2))
        
    def filt(x):
        coeffsB1 = scipy.array([B0[row[0]] / gain[row[0]],
                                B11[row[0]]/ gain[row[0]],
                                B2[row[0]] / gain[row[0]]])

        a = scipy.array([A0[row[0]], A1[row[0]], A2[row[0]]])

        y1, zi[row[0],0,:] = scipy.signal.lfilter(coeffsB1,
                                                  a,
                                                  x, zi = zi[row[0],0,:])
        
        y2, zi[row[0],1,:] = scipy.signal.lfilter([B0[row[0]],
                                                   B12[row[0]],
                                                   B2[row[0]]],
                                                  a,
                                                  y1, zi = zi[row[0],1,:])
        
        y3, zi[row[0],2,:] = scipy.signal.lfilter([B0[row[0]],
                                                   B13[row[0]],
                                                   B2[row[0]]],
                                                  a,
                                                  y2, zi = zi[row[0],2,:])
        
        y4, zi[row[0],3,:] = scipy.signal.lfilter([B0[row[0]],
                                                   B14[row[0]],
                                                   B2[row[0]]],
                                                  a,
                                                  y3, zi = zi[row[0],3,:])
        row[0] += 1
        return y4

    row = [0]
    y = scipy.apply_along_axis(filt, 1, x)
    return y.T 
Example No. 39
    def test_GroundMotionDistributionLogNormal(self):
        # Check no randomness in the last dimension
        dln = GroundMotionDistributionLogNormal(var_method=RANDOM_SAMPLING,
                                                atten_spawn_bins=1,
                                                n_recurrence_models=1)
        dim = (1, 1, 1, 4)
        log_mean = zeros(dim)
        count_up = arange(1, 4)
        log_sigma = resize(count_up, dim)
        sample_values = dln.ground_motion_sample(log_mean, log_sigma)
        # Returns: ndarray[spawn, GMmodel, rec_model, site, event, period]
        # spectral accelerations, measured in G.
        self.assert_(sample_values.shape == (1, 1, 1, 1, 1, 4))

        var = log(sample_values[0, 0, 0, 0, 0, 0])
        actual = exp(var * log_sigma)
        self.assert_(allclose(sample_values, actual))
Example No. 40
def _BW(X, group):
    """Generate B and W matrices for CVA
    Ref. Krzanowski
    """
    mx = scipy.mean(X, 0)[nA, :]
    tgrp = scipy.unique(group)
    for x in range(len(tgrp)):
        idx = _index(group, tgrp[x])
        L = len(idx)
        meani = scipy.mean(scipy.take(X, idx, 0), 0)
        meani = scipy.resize(meani, (len(idx), X.shape[1]))
        A = scipy.mean(scipy.take(X, idx, 0), 0) - mx
        C = scipy.take(X, idx, 0) - meani
        # initialise on the first group (x == 0) so no group is skipped
        if x > 0:
            Bo = Bo + L * scipy.dot(scipy.transpose(A), A)
            Wo = Wo + scipy.dot(scipy.transpose(C), C)
        elif x == 0:
            Bo = L * scipy.dot(scipy.transpose(A), A)
            Wo = scipy.dot(scipy.transpose(C), C)

    B = (1.0 / (len(tgrp) - 1)) * Bo
    W = (1.0 / (X.shape[0] - len(tgrp))) * Wo

    return B, W
Example No. 41
def _BW(X,group):
    """Generate B and W matrices for CVA
    Ref. Krzanowski
    """
    mx = scipy.mean(X,0)[nA,:]
    tgrp = scipy.unique(group)
    for x in range(len(tgrp)):
        idx = _index(group,tgrp[x])
        L = len(idx)
        meani = scipy.mean(scipy.take(X,idx,0),0)
        meani = scipy.resize(meani,(len(idx),X.shape[1]))
        A = scipy.mean(scipy.take(X,idx,0),0) - mx
        C = scipy.take(X,idx,0) - meani
        # initialise on the first group (x == 0) so no group is skipped
        if x > 0:
            Bo = Bo + L*scipy.dot(scipy.transpose(A),A)
            Wo = Wo + scipy.dot(scipy.transpose(C),C)
        elif x == 0:
            Bo = L*scipy.dot(scipy.transpose(A),A)
            Wo = scipy.dot(scipy.transpose(C),C)
    
    B = (1.0/(len(tgrp)-1))*Bo
    W = (1.0/(X.shape[0] - len(tgrp)))*Wo
    
    return B,W
Example No. 42
def _rhistinterpbound(ts_data,ts_ticks,tticks,p=10,lowbound=None,\
                     floor_eps=1.e-4,maxiter=5,pfactor=2):
    """ Wrapper of the histospline rhistinerp from _rational_hist.
        It only accept ts with time averaged values which is stamped at
        begining of period.If no such props is given in ts, function
        will assume input ts has such property.
        
        
    Parameters
    -----------
        
    ts : :class:`~vtools.data.timeseries.TimeSeries`
        Series to be interpolated

    times : :ref:`time_sequence <time_sequence>`
        The new times to which the series will be interpolated.
        
    filter_nan : boolean,optional
        True if nan points should be omitted or not.
    
    p : float,optional
        spline tension, must >-1.
    
    lowbound: float,optional
        lower bound for the data
    
    tolbound:float,optional
        lower bound tolerance for the data
        
    Returns
    -------
     result: array 
        interpolated values.
    
        
    .. note:: 
        In piecewise intervals the data are treated as follows: 
        1. If the input data lie above lobound for the interval,
        the spline will be forced (using parameters p and q)
        to lie above the bound
        2. If the input data lie along lobound (within a distance 
        tolobound), the spline will be a flat line on lobound.
        3. If the input data lie more than a distance tolobound below
        lobound, an error occurs and the routine aborts.

         
    """
    if len(ts_ticks) < 2:
        raise ValueError("data length is too short to do interpolation")

    x = ts_ticks
    ts_interval_ticks = x[1] - x[0]
    extra_x = x[-1] + ts_interval_ticks
    x = resize(x, len(x) + 1)
    x[-1] = extra_x
    y = ts_data
    p = ones(len(x) - 1, dtype='d') * p
    q = p
    y0 = y[0]
    yn = y[-1]
    ynew=rhist_bound(array(x).astype("float"),array(y),tticks.astype("float"),\
                            y0,yn,p,lbound=lowbound,\
                            maxiter=maxiter,pfactor=pfactor,\
                            floor_eps=floor_eps)
    return ynew
Example No. 43
        peakiLocs, peakiMags, peakiPhases = peakInterp.process( fft,
                                                               peakLocs,
                                                               peakMags,
                                                               peakPhases )
        
        trajLocs, trajMags = tracker.process( fft,
                                              peakiLocs,
                                              peakiMags )

        specSynth = peakSynth.process( trajLocs,
                                       trajMags )

        specSynth = specSynth[:,:plotSize]

        specMag = scipy.resize(spec, (1, spec.shape[0]))

        specMagResid = loudia.dbToMag( specMag ) - specSynth
        
        specResid = loudia.magToDb(loudia.dbToMag( specMag ) - specSynth)[0,:]
        
        specSynth = loudia.magToDb( specSynth )[0,:]
        
        trajsLocs.append( trajLocs[0,:] )
        trajsMags.append( trajMags[0,:] )

        specs.append( spec )

        specsSynth.append( specSynth )
        specsResid.append( specResid )
Example No. 44
def arrayFlatten(a):
    return scipy.resize(a, scipy.prod(scipy.shape(a)))
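In current numpy terms this is just `ravel`; a quick equivalence sketch:

import numpy as np

a = np.arange(6).reshape(2, 3)
print(np.resize(a, np.prod(a.shape)))   # [0 1 2 3 4 5]
print(a.ravel())                        # [0 1 2 3 4 5] -- same result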
Example No. 45
    def _resizeArray(self, a):
        """Increase the buffer size. It should always be one longer than the
        current sequence length and double on every growth step."""
        shape = list(a.shape)
        shape[0] = (shape[0] + 1) * 2
        return resize(a, shape)
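A sketch of the growth schedule under that rule: a buffer with n rows is replaced by one with 2*(n+1) rows (numpy's `resize` fills the new rows by repeating existing data).

import numpy as np

buf = np.zeros((3, 2))
grown = np.resize(buf, ((buf.shape[0] + 1) * 2, buf.shape[1]))
print(grown.shape)   # (8, 2)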
Example No. 46
def baseline1(myarray):
    """Set first bin of each row to zero
    """
    size = myarray.shape
    take_array = scipy.transpose(scipy.resize(scipy.transpose(myarray[:, 0]), (size[1], size[0])))
    return myarray - take_array
Example No. 47
                           Y_test=test_set_y,
                           num_iteration=15,
                           learn_rate=i,
                           print_cost=False)
    print("\n" + "--------------------------------------------------" + "\n")

for i in learing_rate:
    plt.plot(models[str(i)]["costs"], label=models[str(i)]["learning_rate"])
plt.xlabel("iterations")
plt.ylabel("cost")
legend = plt.legend(loc="upper center", shadow=True)
frame = legend.get_frame()
frame.set_facecolor("0.90")
# plt.show()

my_image = "timg.jpg"
# Preprocess the image to fit the algorithm
fname = "images/" + my_image
image = np.array(plt.imread(fname))
my_image = scipy.resize(image, (num_px, num_px, 3)).reshape(
    (1, num_px * num_px * 3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
# plt.show()
print("y = " + str(np.squeeze(my_predicted_image)) +
      ", your algorithm predicts a \"" +
      classes[int(np.squeeze(my_predicted_image)), ].decode("utf-8") +
      "\" picture.")

print()
Example No. 48
def pca_nipals(myarray,comps,type='covar',stb=None):
    """Run principal components analysis (PCA) using NIPALS

    Martens,H; Naes,T: Multivariate Calibration, Wiley: New York, 1989

    >>> import scipy
    >>> a = scipy.array([[1,2,3],[0,1,1.5],[-1,-6,34],[8,15,2]])
    >>> tt,pp,pr,eigs=pca_nipals(a,2)
    >>> tt
    array([[ -5.86560409,  -4.2823783 ],
           [ -6.66119189,  -6.16469835],
           [ 25.62619836,   1.82031282],
           [-13.09940238,   8.62676382]])
    
    """
    
    if type == 'covar':
        newarray = _meancent(myarray)
    elif type == 'corr':
        newarray = _autoscale(myarray)
    
    arr_size = newarray.shape
    tt, pp, i = scipy.zeros((arr_size[0],comps),'d'), scipy.zeros((comps,arr_size[1]),'d'), 0
        
    while i < comps:
        std = scipy.std(newarray,axis=0)
        st2 = scipy.argsort(std)
        ind = st2[arr_size[1]-1,]
        t0 = newarray[:,ind]
        c = 0
        while c == 0: #NIPALS
            p0 = scipy.dot(scipy.transpose(t0),newarray)
            p1 = p0/scipy.sqrt(scipy.sum(p0**2))
            t1 = scipy.dot(newarray,scipy.transpose(p1))
            if scipy.sqrt(scipy.sum(t1**2)) - scipy.sqrt(scipy.sum(t0**2)) < 5*10**-5:
                tt[:,i] = t1
                pp[i,:] = p1
                c = 1
            t0 = t1

        newarray = newarray - scipy.dot(scipy.resize(t1,(arr_size[0],1)),
              scipy.resize(p1,(1,arr_size[1])))

        i += 1
        # report progress to status bar
        if stb is not None:
            stb.SetStatusText(string.join(('Principal component',str(i)),' '),0)

    # work out percentage explained variance
    if type == 'covar':
        newarray = _meancent(myarray)
    elif type == 'corr':
        newarray = _autoscale(myarray)
    
    s0, s = scipy.sum(scipy.sum(newarray**2)), []
    for n in scipy.arange(1,comps+1,1):
        E = newarray - scipy.dot(tt[:,0:n],pp[0:n,:])
        s.append(scipy.sum(scipy.sum(E**2)))
        
    pr = (1-((scipy.asarray(s)/s0)))*100
    pr = scipy.reshape(pr,(1,len(pr)))
    pr = scipy.concatenate((scipy.array([[0.0]]),pr),1)
    pr = scipy.reshape(pr,(pr.shape[1],))
    eigs = scipy.array(s)
    
    if stb is not None:
        stb.SetStatusText('Status',0)
    
    return tt,pp,pr[:,nA],eigs[:,nA]
Example No. 49
def _autoscale(a):
    mean_cols = scipy.resize(sum(a,0)/a.shape[0],(a.shape))
    std_cols = scipy.resize(scipy.sqrt((sum((a - mean_cols)**2,0))/(a.shape[0]-1)), (a.shape))
    return (a-mean_cols)/std_cols
Example No. 50
def normtot(myarray):
    """Normalises to a total of 1 for each row"""
    size_of_myarray = myarray.shape
    sum_of_cols = scipy.transpose(scipy.resize(scipy.sum(myarray, 1), (size_of_myarray[1], size_of_myarray[0])))
    return_normal = myarray / sum_of_cols
    return return_normal
Example No. 51
            freqStop = trajLoc / fftSize + 0.01

            # Create the filter for the given trajectory
            filt.setOrder(order, False)
            filt.setLowFrequency(freq, False)
            filt.setHighFrequency(freqStop, False)
            filt.setFilterType(loudia.BandFilter.BESSEL, False)
            filt.setBandType(loudia.BandFilter.BANDSTOP, False)
            filt.setup()

            # Filter the samples of that trajectory
            filtered = filt.process(filtered)

        filtereds.append(filtered.T)

        specMag = scipy.resize(spec, (1, spec.shape[0]))

        trajsLocs.append(trajLocs[0, :])
        trajsMags.append(trajMags[0, :])

        specs.append(spec)

        peakPos = peakLocs[peakLocs > 0]
        peakMags = peakMags[peakLocs > 0]

        peakiPos = peakiLocs[peakiLocs > 0]
        peakiMags = peakiMags[peakiLocs > 0]

        trajPos = trajLocs[trajLocs > 0]
        trajMags = trajMags[trajLocs > 0]
Example No. 52
#!/opt/epd/bin/python

import scipy, numpy
import os, sys, re
import matplotlib.pyplot as plt

filename = "soln.dat"
if len(sys.argv) > 1:
    filename = sys.argv[1]
y = scipy.loadtxt(filename)
timesteps = len(numpy.unique(y[:, 0]))
dx = len(numpy.unique(y[:, 1]))
dy = len(numpy.unique(y[:, 2]))
dz = len(numpy.unique(y[:, 3]))
y = y[:, 4]  # grab temp data
y = scipy.resize(y, [timesteps, dx, dy, dz])

plt.clf()

rows = 2
cols = 4
for i in range(rows * cols):
    plt.subplot(rows, cols, i + 1)
    plt.imshow(y[i * timesteps / (rows * cols), :, :, dz / 2], vmin=0, vmax=1.3)
    plt.colorbar()

plt.show()
Example No. 53
    def _resizeArray(self, a):
        """Increase the buffer size. It should always be one longer than the
        current sequence length and double on every growth step."""
        shape = list(a.shape)
        shape[0] = (shape[0] + 1) * 2
        return resize(a, shape)
Example No. 54
def dfa_xval_pca(X,pca,nopcs,group,mask,nodfs,ptype='covar'):
    """Perform PC-DFA with full cross validation
    
    >>> import scipy
    >>> X = scipy.array([[ 0.19343116,  0.49655245,  0.72711322,  0.79482108,  0.13651874],[ 0.68222322,  0.89976918,  0.30929016,  0.95684345,  0.01175669],[ 0.3027644 ,  0.82162916,  0.83849604,  0.52259035,  0.89389797],[ 0.54167385,  0.64491038,  0.56807246,  0.88014221,  0.19913807],[ 0.15087298,  0.81797434,  0.37041356,  0.17295614,  0.29872301],[ 0.69789848,  0.66022756,  0.70273991,  0.9797469 ,  0.66144258],[ 0.378373  ,  0.34197062,  0.54657115,  0.27144726,  0.28440859],[ 0.8600116 ,  0.2897259 ,  0.4448802 ,  0.25232935,  0.46922429],[ 0.85365513,  0.34119357,  0.69456724,  0.8757419 ,  0.06478112],[ 0.59356291,  0.53407902,  0.62131013,  0.73730599,  0.98833494]])
    >>> group = scipy.array([[1],[1],[1],[1],[2],[2],[2],[3],[3],[3]])
    >>> mask = scipy.array([[0],[1],[0],[0],[0],[0],[1],[0],[0],[1]])
    >>> scores,loads,eigs = dfa_xval_pca(X,'NIPALS',3,group,mask,2,'covar')
    
    """
    rx1,rx2,rx3,ry1,ry2,ry3,dummy1,dummy2,dummy3=_split(X,scipy.array(group,'i')[:,nA],mask[:,nA])
    
    if pca == 'SVD':
        pcscores,pp,pr,pceigs = pca_svd(rx1,type=ptype)        
    elif pca == 'NIPALS':
        pcscores,pp,pr,pceigs = pca_nipals(rx1,nopcs,type=ptype)
    
    #get indices
    idxn = scipy.arange(X.shape[0])[:,nA]
    tr_idx = scipy.take(idxn,_index(mask,0),0)
    cv_idx = scipy.take(idxn,_index(mask,1),0)
    
    #train
    trscores,loads,eigs,dummy = cva(pcscores[:,0:nopcs],ry1,nodfs)
    
    #cross validation
    #Get projected pc scores
    if ptype in ['covar']:
        rx2 = rx2-scipy.resize(scipy.mean(rx2,0),(len(rx2),rx1.shape[1]))
    else:
        rx2 = (rx2-scipy.resize(scipy.mean(rx2,0),(len(rx2),rx1.shape[1]))) / \
                  scipy.resize(scipy.std(rx2,0),(len(rx2),rx1.shape[1]))
        
    pcscores = scipy.dot(rx2,scipy.transpose(pp))
    
    cvscores = scipy.dot(pcscores[:,0:nopcs],loads)
    
    #independent test
    if max(mask) > 1:
        ts_idx = scipy.take(idxn,_index(mask,2),0)
        if ptype in ['covar']:
            rx3 = rx3-scipy.resize(scipy.mean(rx3,0),(len(rx3),rx1.shape[1]))
        else:
            rx3 = (rx3-scipy.resize(scipy.mean(rx3,0),(len(rx3),rx1.shape[1]))) / \
                  scipy.resize(scipy.std(rx3,0),(len(rx3),rx1.shape[1]))
        pcscores = scipy.dot(rx3,scipy.transpose(pp))
        tstscores = scipy.dot(pcscores[:,0:nopcs],loads)
        
        scores = scipy.zeros((X.shape[0],nodfs),'d')
        
        tr_idx = scipy.reshape(tr_idx,(len(tr_idx),)).tolist()
        cv_idx = scipy.reshape(cv_idx,(len(cv_idx),)).tolist()
        ts_idx = scipy.reshape(ts_idx,(len(ts_idx),)).tolist()
        _put(scores,tr_idx,trscores)
        _put(scores,cv_idx,cvscores)
        _put(scores,ts_idx,tstscores)
    else:
        scores = scipy.concatenate((trscores,cvscores),0)
        tr_idx = scipy.reshape(tr_idx,(len(tr_idx),)).tolist()
        cv_idx = scipy.reshape(cv_idx,(len(cv_idx),)).tolist()
        _put(scores,tr_idx,trscores)
        _put(scores,cv_idx,cvscores)
    
    #get loadings for original variables
    loads = scipy.dot(scipy.transpose(pp[0:nopcs,:]),loads)
        
    return scores,loads,eigs