Example #1
    def synth_meas_watson_SH_cyl_neuman_PGSE( self, x, grad_dirs, G, delta, smalldel, fibredir, roots ):
        d=x[0]
        R=x[1]
        kappa=x[2]

        l_q = grad_dirs.shape[0]

        # Parallel component
        LePar = self.cyl_neuman_le_par_PGSE(d, G, delta, smalldel)

        # Perpendicular component
        LePerp = self.cyl_neuman_le_perp_PGSE(d, R, G, delta, smalldel, roots)

        ePerp = np.exp(LePerp)

        # Compute the Legendre weighted signal
        Lpmp = LePerp - LePar
        lgi = self.legendre_gaussian_integral(Lpmp, 6)

        # Compute the spherical harmonic coefficients of the Watson's distribution
        coeff = self.watson_SH_coeff(kappa)
        coeffMatrix = matlib.repmat(coeff, l_q, 1)

        # Compute the dot product between the symmetry axis of the Watson's distribution
        # and the gradient direction
        #
        # For numerical reasons, cosTheta might not always be between -1 and 1:
        # due to round-off error, the individual gradient vectors in grad_dirs
        # and fibredir are never exactly unit length, so when a gradient vector
        # and fibredir are essentially parallel their dot product can fall
        # slightly outside [-1, 1]. We must clamp it back into that range,
        # otherwise the Legendre function call below fails and aborts the
        # calculation.
        #
        cosTheta = np.dot(grad_dirs,fibredir)
        badCosTheta = abs(cosTheta)>1
        cosTheta[badCosTheta] = cosTheta[badCosTheta]/abs(cosTheta[badCosTheta])

        # Compute the SH values at cosTheta
        sh = np.zeros(coeff.shape)
        shMatrix = matlib.repmat(sh, l_q, 1)
        for i in range(7):
            shMatrix[:,i] = np.sqrt((i+1 - .75)/np.pi)
            # The Legendre function returns coefficients for all m from 0 to l;
            # we only need the m = 0 term, evaluated at each entry of the
            # cosTheta column vector.
            tmp = np.zeros((l_q))
            for pol_i in range(l_q):
                tmp[pol_i] = scipy.special.lpmv(0, 2*i, cosTheta[pol_i])
            shMatrix[:,i] = shMatrix[:,i]*tmp

        E = np.sum(lgi*coeffMatrix*shMatrix, 1)
        # With the SH approximation there is no guarantee that E is positive,
        # but it must be; replace any non-positive values with 10% of the
        # smallest positive value.
        E[E<=0] = np.min(E[E>0])*0.1
        E = 0.5*E*ePerp

        return E
Example #2
def con2vert(A, b):
    """
    Convert sets of constraints to a list of vertices (of the feasible region).
    If the shape is open, con2vert returns False for the closed property.
    """
    # Python implementation of con2vert.m by Michael Kleder (July 2005),
    #  available: http://www.mathworks.com/matlabcentral/fileexchange/7894
    #  -con2vert-constraints-to-vertices
    # Author: Michael Kleder (Original)
    #         Andre Campher (Python implementation)
    c = linalg.lstsq(mat(A), mat(b))[0]
    btmp = mat(b)-mat(A)*c
    D = mat(A)/matlib.repmat(btmp, 1, A.shape[1])

    fmatv = qhull(D, "Ft") #vertices on facets

    G  = zeros((fmatv.shape[0], D.shape[1]))
    for ix in range(0, fmatv.shape[0]):
        F = D[fmatv[ix, :], :].squeeze()
        G[ix, :] = linalg.lstsq(F, ones((F.shape[0], 1)))[0].transpose()

    V = G + matlib.repmat(c.transpose(), G.shape[0], 1)
    ux = uniqm(V)

    eps = 1e-13
    Av = dot(A, ux.T)
    bv = tile(b, (1, ux.shape[0]))
    closed = sciall(Av - bv <= eps)

    return ux, closed
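A minimal usage sketch (ours, not from the original source): recovering the corners of the unit square from its half-space description A x <= b. It assumes the qhull and uniqm helpers this module relies on are importable, along with the mat/linalg/matlib names used above.

# Hypothetical example: vertices of the unit square |x| <= 1, |y| <= 1.
import numpy as np

A_sq = np.array([[ 1.,  0.],
                 [-1.,  0.],
                 [ 0.,  1.],
                 [ 0., -1.]])
b_sq = np.array([[1.], [1.], [1.], [1.]])

vertices, closed = con2vert(A_sq, b_sq)
print(vertices)  # the four corners (+-1, +-1), in some order
print(closed)    # True, since the feasible region is bounded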
Example #3
def get_lap_matrix_self_tuning(data):
    start_time = time.perf_counter()
    sls2 = -2 * np.mat(data) * np.mat(data.T)
    sls1 = np.mat(np.sum(data ** 2, 1))
    w_matrix = sls2 + repmat(sls1, len(sls1), 1) + repmat(sls1.T, 1, len(sls1))
    w_matrix = np.array(w_matrix)

    sigma = list()
    m = len(w_matrix)
    sort_w_matrix = np.sort(w_matrix)
    for i in range(m):
        sigma.append(np.sqrt(sort_w_matrix[i, 7]))
    """for i in range(m):
        print(i)
        idx = np.argsort(w_matrix[i, :])

        idx = get_k_min_index(w_matrix[i, :].tolist(), 7)
        sigma.append(np.sqrt(w_matrix[i, idx]))
    """

    for row in range(m):
        for col in range(m):
            w_matrix[row][col] /= float(sigma[row] * sigma[col])

    w_matrix = np.exp(-np.mat(w_matrix))
    w_matrix = np.array(w_matrix)
    d_matrix = np.diag(np.sum(w_matrix, 1))
    d_matrix_square_inv = np.linalg.inv(d_matrix ** 0.5)
    dot_matrix = np.dot(d_matrix_square_inv, w_matrix)
    lap_matrix = np.dot(dot_matrix, d_matrix_square_inv)
    end_time = time.perf_counter()

    print("calculating the self-tuning Laplacian matrix took %f seconds" % (end_time - start_time))
    return lap_matrix
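A short usage sketch (ours, with illustrative names): feed a toy dataset through get_lap_matrix_self_tuning and take the leading eigenvectors of the returned normalized matrix, as one would for spectral clustering; it assumes the numpy and numpy.matlib repmat imports the function relies on.

# Usage sketch: spectral embedding from the self-tuning affinity matrix.
import numpy as np

data = np.random.rand(50, 3)             # 50 samples, 3 features
lap = get_lap_matrix_self_tuning(data)   # D^{-1/2} W D^{-1/2}, symmetric
vals, vecs = np.linalg.eigh(lap)         # eigh, since the matrix is symmetric
embedding = vecs[:, -2:]                 # two leading eigenvectors -> 2-D embedding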
Example #4
def deltas(x, w=9):
    """
    Calculate the deltas (derivatives) of a sequence
    Use a W-point window (W odd, default 9) to calculate deltas using a
    simple linear slope.  This mirrors the delta calculation performed
    in feacalc etc.  Each row of X is filtered separately.

    Notes:
    x is your data matrix where each feature corresponds to a row (so you may have to transpose the data you
    pass as an argument) and then transpose the output of the function).

    :param x: x is your data matrix where each feature corresponds to a row.
        (so you may have to transpose the data you pass as an argument)
        and then transpose the output of the function)
    :param w: window size, defaults to 9
    :return: derivatives of a sequence
    """
    # compute the shape of the input
    num_row, num_cols = x.shape

    # define window shape
    hlen = w // 2  # floor integer divide
    w = 2 * hlen + 1  # odd number
    win = np.arange(hlen, -hlen - 1, -1, dtype=np.float32)

    # pad the data by repeating the first and last columns
    a = matlab.repmat(x[:, 0], 1, hlen).reshape((num_row, hlen), order='F')
    b = matlab.repmat(x[:, -1], 1, hlen).reshape((num_row, hlen), order='F')
    xx = np.concatenate((a, x, b), axis=1)

    # apply the delta filter, see matlab 1D filter
    d = signal.lfilter(win, 1, xx, 1)

    # trim the edges
    return d[:, hlen*2: hlen*2 + num_cols]
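A brief usage sketch (ours), following the docstring's convention that each feature is a row; it assumes the numpy.matlib and scipy.signal imports the function relies on.

# Usage sketch: delta and delta-delta features for a (features x frames) matrix.
import numpy as np

feats = np.random.randn(13, 100)   # e.g. 13 cepstral coefficients over 100 frames
d = deltas(feats)                  # simple-slope derivatives, same shape as feats
dd = deltas(d, w=5)                # delta-deltas with a narrower window
assert d.shape == feats.shape and dd.shape == feats.shape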
Example #5
 def dlnprob(self, theta):
     
     if self.batchsize > 0:
         batch = [ i % self.N for i in range(self.iter * self.batchsize, (self.iter + 1) * self.batchsize) ]
         ridx = self.permutation[batch]
         self.iter += 1
     else:
         ridx = np.random.permutation(self.X.shape[0])
         
     Xs = self.X[ridx, :]
     Ys = self.Y[ridx]
     
     w = theta[:, :-1]  # logistic weights
     alpha = np.exp(theta[:, -1])  # the last column is logalpha
     d = w.shape[1]
     
     wt = np.multiply((alpha / 2), np.sum(w ** 2, axis=1))
     
     coff = np.matmul(Xs, w.T)
     y_hat = 1.0 / (1.0 + np.exp(-1 * coff))
     
     dw_data = np.matmul(((nm.repmat(np.vstack(Ys), 1, theta.shape[0]) + 1) / 2.0 - y_hat).T, Xs)  # Y \in {-1,1}
     dw_prior = -np.multiply(nm.repmat(np.vstack(alpha), 1, d) , w)
     dw = dw_data * 1.0 * self.X.shape[0] / Xs.shape[0] + dw_prior  # re-scale
     
     dalpha = d / 2.0 - wt + (self.a0 - 1) - self.b0 * alpha + 1  # the last term is the jacobian term
     
     return np.hstack([dw, np.vstack(dalpha)])  # first-order derivative
Example #6
def fastsvds(M,r): 
    """
    "Fast" but less accurate SVD by computing the SVD of MM^T or M^TM 
    ***IF*** one of the dimensions of M is much smaller than the other. 
    Note. This is numerically less stable, but useful for large hyperspectral 
    images. 

    """

    m,n = M.shape 
    rationmn = 10 # Parameter, should be >= 1

    if m < rationmn*n: 
        MMt = np.dot(M,M.T)
        u,s,v = svds(MMt,r)
        s = np.diag(s)
        v = np.dot(M.T, u) 
        v = np.multiply(v,repmat( (sum(v**2)+1e-16)**(-0.5),n,1)) 
        s = np.sqrt(s) 
    elif n < rationmn*m:
        MtM = np.dot(M.T,M)
        # svds returns (u, s, vt); for the symmetric MtM the columns of u
        # approximate the right singular vectors of M
        v,s,_ = svds(MtM,r)
        s = np.diag(s)
        u = np.dot(M,v)
        u = np.multiply(u,repmat( (sum(u**2)+1e-16)**(-0.5),m,1))
        s = np.sqrt(s)
    else:
        u,s,vt = svds(M,r)
        s = np.diag(s)
        v = vt.T
    return (u,s,v)
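A quick sanity-check sketch (ours, assuming scipy.sparse.linalg.svds and numpy.matlib.repmat are imported as the function expects): a rank-5 matrix should be reconstructed almost exactly by a rank-5 fast SVD.

# Usage sketch: low-rank reconstruction error should be near zero.
import numpy as np

M = np.random.rand(2000, 5).dot(np.random.rand(5, 40))   # rank 5, 2000 x 40
u, s, v = fastsvds(M, 5)                                  # takes the M^T M branch
err = np.linalg.norm(M - u.dot(s).dot(v.T)) / np.linalg.norm(M)
print(err)   # tiny, up to solver tolerance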
Example #7
def computeGradient( nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lamda ):
	Theta1, Theta2 = paramRollback( nn_params, input_layer_size, hidden_layer_size, num_labels )
	# print shape(Theta1), shape(Theta2)
	m, n   = shape(X)
	a1, z2, a2, z3, h = feedForward( Theta1, Theta2, X, m )

	yVec = ( (matlib.repmat(arange(1, num_labels+1), m, 1) == matlib.repmat(y, 1, num_labels)) + 0)

	D1 = zeros(shape(Theta1))
	D2 = zeros(shape(Theta2))

	sigma3 = h - yVec.T
	sigma2 = Theta2.T.dot(sigma3) * sigmoidGradient( r_[ ones((1,m)), z2 ] )
	sigma2 = sigma2[1:,:]
	# print shape(sigma2)
	delta1 = sigma2.dot( a1.T )
	delta2 = sigma3.dot( a2.T ) 

	D1[:,1:] = delta1[:,1:]/m + (Theta1[:,1:] * lamda / m)
	D2[:,1:] = delta2[:,1:]/m + (Theta2[:,1:] * lamda / m)
	D1[:,0]  = delta1[:,0]/m
	D2[:,0]  = delta2[:,0]/m 

	grad = array([D1.T.reshape(-1).tolist() + D2.T.reshape(-1).tolist()]).T
	# print D1, shape(D1)
	# print D2, shape(D2)


	return ndarray.flatten(grad)
Example #8
def trueFeatureStats(T, R, fMap, discountFactor, stateProp=1, MAT_LIMIT=1e8):
    """ Gather the statistics needed for LSTD,
    assuming infinite data (true probabilities).
    Option: if stateProp is  < 1, then only a proportion of all 
    states will be seen as starting state for transitions """
    dim = len(fMap)
    numStates = len(T)
    statMatrix = zeros((dim, dim))
    statResidual = zeros(dim)
    ss = range(numStates)
    repVersion = False
    
    if stateProp < 1:
        ss = random.sample(ss, int(numStates * stateProp))
    elif dim * numStates**2 < MAT_LIMIT:
        repVersion = True
    
    # two variants, depending on how large we can afford our matrices to become.        
    if repVersion:    
        tmp1 = tile(fMap, (numStates,1,1))
        tmp2 = transpose(tmp1, (2,1,0))
        tmp3 = tmp2 - discountFactor * tmp1            
        tmp4 = tile(T, (dim,1,1))
        tmp4 *= transpose(tmp1, (1,2,0))
        statMatrix = tensordot(tmp3, tmp4, axes=[[0,2], [1,2]]).T
        statResidual = dot(R, dot(fMap, T).T)
    else:
        for sto in ss:
            tmp = fMap - discountFactor * repmat(fMap[:, sto], numStates, 1).T
            tmp2 = fMap * repmat(T[:, sto], dim, 1)
            statMatrix += dot(tmp2, tmp.T)             
            statResidual += R[sto] * dot(fMap, T[:, sto])
    return statMatrix, statResidual
Example #9
    def env_init(self):
        """
            Based on the Levin model, the dispersion probability is initialized.
        """
        self.dispersionModel = InvasiveUtility.Levin
        notDirectedG = networkx.Graph(self.simulationParameterObj.graph)
        adjMatrix = adjacency_matrix(notDirectedG)

        edges = self.simulationParameterObj.graph.edges()
        simulationParameterObj = self.simulationParameterObj
        if self.dispersionModel == InvasiveUtility.Levin:
            parameters = InvasiveUtility.calculatePath(notDirectedG,adjMatrix, edges, simulationParameterObj.downStreamRate,
                simulationParameterObj.upStreamRate)
            C = (1 - simulationParameterObj.upStreamRate * simulationParameterObj.downStreamRate) / (
                (1 - 2 * simulationParameterObj.upStreamRate) * (1 - simulationParameterObj.downStreamRate))
            self.dispertionTable = np.dot(1 / C, parameters)
            self.germinationObj = GerminationDispersionParameterClass(1, 1)
        # calculating the worst-case cost of fully invaded rivers
        worst_case = repmat(1, 1, self.simulationParameterObj.nbrReaches * self.simulationParameterObj.habitatSize)[0]
        cost_state_unit = InvasiveUtility.get_unit_invaded_reaches(worst_case,
            self.simulationParameterObj.habitatSize) * self.actionParameterObj.costPerReach
        stateCost = cost_state_unit + InvasiveUtility.get_invaded_reaches(
            worst_case) * self.actionParameterObj.costPerTree
        stateCost = stateCost + InvasiveUtility.get_empty_slots(worst_case) * self.actionParameterObj.emptyCost
        costAction = InvasiveUtility.get_budget_cost_actions(repmat(3, 1, self.simulationParameterObj.nbrReaches)[0],
            worst_case, self.actionParameterObj)
        networkx.adjacency_matrix(self.simulationParameterObj.graph)
        return "VERSION RL-Glue-3.0 PROBLEMTYPE non-episodic DISCOUNTFACTOR " + str(
            self.discountFactor) + " OBSERVATIONS INTS (" + str(
            self.simulationParameterObj.nbrReaches * self.simulationParameterObj.habitatSize) + " 1 3) ACTIONS INTS (" + str(
            self.simulationParameterObj.nbrReaches) + " 1 4) REWARDS (" + str(self.Bad_Action_Penalty)+" "+str(
            -1 * (costAction + stateCost)) + ") EXTRA "+str(self.simulationParameterObj.graph.edges()) + " BUDGET "+str(self.actionParameterObj.budget) +" by Majid Taleghan."
Example #10
def samples_to_cluster_object(data, label):

    m = len(label)
    assert len(data) == len(label)
    Cluster.n_samples = m
    for i in range(m):
        cluster_object = Cluster()
        cluster_object.data = data[i, :]
        cluster_object.cluster = label[i]
        Cluster.samples_list.append(cluster_object)

    start_time = time.perf_counter()
    sls2 = -2 * np.mat(data) * np.mat(data.T)
    sls1 = np.mat(np.sum(data ** 2, 1))
    Cluster.w_matrix = sls2 + repmat(sls1, len(sls1), 1) + repmat(sls1.T, 1, len(sls1))
    Cluster.w_matrix = np.array(Cluster.w_matrix) ** 0.5
    end_time = time.perf_counter()
    print("calculating the distance matrix took %f seconds" % (end_time - start_time))

    outfile = open("distance.txt", "w+")
    print("writing distance to file ...")
    for i in range(m - 1):
        for j in range(i + 1, m):
            Cluster.distances.append(Cluster.w_matrix[i, j])
            str_d = str(i + 1) + "\t" + str(j + 1) + "\t" + str(Cluster.w_matrix[i, j]) + "\n"
            outfile.write(str_d)
    print("write done")

    del cluster_object
Example #11
	def fit(self, X):
		if self.components==0:
			self.components=X.shape[0]

		self.X = X
		N = X.shape[0]
		K = np.zeros((N,N))
		for row in range(N):
			for col in range(N):
				K[row,col] = self._kernel_func(X[row,:], X[col,:])

		self._K_sum = np.sum(K)
		self._K_cached_sumcols = np.sum(K, axis=0)
		K_c = K - repmat(np.reshape(np.sum(K, axis=1), (N,1)), 1, N)/N - repmat(self._K_cached_sumcols, N, 1)/N + self._K_sum/N**2

		# kernel matrix must be symmetric, so using symmetric matrix eigenvalue solver
		self._eigenvalues, self._eigenvectors = eigh(K_c)
		self._eigenvalues = np.real(self._eigenvalues)
		self._eigenvectors = np.real(self._eigenvectors)
		key = np.argsort(self._eigenvalues)
		key = key[::-1]
		self._eigenvalues = self._eigenvalues[key]
		self._eigenvectors = self._eigenvectors[:,key]
		self.X = self.X[key,:]
		self._K_cached_sumcols = self._K_cached_sumcols[key]
Example #12
    def __check_and_repmat__(self, attrib, angles):
        """
        Checks whether the attribute is a single value and repeats it into an array if it is
        :rtype: None
        :param attrib: string
        :param angles: np.ndarray
        """
        old_attrib = getattr(self, attrib)

        if type(old_attrib) in [float, int, np.float32, np.float64]:
            new_attrib = matlib.repmat(old_attrib, 1, angles.shape[0])[0]
            setattr(self, attrib, new_attrib)

        elif type(old_attrib) == np.ndarray:
            if old_attrib.ndim == 1:
                if old_attrib.shape in [(3,), (2,), (1,)]:
                    new_attrib = matlib.repmat(old_attrib, angles.shape[0], 1)
                    setattr(self, attrib, new_attrib)
                elif old_attrib.shape == (angles.shape[0],):
                    pass
            else:
                if old_attrib.shape == (angles.shape[0], old_attrib.shape[1]):
                    pass
                else:
                    raise AttributeError(attrib + " with shape: " + str(old_attrib.shape) +
                                         " not compatible with shapes: " + str([(angles.shape[0],),
                                                                                (angles.shape[0], old_attrib.shape[1]),
                                                                                (3,), (2,), (1,)]))

        else:
            raise TypeError(
                "Data type not understood for: geo." + attrib + " with type = " + str(type(getattr(self, attrib))))
Example #13
def julia_sets(complex_number, n, maxIter=30, a=-1, b=1):
    """
    This function computes the Julia set of the complex number passed in
    according to the function f(x) = x**2 + c.

    Inputs:
        complex_number: The complex number for which you would like to find
                        the Julia set.
        n: The size of the square (n x n) matrix that will contain plot
           information.
        maxIter: The maximum number of iterations to pass x through f(x).
        a: The lower bound for both the real and complex axes.
        b: The upper bound for both the real and complex axes.

    Outputs:
        plot: There is no output that can be stored to a variable. When this
              function is called a plot is automatically generated.
    """

    if n % 2 != 0:
        print('Value Error: We need n to be an even number')
        return

    x = sp.linspace(a, b, n)
    xj = x * 1j

    X = repmat(x, n, 1)
    Xj = repmat(xj, n, 1)

    start_grid = X + Xj.T
    answer_grid = sp.zeros((n, n), dtype='complex64')

    func = lambda x: x**2 + complex_number

    for i in range(n):
        for j in range(n):
            x = start_grid[i, j]
            for m in range(maxIter):
                x = func(x)
            answer_grid[i, j] = x

    for i in range(n):
        for j in range(n):
            # numpy's isnan handles complex values; math.isnan would raise here
            if sp.isnan(answer_grid[i, j]):
                answer_grid[i, j] = 0

    E = sp.zeros((n, n), dtype='complex64')
    for i in range(n):
        for j in range(n):
            E[i, j] = sp.exp(-abs(answer_grid[i, j]))

    E = -1 * sp.array(E, dtype=float)
    plt.imshow(E)
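A one-line usage sketch (ours); the function plots as a side effect, assuming the scipy-as-sp, numpy.matlib repmat, math and matplotlib.pyplot-as-plt imports the example relies on.

# Usage sketch: render the Julia set of c = -0.835 - 0.2321j on a 512 x 512 grid.
julia_sets(complex(-0.835, -0.2321), 512)
plt.show()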
Example #14
 def compute_distance_matrix(self):
     X = self.X
     y = self.y
     self.EDM = distance_utils.calcDistanceMatrixFastEuclidean(X)
     N,d = X.shape
     ind = repmat(array(range(N)),N,1)
     self.EDM.setfield(ind, dtype=int16)
     Y = repmat(y, N, 1)
     flag = (Y==Y.T)
     self.EDM.setfield(flag, dtype=bool, offset=2)
Example #15
def MutInf(x1, x2, Nbins=(20, 20, 20), s=1):
    import numpy as np  # histogramdd, array, transpose

    # define data vectors/arrays
    x = x1[s:]  # time series data from osc_1
    y = x1[:-s]  # time series data from osc_1 delayed by s
    z = x2[:-s]  # time series data from osc_2 delayed by s

    # calculate 3D histogram of size (nx,ny,nz) where nx is the number
    # of bins for x, ny for y, nz for z. This is the tricky part since
    # the size of bins is quite critical. However you could start with
    # say nx=20, i.e. 20 bins evenly spaced between the min and max values
    # of x
    nx, ny, nz = Nbins
    H, edges = np.histogramdd([x, y, z], bins=(nx, ny, nz))

    # add small probability mass everywhere to avoid divide by zeros,
    # e.g. 1e-9 (you can experiment with different values). This part
    # was a bit of a fudge and should be replaced with something more
    # sensible - but I never got round to it
    # H = H + 1e-12

    # renormalise so sums to unity
    P = H / np.sum(H)

    # now sum along the first dimension to create a 2D array of p(y,z)
    P_yz = np.sum(P, axis=0)

    # replicate the 2D array at each element in the first dimension
    # so as to regain a (nx,ny,nz) array (this is purely for computational
    # ease)
    from numpy.matlib import repmat

    P_yz = repmat(P_yz, nx, 1).reshape(nx, ny, nz)

    # now sum along the first and third dimension to give 1D array of p(y)
    P_y = np.sum(P, axis=(0, 2))

    # replicate in both the x and z directions
    P_y = repmat(P_y, nx, nz).reshape(nx, ny, nz)

    P_xy = np.sum(P, axis=2)
    P_xy = repmat(P_xy, nz, 1).reshape(nx, ny, nz)

    # create conditional probability mass functions
    P_x_given_yz = P / P_yz
    P_x_given_y = P_xy / P_y

    # calculate transfer entropy
    logP = np.log(P_x_given_yz / P_x_given_y)
    logP = np.nan_to_num(logP)

    T = np.sum(P * logP)

    return T
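A small usage sketch (ours): with x1 driven by a lagged copy of x2, the statistic should come out clearly positive; with independent series it should be near zero.

# Usage sketch: the transfer-entropy-style statistic on coupled series.
import numpy as np

rng = np.random.RandomState(0)
x2 = rng.randn(10000)
x1 = np.roll(x2, 1) + 0.5 * rng.randn(10000)        # x1 depends on x2 delayed by one step
print(MutInf(x1, x2, Nbins=(10, 10, 10), s=1))      # clearly > 0
print(MutInf(rng.randn(10000), rng.randn(10000)))   # near 0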
Example #16
def mkfdstencil(x,xbar,k):
# this function is used to create the finite-difference method matrix
  maxorder            = len(x)
  h_matrix            = repmat(np.transpose(x)-xbar,maxorder,1)
  powerfactor_matrix  = np.transpose(repmat(np.arange(0,maxorder),maxorder,1))
  factorialindex      = np.transpose(repmat(factorial(np.arange(0,maxorder)),maxorder,1))
  taylormatrix        = h_matrix ** powerfactor_matrix /factorialindex
  derivativeindex     = np.zeros(maxorder)
  derivativeindex[k]  = 1 
  u = np.linalg.solve(taylormatrix,derivativeindex)
  return u
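A sanity-check sketch (ours, assuming repmat is numpy.matlib.repmat and factorial is scipy.special.factorial, as the function implies): the weights for the second derivative on the stencil {-h, 0, h} should be the classic [1, -2, 1] / h^2.

# Usage sketch: recover the central-difference second-derivative weights.
import numpy as np

h = 0.1
w = mkfdstencil(np.array([-h, 0.0, h]), 0.0, 2)
print(w)   # approximately [100., -200., 100.], i.e. [1, -2, 1] / h**2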
Example #17
def polyFeatures(X, p):
	m, n   = shape(X)
	powers = matlib.repmat(range(1, p+1), m, 1)
	Xrep   = matlib.repmat(X, 1, p)
	# print shape(powers), shape(Xrep)
	X_poly = Xrep ** powers
	# print shape(X_poly)
	# print X_poly
	# test   = (ones((12,8))*2) ** powers
	# print test

	return X_poly
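A tiny usage sketch (ours): mapping a single feature column to its powers 1..p, assuming the numpy star imports the example uses.

# Usage sketch: polynomial feature expansion of one column.
from numpy import array

X = array([[1.], [2.], [3.]])
print(polyFeatures(X, 3))
# [[ 1.  1.  1.]
#  [ 2.  4.  8.]
#  [ 3.  9. 27.]]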
Example #18
def depth_to_local_point_cloud(image, color=None, max_depth=0.9):
    """
    Convert an image containing CARLA encoded depth-map to a 2D array containing
    the 3D position (relative to the camera) of each pixel and its corresponding
    RGB color of an array.
    "max_depth" is used to omit the points that are far enough.
    """
    far = 1000.0  # max depth in meters.
    normalized_depth = depth_to_array(image)

    # (Intrinsic) K Matrix
    k = numpy.identity(3)
    k[0, 2] = image.width / 2.0
    k[1, 2] = image.height / 2.0
    k[0, 0] = k[1, 1] = image.width / \
        (2.0 * math.tan(image.fov * math.pi / 360.0))

    # 2d pixel coordinates
    pixel_length = image.width * image.height
    u_coord = repmat(numpy.r_[image.width-1:-1:-1],
                     image.height, 1).reshape(pixel_length)
    v_coord = repmat(numpy.c_[image.height-1:-1:-1],
                     1, image.width).reshape(pixel_length)
    if color is not None:
        color = color.reshape(pixel_length, 3)
    normalized_depth = numpy.reshape(normalized_depth, pixel_length)

    # Search for pixels where the depth is greater than max_depth to
    # delete them
    max_depth_indexes = numpy.where(normalized_depth > max_depth)
    normalized_depth = numpy.delete(normalized_depth, max_depth_indexes)
    u_coord = numpy.delete(u_coord, max_depth_indexes)
    v_coord = numpy.delete(v_coord, max_depth_indexes)
    if color is not None:
        color = numpy.delete(color, max_depth_indexes, axis=0)

    # pd2 = [u,v,1]
    p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)])

    # P = [X,Y,Z]
    p3d = numpy.dot(numpy.linalg.inv(k), p2d)
    p3d *= normalized_depth * far

    # Formatting the output to:
    # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]]
    if color is not None:
        # numpy.concatenate((numpy.transpose(p3d), color), axis=1)
        return sensor.PointCloud(
            image.frame_number,
            numpy.transpose(p3d),
            color_array=color)
    # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
    return sensor.PointCloud(image.frame_number, numpy.transpose(p3d))
Example #19
    def backcor(self,n,y,ord_cus,s,fct):
        # Rescaling
        N = len(n)
        index = np.argsort(n)
        n=np.array([n[i] for i in index])
        y=np.array([y[i] for i in index])
        maxy = max(y)
        dely = (maxy-min(y))/2.
        n = 2. * (n-n[N-1]) / float(n[N-1]-n[0]) + 1.
        n=n[:,np.newaxis]
        y = (y-maxy)/dely + 1

        # Vandermonde matrix
        p = np.array(range(ord_cus+1))[np.newaxis,:]
        T = repmat(n,1,ord_cus+1) ** repmat(p,N,1)
        Tinv = pinv(np.transpose(T).dot(T)).dot(np.transpose(T))

        # Initialisation (least-squares estimation)
        a = Tinv.dot(y)
        z = T.dot(a)

        # Other variables
        alpha = 0.99 * 1/2     # Scale parameter alpha
        it = 0                 # Iteration number
        zp = np.ones((N,1))         # Previous estimation

        # Iterate until the relative change in the estimate is small
        while np.sum((z-zp)**2)/np.sum(zp**2) > 1e-10:

            it = it + 1        # Iteration number
            zp = z             # Previous estimation
            res = y - z        # Residual

            # Estimate d
            if fct=='sh':
                d = (res*(2*alpha-1)) * (abs(res)<s) + (-alpha*2*s-res) * (res<=-s) + (alpha*2*s-res) * (res>=s)
            elif fct=='ah':
                d = (res*(2*alpha-1)) * (res<s) + (alpha*2*s-res) * (res>=s)
            elif fct=='stq':
                d = (res*(2*alpha-1)) * (abs(res)<s) - res * (abs(res)>=s)
            elif fct=='atq':
                d = (res*(2*alpha-1)) * (res<s) - res * (res>=s)
            else:
                pass

            # Estimate z
            a = Tinv.dot(y+d)   # Polynomial coefficients a
            z = T.dot(a)            # Polynomial

        z=np.array([(z[list(index).index(i)]-1)*dely+maxy for i in range(len(index))])

        return z,a,it,ord_cus,s,fct
Example #20
def cocktail(data1, data2):
	global BITWIDTH
	dt1 = data1 / float(1<<(BITWIDTH-1))
	dt2 = data2 / float(1<<(BITWIDTH-1))
	xx = [dt1, dt2]
	p = xx - np.transpose(matlib.repmat(np.mean(xx, 1), np.shape(xx)[1], 1))
	yy = np.dot(linalg.sqrtm(np.linalg.inv(np.cov(xx))), p)
	rr = matlib.repmat(np.sum(yy*yy, 0), np.shape(yy)[0], 1)
	w, s, v = np.linalg.svd(np.dot(rr*yy, np.transpose(yy)))
	f = np.transpose(np.dot(np.transpose(xx), w)) #w is the unmixing matrix
	f[0] *= float(1<<(BITWIDTH-1))
	f[1] *= float(1<<(BITWIDTH-1))
	return f
Example #21
def spkmeans(X, init):
        
    """
    Perform spherical k-means clustering.
    X: d x n data matrix
    init: k (1 x 1) or label (1 x n, 1<=label(i)<=k) or center (d x k)
    Reference: Clustering on the Unit Hypersphere using Von Mises-Fisher Distributions.
    by A. Banerjee, I. Dhillon, J. Ghosh and S. Sra.
    Written by Michael Chen ([email protected]).
    Based on matlab version @ 
    http://www.mathworks.com/matlabcentral/fileexchange/28902-spherical-k-means/content/spkmeans.m 
    (and slightly modifed to run on previous verions of Matlab)
    initialization
    """
    d,n = X.shape

    if n <= init:
        label = range(1,init) 
        m = X 
        energy = 0
    else:
        # Normalize the columns of X
        X = np.dot(X, repmat( (sum(X**2)+1e-16)**(-0.5),d,1)) 

        if len(init) == 1:
            idx = randsample(n,init)
            m = X[:,idx]
            [ul,label] = np.maximum(np.dot(m.T,X),[],1)
        elif init.shape[0] == 1 and init.shape[1] == n:
            label = init
        elif init.shape[0] == d:
            m = np.multiply(init, repmat( (sum(init**2)+1e-16)**(-0.5),d,1))
            ul,label = np.maximum(np.dot(m.T,X),[],1)
        else:
            error('ERROR: init is not valid.')
		
		## main algorithm: final version 
        last = 0
        while (label != last).any():
            u,pipi,label = np.unique(label)   # remove empty clusters
            k = len(u)
            E = sparse.coo_matrix(range(n),label,1,n,k,n)
            m = np.dot(X,E) 
            m = np.dot(m, repmat( (sum(m**2)+1e-16)**(-0.5),d,1)) 
            last = label
            val = np.maximum(np.dot(m.T*X),[],1)
            label = np.argmax(np.dot(m.T*X),[],1)
        ul,ul,label = np.unique(label)   # remove empty clusters
        energy = np.sum(val)
    return (label, m, energy)
Example #22
def get_lap_matrix_sl(data, sigma):
    # Gaussian kernel function
    # s(x_i, x_j) = exp(-|x_i - x_j|^2 /(2 * sigma^2))
    start_time = time.perf_counter()
    sls2 = -2 * np.mat(data) * np.mat(data.T)
    sls1 = np.mat(np.sum(data ** 2, 1))
    w_matrix = np.exp(-(sls2 + repmat(sls1, len(sls1), 1) + repmat(sls1.T, 1, len(sls1))) / float((2 * sigma ** 2)))
    w_matrix = np.array(w_matrix)
    d_matrix = np.diag(np.sum(w_matrix, 1))
    lap_matrix = d_matrix - w_matrix
    end_time = time.perf_counter()

    print("calculating the sl Laplacian matrix took %f seconds" % (end_time - start_time))
    return lap_matrix
Example #23
def get_lap_matrix_njw(data, sigma):
    # Gaussian kernel function
    # s(x_i, x_j) = exp(-|x_i - x_j|^2 /(2 * sigma^2))
    start_time = time.perf_counter()
    sls2 = -2 * np.mat(data) * np.mat(data.T)
    sls1 = np.mat(np.sum(data ** 2, 1))
    w_matrix = np.exp(-(sls2 + repmat(sls1, len(sls1), 1) + repmat(sls1.T, 1, len(sls1))) / float((2 * sigma ** 2)))
    w_matrix = np.array(w_matrix)
    d_matrix = np.diag(np.sum(w_matrix, 1))
    d_matrix_square_inv = np.linalg.inv(d_matrix ** 0.5)
    dot_matrix = np.dot(d_matrix_square_inv, w_matrix)
    lap_matrix = np.dot(dot_matrix, d_matrix_square_inv)
    end_time = time.perf_counter()

    print("calculating the njw Laplacian matrix took %f seconds" % (end_time - start_time))
    return lap_matrix
Example #24
 def _mask(self, shape):
     if len(shape) > 1:
         nsynch, nsamples = shape
     else:
         nsamples = shape[0]
         nsynch = 1
     return repmat(rand(1, nsamples) < self.sparsity, nsynch, 1)
Example #25
 def currentLosses(self, params):
     if self.batch_size > 1:
         params = repmat(params, 1, self.batch_size)
         res = self.loss_fun(params)
         return reshape(res, (self.batch_size, self.paramdim))
     else:
         return self.loss_fun(params)
Example #26
 def currentGradients(self, params):
     if self.batch_size > 1:
         params = repmat(params, 1, self.batch_size)
         res = self.gradient_fun(params)            
         return reshape(res, (self.batch_size, self.paramdim))
     else:
         return self.gradient_fun(params)
Example #27
def testcluster(lamBda=0.00001, ncluster=2, kx=50, rate=0.001, func=sign):
    S = zeros([n_total, n_total])
    r_index, l_index = randindex(n_total, rate, -1)
    S[r_index, l_index] = groundTruth[r_index, l_index]
    # function vectorization
    sign = func
    sign = np.vectorize(sign)
    S = sign(S + S.T)
    S = sparse.csr_matrix(S)
    #
    U, D = idc.inductive(X, S, kx, ncluster, lamBda, 50)
    #
    Xresult = matrix(U[:, 0:ncluster])
    Xresult = Xresult / (matlib.repmat(np.sqrt(np.square(Xresult).sum(axis=1)),
                                       1,
                                       ncluster) * 1.0)
    label = KMeans(n_clusters=ncluster).fit_predict(Xresult)
    label = array(label)
    predictA = - ones([n_total, n_total])
   #
    for i in range(ncluster):
        pos = np.where(label == i)[0]
        for j in pos:
            for k in pos:
                predictA[j, k] = 1
    #
    accbias = sum(predictA != groundTruth).sum() / float(np.product(groundTruth.shape))
    print('sample rate: ', rate, "  ", "err: ", accbias)
Example #28
 def signalToNoiseRatio(self, xs):
     """ What is the one-sample signal-to-noise ratio. """         
     rxs = repmat(xs, self.ESamples, 1).T
     gs = self._df(rxs)
     g2s = mean(gs **2, axis=1)
     gs = mean(gs, axis=1)
     return gs**2/g2s
Example #29
def hw1_find_eigendigits(A):
    # get mean vector by averaging the rows
    A = A.astype(float)

    m = mean(A, axis=1)
    
    # replace X with X - E(X)
    B = repmat(m, A.shape[1], 1)

    A = A - transpose(B)
    
    # get vec and val
    print "Before Eig"
    val, vec = eig(dot(transpose(A), A))
    print "After Eig"

    vec = dot(A, vec)
    
    # sort e-vec and e-val
    idx = val.argsort()[::-1]   
    val = val[idx]
    vec = vec[:,idx]
    
    # normalize columns
    V = vec / norm(vec, axis=0)
    V = nan_to_num(V)
    V = around(V, decimals=8).real

    return (m, V)
Example #30
def testHand(cards,model):
    if type(cards[0]) is str:
        cards = [CARDS[i] for i in cards]
    handCode = encodeCards(cards).reshape((1,85))
    netInput = np.hstack((matlib.repmat(handCode,32,1),np_utils.to_categorical(range(32),32)))
    predictions = model.predict(netInput,verbose=0)
    return [np.argmax(predictions),predictions]
Example #31
def phaserandomized(X):
    """Calculates phase randomized data based on real data. The full algorithm is described here Pritchard 1991.

        Parameters:
        -------
        X : ndarray
            3-D numpy array structured like (subject, channel, sample)

        Returns:
        -------
        Xr : ndarray
            3-D numpy array structured like (subject, channel, sample) with random phase added

    """
    start = default_timer()

    N, D, T = X.shape
    print(f'\n{N} subjects, {D} sensors and {T} samples')

    Xr = np.empty((N, D, T))

    for subject in range(0, N):

        Xfft = np.fft.rfft(X[subject, :, :], T)
        ampl = np.abs(Xfft)
        phi = np.angle(Xfft)
        # np.random.seed(42)
        phi_r = 4 * np.arccos(0) * np.random.rand(
            1, int(T / 2 - 1)) - 2 * np.arccos(0)
        Xfft[:, 1:int(T / 2)] = ampl[:, 1:int(T / 2)] * np.exp(
            np.sqrt(-1 + 0j) *
            (phi[:, 1:int(T / 2)] + npm.repmat(phi_r, D, 1)))
        Xr[subject, :, :] = np.fft.irfft(Xfft, T)

    stop = default_timer()
    print(f'Elapsed time: {round(stop - start)} seconds.')

    return Xr
Example #32
def geometric_brownian_motion(allow_negative=False, **kwargs):
    """
    Geometric Brownian Motion
    Step 1 - Calculate the Deterministic component - drift
    Alternative drift 1 - supporting random walk theory
    drift = 0
    Alternative drift 2 -
    drift = risk_free_rate - (0.5 * sigma**2)
    :return: asset path

    """

    starting_value = kwargs.get("starting_value")
    mu = kwargs.get("mu")
    sigma = kwargs.get("sigma")
    num_trading_days = kwargs.get("num_trading_days")
    num_per = kwargs.get("forecast_period")

    # Calculate Drift
    mu = mu / num_trading_days
    sigma = sigma / math.sqrt(num_trading_days)  # Daily volatility
    drift = mu - (0.5 * sigma**2)

    # Calculate Random Shock Component
    random_shock = np.random.normal(0, 1, (1, num_per))
    log_ret = drift + (sigma * random_shock)

    compounded_ret = np.cumsum(log_ret, axis=1)
    asset_path = starting_value + (starting_value * compounded_ret)

    # Include starting value
    starting_value = ml.repmat(starting_value, 1, 1)
    asset_path = np.concatenate((starting_value, asset_path), axis=1)

    if not allow_negative:
        asset_path *= asset_path > 0   # clamp negative prices to zero

    return asset_path.mean(axis=0)
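A usage sketch with illustrative parameter values (ours):

# Usage sketch: simulate one year of daily prices from a 100.0 start.
import numpy as np

np.random.seed(0)
path = geometric_brownian_motion(starting_value=100.0,
                                 mu=0.07,
                                 sigma=0.2,
                                 num_trading_days=252,
                                 forecast_period=252)
print(path.shape)   # (253,): the starting value plus 252 simulated days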
Example #33
def scattering_factor(elements, HKL, sf):
    """
    Input:
    elements: all the elements in a molecule -> mol.element (1*n) list
    HKL: (3*N) array contains all points that we are calculating in 3D Fourier space
    sf: object of class ScatterFactor, which contains information of sf for each atom

    Output:
    (n*N) array
    """
    atomTypes = np.unique(elements)

    f = np.zeros((len(elements), HKL.shape[1]))

    stols = np.array([0.25 * np.sum(np.square(HKL), axis=0)])

    for iType in range(atomTypes.shape[0]):

        try:
            sfnr = sf.label.index(atomTypes[iType])
        except ValueError:
            print "Cannot find " + atomTypes[iType] + " in atomsf.lib"

        idx = [i for i, x in enumerate(elements) if x == atomTypes[iType]]

        a = np.array(sf.a[sfnr])  # 1*4, based on the structure of the atomsf.lib file
        b = np.array(sf.b[sfnr])
        b.shape = len(b), 1  # 4*1
        c = sf.c[sfnr]

        fType = c + np.dot(a, np.exp(-b * stols))  # b * stols -> 4*N, fType-> 1*N

        f[idx, :] = repmat(fType, len(idx), 1)

    smallerthanzero = np.where(f < 0)
    f[smallerthanzero] = np.finfo(float).eps

    return f
Example #34
    def plot_vehicle(self,
                     pos: np.ndarray,
                     heading: float,
                     width: float,
                     length: float,
                     zorder: int = 10,
                     color_str: str = 'blue',
                     id_in: str = 'default') -> None:
        """
        Highlight a vehicle pose with a scaled bounding box.

        :param pos:         position of the vehicle's center of gravity
        :param heading:     heading of the vehicle (0.0 being north) [in rad]
        :param width:       width of the vehicle [in m]
        :param length:      length of the vehicle [in m]
        :param zorder:      z-order of the plotted object (layer in the plot)
        :param color_str:   string specifying color (use default color strings)
        :param id_in:       string with a unique object ID (previously plotted objects with the same ID will be removed)

        """

        # delete highlighted positions with handle
        if id_in in self.__veh_patch.keys():
            self.__veh_patch[id_in].remove()
            del self.__veh_patch[id_in]

        theta = heading - np.pi / 2

        bbox = (npm.repmat([[pos[0]], [pos[1]]], 1, 4)
                + np.matmul([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]],
                            [[-length / 2, length / 2, length / 2, -length / 2],
                             [-width / 2, -width / 2, width / 2, width / 2]]))

        patch = np.array(bbox).transpose()
        patch = np.vstack((patch, patch[0, :]))

        plt_patch = ptch.Polygon(patch, facecolor=color_str, zorder=zorder)
        self.__veh_patch[id_in] = self.__main_ax.add_artist(plt_patch)
Example #35
def get_3dim_spectrum_from_data(wav_data, frame, shift, fftl):
    """
    dump_wav : channel_size * speech_size (2dim)
    """
    len_sample, len_channel_vec = len(wav_data[:, 0]), 4
    dump_wav = wav_data.T
    dump_wav = dump_wav / np.max(np.abs(dump_wav)) * 0.7
    window = sg.windows.hann(fftl, sym=False)   # periodic Hann window
    multi_window = npm.repmat(window, len_channel_vec, 1)
    st = 0
    ed = frame
    number_of_frame = int((len_sample - frame) / shift)
    spectrums = np.zeros(
        (len_channel_vec, number_of_frame, fftl // 2 + 1),
        dtype=np.complex64)
    for ii in range(0, number_of_frame):
        multi_signal_spectrum = fft(dump_wav[:, st:ed], n=fftl,
                                    axis=1)[:, 0:fftl // 2 + 1]  # channel * number_of_bin
        spectrums[:, ii, :] = multi_signal_spectrum
        st = st + shift
        ed = ed + shift
    return spectrums, len_sample
Example #36
def sortXYZ(xyz):
    # SORTXYZ Sort 3D points in clockwise order.
    #   The definition of clockwise order for 3D points is ill-posed. To make it
    #   well defined, we must first define a viewing direction. This code treats
    #   the center of the set of points (as seen from the origin) as the viewing
    #   direction.
    ori_xyz = xyz
    xyz = xyz / repmat(np.sum(xyz**2, 1), 3, 1).T
    center = np.mean(xyz, 0)
    center = center / np.linalg.norm(center, 2)
    # set up z axis at center
    z = center
    x = [-center[1], center[0], 0]
    x = x / np.linalg.norm(x, 2)
    y = np.cross(z, x)
    R = np.dot(np.diag([1, 1, 1]), np.linalg.pinv(np.array([x, y, z]).T))

    newXYZ = np.dot(R, (xyz.T))
    A = np.arctan2(newXYZ[1, :], newXYZ[0, :])
    I = np.argsort(A)

    s_xyz = ori_xyz[I, :]

    return [s_xyz, I]
Example #37
    def getLocalChart(self, point, U0=None):
        """Find the local coordinate chart to perform approximations"""
        if self.sigma is None:
            print("Warning: sigma not initialized - initializing with calculateSigma()")
            self.calculateSigmaFromPoint(point)
        if self.kd_tree is None:
            print("Warning: No KD tree. Looking for neighbors one by one.")
            print("currently not implemented")
            raise Exception("no KD tree")

        init_q = self.genInitQ(point)

        if self.recompute_sigma:
            self.calculateSigmaFromPoint(init_q)
        U, q, PX0, X0, W, _ = self.findLocalCoordinatesThresh( point, init_q, self.compactWeightFunc, U0, initSVD = self.initSVD)
        D, I = self.kd_tree.query(q, self.sigma) # work with neighbors according to the SIGMA_Q neighborhood (so we'll have enough)
        D = D[0]
        I = I[0][D<np.inf]
        X0 = self.data[:,I] - nlib.repmat(q, 1, len(I))
        PX0 = np.dot(U.T, X0)
        F0 = self.f_data[I]
        W = self.compactWeightFunc(np.linalg.norm(PX0, axis=0))
        return q, X0, PX0, F0, W, U
Example #38
def zscore_matrix(data, group, controlCode):
    """
    Z-score data relative to a given group (author: @saratheriver)

    Parameters
    ----------
    data : pandas.DataFrame
        Data matrix (e.g. thickness data), shape = (n_subject, n_region)
    group : list
        Group assignment (e.g., [0, 0, 0, 1, 1, 1]), same length as n_subject
    controlCode : int
        Value that corresponds to "baseline" group

    Returns
    -------
    Z : pandas.DataFrame
        Z-scored data relative to control code
    """
    C = [i for i, x in enumerate(group) if x == controlCode]
    n = len(group)
    z1 = data - npm.repmat(np.mean(data.iloc[C, ]), n, 1)
    z2 = np.std(data.iloc[C, ], ddof=1)
    return z1 / z2
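A compact usage sketch (ours), assuming the numpy.matlib-as-npm import the function expects:

# Usage sketch: z-score three patients against three controls (code 0).
import numpy as np
import pandas as pd

data = pd.DataFrame(np.random.randn(6, 4))       # 6 subjects x 4 regions
group = [0, 0, 0, 1, 1, 1]
Z = zscore_matrix(data, group, controlCode=0)
print(Z.shape)   # (6, 4): each value z-scored against the control mean/SD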
Example #39
    def compute_svm_cost(self, X, Y, W=None, b=None):
        """
        Calculates the cost of the loss function in the SVM multi-class sense 
        for the given data, weights and bias.
        """

        if W is None:
            W = self.W

        if b is None:
            b = self.b

        N = X.shape[1]
        s = W.dot(X) + b
        sy = repmat(s.T[Y.T == 1], Y.shape[0], 1)

        margins = np.maximum(0, s - sy + 1)
        margins[Y == 1] = 0
        loss = margins.sum() / N

        regularization = 0.5 * self.l * np.sum(W**2)

        return loss + regularization
Example #40
def diffusionMap(sigmaK, alpha=1, numEigs=6):

    myHdf5 = h5py.File(fname, 'r+')

    D_data = myHdf5[grpNameDM+dset_data][()]
    dim = myHdf5[grpNameDM+dset_data].attrs['numHits']
    D_indices = myHdf5[grpNameDM+dset_indices]
    D_indptr = myHdf5[grpNameDM+dset_indptr]

    P = normalizedGraphLaplacian(D_data,D_indices,D_indptr,dim,sigmaK)

    tic = time.time()
    s, u = eigsh(P,k=numEigs+1,which='LM')
    u = np.real(u)
    u = np.fliplr(u)
    toc = time.time()
    print "%%%%: ",toc-tic, u, u.shape, s, s.shape

    U = u/repmat(np.matrix(u[:,0]).transpose(),1,numEigs+1)
    Y = U[:,1:numEigs+1]

    myHdf5.close()
    return Y,s
Example #41
def vdpmm_maximizeCNN(data, params, gammas, stddev):
    D = data.shape[1]
    N = data.shape[0]
    K = (params['a']).shape[0]
    a0 = D
    beta0 = 1
    mean0 = np.mean(data, axis=0)
    B0 = .1 * D * np.cov(data.T)

    #convenience variables first
    Ns = np.sum(gammas, axis=0) + 1e-10
    mus = np.zeros((K, D))
    sigs = np.zeros((D, D, K))
    mus = np.dot(gammas.T, data) / (repmat(Ns, D, 1).T)
    for i in range(K):
        diff0 = data - repmat(mus[i, :], N, 1)
        diff1 = repmat(np.sqrt(gammas[:, i]), D, 1).T * diff0
        sigs[:, :, i] = np.dot(diff1.T, diff1)

    #now the estimates for the variational parameters
    params['g'][:, 0] = 1 + np.sum(gammas, axis=0)
    #g_{s,2} = Eq[alpha] + sum_n sum_{j=s+1} gamma_j^n
    temp1 = (params['eq_alpha'] +
             np.flipud(np.cumsum(np.flipud(np.sum(gammas, 0)))) -
             np.sum(gammas, 0))
    params['g'][:, 1] = temp1
    params['beta'] = Ns + beta0
    params['a'] = Ns + a0
    tempNs = repmat(Ns, D, 1).T * mus
    for k in range(K):
        if k > 1:
            params['mean'][k, :] = (tempNs[k] + beta0 * mean0) / (repmat(
                Ns[k] + beta0, D, 1).T) + 0.1 * params['mean'][k - 1, :]
        else:
            params['mean'][k, :] = (tempNs[k] + beta0 * mean0) / (repmat(
                Ns[k] + beta0, D, 1).T)

    #for one dimension
    tempStddev = np.sum(gammas * stddev, axis=0)
    tempStddev.shape = (K, 1)
    for i in range(K):
        diff = mus[i, :] - mean0
        params['B'][:, :, i] = sigs[:, :, i] + Ns[i] * beta0 * np.dot(
            diff, diff.T) / (Ns[i] + beta0) + B0 + tempStddev[i]
    return params
Example #42
def compute_statistics_random_h_star(
        h_sim: np.ndarray,
        max_cell_radius=None,
        simulation_number=None) -> (List[int], List[int], List[int]):
    """
    Build related statistics derived from Ripley's K function, normalize K
    """
    simulation_number = simulation_number or constants.analysis_config[
        "RIPLEY_K_SIMULATION_NUMBER"]
    max_cell_radius = max_cell_radius or constants.analysis_config[
        "MAX_CELL_RADIUS"]

    h_sim = np.power((h_sim * 3) / (4 * math.pi), 1. / 3) - matlib.repmat(
        np.arange(1, max_cell_radius + 1), simulation_number, 1)
    h_sim_sorted = np.sort(h_sim)
    h_sim_sorted = np.sort(h_sim_sorted[:, ::-1], axis=0)
    synth95 = h_sim_sorted[int(
        np.floor(0.95 * simulation_number)
    )]  # TODO: difference with V0: floor, since for large values we would get simulation_number here
    synth50 = h_sim_sorted[int(np.floor(0.5 * simulation_number))]
    synth5 = h_sim_sorted[int(np.floor(0.05 * simulation_number))]

    return synth5, synth50, synth95
Example #43
def costFunction(initial_theta, X, Y):
    m = len(Y)
    J = 0
    for a, b in zip(Y, np.dot(X, initial_theta)):
        J += -a * np.log(sigmoid(b)) - (1 - a) * np.log(1 - sigmoid(b))

    J /= m

    in_mat = []

    for a, b in zip(np.dot(X, initial_theta), Y):
        in_mat.append(sigmoid(a) - b)

    in_mat = np.array(in_mat)

    size_X = X.shape
    in_mat = npm.repmat(in_mat, 1, size_X[1])

    matsum = []
    for x, y in zip(X, in_mat):
        matsum.append(x * y)

    return J, sum(matsum) / m
Example #44
def jensen_shannon_div(query_arr, train_mat):

    # normalize arrays so that they become probability distributions
    query_arr = query_arr / float(np.sum(query_arr))

    train_mat = np.divide(train_mat.T, np.sum(train_mat, 1)).T

    query_mat = repmat(query_arr, len(train_mat), 1)

    mat_sum = 0.5 * (query_mat + train_mat)

    D1 = query_mat * np.log2(np.divide(query_mat, mat_sum))

    D2 = train_mat * np.log2(np.divide(train_mat, mat_sum))

    # convert all nans to 0
    D1[np.isnan(D1)] = 0

    D2[np.isnan(D2)] = 0

    JS_mat = 0.5 * (np.sum(D1, 1) + np.sum(D2, 1))

    return JS_mat
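A short usage sketch (ours), assuming repmat is imported from numpy.matlib as the function expects:

# Usage sketch: JS divergence between a query histogram and each training row.
import numpy as np

query = np.array([4.0, 3.0, 2.0, 1.0])      # unnormalized counts are fine
train = np.random.rand(5, 4)                # five candidate distributions
print(jensen_shannon_div(query, train))     # five values in [0, 1] (log base 2)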
Example #45
def h2e(v):
    """
    Convert from homogeneous to Euclidean form
    
    :param v: homogeneous vector or matrix
    :type v: array_like
    :return: Euclidean vector
    :rtype: numpy.ndarray

    - If ``v`` is an array, shape=(N,), return an array shape=(N-1,) where the elements have
      all been scaled by the last element of ``v``.
    - If ``v`` is a matrix, shape=(N,M), return a matrix shape=(N-1,M), where each column has
      been scaled by its last element.
      
    :seealso: e2h
    """
    if argcheck.isvector(v):
        # dealing with shape (N,) array
        v = argcheck.getvector(v)
        return v[0:-1] / v[-1]
    elif isinstance(v, np.ndarray) and len(v.shape) == 2:
        # dealing with matrix
        return v[:-1, :] / matlib.repmat(v[-1, :], v.shape[0] - 1, 1)
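Two quick usage sketches (ours), assuming the spatialmath-style argcheck helpers this example imports:

# Usage sketch: homogeneous -> Euclidean for a vector and a matrix of columns.
import numpy as np

print(h2e(np.array([2.0, 4.0, 2.0])))   # [1. 2.]
print(h2e(np.array([[2.0, 6.0],
                    [4.0, 9.0],
                    [2.0, 3.0]])))      # [[1. 2.], [2. 3.]]: columns scaled by the last row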
Example #46
def los_channel(rx_ant_coordinates, tx_ant_coordinates, wavelength):
    shape = rx_ant_coordinates.shape
    if (len(shape) == 1):
        nr = 1
    else:
        nr = len(rx_ant_coordinates)
    shape = tx_ant_coordinates.shape
    if (len(shape) == 1):
        nt = 1
    else:
        nt = len(tx_ant_coordinates)
    rr = mat.repmat(rx_ant_coordinates, nt, 1)
    tt = np.kron(tx_ant_coordinates, np.ones((nr, 1)))
    diff = np.power((rr - tt), 2)
    diff = np.sum(diff, axis=1)
    dist = np.power(diff, 0.5)
    dist = dist.reshape(nr, nt)
    dist = np.array(dist)
    los = np.zeros((nr, nt), dtype=complex)
    for ii in range(nr):
        for jj in range(nt):
            los[ii][jj] = np.exp(1j * 2 * np.pi * dist[ii][jj] / wavelength)
    return los
Example #47
def extend_time(feats, upsampling_factor):
    """FUNCTION TO EXTEND TIME RESOLUTION

    Args:
        feats (ndarray): feature vector with the shape (T x D)
        upsampling_factor (int): upsampling_factor

    Return:
        (ndarray): extend feats with the shape (upsampling_factor*T x D)
    """
    # get number
    n_frames = feats.shape[0]
    n_dims = feats.shape[1]

    # extend time
    feats_extended = np.zeros((n_frames * upsampling_factor, n_dims))
    for j in range(n_frames):
        start_idx = j * upsampling_factor
        end_idx = (j + 1) * upsampling_factor
        feats_extended[start_idx:end_idx] = repmat(feats[j, :],
                                                   upsampling_factor, 1)

    return feats_extended
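A minimal usage sketch (ours):

# Usage sketch: upsample three 2-dim frames by a factor of 4.
import numpy as np

feats = np.arange(6.0).reshape(3, 2)
ext = extend_time(feats, 4)
print(ext.shape)   # (12, 2): each frame repeated 4 times in sequence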
Example #48
def initialize_all(numParticles, numTimeSteps, cluster, boxLength, hDiameter, aligned, field, \
 fieldAmp, angFreq, timeSteps, shape, rAvg):
    """Initializes all particles and fields."""
    #create empty matrices
    particleMoments, particleAxes = initialize_empty_matrices(
        numParticles, numTimeSteps, shape)

    #fill matrices, initialize particles and fields
    particleMoments, particleAxes, mStart, nStart, hApplied, particleCoords = initialize_particles(particleMoments, \
     particleAxes, numParticles, numTimeSteps, cluster, boxLength, hDiameter, aligned, field, fieldAmp, angFreq, \
     timeSteps, shape)

    #initialize "ghost" coordinates for periodic boundary conditions
    ghostCoords, masks = initialize_ghost_coords(particleCoords, rAvg,
                                                 boxLength)
    distMatrixSq = repmat(ghostCoords, len(ghostCoords), 1) - repeat(
        ghostCoords, len(ghostCoords), axis=0)
    distMatrixSq = distMatrixSq.reshape(
        (len(ghostCoords), len(ghostCoords), 3))
    distMatrix = np.sqrt(np.sum(distMatrixSq**2, axis=2))

    return particleMoments, particleAxes, mStart, nStart, hApplied, particleCoords, ghostCoords, masks, \
     distMatrixSq, distMatrix
Example #49
    def create_plot(self):
        # streaming data
        self.t = 0
        pg.setConfigOption('background', 'w')
        pg.setConfigOption('foreground', 'k')

        self.stream_scroll = pg.PlotWidget(title='EEG', background='w')
        self.stream_scroll.setYRange(0.5, self.channel_count, padding=0.1)

        self.stream_scroll_time_axis = np.linspace(-5, 0, self.samples * 4)
        self.stream_scroll.setXRange(-5, 0, padding=.005)

        labelStyle = {'color': '#000', 'font-size': '14pt'}
        self.stream_scroll.setLabel('bottom', 'Time', 'Seconds', **labelStyle)
        self.stream_scroll.setLabel('left', 'Channel', **labelStyle)
        color_list = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                      (0, 255, 255), (255, 0, 255), (0, 0, 0), (255, 0, 0),
                      (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                      (0, 255, 255), (255, 0, 255), (0, 0, 0), (255, 0, 0)]

        for i in range(self.channel_count - 1, -1, -1):
            self.filtered_data['filtered_channel{}'.format(i +
                                                           1)] = matlib.repmat(
                                                               [0],
                                                               self.samples *
                                                               4, 1).T.ravel()
            self.curves['curve_channel{}'.format(i +
                                                 1)] = self.stream_scroll.plot(
                                                     pen=color_list[i])
            self.curves['curve_channel{}'.format(i + 1)].setData(
                x=self.stream_scroll_time_axis,
                y=([
                    point + i + 1 for point in self.filtered_data[
                        'filtered_channel{}'.format(i + 1)]
                ]))
        # print(len(self.data_buffer))
        self.set_layout()
Example #50
def calcDistanceMatrix2(AB,
                        distFunc=lambda delta: sqrt(sum(delta**2, axis=1))):
    assert (len(AB) in [1, 2] and type(AB) != ndarray)
    if len(AB) == 2:
        A, B = AB
        #if (A==B).all(): return calcDistanceMatrix2([A],distFunc)
        #A = array(A)
        #B = array(B)
        nA, dim = A.shape
        assert (B.shape[1] == dim)
        nB = B.shape[0]
        print(A.shape, nB, B.shape, nA)
        delta = repeat(A, nB, 0) - repmat(B, nA, 1)
        dist = distFunc(delta).reshape(nA, nB)  # dist[i,j] = d(A[i],B[j])
        del delta
        return dist
    else:  # elif len(AB)==1:
        A = array(AB[0])
        nA, dim = A.shape  #max nA <= 800
        rows = repeat(range(nA), nA)  # 0,0,0,...,n-1,n-1
        cols = array(list(range(nA)) * nA)  # 0,1,2,...,0,1,2,...
        upper_ind = where(cols > rows)[0]
        # nA == (1+sqrt(1+8*len(upper_ind))/2
        ##lower_ind = where(cols<rows)[0]
        delta = A[rows[upper_ind], :] - A[cols[upper_ind], :]
        del rows
        del cols
        # computes all possible combinations
        #dist = zeros(nA*nA)
        #partial_delta = delta[:,upper_ind]
        partial_dist = distFunc(delta)
        del delta
        partial_dist.setfield(upper_ind, dtype=int32)
        #dist[upper_ind] = partial_dist
        #dist = dist.reshape(nA, nA) # dist[i,j] = d(A[i],A[j]) for i<j
        #dist = dist + dist.T # make it symmetric
        return partial_dist
Example #51
    def gbm_monte_carlo(s0, sigma, mu, nPer, nTDays, nSim):
        """
        Geometric Brownian Motion Monte Carlo Simulation
        #s0 - starting price
        #sigma - annual volatility
        #mu - annual expected return
        #nPer - number of forecast period in days
        #nTDays - number of trading days in one year  
        #nSim - number of simulations
        returns simulated asset path
        """
        ''' Step 1 - Calculate the Deterministic component - drift    
        Alternative drift 1 - supporting random walk theory
        drift = 0     
        Alternative drift 2 - 
        drift = risk_free_rate - (0.5 * sigma**2)
        '''

        #Industry standard for drift
        mu = mu / nTDays  #Daily return
        sigma = sigma / math.sqrt(nTDays)  #Daily volatility
        drift = mu - (0.5 * sigma**2)
        ''' Step 2 - Create a matrix of stochastic component - random shock '''
        z = np.random.normal(0, 1, (nSim, nPer))
        log_ret = drift + (sigma * z)
        ''' Compound return using vectorize method 
        LN(Today/Yesterday) = drift + random shock * sigma 
        '''
        compounded_ret = np.cumsum(log_ret, axis=1)
        asset_path = s0 + (s0 * compounded_ret)

        #Include starting value
        s0 = matlib.repmat(s0, nSim, 1)
        asset_path = np.concatenate((s0, asset_path), axis=1)
        asset_path *= (asset_path > 0)  #set negative to zero

        return (asset_path)
Example #52
    def compute_s_prime_a(self, s_primes):
        """Given the list of next states s_prime, it returns the matrix state actions that
        for each state prime the contains all the pairs s'a where a is in the action set
        of s'.
        """

        # Get the number of actions for each state
        n_actions_per_state = list(
            map(lambda x: len(x),
                map(lambda s: self._actions[tuple(s)], s_primes)))
        tot_n_actions = sum(n_actions_per_state)
        n_states = s_primes.shape[0]
        sa = np.empty((tot_n_actions, self.state_dim + self.action_dim))

        end = 0
        for i in range(n_states):
            # set interval variables
            start = end
            end = end + n_actions_per_state[i]

            # set state prime
            i_s_prime = s_primes[i, :]
            n_actions = n_actions_per_state[i]

            # populate the matrix with the ith state prime
            sa[start:end,
               0:self.state_dim] = matlib.repmat(i_s_prime, n_actions, 1)

            # populate the matrix with the actions of the action set of ith state prime
            sa[start:end, self.state_dim:] =\
                np.array(self._actions[tuple(i_s_prime)]).reshape((n_actions, self.action_dim))

        # reset self._actions to save memory
        self._actions = []

        self.sprime_a = sa
        self.n_actions_per_state_prime = n_actions_per_state
Example #53
def find_knee(values):
    # get coordinates of all the points
    nPoints = len(values)
    allCoord = np.vstack((range(nPoints), values)).T
    # np.array([range(nPoints), values])

    # get the first point
    firstPoint = allCoord[0]
    # get vector between first and last point - this is the line
    lineVec = allCoord[-1] - allCoord[0]
    lineVecNorm = lineVec / np.sqrt(np.sum(lineVec**2))

    # find the distance from each point to the line:
    # vector between all points and first point
    vecFromFirst = allCoord - firstPoint

    # To calculate the distance to the line, we split vecFromFirst into two
    # components, one that is parallel to the line and one that is perpendicular
    # Then, we take the norm of the part that is perpendicular to the line and
    # get the distance.
    # We find the vector parallel to the line by projecting vecFromFirst onto
    # the line. The perpendicular vector is vecFromFirst - vecFromFirstParallel
    # We project vecFromFirst by taking the scalar product of the vector with
    # the unit vector that points in the direction of the line (this gives us
    # the length of the projection of vecFromFirst onto the line). If we
    # multiply the scalar product by the unit vector, we have vecFromFirstParallel
    scalarProduct = np.sum(vecFromFirst * mb.repmat(lineVecNorm, nPoints, 1),
                           axis=1)
    vecFromFirstParallel = np.outer(scalarProduct, lineVecNorm)
    vecToLine = vecFromFirst - vecFromFirstParallel

    # distance to line is the norm of vecToLine
    distToLine = np.sqrt(np.sum(vecToLine**2, axis=1))

    # knee/elbow is the point with max distance value
    idxOfBestPoint = np.argmax(distToLine)
    return idxOfBestPoint
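A quick usage sketch (the curve is illustrative; find_knee assumes numpy is imported as np and numpy.matlib as mb):

import numpy as np
import numpy.matlib as mb

values = 1.0 / np.arange(1, 21)  # an elbow-shaped decaying curve
knee = find_knee(values)
print(knee)  # index of the point farthest from the first-to-last chord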
Ejemplo n.º 54
0
def mf(f):
    FFT = np.fft.rfft(f, axis=0)  # FFT of signal
    RF = np.real(FFT)  # Real part of FFT
    IF = np.imag(FFT)  # Imaginary part of FFT
    F = np.abs(FFT)  # Magnitude of spectrum
    # AF = np.sqrt(np.power(RF,2)+np.power(IF,2))/FFT.shape[0]  # Amplitude of FFT
    AF = np.abs(FFT)
    PhF = np.arctan(np.divide(IF, RF + np.finfo(float).eps))  # Phase of FFT (not returned)
    PF = np.power(F, 2)  # Power of spectrum
    PF[1:-1] = 2 * PF[1:-1]
    sumF = 0.5 * np.sum(F[1:], axis=0)
    sumPF = 0.5 * np.sum(PF[1:], axis=0)
    if len(F.shape) <= 1:
        F = F[:, np.newaxis]
        PF = PF[:, np.newaxis]
    freq = npm.repmat(
        np.array(range(F.shape[0]))[:, np.newaxis], 1, F.shape[-1])
    # Median (power) and median (magnitude) frequency: first bin where the
    # cumulative spectrum reaches half of its total
    MDF = np.array([
        next(i for i in range(1, len(freq) + 1)
             if np.sum(PF[1:i + 1, j], axis=0) >= sumPF[j])
        for j in range(PF.shape[-1])
    ])
    MMDF = np.array([
        next(i for i in range(1, len(freq) + 1)
             if np.sum(F[1:i + 1, j], axis=0) >= sumF[j])
        for j in range(F.shape[-1])
    ])
    sumPF[sumPF == 0] = 1.
    sumF[sumF == 0] = 1.
    # Mean (power) and mean (magnitude) frequency
    MNF = np.sum(np.divide(np.multiply(PF[1:], freq[1:]), sumPF), axis=0)
    MMNF = np.sum(np.divide(np.multiply(F[1:], freq[1:]), sumF), axis=0)
    out = np.concatenate((np.array([MNF, MDF, MMNF, MMDF]), RF, IF, F, AF, PF),
                         axis=0)
    return out, np.array([MNF, MDF, MMNF,
                          MMDF]), RF, IF, F, AF, PF, time.time()
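A usage sketch on a synthetic tone (sample rate and tone frequency are illustrative; mf assumes numpy as np, numpy.matlib as npm, and the time module are imported):

import time
import numpy as np
import numpy.matlib as npm

fs = 1000.0
t = np.arange(0, 1, 1 / fs)
sig = np.sin(2 * np.pi * 50 * t)[:, np.newaxis]  # 50 Hz tone, one channel

out, feats, RF, IF, F, AF, PF, ts = mf(sig)
print(feats.T)  # per channel: [MNF, MDF, MMNF, MMDF], in FFT-bin units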
Ejemplo n.º 55
0
    def _to_one_hot(self, labels, reverse=False):
        """ Transform a list of labels into a 1-hot encoding.

        Args:
            labels: A list of class labels.
            reverse: If true, then one-hot encoded samples are transformed back
                to categorical labels.

        Returns:
            The 1-hot encoded labels.
        """
        if not self.classification:
            raise RuntimeError('This method can only be called for ' +
                               'classification datasets.')

        # Initialize encoder.
        if self._one_hot_encoder is None:
            self._one_hot_encoder = OneHotEncoder(
                categories=[range(self.num_classes)])
            num_time_steps = 1
            if self.sequence:
                num_time_steps = labels.shape[1] // np.prod(self.out_shape)
            self._one_hot_encoder.fit(
                npm.repmat(np.arange(self.num_classes), num_time_steps, 1).T)

        if reverse:
            # Unfortunately, there is no inverse function in the OneHotEncoder
            # class. Therefore, we take the one-hot-encoded "labels" samples
            # and take the indices of all 1 entries. Note that these indices
            # are returned as tuples, where the second column contains the
            # original column indices. These column indices from "labels"
            # modulo the number of classes yield the original labels.
            return np.reshape(
                np.argwhere(labels)[:, 1] % self.num_classes,
                (labels.shape[0], -1))
        else:
            return self._one_hot_encoder.transform(labels).toarray()
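A minimal round-trip sketch of the same idea with sklearn's OneHotEncoder directly (the 4-class setup is an assumption for illustration):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

num_classes = 4
enc = OneHotEncoder(categories=[range(num_classes)])
enc.fit(np.arange(num_classes).reshape(-1, 1))

labels = np.array([[0], [2], [3]])
one_hot = enc.transform(labels).toarray()
# Reverse: column index of each 1 entry, modulo the number of classes
recovered = np.argwhere(one_hot)[:, 1] % num_classes
print(recovered)  # [0 2 3]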
Ejemplo n.º 56
0
def get_3dim_spectrum(wav_name, channel_vec, start_point, stop_point, frame,
                      shift, fftl):
    """
    dump_wav : channel_size * speech_size (2dim)
    """
    samples, _ = sf.read(wav_name.replace('{}', str(channel_vec[0])),
                         start=start_point,
                         stop=stop_point,
                         dtype='float32')
    if len(samples) == 0:
        return None, None
    dump_wav = np.zeros((len(channel_vec), len(samples)), dtype=np.float16)
    dump_wav[0, :] = samples.T
    for ii in range(0, len(channel_vec) - 1):
        samples, _ = sf.read(wav_name.replace('{}', str(channel_vec[ii + 1])),
                             start=start_point,
                             stop=stop_point,
                             dtype='float32')
        dump_wav[ii + 1, :] = samples.T

    dump_wav = dump_wav / np.max(np.abs(dump_wav)) * 0.7  # peak-normalize to 0.7
    window = sg.windows.hann(fftl + 1)[:-1]  # periodic Hann window
    multi_window = npm.repmat(window, len(channel_vec), 1)
    st = 0
    ed = frame
    number_of_frame = int((len(samples) - frame) / shift)
    spectrums = np.zeros(
        (len(channel_vec), number_of_frame, int(fftl / 2) + 1),
        dtype=np.complex64)
    for ii in range(0, number_of_frame):
        # Window each frame before the FFT (assumes frame == fftl)
        multi_signal_spectrum = fft(dump_wav[:, st:ed] * multi_window, n=fftl,
                                    axis=1)[:, 0:int(fftl / 2) + 1]  # channel * number_of_bin
        spectrums[:, ii, :] = multi_signal_spectrum
        st = st + shift
        ed = ed + shift
    return spectrums, len(samples)
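A usage sketch (the file pattern and STFT parameters are assumptions; the function relies on soundfile as sf, scipy.signal as sg, numpy.matlib as npm, and an fft function being imported):

# Four channels stored as ch1.wav ... ch4.wav, 16 kHz, first 10 seconds
spec, n_samples = get_3dim_spectrum('ch{}.wav', [1, 2, 3, 4],
                                    start_point=0, stop_point=160000,
                                    frame=512, shift=256, fftl=512)
print(spec.shape)  # (4, number_of_frame, 257): channel x frame x bin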
Ejemplo n.º 57
0
    def sample(self, b=None, isQMDP=False):
        if b is None:
            b = self.current_belief

        V = self.alpha_v
        b = np.matrix(b)
        # If Q-MDP choose action directly
        if isQMDP:
            Q = V * b.T
            act = greedy('samp', Q)

        # else: Build Q-matrix
        else:
            Q = np.zeros(self._nA)
            for a in range(self._nA):
                # Compute updated beliefs for current action and all observations
                Vaux = (b * self._P[a]).T

                Vaux = np.multiply(npmat.repmat(Vaux, 1, self._nZ), self._O[a])
                Vaux = V * Vaux

                Vmax = np.amax(Vaux, 0)[0]

                # Compute observation probabilities and multiply

                Q[a] = (b * np.matrix(self._R[:, a]).T).item(
                    0, 0) + self._gamma * np.sum(Vmax)
            # print "Q : {}".format(Q)
            # raw_input()
            act = greedy('prob', Q)

        f_act = {self.main_act: [act]}

        return f_act
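In short: with isQMDP=True the action is scored directly as Q = V·bᵀ, i.e. assuming the state becomes fully observable after one step, while the default branch performs a proper one-step lookahead, updating the belief for every action-observation pair before consulting the alpha vectors.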
Ejemplo n.º 58
0
def patient_staging(pi0, event_centers, likeli_post, likeli_pre, type_staging):
    L_yes = np.divide(likeli_post, likeli_post + likeli_pre + 1e-100)
    L_no = 1 - L_yes
    event_centers_pad = np.insert(event_centers, 0, 0)
    event_centers_pad = np.append(event_centers_pad, 1)
    pk_s = np.diff(event_centers_pad)

    m = L_yes.shape
    prob_stage = np.zeros((m[0], m[1] + 1))
    p_no_perm = L_no[:, pi0]
    p_yes_perm = L_yes[:, pi0]
    # np.nanprod with axis=1 multiplies along each row, treating NaNs as 1;
    # p_yes_perm[:, :j] grows in columns while p_no_perm[:, j:] shrinks.
    for j in range(m[1] + 1):
        prob_stage[:, j] = pk_s[j] * np.multiply(
            np.nanprod(p_yes_perm[:, :j], axis=1),
            np.nanprod(p_no_perm[:, j:], axis=1))

    all_stages_rep2 = matlib.repmat(event_centers_pad[:-1], m[0], 1)

    if type_staging[0] == 'exp':
        subj_stages = np.zeros(prob_stage.shape[0])
        for i in range(prob_stage.shape[0]):
            idx_nan = np.isnan(p_yes_perm[i, :])
            pr = prob_stage[i, 1:]
            ev = event_centers_pad[1:-1]
            # Expected stage: average the event centers weighted by the
            # patient's per-biomarker stage probabilities (NaN entries removed).
            subj_stages[i] = np.mean(
                np.multiply(
                    np.append(prob_stage[i, 0], pr[~idx_nan]),
                    np.append(event_centers_pad[0], ev[~idx_nan]))) / np.mean(
                        np.append(prob_stage[i, 0], pr[~idx_nan]))
    elif type_staging[0] == 'ml':
        subj_stages = np.argmax(prob_stage, axis=1)

    return subj_stages
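For intuition, the 'exp' branch is a probability-weighted average stage; a compact sketch of the same expectation with toy numbers (assuming no NaN biomarkers):

import numpy as np

prob_stage_i = np.array([0.1, 0.6, 0.3])     # one patient's posterior over stages
stage_centers = np.array([0.0, 0.25, 0.75])  # event_centers_pad[:-1]-style grid
expected_stage = np.sum(prob_stage_i * stage_centers) / np.sum(prob_stage_i)
print(expected_stage)  # 0.375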
Ejemplo n.º 59
0
def llc(X, D, knn=5):
    # the sparse coder introduced in
    # "Locality-constrained Linear Coding for Image Classification"

    n_samples = X.shape[1]
    n_atoms = D.shape[1]
    # has the distance of
    # each sample to each atom
    dist = np.zeros((n_samples, n_atoms))
    # calculate the distances
    for i in range(n_samples):
        for j in range(n_atoms):
            dist[i, j] = norm(X[:, i] - D[:, j])

    # has the indices of the atoms
    # that are nearest neighbour to each sample
    knn_idx = np.zeros((n_samples, knn)).astype(int)
    for i in range(n_samples):
        knn_idx[i, :] = np.argsort(dist[i, :])[:knn]
    # the sparse coding matrix
    Z = np.zeros((n_atoms, n_samples))
    II = np.eye(knn)
    beta = 1e-4
    b = np.ones(knn)
    for i in range(n_samples):
        idx = knn_idx[i, :]
        z = D.T[idx, :] - repmat(X.T[i, :], knn, 1)
        C = np.dot(z, z.T)
        C = C + II * beta * np.trace(C)
        # solve the linear system C*c=b
        c = solve(C, b)
        # enforce the constraint on the sparse codes
        # such that sum(c)=1
        c = c / float(np.sum(c))
        Z[idx, i] = c

    return Z
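A usage sketch on random data (llc assumes numpy as np plus norm, solve, and repmat in scope, e.g. from numpy.linalg and numpy.matlib):

import numpy as np

np.random.seed(0)
X = np.random.randn(16, 100)  # 16-dim features, 100 samples as columns
D = np.random.randn(16, 32)   # dictionary with 32 atoms as columns

Z = llc(X, D, knn=5)
print(Z.shape)                          # (32, 100)
print(np.allclose(Z.sum(axis=0), 1.0))  # each code sums to 1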
Ejemplo n.º 60
0
 def _create_accum_list_labeled(self, shortest_paths, maxpath, labels_t,
                                numlabels):
     """
     Construct the accumulation array matrix for one dataset
     containing labeled graph data.
     """
     res = lil_matrix(
         np.zeros((len(shortest_paths),
                   (maxpath + 1) * numlabels * (numlabels + 1) // 2)))
     for i, s in enumerate(shortest_paths):
         labels = labels_t[i]
         labels_aux = matlib.repmat(labels, 1, len(labels))
         min_lab = np.minimum(labels_aux.T, labels_aux)
         max_lab = np.maximum(labels_aux.T, labels_aux)
         subsetter = np.triu(~(np.isinf(s)))
         min_lab = min_lab[subsetter]
         max_lab = max_lab[subsetter]
         ind = (s[subsetter] * numlabels * (numlabels + 1) // 2 +
                (min_lab - 1) * (2 * numlabels + 2 - min_lab) // 2 +
                max_lab - min_lab).astype(int)
         accum = np.zeros((maxpath + 1) * numlabels * (numlabels + 1) // 2)
         accum[:ind.max() + 1] += np.bincount(ind)
         res[i] = lil_matrix(accum)
     return res
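As a sanity check on the pair-indexing term (assuming 1-based labels, as the formula implies), a small sketch enumerating the unordered label pairs for numlabels = 3:

# (min_lab-1)*(2*numlabels+2-min_lab)//2 + max_lab - min_lab maps the
# 6 unordered pairs to 0..5
numlabels = 3
for min_lab in range(1, numlabels + 1):
    for max_lab in range(min_lab, numlabels + 1):
        off = (min_lab - 1) * (2 * numlabels + 2 - min_lab) // 2 + max_lab - min_lab
        print((min_lab, max_lab), off)
# (1,1)->0, (1,2)->1, (1,3)->2, (2,2)->3, (2,3)->4, (3,3)->5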