Example #1
from numpy import asarray, matrix

def getStochasticMatrix(inputMatrix: matrix,
                        normZeros: bool = False) -> matrix:
    # Normalize each column so that it sums to 1 (column-stochastic matrix).
    columnSums = asarray(inputMatrix.sum(0)).reshape(-1)
    if normZeros:
        for i in range(len(columnSums)):
            columnSum: float = columnSums[i]
            if columnSum == 0:
                inputMatrix[:, i] = 1
        return inputMatrix / inputMatrix.sum(0)
    else:
        columnSums[columnSums == 0] = 1
        return inputMatrix / columnSums
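A minimal usage sketch (the 2x2 test matrix below is illustrative, not from the original source; note that normZeros=True modifies the input matrix in place):

import numpy as np

M = np.matrix([[1.0, 0.0],
               [3.0, 0.0]])
print(getStochasticMatrix(M.copy()))                   # zero column stays all zeros
print(getStochasticMatrix(M.copy(), normZeros=True))   # zero column becomes uniform (0.5 each)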
Example #2
import numpy

# ``MA`` is assumed to be an alias for numpy.matrix defined elsewhere in the
# original module; only this excerpt is shown. The function resamples the
# current affine-parameter hypotheses in proportion to their weights ``prob``
# (particle-filter style), padding with ``afnv`` when too few copies are drawn.
def resample(curr_samples, prob, afnv):
    dt = numpy.dtype('f8')
    nsamples = MA(curr_samples.shape).item(0)
    if prob.sum() == 0:
        map_afnv = MA(numpy.ones(nsamples), dt) * afnv
        count = MA(numpy.zeros((prob.shape), dt))
    else:
        prob = prob / (prob.sum())
        count = MA(numpy.ceil(nsamples * prob), int)
        count = count.T
        map_afnv = MA(numpy.zeros((1, 6)), dt)
        for i in range(nsamples):
            for j in range(count[i]):
                map_afnv = MA(
                    numpy.concatenate((map_afnv, curr_samples[i, :]), axis=0),
                    dt)
        K = map_afnv.shape[0]
        map_afnv = map_afnv[1:K, :]
        ns = count.sum()
        if nsamples > ns:
            map_afnv = MA(
                numpy.concatenate(
                    (map_afnv, MA(numpy.ones(
                        ((nsamples - ns), 1)) * afnv, dt)),
                    axis=0), dt)
        map_afnv = map_afnv[0:nsamples, :]
    return map_afnv, count
Example #4
import numpy as np

def get_probability_transition_matrix(A: np.matrix):
    """
    Compute the transition probability matrix from the adjacency matrix.
    :param A: the adjacency matrix
    :return: the matrix of transition probabilities
    """
    n = len(A)
    res = [[0] * n for _ in range(n)]
    do = A.sum(axis=1)  # out-degree of each node
    for i in range(n):
        # Normalize each row by its out-degree so that it sums to 1
        # (assumes every node has at least one outgoing edge).
        res[i] = (A[i] / float(do.item(i))).A1
    return np.matrix(res)
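A short usage sketch with a toy 3-node adjacency matrix (illustrative only; a row with zero out-degree would produce NaN/inf entries rather than valid probabilities):

A = np.matrix([[0, 1, 1],
               [1, 0, 1],
               [1, 1, 0]])
P = get_probability_transition_matrix(A)
print(P)             # each row of P sums to 1
print(P.sum(axis=1))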
Example #5
    def _is_redundant(self,
                      matrix: np.matrix,
                      cutoff: Optional[float] = None) -> np.ndarray:
        """Identify redundant rows in a matrix that can be removed.

        Returns a boolean mask with one entry per row of ``matrix``.
        """
        if cutoff is None:
            cutoff = 1.0 - self.feasibility_tol
        # Avoid zero variances
        extra_col = matrix[:, 0] + 1
        # Avoid zero rows being correlated with constant rows
        extra_col[matrix.sum(axis=1) == 0] = 2
        corr = np.corrcoef(np.c_[matrix, extra_col])
        corr = np.tril(corr, -1)

        return (np.abs(corr) > cutoff).any(axis=1)
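A standalone sketch of the same correlation trick outside the class (the test matrix and the feasibility tolerance are illustrative assumptions):

import numpy as np

mat = np.array([[1.0, 2.0, 3.0],
                [1.0, 2.0, 3.0],   # exact duplicate of row 0 -> redundant
                [1.0, 0.0, 1.0]])
cutoff = 1.0 - 1e-9                # stand-in for 1 - feasibility_tol
extra_col = mat[:, 0] + 1
extra_col[mat.sum(axis=1) == 0] = 2
corr = np.tril(np.corrcoef(np.c_[mat, extra_col]), -1)
print((np.abs(corr) > cutoff).any(axis=1))   # [False  True False]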
Example #6
import numpy as np

def entropy_function(matrix: np.matrix):
    '''
    Calculates the Shannon entropy for a matrix representing the
    distribution of amino acids across a dataset of peptide
    sequences. Returns the information entropy for the given matrix.
    matrix: a 12x20 matrix representing the distributions of the 20
    amino acids across a dataset of 12-amino-acid-long peptides.
    '''
    total = matrix.sum()
    pr = matrix / total                                  # counts -> probabilities
    entropy_func_val = np.multiply(pr, np.log(pr))
    entropy_func_val = np.nan_to_num(entropy_func_val)   # treat 0 * log(0) as 0
    entropy_func_val = entropy_func_val.sum(axis=1, dtype='float') * -1
    return entropy_func_val
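A quick sanity check with a toy count matrix (illustrative only; reuses the np import from the example above):

counts = np.matrix(np.ones((12, 20)))    # uniform counts
print(entropy_function(counts))          # all 12 rows give the same entropy value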
Example #7
import numpy as np

# Power-iteration (Hotelling) estimate of the dominant eigenvalue and a
# max-normalized dominant eigenvector. ``epsilon`` is a module-level
# convergence tolerance defined elsewhere in the original source.
def hotelling(x: np.matrix):
    s = x.sum(axis=1)
    alpha = s / np.amax(s)
    # print("alpha = " + str(alpha))

    alpha_old = alpha - alpha
    while (np.absolute(alpha - alpha_old)).sum() >= epsilon:
        # print("============================")
        beta = x.dot(alpha)
        # print("beta = " + str(beta))
        alpha_old = alpha
        alpha = beta / np.amax(beta)
        # print("alpha = " + str(alpha))
        # print("difference = " + str(np.absolute(alpha - alpha_old)))
        # print("diff_sum = " + str((np.absolute(alpha - alpha_old)).sum()))
    return alpha, np.amax(beta)
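A minimal usage sketch; the 2x2 matrix and the epsilon value are illustrative assumptions, since epsilon is defined outside this excerpt:

epsilon = 1e-9   # assumed convergence tolerance for the loop above

x = np.matrix([[2.0, 1.0],
               [1.0, 3.0]])
alpha, lam = hotelling(x)
print(lam)                          # ~3.618, the dominant eigenvalue
print(np.linalg.eigvals(x).max())   # cross-check against numpy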
Example #8
    def test_method_create_obs_crowddensitylocal(
            label_gost: str, index_element: int, device_id: str,
            timestamp: datetime.datetime,
            density_matrix: matrix) -> CrowdDensityLocalObservation:
        try:
            crowd_density_local = CrowdDensityLocalObservation()
            crowd_density_local.set_label_cache(
                label_cache=UnitTestsUtilityObservables.
                test_method_get_label_cache(label_gostfilter=label_gost,
                                            index_element=index_element))
            crowd_density_local.timestamp = timestamp
            crowd_density_local.set_device_id(device_id=device_id)
            crowd_density_local.density_map = density_matrix
            crowd_density_local.density_count = density_matrix.sum()

            return crowd_density_local
        except Exception:
            # Signal failure to the caller instead of raising inside the test helper.
            return None