Пример #1
0
def iter_over_coordinates(input_coordinates_part, C, COV, structure, k):
    """
    input: input_coordinates_part numpy array, coordinates for model creation
           C numpy array kxd, matrix of k d-dimensional cluster centres
           COV numpy array kxdxd, matrix of covariance matrices
           structure list(int, list(floats), list(floats)),
                     number of non-hypertime dimensions, list of hypertime
                     radii and list of wavelengths
           k positive integer, number of clusters
    output: grid_densities_part numpy array kx1, number of part of cells
                                                 belonging to the clusters
    uses: dio.create_X(), np.shape(), gc.collect(), np.tile(),
          np.sum(), np.dot(), np.array(),
          cl.partition_matrix()
    objective: to find out the number of cells (part of them) belonging to
               the clusters
    """
    X = dio.create_X(input_coordinates_part, structure)
    n, d = np.shape(X)
    gc.collect()
    squared_distances = []
    for j in range(k):
        # one row of the centre repeated for every cell in this part
        centre_rows = np.tile(C[j, :], (n, 1))
        diff = dio.hypertime_substraction(X, centre_rows, structure)
        inv_cov = COV[j]
        # Mahalanobis-style squared distance of each cell to cluster j
        squared_distances.append(np.sum(np.dot(diff, inv_cov) * diff, axis=1))
        gc.collect()
    D = np.array(squared_distances)
    gc.collect()
    U = cl.partition_matrix(D, version='model')
    gc.collect()
    # sum memberships over cells -> one density value per cluster, shape kx1
    grid_densities_part = np.sum(U, axis=1, keepdims=True)
    return grid_densities_part
Пример #2
0
def initialization(X, k, structure, params):
    """
    Choose k initial cluster centres from X and build a hard partition.

    input: X numpy array nxd, data points (already transformed by the
               hypertime projection)
           k positive integer, number of clusters
           structure list(int, list(floats), list(floats)),
                     number of non-hypertime dimensions, list of hypertime
                     radii and list of wavelengths
           params, not used in this function
               # NOTE(review): presumably kept for interface compatibility
               # with other initializers — confirm against callers
    output: C numpy array kxd, matrix of k chosen cluster centres
            U numpy array kxn, hard partition matrix from
                               cl.partition_matrix()
    uses: np.shape(), np.empty(), np.random.choice(), bs.substraction(),
          np.array(), np.sqrt(), np.sum(), np.max(), np.eye(),
          np.unravel_index(), np.argmin(), cl.weights_matrix(),
          cl.partition_matrix()
    objective: to pick k mutually distant centres from an oversampled pool
               of random data points and initialize the clustering
    """
    n, d = np.shape(X)
    C = np.empty((k, d))
    WEIGHTS = np.empty((k, n))

    # attempt at the alternative seeding algorithm (comment was in Czech:
    # "I'll try to do that algorithm of his")
    Lambda = 5  # oversampling factor: draw k*Lambda candidate centres
    Clambda = np.empty((k * Lambda, d))
    for redundant in range(k * Lambda):
        # each candidate is a data point sampled uniformly at random
        Clambda[redundant] = X[np.random.choice(n)]
    DIFFERENCES = []
    for point in Clambda:
        row = bs.substraction(Clambda, point, 'def', structure)
        DIFFERENCES.append(row)
    DIFFERENCES = np.array(DIFFERENCES)
    # Euclidean norms of the pairwise candidate differences
    DISTANCES = np.sqrt(np.sum((DIFFERENCES)**2, axis=2))
    # sentinel larger than any real distance, used to mark deleted entries
    bigConstant = np.max(DISTANCES) * 2
    # mask the zero self-distances on the diagonal so argmin finds real pairs
    SEARCH = DISTANCES + np.eye(k * Lambda) * bigConstant
    sums = np.sum(DISTANCES, axis=0)
    # greedily delete k*(Lambda-1) candidates: take the closest remaining
    # pair and drop the member with the smaller total distance to all others
    for iteration in range(k * (Lambda - 1)):
        ind = np.unravel_index(np.argmin(SEARCH, axis=None), SEARCH.shape)
        toDel = np.argmin(sums[list(ind)])
        SEARCH[:, ind[toDel]] = bigConstant
        SEARCH[ind[toDel], :] = bigConstant
        sums[ind[toDel]] = bigConstant  # mark candidate as deleted
    # the k candidates never marked deleted become the centres
    C = Clambda[sums != bigConstant]
    for cluster in range(k):
        XminusC = bs.substraction(X, C[cluster], 'def', structure)
        WEIGHTS[cluster] = cl.weights_matrix(XminusC, weights=None, uid='hard')
    U = cl.partition_matrix(WEIGHTS, uid='hard')
    """
    # PUVODNI A FUNKCNI
    # initialize M to random, initialize C to spherical with variance 1
    for cluster in xrange(k):
        C[cluster] = X[np.random.choice(n)]
        XminusC = bs.substraction(X, C[cluster], 'def', structure)
        WEIGHTS[cluster] = weights_matrix(XminusC, weights=None, uid='hard')
    U = partition_matrix(WEIGHTS, uid='hard')
    # KONEC PUVODNIHO A FUNKCNIHO
    """
    return C, U
Пример #3
0
def one_freq(one_input_coordinate, C, COV, structure, k,
                    density_integrals):
    """
    input: one_input_coordinate numpy array, coordinates for model creation
           C numpy array kxd, matrix of k d-dimensional cluster centres
           COV numpy array kxdxd, matrix of covariance matrices
           structure list(int, list(floats), list(floats)),
                     number of non-hypertime dimensions, list of hypertime
                     radii and list of wavelengths
           k positive integer, number of clusters
           density_integrals numpy array kx1, matrix of ratios between
                                              measurements and grid cells
                                              belonging to the clusters
    output: freq array len(input_coordinates_part)x1,
                                           frequencies(stat) obtained
                                           from model in positions of part
                                           of input_coordinates
    uses: dio.create_X(), np.shape(), np.sum(), np.dot(), np.array(),
          cl.partition_matrix()
    objective: to create grid of frequencies(stat) over a part time-space
               (histogram)
    """
    X = dio.create_X(one_input_coordinate, structure)
    n, d = np.shape(X)
    distances = []
    for j in range(k):
        # slice keeps a 2D (1, d) centre row for the subtraction helper
        diff = dio.hypertime_substraction(X, C[j:j + 1], structure)
        inv_cov = COV[j]
        # Mahalanobis-style squared distance of the position to cluster j
        distances.append(np.sum(np.dot(diff, inv_cov) * diff, axis=1))
    D = np.array(distances)
    U = cl.partition_matrix(D, version='model')
    # scale memberships into frequencies via the per-cluster ratios
    U = U * density_integrals
    freq = np.sum(U, axis=0)
    return freq