Example #1
    def segment(self):
        """
        Using the trained model,
        segment the image matrix into
        the pre-specified number of
        components. Returns the original
        image matrix with each
        pixel's intensity replaced
        with its max-likelihood
        component mean.

        returns:
        segment = numpy.ndarray[numpy.ndarray[float]]
        """
        # TODO: finish this
        dim = self.flatten_image.shape
        original_dim = self.image_matrix.shape
        indicator = np.zeros((dim[0], self.num_components))
        # squared distance from each pixel to each component mean (broadcasts (N, 1) against (K,))
        distance = np.square(np.subtract(self.flatten_image, self.means))
        indicator_indices = np.argmin(distance, axis=1)
        indx = np.arange(dim[0]).reshape(-1, 1)
        indicator_indices = indicator_indices.reshape(-1, 1)
        indicator[indx, indicator_indices] = 1  # one-hot assignment of each pixel to its nearest mean
        means = self.means.copy()
        means = means.reshape(1, -1)
        for i in range(self.num_components):
            indices = np.where(indicator[:, i] == 1)
            self.flatten_image[indices] = means[0, i]
        updated_image_values = unflatten_image_matrix(self.flatten_image,
                                                      original_dim[1])
        return updated_image_values
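
A note on helpers: every example on this page relies on flatten_image_matrix and unflatten_image_matrix, which are not shown. A minimal sketch of what they are assumed to do (reshape between an H x W (x D) image and an (H*W) x D pixel array) follows; the exact originals may differ.

import numpy as np

def flatten_image_matrix(image_matrix):
    # Assumed behavior: reshape an (H, W) or (H, W, D) image into an (H*W, D) pixel array.
    if image_matrix.ndim == 3:
        height, width, depth = image_matrix.shape
    else:
        height, width = image_matrix.shape
        depth = 1
    return image_matrix.reshape(height * width, depth)

def unflatten_image_matrix(image_matrix, width):
    # Assumed behavior: invert flatten_image_matrix, given the original image width.
    height = image_matrix.shape[0] // width
    depth = image_matrix.shape[-1] if image_matrix.ndim > 1 else 1
    if depth > 1:
        return image_matrix.reshape(height, width, depth)
    return image_matrix.reshape(height, width)
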
Example #2
    def segment(self):

        flattened = flatten_image_matrix(self.image_matrix)  # n X 1

        def component_joint_prob(i):
            a = 0.5 * np.log(2 * np.pi * self.variances[i])
            b = (np.square(flattened - self.means[i])) / (2 *
                                                          self.variances[i])
            c = np.log(self.mixing_coefficients[i])
            return -a - b + c

        z = np.array([
            component_joint_prob(i) for i in range(self.num_components)
        ])  # 3 X n
        gamma_nk_denom = logsumexp(z, axis=0)
        gamma_component = []
        for k in range(self.num_components):
            gamma_nk_numer = z[k]
            gamma_nk = np.exp(gamma_nk_numer - gamma_nk_denom)  # responsibility of component k for each pixel
            gamma_component.append(gamma_nk)
        idx = np.argmax(np.array(gamma_component), axis=0)
        image_matrix = np.array([self.means[i] for i in idx.flatten()])
        update_image_values = unflatten_image_matrix(
            image_matrix, self.image_matrix.shape[1])
        return update_image_values
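
The example above computes responsibilities in log space. A condensed, self-contained sketch of that step, assuming the per-component log joint probabilities sit on axis 0 of z and that logsumexp comes from scipy.special:

import numpy as np
from scipy.special import logsumexp

z = np.log(np.random.rand(3, 10))         # stand-in for the (num_components, n) log joint probabilities
gamma = np.exp(z - logsumexp(z, axis=0))  # posterior responsibility of each component for each pixel
labels = np.argmax(gamma, axis=0)         # max-responsibility component per pixel

Since the log-sum-exp denominator is shared by all components, taking argmax over z directly yields the same labels without exponentiating.
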
Example #3
    def segment(self):
        """
        Using the trained model, 
        segment the image matrix into
        the pre-specified number of 
        components. Returns the original 
        image matrix with each
        pixel's intensity replaced 
        with its max-likelihood 
        component mean.
        
        returns:
        segment = numpy.ndarray[numpy.ndarray[float]]
        """
        # TODO: finish this
        num = []
        for i in range(self.num_components):
            num.append(-.5 * np.log(2 * np.pi * self.variances[i]) -
                       (((self.flat_values - self.means[i])**2) /
                        (2 * self.variances[i])))
        num = np.array(num)
        seg_index = num.argmax(axis=0)
        for i in range(self.num_components):
            self.flat_values[seg_index == i] = self.means[i]

        segment = unflatten_image_matrix(self.flat_values, self.width)

        return segment
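
The same max-likelihood assignment can be written without a per-component loop. A vectorized sketch (function name and shapes are illustrative, not from the original assignment code):

import numpy as np

def assign_to_component_means(pixels, means, variances):
    # pixels: (N, 1) flattened intensities; means, variances: (K,) per-component parameters
    log_density = (-0.5 * np.log(2 * np.pi * variances)
                   - (pixels - means) ** 2 / (2 * variances))  # (N, K) by broadcasting
    labels = np.argmax(log_density, axis=1)                    # most likely component per pixel
    return means[labels].reshape(-1, 1)                        # replace each pixel with its component mean

As in the variant above, the mixing coefficients are omitted here; Example #2 additionally adds the log mixing coefficient per component, which can change the assignment when the coefficients are unequal.
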
Example #4
def k_means_cluster(image_values, k=3, initial_means=None):
    """
    Separate the provided RGB values into
    k separate clusters using the k-means algorithm,
    then return an updated version of the image
    with the original values replaced with
    the corresponding cluster values.

    params:
    image_values = numpy.ndarray[numpy.ndarray[numpy.ndarray[float]]]
    k = int
    initial_means = numpy.ndarray[numpy.ndarray[float]] or None

    returns:
    updated_image_values = numpy.ndarray[numpy.ndarray[numpy.ndarray[float]]]
    """
    # TODO: finish this function
    if (len(image_values.shape) == 3):
        height, width, depth = image_values.shape
    else:
        height, width = image_values.shape
        depth = 1
    flat_values = flatten_image_matrix(image_values)
    #if flat_values.shape[1]==1:
    #   flat_values.shape=(len(flat_values,),)

    if initial_means is None:
        initial_means_index = np.array(sample(range(len(flat_values)), k))
        means = np.array(flat_values[initial_means_index])
    else:
        means = np.array(initial_means)

    while True:  # iterate until the cluster means stop moving
        dist = []
        for i in range(k):
            dist.append(np.sqrt(((flat_values - means[i])**2).sum(axis=1)))

        dist = np.array(dist)
        cluster_indices = dist.T.argmin(axis=1)
        new_means = []
        diff = []
        for i in range(k):
            new_means.append(flat_values[cluster_indices == i].mean(axis=0))
            diff.append(np.sqrt(((new_means[i] - means[i])**2).sum(axis=0)))
        means = new_means[:]
        if max(diff) < .01:  # no mean moved by more than the tolerance: converged
            break

    for i in range(k):
        flat_values[cluster_indices == i] = flat_values[cluster_indices ==
                                                        i].mean(axis=0).T
    new_values = unflatten_image_matrix(flat_values, width)

    return new_values
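
A minimal usage sketch for this function, assuming the flatten/unflatten helpers sketched after Example #1, that sample is imported from the random module, and a synthetic RGB image:

import numpy as np

image = np.random.rand(64, 48, 3)        # synthetic 64x48 RGB image, values in [0, 1]
segmented = k_means_cluster(image, k=3)  # each pixel replaced by its cluster's mean color
print(segmented.shape)                   # expected to match the input height and width
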
Example #5
def k_means_cluster(image_values, k=3, initial_means=None):
    """
    Separate the provided RGB values into
    k separate clusters using the k-means algorithm,
    then return an updated version of the image
    with the original values replaced with
    the corresponding cluster values.

    params:
    image_values = numpy.ndarray[numpy.ndarray[numpy.ndarray[float]]]
    k = int
    initial_means = numpy.ndarray[numpy.ndarray[float]] or None

    returns:
    updated_image_values = numpy.ndarray[numpy.ndarray[numpy.ndarray[float]]]
    """
    # TODO: finish this function
    original_dim = image_values.shape
    flatten_image = flatten_image_matrix(image_values)
    dim = flatten_image.shape
    indicator_indices = np.zeros((dim[0], 1))
    distance = np.zeros((dim[0], k))
    if initial_means is None:
        means_indices = np.random.choice(dim[0], k, replace=False)
        means = flatten_image[means_indices, :].copy()
    else:
        means = np.array(initial_means, dtype=float)
    previous_means = np.zeros(means.shape)
    while not np.array_equal(means, previous_means):
        indicator = np.zeros((dim[0], k))
        previous_means = means.copy()
        for i in range(k):
            distance[:, i] = np.sum(np.power(
                np.subtract(flatten_image, means[i, :]), 2),
                                    axis=1,
                                    dtype=float)
        indicator_indices = np.argmin(distance, axis=1)
        indx = np.arange(dim[0]).reshape(-1, 1)
        indicator_indices = indicator_indices.reshape(-1, 1)
        indicator[indx, indicator_indices] = 1
        for i in range(k):
            means[i, :] = np.divide(
                np.sum(np.multiply(indicator[:, i].reshape(-1, 1),
                                   flatten_image),
                       axis=0,
                       dtype=float), np.sum(indicator[:, i], dtype=float))

    for i in range(k):
        indices = np.where(indicator[:, i] == 1)
        flatten_image[indices, :] = means[i, :]
    updated_image_values = unflatten_image_matrix(flatten_image,
                                                  original_dim[1])
    return updated_image_values
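
One design note on the loop above: np.array_equal stops only when the means repeat exactly, which k-means does reach eventually but may take many iterations on large images. A tolerance-based check is a common alternative; an illustrative comparison (not part of the original):

import numpy as np

means = np.array([[0.10, 0.20], [0.70, 0.80]])
previous_means = means + 1e-9                          # a sub-tolerance change
print(np.array_equal(means, previous_means))           # False: exact-equality test keeps iterating
print(np.allclose(means, previous_means, atol=1e-6))   # True: tolerance-based test would stop here
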
Example #6
def k_means_cluster(image_values, k=3, initial_means=None):
    """
    Separate the provided RGB values into
    k separate clusters using the k-means algorithm,
    then return an updated version of the image
    with the original values replaced with
    the corresponding cluster values.

    params:
    image_values = numpy.ndarray[numpy.ndarray[numpy.ndarray[float]]]
    k = int
    initial_means = numpy.ndarray[numpy.ndarray[float]] or None

    returns:
    updated_image_values = numpy.ndarray[numpy.ndarray[numpy.ndarray[float]]]
    """
    #    raise NotImplementedError()

    #1. If initial is None, create a random initial point list from data
    dim = [
        np.size(image_values, 0),
        np.size(image_values, 1),
        np.size(image_values, 2)
    ]
    if initial_means is None:
        initial_means = getInitialMeans(image_values, k, dim)

    #2. Loop through initial list and subtract it from the data points
    sz = 1
    for i in range(len(dim) - 1):
        sz = sz * dim[i]
    arr_reshaped = flatten_image_matrix(
        image_values)  #np.reshape(image_values,(sz,dim[len(dim)-1]))

    #3. Square sum and root results to get distance from eack k point
    kArray = getDistances(arr_reshaped, initial_means, k)

    #4. Build array containing dataset for each k
    kImage, kIndexes, kMeans = getMeans(arr_reshaped, kArray, k)
    #5. Test for convergence and compile new image data to return
    while (getConvergence(kMeans, initial_means) == False):
        initial_means = kMeans
        kArray = getDistances(arr_reshaped, initial_means, k)
        kImage, kIndexes, kMeans = getMeans(arr_reshaped, kArray, k)

    arr_orig = np.ndarray(shape=(sz, dim[len(dim) - 1]))
    for i in range(k):
        arr_orig[kIndexes[i]] = kMeans[i]

    arr_orig = unflatten_image_matrix(arr_orig, dim[0])
    return arr_orig
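
Example #6 calls four helpers (getInitialMeans, getDistances, getMeans, getConvergence) that are not shown on this page. The sketches below are hypothetical implementations consistent with how they are called above, not the original code:

import numpy as np

def getInitialMeans(image_values, k, dim):
    # Hypothetical: pick k distinct random pixels as the initial means.
    flat = image_values.reshape(-1, dim[-1])
    idx = np.random.choice(flat.shape[0], k, replace=False)
    return flat[idx].astype(float)

def getDistances(arr_reshaped, means, k):
    # Hypothetical: squared distance from every pixel to each of the k means, shape (k, N).
    return np.array([np.sum((arr_reshaped - means[i]) ** 2, axis=1) for i in range(k)])

def getMeans(arr_reshaped, kArray, k):
    # Hypothetical: assign each pixel to its nearest mean and recompute the k means.
    assignments = np.argmin(kArray, axis=0)
    kIndexes = [np.where(assignments == i)[0] for i in range(k)]
    kImage = [arr_reshaped[kIndexes[i]] for i in range(k)]
    kMeans = np.array([kImage[i].mean(axis=0) for i in range(k)])
    return kImage, kIndexes, kMeans

def getConvergence(kMeans, previous_means):
    # Hypothetical: converged when the means stop moving (within a small tolerance).
    return np.allclose(np.asarray(kMeans), np.asarray(previous_means))
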
Example #7
def k_means_cluster(image_values, k=3, initial_means=None):

    import random

    if image_values.ndim == 3:
        row, col, depth = image_values.shape
    else:
        row, col = image_values.shape
        depth = 1

    if initial_means is None:
        initial_means = np.zeros((k, depth))
        random_rows = random.sample(range(row), k)
        random_cols = random.sample(range(col), k)
        for count in range(k):
            initial_means[count] = image_values[random_rows[count]][
                random_cols[count]]

    distance = np.zeros((k, row * col))
    image_values_flat = flatten_image_matrix(image_values)
    means = np.array(initial_means, dtype=float)
    prev_cluster_allocation = np.zeros((1, row * col))

    while (1):
        for cluster_number in range(k):
            distance[cluster_number] = np.linalg.norm(image_values_flat -
                                                      means[cluster_number],
                                                      axis=1)

        cluster_allocation = np.argmin(distance, axis=0)
        if (np.array_equal(prev_cluster_allocation, cluster_allocation)):
            break

        else:
            prev_cluster_allocation = cluster_allocation

        for cluster_number in range(k):
            indices = np.where(cluster_allocation == cluster_number)
            means[cluster_number] = np.mean(image_values_flat[indices],
                                            axis=0)

    for cluster_number in range(k):
        indices = np.where(cluster_allocation == cluster_number)
        image_values_flat[indices] = means[cluster_number]

    updated_image_values = unflatten_image_matrix(image_values_flat, col)
    return updated_image_values
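
A side note on the distance computation above: the per-cluster loop can be folded into one broadcasted call. An illustrative alternative, with stand-ins for image_values_flat and the k cluster means:

import numpy as np

pixels = np.random.rand(12, 3)                                     # stand-in for image_values_flat, shape (N, depth)
means = np.random.rand(4, 3)                                       # stand-in for the k cluster means, shape (k, depth)
distance = np.linalg.norm(pixels[None, :, :] - means[:, None, :],  # (k, N): pixel-to-mean distances
                          axis=2)
labels = np.argmin(distance, axis=0)                               # nearest cluster per pixel
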
Example #8
def k_means_cluster(image_values, k=3, initial_means=None):
    """
    Separate the provided RGB values into
    k separate clusters using the k-means algorithm,
    then return an updated version of the image
    with the original values replaced with
    the corresponding cluster values.

    params:
    image_values = numpy.ndarray[numpy.ndarray[numpy.ndarray[float]]]
    k = int
    initial_means = numpy.ndarray[numpy.ndarray[float]] or None

    returns:
    updated_image_values = numpy.ndarray[numpy.ndarray[numpy.ndarray[float]]]
    """
    flattened = flatten_image_matrix(image_values)
    count = 0
    # M step, random prototype vector
    if initial_means is None:
        initial_means_idx = np.random.choice(flattened.shape[0],
                                             k,
                                             replace=False)
        mu_k = flattened[initial_means_idx]
    else:
        mu_k = initial_means

    while count < 1:
        # E step - updating r_nk
        squared_dist = np.array([
            np.square(flattened - mu_k[i]).sum(axis=1)
            for i in range(mu_k.shape[0])
        ])
        r_nk = np.argmin(squared_dist, axis=0)
        # M step - update mu_k
        clusters = [flattened[(r_nk == i)] for i in range(k)]  # plain list: clusters can differ in size
        mu_k_prev = mu_k
        mu_k = np.array([clusters[i].mean(axis=0) for i in range(k)])
        # Convergence test
        if np.array_equal(mu_k_prev, mu_k):
            count += 1

    image_matrix = np.array([mu_k[label] for label in r_nk])
    updated_image_values = unflatten_image_matrix(image_matrix,
                                                  image_values.shape[1])
    return updated_image_values
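
One caveat in the M step above: if a cluster receives no pixels, its mean(axis=0) yields NaNs. A possible guard (illustrative, not part of the original) is to keep the previous mean for any empty cluster:

import numpy as np

k = 3
flattened = np.random.rand(10, 1)
r_nk = np.array([0, 0, 1, 1, 0, 1, 0, 1, 0, 1])       # no pixel assigned to cluster 2
mu_k_prev = np.random.rand(k, 1)
clusters = [flattened[r_nk == i] for i in range(k)]
mu_k = np.array([clusters[i].mean(axis=0) if len(clusters[i]) > 0 else mu_k_prev[i]
                 for i in range(k)])                   # an empty cluster keeps its previous mean
print(mu_k)
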