Code example #1
def main():

    # Approximate mean values are obtained by running k-means (scikit-learn)
    # on each image; the cluster centres seed fit_EM.

    for name in ("cow", "fox", "owl", "zebra"):
        data, image = read_data("../data/" + name + ".txt", True)
        X = np.reshape(image, (image.shape[0] * image.shape[1], image.shape[2]))
        kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
        clusters = kmeans.cluster_centers_
        fit_EM(clusters[0], clusters[1], name, data, image)
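Note: every snippet on this page depends on the course-provided read_data / write_data helpers, which are not shown. A minimal sketch of what read_data appears to do, inferred from how the snippets index data (the "row col c1 c2 c3" line layout is an assumption, not the helpers' actual code):

import numpy as np

def read_data_sketch(filename):
    # Hypothetical stand-in for read_data: each line of the text file is
    # assumed to hold "row col c1 c2 c3". Returns the flat (N, 5) matrix
    # and the channel values reshaped into an image array.
    data = np.loadtxt(filename)
    rows = int(data[:, 0].max()) + 1
    cols = int(data[:, 1].max()) + 1
    image = data[:, 2:].reshape(rows, cols, -1)
    return data, image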
Code example #2
def variational_inference(data):
    # Mean-field variational inference for Ising-model denoising. L1 and L_1
    # are module-level likelihood distributions for pixel values +1 and -1;
    # J, sigma and filenumber are module-level globals used in the filenames.
    mus = np.zeros(data.shape)
    # initialize mu from the observed pixel likelihoods
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            if data[i, j] == 1:
                mus[i, j] = L1.pdf(1) - L1.pdf(-1)
            else:
                mus[i, j] = L_1.pdf(1) - L_1.pdf(-1)
    entropy = compute_entropy(data, mus)  # only guards the first loop test
    new_entropy = 1
    iteration = 0
    print("initialized")
    while abs(entropy - new_entropy) > 0.0001:
        iteration += 1
        entropy = new_entropy
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                mus = update_mu(i, j, data, mus)
        new_entropy = compute_entropy(data, mus)
        print(new_entropy)
        # Threshold the current means to {-1, 1} and write out this
        # epoch's denoised image.
        denoised_data = np.round(mus.copy())
        denoised_data = transform_back_values(denoised_data)
        denoised_data = transform_back_shape(denoised_data)
        out_name = (str(J) + '_J_' + str(sigma) + '_sigma_' +
                    str(iteration) + '_epoch' + str(filenumber) +
                    '_noise_vi.txt')
        write_data(denoised_data, out_name)
        read_data(out_name, True, False, True)

    return mus
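update_mu and compute_entropy are defined elsewhere in the project. A minimal sketch of the standard mean-field update that update_mu would implement (an assumption based on the Ising-denoising setting, not the project's exact code; it reuses the same L1 / L_1 likelihoods and coupling J assumed above):

import numpy as np

def update_mu_sketch(i, j, data, mus):
    # Sum the current mean-field parameters of the 4-connected neighbours.
    nbr = 0.0
    if i > 0:
        nbr += mus[i - 1, j]
    if i < data.shape[0] - 1:
        nbr += mus[i + 1, j]
    if j > 0:
        nbr += mus[i, j - 1]
    if j < data.shape[1] - 1:
        nbr += mus[i, j + 1]
    # Standard mean-field update: tanh of the neighbour field plus half the
    # log-likelihood ratio of the observed pixel under x = +1 vs x = -1.
    mus[i, j] = np.tanh(J * nbr + 0.5 * (L1.logpdf(data[i, j]) -
                                         L_1.logpdf(data[i, j])))
    return mus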
Code example #3
def output(data_dict, filename, prefix='outputs/a2/'):
    for seg_type, data in data_dict.items():  # avoid shadowing the builtin `type`
        out_path = prefix + seg_type + '_' + filename
        write_data(data, out_path)
        read_data(out_path,
                  False,
                  save=True,
                  save_name=out_path.replace('.txt', '.jpg'))
Code example #4
def main():
    # Denoise noisy images 1-4 with the Gibbs sampling algorithm and save the
    # denoised data matrix and image as text and png files respectively
    for a in range(1, 5):
        noisetxt = "../a1/" + str(a) + "_noise.txt"
        print(noisetxt)
        data, image = read_data(noisetxt, True)
        data_arr = data_to_arr(data)
        new_data_arr = gibbs_sample(data_arr, 15)
        new_data = arr_to_data(new_data_arr)
        denoisetxt = "../output/" + str(a) + "_denoise_gibbs.txt"
        write_data(new_data, denoisetxt)
        denoisepng = "../output/" + str(a) + "_denoise_gibbs.png"
        read_data(denoisetxt, True, False, True, denoisepng)
Code example #5
def denoise_gibbs(filenumber):
    """Transform the data, perform Gibbs sampling, then transform it back."""
    data, image = read_data('../a1/' + str(filenumber) + '_noise.txt', True)
    data = transform_data_shape(data)
    # put data in {-1, 1} space
    data = transform_data_values(data)

    denoised_data = gibbs_sampling(data)
    denoised_data = transform_back_values(denoised_data)
    denoised_data = transform_back_shape(denoised_data)
    # J and sigma are module-level globals used in the output filename
    out_name = ('final_denoised' + str(J) + '_J_' + str(sigma) + '_sigma_' +
                str(filenumber) + '_noise.txt')
    write_data(denoised_data, out_name)
    read_data(out_name, True, False, True)
    return denoised_data
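The transform_* helpers are project-specific and not shown. A plausible minimal sketch, assuming the flat data rows are (row, col, value) with pixel values in {0, 255} (both the row layout and the threshold are assumptions):

import numpy as np

def transform_data_shape_sketch(data):
    # Hypothetical: rebuild the 2-D pixel grid from (row, col, value) rows.
    rows = int(data[:, 0].max()) + 1
    cols = int(data[:, 1].max()) + 1
    return data[:, 2].reshape(rows, cols)

def transform_data_values_sketch(grid):
    # Hypothetical: map {0, 255} pixel values into {-1, 1}.
    return np.where(grid > 127, 1.0, -1.0)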
Code example #6
def main():
    # Segment the cow, fox, owl and zebra images using Expectation-Maximisation
    # and save the corresponding mask, seg1 and seg2 as text and jpg files
    for name in ['cow', 'fox', 'owl', 'zebra']:
        inputtxt = "../a2/" + name + ".txt"
        print(inputtxt)
        data, image = read_data(inputtxt, False)
        data_arr = data_to_arr(data)
        responsibilities = determine_responsibilities(data_arr, 10)
        mask_data, seg1_data, seg2_data = segment_image(data, responsibilities)
        write_read_data(name, "mask", mask_data)
        write_read_data(name, "seg1", seg1_data)
        write_read_data(name, "seg2", seg2_data)
Code example #7
def main():

    for img in range(2, 3):  # processes only image 2; widen the range for images 1-4
        print("Denoising for image " + str(img))
        data, image = read_data("../data/" + str(img) + "_noise.txt",
                                True,
                                visualize=True)

        print(data.shape)
        print(image.shape)

        image[image == 0] = -1
        image[image == 255] = 1

        avg = denoise(image, 0.7, 20, 100)

        avg[avg >= 0] = 255
        avg[avg < 0] = 0

        print(avg.shape)
        rows = avg.shape[0]
        cols = avg.shape[1]
        counter = 0

        # Copy the denoised pixel values back into the flat data matrix.
        for i in range(rows):
            for j in range(cols):
                data[counter][2] = avg[i][j][0]
                counter += 1

        write_data(data, "../output/" + str(img) + "_denoise.txt")
        read_data("../output/" + str(img) + "_denoise.txt",
                  True,
                  save=True,
                  save_name="../output/" + str(img) + "_denoise.jpg")
        print("Finished writing data. Please check " + str(img) +
              "_denoise.jpg \n")
Code example #8
    def __init__(self, fname):

        #Variable holders for data

        self.data = 0
        self.image = 0
        self.X = 0

        #Variable holders for probability related

        self.posterior = [[0, 0]]
        self.prior = [0, 0]
        self.normalization = [0, 0]
        self.post_sum = [0, 0]
        self.Mean = [[0, 0, 0], [0, 0, 0]]

        self.Cov1 = np.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
        self.Cov2 = np.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]])

        #Preprocessing of data

        self.data, self.image = read_data(fname, True)
        self.X = self.data[:, 2:]

        #Initialization of means,variance
        self.Mean[0] = [self.X[0][0], self.X[0][1], self.X[0][2]]
        temp = [0, 0, 0]

        for i in self.X:  # per channel, find the value farthest from cluster 1's mean and assign it to cluster 2

            if temp[0] < ((self.Mean[0][0] - i[0]) * (self.Mean[0][0] - i[0])):
                temp[0] = (self.Mean[0][0] - i[0]) * (self.Mean[0][0] - i[0])
                self.Mean[1][0] = i[0]

            if temp[1] < ((self.Mean[0][1] - i[1]) * (self.Mean[0][1] - i[1])):
                temp[1] = (self.Mean[0][1] - i[1]) * (self.Mean[0][1] - i[1])
                self.Mean[1][1] = i[1]

            if temp[2] < ((self.Mean[0][2] - i[2]) * (self.Mean[0][2] - i[2])):
                temp[2] = (self.Mean[0][2] - i[2]) * (self.Mean[0][2] - i[2])
                self.Mean[1][2] = i[2]

        # Diagonal initial covariances. The original wrapped these constants
        # in np.cov(), which replaced the intended diagonal matrices with an
        # unrelated covariance of their rows; the diagonals are used directly.
        self.Cov1 = np.asarray([[20., 0, 0], [0, 20, 0], [0, 0, 20]])  # covariance 1
        self.Cov2 = np.asarray([[15., 0, 0], [0, 15, 0], [0, 0, 15]])  # covariance 2
        self.expect_maxim(fname)
Code example #9
def sampler(input_path, output_path, c, J, lamda, n_iter):
    _, noisy_img = io_data.read_data(input_path, True)
    # Convert to shape [rows, cols]
    noisy_img = np.asarray(noisy_img).reshape(noisy_img.shape[0:2])
    noisy_img[noisy_img < 128] = -1  # Use comparisons, since the pixels are floats.
    noisy_img[noisy_img > 128] = 1

    # Initialize mean value.
    mean = np.zeros(noisy_img.shape)

    # Per-pixel log likelihoods of the observation under x = +1 and x = -1.
    # Note that scipy's norm takes a standard deviation as its scale, so
    # c * c passes a scale of c squared.
    L_1 = norm.logpdf(noisy_img, 1, c * c)
    L_neg1 = norm.logpdf(noisy_img, -1, c * c)

    for _ in range(n_iter):
        updated_mean = np.zeros(noisy_img.shape)

        for i in range(noisy_img.shape[0]):
            for j in range(noisy_img.shape[1]):
                nbr = 0
                if i > 0:
                    nbr += mean[i - 1][j]
                if i < noisy_img.shape[0] - 1:
                    nbr += mean[i + 1][j]
                if j > 0:
                    nbr += mean[i][j - 1]
                if j < noisy_img.shape[1] - 1:
                    nbr += mean[i][j + 1]

                # Damped mean-field update: blend the previous mean with the
                # tanh of the neighbour field plus half the log-likelihood ratio.
                updated_mean[i][j] = lamda * mean[i][j] + \
                                     (1 - lamda) * np.tanh(nbr * J + 0.5 * (L_1[i][j] - L_neg1[i][j]))

        mean = updated_mean

    # Restore the graph.
    noisy_img[mean > 0] = 255
    noisy_img[mean < 0] = 0
    noisy_img = np.expand_dims(noisy_img, 2)

    # Save the image.
    cv2.imwrite(output_path, noisy_img)
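A hypothetical invocation; the paths and parameter values below are illustrative assumptions, not taken from the source:

# Denoise image 1 with noise parameter c = 2.0, coupling J = 1.0, damping
# lamda = 0.5, for 15 sweeps. All values here are illustrative.
sampler("../a1/1_noise.txt", "../output/1_denoise_vi.png",
        c=2.0, J=1.0, lamda=0.5, n_iter=15)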
Code example #10
def sampler(input_path, output_path, var, J, n_iter):
    _, noisy_img = io_data.read_data(input_path, True)
    # Convert to shape [rows, cols]
    noisy_img = np.asarray(noisy_img).reshape(noisy_img.shape[0:2])
    noisy_img[noisy_img < 128] = -1  # Use comparisons, since the pixels are floats.
    noisy_img[noisy_img > 128] = 1

    # Per-pixel likelihoods of the observation under x = +1 and x = -1.
    # Note that scipy's norm takes a standard deviation as its scale, so
    # `var` is used as a standard deviation here.
    norm_1 = norm.pdf(noisy_img, 1, var)
    norm_negative_1 = norm.pdf(noisy_img, -1, var)

    for _ in range(n_iter):  # the loop index was unused and shadowed `iter`
        for i in range(noisy_img.shape[0]):
            for j in range(noisy_img.shape[1]):
                nbr = 0
                if i > 0:
                    nbr += noisy_img[i - 1][j]
                if i < noisy_img.shape[0] - 1:
                    nbr += noisy_img[i + 1][j]
                if j > 0:
                    nbr += noisy_img[i][j - 1]
                if j < noisy_img.shape[1] - 1:
                    nbr += noisy_img[i][j + 1]

                potential_1 = math.exp(J * nbr)
                potential_negative_1 = math.exp(-J * nbr)

                prob_1 = norm_1[i][j] * potential_1
                prob_negative_1 = norm_negative_1[i][j] * potential_negative_1

                # Normalize.
                prob_1 /= (prob_1 + prob_negative_1)

                noisy_img[i][j] = (np.random.rand() < prob_1) * 2 - 1

    noisy_img[noisy_img > 0] = 255
    noisy_img[noisy_img < 0] = 0
    noisy_img = np.expand_dims(noisy_img, 2)

    # Save the image.
    cv2.imwrite(output_path, noisy_img)
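As with the variational version above, a hypothetical invocation; paths and values are illustrative assumptions:

# Denoise image 1 with observation scale 2.0 and coupling J = 1.0 for 15
# sweeps. All values here are illustrative.
sampler("../a1/1_noise.txt", "../output/1_denoise_gibbs.png",
        var=2.0, J=1.0, n_iter=15)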
Code example #11
def main():
    prefix = 'a2/'
    for filename in ['cow.txt', 'fox.txt', 'owl.txt', 'zebra.txt']:
        print('Solving ' + filename + ':...')
        data, image = read_data(prefix + filename, True)
        eps = 0.5

        # Hand-tuned initialization for the owl image; k-means otherwise.
        if filename == 'owl.txt':
            mix0 = 0.4
            means = np.array([[55, 0, 20], [80, 0, 0]])
            cov0 = np.array([[20, 0, 0], [0, 2, 0], [0, 0, 10]])
            cov1 = np.array([[42, 0, 0], [0, 1, 0], [0, 0, 10]])
        else:
            X = np.reshape(image,
                           (image.shape[0] * image.shape[1], image.shape[2]))
            kmeans = KMeans(n_clusters=2, random_state=0).fit(X)

            # set covs, coefficients
            cluster_map = pd.DataFrame()
            cluster_map['data'] = X.tolist()
            cluster_map['cluster'] = kmeans.labels_
            cluster0 = cluster_map[cluster_map.cluster == 0]['data']
            cluster1 = cluster_map[cluster_map.cluster == 1]['data']
            cov0 = np.cov(np.transpose(cluster0.tolist()))
            cov1 = np.cov(np.transpose(cluster1.tolist()))
            mix0 = float(len(cluster0)) / len(cluster_map)
            means = kmeans.cluster_centers_

        back_data, front_data, mask_data = fit_EM(image,
                                                  data,
                                                  means,
                                                  cov0,
                                                  cov1,
                                                  mix0,
                                                  eps=eps)
        data_dict = {'back': back_data, 'front': front_data, 'mask': mask_data}
        output(data_dict, filename)
Code example #12
def gibbs(X, Beta, Y, sigma, niter):
    # Header reconstructed from the call below; the original snippet began
    # mid-function. r, c and step are module-level globals.
    X_list = np.array([X.copy()])  # X_list[n] is the state entering sweep n
    positions = list(it.product(np.arange(r), np.arange(c)))
    np.random.shuffle(positions)
    for n in range(niter):
        Z = X_list[n].copy()
        for i in positions:
            s = sum([Z[j[0], j[1]] for j in neighbors_ix(i, Z)])
            p = sigmoid(2 * Beta * s + np.log(normal(Y[i[0], i[1]], 1, sigma) /
                                              normal(Y[i[0], i[1]], -1, sigma)))
            X[i[0], i[1]] = np.random.choice([1, -1], 1, p=[p, 1 - p])
        X_list = np.append(X_list, [X], axis=0)
        if (n + 1) % step == 0:
            print("iter: {}".format(n + 1))
    return X_list


# Importing the data and initializing the parameters
data, image = read_data("../a1/4_noise.txt", True)
Y = normalize_img(image)
r, c = Y.shape
X0 = np.zeros(Y.shape)
Beta = 8
nbiter = 10
step = max(int(nbiter / 100), 1)
sigma = 3.5

# Image denoising (the first two sweeps are discarded as burn-in)
X_list = gibbs(X0, Beta, Y, sigma, nbiter)[2:]
X_avg = unnormalize_avg_img(X_list)

# Output
write_data(img_to_data(X_avg), "../outputs/a1/4_output_5.txt")
read_data("../outputs/a1/4_output_5.txt", True, save=True)
Code example #13
        # (fragment: the tail of the variational_inference loop from code example #2)
        print(new_entropy)
        iter_mus = mus.copy()
        denoized_data = np.round(iter_mus)
        denoized_data = transform_back_values(denoized_data)
        denoized_data = transform_back_shape(denoized_data)
        write_data(
            denoized_data,
            str(J) + '_J_' + str(sigma) + '_sigma_' + str(iter) + '_epoch' +
            str(filenumber) + '_noise_vi.txt')
        read_data(
            str(J) + '_J_' + str(sigma) + '_sigma_' + str(iter) + '_epoch' +
            str(filenumber) + '_noise_vi.txt', True, False, True)

    return mus


def draw_new_samples(data):
    new_data = np.zeros(data.shape)
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            if ((data[i, j] + 1) / 2) > np.random.rand():
                new_data[i, j] = 1
    return new_data


for filenumber in range(1, 5):
    data, image = read_data('../a1/' + str(filenumber) + '_noise.txt', True)
    data = transform_data_shape(data)
    # put data in {-1, 1} space
    data = transform_data_values(data)
    variational_inference(data)
Code example #14
    mask_image[:, 2] = mask * 100

    foreground = np.zeros(shape=data.shape)
    foreground[:, 0:2] = data[:, 0:2]
    for i in range(2, 5):
        foreground[:, i] = np.multiply(data[:, i], inverse_mask)

    background = np.zeros(shape=data.shape)
    background[:, 0:2] = data[:, 0:2]
    for i in range(2, 5):
        background[:, i] = np.multiply(data[:, i], mask)

    return mask_image, foreground, background


if __name__ == '__main__':
    for name in ['fox', 'owl', 'zebra', 'cow']:
        print("Now Processing : " + name + ".txt")
        filename = "data" + os.sep + name + ".txt"
        data, image = read_data(filename, is_RGB=False)
        mask, foreground, background = segment_image(data)
        # Write each result matrix, then re-read it to save the image.
        for label, result in (("mask", mask), ("foreground", foreground),
                              ("background", background)):
            output_filename = "results" + os.sep + name + "_" + label + ".txt"
            write_data(result, output_filename)
            data, image = read_data(output_filename, is_RGB=False, save=True)
Code example #15
def write_read_data(start_name, end_name, data):
    # save the data matrix and image as text and jpg files respectively
    txt = "../output/" + start_name + "_" + end_name + ".txt"
    write_data(data, txt)
    jpg = "../output/" + start_name + "_" + end_name + ".jpg"
    read_data(txt, False, False, True, jpg)
Code example #16
    def expect_maxim(self, filename):

        #initializing for first iteration

        self.prior = [0.5, 0.5]
        exp_mean1 = 0
        exp_mean2 = 0

        # EM iterations: expectation step first

        log_lh = []
        while True:
            self.post_sum = [0, 0]
            self.posterior = [[0, 0]]  # dummy first entry; indexing starts at 1 below
            exp_mean1 = [0, 0, 0]
            exp_mean2 = [0, 0, 0]

            for v in self.image:
                for i in v:
                    l_post = [0, 0]
                    l_post[0] = multivariate_normal.pdf(
                        i,
                        mean=self.Mean[0],
                        cov=self.Cov1,
                        allow_singular=True)  # Likelihood 1
                    l_post[1] = multivariate_normal.pdf(
                        i,
                        mean=self.Mean[1],
                        cov=self.Cov2,
                        allow_singular=True)  # Likelihood 2

                    l_post[0] = self.prior[0] * l_post[0]
                    l_post[1] = self.prior[1] * l_post[1]

                    Normalization = l_post[0] + l_post[1]

                    l_post[0] = l_post[0] / Normalization
                    l_post[1] = l_post[1] / Normalization

                    self.posterior.append(l_post)

                    self.post_sum[0] = self.post_sum[0] + l_post[
                        0]  #used in the maximization step
                    self.post_sum[1] = self.post_sum[1] + l_post[1]

                    exp_mean1 = exp_mean1 + (l_post[0] * i)
                    exp_mean2 = exp_mean2 + (l_post[1] * i)

            # Maximization step:

            self.Mean[0] = exp_mean1 / self.post_sum[0]  # updated means
            self.Mean[1] = exp_mean2 / self.post_sum[1]

            variance1 = np.zeros((3, 3))
            variance2 = np.zeros((3, 3))

            j = 1
            for v in self.image:
                for i in v:
                    variance1 = variance1 + (self.posterior[j][0] * np.outer(
                        (i - self.Mean[0]), (i - self.Mean[0])))
                    variance2 = variance2 + (self.posterior[j][1] * np.outer(
                        (i - self.Mean[1]), (i - self.Mean[1])))
                    j = j + 1

            self.Cov1 = variance1 / self.post_sum[0]  #updated variance
            self.Cov2 = variance2 / self.post_sum[1]

            self.prior[0] = self.post_sum[0] / self.X.shape[0]  #updated prior
            self.prior[1] = self.post_sum[1] / self.X.shape[0]
            print("Maximization ended")

            lval = 0
            sumList = []

            for v in self.image:
                for i in v:
                    l_post[0] = multivariate_normal.pdf(i,
                                                        self.Mean[0],
                                                        self.Cov1,
                                                        allow_singular=True)
                    l_post[1] = multivariate_normal.pdf(i,
                                                        self.Mean[1],
                                                        self.Cov2,
                                                        allow_singular=True)

                    Normalization = (self.prior[0] *
                                     l_post[0]) + (self.prior[1] * l_post[1])
                    sumList.append(np.log(Normalization))

            lval = np.sum(np.asarray(sumList))  # computed once, after the loops

            log_lh.append(lval)
            print("Log Likelihood: " + str(lval))

            # Stop once the log likelihood changes by less than 0.5 between
            # two consecutive iterations.
            if len(log_lh) < 2: continue
            if np.abs(lval - log_lh[-2]) < 0.5: break
        # end of while loop

        # Build background, foreground and mask copies of the data

        backg = self.data.copy()
        foreg = self.data.copy()
        mask = self.data.copy()

        for i in range(len(self.data)):  # the original range stopped at len - 1 and skipped the last pixel

            cell = self.data[i]
            point = [cell[2], cell[3], cell[4]]
            l_post = [0, 0]
            l_post[0] = multivariate_normal.pdf(
                point, mean=self.Mean[0], cov=self.Cov1,
                allow_singular=True)  # Likelihood 1
            l_post[1] = multivariate_normal.pdf(
                point, mean=self.Mean[1], cov=self.Cov2,
                allow_singular=True)  # Likelihood 2

            l_post[0] = self.prior[0] * l_post[0]
            l_post[1] = self.prior[1] * l_post[1]

            Normalization = l_post[0] + l_post[1]

            l_post[0] = l_post[0] / Normalization
            l_post[1] = l_post[1] / Normalization

            if (l_post[0] < l_post[1]):
                backg[i][2] = backg[i][3] = backg[i][4] = 0
                mask[i][2] = mask[i][3] = mask[i][4] = 0
            else:
                foreg[i][2] = foreg[i][3] = foreg[i][4] = 0
                mask[i][2] = 100
                mask[i][3] = mask[i][4] = 0

        write_data(backg, filename + "_back.txt")
        read_data(filename + "_back.txt",
                  False,
                  save=True,
                  save_name=filename + "_background.jpg")

        write_data(foreg, filename + "_fore.txt")
        read_data(filename + "_fore.txt",
                  False,
                  save=True,
                  save_name=filename + "_foreground.jpg")

        write_data(mask, filename + "_mask.txt")
        read_data(filename + "_mask.txt",
                  False,
                  save=True,
                  save_name=filename + "_masked.jpg")
Code example #17
File: Segmentation.py  Project: Soufedd/PGM_asg
def split_on_mask(data, mask):
    # Header reconstructed for this fragment (the name is a guess): zero out
    # each pixel's channel values in Fgnd where the mask's last channel is
    # 255, and in Bgnd where it is 0.
    d = data.shape[-1] - 2
    Fgnd = data.copy()
    Bgnd = data.copy()
    for n in range(len(data)):
        Fgnd[n, 2:] = np.zeros(d) if mask[n, -1] == 255 else Fgnd[n, 2:]
        Bgnd[n, 2:] = np.zeros(d) if mask[n, -1] == 0 else Bgnd[n, 2:]
    return Fgnd, Bgnd



# Importing the data and initialization
animal = "owl"
K = 2
K_epsilon = 1e-5
EM_epsilon = 1e-2
data, image = read_data("../a2/" + animal + ".txt", False)

N = len(data)
d = data.shape[-1] - 2
obs_data = data[:, 2:].reshape(N, d, 1)

mus = np.random.randn(K, d, 1)
sigmas = np.tile(np.identity(d), (K, 1)).reshape(K, d, d)
pis = np.repeat(1 / K, K)


# Using K-means to provide the initial means for EM
mus, r = Kmeans(obs_data, mus, K_epsilon)

# Using EM for data segmentation (expressed in gamma)
gamma, pis, mus, sigmas = EM(obs_data, pis, mus, sigmas, EM_epsilon)
Code example #18
def fit_EM(m1, m2, filename, data, image):
    mean1 = m1              #Mean 1

    mean2 = m2              #Mean 2

    eps = 0.5               #Threshold

    z = np.asarray([[13, 20, 29], [13, 23, 37], [13, 23, 29]])
    cov1 = np.cov(z)        #covariance 1

    z = np.asarray([[9, -58, 7], [8, -7, 10], [6, -4, 6]])
    cov2 = np.cov(z)        #covariance 2

    mix1 = 0.4              #mixing co-efficient 1
    mix2 = 0.6              #mixing co-efficient 2

    N = image.shape[0] * image.shape[1]         #Total number of samples

    log_likelihoods = []

    print("Initialization of mean,covariance and mixing co-efficient statement done for "+str(filename))

    print("")
    print("Starting EM algorithm for "+str(filename))
    # EM iterations: expectation step first

    iteration_number = 0
    while True:
        iteration_number += 1
        print("Iteration: "+str(iteration_number))

        N1 = 0
        N2 = 0
        resp1_list = []
        resp2_list = []
        mu_sum1 = [0, 0, 0]
        mu_sum2 = [0, 0, 0]

        for y in image:
            for x in y:
                prob1 = pyro.sample("prob1", dist.multivariate_studentt)
                prob1 = multivariate_normal.pdf(x, mean=mean1, cov=cov1, allow_singular=True)      # gaussian density 1

                prob2 = multivariate_normal.pdf(x, mean=mean2, cov=cov2, allow_singular=True)      # gaussian density 2

                Numerator1 = mix1 * prob1
                Numerator2 = mix2 * prob2

                denom = Numerator1 + Numerator2

                resp1 = Numerator1 / denom  #responsibility for 1st cluster

                resp2 = Numerator2 / denom  #responsibility for 2nd cluster

                resp1_list.append(resp1)
                resp2_list.append(resp2)


                mu_sum1 += resp1 * x
                mu_sum2 += resp2 * x

                N1 += resp1
                N2 += resp2

        # Maximization Step :

        mu_new1 = mu_sum1 / N1  #updated mean 1
        mu_new2 = mu_sum2 / N2  #updated mean 2

        var_1 = np.zeros((3, 3))
        var_2 = np.zeros((3, 3))

        i = 0
        for y in image:
            for x in y:
                var_1 += resp1_list[i] * np.outer((x - mu_new1), (x - mu_new1))
                var_2 += resp2_list[i] * np.outer((x - mu_new2), (x - mu_new2))
                i = i + 1

        var_new1 = var_1 / N1   #updated covariance1
        var_new2 = var_2 / N2   #updated covariance2

        mix_new1 = N1 / N       #updated mixing co-efficient1
        mix_new2 = N2 / N       #updated mixing co-efficient2

        mean1 = mu_new1
        mean2 = mu_new2

        cov1 = var_new1
        cov2 = var_new2

        mix1 = mix_new1
        mix2 = mix_new2

        # Calculate the log likelihood
        sumList = []
        for y in image:
            for x in y:
                prob1 = multivariate_normal.pdf(x, mu_new1, var_new1, allow_singular=True)

                prob2 = multivariate_normal.pdf(x, mu_new2, var_new2, allow_singular=True)

                mix_density = (mix_new1 * prob1) + (mix_new2 * prob2)
                sumList.append(np.log(mix_density))

        ll = np.sum(np.asarray(sumList))  # computed once, after the loops


        log_likelihoods.append(ll)

        print("Log Likelihood: " + str(ll))

        # Stop once the log likelihood changes by less than the threshold
        # between two consecutive iterations.
        if len(log_likelihoods) < 2: continue
        if np.abs(ll - log_likelihoods[-2]) < eps: break

    print("")
    print("End of iterations for: " + str(filename))
    print("")

    #Write to File
    print("Writing to file for:  " + str(filename))

    back_data = data.copy()
    front_data = data.copy()
    mask_data = data.copy()

    for i in range(len(data)):  # the original range stopped at len(data) - 1 and skipped the last pixel

        cell = data[i]
        point = [cell[2], cell[3], cell[4]]
        prob1 = multivariate_normal.pdf(point, mean=mean1, cov=cov1, allow_singular=True)

        resp1 = mix1 * prob1
        prob2 = multivariate_normal.pdf(point, mean=mean2, cov=cov2, allow_singular=True)
        resp2 = mix2 * prob2

        # Normalize with a shared denominator (the original reused the
        # already-updated resp1 when normalizing resp2).
        denom = resp1 + resp2
        resp1 = resp1 / denom
        resp2 = resp2 / denom


        if (resp1 < resp2):
            back_data[i][2] = back_data[i][3] = back_data[i][4] = 0
            mask_data[i][2] = mask_data[i][3] = mask_data[i][4] = 0

        else:
            front_data[i][2] = front_data[i][3] = front_data[i][4] = 0
            mask_data[i][2] = 100
            mask_data[i][3] = mask_data[i][4] = 0


    write_data(back_data,"../output/"+str(filename)+"_back.txt")
    read_data("../output/"+str(filename)+"_back.txt", False, save=True, save_name="../output/"+str(filename)+"_background.jpg")


    write_data(front_data,"../output/"+str(filename)+"_fore.txt")
    read_data("../output/"+str(filename)+"_fore.txt", False, save=True, save_name="../output/"+str(filename)+"_foreground.jpg")


    write_data(mask_data,"../output/"+str(filename)+"_mask.txt")
    read_data("../output/"+str(filename)+"_mask.txt", False, save=True, save_name="../output/"+str(filename)+"_masked.jpg")

    print("Finished writing data. Please check "+str(filename)+"_background.jpg, "+str(filename)+
          "_foreground.jpg and "+str(filename)+"_masked.jpg ")
Code example #19
CONVERGENCE_THRESHOLD = 1
K_SEG = 2

# Place this file inside code/ folder
if __name__ == "__main__":

    input_path = [
        "../a2/cow.txt", "../a2/fox.txt", "../a2/owl.txt", "../a2/zebra.txt"
    ]
    output_path = [
        "../output/a2/cow", "../output/a2/fox", "../output/a2/owl",
        "../output/a2/zebra"
    ]

    for in_path, out_path in zip(input_path, output_path):
        data, image = read_data(in_path, True)
        height, width, channel = image.shape

        # reshape into pixels, each with 3 channels
        pixels = image.reshape((height * width, channel))
        mask, foreground, background = EM(pixels, height, width, K_SEG)

        # save result images (segments are converted back from Lab to BGR)
        imageplt.imsave(out_path + '_mask.png', mask)
        cv2.imwrite(out_path + '_seg1.png',
                    (cv2.cvtColor(foreground, cv2.COLOR_Lab2BGR) * 255).astype(
                        np.uint8))
        cv2.imwrite(out_path + '_seg2.png',
                    (cv2.cvtColor(background, cv2.COLOR_Lab2BGR) * 255).astype(
                        np.uint8))