def images_to_single_fruit_centroid(fruit_number, image_path):
    """Return the (2,) centroid of the blob labelled fruit_number in the image."""
    import PIL.Image
    import numpy as np
    from machinevisiontoolbox import Image
    # downsample the label image; NEAREST keeps the integer fruit labels intact
    image = PIL.Image.open(image_path).resize((320, 240), PIL.Image.NEAREST)
    # boolean mask of the pixels belonging to this fruit type
    fruit = Image(image) == fruit_number
    blobs = fruit.blobs()
    # assumes only one fruit of the given type appears in this photo
    return np.array(blobs[0].centroid).reshape(2,)  # (2,)
def get_triangulation_params(base_dir):
    # Assume everything is in base_dir/workshop_output
    import os
    from pathlib import Path
    import ast
    import numpy as np
    from machinevisiontoolbox import Image
    fruit_lst_centroids = [[], [], [], []]
    fruit_lst_pose = [[], [], [], []]
    base_dir = Path(base_dir)

    files = os.listdir(base_dir / 'workshop_output')
    assert 'images.txt' in files

    image_poses = {}
    with open(base_dir / 'workshop_output/images.txt') as fp:
        for line in fp.readlines():
            pose_dict = ast.literal_eval(line)
            image_poses[pose_dict['imgfname']] = pose_dict['pose']

    for file_path in image_poses.keys():
        # unique pixel values in the label image; non-zero values are fruit labels
        img_vals = set(
            Image(base_dir / file_path, grey=True).image.reshape(-1))
        for fruit_num in img_vals:
            if fruit_num > 0:
                try:
                    centroid = images_to_single_fruit_centroid(
                        fruit_num, base_dir / file_path)
                    pose = image_poses[file_path]
                    fruit_lst_centroids[fruit_num - 1].append(centroid)
                    fruit_lst_pose[fruit_num - 1].append(
                        np.array(pose).reshape(3, ))
                except ZeroDivisionError:
                    # blob/centroid computation failed for this label; skip it
                    pass
    completed_triangulations = {}
    for i in range(4):
        if len(fruit_lst_centroids[i]) > 0:
            centroids = np.stack(fruit_lst_centroids[i], axis=1)  # (2,n)
            pose = np.stack(fruit_lst_pose[i], axis=1)  # (3,n)
            completed_triangulations[i + 1] = {
                'centroids': centroids,
                'pose': pose
            }  # centroids (2,n), pose (3,n)
    return completed_triangulations
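
# A minimal sketch (not part of the original pipeline) of how the returned
# dictionary could be consumed.  Each entry holds pixel centroids (2,n) and
# robot poses (3,n) assumed to be (x, y, theta); bearing-only least squares
# then gives a fruit position estimate.  The focal length fx, principal
# point cx, and the assumption that centroids[0] is the horizontal pixel
# coordinate are placeholders, not values from the original code.
def triangulate_fruit(centroids, poses, fx=320.0, cx=160.0):
    import numpy as np
    # bearing of each observation in the world frame
    bearings = poses[2, :] + np.arctan2(cx - centroids[0, :], fx)
    # each observation constrains the fruit to the line
    #   sin(b)*x - cos(b)*y = sin(b)*px - cos(b)*py
    A = np.stack([np.sin(bearings), -np.cos(bearings)], axis=1)  # (n, 2)
    b = A[:, 0] * poses[0, :] + A[:, 1] * poses[1, :]            # (n,)
    est, *_ = np.linalg.lstsq(A, b, rcond=None)
    return est  # least-squares (x, y) estimate of the fruit position

# example usage ('run1' is a placeholder directory):
# params = get_triangulation_params('run1')
# for fruit_id, obs in params.items():
#     print(fruit_id, triangulate_fruit(obs['centroids'], obs['pose']))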
Example #3
    def plot(self, type='frequency', block=False, **kwargs):
        if type == 'frequency':
            plot_histogram(self.xs.flatten(),
                           self.hs.flatten(),
                           block=block,
                           xlabel='pixel value',
                           ylabel='number of pixels',
                           **kwargs)
        elif type == 'cumulative':
            plot_histogram(self.xs.flatten(),
                           self.cs.flatten(),
                           block=block,
                           xlabel='pixel value',
                           ylabel='cumulative number of pixels',
                           **kwargs)
        elif type == 'normalized':
            plot_histogram(self.xs.flatten(),
                           self.ns.flatten(),
                           block=block,
                           xlabel='pixel value',
                           ylabel='normalized cumulative number of pixels',
                           **kwargs)


# --------------------------------------------------------------------------#
if __name__ == '__main__':

    print('ImageProcessingKernel.py')

    from machinevisiontoolbox import Image

    im = Image('penguins.png', grey=True)

    h = im.hist()
    print(h)
    h.plot(block=True)
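
    # the object returned by hist() also supports the other plot styles
    # handled in plot() above
    h.plot(type='cumulative', block=True)
    h.plot(type='normalized', block=True)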
    def blobs(self, **kwargs):
        """
        Find and describe blobs in the image.

        Example:

            >>> from machinevisiontoolbox import Image
            >>> im = Image('shark2.png')
            >>> blobs = im.blobs()
            >>> print(blobs)

        """
        return Blob(self, **kwargs)


if __name__ == "__main__":

    from machinevisiontoolbox import Image
    import matplotlib.pyplot as plt

    im = Image('shark2.png')
    blobs = im.blobs()
    print(blobs)
    im.disp()
    blobs.plot_labelbox(color='yellow')
    blobs.plot_centroid()
    plt.show()
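
    # blobs can be indexed individually; the centroid attribute is what
    # images_to_single_fruit_centroid() above relies on
    print(blobs[0].centroid)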

    # # read image
    # from machinevisiontoolbox import Image
    # im = Image(cv.imread('images/multiblobs.png', cv.IMREAD_GRAYSCALE))

    # # call Blobs class
    # b = Blob(image=im)

    # # plot image
    # plot_box(centre=(300,200), wh=(40,40), fillcolor='red', alpha=0.5)

    # plot_point([(200,200), (300, 300), (400,400)], marker='r*', color='blue', text="bob {}")

    # plot_labelbox("hello", color='red', textcolor='white', centre=(300,300), wh=(60,40))
    # plt.show()

    import numpy as np
    from machinevisiontoolbox import idisp, iread, Image

    im = np.zeros((100, 100, 3), 'uint8')
    # im, file = iread('flowers1.png')

    # draw_box(im, color=(255,0,0), centre=(50,50), wh=(20,20))

    # draw_point(im, [(200,200), (300, 300), (400,400)], color='blue')

    # draw_labelbox(im, "box", thickness=3, centre=(100,100), wh=(100,30), color='red', textcolor='white')
    idisp(im)

    from spatialmath.base import plot_point  # plotting helper used below

    x = np.random.randint(0, 100, size=(10,))
    y = np.random.randint(0, 100, size=(10, ))

    plot_point((x, y), 'w+')
    plt.draw()
    plt.show(block=True)

    im = Image('penguins.png')
    h = im.hist()
            else:
                # isint
                th0 = np.round(0.1 * np.iinfo(img.dtype).max)
        if th1 is None:
            th1 = 1.5 * th0

        # compute gradients Ix, Iy using a derivative-of-Gaussian kernel
        dg = self.kdgauss(sigma)

        out = []
        for im in img:

            Ix = np.abs(im.convolve(dg, 'same'))
            Iy = np.abs(im.convolve(np.transpose(dg), 'same'))

            # cv.Canny's gradient overload expects 16-bit signed gradient images
            Ix = np.array(Ix, dtype=np.int16)
            Iy = np.array(Iy, dtype=np.int16)

            out.append(cv.Canny(Ix, Iy, th0, th1, L2gradient=True))

        return self.__class__(out)


# --------------------------------------------------------------------------#
if __name__ == '__main__':

    print('ImageProcessingKernel.py')
    from machinevisiontoolbox import Image
    print(Image.kcircle(5))
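
    # Standalone sketch of the gradient-based cv.Canny call used in the edge
    # detector above: cv.Canny accepts precomputed x/y gradients, which must
    # be 16-bit signed images.  The random stand-in image and the 26/39
    # thresholds (10% of the uint8 range and 1.5x that, mirroring the
    # defaults above) are illustrative only.
    import cv2 as cv
    import numpy as np
    gray = (np.random.rand(240, 320) * 255).astype(np.uint8)
    dx = cv.Sobel(gray, cv.CV_16S, 1, 0, ksize=3)  # horizontal gradient, int16
    dy = cv.Sobel(gray, cv.CV_16S, 0, 1, ksize=3)  # vertical gradient, int16
    edges = cv.Canny(dx, dy, 26, 39, L2gradient=True)
    print(edges.shape, edges.dtype)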
    # sf0 = sf[0:3]
    # sf0.u

    # drawing = sf.drawSiftKeypoints(imgray)

    # TODO would be nice to make a root-sift descriptor method, as it is a
    # simple addition to the SIFT descriptor

    # test matching

    # import code
    # code.interact(local=dict(globals(), **locals()))

    from machinevisiontoolbox import Image

    kp1 = Image('eiffel2-1.png').SIFT()
    kp2 = Image('eiffel2-2.png').SIFT()

    matches = kp1.match(kp2)
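
    # Rough standalone equivalent of the SIFT()/match() calls above, going
    # through OpenCV directly; the 0.75 ratio-test threshold is a
    # conventional choice, not a toolbox default.
    import cv2 as cv
    g1 = Image('eiffel2-1.png', grey=True).image
    g2 = Image('eiffel2-2.png', grey=True).image
    sift = cv.SIFT_create()
    k1, d1 = sift.detectAndCompute(g1, None)
    k2, d2 = sift.detectAndCompute(g2, None)
    good = [m for m, n in cv.BFMatcher().knnMatch(d1, d2, k=2)
            if m.distance < 0.75 * n.distance]
    print(len(good), 'ratio-test matches')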


    # im = Image('eiffel2-1.png')
    # ax = im.disp()

    # # sort into descending order
    # ks = kp1.sort()
    # print(len(kp1), len(ks))
    # print(kp1[0]._descriptor)
    # print(ks[0]._descriptor)
    
    # kp1.plot(hand=True, handalpha=0.2)