Code Example #1
def densereg_face_iterator(istraining, db='helen'):

    database_path = Path(
        '/vol/atlas/homes/yz4009/databases/DenseReg/FaceRegDataset') / db
    landmarks_path = Path('/vol/atlas/databases') / db

    image_folder = 'Images_Resized'
    uv_folder = 'Labels_Resized'
    z_folder = 'Labels_Resized'

    if istraining == 1:
        database_path = database_path / 'trainset'
        landmarks_path = landmarks_path / 'trainset'
    elif istraining == 0:
        database_path = database_path / 'testset'
        landmarks_path = landmarks_path / 'testset'

    for pimg in print_progress(
            mio.import_images(database_path / image_folder / image_folder)):

        image_name = pimg.path.stem

        # load iuv data
        labels = sio.loadmat(
            str(database_path / uv_folder / uv_folder /
                ('%s.mat' % image_name)))
        iuv = Image(
            np.stack([(labels['LabelsH_resized'] >= 0).astype(np.float32),
                      labels['LabelsH_resized'],
                      labels['LabelsV_resized']]).clip(0, 1))

        # load lms data
        try:
            orig_image = mio.import_image(landmarks_path /
                                          ('%s.jpg' % image_name))
        except Exception:  # fall back to .png if no .jpg exists
            orig_image = mio.import_image(landmarks_path /
                                          ('%s.png' % image_name))

        orig_image = orig_image.resize(pimg.shape)

        pimg.landmarks['JOINT'] = orig_image.landmarks['PTS']

        pimg_data = utils.crop_image_bounding_box(
            pimg, pimg.landmarks['JOINT'].bounding_box(), [384, 384], base=256)

        pimg = pimg_data[0]

        iuv_data = utils.crop_image_bounding_box(
            iuv, pimg.landmarks['JOINT'].bounding_box(), [384, 384], base=256)

        iuv = iuv_data[0]

        yield {
            'image': pimg,
            'iuv': iuv,
            'visible_pts': np.array(list(range(68))),
            'marked_index': np.array(list(range(68)))
        }
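
A short consumption sketch for the generator above; the loop body is illustrative only and assumes the hard-coded dataset paths in the function are reachable.

for sample in densereg_face_iterator(istraining=1, db='helen'):
    img, iuv = sample['image'], sample['iuv']
    # each yielded dict holds the cropped image, its IUV label image and the 68 point indices
    print(img.shape, iuv.shape, sample['visible_pts'].shape)
    break  # inspect only the first sample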
Code Example #2
def blue_peter():
    import menpo.io as mio
    import h5it
    from menpo.visualize.image import glyph
    from menpo.feature import hog
    import matplotlib.pyplot as plt
    # Loading the pre-built HOG AAM
    try:
        import cPickle as pickle  # Python 2
    except ImportError:  # Python 3 has no cPickle
        import pickle

    with open('/Users/pts08/hog_lfpw_aam.pkl', 'rb') as f:
        hog_aam = pickle.load(f)
    
    #hog_aam = h5it.load('/Users/pts08/sparse_hog.hdf5')
    print('Here is one I made earlier!')

    bp = mio.import_image('blue_peter.jpg')
    hog_blue_peter = hog(bp)

    plt.figure()

    plt.subplot(121)
    bp.view()
    plt.axis('off')
    plt.gcf().set_size_inches(11, 11)
    plt.title('RGB')

    plt.subplot(122)
    glyph(hog_blue_peter).view()
    plt.axis('off')
    plt.gcf().set_size_inches(11, 11)
    plt.title('HOG')

    return hog_aam
Code Example #3
def save_faces(emotions, face_detector, fitter):
    for element in emotions:
        TEST_IMG_PATH = "/Users/joanna_frankiewicz/Desktop/Database/FaceDB_Emotions/images/"+element
        for filename in os.listdir(TEST_IMG_PATH):
            if filename.endswith(".jpg"):
                img_path = TEST_IMG_PATH+"/"+filename
                test_img = mio.import_image(img_path)
                try:
                    test_face_bb = face_2_pointcloud(face_detector(test_img))
                    fitting_result = fitter.fit_from_bb(test_img, test_face_bb, max_iters=25)
                    
                    # get points of bounding_box. left top and bottom right
                    p_left_top = fitting_result.final_shape.bounds()[0]
                    p_right_bottom = fitting_result.final_shape.bounds()[1]

                    image_width, image_height = test_img.width, test_img.height

                    # clip value to image range
                    p_left_top[0] = np.clip([p_left_top[0]], 0, image_height)[0]
                    p_left_top[1] = np.clip([p_left_top[1]], 0, image_width)[0]
                    p_right_bottom[0] = np.clip([p_right_bottom[0]], 0, image_height)[0]
                    p_right_bottom[1] = np.clip([p_right_bottom[1]], 0, image_width)[0]

                    img_tmp = test_img.pixels.squeeze()[int(p_left_top[0]):int(p_right_bottom[0]), int(p_left_top[1]):int(p_right_bottom[1])]*255
                    img_tmp = cv2.resize(img_tmp, (144, 144))
                    result = cv2.imwrite("/Users/joanna_frankiewicz/Desktop/Database/FaceDB_Emotions/result/"+element+"/"+filename, img_tmp)
                except Exception:
                    print('Detection or fitting failed for {}'.format(filename))
Code Example #4
    def from_menpo(cls, data_dir, verbose=True):
        """
        Create class instance from directory of menpo files

        Parameters
        ----------
        data_dir: string
            path to data directory
        verbose: bool
            whether or not to print current progress

        Returns
        -------
        class instance
        """
        if verbose:
            print("Loading data from %s" % data_dir)
            wrapper_fn = tqdm
        else:

            def linear_wrapper(x):
                return x

            wrapper_fn = linear_wrapper
        img_paths = list(mio.image_paths(data_dir))

        samples = []
        for _img in wrapper_fn(img_paths):

            samples.append(
                SingleImage.from_menpo(mio.import_image(_img), img_file=_img))

        return cls(samples=samples)
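
A minimal usage sketch for the classmethod above, assuming a hypothetical ImageDataset class that defines it and a directory of images with landmark files alongside them.

# `ImageDataset` and the directory path are illustrative assumptions, not part of the source.
dataset = ImageDataset.from_menpo('/path/to/landmarked/images', verbose=True)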
Code Example #5
def dlib_landmark(img_path, img_dir):
    detector = dlib.get_frontal_face_detector()
    predictor_68_point_model = face_recognition_models.pose_predictor_model_location()

    predictor = dlib.shape_predictor(predictor_68_point_model)
    # load the input image, resize it, and convert it to grayscale
    image = cv2.imread(img_path)
    # image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale image
    rects = detector(gray, 1)
    # loop over the face detections
    shapes = []
    for (i, rect) in enumerate(rects):
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        for (x, y) in shape:
            cv2.circle(image, (x, y), 5, (0, 0, 255), -1)
        # show the output image with the face detections + facial landmarks
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        plt.show()
        shape = swap_np_columns(shape)
        shapes.append(shape)
        if i == 0:
            pts_file = img_dir + img_path.split('/')[-1].split('.')[0] + '.pts'
            print(pts_file)
            mio.export_landmark_file(menpo.shape.PointCloud(shape),
                                     pts_file,
                                     overwrite=True)
            mio_img = mio.import_image(img_path)
            mio_img.view_landmarks()
            plt.show()
            
    return shapes
Code Example #6
File: result.py  Project: jalabort/alabortijcv2015
    def image(self):
        if self._image is None:
            image = mio.import_image(self._image_path)
            image.crop_to_landmarks_proportion_inplace(0.5)
            self._image = image

        return self._image
Code Example #7
def test(fitter, mi0=28, mi1=24):
    # error_list = []
    # error_dict = {}
    name_list, valid_name_dir, red_dot_location_dir, ground_truth_dir = get_test_path(
    )
    for name in name_list:
        image_road = os.path.join(valid_name_dir, name + '.jpg')
        image = mio.import_image(image_road)
        # resolution = image.shape[1]

        txt_path = os.path.join(red_dot_location_dir, name + '.txt')
        bboxes = get_bbx(txt_path)

        # txt_path = os.path.join(ground_truth_dir, name + '.txt')
        # ground_truth_np = get_coordinate(txt_path)

        result = fitter.fit_from_bb(image, bboxes, max_iters=[mi0, mi1])

        pre_landmarks = result.final_shape.as_vector().copy()
        pre_landmarks.resize((35, 2))
        pre_landmarks[:, [0, 1]] = pre_landmarks[:, [1, 0]]
        root = R"C:\Users\chen\Desktop\test_pre_label"
        save_path = os.path.join(root, name + '.txt')
        np.savetxt(save_path,
                   pre_landmarks,
                   fmt='%d',
                   delimiter=',',
                   newline='\r\n')
Code Example #8
    def image(self):
        if self._image is None:
            image = mio.import_image(self._image_path)
            image.crop_to_landmarks_proportion_inplace(0.5)
            self._image = image

        return self._image
Code Example #9
def get_pts_for_mirror_image(file_path, verbose=False):
    for image_file_name in mio.image_paths(file_path):
        if os.path.exists(
                os.path.splitext(str(image_file_name))[0] +
                '_mirror.jpg') and not os.path.exists(
                    os.path.splitext(str(image_file_name))[0] + '_mirror.pts'):
            img = mio.import_image(image_file_name).mirror()
            mirrored_points = []
            for i in range(68):
                unmirrored_points = img.landmarks.get('PTS').lms.points
                mirrored_points.append([
                    unmirrored_points[mirror_transfrom[i + 1] - 1][1],
                    unmirrored_points[mirror_transfrom[i + 1] - 1][0]
                ])
            if verbose:
                print('Adding mirror pts for: ' + str(image_file_name))

            with open(
                    os.path.splitext(str(image_file_name))[0] + '_mirror.pts',
                    'w') as f:
                f.write('version: 1\nn_points:  %d\n{\n' %
                        (len(mirrored_points)))
                for point in mirrored_points:
                    f.write('%f %f\n' % (point[0], point[1]))
                f.write('}')
Code Example #10
def create_from_pkl_img(tfrecord_dir, image_dir, pickle_dir, shuffle):
    print('Loading images from "%s"' % image_dir)

    image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
    pickle_filenames = sorted(glob.glob(os.path.join(pickle_dir, '*')))
    if len(image_filenames) == 0:
        error('No input images found')

    # good_ids =  mio.import_pickle('/vol/construct3dmm/visualizations/nicp/mein3d/good_ids.pkl')

    img = mio.import_pickle(pickle_filenames[0])
    resolution = img.shape[2]
    channels = img.shape[0] if img.ndim == 3 else 1
    if img.shape[1] != resolution:
        error('Input images must have the same width and height')
    if resolution != 2**int(np.floor(np.log2(resolution))):
        error('Input image resolution must be a power-of-two')
    if channels not in [1, 3]:
        error('Input images must be stored as RGB or grayscale')

    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(
            len(image_filenames))
        for idx in range(order.size):
            img = mio.import_image(image_filenames[order[idx]]).pixels.astype(
                np.float32) * 2 - 1
            pkl = mio.import_pickle(pickle_filenames[order[idx]]).astype(
                np.float32)
            # pkl[0, :, :] = scipy.ndimage.gaussian_filter(pkl[0, :, :], 2)
            # pkl[1, :, :] = scipy.ndimage.gaussian_filter(pkl[1, :, :], 2)
            # pkl[2, :, :] = scipy.ndimage.gaussian_filter(pkl[2, :, :], 2)
            # img_resized = np.stack((cv2.resize(img[0],dsize=(256,256)),cv2.resize(img[1],dsize=(256,256)),cv2.resize(img[2],dsize=(256,256))))
            tfr.add_both(np.concatenate([img, pkl]))
Code Example #11
File: menpo_tool.py  Project: SkyLindotnet/FRCNN_lib
def test():
    img = mio.import_image(
        '/home/sean/workplace/221/py-R-FCN-test/data/DB/face/300-w_face/otherDB/aflw-full/testset/0_image00002_1.jpg'
    )

    if img.n_channels != 1:
        img = img.as_greyscale()

    img.landmarks['face'] = mio.import_landmark_file(
        '/home/sean/workplace/221/py-R-FCN-test/data/DB/face/300-w_face/temp/indoor_001.pts'
    )

    # objects return copies rather than mutating self, so we can chain calls
    img = (img.crop_to_landmarks(
        group='face',
        boundary=10).rescale_landmarks_to_diagonal_range(100, group='face'))

    # now lets take an image feature...
    img = fast_dsift(img)

    # ...and extract the vector of pixels contained in the
    # convex hull of the face...
    vector = img.as_masked().constrain_mask_to_landmarks(
        group='face').as_vector()

    print(type(vector), vector.shape)
Code Example #12
            def _preprocess(self, idx):
                name, *lms5pt = self.detection.loc[idx]
                lms5pt = PointCloud(np.array(lms5pt).reshape([-1,2])[:,::-1])
                img = mio.import_image((self.image_path/name).with_suffix('.jpg'))
                cimg, _, _ = dm.utils.crop_image_bounding_box(img, lms5pt.bounding_box(), [112, 112], base=186)

                return cimg.pixels_with_channels_at_back() * 2 - 1
Code Example #13
File: menpo_tool.py  Project: SkyLindotnet/FRCNN_lib
def test_AAM(fitter, images):
    for image in images:
        image = mio.import_image(image)
        image = image.as_greyscale()
        initial_bbox = image.landmarks['PTS'].bounding_box()
        gt_shape = image.landmarks['PTS'].lms
        initial_shape = noisy_shape_from_bounding_box(gt_shape,
                                                      gt_shape.bounding_box())
        image.landmarks['boundingbox'] = initial_bbox
        image.landmarks['init_shape'] = initial_shape
        image.view_landmarks(group='boundingbox',
                             line_colour='red',
                             render_markers=False,
                             line_width=4)
        image.view_landmarks(group='init_shape')
        # fit image
        result = fitter.fit_from_bb(image,
                                    initial_bbox,
                                    max_iters=[15, 5],
                                    gt_shape=image.landmarks['PTS'].lms)
        # print result
        print(result)

        # fit image
        result1 = fitter.fit_from_shape(image,
                                        initial_shape,
                                        max_iters=[15, 5],
                                        gt_shape=image.landmarks['PTS'].lms)
        # print result
        print(result1)

        result.view(render_initial_shape=True)
Code Example #14
File: align.py  Project: viscog-cmu/familiarity_sims
def warp_landmarked_image_folder(top_dir, template, detector, ext='.jpg'):
    """
    finds all images with associated .pts landmark files and performs
    warping on them
    """
    mask = menpo.image.BooleanImage.init_from_pointcloud(template)
    warpeds = []
    shapes = []
    labels = []
    folders = glob.glob(os.path.join(top_dir, '*'))
    new_folders = [
        folder.replace(imset, '{}-warped'.format(imset)) for folder in folders
    ]
    for label, folder in enumerate(tqdm(folders)):
        os.makedirs(new_folders[label], exist_ok=True)
        for im_fn in tqdm(glob.glob('{}/*{}'.format(folder, ext))):
            try:
                image = mio.import_image(im_fn)
                shape = image.landmarks['PTS']
            except Exception:  # skip images without an associated .pts landmark file
                continue
            bboxes = detector(image)
            if len(bboxes) < 1:
                print('no face found in {}'.format(im_fn))
                continue
            min_b, max_b = bboxes[0].bounds()
            cropped = image.crop(min_b, max_b)
            new_fn = im_fn.replace(imset, '{}-warped'.format(imset))
            transform = menpo.transform.AlignmentAffine(shape, template)
            warped = cropped.warp_to_mask(mask, transform)
            mio.export_image(warped, new_fn, overwrite=True)
            warpeds.append(warped.pixels)
            shapes.append(shape)
            labels.append(label)
    return warpeds, shapes, labels
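
A hedged sketch of calling the folder-warping helper above. Note that the function also relies on a module-level `imset` string (the image-set folder name that gets rewritten to '<imset>-warped'), which must be defined in the original module; the template shape, detector and paths below are placeholders.

import menpo.io as mio
from menpodetect import load_dlib_frontal_face_detector

takeo = mio.import_builtin_asset.takeo_ppm()
template = takeo.landmarks['PTS']  # placeholder template shape (older menpo may need `.lms`)
detector = load_dlib_frontal_face_detector()
warpeds, shapes, labels = warp_landmarked_image_folder('/data/faces', template, detector, ext='.jpg')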
Code Example #15
def load_image_test(path, reference_shape, frame_num):
    file_name = path[:-1] + "/%06d.jpg" % (frame_num)

    im = mio.import_image(file_name)

    im.landmarks['PTS'] = mio.import_landmark_file(path[:-1] +
                                                   "/annot/%06d.pts" %
                                                   (frame_num))
    # im.landmarks['PTS'] = mio.import_landmark_file(path[:-1] + "/%06d.pts" % (frame_num))
    bb_path = path[:-1] + "/bbs/%06d.pts" % (frame_num)

    im.landmarks['bb'] = mio.import_landmark_file(bb_path)

    im = im.crop_to_landmarks_proportion(0.3, group='bb')
    reference_shape = PointCloud(reference_shape)

    bb = im.landmarks['bb'].lms.bounding_box()

    im.landmarks['__initial'] = align_shape_with_bounding_box(
        reference_shape, bb)
    im = im.rescale_to_pointcloud(reference_shape, group='__initial')

    lms = im.landmarks['PTS'].lms
    initial = im.landmarks['__initial'].lms

    # if the image is greyscale then convert to rgb.
    pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)

    gt_truth = lms.points.astype(np.float32)
    estimate = initial.points.astype(np.float32)

    return 1, pixels.astype(np.float32).copy(), gt_truth, estimate
Code Example #16
File: io_import_test.py  Project: jacksoncsy/menpo
def test_importing_I_no_normalise(is_file, mock_image):
    mock_image.return_value = PILImage.new('I', (10, 10))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.jpg', normalise=False)
    assert im.shape == (10, 10)
    assert im.n_channels == 1
    assert im.pixels.dtype == np.int32
Code Example #17
File: io_import_test.py  Project: jacksoncsy/menpo
def test_importing_GIF_normalise(is_file, mock_image):
    mock_image.return_value = PILImage.new('P', (10, 10))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.gif', normalise=True)
    assert im.shape == (10, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.float
Code Example #18
File: test_io_import.py  Project: eosulliv/menpo
def test_importing_PIL_P_normalize(is_file, mock_image):
    mock_image.return_value = PILImage.new("P", (10, 10))
    is_file.return_value = True

    im = mio.import_image("fake_image_being_mocked.ppm", normalize=True)
    assert im.shape == (10, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.float
Code Example #19
File: io_import_test.py  Project: dkollias/menpo
def test_importing_PIL_L_normalise(is_file, mock_image):
    mock_image.return_value = PILImage.new('L', (10, 10))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.ppm', normalise=True)
    assert im.shape == (10, 10)
    assert im.n_channels == 1
    assert im.pixels.dtype == np.float
Code Example #20
File: io_import_test.py  Project: dvdm/menpo
def test_importing_PIL_P_no_normalize(is_file, mock_image):
    mock_image.return_value = PILImage.new('P', (10, 10))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)
    assert im.shape == (10, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.uint8
Code Example #21
def test_importing_PIL_P_no_normalize(is_file, mock_image):
    mock_image.return_value = PILImage.new('P', (10, 15))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)
    assert im.shape == (15, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.uint8
Code Example #22
File: io_import_test.py  Project: jacksoncsy/menpo
def test_importing_I_no_normalise(is_file, mock_image):
    mock_image.return_value = PILImage.new('I', (10, 10))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.jpg', normalise=False)
    assert im.shape == (10, 10)
    assert im.n_channels == 1
    assert im.pixels.dtype == np.int32
Code Example #23
File: result.py  Project: luckynote/ijcv-2014-aam
    def image(self):
        if self._image is None:
            image_ = mio.import_image(self._image_path)
            image = Image(np.rollaxis(image_.pixels, -1))
            image.landmarks = image_.landmarks
            image.crop_to_landmarks_proportion_inplace(0.5)
            self._image = image

        return self._image
Code Example #24
def test_importing_imageio_RGB_no_normalise(is_file, mock_image):

    mock_image.return_value = np.zeros([10, 10, 3], dtype=np.uint8)
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.jpg', normalise=False)
    assert im.shape == (10, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.uint8
Code Example #25
def test_resolve_from_paths_multi_group():
    def resolver(path):
        test_dict = {'test': path.with_name('lenna.ljson')}
        return mio.input.resolve_from_paths(test_dict)

    image = mio.import_image(mio.data_path_to('einstein.jpg'),
                             landmark_resolver=resolver)
    assert (image.landmarks.n_groups == 2)
    assert (set(image.landmarks.keys()) == {'test_LJSON', 'test_pupils'})
Code Example #26
def test_resolve_from_paths_single_group():
    def resolver(path):
        test_dict = {'test': path.with_name('takeo.pts')}
        return mio.input.resolve_from_paths(test_dict)

    image = mio.import_image(mio.data_path_to('einstein.jpg'),
                             landmark_resolver=resolver)
    assert (image.landmarks.n_groups == 1)
    assert (image.landmarks['test'].path == mio.data_path_to('takeo.pts'))
Code Example #27
File: io_import_test.py  Project: dkollias/menpo
def test_importing_imageio_RGB_no_normalise(is_file, mock_image):

    mock_image.return_value = np.zeros([10, 10, 3], dtype=np.uint8)
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.jpg', normalise=False)
    assert im.shape == (10, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.uint8
Code Example #28
File: test_io_import.py  Project: eosulliv/menpo
def test_resolve_from_paths_multi_group():
    def resolver(path):
        test_dict = {"test": path.with_name("lenna.ljson")}
        return mio.input.resolve_from_paths(test_dict)

    image = mio.import_image(mio.data_path_to("einstein.jpg"),
                             landmark_resolver=resolver)
    assert image.landmarks.n_groups == 2
    assert set(image.landmarks.keys()) == {"test_LJSON", "test_pupils"}
Code Example #29
File: test_io_import.py  Project: eosulliv/menpo
def test_resolve_from_paths_single_group():
    def resolver(path):
        test_dict = {"test": path.with_name("takeo.pts")}
        return mio.input.resolve_from_paths(test_dict)

    image = mio.import_image(mio.data_path_to("einstein.jpg"),
                             landmark_resolver=resolver)
    assert image.landmarks.n_groups == 1
    assert image.landmarks["test"].path == mio.data_path_to("takeo.pts")
Code Example #30
def ply_importer(filepath, asset=None, texture_resolver=None, **kwargs):
    """Allows importing Wavefront (OBJ) files.

    Uses VTK.

    Parameters
    ----------
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    texture_resolver : `callable`, optional
        A callable that receives the mesh filepath and returns a single
        path to the texture to load.
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    shape : :map:`PointCloud` or subclass
        The correct shape for the given inputs.
    """
    import vtk
    from vtk.util.numpy_support import vtk_to_numpy

    ply_importer = vtk.vtkPLYReader()
    ply_importer.SetFileName(str(filepath))

    ply_importer.Update()

    # Get the output
    polydata = ply_importer.GetOutput()

    # We must have point data!
    points = vtk_to_numpy(polydata.GetPoints().GetData()).astype(np.float)

    trilist = np.require(vtk_ensure_trilist(polydata), requirements=['C'])

    texture = None
    if texture_resolver is not None:
        texture_path = texture_resolver(filepath)
        if texture_path is not None and texture_path.exists():
            texture = mio.import_image(texture_path)

    tcoords = None
    if texture is not None:
        try:
            tcoords = vtk_to_numpy(polydata.GetPointData().GetTCoords())
        except Exception:
            pass

        if isinstance(tcoords, np.ndarray) and tcoords.size == 0:
            tcoords = None

    colour_per_vertex = None
    return _construct_shape_type(points, trilist, tcoords, texture,
                                 colour_per_vertex)
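
For completeness, a small sketch of how an importer like the one above might be invoked with a texture resolver; the resolver convention and the file path are assumptions for illustration.

from pathlib import Path

def same_name_texture(mesh_path):
    # assumed convention: the texture sits next to the mesh with a .jpg suffix
    return Path(mesh_path).with_suffix('.jpg')

mesh = ply_importer('/data/scans/face_scan.ply', texture_resolver=same_name_texture)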
Code Example #31
File: base.py  Project: HaoyangWang/menpo3d
def ply_importer(filepath, asset=None, texture_resolver=None, **kwargs):
    """Allows importing Wavefront (OBJ) files.

    Uses VTK.

    Parameters
    ----------
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    texture_resolver : `callable`, optional
        A callable that receives the mesh filepath and returns a single
        path to the texture to load.
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    shape : :map:`PointCloud` or subclass
        The correct shape for the given inputs.
    """
    import vtk
    from vtk.util.numpy_support import vtk_to_numpy

    ply_importer = vtk.vtkPLYReader()
    ply_importer.SetFileName(str(filepath))

    ply_importer.Update()

    # Get the output
    polydata = ply_importer.GetOutput()

    # We must have point data!
    points = vtk_to_numpy(polydata.GetPoints().GetData()).astype(np.float)

    trilist = np.require(vtk_ensure_trilist(polydata), requirements=['C'])

    texture = None
    if texture_resolver is not None:
        texture_path = texture_resolver(filepath)
        if texture_path is not None and texture_path.exists():
            texture = mio.import_image(texture_path)

    tcoords = None
    if texture is not None:
        try:
            tcoords = vtk_to_numpy(polydata.GetPointData().GetTCoords())
        except Exception:
            pass

        if isinstance(tcoords, np.ndarray) and tcoords.size == 0:
            tcoords = None

    colour_per_vertex = None
    return _construct_shape_type(points, trilist, tcoords, texture,
                                 colour_per_vertex)
Code Example #32
File: test_io_import.py  Project: eosulliv/menpo
def test_custom_landmark_resolver():
    def lmark_resolver(path):
        return mio.import_landmark_file(mio.data_path_to("takeo.pts"))

    img = mio.import_image(mio.data_path_to("lenna.png"),
                           landmark_resolver=lmark_resolver)
    assert img.has_landmarks

    takeo_lmarks = mio.import_builtin_asset.takeo_pts()["PTS"]
    np.allclose(img.landmarks["PTS"].points, takeo_lmarks.points)
Code Example #33
File: data_provider.py  Project: trigeorgis/ibugnet
        def wrapper(index):
            path = self.root / (index.decode("utf-8") + self.image_extension)
            im = mio.import_image(path, normalize=False)

            im = crop_face(im)

            pixels = get_pixels(im)
            landmarks = im.landmarks[None].lms.points.astype(np.float32)

            return pixels.astype(np.float32), landmarks
Code Example #34
def test_custom_landmark_resolver():
    def lmark_resolver(path):
        return mio.import_landmark_file(mio.data_path_to('takeo.pts'))

    img = mio.import_image(mio.data_path_to('lenna.png'),
                           landmark_resolver=lmark_resolver)
    assert (img.has_landmarks)

    takeo_lmarks = mio.import_builtin_asset.takeo_pts()['PTS']
    np.allclose(img.landmarks['PTS'].points, takeo_lmarks.points)
Code Example #35
    def getImageFromFile(path):
        def load_image(i):
            i = i.crop_to_landmarks_proportion(0.5)
            if i.n_channels == 3:
                i = i.as_greyscale()
            labeller(i, 'PTS', face_ibug_68_to_face_ibug_68)
            return i

        image_path = Path(path)
        i = load_image(mio.import_image(image_path))
        return i
Code Example #36
File: io_import_test.py  Project: dvdm/menpo
def test_importing_PIL_1_no_normalize(is_file, mock_image):
    from menpo.image import BooleanImage

    mock_image.return_value = PILImage.new('1', (10, 10))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)
    assert im.shape == (10, 10)
    assert im.n_channels == 1
    assert im.pixels.dtype == np.bool
    assert type(im) == BooleanImage
Code Example #37
File: io_import_test.py  Project: dvdm/menpo
def test_importing_PIL_RGBA_normalize(is_file, mock_image):
    from menpo.image import MaskedImage

    mock_image.return_value = PILImage.new('RGBA', (10, 10))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.ppm', normalize=True)
    assert im.shape == (10, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.float
    assert type(im) == MaskedImage
Code Example #38
def test_custom_landmark_resolver():
    def lmark_resolver(path):
        return {'PTS': mio.data_path_to('takeo.pts')}

    img = mio.import_image(mio.data_path_to('lenna.png'),
                           landmark_resolver=lmark_resolver)
    assert(img.has_landmarks)

    takeo_lmarks = mio.import_builtin_asset.takeo_pts()
    np.allclose(img.landmarks['PTS'].lms.points,
                takeo_lmarks.lms.points)
Code Example #39
def test_importing_PIL_1_no_normalize(is_file, mock_image):
    from menpo.image import BooleanImage

    mock_image.return_value = PILImage.new('1', (10, 15))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)
    assert im.shape == (15, 10)
    assert im.n_channels == 1
    assert im.pixels.dtype == np.bool
    assert type(im) == BooleanImage
Code Example #40
def test_importing_PIL_RGBA_normalize(is_file, mock_image):
    from menpo.image import MaskedImage

    mock_image.return_value = PILImage.new('RGBA', (10, 15))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.ppm', normalize=True)
    assert im.shape == (15, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.float
    assert type(im) == MaskedImage
Code Example #41
File: captureImage.py  Project: Deathstroke7/lipRead
    def getImageFromFile(path):
        def load_image(i):
            i = i.crop_to_landmarks_proportion(0.5)
            if i.n_channels == 3:
                i = i.as_greyscale()
            labeller(i, 'PTS', face_ibug_68_to_face_ibug_68)
            return i

        image_path = Path(path)
        i = load_image(mio.import_image(image_path))
        return i
Code Example #42
File: io_import_test.py  Project: dkollias/menpo
def test_importing_imageio_GIF_no_normalise(is_file, mock_image):
    mock_image.return_value.get_data.return_value = np.ones((10, 10, 3),
                                                            dtype=np.uint8)
    mock_image.return_value.get_length.return_value = 2
    is_file.return_value = True

    ll = mio.import_image('fake_image_being_mocked.gif', normalise=False)
    assert len(ll) == 2

    im = ll[0]
    assert im.shape == (10, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.uint8
Code Example #43
File: io_import_test.py  Project: dvdm/menpo
def test_register_image_importer(is_file):
    from menpo.image import Image
    image = Image.init_blank((10, 10))

    def foo_importer(filepath, **kwargs):
        return image

    is_file.return_value = True

    with patch.dict(mio.input.extensions.image_types, {}, clear=True):
        mio.register_image_importer('.foo', foo_importer)
        new_image = mio.import_image('fake.foo')
    assert image is new_image
Code Example #44
def load_images(list_frames, frames_path, path_land, clip_name, max_images=None,
                training_images=None, crop_reading=0.3, pix_thres=330, feat=None):
    """
    Read images from the clips that are processed. The landmarks may live in a different
    folder; they are located by the image's stem with a .pts extension.
    :param list_frames:         List of images that will be read and loaded.
    :param frames_path:         Path to the folder of images.
    :param path_land:           Path of the respective landmarks.
    :param clip_name:           The name of the clip being processed.
    :param max_images:          (optional) Max images that will be loaded from this clip.
    :param training_images:     (optional) List of images to append the new ones.
    :param crop_reading:        (optional) Amount of cropping the image around the landmarks.
    :param pix_thres:           (optional) If the cropped image has a dimension bigger than this, it gets cropped to this diagonal dimension.
    :param feat:                (optional) Features to be applied to the images before inserting them to the list.
    :return:                    List of menpo images.
    """
    from random import shuffle
    if not check_path_and_landmarks(frames_path, clip_name, path_land):
        return []
    if feat is None:
        feat = no_op
    if training_images is None:
        training_images = []
    shuffle(list_frames)            # shuffle the list to ensure random ones are chosen
    if max_images is None:
        max_images = len(list_frames)
    elif max_images < 0:
        print('Warning: max_images cannot be negative; loading the whole list instead.')
        max_images = len(list_frames)
    cnt = 0  # counter for images appended to the list
    for frame_name in list_frames:
        try:
            im = mio.import_image(frames_path + frame_name, normalise=True)
        except ValueError:                                      # in case the extension is unknown (by menpo)
            print('Ignoring the \'image\' {}.'.format(frame_name))
            continue
        res = glob.glob(path_land + clip_name + sep + im.path.stem + '*.pts')
        if len(res) == 0:                       # if the image does not have any existing landmarks, ignore it
            continue
        elif len(res) > 1:
            #_r = randint(0,len(res)-1); #just for debugging reasons in different variable
            #ln = mio.import_landmark_file(res[_r]) # in case there are plenty of landmarks for the image, load random ones
            print('The image {} has more than one landmark file for one person; loading only the first one.'.format(frame_name))
        ln = mio.import_landmark_file(res[0])
        im.landmarks['PTS'] = ln
        im = crop_rescale_img(im, crop_reading=crop_reading, pix_thres=pix_thres)
        training_images.append(feat(im))
        cnt += 1
        if cnt >= max_images:
            break  # the limit of images (appended to the list) is reached
    return training_images
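
A hedged call sketch for load_images; every path and clip name below is a placeholder.

import os
frames = sorted(os.listdir('/data/clips/clip_01/'))
training_images = load_images(frames, '/data/clips/clip_01/', '/data/landmarks/',
                              'clip_01', max_images=100)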
Code Example #45
def test_importing_PIL_1_proper_conversion(is_file, mock_image):
    from menpo.image import BooleanImage
 
    arr = np.zeros((10, 10), dtype=np.uint8)
    arr[4, 4] = 255
    mock_image.return_value = PILImage.fromarray(arr).convert('1')
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)
    assert im.shape == (10, 10)
    assert im.n_channels == 1
    assert im.pixels.dtype == np.bool
    assert type(im) == BooleanImage
    assert np.all(im.pixels == arr.astype(np.bool))
Code Example #46
File: data_provider.py  Project: trigeorgis/mdm
def load_image(path, reference_shape, is_training=False, group='PTS',
               mirror_image=False):
    """Load an annotated image.

    In the directory of the provided image file, there
    should exist a landmark file (.pts) with the same
    basename as the image file.

    Args:
      path: a path containing an image file.
      reference_shape: a numpy array [num_landmarks, 2]
      is_training: whether in training mode or not.
      group: landmark group containing the ground truth landmarks.
      mirror_image: whether to horizontally flip the image's pixels and landmarks.
    Returns:
      pixels: a numpy array [width, height, 3].
      estimate: an initial estimate a numpy array [68, 2].
      gt_truth: the ground truth landmarks, a numpy array [68, 2].
    """
    im = mio.import_image(path)
    bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
    if 'set' not in str(bb_root):
        bb_root = im.path.parent.relative_to(im.path.parent.parent)

    im.landmarks['bb'] = mio.import_landmark_file(str(Path('bbs') / bb_root / (
        im.path.stem + '.pts')))

    im = im.crop_to_landmarks_proportion(0.3, group='bb')
    reference_shape = PointCloud(reference_shape)

    bb = im.landmarks['bb'].lms.bounding_box()

    im.landmarks['__initial'] = align_shape_with_bounding_box(reference_shape,
                                                              bb)
    im = im.rescale_to_pointcloud(reference_shape, group='__initial')

    if mirror_image:
        im = utils.mirror_image(im)

    lms = im.landmarks[group].lms
    initial = im.landmarks['__initial'].lms

    # if the image is greyscale then convert to rgb.
    pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)

    gt_truth = lms.points.astype(np.float32)
    estimate = initial.points.astype(np.float32)
    return pixels.astype(np.float32).copy(), gt_truth, estimate
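
A minimal sketch of calling the loader above; the image path and 68-point reference shape are placeholders, and the bounding-box .pts files are assumed to be laid out under a local bbs/ directory as the function expects.

import numpy as np
reference_shape = np.zeros((68, 2), dtype=np.float32)  # placeholder mean shape
pixels, gt_truth, estimate = load_image('/data/300w/trainset/image_0001.png',
                                        reference_shape, is_training=True)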
Code Example #47
File: io_import_test.py  Project: dvdm/menpo
def test_importing_ffmpeg_GIF_no_normalize(is_file, video_infos_ffprobe, pipe):
    video_infos_ffprobe.return_value = {'duration': 2, 'width': 100,
                                        'height': 150, 'n_frames': 10, 'fps': 5}
    empty_frame = np.zeros(150*100*3, dtype=np.uint8).tostring()
    pipe.return_value.stdout.read.return_value = empty_frame
    is_file.return_value = True

    ll = mio.import_image('fake_image_being_mocked.gif', normalize=False)
    assert ll.path.name == 'fake_image_being_mocked.gif'
    assert ll.fps == 5
    assert len(ll) == 10

    im = ll[0]
    assert im.shape == (150, 100)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.uint8
Code Example #48
def im_read_greyscale(frame_name, frames_path, img_type, normalise=True):
    """
    The function reads an image with name frame_name in frames_path and returns the greyscale menpo image.
    :param frame_name:  Name of the frame.
    :param frames_path: Folder of the images (assumption that it exists).
    :param img_type:    Type/extension of the image.
    :param normalise:   (optional) Whether the image should be normalised when imported.
    :return:            Menpo greyscale image or [] if not found.
    """
    if frame_name[frame_name.rfind('.'):] != img_type:
        return []  # in case they are something different than an image
    try:
        im = mio.import_image(frames_path + frame_name, normalise=normalise)
        if im.n_channels == 3 and normalise:
            im = im.as_greyscale(mode='luminosity')
        elif im.n_channels == 3:
            im = im.as_greyscale(mode='channel', channel=1)
        return im
    except Exception:
        print('Potentially wrong path or wrong image.')
        return []
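
A quick usage sketch with placeholder paths.

im = im_read_greyscale('000001.png', '/data/clip_01/frames/', '.png')
if isinstance(im, list):
    print('frame could not be read')  # the helper returns [] on failure
else:
    print(im.shape, im.n_channels)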
Code Example #49
File: base.py  Project: HaoyangWang/menpo3d
def mjson_importer(filepath, asset=None, texture_resolver=None, **kwargs):
    """
    Import meshes that are in a simple JSON format.

    Parameters
    ----------
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    texture_resolver : `callable`, optional
        A callable that receives the mesh filepath and returns a single
        path to the texture to load.
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    shape : :map:`PointCloud` or subclass
        The correct shape for the given inputs.
    """
    with open(str(filepath), 'rb') as f:
        mesh_json = json.load(f)

    texture = None
    if texture_resolver is not None:
        texture_path = texture_resolver(filepath)
        if texture_path is not None and texture_path.exists():
            texture = mio.import_image(texture_path)

    points = mesh_json['points']
    trilist = mesh_json['trilist']
    tcoords = mesh_json.get('tcoords')
    colour_per_vertex = mesh_json.get('colour_per_vertex')

    return _construct_shape_type(points, trilist, tcoords, texture,
                                 colour_per_vertex)
Code Example #50
File: io_import_test.py  Project: csagonas/menpo
def test_import_image_no_norm():
    img_path = os.path.join(mio.data_dir_path(), 'einstein.jpg')
    im = mio.import_image(img_path, normalise=False)
    assert im.pixels.dtype == np.uint8
Code Example #51
def plot_image_latex_with_subcaptions(folds, pb, pout, name_im, legend_names=None,
                                      normalise=None, allow_fail=False,
                                      overwr=True):
    """
    Customised function for my papers. It plots variations of an image (i.e. different
        images) next to each other with the respective legend names.
    The idea is: Import one by one from the folds, normalise (e.g. resize) and export each
        with a predictable name. Write the tex file and compile it to create the image
        with the several subplots and the custom labels.
    Attention: Because of latex compilation, this function writes and reads from the disk,
        so pay attention to the pout path.
    :param folds: (list) Names of the parent folders to search the image to. The assumption
        is that all those are relative to pb path.
    :param pb:    (str) Base path where the images to be imported exist.
    :param pout:  (str) Path to export the result in. The method will write the result in a
        new sub-folder named 'concatenated'.
    :param name_im: (str) Name (stem + suffix) of the image to be imported from folds.
    :param legend_names: (optional, list or None) If provided, it should match in length the
        folds; each one will be respectively provided as a sub-caption to the respective image.
    :param normalise: (optional, list of functions or None) If not None, each function
        accepts a menpo image and returns the normalised (e.g. resized) image.
    :param allow_fail: (optional, list or bool) If bool, it is converted into a list of
        length(folds). The images from folds that do not exist
        will be ignored if allow_fail is True.
    :param overwr:     (optional, bool) To overwrite or not the intermediate results written.
    :return:
    # TODO: extend the formulation to provide freedom in the number of elements per line etc.
    """
    # # short lambda for avoiding the long import command.
    import_im = lambda p, norm=False: mio.import_image(p, landmark_resolver=None,
                                                       normalize=norm)
    # # names_imout: Names of the output images in the disk.
    # # names_meth: Method of the name to put in the sub-caption.
    names_imout, names_meth = [], []
    # # if allow_fail is provided as a single boolean, convert into a list, i.e.
    # # each one of the folders has different permissions.
    if not isinstance(allow_fail, list):
        allow_fail = [allow_fail for _ in range(len(folds))]
    # # if normalise is provided as a single boolean, convert into a list.
    if not isinstance(normalise, list):
        normalise = [normalise for _ in range(len(folds))]

    for cnt, fold in enumerate(folds):
        if allow_fail[cnt]:
            # # In this case, we don't mind if an image fails.
            try:
                im = import_im(join(pb, fold, name_im))
            except:
                continue
        else:
            im = import_im(join(pb, fold, name_im))
        # # get the name for the sub-caption (legend).
        if legend_names is not None:
            if '_' in legend_names[cnt]:
                print('WARNING: `_` found in legend name; possible issue with latex.')
            names_meth.append(legend_names[cnt])
        else:
            assert 0, 'Not implemented for now! Need to use map_to_name()'
        # # Optionally resize the image.
        if normalise[cnt]:
            im = normalise[cnt](im)
        # # export the image into the disk and append the name exported in the list.
        nn = '{}_{}'.format(Path(fold).stem, im.path.name)
        mio.export_image(im, pout + nn, overwrite=overwr)
        names_imout.append(nn)

    # # export into a file the latex command.
    nlat = Path(name_im).stem
    fo = open(pout + '{}.tex'.format(nlat),'wt')
    fo.writelines(('\\documentclass{article}\\usepackage{amsmath}'
                   '\n\\usepackage{graphicx}\\usepackage{subfig}'
                   '\\begin{document}\n'))
    list_to_latex(names_imout, wrap_subfloat=True, names_subfl=names_meth, pbl='',
                  file_to_print=fo, caption=False)
    fo.writelines('\\thispagestyle{empty}\\end{document}\n')
    fo.close()

    # # the concatenated for the final png
    pout1 = Path(mkdir_p(join(pout, 'concatenated', '')))
    # # create the png image and delete the tex and intermediate results.
    cmd = ('cd {0}; pdflatex {1}.tex; pdfcrop {1}.pdf;'
           'rm {1}.aux {1}.log {1}.pdf; mv {1}-crop.pdf {2}.pdf;'
           'pdftoppm -png {2}.pdf > {2}.png; rm {2}.pdf; rm {0}*.png; rm {0}*.tex')
    nconc = pout1.stem + sep + nlat
    return popen(cmd.format(pout, nlat, nconc))
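
A hedged call sketch; the folder names, legend labels and resize function are illustrative only (the function additionally needs pdflatex, pdfcrop and pdftoppm on the PATH).

folds = ['method_a', 'method_b', 'ground_truth']
legends = ['Method A', 'Method B', 'GT']
resize = lambda im: im.resize([300, 300])
plot_image_latex_with_subcaptions(folds, '/data/results/', '/data/figs/',
                                  'img_0001.png', legend_names=legends,
                                  normalise=[resize] * len(folds))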
Code Example #52
File: io_import_test.py  Project: dvdm/menpo
def test_importing_PIL_I_normalize(is_file, mock_image):
    mock_image.return_value = PILImage.new('I', (10, 10))
    is_file.return_value = True

    im = mio.import_image('fake_image_being_mocked.ppm', normalize=True)
Code Example #53
def build_all_models_frgc(images, ref_frame_path, subject_id,
                          out_path='/vol/atlas/homes/pts08/',
                          transform_class=ThinPlateSplines,
                          square_mask=False):
    print "Beginning model creation for {0}".format(subject_id)
    # Build reference frame
    ref_frame = mio.import_image(ref_frame_path)
    labeller([ref_frame], 'PTS', ibug_68_closed_mouth)
    ref_frame.crop_to_landmarks(boundary=2, group='ibug_68_closed_mouth',
                                label='all')
    if not square_mask:
        ref_frame.constrain_mask_to_landmarks(group='ibug_68_closed_mouth',
                                              label='all')

    reference_shape = ref_frame.landmarks['ibug_68_closed_mouth'].lms

    # Extract all shapes
    labeller(images, 'PTS', ibug_68_closed_mouth)
    shapes = [img.landmarks['ibug_68_closed_mouth'].lms for img in images]

    # Warp each of the images to the reference image
    print "Warping all frgc shapes to reference frame of {0}".format(subject_id)
    tps_transforms = [transform_class(reference_shape, shape) for shape in shapes]
    warped_images = [img.warp_to(ref_frame.mask, t)
                     for img, t in zip(images, tps_transforms)]

    # Calculate the normal matrix
    print 'Extracting all normals'
    normal_matrix = extract_normals(warped_images)

    # Save memory by deleting all the images since we don't need them any more.
    # Keep one around that we can query for its size etc.
    example_image = deepcopy(warped_images[0])
    del warped_images[:]

    # Normals
    print 'Computing normal feature space'
    normal_images = create_feature_space(normal_matrix, example_image,
                                         'normals', subject_id,
                                         out_path=out_path)

    # Spherical
    print 'Computing spherical feature space'
    spherical_matrix = Spherical().logmap(normal_matrix)
    spherical_images = create_feature_space(spherical_matrix, example_image,
                                            'spherical', subject_id,
                                            out_path=out_path)

    # AEP
    print 'Computing AEP feature space'
    mean_normals = normalise_vector(np.mean(normal_matrix, 0))
    aep_matrix = AEP(mean_normals).logmap(normal_matrix)
    aep_images = create_feature_space(aep_matrix, example_image, 'aep',
                                      subject_id,
                                      out_path=out_path)

    # PGA
    print 'Computing PGA feature space'
    mu = intrinsic_mean(normal_matrix, PGA, max_iters=50)
    pga_matrix = PGA(mu).logmap(normal_matrix)
    pga_images = create_feature_space(pga_matrix, example_image, 'pga',
                                      subject_id,
                                      out_path=out_path)

    # PCA models
    n_components = 200
    print 'Computing PCA models ({} components)'.format(n_components)
    template = ref_frame

    normal_model = PCAModel(normal_images, center=True)
    normal_model.trim_components(200)
    cosine_model = PCAModel(normal_images, center=False)
    cosine_model.trim_components(200)
    spherical_model = PCAModel(spherical_images, center=False)
    spherical_model.trim_components(200)
    aep_model = PCAModel(aep_images, center=False)
    aep_model.trim_components(200)
    pga_model = PCAModel(pga_images, center=False)
    pga_model.trim_components(200)

    mean_normals_image = normal_model.mean
    mu_image = mean_normals_image.from_vector(mu)

    # Save out models
    pickle_model(out_path, subject_id, 'normal', normal_model, template,
                 mean_normals)
    pickle_model(out_path, subject_id, 'cosine', cosine_model, template,
                 mean_normals)
    pickle_model(out_path, subject_id, 'spherical', spherical_model, template,
                 mean_normals)
    pickle_model(out_path, subject_id, 'aep', aep_model, template,
                 mean_normals)
    pickle_model(out_path, subject_id, 'pga', pga_model, template,
                 mean_normals, intrinsic_means=mu_image)
Code Example #54
File: io_import_test.py  Project: dvdm/menpo
def test_import_image_no_norm():
    img_path = mio.data_dir_path() / 'einstein.jpg'
    im = mio.import_image(img_path, normalize=False)
    assert im.pixels.dtype == np.uint8
Code Example #55
File: io_import_test.py  Project: dvdm/menpo
def test_import_image():
    img_path = mio.data_dir_path() / 'einstein.jpg'
    im = mio.import_image(img_path)
    assert im.pixels.dtype == np.float
    assert im.n_channels == 1
Code Example #56
File: detector_test.py  Project: VLAM3D/menpodetect
from mock import MagicMock
import numpy as np
from numpy.testing import assert_allclose

from menpo.shape import PointDirectedGraph
from menpodetect.detect import (detect, menpo_image_to_uint8)
import menpo.io as mio


takeo = mio.import_builtin_asset.takeo_ppm()
takeo_uint8 = mio.import_image(mio.data_path_to('takeo.ppm'), normalize=False)
fake_box = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
fake_detector = lambda x: ([PointDirectedGraph.init_from_edges(
    fake_box.copy(),
    np.array([[0, 1], [1, 2], [2, 3], [3, 0]]))])


def test_rescaling_image():
    takeo_copy = takeo.copy()
    ratio = 200.0 / takeo_copy.diagonal()
    pcs = detect(fake_detector, takeo_copy, image_diagonal=200)
    assert len(pcs) == 1
    assert takeo_copy.n_channels == 3
    assert takeo_copy.landmarks['object_0'][None].n_points == 4
    assert_allclose(takeo_copy.landmarks['object_0'][None].points,
                    fake_box * (1.0 / ratio), atol=10e-2)


def test_passing_uint8_image():
    takeo_copy = takeo_uint8.copy()
    pcs = detect(fake_detector, takeo_copy, greyscale=False)
Code Example #57
File: io_import_test.py  Project: jacksoncsy/menpo
def test_importing_GIF_non_pallete_exception(is_file, mock_image):
    mock_image.return_value = PILImage.new('RGB', (10, 10))
    is_file.return_value = True

    mio.import_image('fake_image_being_mocked.gif', normalise=False)
Code Example #58
File: base.py  Project: HaoyangWang/menpo3d
def wrl_importer(filepath, asset=None, texture_resolver=None, **kwargs):
    """Allows importing VRML 2.0 meshes.

    Uses VTK and assumes that the first actor in the scene is the one
    that we want.

    Parameters
    ----------
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    texture_resolver : `callable`, optional
        A callable that receives the mesh filepath and returns a single
        path to the texture to load.
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    shape : :map:`PointCloud` or subclass
        The correct shape for the given inputs.
    """
    import vtk
    from vtk.util.numpy_support import vtk_to_numpy

    vrml_importer = vtk.vtkVRMLImporter()
    vrml_importer.SetFileName(str(filepath))
    vrml_importer.Update()

    # Get the first actor.
    actors = vrml_importer.GetRenderer().GetActors()
    actors.InitTraversal()
    mapper = actors.GetNextActor().GetMapper()
    mapper_dataset = mapper.GetInput()

    if actors.GetNextActor():
        # There was more than one actor!
        warnings.warn('More than one actor was detected in the scene. Only '
                      'single scene actors are currently supported.')

    # Get the Data
    polydata = vtk.vtkPolyData.SafeDownCast(mapper_dataset)

    # We must have point data!
    points = vtk_to_numpy(polydata.GetPoints().GetData()).astype(np.float)

    trilist = vtk_ensure_trilist(polydata)

    texture = None
    if texture_resolver is not None:
        texture_path = texture_resolver(filepath)
        if texture_path is not None and texture_path.exists():
            texture = mio.import_image(texture_path)

    # Three different outcomes - either we have a textured mesh, a coloured
    # mesh or just a plain mesh. Let's try each in turn.

    # Textured
    tcoords = None
    try:
        tcoords = vtk_to_numpy(polydata.GetPointData().GetTCoords())
    except Exception:
        pass

    if isinstance(tcoords, np.ndarray) and tcoords.size == 0:
        tcoords = None

    # Colour-per-vertex
    colour_per_vertex = None
    try:
        colour_per_vertex = vtk_to_numpy(mapper.GetLookupTable().GetTable()) / 255.
    except Exception:
        pass

    if isinstance(colour_per_vertex, np.ndarray) and colour_per_vertex.size == 0:
        colour_per_vertex = None

    return _construct_shape_type(points, trilist, tcoords, texture,
                                 colour_per_vertex)
Code Example #59
def test_landmark_resolver_none():
    img = mio.import_image(mio.data_path_to('lenna.png'),
                           landmark_resolver=None)
    assert(not img.has_landmarks)