Example #1
def load_image_test(path, reference_shape, frame_num):
    file_name = path[:-1] + "/%06d.jpg" % (frame_num)

    im = mio.import_image(file_name)

    im.landmarks['PTS'] = mio.import_landmark_file(path[:-1] +
                                                   "/annot/%06d.pts" %
                                                   (frame_num))
    # im.landmarks['PTS'] = mio.import_landmark_file(path[:-1] + "/%06d.pts" % (frame_num))
    bb_path = path[:-1] + "/bbs/%06d.pts" % (frame_num)

    im.landmarks['bb'] = mio.import_landmark_file(bb_path)

    im = im.crop_to_landmarks_proportion(0.3, group='bb')
    reference_shape = PointCloud(reference_shape)

    bb = im.landmarks['bb'].lms.bounding_box()

    im.landmarks['__initial'] = align_shape_with_bounding_box(
        reference_shape, bb)
    im = im.rescale_to_pointcloud(reference_shape, group='__initial')

    lms = im.landmarks['PTS'].lms
    initial = im.landmarks['__initial'].lms

    # if the image is greyscale then convert to rgb.
    pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)

    gt_truth = lms.points.astype(np.float32)
    estimate = initial.points.astype(np.float32)

    return 1, pixels.astype(np.float32).copy(), gt_truth, estimate
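
The helpers align_shape_with_bounding_box and grey_to_rgb used above come from the surrounding project and are not shown in this snippet. A minimal sketch of what the alignment step might look like with menpo's AlignmentSimilarity, assuming a rotation-free similarity fit between the two bounding boxes is sufficient:

from menpo.transform import AlignmentSimilarity

def align_shape_with_bounding_box(shape, bounding_box):
    # Similarity-align the reference shape's own bounding box to the detected
    # one, then carry the whole shape along with that transform (sketch only).
    transform = AlignmentSimilarity(shape.bounding_box(), bounding_box,
                                    rotation=False)
    return transform.apply(shape)
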
def process_frame(frame_name, clip, img_type, svm_p, loop=False):
    """
    Applies the AAM fitter (global variable) to a frame. Additionally, it may apply an
    SVM to verify that it is a face, if required.
    :param frame_name: str: Name of the frame along with extension, e.g. '000001.png'.
    :param clip:       str: Name of the clip.
    :param img_type:   str: Suffix (extension) of the frames, e.g. '.png'.
    :param svm_p:      dict: Required params for SVM classification.
    :param loop:       bool: (optional) Declares whether this is a 2nd fit for AAM (loop).
    :return:
    """
    global fitter
    name = frame_name[:frame_name.rfind('.')]
    p0 = clip.path_read_ln[0] + name + '_0.pts'
    # find if this is 2nd fit or 1st.
    if loop:  # if 2nd fit, then if landmark is 'approved', return. Otherwise proceed.
        try:
            ln = import_landmark_file(p0)
            copy2(p0, clip.path_write_ln[0] + name + '_0.pts')
            return  # if the landmark already exists, return (for performance improvement)
        except ValueError:
            pass
        try:
            ln = import_landmark_file(clip.path_read_ln[1] + name + '_0.pts')
        except ValueError:  # either not found or no suitable importer
            return
    else:
        try:
            ln = import_landmark_file(p0)
        except ValueError:  # either not found or no suitable importer
            return
    im = im_read_greyscale(frame_name, clip.path_frames, img_type)
    if not im:
        return
    im.landmarks['PTS2'] = ln
    # fitting can be faster if the image is cropped in advance, though
    # you should save the transform to get back to the original shape,
    # hence here we just leave the original image.
    fr = fitter.fit_from_shape(im, im.landmarks['PTS2'].lms)
    p_wr = clip.path_write_ln[0] + im.path.stem + '_0.pts'
    im.landmarks['ps_pbaam'] = fr.final_shape
    export_landmark_file(im.landmarks['ps_pbaam'], p_wr, overwrite=True)

    # apply SVM classifier by extracting patches (is face or not).
    if not svm_p['apply']:
        return
    im_cp = im.crop_to_landmarks_proportion(0.2, group='ps_pbaam')
    im_cp = svm_p['feat'](im_cp)
    im2 = warp_image_to_reference_shape(im_cp, svm_p['refFrame'], 'ps_pbaam')
    _p_nd = im2.extract_patches_around_landmarks(
        group='source', as_single_array=True,
        patch_shape=svm_p['patch_s']).flatten()
    if svm_p['clf'].decision_function(_p_nd) > 0:
        copy2(p_wr, clip.path_write_ln[1] + im.path.stem + '_0.pts')
def process_frame(frame_name, clip, img_type, svm_p, loop=False):
    """
    Applies the AAM fitter (global variable) to a frame. Additionally, it may apply an
    SVM to verify that it is a face, if required.
    :param frame_name: str: Name of the frame along with extension, e.g. '000001.png'.
    :param clip:       str: Name of the clip.
    :param img_type:   str: Suffix (extension) of the frames, e.g. '.png'.
    :param svm_p:      dict: Required params for SVM classification.
    :param loop:       bool: (optional) Declares whether this is a 2nd fit for AAM (loop).
    :return:
    """
    global fitter
    name = frame_name[:frame_name.rfind('.')]
    p0 = clip.path_read_ln[0] + name + '_0.pts'
    # find if this is 2nd fit or 1st.
    if loop:  # if 2nd fit, then if landmark is 'approved', return. Otherwise proceed.
        try:
            ln = import_landmark_file(p0)
            copy2(p0, clip.path_write_ln[0] + name + '_0.pts')
            return      # if the landmark already exists, return (for performance improvement)
        except ValueError:
            pass
        try:
            ln = import_landmark_file(clip.path_read_ln[1] + name + '_0.pts')
        except ValueError:  # either not found or no suitable importer
            return
    else:
        try:
            ln = import_landmark_file(p0)
        except ValueError:  # either not found or no suitable importer
            return
    im = im_read_greyscale(frame_name, clip.path_frames, img_type)
    if not im:
        return
    im.landmarks['PTS2'] = ln
    fr = fitter.fit_from_shape(im, im.landmarks['PTS2'].lms, crop_image=0.3)
    p_wr = clip.path_write_ln[0] + im.path.stem + '_0.pts'
    export_landmark_file(fr.fitted_image.landmarks['final'], p_wr, overwrite=True)

    # apply SVM classifier by extracting patches (is face or not).
    if not svm_p['apply']:
        return
    im.landmarks.clear()  # temp solution
    im.landmarks['ps_pbaam'] = fr.fitted_image.landmarks['final']
    im_cp = im.crop_to_landmarks_proportion(0.2, group='ps_pbaam')
    im_cp = svm_p['feat'](im_cp)
    im2 = warp_image_to_reference_shape(im_cp, svm_p['refFrame'], 'ps_pbaam')
    _p_nd = im2.extract_patches_around_landmarks(group='source', as_single_array=True,
                                                 patch_shape=svm_p['patch_s']).flatten()
    if svm_p['clf'].decision_function(_p_nd) > 0:
        copy2(p_wr, clip.path_write_ln[1] + im.path.stem + '_0.pts')
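
Both variants of process_frame above probe for an existing landmark file by catching ValueError, which menpo.io.import_landmark_file raises when the file is missing or when no importer matches the extension. A small standalone sketch of that pattern (the commented path is a placeholder):

import menpo.io as mio

def try_import_landmarks(path):
    # Return the imported landmarks, or None when the file is absent or the
    # extension has no registered importer (both surface as ValueError).
    try:
        return mio.import_landmark_file(path)
    except ValueError:
        return None

# lms = try_import_landmarks('/data/clip_01/000001_0.pts')  # placeholder path
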
Example #4
def test_importing_v3_ljson_null_values(is_file, mock_open, mock_dict):
    v3_ljson = {
        "groups": {
            "LJSON": {
                "labels": [{
                    "label": "left_eye",
                    "mask": [0, 1, 2]
                }, {
                    "label": "right_eye",
                    "mask": [3, 4, 5]
                }],
                "landmarks": {
                    "connectivity": [[0, 1], [1, 2], [2, 0], [3, 4], [4, 5],
                                     [5, 3]],
                    "points": [[None, 200.5], [None, None], [316.8, 199.15],
                               [339.48, 205.0], [358.54, 217.82],
                               [375.0, 233.4]]
                }
            }
        },
        "version": 3
    }

    mock_dict.return_value = v3_ljson
    is_file.return_value = True
    lmark_dict = mio.import_landmark_file('fake_lmark_being_mocked.ljson')
    assert isinstance(lmark_dict, dict)
    lmark = lmark_dict['LJSON']
    nan_points = np.isnan(lmark.points)
    assert nan_points[0, 0]  # y-coord None point is nan
    assert not nan_points[0, 1]  # x-coord point is not nan
    assert np.all(nan_points[1, :])  # all of leye label is nan
def main():
    path_to_neutral = Path(
        '/Users/lls/Documents/face/data/headpose/Angle/neutral0/')
    path_to_smile = Path(
        '/Users/lls/Documents/face/data/headpose/Angle/down30')
    path_to_source = Path('/Users/lls/Documents/face/data/trump/trump')

    # PDM shape
    neutral = PDMModel(path_to_neutral, 20)[0].model.mean()
    smile = PDMModel(path_to_smile, 20)[0].model.mean()
    source_shape, source_img = PDMModel(path_to_source, 20)
    p_smile = project(smile, source_shape)
    p_neutral = project(neutral, source_shape)
    delta = (p_smile - p_neutral) * 1.5
    ptsPath = '/Users/lls/Documents/face/data/trump/trump/trump_13.pts'
    trumpShape = mio.import_landmark_file(ptsPath).lms
    p_i = project(trumpShape, source_shape)
    new_p_i = p_i + delta
    reconstructed_img_i = source_shape.model.instance(new_p_i)
    trans_reconstructed_img_i = AlignmentAffine(reconstructed_img_i,
                                                trumpShape)
    reconstructed_img_i_pc = trans_reconstructed_img_i.apply(
        reconstructed_img_i)
    plt.subplot(241)
    reconstructed_img_i_pc.view()
    plt.gca().set_title('reconstructed_img_i_pc')
    plt.subplot(242)
    trumpShape.view()
    plt.gca().set_title('trumpShape')
    plt.show()
Example #6
def test_importing_v2_ljson_null_values(is_file, mock_open, mock_dict):
    v2_ljson = {
        "labels": [{
            "label": "left_eye",
            "mask": [0, 1, 2]
        }, {
            "label": "right_eye",
            "mask": [3, 4, 5]
        }],
        "landmarks": {
            "connectivity": [[0, 1], [1, 2], [2, 0], [3, 4], [4, 5], [5, 3]],
            "points": [[None, 200.5], [None, None], [316.8, 199.15],
                       [339.48, 205.0], [358.54, 217.82], [375.0, 233.4]]
        },
        "version":
        2
    }

    mock_dict.return_value = v2_ljson
    is_file.return_value = True
    with warnings.catch_warnings(record=True) as w:
        lmark = mio.import_landmark_file('fake_lmark_being_mocked.ljson',
                                         group='LJSON')
    nan_points = np.isnan(lmark.points)
    assert nan_points[0, 0]  # y-coord None point is nan
    assert not nan_points[0, 1]  # x-coord point is not nan
    assert np.all(nan_points[1, :])  # all of leye label is nan
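
The assertions above rely on null LJSON entries being imported as NaN coordinates. A minimal standalone illustration of the same check, built directly from a PointCloud rather than a mocked file:

import numpy as np
from menpo.shape import PointCloud

# The same points as the mocked v2 LJSON above, with None replaced by NaN,
# which is what the importer produces for null values.
points = np.array([[np.nan, 200.5], [np.nan, np.nan], [316.8, 199.15],
                   [339.48, 205.0], [358.54, 217.82], [375.0, 233.4]])
nan_points = np.isnan(PointCloud(points).points)
assert nan_points[0, 0] and not nan_points[0, 1]
assert np.all(nan_points[1, :])
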
Example #7
def test():
    img = mio.import_image(
        '/home/sean/workplace/221/py-R-FCN-test/data/DB/face/300-w_face/otherDB/aflw-full/testset/0_image00002_1.jpg'
    )

    if img.n_channels != 1:
        img = img.as_greyscale()

    img.landmarks['face'] = mio.import_landmark_file(
        '/home/sean/workplace/221/py-R-FCN-test/data/DB/face/300-w_face/temp/indoor_001.pts'
    )

    # objects return copies rather than mutating self, so we can chain calls
    img = (img.crop_to_landmarks(
        group='face',
        boundary=10).rescale_landmarks_to_diagonal_range(100, group='face'))

    # now lets take an image feature...
    img = fast_dsift(img)

    # ...and extract the vector of pixels contained in the
    # convex hull of the face...
    vector = img.as_masked().constrain_mask_to_landmarks(
        group='face').as_vector()

    print(type(vector), vector.shape)
def serialize_sample(writer, subject_id):
    subject_name = 'P{}'.format(subject_id)

    for i, (video, audio, label) in enumerate(zip(*get_samples(subject_name))):

        frame = Image.init_from_channels_at_back(video)
        lms_path = landmarks_directory / subject_name / "{}.pts".format(i)

        try:
            lms = mio.import_landmark_file(lms_path)
        except Exception as e:
            print('Landmark file [{}] could not be imported'.format(i))
            print('Exception message : {}'.format(e))
            continue

        frame.landmarks['PTS'] = lms
        frame = crop_face(frame)

        example = tf.train.Example(features=tf.train.Features(
            feature={
                'sample_id': _int_feauture(i),
                'subject_id': _int_feauture(subject_id),
                'label': _bytes_feauture(label.tobytes()),
                'raw_audio': _bytes_feauture(audio.tobytes()),
                'frame': _bytes_feauture(get_jpg_string(frame))
            }))

        writer.write(example.SerializeToString())
        del video, audio, label
Example #9
def test_json_landmarks_bunny_direct():
    lms = mio.import_landmark_file(mio.data_path_to('bunny.ljson'))
    labels = {'reye', 'mouth', 'nose', 'leye'}
    assert(len(labels - set(lms.labels)) == 0)
    assert_allclose(lms['leye'].points, bunny_leye, atol=1e-7)
    assert_allclose(lms['reye'].points, bunny_reye, atol=1e-7)
    assert_allclose(lms['nose'].points, bunny_nose, atol=1e-7)
    assert_allclose(lms['mouth'].points, bunny_mouth, atol=1e-7)
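
bunny.ljson ships with menpo as a builtin asset, so this test needs no external data. A short sketch of inspecting the bundled landmark files directly; note that the return type of import_landmark_file differs between menpo versions (older releases wrap the points in a LandmarkGroup with a .lms attribute, as the next two examples show):

import menpo.io as mio

# List the assets bundled with menpo, then import the bunny landmarks.
print(mio.ls_builtin_assets())
lms = mio.import_landmark_file(mio.data_path_to('bunny.ljson'))
print(lms)
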
Example #10
File: io_test.py  Project: ikassi/menpo
def test_json_landmarks_bunny_direct():
    lms = pio.import_landmark_file(pio.data_path_to('bunny.json'))
    assert(lms.group_label == 'JSON')
    labels = {'r_eye', 'mouth', 'nose', 'l_eye'}
    assert(len(labels - set(lms.labels)) == 0)
    assert_allclose(lms['l_eye'].lms.points, bunny_l_eye, atol=1e-7)
    assert_allclose(lms['r_eye'].lms.points, bunny_r_eye, atol=1e-7)
    assert_allclose(lms['nose'].lms.points, bunny_nose, atol=1e-7)
    assert_allclose(lms['mouth'].lms.points, bunny_mouth, atol=1e-7)
Example #11
def test_json_landmarks_bunny_direct():
    lms = pio.import_landmark_file(pio.data_path_to('bunny.json'))
    assert (lms.group_label == 'JSON')
    labels = {'r_eye', 'mouth', 'nose', 'l_eye'}
    assert (len(labels - set(lms.labels)) == 0)
    assert_allclose(lms['l_eye'].lms.points, bunny_l_eye, atol=1e-7)
    assert_allclose(lms['r_eye'].lms.points, bunny_r_eye, atol=1e-7)
    assert_allclose(lms['nose'].lms.points, bunny_nose, atol=1e-7)
    assert_allclose(lms['mouth'].lms.points, bunny_mouth, atol=1e-7)
Example #12
def load_images_test(paths,
                     reference_shape,
                     group=None,
                     verbose=True,
                     PLOT=False):
    """Loads and rescales input knn_2D to the diagonal of the reference shape.

    Args:
      paths: a list of strings containing the data directories.
      reference_shape (meanshape): a numpy array [num_landmarks, 2]
      group: landmark group containing the ground truth landmarks.
      verbose: boolean, print debugging info.
    Returns:
      knn_2D: a list of numpy arrays containing knn_2D.
      shapes: a list of the ground truth landmarks.
      reference_shape (meanshape): a numpy array [num_landmarks, 2].
      shape_gen: PCAModel, a shape generator.
    """
    images = []
    shapes = []
    scales = []
    # compute mean shape
    reference_shape = PointCloud(reference_shape)
    nameList = []
    bbox = []
    data = dict()
    for path in paths:
        if verbose:
            print('Importing data from {}'.format(path))

        for im in mio.import_images(path, verbose=verbose, as_generator=True):
            # group = group or im.landmarks[group]._group_label
            group = group or im.landmarks.keys()[0]
            bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
            if 'set' not in str(bb_root):
                bb_root = im.path.parent.relative_to(im.path.parent.parent)
            im.landmarks['bb'] = mio.import_landmark_file(
                str(
                    Path('bbs') / bb_root /
                    (im.path.stem.replace(' ', '') + '.pts')))

            nameList.append(str(im.path))
            lms = im.landmarks['bb'].lms.points
            bbox.append([lms[0, 1], lms[2, 1], lms[0, 0], lms[1, 0]])
            # bbox = np.array(bbox)
            # data['nameList'] = nameList
            # data['bbox'] = bbox
            # sio.savemat('ibug_data.mat', {'nameList':data['nameList'], 'bbox':data['bbox']})
            # exit(0)

            im = im.crop_to_landmarks_proportion(0.3, group='bb')
            images.append(im)

    return images
Example #13
def import_image(img_path):
    img = cv2.imread(str(img_path))
    original_image = Image.init_from_channels_at_back(img[:, :, -1::-1])

    try:
        original_image_lms = mio.import_landmark_file('{}/{}.ljson'.format(
            img_path.parent, img_path.stem)).lms.points.astype(np.float32)
        original_image.landmarks['LJSON'] = PointCloud(original_image_lms)
    except:
        pass

    return original_image
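
A hypothetical call to the import_image helper above; the image path is a placeholder, and the bare except means a missing .ljson sidecar simply leaves the image without landmarks:

from pathlib import Path

img = import_image(Path('/data/faces/000001.jpg'))  # placeholder path
print(img.shape, img.has_landmarks)
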
Example #14
def test_register_landmark_importer(is_file):
    from menpo.shape import PointCloud
    lmark = PointCloud.init_2d_grid((1, 1))

    def foo_importer(filepath, **kwargs):
        return lmark

    is_file.return_value = True

    with patch.dict(mio.input.extensions.image_landmark_types, {}, clear=True):
        mio.register_landmark_importer('.foo', foo_importer)
        new_lmark = mio.import_landmark_file('fake.foo')
    assert lmark is new_lmark
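
Beyond the mocked test above, register_landmark_importer can hook a real custom format into menpo.io. A sketch for a hypothetical '.xy' format with one "x y" pair per line (the format, the importer and the file name are assumptions, not part of menpo):

import numpy as np
import menpo.io as mio
from menpo.shape import PointCloud

def xy_importer(filepath, **kwargs):
    # Hypothetical importer: read whitespace-separated x/y pairs and flip
    # them into menpo's (y, x) point ordering.
    xy = np.loadtxt(str(filepath)).reshape(-1, 2)
    return PointCloud(xy[:, ::-1])

mio.register_landmark_importer('.xy', xy_importer)
# lms = mio.import_landmark_file('annotations/face_01.xy')  # placeholder path
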
Example #15
def test_register_landmark_importer(is_file):
    from menpo.shape import PointCloud
    lmark = PointCloud.init_2d_grid((1, 1))

    def foo_importer(filepath, **kwargs):
        return lmark

    is_file.return_value = True

    with patch.dict(mio.input.extensions.image_landmark_types, {}, clear=True):
        mio.register_landmark_importer('.foo', foo_importer)
        new_lmark = mio.import_landmark_file('fake.foo')
    assert lmark is new_lmark
Example #16
def load_image(path,
               reference_shape,
               is_training=False,
               group='PTS',
               mirror_image=False):
    """Load an annotated image.

    In the directory of the provided image file, there
    should exist a landmark file (.pts) with the same
    basename as the image file.

    Args:
      path: a path containing an image file.
      reference_shape: a numpy array [num_landmarks, 2]
      is_training: whether in training mode or not.
      group: landmark group containing the ground truth landmarks.
      mirror_image: flips the image's pixels and landmarks horizontally.
    Returns:
      pixels: a numpy array [height, width, 3].
      estimate: an initial estimate of the landmarks, a numpy array [68, 2].
      gt_truth: the ground truth landmarks, a numpy array [68, 2].
    """
    im = mio.import_image(path)
    bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
    if 'set' not in str(bb_root):
        bb_root = im.path.parent.relative_to(im.path.parent.parent)

    im.landmarks['bb'] = mio.import_landmark_file(
        str(Path('bbs') / bb_root / (im.path.stem + '.pts')))

    im = im.crop_to_landmarks_proportion(0.3, group='bb')
    reference_shape = PointCloud(reference_shape)

    bb = im.landmarks['bb'].lms.bounding_box()

    im.landmarks['__initial'] = align_shape_with_bounding_box(
        reference_shape, bb)
    im = im.rescale_to_pointcloud(reference_shape, group='__initial')

    if mirror_image:
        im = utils.mirror_image(im)

    lms = im.landmarks[group].lms
    initial = im.landmarks['__initial'].lms

    # if the image is greyscale then convert to rgb.
    pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)

    gt_truth = lms.points.astype(np.float32)
    estimate = initial.points.astype(np.float32)
    return pixels.astype(np.float32).copy(), gt_truth, estimate
def load_images(list_frames, frames_path, path_land, clip_name, max_images=None,
                training_images=None, crop_reading=0.3, pix_thres=330, feat=None):
    """
    Read images from the clips that are processed. The landmarks can be in a different
    folder, with a .pts extension, and are searched for accordingly.
    :param list_frames:         List of images that will be read and loaded.
    :param frames_path:         Path to the folder of images.
    :param path_land:           Path of the respective landmarks.
    :param clip_name:           The name of the clip being processed.
    :param max_images:          (optional) Max images that will be loaded from this clip.
    :param training_images:     (optional) List of images to append the new ones.
    :param crop_reading:        (optional) Amount of cropping the image around the landmarks.
    :param pix_thres:           (optional) If the cropped image has a dimension bigger than this, it gets cropped to this diagonal dimension.
    :param feat:                (optional) Features to be applied to the images before inserting them to the list.
    :return:                    List of menpo images.
    """
    from random import shuffle
    if not check_path_and_landmarks(frames_path, clip_name, path_land):
        return []
    if feat is None:
        feat = no_op
    if training_images is None:
        training_images = []
    shuffle(list_frames)            # shuffle the list to ensure random ones are chosen
    if max_images is None:
        max_images = len(list_frames)
    elif max_images < 0:
        print('Warning: max_images cannot be negative; loading the whole list instead.')
        max_images = len(list_frames)
    cnt = 0  # counter for images appended to the list
    for frame_name in list_frames:
        try:
            im = mio.import_image(frames_path + frame_name, normalise=True)
        except ValueError:                                      # in case the extension is unknown (by menpo)
            print('Ignoring the \'image\' {}.'.format(frame_name))
            continue
        res = glob.glob(path_land + clip_name + sep + im.path.stem + '*.pts')
        if len(res) == 0:                       # if the image does not have any existing landmarks, ignore it
            continue
        elif len(res) > 1:
            #_r = randint(0,len(res)-1); #just for debugging reasons in different variable
            #ln = mio.import_landmark_file(res[_r]) # in case there are plenty of landmarks for the image, load random ones
            print('The image {} has more than one landmark file for one person; loading only the first one.'.format(frame_name))
        ln = mio.import_landmark_file(res[0])
        im.landmarks['PTS'] = ln
        im = crop_rescale_img(im, crop_reading=crop_reading, pix_thres=pix_thres)
        training_images.append(feat(im))
        cnt += 1
        if cnt >= max_images:
            break  # the limit of images (appended to the list) is reached
    return training_images
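
A hypothetical invocation of load_images with placeholder paths, mirroring the parameters documented in the docstring above:

frames = ['000001.png', '000002.png', '000003.png']
imgs = load_images(frames,
                   frames_path='/data/clips/clip_01/frames/',  # placeholder
                   path_land='/data/clips/landmarks/',         # placeholder
                   clip_name='clip_01',
                   max_images=100)
print('{} images loaded'.format(len(imgs)))
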
Example #18
def test_importing_v1_ljson_null_values(is_file, mock_open, mock_dict):
    v1_ljson = {
        "groups": [
            {
                "connectivity": [[0, 1], [1, 2], [2, 3]],
                "label":
                "chin",
                "landmarks": [
                    {
                        "point": [987.9, 1294.1]
                    },
                    {
                        "point": [96.78, 1246.8]
                    },
                    {
                        "point": [None, 0.1]
                    },
                    {
                        "point": [303.22, 167.2]
                    },
                ],
            },
            {
                "connectivity": [[0, 1]],
                "label": "leye",
                "landmarks": [{
                    "point": [None, None]
                }, {
                    "point": [None, None]
                }],
            },
        ],
        "version":
        1,
    }
    mock_dict.return_value = v1_ljson
    is_file.return_value = True

    with warnings.catch_warnings(record=True) as found_warnings:
        lmark = mio.import_landmark_file("fake_lmark_being_mocked.ljson",
                                         group="LJSON")
    nan_points = np.isnan(lmark.points)

    # Should raise deprecation warning
    assert len(found_warnings) == 1, [w.message for w in found_warnings]
    assert nan_points[2, 0]  # y-coord None point is nan
    assert not nan_points[2, 1]  # x-coord point is not nan
    assert np.all(nan_points[4:, :])  # all of leye label is nan
Example #19
def load_image(path, reference_shape, is_training=False, group='PTS',
               mirror_image=False):
    """Load an annotated image.

    In the directory of the provided image file, there
    should exist a landmark file (.pts) with the same
    basename as the image file.

    Args:
      path: a path containing an image file.
      reference_shape: a numpy array [num_landmarks, 2]
      is_training: whether in training mode or not.
      group: landmark group containing the ground truth landmarks.
      mirror_image: flips the image's pixels and landmarks horizontally.
    Returns:
      pixels: a numpy array [height, width, 3].
      estimate: an initial estimate of the landmarks, a numpy array [68, 2].
      gt_truth: the ground truth landmarks, a numpy array [68, 2].
    """
    im = mio.import_image(path)
    bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
    if 'set' not in str(bb_root):
        bb_root = im.path.parent.relative_to(im.path.parent.parent)

    im.landmarks['bb'] = mio.import_landmark_file(str(Path('bbs') / bb_root / (
        im.path.stem + '.pts')))

    im = im.crop_to_landmarks_proportion(0.3, group='bb')
    reference_shape = PointCloud(reference_shape)

    bb = im.landmarks['bb'].lms.bounding_box()

    im.landmarks['__initial'] = align_shape_with_bounding_box(reference_shape,
                                                              bb)
    im = im.rescale_to_pointcloud(reference_shape, group='__initial')

    if mirror_image:
        im = utils.mirror_image(im)

    lms = im.landmarks[group].lms
    initial = im.landmarks['__initial'].lms

    # if the image is greyscale then convert to rgb.
    pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)

    gt_truth = lms.points.astype(np.float32)
    estimate = initial.points.astype(np.float32)
    return pixels.astype(np.float32).copy(), gt_truth, estimate
def build_reference_shape(paths, num_patches=73, diagonal=200):
    """Builds the reference shape.

    Args:
        paths: train image paths.
        num_patches: number of landmarks
        diagonal: the diagonal of the reference shape in pixels.
    Returns:
        the reference shape.
    """
    landmarks = []
    for path in paths:
        group = mio.import_landmark_file(path.parent / (path.stem + '.pts'))
        if group.n_points == num_patches:
            landmarks += [group]
    return compute_reference_shape(landmarks, diagonal=diagonal).points.astype(np.float32)
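
compute_reference_shape, called by build_reference_shape above, belongs to the surrounding project and is not shown. A minimal sketch of the usual idea, assuming a plain rescale-to-common-diagonal followed by averaging (real implementations typically also rigidly align the shapes first):

import numpy as np
from menpo.shape import mean_pointcloud
from menpo.transform import UniformScale

def compute_reference_shape_sketch(shapes, diagonal=200):
    # Scale every shape so its bounding-box diagonal equals `diagonal`,
    # then average the scaled point clouds.
    scaled = []
    for shape in shapes:
        height, width = shape.range()
        scale = diagonal / np.sqrt(height ** 2 + width ** 2)
        scaled.append(UniformScale(scale, n_dims=2).apply(shape))
    return mean_pointcloud(scaled)
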
def _aux(im, pts_paths, pts_names, pts_formats, save_path, save_original, off1, off2,
         figure_size, overwrite, render_options, only_ln=False):
    if only_ln:  # case of visualising only landmarks (black background)
        path_tmp = im.path
        im = Image.init_blank([im.shape[0], im.shape[1]], im.n_channels)
        im.path = path_tmp
    # attach landmarks
    for k, pts_path in enumerate(pts_paths):
        if os.path.isfile(pts_path + im.path.stem + pts_formats[k]):
            pts = mio.import_landmark_file(pts_path + im.path.stem + pts_formats[k])
            im.landmarks[pts_names[k]] = pts

    # copy original if asked
    if save_original:
        im_orig = im.copy()

    # crop
    if pts_names[0] in im.landmarks.group_labels:
        centre = im.landmarks[pts_names[0]].lms.centre()
        min_indices = np.array([round(centre[0])-off1, round(centre[1])-off2])
        max_indices = np.array([round(centre[0])+off1, round(centre[1])+off2])
        # im.crop_inplace(min_indices, max_indices)
        im = im.crop(min_indices, max_indices, constrain_to_boundary=True)
    else:
        path_tmp = im.path
        im = Image.init_blank([off1*2 + 1, off2*2 + 1], im.n_channels)
        im.path = path_tmp

    # render
    rand = randint(1, 10000)
    fig = plt.figure(rand)
    if save_original:
        gs = gridspec.GridSpec(1, 2, width_ratios=[im_orig.height, im.height])

        plt.subplot(gs[0])
        renderer = _render(im_orig, pts_names, fig, render_options['colours'][0],
                           render_options['sizes'][0], render_options['edgesizes'][0], figure_size)

        plt.subplot(gs[1])
        renderer = _render(im, pts_names, fig, render_options['colours'][1],
                           render_options['sizes'][1], render_options['edgesizes'][1], figure_size)
    else:
        renderer = _render(im, pts_names, fig, render_options['colours'][1],
                           render_options['sizes'][1], render_options['edgesizes'][1], figure_size)

    renderer.save_figure(save_path + im.path.stem + '.png', format='png', pad_inches=0.0, overwrite=overwrite)
    plt.close(rand)
def _aux(im, pts_paths, pts_names, pts_formats, save_path, save_original, off1, off2, figure_size, overwrite, render_options, only_ln=False):
    if only_ln:  # case of visualising only landmarks (black background)
        path_tmp = im.path
        im = Image.init_blank([im.shape[0], im.shape[1]], im.n_channels)
        im.path = path_tmp
    # attach landmarks
    for k, pts_path in enumerate(pts_paths):
        if os.path.isfile(pts_path + im.path.stem + pts_formats[k]):
            pts = mio.import_landmark_file(pts_path + im.path.stem + pts_formats[k])
            im.landmarks[pts_names[k]] = pts

    # copy original if asked
    if save_original:
        im_orig = im.copy()

    # crop
    if pts_names[0] in im.landmarks.group_labels:
        centre = im.landmarks[pts_names[0]].lms.centre()
        min_indices = np.array([round(centre[0])-off1, round(centre[1])-off2])
        max_indices = np.array([round(centre[0])+off1, round(centre[1])+off2])
        # im.crop_inplace(min_indices, max_indices)
        im = im.crop(min_indices, max_indices, constrain_to_boundary=True)
    else:
        path_tmp = im.path
        im = Image.init_blank([off1*2 + 1, off2*2 + 1], im.n_channels)
        im.path = path_tmp

    # render
    rand = randint(1, 10000)
    fig = plt.figure(rand)
    if save_original:
        gs = gridspec.GridSpec(1, 2, width_ratios=[im_orig.height, im.height])

        plt.subplot(gs[0])
        renderer = _render(im_orig, pts_names, fig, render_options['colours'][0],
                           render_options['sizes'][0], render_options['edgesizes'][0], figure_size)

        plt.subplot(gs[1])
        renderer = _render(im, pts_names, fig, render_options['colours'][1],
                           render_options['sizes'][1], render_options['edgesizes'][1], figure_size)
    else:
        renderer = _render(im, pts_names, fig, render_options['colours'][1],
                           render_options['sizes'][1], render_options['edgesizes'][1], figure_size)

    renderer.save_figure(save_path + im.path.stem + '.png', format='png', pad_inches=0.0, overwrite=overwrite)
    plt.close(rand)
Example #23
        def wrapper(index, shape):
            index = index.decode("utf-8")

            prefix = index.split('_')[0]

            landmark_indices = list(map(int, index.split('_')[1:]))
            if len(landmark_indices) > 1:
                min_index, max_index = landmark_indices
                landmark_indices = range(min_index, max_index + 1)

            kpts = np.zeros(shape[:2], dtype=int)
            im = Image(kpts)

            mask = np.ones(list(shape[:2]) + [1]).astype(np.float32)

            for lms_index in landmark_indices:
                filename = (
                    prefix + '_' + str(lms_index) + '.' + self.lms_extension)
                path = self.lms_root / filename
                if not path.exists():
                    continue
                lms = mio.import_landmark_file(path.as_posix()).lms

                if lms.points.shape[0] != 68:
                    min_indices, max_indices = lms.bounds()

                    mask[min_indices[0]:max_indices[0],
                         min_indices[1]:max_indices[1]] = 0
                    continue

                for i in range(68):
                    lms_mask = im.as_masked().copy()
                    patches = np.ones((1, 1, 1, 4, 4), dtype=np.bool)

                    pc = lms.points[i][None, :]
                    lms_mask.mask.pixels[...] = False
                    lms_mask = lms_mask.mask.set_patches(
                        patches, menpo.shape.PointCloud(pc))
                    kpts[lms_mask.mask] = i + 1

            return kpts.astype(np.int32), mask.astype(np.int32)
Example #24
def test_importing_v2_ljson_null_values(is_file, mock_open, mock_dict):
    v2_ljson = { "labels": [
                    { "label": "left_eye", "mask": [0, 1, 2] },
                    { "label": "right_eye", "mask": [3, 4, 5] }
                 ],
                 "landmarks": {
                     "connectivity": [ [0, 1], [1, 2], [2, 0], [3, 4],
                                       [4, 5],  [5, 3] ],
                     "points": [ [None, 200.5], [None, None],
                                 [316.8, 199.15], [339.48, 205.0],
                                 [358.54, 217.82], [375.0, 233.4]]
                 },
                 "version": 2 }

    mock_dict.return_value = v2_ljson
    is_file.return_value = True

    lmark = mio.import_landmark_file('fake_lmark_being_mocked.ljson')
    nan_points = np.isnan(lmark.lms.points)
    assert nan_points[0, 0]  # y-coord None point is nan
    assert not nan_points[0, 1]  # x-coord point is not nan
    assert np.all(nan_points[1, :]) # all of leye label is nan
Example #25
    def get_keys(self, path='images'):

        path = self.root / path
        lms_files = path.glob('*' + self.lms_extension)
        keys = []  # ['face_55135', 'face_49348']

        # Get only files with 68 landmarks
        for p in lms_files:
            try:
                lms = mio.import_landmark_file(p)

                if lms.n_landmarks == 68 and not np.isnan(lms.lms.points).any():
                    keys.append(lms.path.stem)
            except:
                pass

        self._keys = keys
        print('Found {} files.'.format(len(keys)))

        if len(keys) == 0:
            raise RuntimeError('No images found in {}'.format(path))
        return tf.constant(keys, tf.string)
Example #26
def test_importing_v1_ljson_null_values(is_file, mock_open, mock_dict):
    v1_ljson = { "groups": [
        { "connectivity": [ [ 0, 1 ], [ 1, 2 ], [ 2, 3 ] ],
          "label": "chin", "landmarks": [
            { "point": [ 987.9, 1294.1 ] }, { "point": [ 96.78, 1246.8 ] },
            { "point": [ None, 0.1 ] }, { "point": [303.22, 167.2 ] } ] },
        { "connectivity": [ [ 0, 1 ] ],
          "label": "leye", "landmarks": [
            { "point": [ None, None ] },
            { "point": [ None, None ] }] }
        ], "version": 1 }
    mock_dict.return_value = v1_ljson
    is_file.return_value = True

    with warnings.catch_warnings(record=True) as w:
        lmark = mio.import_landmark_file('fake_lmark_being_mocked.ljson')
    nan_points = np.isnan(lmark.lms.points)

    # Should raise deprecation warning
    assert len(w) == 1
    assert nan_points[2, 0]  # y-coord None point is nan
    assert not nan_points[2, 1]  # x-coord point is not nan
    assert np.all(nan_points[4:, :]) # all of leye label is nan
Example #27
def main():
    trumpFolder = './data/trump/trump'
    leftFolder = './data/headpose/Angle/left15'
    rightFolder = './data/headpose/Angle/right15'
    neutralFolder = './data/headpose/Angle/neutral0'
    downFolder = './data/headpose/Angle/down30'
    '''SHAPE MODEL'''
    trumpShapeModel = PointDistributionModel(trumpFolder)
    leftShapeModel = PointDistributionModel(leftFolder)
    rightShapeModel = PointDistributionModel(rightFolder)
    neutralShapeModel = PointDistributionModel(neutralFolder)
    downShapeModel = PointDistributionModel(downFolder)
    '''MEAN SHAPE'''
    trumpMeanShape = trumpShapeModel.model.mean()
    leftMeanShape = leftShapeModel.model.mean()
    rightMeanShape = rightShapeModel.model.mean()
    neutralMeanShape = neutralShapeModel.model.mean()
    downMeanShape = downShapeModel.model.mean()
    '''WEIGHTS'''
    leftWeights = Projection(trumpShapeModel, leftMeanShape)
    rightWeights = Projection(trumpShapeModel, rightMeanShape)
    neutralWeights = Projection(trumpShapeModel, neutralMeanShape)
    downWeights = Projection(trumpShapeModel, downMeanShape)
    '''DELTA'''
    leftDeltaWeights = deltaWeights(leftWeights, neutralWeights)
    rightDeltaWeights = deltaWeights(rightWeights, neutralWeights)
    downDeltaWeights = deltaWeights(downWeights, neutralWeights)
    '''RECONSTRUCTION'''
    '''PRODUCE TRUMP IMAGES'''
    for root, dirs, files in os.walk(trumpFolder):
        for file in files:
            if os.path.join(root, file).endswith('.pts'):
                '''READ ONE TRUMP IMAGE/PTS'''
                ptsPath = os.path.join(root, file)
                trumpShape = mio.import_landmark_file(ptsPath)
                headTrumpShape = Reconstruction(trumpShapeModel,
                                                leftDeltaWeights, trumpShape)
Example #28
    def _validate_without_gt(self, valid_imgs, fitter, vis=True):
        Pxy_template = self.read_pts(self.template_filename)  # x ~ y format
        menpo_initial_pts = mio.import_landmark_file(
            self.template_filename).lms

        Pyx_template = self._swap_columns(Pxy_template)

        for im in valid_imgs:
            path = im.path._str
            pathdir, name, ext = fp.fileparts(path)

            Pyx_initial_pts = Pyx_template * np.array(
                [im.pixels.shape[1], im.pixels.shape[2]], np.float32)
            Pxy_initial_pts = self._swap_columns(Pyx_initial_pts)
            menpo_initial_pts.points = Pyx_initial_pts

            fr = fitter.fit_from_shape(im, menpo_initial_pts, gt_shape=None)
            menpo_output_pts = fr.final_shape
            Pxy_output_pts = self._swap_columns(menpo_output_pts.points)

            self.save_pts(os.path.join(pathdir, name + '.pts'), Pxy_output_pts)

            if vis is True:
                Pxy_initial_pts = Pxy_initial_pts.astype(np.int32)
                Pxy_output_pts = Pxy_output_pts.astype(np.int32)
                im_vis = cv2.imread(path)
                for k in range(Pxy_output_pts.shape[0]):
                    cv2.circle(im_vis,
                               (Pxy_initial_pts[k, 0], Pxy_initial_pts[k, 1]),
                               10, (0, 255, 0), 10)  # green
                    cv2.circle(im_vis,
                               (Pxy_output_pts[k, 0], Pxy_output_pts[k, 1]),
                               10, (0, 0, 255), 10)  # red
                im_vis = cv2.resize(im_vis, dsize=(0, 0), fx=0.3, fy=0.3)
                if not os.path.exists('./vis/'):
                    os.mkdir('./vis/')
                cv2.imwrite(os.path.join('./vis/', name + '.jpg'), im_vis)
Example #29
import os
import sys

import numpy as np
from numpy.testing import assert_allclose
from pathlib import PosixPath, WindowsPath, Path
from mock import patch, PropertyMock, MagicMock
from nose.tools import raises


import menpo.io as mio
from menpo.io.utils import _norm_path
from menpo.image import Image
from menpo.io.output.pickle import pickle_paths_as_pure


builtins_str = '__builtin__' if sys.version_info[0] == 2 else 'builtins'

test_lg = mio.import_landmark_file(mio.data_path_to('lenna.ljson'))
nan_lg = test_lg.copy()
nan_lg.points[0, :] = np.nan
test_img = Image(np.random.random([100, 100]))
colour_test_img = Image(np.random.random([3, 100, 100]))
fake_path = '/tmp/test.fake'


@patch('menpo.io.output.base.landmark_types')
@patch('menpo.io.output.base.Path.exists')
@patch('menpo.io.output.base.Path.open')
def test_export_filepath_overwrite_exists(mock_open, exists, landmark_types):
    exists.return_value = True
    landmark_types.__contains__.return_value = True
    mio.export_landmark_file(test_lg, fake_path, overwrite=True)
    mock_open.assert_called_with('wb')
Example #30
def test_import_landmark_file():
    lm_path = os.path.join(mio.data_dir_path(), 'einstein.pts')
    mio.import_landmark_file(lm_path)
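
einstein.pts is bundled with menpo alongside einstein.jpg, so the imported landmarks can be attached straight to the matching builtin image. A short usage sketch using standard menpo.io calls:

import menpo.io as mio

img = mio.import_builtin_asset('einstein.jpg')
lms = mio.import_landmark_file(mio.data_dir_path() / 'einstein.pts')
img.landmarks['PTS'] = lms
print(img.landmarks['PTS'])
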
Example #31
def load_images(paths, group=None, verbose=True):
    """Loads and rescales input images to the diagonal of the reference shape.

    Args:
      paths: a list of strings containing the data directories.
      reference_shape: a numpy array [num_landmarks, 2]
      group: landmark group containing the ground truth landmarks.
      verbose: boolean, print debugging info.
    Returns:
      images: a list of numpy arrays containing images.
      shapes: a list of the ground truth landmarks.
      reference_shape: a numpy array [num_landmarks, 2].
      shape_gen: PCAModel, a shape generator.
    """
    images = []
    shapes = []
    bbs = []

    reference_shape = PointCloud(build_reference_shape(paths))

    for path in paths:
        if verbose:
            print('Importing data from {}'.format(path))

        for im in mio.import_images(path, verbose=verbose, as_generator=True):
            group = group or im.landmarks[group]._group_label

            bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
            if 'set' not in str(bb_root):
                bb_root = im.path.parent.relative_to(im.path.parent.parent)
            im.landmarks['bb'] = mio.import_landmark_file(str(Path(
                'bbs') / bb_root / (im.path.stem + '.pts')))
            im = im.crop_to_landmarks_proportion(0.3, group='bb')
            im = im.rescale_to_pointcloud(reference_shape, group=group)
            im = grey_to_rgb(im)
            images.append(im.pixels.transpose(1, 2, 0))
            shapes.append(im.landmarks[group].lms)
            bbs.append(im.landmarks['bb'].lms)

    train_dir = Path(FLAGS.train_dir)
    mio.export_pickle(reference_shape.points, train_dir / 'reference_shape.pkl', overwrite=True)
    print('created reference_shape.pkl using the {} group'.format(group))

    pca_model = detect.create_generator(shapes, bbs)

    # Pad images to max length
    max_shape = np.max([im.shape for im in images], axis=0)
    max_shape = [len(images)] + list(max_shape)
    padded_images = np.random.rand(*max_shape).astype(np.float32)
    print(padded_images.shape)

    for i, im in enumerate(images):
        height, width = im.shape[:2]
        dy = max(int((max_shape[1] - height - 1) / 2), 0)
        dx = max(int((max_shape[2] - width - 1) / 2), 0)
        lms = shapes[i]
        pts = lms.points
        pts[:, 0] += dy
        pts[:, 1] += dx

        lms = lms.from_vector(pts)
        padded_images[i, dy:(height+dy), dx:(width+dx)] = im

    return padded_images, shapes, reference_shape.points, pca_model
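
The padding loop above centres each image inside a canvas of the maximum shape and shifts its landmarks by the same offsets. A tiny numeric check of the offset formula, with values chosen purely for illustration:

# An image of height 90 placed in a 100-row canvas starts at row dy = 4,
# so rows 4..93 are filled and the landmarks' y-coordinates move by +4.
max_h, h = 100, 90
dy = max(int((max_h - h - 1) / 2), 0)
print(dy)  # 4
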
Example #32
def influence():
    image_paths = sorted(list(Path('.').glob(FLAGS.dataset)))
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        with open(MDM_MODEL_PATH, 'rb') as f:
            graph_def = tf.GraphDef.FromString(f.read())
            tf.import_graph_def(graph_def)

        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            errors = []
            mean_errors = []

            step = 0
            start_time = time.time()
            for path in image_paths:
                mp_image = mio.import_image(path)
                assert isinstance(mp_image, menpo.image.Image)
                if mp_image.n_channels == 3:
                    mp_image.pixels = np.mean(mp_image.pixels,
                                              0,
                                              keepdims=True)
                mp_image.landmarks['bb'] = mio.import_landmark_file(
                    str(
                        Path(path.parent.parent / 'BoundingBoxes' /
                             (path.stem + '.pts'))))
                ly, lx = mp_image.landmarks['bb'].points[0]
                hy, hx = mp_image.landmarks['bb'].points[2]
                cx = (lx + hx) / 2
                cy = (ly + hy) / 2
                bb_size = int(math.ceil(max(hx - lx, hy - ly) * 4. / 6.))
                square_bb = np.array([[cy - bb_size, cx - bb_size],
                                      [cy + bb_size, cx + bb_size]])
                mp_image.landmarks['square_bb'] = PointCloud(square_bb)
                mp_image = mp_image.crop_to_landmarks_proportion(
                    0.0, group='square_bb')
                mp_image = mp_image.resize((112, 112))

                np_image = np.expand_dims(mp_image.pixels.transpose((1, 2, 0)),
                                          0)
                np_shape = mp_image.landmarks['PTS'].points

                prediction, = sess.run('import/add:0',
                                       feed_dict={'import/input:0': np_image})
                assert isinstance(prediction, np.ndarray)
                prediction = prediction.reshape((68, 2))
                prediction = prediction[:, [1, 0]]
                error = normalized_batch_nme(prediction,
                                             mp_image.landmarks['PTS'].points)
                mean_error = normalized_nme(error)
                error_level = min(9, int(mean_error * 100))

                concat_image = utils.draw_landmarks_discrete(
                    np_image[0], np_shape, prediction)
                # plt.imsave('err{}/step{}.png'.format(error_level, step), concat_image)
                errors.append(error)
                mean_errors.append(mean_error)
                step += 1
                if step % 20 == 0:
                    duration = time.time() - start_time
                    sec_per_batch = duration / 20.0
                    examples_per_sec = 1. / sec_per_batch
                    log_str = '{}: [{:d} batches done] ({:.1f} examples/sec; {:.3f} sec/batch)'
                    print(
                        log_str.format(datetime.now(), step, examples_per_sec,
                                       sec_per_batch))
                    start_time = time.time()

            errors = np.array(errors)
            print(errors.shape)
            mean_errors = np.vstack(mean_errors).ravel()
            errors_mean = np.mean(errors, 0)
            mean_errors_mean = mean_errors.mean()
            with open('errors.txt', 'w') as ofs:
                for row, avg in zip(errors, mean_errors):
                    for col in row:
                        ofs.write('%.4f, ' % col)
                    ofs.write('%.4f' % avg)
                    ofs.write('\n')
                for col in errors_mean:
                    ofs.write('%.4f, ' % col)
                ofs.write('%.4f' % mean_errors_mean)
                ofs.write('\n')
            auc_at_08 = (mean_errors < .08).mean()
            auc_at_05 = (mean_errors < .05).mean()

            print('Errors', mean_errors.shape)
            print(
                '%s: mean_rmse = %.4f, auc @ 0.05 = %.4f, auc @ 0.08 = %.4f' %
                (datetime.now(), mean_errors.mean(), auc_at_05, auc_at_08))
Example #33
def test_import_landmark_file():
    lm_path = mio.data_dir_path() / 'einstein.pts'
    mio.import_landmark_file(lm_path)
Example #34
def load_images_aflw(paths,
                     group=None,
                     verbose=True,
                     PLOT=True,
                     AFLW=False,
                     PLOT_shape=False):
    """Loads and rescales input knn_2D to the diagonal of the reference shape.

    Args:
      paths: a list of strings containing the data directories.
      reference_shape (meanshape): a numpy array [num_landmarks, 2]
      group: landmark group containing the ground truth landmarks.
      verbose: boolean, print debugging info.
    Returns:
      knn_2D: a list of numpy arrays containing knn_2D.
      shapes: a list of the ground truth landmarks.
      reference_shape (meanshape): a numpy array [num_landmarks, 2].
      shape_gen: PCAModel, a shape generator.
    """
    images = []
    shapes = []
    bbs = []
    shape_space = []
    plot_shape_x = []
    plot_shape_y = []
    # compute mean shape
    if AFLW:
        # reference_shape = PointCloud(mio.import_pickle(Path('/home/hliu/gmh/RL_FA/mdm_aflw/ckpt/train_aflw') / 'reference_shape.pkl'))
        reference_shape = mio.import_pickle(
            Path('/home/hliu/gmh/RL_FA/mdm_aflw/ckpt/train_aflw') /
            'reference_shape.pkl')
    else:
        reference_shape = PointCloud(build_reference_shape(paths))

    for path in paths:
        if verbose:
            print('Importing data from {}'.format(path))

        for im in mio.import_images(path, verbose=verbose, as_generator=True):
            # group = group or im.landmarks[group]._group_label
            group = group or im.landmarks.keys()[0]
            bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
            if 'set' not in str(bb_root):
                bb_root = im.path.parent.relative_to(im.path.parent.parent)

            if AFLW:
                im.landmarks['bb'] = im.landmarks['PTS'].lms.bounding_box()
            else:
                im.landmarks['bb'] = mio.import_landmark_file(
                    str(Path('bbs') / bb_root / (im.path.stem + '.pts')))
            im = im.crop_to_landmarks_proportion(0.3, group='bb')
            im = im.rescale_to_pointcloud(reference_shape, group=group)
            im = grey_to_rgb(im)
            # knn_2D.append(im.pixels.transpose(1, 2, 0))
            shapes.append(im.landmarks[group].lms)
            shape_space.append(im.landmarks[group].lms.points)
            bbs.append(im.landmarks['bb'].lms)
            if PLOT_shape:
                x_tmp = np.sum((im.landmarks[group].lms.points[:, 0] -
                                reference_shape.points[:, 0]))
                y_tmp = np.sum((im.landmarks[group].lms.points[:, 1] -
                                reference_shape.points[:, 1]))
                if x_tmp < 0 and y_tmp < 0:
                    plot_shape_x.append(x_tmp)
                    plot_shape_y.append(y_tmp)
    shape_space = np.array(shape_space)
    print('shape_space:', shape_space.shape)

    train_dir = Path(FLAGS.train_dir)
    if PLOT_shape:
        k_nn_plot_x = []
        k_nn_plot_y = []
        centers = utils.k_means(shape_space, 500, num_patches=19)
        centers = np.reshape(centers, [-1, 19, 2])
        for i in range(centers.shape[0]):
            x_tmp = np.sum((centers[i, :, 0] - reference_shape.points[:, 0]))
            y_tmp = np.sum((centers[i, :, 1] - reference_shape.points[:, 1]))
            if x_tmp < 0 and y_tmp < 0:
                k_nn_plot_x.append(x_tmp)
                k_nn_plot_y.append(y_tmp)

        # plt.scatter(plot_shape_x, plot_shape_y, s=20)
        # plt.scatter(k_nn_plot_x, k_nn_plot_y, s=40)
        # plt.xticks(())
        # plt.yticks(())
        # plt.show()
        # pdb.set_trace()

    np.save(train_dir / 'shape_space_all.npy', shape_space)
    # centers = utils.k_means(shape_space, 100)
    # centers = np.reshape(centers, [-1, 68, 2])

    # np.save(train_dir/'shape_space_origin.npy', centers)
    # print('created shape_space.npy using the {} group'.format(group))
    # exit(0)

    mio.export_pickle(reference_shape.points,
                      train_dir / 'reference_shape.pkl',
                      overwrite=True)
    print('created reference_shape.pkl using the {} group'.format(group))

    pca_model = detect.create_generator(shapes, bbs)

    # Pad knn_2D to max length
    max_shape = [272, 261, 3]
    padded_images = np.random.rand(*max_shape).astype(np.float32)
    print(padded_images.shape)

    if PLOT:
        # plot without padding
        centers = utils.k_means(shape_space, 500, num_patches=19)
        centers = np.reshape(centers, [-1, 19, 2])
        plot_img = cv2.imread('a.png').transpose(2, 0, 1)
        centers_tmp = np.zeros(centers.shape)
        # menpo_img = mio.import_image('a.png')
        menpo_img = menpo.image.Image(plot_img)
        for i in range(centers.shape[0]):
            menpo_img.view()
            min_y = np.min(centers[i, :, 0])
            min_x = np.min(centers[i, :, 1])
            centers_tmp[i, :, 0] = centers[i, :, 0] - min_y + 20
            centers_tmp[i, :, 1] = centers[i, :, 1] - min_x + 20
            print(centers_tmp[i, :, :])
            menpo_img.landmarks['center'] = PointCloud(centers_tmp[i, :, :])
            menpo_img.view_landmarks(group='center',
                                     marker_face_colour='b',
                                     marker_size='16')
            # menpo_img.landmarks['center'].view(render_legend=True)
            plt.savefig('plot_shape_space_aflw/' + str(i) + '.png')
            plt.close()
        exit(0)

    # !!!shape_space without delta, which means shape_space has already been padded!

    # delta = np.zeros(shape_space.shape)

    for i, im in enumerate(images):
        height, width = im.shape[:2]
        dy = max(int((max_shape[0] - height - 1) / 2), 0)
        dx = max(int((max_shape[1] - width - 1) / 2), 0)
        lms = shapes[i]
        pts = lms.points
        pts[:, 0] += dy
        pts[:, 1] += dx
        shape_space[i, :, 0] += dy
        shape_space[i, :, 1] += dx
        # delta[i][:, 0] = dy
        # delta[i][:, 1] = dx
        lms = lms.from_vector(pts)
        padded_images[i, dy:(height + dy), dx:(width + dx)] = im

    # shape_space = np.concatenate((shape_space, delta), 2)

    centers = utils.k_means(shape_space, 1000, num_patches=19)
    centers = np.reshape(centers, [-1, 19, 2])

    # pdb.set_trace()
    np.save(train_dir / 'shape_space.npy', centers)
    print('created shape_space.npy using the {} group'.format(group))
    exit(0)
    return padded_images, shapes, reference_shape.points, pca_model, centers
Example #35
import sys
import warnings
from pathlib import Path, PosixPath, WindowsPath
from unittest.mock import MagicMock, PropertyMock, patch

import numpy as np
from numpy.testing import assert_allclose
from pytest import raises

import menpo.io as mio
from menpo.image import Image
from menpo.io.output.pickle import pickle_paths_as_pure
from menpo.io.utils import _norm_path

builtins_str = "__builtin__" if sys.version_info[0] == 2 else "builtins"

test_lg = mio.import_landmark_file(mio.data_path_to("lenna.ljson"),
                                   group="LJSON")
nan_lg = test_lg.copy()
nan_lg.points[0, :] = np.nan
test_img = Image(np.random.random([100, 100]))
colour_test_img = Image(np.random.random([3, 100, 100]))
fake_path = "/tmp/test.fake"


@patch("menpo.io.output.base.landmark_types")
@patch("menpo.io.output.base.Path.exists")
@patch("menpo.io.output.base.Path.open")
def test_export_filepath_overwrite_exists(mock_open, exists, landmark_types):
    exists.return_value = True
    landmark_types.__contains__.return_value = True
    mio.export_landmark_file(test_lg, fake_path, overwrite=True)
    mock_open.assert_called_with("wb")
Example #36
def test_import_landmark_file():
    lm_path = mio.data_dir_path() / 'einstein.pts'
    mio.import_landmark_file(lm_path)
Example #37
def load_images_test_300VW(paths,
                           reference_shape,
                           group=None,
                           verbose=True,
                           PLOT=False):
    """Loads and rescales input knn_2D to the diagonal of the reference shape.

    Args:
      paths: a list of strings containing the data directories.
      reference_shape (meanshape): a numpy array [num_landmarks, 2]
      group: landmark group containing the ground truth landmarks.
      verbose: boolean, print debugging info.
    Returns:
      knn_2D: a list of numpy arrays containing knn_2D.
      shapes: a list of the ground truth landmarks.
      reference_shape (meanshape): a numpy array [num_landmarks, 2].
      shape_gen: PCAModel, a shape generator.
    """
    images = []
    shapes = []
    scales = []
    # compute mean shape
    reference_shape = PointCloud(reference_shape)

    for path in paths:
        if verbose:
            print('Importing data from {}'.format(path))

        for im in mio.import_images(path, verbose=verbose, as_generator=True):
            # group = group or im.landmarks[group]._group_label
            # pdb.set_trace()

            # bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
            bb_root = im.path.parent
            if 'set' not in str(bb_root):
                bb_root = im.path.parent.relative_to(im.path.parent.parent)
            im.landmarks['bb'] = mio.import_landmark_file(
                bb_root / str(Path('bbs') / (im.path.stem + '.pts')))
            im.landmarks['PTS'] = mio.import_landmark_file(
                bb_root / str(Path('annot') / (im.path.stem + '.pts')))

            im = im.crop_to_landmarks_proportion(0.3, group='bb')
            # im = im.rescale_to_pointcloud(reference_shape, group=group)
            # _, height, width = im.pixels.shape

            # im = im.resize([386, 458])
            # im = grey_to_rgb(im)
            # knn_2D.append(im.pixels.transpose(1, 2, 0))
            # shapes.append(im.landmarks[group].lms.points.astype('float32'))
            # scales.append([386/height, 485/width])
            # lms = im.landmarks[group].lms
            # im = im.pixels.transpose(1, 2, 0)
            # height, width = im.shape[:2]
            # # print('shape:', height, width)
            # padded_image = np.random.rand(386, 458, 3).astype(np.float32)
            # dy = max(int((386 - height - 1) / 2), 0)
            # dx = max(int((458 - width - 1) / 2), 0)
            # pts = lms.points
            # pts[:, 0] += dy
            # pts[:, 1] += dx
            # # delta[i][:, 0] = dy
            # # delta[i][:, 1] = dx
            # lms = lms.from_vector(pts)
            # padded_image[dy:(height+dy), dx:(width+dx), :] = im
            images.append(im)
            # shapes.append(lms.points.astype('float32'))

    return images
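A usage sketch for the loader above; the directory names and the pickled mean-shape path are placeholders, assuming the 300VW-style layout with 'annot/' and 'bbs/' subfolders that the code expects:

import menpo.io as mio

# Hypothetical paths; adjust to the local 300VW layout.
test_paths = ['/data/300VW/test/001/', '/data/300VW/test/002/']
mean_shape = mio.import_pickle('/data/300VW/reference_shape.pkl')  # [num_landmarks, 2] array

test_images = load_images_test_300VW(test_paths, mean_shape, verbose=True)
print('Loaded {} cropped test images'.format(len(test_images)))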
def prepare_images(paths, num_patches=73, verbose=True):
    """Save Train Images to TFRecord
    Args:
        paths: a list of strings containing the data directories.
        num_patches: number of landmarks
        verbose: boolean, print debugging info.
    Returns:
        None
    """
    if len(paths) == 0:
        return
    # .../<Dataset>/Images/*.png -> .../<Dataset>
    path_base = Path(paths[0]).parent.parent
    image_paths = []

    # First: get all image paths
    for path in paths:
        for file in Path('.').glob(path):
            try:
                mio.import_landmark_file(
                    str(Path(file.parent.parent / 'BoundingBoxes' / (file.stem + '.pts')))
                )
            except ValueError:
                continue
            image_paths.append(file)
    print('Got all image paths...')

    # Second: split to train, test and validate. 7:2:1
    if Path(path_base / 'train_img.txt').exists():
        with Path(path_base / 'train_img.txt').open('rb') as train_ifs, \
                Path(path_base / 'test_img.txt').open('rb') as test_ifs, \
                Path(path_base / 'val_img.txt').open('rb') as val_ifs:
            train_paths = [Path(line[:-1].decode('utf-8')) for line in train_ifs.readlines()]
            test_paths = [Path(line[:-1].decode('utf-8')) for line in test_ifs.readlines()]
            val_paths = [Path(line[:-1].decode('utf-8')) for line in val_ifs.readlines()]
    else:
        random.shuffle(image_paths)
        num_train = int(len(image_paths) * 0.7)
        num_test = int(len(image_paths) * 0.2)
        train_paths = sorted(image_paths[:num_train])
        test_paths = sorted(image_paths[num_train:num_train+num_test])
        val_paths = sorted(image_paths[num_train+num_test:])
        with Path(path_base / 'train_img.txt').open('wb') as train_ofs, \
                Path(path_base / 'test_img.txt').open('wb') as test_ofs, \
                Path(path_base / 'val_img.txt').open('wb') as val_ofs:
            train_ofs.writelines([str(line).encode('utf-8') + b'\n' for line in train_paths])
            test_ofs.writelines([str(line).encode('utf-8') + b'\n' for line in test_paths])
            val_ofs.writelines([str(line).encode('utf-8') + b'\n' for line in val_paths])
    print('Found Train/Test/Validate {}/{}/{}'.format(len(train_paths), len(test_paths), len(val_paths)))

    # Third: export reference shape on train
    if Path(path_base / 'reference_shape.pkl').exists():
        reference_shape = PointCloud(mio.import_pickle(path_base / 'reference_shape.pkl'))
    else:
        reference_shape = PointCloud(build_reference_shape(train_paths, num_patches))
        mio.export_pickle(reference_shape.points, path_base / 'reference_shape.pkl', overwrite=True)
    print('Created reference_shape.pkl')

    # Fourth: image shape & pca
    image_shape = [0, 0, 3]  # [H, W, C]
    if Path(path_base / 'pca.bin').exists() and Path(path_base / 'meta.txt').exists():
        with Path(path_base / 'meta.txt').open('r') as ifs:
            image_shape = [int(x) for x in ifs.read().split(' ')]
    else:
        with tf.io.TFRecordWriter(str(path_base / 'pca.bin')) as ofs:
            counter = 0
            for path in train_paths:
                counter += 1
                if verbose:
                    status = 10.0 * counter / len(train_paths)
                    status_str = '\rPreparing {:2.2f}%['.format(status * 10)
                    for i in range(int(status)):
                        status_str += '='
                    for i in range(int(status), 10):
                        status_str += ' '
                    status_str += '] {}     '.format(path)
                    print(status_str, end='')
                mp_image = mio.import_image(path)
                mp_image.landmarks['bb'] = mio.import_landmark_file(
                    str(Path(mp_image.path.parent.parent / 'BoundingBoxes' / (mp_image.path.stem + '.pts')))
                )
                mp_image = mp_image.crop_to_landmarks_proportion(0.3, group='bb')
                mp_image = mp_image.rescale_to_pointcloud(reference_shape, group='PTS')
                mp_image = grey_to_rgb(mp_image)
                assert(mp_image.pixels.shape[0] == image_shape[2])
                image_shape[0] = max(mp_image.pixels.shape[1], image_shape[0])
                image_shape[1] = max(mp_image.pixels.shape[2], image_shape[1])
                features = tf.train.Features(
                    feature={
                        'pca/shape': tf.train.Feature(
                            float_list=tf.train.FloatList(value=mp_image.landmarks['PTS'].points.flatten())
                        ),
                        'pca/bb': tf.train.Feature(
                            float_list=tf.train.FloatList(value=mp_image.landmarks['bb'].points.flatten())
                        ),
                    }
                )
                ofs.write(tf.train.Example(features=features).SerializeToString())
            if verbose:
                print('')
        with Path(path_base / 'meta.txt').open('w') as ofs:
            for s in image_shape[:-1]:
                ofs.write('{} '.format(s))
            ofs.write('{}'.format(image_shape[-1]))
    print('Image shape', image_shape)

    # Fifth: train data
    if Path(path_base / 'train.bin').exists():
        pass
    else:
        random.shuffle(train_paths)
        with tf.io.TFRecordWriter(str(path_base / 'train.bin')) as ofs:
            print('Preparing train data...')
            counter = 0
            for path in train_paths:
                counter += 1
                if verbose:
                    status = 10.0 * counter / len(train_paths)
                    status_str = '\rPreparing {:2.2f}%['.format(status * 10)
                    for i in range(int(status)):
                        status_str += '='
                    for i in range(int(status), 10):
                        status_str += ' '
                    status_str += '] {}     '.format(path)
                    print(status_str, end='')
                mp_image = mio.import_image(path)
                mp_image.landmarks['bb'] = mio.import_landmark_file(
                    str(Path(mp_image.path.parent.parent / 'BoundingBoxes' / (mp_image.path.stem + '.pts')))
                )
                mp_image = mp_image.crop_to_landmarks_proportion(0.3, group='bb')
                mp_image = mp_image.rescale_to_pointcloud(reference_shape, group='PTS')
                mp_image = grey_to_rgb(mp_image)
                # Padding to the same size
                height, width = mp_image.pixels.shape[1:]  # [C, H, W]
                dy = max(int((image_shape[0] - height - 1) / 2), 0)
                dx = max(int((image_shape[1] - width - 1) / 2), 0)
                padded_image = np.random.rand(*image_shape).astype(np.float32)
                padded_image[dy:(height + dy), dx:(width + dx), :] = mp_image.pixels.transpose(1, 2, 0)
                padded_landmark = mp_image.landmarks['PTS'].points
                padded_landmark[:, 0] += dy
                padded_landmark[:, 1] += dx
                features = tf.train.Features(
                    feature={
                        'train/image': tf.train.Feature(
                            bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(padded_image.tostring())])
                        ),
                        'train/shape': tf.train.Feature(
                            float_list=tf.train.FloatList(value=padded_landmark.flatten())
                        )
                    }
                )
                ofs.write(tf.train.Example(features=features).SerializeToString())
            if verbose:
                print('')

    # Sixth: test data
    if Path(path_base / 'test.bin').exists():
        pass
    else:
        with tf.io.TFRecordWriter(str(path_base / 'test.bin')) as ofs:
            print('Preparing test data...')
            counter = 0
            for path in test_paths:
                counter += 1
                if verbose:
                    status = 10.0 * counter / len(test_paths)
                    status_str = '\rPreparing {:2.2f}%['.format(status * 10)
                    for i in range(int(status)):
                        status_str += '='
                    for i in range(int(status), 10):
                        status_str += ' '
                    status_str += '] {}     '.format(path)
                    print(status_str, end='')
                mp_image = mio.import_image(path)
                mp_image.landmarks['bb'] = mio.import_landmark_file(
                    str(Path(mp_image.path.parent.parent / 'BoundingBoxes' / (mp_image.path.stem + '.pts')))
                )
                mp_image = mp_image.crop_to_landmarks_proportion(0.3, group='bb')
                mp_bb = mp_image.landmarks['bb'].bounding_box()
                mp_image.landmarks['init'] = align_shape_with_bounding_box(reference_shape, mp_bb)
                mp_image = mp_image.rescale_to_pointcloud(reference_shape, group='init')
                mp_image = grey_to_rgb(mp_image)
                # Padding to the same size
                height, width = mp_image.pixels.shape[1:]  # [C, H, W]
                dy = max(int((256 - height - 1) / 2), 0)  # 200*(1+0.3*2)/sqrt(2) == 226.7
                dx = max(int((256 - width - 1) / 2), 0)  # 200*(1+0.3*2)/sqrt(2) == 226.7
                padded_image = np.random.rand(256, 256, 3).astype(np.float32)
                padded_image[dy:(height + dy), dx:(width + dx), :] = mp_image.pixels.transpose(1, 2, 0)
                padded_landmark = mp_image.landmarks['PTS'].points
                padded_landmark[:, 0] += dy
                padded_landmark[:, 1] += dx
                padded_init_landmark = mp_image.landmarks['init'].points
                padded_init_landmark[:, 0] += dy
                padded_init_landmark[:, 1] += dx
                features = tf.train.Features(
                    feature={
                        'test/image': tf.train.Feature(
                            bytes_list=tf.train.BytesList(
                                value=[tf.compat.as_bytes(padded_image.tostring())])
                        ),
                        'test/shape': tf.train.Feature(
                            float_list=tf.train.FloatList(value=padded_landmark.flatten())
                        ),
                        'test/init': tf.train.Feature(
                            float_list=tf.train.FloatList(value=padded_init_landmark.flatten())
                        )
                    }
                )
                ofs.write(tf.train.Example(features=features).SerializeToString())
            if verbose:
                print('')
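A hedged sketch of reading 'train.bin' back (assuming TensorFlow 2.x): the feature keys and dtypes mirror the writer above, image_shape is the [H, W, 3] triple stored in meta.txt, and num_patches must match the dataset.

import tensorflow as tf

def parse_train_record(serialized, image_shape, num_patches=73):
    # Feature keys match those written by prepare_images above.
    features = tf.io.parse_single_example(serialized, {
        'train/image': tf.io.FixedLenFeature([], tf.string),
        'train/shape': tf.io.FixedLenFeature([num_patches * 2], tf.float32),
    })
    image = tf.io.decode_raw(features['train/image'], tf.float32)
    image = tf.reshape(image, image_shape)                 # padded [H, W, 3] image
    shape = tf.reshape(features['train/shape'], [num_patches, 2])
    return image, shape

# dataset = tf.data.TFRecordDataset(str(path_base / 'train.bin'))
# dataset = dataset.map(lambda s: parse_train_record(s, image_shape))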
Example #39
def lmark_resolver(path):
    return mio.import_landmark_file(mio.data_path_to('takeo.pts'))
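The resolver above always returns the bundled 'takeo' annotation regardless of the path it receives; a minimal sketch of attaching its result to an image by hand (the exact landmark_resolver contract accepted by mio.import_images differs between menpo versions, so this avoids relying on it):

import menpo.io as mio

im = mio.import_builtin_asset('takeo.ppm')     # bundled test image
im.landmarks['PTS'] = lmark_resolver(im.path)  # attach the resolved landmarks
print(im.landmarks['PTS'])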
import numpy as np
import sys
from numpy.testing import assert_allclose
import os
from pathlib import PosixPath, WindowsPath, Path
from mock import patch, PropertyMock, MagicMock
from pytest import raises


import menpo.io as mio
from menpo.io.utils import _norm_path
from menpo.image import Image
from menpo.io.output.pickle import pickle_paths_as_pure


builtins_str = '__builtin__' if sys.version_info[0] == 2 else 'builtins'

test_lg = mio.import_landmark_file(mio.data_path_to('lenna.ljson'),
                                   group='LJSON')
nan_lg = test_lg.copy()
nan_lg.points[0, :] = np.nan
test_img = Image(np.random.random([100, 100]))
colour_test_img = Image(np.random.random([3, 100, 100]))
fake_path = '/tmp/test.fake'


@patch('menpo.io.output.base.landmark_types')
@patch('menpo.io.output.base.Path.exists')
@patch('menpo.io.output.base.Path.open')
def test_export_filepath_overwrite_exists(mock_open, exists, landmark_types):
    exists.return_value = True
    landmark_types.__contains__.return_value = True
    mio.export_landmark_file(test_lg, fake_path, overwrite=True)
    mock_open.assert_called_with('wb')
Example #41
import numpy as np
from mock import patch, PropertyMock
from nose.tools import raises
import sys

import menpo.io as mio
from menpo.image import Image

builtins_str = '__builtin__' if sys.version_info[0] == 2 else 'builtins'

test_lg = mio.import_landmark_file(mio.data_path_to('breakingbad.pts'))
nan_lg = test_lg.copy()
nan_lg.lms.points[0, :] = np.nan
test_img = Image(np.random.random([100, 100]))
fake_path = '/tmp/test.fake'


@patch('menpo.io.output.base.landmark_types')
@patch('menpo.io.output.base.Path.exists')
@patch('menpo.io.output.base.Path.open')
def test_export_filepath_overwrite_exists(mock_open, exists, landmark_types):
    exists.return_value = True
    mio.export_landmark_file(test_lg, fake_path, overwrite=True)
    mock_open.assert_called_once_with('wb')
    landmark_types.__getitem__.assert_called_once_with('.fake')
    export_function = landmark_types.__getitem__.return_value
    export_function.assert_called_once()


@patch('menpo.io.output.base.landmark_types')
@patch('menpo.io.output.base.Path.exists')
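The assertions in the complete test above check that export_landmark_file dispatches on the output extension via landmark_types; without the mock, an extension with no registered exporter should be rejected (hedged: a ValueError is expected, mirroring the importer behaviour seen elsewhere in these examples, though the exact message depends on the menpo version):

import menpo.io as mio

try:
    mio.export_landmark_file(test_lg, '/tmp/test.fake', overwrite=True)
except ValueError as err:
    print('unsupported extension rejected:', err)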
Example #42
import numpy as np
import sys
from numpy.testing import assert_allclose
import os
from pathlib import PosixPath, WindowsPath, Path
from mock import patch, PropertyMock, MagicMock
from nose.tools import raises

import menpo.io as mio
from menpo.io.utils import _norm_path
from menpo.image import Image
from menpo.io.output.pickle import pickle_paths_as_pure

builtins_str = '__builtin__' if sys.version_info[0] == 2 else 'builtins'

test_lg = mio.import_landmark_file(mio.data_path_to('lenna.ljson'))
nan_lg = test_lg.copy()
nan_lg.points[0, :] = np.nan
test_img = Image(np.random.random([100, 100]))
colour_test_img = Image(np.random.random([3, 100, 100]))
fake_path = '/tmp/test.fake'


@patch('menpo.io.output.base.landmark_types')
@patch('menpo.io.output.base.Path.exists')
@patch('menpo.io.output.base.Path.open')
def test_export_filepath_overwrite_exists(mock_open, exists, landmark_types):
    exists.return_value = True
    landmark_types.__contains__.return_value = True
    mio.export_landmark_file(test_lg, fake_path, overwrite=True)
    mock_open.assert_called_with('wb')
Example #43
def load_images(paths, group=None, verbose=True):
    """Loads and rescales input images to the diagonal of the reference shape.

    Args:
      paths: a list of strings containing the data directories.
      reference_shape: a numpy array [num_landmarks, 2]
      group: landmark group containing the grounth truth landmarks.
      verbose: boolean, print debugging info.
    Returns:
      images: a list of numpy arrays containing images.
      shapes: a list of the ground truth landmarks.
      reference_shape: a numpy array [num_landmarks, 2].
      shape_gen: PCAModel, a shape generator.
    """
    images = []
    shapes = []
    bbs = []

    reference_shape = PointCloud(build_reference_shape(paths))

    for path in paths:
        if verbose:
            print('Importing data from {}'.format(path))

        for im in mio.import_images(path, verbose=verbose, as_generator=True):
            group = group or im.landmarks[group]._group_label

            bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
            if 'set' not in str(bb_root):
                bb_root = im.path.parent.relative_to(im.path.parent.parent)
            im.landmarks['bb'] = mio.import_landmark_file(
                str(Path('bbs') / bb_root / (im.path.stem + '.pts')))
            im = im.crop_to_landmarks_proportion(0.3, group='bb')
            im = im.rescale_to_pointcloud(reference_shape, group=group)
            im = grey_to_rgb(im)
            images.append(im.pixels.transpose(1, 2, 0))
            shapes.append(im.landmarks[group].lms)
            bbs.append(im.landmarks['bb'].lms)

    train_dir = Path(FLAGS.train_dir)
    mio.export_pickle(reference_shape.points,
                      train_dir / 'reference_shape.pkl',
                      overwrite=True)
    print('created reference_shape.pkl using the {} group'.format(group))

    pca_model = detect.create_generator(shapes, bbs)

    # Pad images to max length
    max_shape = np.max([im.shape for im in images], axis=0)
    max_shape = [len(images)] + list(max_shape)
    padded_images = np.random.rand(*max_shape).astype(np.float32)
    print(padded_images.shape)

    for i, im in enumerate(images):
        height, width = im.shape[:2]
        dy = max(int((max_shape[1] - height - 1) / 2), 0)
        dx = max(int((max_shape[2] - width - 1) / 2), 0)
        lms = shapes[i]
        pts = lms.points
        pts[:, 0] += dy
        pts[:, 1] += dx

        lms = lms.from_vector(pts)
        padded_images[i, dy:(height + dy), dx:(width + dx)] = im

    return padded_images, shapes, reference_shape.points, pca_model
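The padding block above centres every image in a canvas of the maximum size and shifts its landmarks by the same (dy, dx) offset; a small self-contained check of that arithmetic with hypothetical sizes:

import numpy as np

max_h, max_w = 458, 386      # hypothetical canvas size
height, width = 300, 250     # hypothetical image size
dy = max(int((max_h - height - 1) / 2), 0)
dx = max(int((max_w - width - 1) / 2), 0)

canvas = np.zeros([max_h, max_w, 3], dtype=np.float32)
canvas[dy:height + dy, dx:width + dx] = 1.0   # the pasted image region

corners = np.array([[0.0, 0.0], [height - 1.0, width - 1.0]])
corners += [dy, dx]                           # same shift applied to the landmarks
assert canvas[int(corners[0, 0]), int(corners[0, 1]), 0] == 1.0
assert canvas[int(corners[1, 0]), int(corners[1, 1]), 0] == 1.0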
Example #44
import numpy as np
import sys
from numpy.testing import assert_allclose
import os
from pathlib import PosixPath, WindowsPath, Path
from mock import patch, PropertyMock, MagicMock
from nose.tools import raises

import menpo.io as mio
from menpo.io.utils import _norm_path
from menpo.image import Image
from menpo.io.output.pickle import pickle_paths_as_pure

builtins_str = '__builtin__' if sys.version_info[0] == 2 else 'builtins'

test_lg = mio.import_landmark_file(mio.data_path_to('breakingbad.pts'))
nan_lg = test_lg.copy()
nan_lg.lms.points[0, :] = np.nan
test_img = Image(np.random.random([100, 100]))
colour_test_img = Image(np.random.random([3, 100, 100]))
fake_path = '/tmp/test.fake'


@patch('menpo.io.output.base.landmark_types')
@patch('menpo.io.output.base.Path.exists')
@patch('menpo.io.output.base.Path.open')
def test_export_filepath_overwrite_exists(mock_open, exists, landmark_types):
    exists.return_value = True
    landmark_types.__contains__.return_value = True
    mio.export_landmark_file(test_lg, fake_path, overwrite=True)
    mock_open.assert_called_with('wb')
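These near-duplicate test modules come from different menpo versions, which is why some access the raw landmark points as nan_lg.lms.points and others as nan_lg.points. A small version-agnostic accessor (an editorial sketch, not part of menpo):

def landmark_points(lg):
    # Older menpo wraps the points in a .lms PointCloud; newer menpo exposes
    # .points directly on the imported object.
    return lg.lms.points if hasattr(lg, 'lms') else lg.points

print(np.isnan(landmark_points(nan_lg)).any())  # True: the first point was set to NaN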