Example #1
def align_shapes(im, reference_shape, init=True, bb_hat=None):
    reference_shape = PointCloud(reference_shape)
    if init:
        bb = im.landmarks['bb'].lms.bounding_box()
        im.landmarks['__initial'] = align_shape_with_bounding_box(
            reference_shape, bb)

        im = im.rescale_to_pointcloud(reference_shape, group='__initial')
        lms = im.landmarks['PTS'].lms

        init = im.landmarks['__initial'].lms

        bb_hat = im.landmarks['bb'].lms
        # im = im.resize((235,200))
        pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0).copy()
        height, width = pixels.shape[:2]

        # Pad to a fixed 395x467 canvas; random noise fills the border region.
        padded_image = np.random.rand(395, 467, 3).astype(np.float32)
        dy = max(int((395 - height - 1) / 2), 0)
        dx = max(int((467 - width - 1) / 2), 0)
        pts = lms.points

        pts[:, 0] += dy
        pts[:, 1] += dx

        init_pts = init.points

        init_pts[:, 0] += dy
        init_pts[:, 1] += dx

        bb_pts = bb_hat.points
        bb_pts[:, 0] += dy
        bb_pts[:, 1] += dx

        lms = lms.from_vector(pts)
        init = init.from_vector(init_pts)

        bb_hat = bb_hat.from_vector(bb_pts)

        padded_image[dy:(height + dy), dx:(width + dx), :] = pixels
        gt = lms.points.astype(np.float32)
        init = init.points.astype(np.float32)

        return (np.expand_dims(padded_image, 0), np.expand_dims(init, 0),
                np.expand_dims(gt, 0), bb_hat.bounding_box())
    else:
        bb = bb_hat
        # print(bb.points)
        im.landmarks['a'] = align_shape_with_bounding_box(reference_shape, bb)
        init = im.landmarks['a'].lms
        init = init.points.astype(np.float32)
        # print(PointCloud(init).bounding_box().points)
        return np.expand_dims(init, 0)
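
The padding step above centres the rescaled image on a fixed 395x467 canvas of random noise and shifts the ground-truth, initial, and bounding-box points by the same (dy, dx) offset so they stay registered to the padded pixels. A minimal numpy sketch of just that step, with hypothetical sizes:

import numpy as np

# Hypothetical image smaller than the fixed 395x467 canvas used above.
pixels = np.zeros((300, 400, 3), dtype=np.float32)
landmarks = np.array([[150.0, 200.0]], dtype=np.float32)  # (y, x) rows

height, width = pixels.shape[:2]
dy = max(int((395 - height - 1) / 2), 0)
dx = max(int((467 - width - 1) / 2), 0)

padded = np.random.rand(395, 467, 3).astype(np.float32)  # noise border
padded[dy:height + dy, dx:width + dx, :] = pixels
landmarks += [dy, dx]  # shift annotations by the same offset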
Example #2
def load_image_test(path, reference_shape, frame_num):
    file_name = path[:-1] + "/%06d.jpg" % (frame_num)

    im = mio.import_image(file_name)

    im.landmarks['PTS'] = mio.import_landmark_file(path[:-1] +
                                                   "/annot/%06d.pts" %
                                                   (frame_num))
    # im.landmarks['PTS'] = mio.import_landmark_file(path[:-1] + "/%06d.pts" % (frame_num))
    bb_path = path[:-1] + "/bbs/%06d.pts" % (frame_num)

    im.landmarks['bb'] = mio.import_landmark_file(bb_path)

    im = im.crop_to_landmarks_proportion(0.3, group='bb')
    reference_shape = PointCloud(reference_shape)

    bb = im.landmarks['bb'].lms.bounding_box()

    im.landmarks['__initial'] = align_shape_with_bounding_box(
        reference_shape, bb)
    im = im.rescale_to_pointcloud(reference_shape, group='__initial')

    lms = im.landmarks['PTS'].lms
    initial = im.landmarks['__initial'].lms

    # if the image is greyscale then convert to rgb.
    pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)

    gt_truth = lms.points.astype(np.float32)
    estimate = initial.points.astype(np.float32)

    return 1, pixels.astype(np.float32).copy(), gt_truth, estimate
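
A hedged usage sketch for load_image_test: clip paths carry a trailing separator (hence the path[:-1] slicing above) and frames are zero-padded JPEG files. The clip path and pickle file below are hypothetical.

import menpo.io as mio

reference_shape = mio.import_pickle('reference_shape.pkl')  # hypothetical file
for frame_num in range(1, 101):
    _, pixels, gt_truth, estimate = load_image_test('data/vid_001/',
                                                    reference_shape, frame_num)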
Example #3
def load_image(path,
               reference_shape,
               is_training=False,
               group='PTS',
               mirror_image=False):
    """Load an annotated image.

    In the directory of the provided image file, there
    should exist a landmark file (.pts) with the same
    basename as the image file.

    Args:
      path: a path containing an image file.
      reference_shape: a numpy array [num_landmarks, 2]
      is_training: whether in training mode or not.
      group: landmark group containing the ground truth landmarks.
      mirror_image: whether to horizontally flip the image's pixels
        and landmarks.
    Returns:
      pixels: a numpy array [height, width, 3].
      estimate: an initial shape estimate, a numpy array [68, 2].
      gt_truth: the ground truth landmarks, a numpy array [68, 2].
    """
    im = mio.import_image(path)
    bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
    if 'set' not in str(bb_root):
        bb_root = im.path.parent.relative_to(im.path.parent.parent)

    im.landmarks['bb'] = mio.import_landmark_file(
        str(Path('bbs') / bb_root / (im.path.stem + '.pts')))

    im = im.crop_to_landmarks_proportion(0.3, group='bb')
    reference_shape = PointCloud(reference_shape)

    bb = im.landmarks['bb'].lms.bounding_box()

    im.landmarks['__initial'] = align_shape_with_bounding_box(
        reference_shape, bb)
    im = im.rescale_to_pointcloud(reference_shape, group='__initial')

    if mirror_image:
        im = utils.mirror_image(im)

    lms = im.landmarks[group].lms
    initial = im.landmarks['__initial'].lms

    # if the image is greyscale then convert to rgb.
    pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)

    gt_truth = lms.points.astype(np.float32)
    estimate = initial.points.astype(np.float32)
    return pixels.astype(np.float32).copy(), gt_truth, estimate
Example #4
def load_image(path, reference_shape, is_training=False, group='PTS',
               mirror_image=False):
    """Load an annotated image.

    In the directory of the provided image file, there
    should exist a landmark file (.pts) with the same
    basename as the image file.

    Args:
      path: a path containing an image file.
      reference_shape: a numpy array [num_landmarks, 2]
      is_training: whether in training mode or not.
      group: landmark group containing the ground truth landmarks.
      mirror_image: whether to horizontally flip the image's pixels
        and landmarks.
    Returns:
      pixels: a numpy array [height, width, 3].
      estimate: an initial shape estimate, a numpy array [68, 2].
      gt_truth: the ground truth landmarks, a numpy array [68, 2].
    """
    im = mio.import_image(path)
    bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
    if 'set' not in str(bb_root):
        bb_root = im.path.parent.relative_to(im.path.parent.parent)

    im.landmarks['bb'] = mio.import_landmark_file(str(Path('bbs') / bb_root / (
        im.path.stem + '.pts')))

    im = im.crop_to_landmarks_proportion(0.3, group='bb')
    reference_shape = PointCloud(reference_shape)

    bb = im.landmarks['bb'].lms.bounding_box()

    im.landmarks['__initial'] = align_shape_with_bounding_box(reference_shape,
                                                              bb)
    im = im.rescale_to_pointcloud(reference_shape, group='__initial')

    if mirror_image:
        im = utils.mirror_image(im)

    lms = im.landmarks[group].lms
    initial = im.landmarks['__initial'].lms

    # if the image is greyscale then convert to rgb.
    pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)

    gt_truth = lms.points.astype(np.float32)
    estimate = initial.points.astype(np.float32)
    return pixels.astype(np.float32).copy(), gt_truth, estimate
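
A hedged usage sketch (the dataset path and reference-shape file are hypothetical), assuming the 300-W style layout the function expects, with bounding boxes under bbs/<dataset>/<stem>.pts relative to the working directory:

import numpy as np

reference_shape = np.load('reference_shape.npy')  # hypothetical [68, 2] file
pixels, gt_truth, estimate = load_image(
    'databases/300W/trainset/0001.png', reference_shape, is_training=True)
print(pixels.shape, gt_truth.shape, estimate.shape)  # (H, W, 3), (68, 2), (68, 2)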
Example #5
def get_noisy_init_from_bb(reference_shape, bb, noise_percentage=.02):
    """Roughly aligns a reference shape to a bounding box.
    This adds some uniform noise for translation and scale to the
    aligned shape.
    Args:
      reference_shape: a numpy array [num_landmarks, 2]
      bb: bounding box, a numpy array [4, 2] of corner points.
      noise_percentage: percentage of noise to add.
    Returns:
      The aligned shape, as a numpy array [num_landmarks, 2]
    """
    bb = PointCloud(bb)
    reference_shape = PointCloud(reference_shape)

    bb = noisy_shape_from_bounding_box(
        reference_shape,
        bb,
        noise_percentage=[noise_percentage, 0,
                          noise_percentage]).bounding_box()

    return align_shape_with_bounding_box(reference_shape, bb).points
Example #6
def get_noisy_init_from_bb(reference_shape, bb, noise_percentage=.02):
    """Roughly aligns a reference shape to a bounding box.

    This adds some uniform noise for translation and scale to the
    aligned shape.

    Args:
      reference_shape: a numpy array [num_landmarks, 2]
      bb: bounding box, a numpy array [4, 2] of corner points.
      noise_percentage: percentage of noise to add.
    Returns:
      The aligned shape, as a numpy array [num_landmarks, 2]
    """
    bb = PointCloud(bb)
    reference_shape = PointCloud(reference_shape)

    bb = noisy_shape_from_bounding_box(
        reference_shape, bb, 
        noise_percentage=[noise_percentage, 0, noise_percentage]
    ).bounding_box()

    return align_shape_with_bounding_box(reference_shape, bb).points
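
A hedged usage sketch (the reference-shape file and box coordinates are hypothetical); the bounding box is a 4-corner array in menpo's (y, x) convention, so PointCloud(bb) above yields a valid box:

import numpy as np

reference_shape = np.load('reference_shape.npy')  # hypothetical [68, 2] file
bb = np.array([[50., 60.], [50., 220.], [210., 220.], [210., 60.]])
init = get_noisy_init_from_bb(reference_shape, bb, noise_percentage=.02)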
Example #7
    def _train_batch(self, image_batch, increment=False, group=None,
                     bounding_box_group_glob=None, verbose=False):
        # Rescale images wrt the scale factor between the existing
        # reference_shape and their ground truth (group) shapes
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape,
            verbose=verbose)

        # Create a callable that generates perturbations of the bounding boxes
        # of the provided images.
        generated_bb_func = generate_perturbations_from_gt(
            image_batch, self.n_perturbations,
            self._perturb_from_gt_bounding_box, gt_group=group,
            bb_group_glob=bounding_box_group_glob, verbose=verbose)

        # For each scale (low --> high)
        for j in range(self.n_scales):
            # Print progress if asked
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Extract features. Features are extracted only if we are at the
            # first scale or if the features of the current scale are different
            # than the ones extracted at the previous scale.
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (j == 0 or
                  self.holistic_features[j] != self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)

            # Rescale images according to scales. Note that scale_images is
            # smart enough not to rescale the images if the current scale
            # factor equals 1.
            scaled_images, scale_transforms = scale_images(
                feature_images, self.scales[j], prefix=scale_prefix,
                return_transforms=True, verbose=verbose)

            # Extract scaled ground truth shapes for current scale
            scaled_shapes = [i.landmarks[group] for i in scaled_images]

            # Get the shape estimations for the current scale. At the first
            # scale, this is done by aligning the reference shape with the
            # perturbed bounding boxes. At the remaining scales, the current
            # shapes have been attached to the scaled_images under the key
            # '__sdm_current_shape_{}'.
            current_shapes = []
            if j == 0:
                # At the first scale, the current shapes are created by aligning
                # the reference shape to the perturbed bounding boxes.
                msg = '{}Aligning reference shape with bounding boxes.'.format(
                    scale_prefix)
                wrap = partial(print_progress, prefix=msg,
                               end_with_newline=False, verbose=verbose)
                # Extract perturbations at the very bottom level
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for bbox in generated_bb_func(ii):
                        c_s = align_shape_with_bounding_box(
                            self.reference_shape, bbox)
                        c_shapes.append(c_s)
                    current_shapes.append(c_shapes)
            else:
                # At the remaining scales, extract the current shapes that
                # were attached to the images
                msg = '{}Extracting shape estimations from previous ' \
                      'scale.'.format(scale_prefix)
                wrap = partial(print_progress, prefix=msg,
                               end_with_newline=False, verbose=verbose)
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for k in list(range(self.n_perturbations)):
                        c_key = '__sdm_current_shape_{}'.format(k)
                        c_shapes.append(ii.landmarks[c_key])
                    current_shapes.append(c_shapes)

            # Train supervised descent algorithm. This returns the shape
            # estimations for the next scale.
            if not increment:
                current_shapes = self.algorithms[j].train(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)
            else:
                current_shapes = self.algorithms[j].increment(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)

            # Scale the current shape estimations for the next level. This
            # doesn't have to be done for the last scale. The only thing we need
            # to do at the last scale is to remove any attached landmarks from
            # the training images.
            if j < (self.n_scales - 1):
                if self.holistic_features[j + 1] != self.holistic_features[j]:
                    # Features will be extracted, thus attach current_shapes on
                    # the training images (image_batch)
                    for jj, image_shapes in enumerate(current_shapes):
                        for k, shape in enumerate(image_shapes):
                            c_key = '__sdm_current_shape_{}'.format(k)
                            image_batch[jj].landmarks[c_key] = \
                                scale_transforms[jj].apply(shape)
                else:
                    # Features won't be extracted; the same feature_images
                    # will be used for the next scale, thus attach
                    # current_shapes on them.
                    for jj, image_shapes in enumerate(current_shapes):
                        for k, shape in enumerate(image_shapes):
                            c_key = '__sdm_current_shape_{}'.format(k)
                            feature_images[jj].landmarks[c_key] = \
                                scale_transforms[jj].apply(shape)
            else:
                # Check whether the original training images (image_batch) have
                # current shape estimations attached. If so, delete them.
                if '__sdm_current_shape_0' in image_batch[0].landmarks:
                    for image in image_batch:
                        for k in list(range(self.n_perturbations)):
                            c_key = '__sdm_current_shape_{}'.format(k)
                            del image.landmarks[c_key]
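
_train_batch is an internal method; in menpofit it is reached through the public SupervisedDescentFitter constructor, which batches the training images and invokes _train_batch once per batch. A sketch of that entry point, with illustrative argument values and a hypothetical dataset path:

import menpo.io as mio
from menpofit.sdm import SupervisedDescentFitter

training_images = list(mio.import_images('databases/300W/trainset/',
                                         verbose=True))
fitter = SupervisedDescentFitter(training_images, group='PTS',
                                 scales=(0.5, 1.0), n_perturbations=30,
                                 verbose=True)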
Example #8
    def _train_batch(self,
                     image_batch,
                     increment=False,
                     group=None,
                     bounding_box_group_glob=None,
                     verbose=False):
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(image_batch,
                                                        group,
                                                        self.reference_shape,
                                                        verbose=verbose)

        generated_bb_func = generate_perturbations_from_gt(
            image_batch,
            self.n_perturbations,
            self._perturb_from_gt_bounding_box,
            gt_group=group,
            bb_group_glob=bounding_box_group_glob,
            verbose=verbose)

        # for each scale (low --> high)
        current_shapes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (j == 0 or self.holistic_features[j]
                  is not self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images,
                                             self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract scaled ground truth shapes for current scale
            scaled_shapes = [i.landmarks[group].lms for i in scaled_images]

            if j == 0:
                msg = '{}Aligning reference shape with bounding boxes.'.format(
                    scale_prefix)
                wrap = partial(print_progress,
                               prefix=msg,
                               end_with_newline=False,
                               verbose=verbose)

                # Extract perturbations at the very bottom level
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for bbox in generated_bb_func(ii):
                        c_s = align_shape_with_bounding_box(
                            self.reference_shape, bbox)
                        c_shapes.append(c_s)
                    current_shapes.append(c_shapes)

            # train supervised descent algorithm
            if not increment:
                current_shapes = self.algorithms[j].train(scaled_images,
                                                          scaled_shapes,
                                                          current_shapes,
                                                          prefix=scale_prefix,
                                                          verbose=verbose)
            else:
                current_shapes = self.algorithms[j].increment(
                    scaled_images,
                    scaled_shapes,
                    current_shapes,
                    prefix=scale_prefix,
                    verbose=verbose)

            # Scale current shapes to next resolution, don't bother
            # scaling final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for image_shapes in current_shapes:
                    for k, shape in enumerate(image_shapes):
                        image_shapes[k] = transform.apply(shape)
Example #9
    def _train_batch(self, image_batch, increment=False, group=None,
                     bounding_box_group_glob=None, verbose=False):
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape,
            verbose=verbose)

        generated_bb_func = generate_perturbations_from_gt(
            image_batch, self.n_perturbations,
            self._perturb_from_gt_bounding_box, gt_group=group,
            bb_group_glob=bounding_box_group_glob, verbose=verbose)

        # for each scale (low --> high)
        current_shapes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract scaled ground truth shapes for current scale
            scaled_shapes = [i.landmarks[group].lms for i in scaled_images]

            if j == 0:
                msg = '{}Aligning reference shape with bounding boxes.'.format(
                    scale_prefix)
                wrap = partial(print_progress, prefix=msg,
                               end_with_newline=False, verbose=verbose)

                # Extract perturbations at the very bottom level
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for bbox in generated_bb_func(ii):
                        c_s = align_shape_with_bounding_box(
                            self.reference_shape, bbox)
                        c_shapes.append(c_s)
                    current_shapes.append(c_shapes)

            # train supervised descent algorithm
            if not increment:
                current_shapes = self.algorithms[j].train(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)
            else:
                current_shapes = self.algorithms[j].increment(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)

            # Scale current shapes to next resolution, don't bother
            # scaling final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for image_shapes in current_shapes:
                    for shape in image_shapes:
                        transform.apply_inplace(shape)
Example #10
def prepare_images(paths, num_patches=73, verbose=True):
    """Save Train Images to TFRecord
    Args:
        paths: a list of strings containing the data directories.
        num_patches: number of landmarks
        verbose: boolean, print debugging info.
    Returns:
        None
    """
    if len(paths) == 0:
        return
    # .../<Dataset>/Images/*.png -> .../<Dataset>
    path_base = Path(paths[0]).parent.parent
    image_paths = []

    # First: get all image paths
    for path in paths:
        for file in Path('.').glob(path):
            try:
                mio.import_landmark_file(
                    str(Path(file.parent.parent / 'BoundingBoxes' / (file.stem + '.pts')))
                )
            except ValueError:
                continue
            image_paths.append(file)
    print('Got all image paths...')

    # Second: split to train, test and validate. 7:2:1
    if Path(path_base / 'train_img.txt').exists():
        with Path(path_base / 'train_img.txt').open('rb') as train_ifs, \
                Path(path_base / 'test_img.txt').open('rb') as test_ifs, \
                Path(path_base / 'val_img.txt').open('rb') as val_ifs:
            train_paths = [Path(line[:-1].decode('utf-8')) for line in train_ifs.readlines()]
            test_paths = [Path(line[:-1].decode('utf-8')) for line in test_ifs.readlines()]
            val_paths = [Path(line[:-1].decode('utf-8')) for line in val_ifs.readlines()]
    else:
        random.shuffle(image_paths)
        num_train = int(len(image_paths) * 0.7)
        num_test = int(len(image_paths) * 0.2)
        train_paths = sorted(image_paths[:num_train])
        test_paths = sorted(image_paths[num_train:num_train+num_test])
        val_paths = sorted(image_paths[num_train+num_test:])
        with Path(path_base / 'train_img.txt').open('wb') as train_ofs, \
                Path(path_base / 'test_img.txt').open('wb') as test_ofs, \
                Path(path_base / 'val_img.txt').open('wb') as val_ofs:
            train_ofs.writelines([str(line).encode('utf-8') + b'\n' for line in train_paths])
            test_ofs.writelines([str(line).encode('utf-8') + b'\n' for line in test_paths])
            val_ofs.writelines([str(line).encode('utf-8') + b'\n' for line in val_paths])
    print('Found Train/Test/Validate {}/{}/{}'.format(len(train_paths), len(test_paths), len(val_paths)))

    # Third: export reference shape on train
    if Path(path_base / 'reference_shape.pkl').exists():
        reference_shape = PointCloud(mio.import_pickle(path_base / 'reference_shape.pkl'))
    else:
        reference_shape = PointCloud(build_reference_shape(train_paths, num_patches))
        mio.export_pickle(reference_shape.points, path_base / 'reference_shape.pkl', overwrite=True)
    print('Created reference_shape.pkl')

    # Fourth: image shape & pca
    image_shape = [0, 0, 3]  # [H, W, C]
    if Path(path_base / 'pca.bin').exists() and Path(path_base / 'meta.txt').exists():
        with Path(path_base / 'meta.txt').open('r') as ifs:
            image_shape = [int(x) for x in ifs.read().split(' ')]
    else:
        with tf.io.TFRecordWriter(str(path_base / 'pca.bin')) as ofs:
            counter = 0
            for path in train_paths:
                counter += 1
                if verbose:
                    status = 10.0 * counter / len(train_paths)
                    status_str = '\rPreparing {:2.2f}%['.format(status * 10)
                    for i in range(int(status)):
                        status_str += '='
                    for i in range(int(status), 10):
                        status_str += ' '
                    status_str += '] {}     '.format(path)
                    print(status_str, end='')
                mp_image = mio.import_image(path)
                mp_image.landmarks['bb'] = mio.import_landmark_file(
                    str(Path(mp_image.path.parent.parent / 'BoundingBoxes' / (mp_image.path.stem + '.pts')))
                )
                mp_image = mp_image.crop_to_landmarks_proportion(0.3, group='bb')
                mp_image = mp_image.rescale_to_pointcloud(reference_shape, group='PTS')
                mp_image = grey_to_rgb(mp_image)
                assert(mp_image.pixels.shape[0] == image_shape[2])
                image_shape[0] = max(mp_image.pixels.shape[1], image_shape[0])
                image_shape[1] = max(mp_image.pixels.shape[2], image_shape[1])
                features = tf.train.Features(
                    feature={
                        'pca/shape': tf.train.Feature(
                            float_list=tf.train.FloatList(value=mp_image.landmarks['PTS'].points.flatten())
                        ),
                        'pca/bb': tf.train.Feature(
                            float_list=tf.train.FloatList(value=mp_image.landmarks['bb'].points.flatten())
                        ),
                    }
                )
                ofs.write(tf.train.Example(features=features).SerializeToString())
            if verbose:
                print('')
        with Path(path_base / 'meta.txt').open('w') as ofs:
            for s in image_shape[:-1]:
                ofs.write('{} '.format(s))
            ofs.write('{}'.format(image_shape[-1]))
    print('Image shape', image_shape)

    # Fifth: train data
    if Path(path_base / 'train.bin').exists():
        pass
    else:
        random.shuffle(train_paths)
        with tf.io.TFRecordWriter(str(path_base / 'train.bin')) as ofs:
            print('Preparing train data...')
            counter = 0
            for path in train_paths:
                counter += 1
                if verbose:
                    status = 10.0 * counter / len(train_paths)
                    status_str = '\rPreparing {:2.2f}%['.format(status * 10)
                    for i in range(int(status)):
                        status_str += '='
                    for i in range(int(status), 10):
                        status_str += ' '
                    status_str += '] {}     '.format(path)
                    print(status_str, end='')
                mp_image = mio.import_image(path)
                mp_image.landmarks['bb'] = mio.import_landmark_file(
                    str(Path(mp_image.path.parent.parent / 'BoundingBoxes' / (mp_image.path.stem + '.pts')))
                )
                mp_image = mp_image.crop_to_landmarks_proportion(0.3, group='bb')
                mp_image = mp_image.rescale_to_pointcloud(reference_shape, group='PTS')
                mp_image = grey_to_rgb(mp_image)
                # Padding to the same size
                height, width = mp_image.pixels.shape[1:]  # [C, H, W]
                dy = max(int((image_shape[0] - height - 1) / 2), 0)
                dx = max(int((image_shape[1] - width - 1) / 2), 0)
                padded_image = np.random.rand(*image_shape).astype(np.float32)
                padded_image[dy:(height + dy), dx:(width + dx), :] = mp_image.pixels.transpose(1, 2, 0)
                padded_landmark = mp_image.landmarks['PTS'].points
                padded_landmark[:, 0] += dy
                padded_landmark[:, 1] += dx
                features = tf.train.Features(
                    feature={
                        'train/image': tf.train.Feature(
                            bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(padded_image.tobytes())])
                        ),
                        'train/shape': tf.train.Feature(
                            float_list=tf.train.FloatList(value=padded_landmark.flatten())
                        )
                    }
                )
                ofs.write(tf.train.Example(features=features).SerializeToString())
            if verbose:
                print('')

    # Sixth: test data
    if Path(path_base / 'test.bin').exists():
        pass
    else:
        with tf.io.TFRecordWriter(str(path_base / 'test.bin')) as ofs:
            print('Preparing test data...')
            counter = 0
            for path in test_paths:
                counter += 1
                if verbose:
                    status = 10.0 * counter / len(test_paths)
                    status_str = '\rPreparing {:2.2f}%['.format(status * 10)
                    for i in range(int(status)):
                        status_str += '='
                    for i in range(int(status), 10):
                        status_str += ' '
                    status_str += '] {}     '.format(path)
                    print(status_str, end='')
                mp_image = mio.import_image(path)
                mp_image.landmarks['bb'] = mio.import_landmark_file(
                    str(Path(mp_image.path.parent.parent / 'BoundingBoxes' / (mp_image.path.stem + '.pts')))
                )
                mp_image = mp_image.crop_to_landmarks_proportion(0.3, group='bb')
                mp_bb = mp_image.landmarks['bb'].bounding_box()
                mp_image.landmarks['init'] = align_shape_with_bounding_box(reference_shape, mp_bb)
                mp_image = mp_image.rescale_to_pointcloud(reference_shape, group='init')
                mp_image = grey_to_rgb(mp_image)
                # Padding to the same size
                height, width = mp_image.pixels.shape[1:]  # [C, H, W]
                dy = max(int((256 - height - 1) / 2), 0)  # 200*(1+0.3*2)/sqrt(2) == 226.7
                dx = max(int((256 - width - 1) / 2), 0)  # 200*(1+0.3*2)/sqrt(2) == 226.7
                padded_image = np.random.rand(256, 256, 3).astype(np.float32)
                padded_image[dy:(height + dy), dx:(width + dx), :] = mp_image.pixels.transpose(1, 2, 0)
                padded_landmark = mp_image.landmarks['PTS'].points
                padded_landmark[:, 0] += dy
                padded_landmark[:, 1] += dx
                padded_init_landmark = mp_image.landmarks['init'].points
                padded_init_landmark[:, 0] += dy
                padded_init_landmark[:, 1] += dx
                features = tf.train.Features(
                    feature={
                        'test/image': tf.train.Feature(
                            bytes_list=tf.train.BytesList(
                                value=[tf.compat.as_bytes(padded_image.tobytes())])
                        ),
                        'test/shape': tf.train.Feature(
                            float_list=tf.train.FloatList(value=padded_landmark.flatten())
                        ),
                        'test/init': tf.train.Feature(
                            float_list=tf.train.FloatList(value=padded_init_landmark.flatten())
                        )
                    }
                )
                ofs.write(tf.train.Example(features=features).SerializeToString())
            if verbose:
                print('')
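
Reading the records back mirrors the feature layout written above. A minimal tf.data sketch, assuming path_base, image_shape (from meta.txt) and num_patches are in scope as in prepare_images:

import tensorflow as tf

feature_spec = {
    'train/image': tf.io.FixedLenFeature([], tf.string),
    'train/shape': tf.io.FixedLenFeature([num_patches * 2], tf.float32),
}

def parse_record(record):
    example = tf.io.parse_single_example(record, feature_spec)
    image = tf.io.decode_raw(example['train/image'], tf.float32)
    image = tf.reshape(image, image_shape)               # [H, W, 3] from meta.txt
    shape = tf.reshape(example['train/shape'], [-1, 2])  # (y, x) landmarks
    return image, shape

dataset = tf.data.TFRecordDataset(str(path_base / 'train.bin')).map(parse_record)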
Example #11
def load_images(paths,
                group1=None,
                group2=None,
                verbose=True,
                PLOT=False,
                AFLW=False,
                PLOT_shape=True):
    """Loads and rescales input knn_2D to the diagonal of the reference shape.

    Args:
      paths: a list of strings containing the data directories.
      reference_shape (meanshape): a numpy array [num_landmarks, 2]
      group: landmark group containing the grounth truth landmarks.
      verbose: boolean, print debugging info.
    Returns:
      knn_2D: a list of numpy arrays containing knn_2D.
      shapes: a list of the ground truth landmarks.
      reference_shape (meanshape): a numpy array [num_landmarks, 2].
      shape_gen: PCAModel, a shape generator.
    """
    images = []
    shapes = []

    bbs = []
    inits = []

    shape_space = []
    plot_shape_x = []
    plot_shape_y = []
    # compute mean shape
    # if AFLW:
    #     # reference_shape = PointCloud(mio.import_pickle(Path('/home/hliu/gmh/RL_FA/mdm_aflw/ckpt/train_aflw') / 'reference_shape.pkl'))
    #     reference_shape = mio.import_pickle(
    #         Path('/home/hliu/data2/CongcongZhu/ICME/RDN/ckpt/pred') / 'reference_shape.pkl')
    # else:
    reference_shape = PointCloud(build_reference_shape(paths))

    for path in paths:
        if verbose:
            print('Importing data from {}'.format(path))

        for im in mio.import_images(path, verbose=verbose, as_generator=True):
            # group = group or im.landmarks[group]._group_label
            # group = group or im.landmarks.keys()[0]
            group1 = 'PTS'

            bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
            if 'set' not in str(bb_root):
                bb_root = im.path.parent.relative_to(im.path.parent.parent)

            if AFLW:
                im.landmarks['bb'] = im.landmarks['PTS'].lms.bounding_box()
            else:
                im.landmarks['bb'] = mio.import_landmark_file(
                    str(Path('bbs') / bb_root / (im.path.stem + '.pts')))
            im = im.crop_to_landmarks_proportion(0.3, group='bb')
            bb = im.landmarks['bb'].lms.bounding_box()

            im.landmarks['initial'] = align_shape_with_bounding_box(
                reference_shape, bb)

            im = im.rescale_to_pointcloud(reference_shape, group=group1)
            im = grey_to_rgb(im)
            images.append(im.pixels.transpose(1, 2, 0))
            inits.append(im.landmarks['initial'].lms)

            shapes.append(im.landmarks[group1].lms)

            shape_space.append(im.landmarks[group1].lms.points)
            bbs.append(im.landmarks['bb'].lms)
            if PLOT_shape:
                x_tmp = np.sum((im.landmarks[group1].lms.points[:, 0] -
                                reference_shape.points[:, 0]))
                y_tmp = np.sum((im.landmarks[group1].lms.points[:, 1] -
                                reference_shape.points[:, 1]))
                if x_tmp < 0 and y_tmp < 0:
                    plot_shape_x.append(x_tmp)
                    plot_shape_y.append(y_tmp)

    shape_space = np.array(shape_space)
    print('shape_space:', shape_space.shape)

    train_dir = Path(FLAGS.train_dir)

    # centers = utils.k_means(shape_space, 100)
    # centers = np.reshape(centers, [-1, 68, 2])

    # np.save(train_dir/'shape_space_origin.npy', centers)
    # print('created shape_space.npy using the {} group'.format(group))
    # exit(0)
    if PLOT_shape:
        k_nn_plot_x = []
        k_nn_plot_y = []
        centers = utils.k_means(shape_space, 100)
        centers = np.reshape(centers, [-1, 68, 2])
        for i in range(centers.shape[0]):
            x_tmp = np.sum((centers[i, :, 0] - reference_shape.points[:, 0]))
            y_tmp = np.sum((centers[i, :, 1] - reference_shape.points[:, 1]))
            if x_tmp < 0 and y_tmp < 0:
                k_nn_plot_x.append(x_tmp)
                k_nn_plot_y.append(y_tmp)
        # pdb.set_trace()
        # plt.scatter(plot_shape_x, plot_shape_y, s=20)
        # plt.scatter(k_nn_plot_x, k_nn_plot_y, s=40)
        # plt.xticks(())
        # plt.yticks(())
        # plt.show()
        # pdb.set_trace()

    mio.export_pickle(reference_shape.points,
                      train_dir / 'reference_shape.pkl',
                      overwrite=True)
    print('created reference_shape.pkl using the {} group'.format(group1))

    pca_model = detect.create_generator(shapes, bbs)

    # Pad knn_2D to max length
    max_shape = np.max([im.shape for im in images], axis=0)
    max_shape = [len(images)] + list(max_shape)
    padded_images = np.random.rand(*max_shape).astype(np.float32)
    print(padded_images.shape,
          '====================================================')

    if PLOT:
        # plot without padding
        centers = utils.k_means(shape_space, 100)
        centers = np.reshape(centers, [-1, 68, 2])
        plot_img = cv2.imread('a.png').transpose(2, 0, 1)
        centers_tmp = np.zeros(centers.shape)
        # menpo_img = mio.import_image('a.png')
        menpo_img = menpo.image.Image(plot_img)
        for i in range(centers.shape[0]):
            menpo_img.view()
            min_y = np.min(centers[i, :, 0])
            min_x = np.min(centers[i, :, 1])
            centers_tmp[i, :, 0] = centers[i, :, 0] - min_y + 20
            centers_tmp[i, :, 1] = centers[i, :, 1] - min_x + 20
            print(centers_tmp[i, :, :])
            menpo_img.landmarks['center'] = PointCloud(centers_tmp[i, :, :])
            menpo_img.view_landmarks(group='center',
                                     marker_face_colour='b',
                                     marker_size=16)
            # menpo_img.landmarks['center'].view(render_legend=True)
            plt.savefig('plot_shape_space/' + str(i) + '.png')
            plt.close()
        exit(0)

    # shape_space is stored without separate delta offsets; the padding shifts
    # below are applied directly to its points.

    # delta = np.zeros(shape_space.shape)
    gts = []

    inis = []

    for i, im in enumerate(images):
        height, width = im.shape[:2]
        dy = max(int((max_shape[1] - height - 1) / 2), 0)
        dx = max(int((max_shape[2] - width - 1) / 2), 0)
        lms = shapes[i]
        pts = lms.points

        init = inits[i]
        init_shape = init.points

        pts[:, 0] += dy
        pts[:, 1] += dx

        init_shape[:, 0] += dy
        init_shape[:, 1] += dx

        shape_space[i, :, 0] += dy
        shape_space[i, :, 1] += dx
        # delta[i][:, 0] = dy
        # delta[i][:, 1] = dx
        lms = lms.from_vector(pts)

        init = init.from_vector(init_shape)

        padded_images[i, dy:(height + dy), dx:(width + dx)] = im

        gts.append(lms)

        inis.append(init)

    # shape_space = np.concatenate((shape_space, delta), 2)

    centers = utils.k_means(shape_space, 100)
    centers = np.reshape(centers, [-1, 68, 2])

    np.save(train_dir / 'shape_space.npy', centers)
    print('created shape_space.npy using the {} group'.format(group1))

    return (padded_images, gts, reference_shape.points.astype('float32'),
            pca_model, centers, inis)
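
A hedged usage sketch (dataset path hypothetical). FLAGS.train_dir must name an existing directory, since load_images writes reference_shape.pkl and shape_space.npy into it:

padded_images, gts, reference_shape, pca_model, centers, inits = load_images(
    ['databases/300W/trainset/'])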