Example #1
def cli(  # pylint: disable=too-many-locals, too-many-statements, too-many-branches
        image_name,
        out_name=None,
        folder_image_suffix='.png',
        only_missing=False,
        allow_subsampling=False):
    """Get a 3D body model fit."""
    if _os.path.isdir(image_name):
        processing_folder = True
        folder_name = image_name[:]
        _LOGGER.info(
            "Specified image name is a folder. Processing all images "
            "with suffix %s.", folder_image_suffix)
        images = sorted(
            _glob.glob(_os.path.join(folder_name, '*' + folder_image_suffix)))
        images = [im for im in images if not im.endswith('vis.png')]
        pose_names = get_pose_names(images)
    else:
        processing_folder = False
        images = [image_name]
        pose_names = get_pose_names(images)
    for image_name, pose_name in zip(images, pose_names):
        if not _path.exists(pose_name) and allow_subsampling:
            continue
        if out_name is None or processing_folder:
            out_name = image_name + '_body_directseparate.pkl'
        vis_name = out_name + '_vis.png'
        work_name = out_name + '_working'
        lock_name = _path.join(_path.dirname(work_name), 'work_lock')
        if only_missing and _path.exists(out_name) and _path.exists(vis_name):
            continue
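        # Claim the image by creating a work-marker file under an inter-process
        # lock; if the marker already exists, another worker is handling it.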
        with fasteners.InterProcessLock(lock_name):
            if _path.exists(work_name):
                continue
            else:
                with open(work_name, 'w') as outf:
                    outf.write('t')
        if pose_name.endswith('.npz'):
            pose = _np.load(pose_name)['pose']
        else:
            pose = _np.load(pose_name)
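        # Reduce the pose to the LSP joint set (via `reduction_91tolsp`), treat
        # non-negative confidences as visible, and derive a scale factor that
        # normalizes the estimated person size to roughly 500 px.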
        core_pose = pose[:3, reduction_91tolsp].copy()
        core_pose[2, :] = core_pose[2, :] >= 0.  # no threshold right now
        person_size_est = robust_person_size(core_pose)
        size_factor = 500. / person_size_est
        _LOGGER.info("Predicting the 3D body on `%s` (saving to `%s`).",
                     image_name, out_name)
        image = _scipy.misc.imread(image_name)
        if image.ndim == 2:
            image = _np.dstack((image, image, image))
        else:
            image = image[:, :, 3::-1]
        params, vis = run_single_fit(  # pylint: disable=unused-variable
            image,
            pose[:3, :],
            size_factor,
            do_degrees=[0.])
        with open(out_name, 'w') as outf:
            _pickle.dump(params, outf)
        _cv2.imwrite(vis_name, vis[0])
        _os.remove(work_name)
    _LOGGER.info("Shutting down...")
    if _POSE_EST is not None:
        _POSE_EST[3].put('shutdown')
        _POSE_EST[0].join()
    if _SHAPE_EST is not None:
        _SHAPE_EST[3].put('shutdown')
        _SHAPE_EST[0].join()
    if _DEPTH_EST is not None:
        _DEPTH_EST[3].put('shutdown')
        _DEPTH_EST[0].join()
    if _ROT_EST is not None:
        _ROT_EST[3].put('shutdown')
        _ROT_EST[0].join()
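
A minimal usage sketch (not part of the original example): it assumes `cli` can be called directly as a plain function, that the estimator globals referenced above (`_POSE_EST`, `_SHAPE_EST`, `_DEPTH_EST`, `_ROT_EST`) are set up by the surrounding module, and that 2D pose files as expected by `get_pose_names` already exist next to the images. All paths are placeholders.

# Hypothetical calls; image and output paths are placeholders.
# Fit a single image and write the parameters next to it:
cli('data/person_01.png', out_name='data/person_01_body.pkl')

# Fit every '.png' in a folder, skipping images whose outputs already exist:
cli('data/frames', folder_image_suffix='.png', only_missing=True)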
Example #2
def add_dataset(  # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches, unused-argument
        dset_fp,
        list_ids,
        up3d_fp,
        train_dset,
        val_dset,
        test_dset,
        train_spec,
        val_spec,
        test_spec,
        target_person_size,
        landmarks,
        train_crop,
        test_crop,
        train_steps_x,
        train_steps_y,
        running_idx,
        only_missing=False):
    """Add a dataset to the collection."""
    test_ids = [int(id_[1:6]) for id_ in test_spec]
    train_ids = [int(id_[1:6]) for id_ in train_spec]
    val_ids = [int(id_[1:6]) for id_ in val_spec]
    LOGGER.info("Split: %d train, %d val, %d test.", len(train_ids),
                len(val_ids), len(test_ids))
    LOGGER.info("Writing dataset...")
    for im_idx in tqdm.tqdm(train_ids + val_ids + test_ids):
        image = scipy.misc.imread(
            path.join(up3d_fp, '%05d_image.png' % (im_idx)))
        with open(path.join(up3d_fp, '%05d_fit_crop_info.txt' % (im_idx)),
                  'r') as inf:
            cropinfo = [int(val) for val in inf.readline().strip().split()]
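            # cropinfo appears to hold [crop height, crop width, y_start, y_end,
            # x_start, x_end] of the fit crop in the original image; recover the
            # crop's scale factor and its top-left offset.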
            fac_y = cropinfo[0] / float(cropinfo[3] - cropinfo[2])
            fac_x = cropinfo[1] / float(cropinfo[5] - cropinfo[4])
            rec_scale = np.mean([fac_x, fac_y])
            rec_x = cropinfo[4]
            rec_y = cropinfo[2]
        assert image.ndim == 3
        out_exists = (
            path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx))) and
            path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx))))
        joints = np.load(path.join(up3d_fp, '%05d_joints.npy' % (im_idx)))
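        # Append a visibility row: a joint counts as visible only if all of its
        # stored values are positive.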
        joints = np.vstack((joints, np.all(joints > 0, axis=0)[None, :]))
        person_size = robust_person_size(joints)
        norm_factor = float(target_person_size) / person_size
        joints[:2, :] *= norm_factor
        image = scipy.misc.imresize(image, norm_factor, interp='bilinear')
        if im_idx in test_ids:
            crop = test_crop
        else:
            crop = train_crop
        if image.shape[0] > crop or image.shape[1] > crop:
            LOGGER.debug(
                "Image (original %d, here %d) too large (%s)! Cropping...",
                im_idx, running_idx, str(image.shape[:2]))
            person_center = np.mean(joints[:2, joints[2, :] == 1], axis=1)
            crop_y, crop_x = get_crop(image, person_center, crop)
            image = image[crop_y[0]:crop_y[1], crop_x[0]:crop_x[1], :]
            assert image.shape[0] == crop or image.shape[1] == crop, (
                "Error cropping image (original %d, here %d)!" %
                (im_idx, running_idx))
        else:
            crop_x = [0, image.shape[1]]
            crop_y = [0, image.shape[0]]
        assert (image.shape[0] <= crop and image.shape[1] <= crop
                and image.shape[2] == 3), (
                    "Wrong image shape (original %d, here %d)!" %
                    (im_idx, running_idx))
        if not (only_missing and out_exists):
            if im_idx in test_ids:
                steps_x = 1
                steps_y = 1
            else:
                steps_x = train_steps_x
                steps_y = train_steps_y
            LOGGER.debug('Crop infos: x: %s, y: %s', str(crop_x), str(crop_y))
            landmark_pos_list, full_parameter_list = get_landmark_positions(
                path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),
                (image.shape[1], image.shape[0]),
                (cropinfo[1], cropinfo[0]),
                landmarks,
                trans=(-crop_x[0] - rec_x,
                       -crop_y[0] - rec_y),
                scale=norm_factor / rec_scale,
                steps_x=steps_x,
                steps_y=steps_y)
        if im_idx in train_ids:
            append_dset = train_dset
        elif im_idx in val_ids:
            append_dset = val_dset
        elif im_idx in test_ids:
            append_dset = test_dset
        for rend_idx, (landmark_pos, full_parameters) in enumerate(  # pylint: disable=unused-variable
                zip(landmark_pos_list, full_parameter_list)):
            append_dset.resize(append_dset.shape[0] + 1, axis=0)
            append_dset[-1, :] = landmark_pos + full_parameters
        running_idx += 1
    return running_idx
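
A hedged driver sketch for the HDF5 variant above (not from the original source). The dataset names, the column count `n_cols`, and the split-spec format (a five-digit image id in characters 1:6 of each entry) are assumptions; `list_ids` appears unused by this variant and is passed as an empty list.

# Hypothetical driver; names and sizes below are assumptions.
import h5py

train_spec = ['/00000_image.png']
val_spec = ['/00001_image.png']
test_spec = ['/00002_image.png']
n_cols = 91 * 2 + 82  # assumed: 2D landmark coordinates plus body parameters

with h5py.File('landmark_dataset.hdf5', 'w') as h5f:
    train_dset = h5f.create_dataset('train', (0, n_cols), maxshape=(None, n_cols))
    val_dset = h5f.create_dataset('val', (0, n_cols), maxshape=(None, n_cols))
    test_dset = h5f.create_dataset('test', (0, n_cols), maxshape=(None, n_cols))
    running_idx = add_dataset(
        'out/dset', [], 'up-3d', train_dset, val_dset, test_dset,
        train_spec, val_spec, test_spec,
        target_person_size=500, landmarks=91,  # `landmarks` format is assumed
        train_crop=513, test_crop=513,
        train_steps_x=3, train_steps_y=3,
        running_idx=0)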
Example #3
def add_dataset(  # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches
        dset_fp,
        dset_fromroot,
        list_ids,
        up3d_fp,
        train_list_f,
        val_list_f,
        train_val_list_f,
        test_list_f,
        scale_f,
        train_spec,
        val_spec,
        test_spec,
        target_person_size,
        landmarks,
        train_crop,
        test_crop,
        running_idx,
        only_missing=False,
        with_rlswap=True,
        write_gtjoints_as_lm=False,
        human_annotations=False):
    """Add a dataset to the collection."""
    test_ids = [int(id_[1:6]) for id_ in test_spec]
    train_ids = [int(id_[1:6]) for id_ in train_spec]
    val_ids = [int(id_[1:6]) for id_ in val_spec]
    LOGGER.info("Split: %d train, %d val, %d test.", len(train_ids),
                len(val_ids), len(test_ids))
    LOGGER.info("Writing dataset...")
    for im_idx in tqdm.tqdm(train_ids + val_ids + test_ids):
        image = scipy.misc.imread(
            path.join(up3d_fp, '%05d_image.png' % (im_idx)))
        with open(path.join(up3d_fp, '%05d_fit_crop_info.txt' % (im_idx)),
                  'r') as inf:
            cropinfo = [int(val) for val in inf.readline().strip().split()]
        assert image.ndim == 3
        out_exists = (
            path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx))) and
            path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx))))
        if with_rlswap and im_idx not in test_ids:
            out_exists = out_exists and (
                path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx + 1))) and
                path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx + 1))))
        if not (only_missing and out_exists or write_gtjoints_as_lm):
            if human_annotations:
                landmark_pos = np.load(
                    path.join(up3d_fp, '%05d_joints.npy' % (im_idx)))
            else:
                landmark_pos = get_landmark_positions(
                    path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),
                    (cropinfo[1], cropinfo[0]), landmarks)
                fac_y = cropinfo[0] / float(cropinfo[3] - cropinfo[2])
                fac_x = cropinfo[1] / float(cropinfo[5] - cropinfo[4])
                landmark_pos[:2, :] /= np.mean([fac_x, fac_y])
                landmark_pos[0, :] += cropinfo[4]
                landmark_pos[1, :] += cropinfo[2]
        joints = np.load(path.join(up3d_fp, '%05d_joints.npy' % (im_idx)))
        joints = np.vstack((joints, np.all(joints > 0, axis=0)[None, :]))
        person_size = robust_person_size(joints)
        norm_factor = float(target_person_size) / person_size
        joints[:2, :] *= norm_factor
        if not (only_missing and out_exists or write_gtjoints_as_lm):
            landmark_pos[:2, :] *= norm_factor
        if write_gtjoints_as_lm:
            landmark_pos = joints.copy()
        image = scipy.misc.imresize(image, norm_factor, interp='bilinear')
        if im_idx in test_ids:
            crop = test_crop
        else:
            crop = train_crop
        if image.shape[0] > crop or image.shape[1] > crop:
            LOGGER.debug(
                "Image (original %d, here %d) too large (%s)! Cropping...",
                im_idx, running_idx, str(image.shape[:2]))
            person_center = np.mean(joints[:2, joints[2, :] == 1], axis=1)
            crop_y, crop_x = get_crop(image, person_center, crop)
            image = image[crop_y[0]:crop_y[1], crop_x[0]:crop_x[1], :]
            landmark_pos[0, :] -= crop_x[0]
            landmark_pos[1, :] -= crop_y[0]
            assert image.shape[0] == crop or image.shape[1] == crop, (
                "Error cropping image (original %d, here %d)!" %
                (im_idx, running_idx))
        assert (image.shape[0] <= crop and image.shape[1] <= crop
                and image.shape[2] == 3), (
                    "Wrong image shape (original %d, here %d)!" %
                    (im_idx, running_idx))
        vis_im = vs.visualize_pose(image, landmark_pos, scale=1.)
        if not (only_missing and out_exists):
            scipy.misc.imsave(
                path.join(dset_fp, '%05d_image.png' % (running_idx)), image)
            scipy.misc.imsave(
                path.join(dset_fp, '%05d_ann_vis.png' % (running_idx)), vis_im)
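        # Right/left-swap augmentation for train/val images: mirror the image
        # horizontally and swap the left/right landmark labels.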
        if with_rlswap and im_idx not in test_ids:
            if landmark_pos.shape[1] == 14:
                landmark_pos_swapped = landmark_pos[:, rlswap_lsp]
            else:
                landmark_pos_swapped = landmark_pos[:, rlswap_landmarks_91]
            landmark_pos_swapped[0, :] = (
                image.shape[1] - landmark_pos_swapped[0, :])
            image_swapped = image[:, ::-1, :]
            # Use core visualization for 14 joints.
            vis_im_swapped = vs.visualize_pose(image_swapped,
                                               landmark_pos_swapped,
                                               scale=1)
            if not (only_missing and out_exists):
                scipy.misc.imsave(
                    path.join(dset_fp, '%05d_image.png' % (running_idx + 1)),
                    image_swapped)
                scipy.misc.imsave(
                    path.join(dset_fp, '%05d_ann_vis.png' % (running_idx + 1)),
                    vis_im_swapped)
        list_fs = []
        list_id_ids = []
        if im_idx in train_ids:
            list_fs.append(train_val_list_f)
            list_id_ids.append(2)
            list_fs.append(train_list_f)
            list_id_ids.append(0)
        elif im_idx in val_ids:
            list_fs.append(train_val_list_f)
            list_id_ids.append(2)
            list_fs.append(val_list_f)
            list_id_ids.append(1)
        elif im_idx in test_ids:
            list_fs.append(test_list_f)
            list_id_ids.append(3)
        for list_f, list_id_idx in zip(list_fs, list_id_ids):
            # pylint: disable=bad-continuation
            list_f.write("""# %d
%s
3
%d
%d
%d
""" % (list_ids[list_id_idx],
            path.join('/' + dset_fromroot, '%05d_image.png' % (running_idx)),
            image.shape[0], image.shape[1], landmark_pos.shape[1]))
            for landmark_idx, landmark_point in enumerate(landmark_pos.T):
                list_f.write("%d %d %d\n" %
                             (landmark_idx + 1, int(
                                 landmark_point[0]), int(landmark_point[1])))
            list_f.flush()
            list_ids[list_id_idx] += 1
        scale_f.write("%05d_image.png %f\n" % (running_idx, norm_factor))
        scale_f.flush()
        running_idx += 1
        if with_rlswap and im_idx not in test_ids:
            for list_f, list_id_idx in zip(list_fs, list_id_ids):
                # pylint: disable=bad-continuation
                list_f.write("""# %d
%s
3
%d
%d
%d
""" % (list_ids[list_id_idx],
                path.join('/' + dset_fromroot, '%05d_image.png' % (running_idx)),
                image.shape[0], image.shape[1], landmark_pos.shape[1]))
                for landmark_idx, landmark_point in enumerate(
                        landmark_pos_swapped.T):
                    list_f.write(
                        "%d %d %d\n" % (landmark_idx + 1, int(
                            landmark_point[0]), int(landmark_point[1])))
                list_f.flush()
                list_ids[list_id_idx] += 1
            scale_f.write("%05d_image.png %f\n" % (running_idx, norm_factor))
            scale_f.flush()
            running_idx += 1
    return running_idx
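
For reference, each block this variant appends to a list file has the following shape, derived from the write calls above; the concrete numbers are illustrative only (counter, image path, a literal 3, image height and width, number of landmarks, then one "index x y" line per landmark).

# 0
/dset/00000_image.png
3
513
513
14
1 256 64
2 251 130
...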
Example #4
def add_dataset(dset_fp, dset_rel_fp, up3d_fp,  # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches
                train_list_f, val_list_f, test_list_f,
                train_spec, val_spec, test_spec,
                target_person_size, landmarks, partspec, crop, running_idx,
                only_missing=False):
    """Add a dataset to the collection."""
    test_ids = [int(id_[1:6]) for id_ in test_spec]
    train_ids = [int(id_[1:6]) for id_ in train_spec]
    val_ids = [int(id_[1:6]) for id_ in val_spec]
    ids_list = sorted(train_ids + val_ids + test_ids)
    LOGGER.info("Split: %d train, %d val, %d test.",
                len(train_ids), len(val_ids), len(test_ids))
    LOGGER.info("Writing dataset...")
    for im_idx in tqdm.tqdm(ids_list):
        image = scipy.misc.imread(path.join(up3d_fp, '%05d_image.png' % (im_idx)))
        with open(path.join(up3d_fp, '%05d_fit_crop_info.txt' % (im_idx)), 'r') as inf:
            cropinfo = [int(val) for val in inf.readline().strip().split()]
        assert image.ndim == 3
        out_exists = (path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx))) and
                      path.exists(path.join(dset_fp, '%05d_ann.png' % (running_idx))) and
                      path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx))) and
                      path.exists(path.join(dset_fp, '%05d_render.png' % (running_idx))) and
                      path.exists(path.join(dset_fp, '%05d_render_light.png' % (running_idx))))
        if not (only_missing and out_exists):
            rendering = uncrop(render_body_impl(path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),
                                                resolution=(cropinfo[1],
                                                            cropinfo[0]),
                                                quiet=True,
                                                use_light=False)[0],
                               image.shape[:2],
                               cropinfo)
            rendering_l = uncrop(render_body_impl(path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),
                                                  resolution=(cropinfo[1],
                                                              cropinfo[0]),
                                                  quiet=True,
                                                  use_light=True)[0],
                                 image.shape[:2],
                                 cropinfo)
        joints = np.load(path.join(up3d_fp, '%05d_joints.npy' % (im_idx)))
        joints = np.vstack((joints, np.all(joints > 0, axis=0)[None, :]))
        person_size = robust_person_size(joints)
        norm_factor = float(target_person_size) / person_size
        landmark_pos = get_landmark_positions(path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),
                                              (cropinfo[1], cropinfo[0]),
                                              landmarks)
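        # Map the landmarks from the fit-crop frame back into the original image
        # (undo the crop scaling, add the crop offsets), then scale them to the
        # person-size-normalized image.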
        fac_y = cropinfo[0] / float(cropinfo[3] - cropinfo[2])
        fac_x = cropinfo[1] / float(cropinfo[5] - cropinfo[4])
        landmark_pos[:2, :] /= np.mean([fac_x, fac_y])
        landmark_pos[0, :] += cropinfo[4]
        landmark_pos[1, :] += cropinfo[2]
        landmark_pos[:2, :] *= norm_factor
        if not (only_missing and out_exists):
            image = scipy.misc.imresize(image, norm_factor, interp='bilinear')
            rendering = scipy.misc.imresize(rendering, norm_factor, interp='nearest')
            rendering_l = scipy.misc.imresize(rendering_l, norm_factor, interp='bilinear')
            if image.shape[0] > crop or image.shape[1] > crop:
                LOGGER.debug("Image (original %d, here %d) too large (%s)! Cropping...",
                             im_idx, running_idx, str(image.shape[:2]))
                person_center = np.mean(joints[:2, joints[2, :] == 1], axis=1) * norm_factor
                crop_y, crop_x = get_crop(image, person_center, crop)
                image = image[crop_y[0]:crop_y[1],
                              crop_x[0]:crop_x[1], :]
                rendering = rendering[crop_y[0]:crop_y[1],
                                      crop_x[0]:crop_x[1], :]
                rendering_l = rendering_l[crop_y[0]:crop_y[1],
                                          crop_x[0]:crop_x[1], :]
                landmark_pos[0, :] -= crop_x[0]
                landmark_pos[1, :] -= crop_y[0]
                assert image.shape[0] == crop or image.shape[1] == crop, (
                    "Error cropping image (original %d, here %d)!" % (im_idx,
                                                                      running_idx))
            assert image.shape[0] <= crop and image.shape[1] <= crop and image.shape[2] == 3, (
                "Wrong image shape (original %d, here %d)!" % (im_idx, running_idx))
            class_groups = six_region_groups if partspec == '6' else None
            annotation = regions_to_classes(rendering, class_groups, warn_id=str(im_idx))
            if partspec == '1':
                annotation = (annotation > 0).astype('uint8')
            assert np.max(annotation) <= int(partspec), (
                "Wrong annotation value (original %d, here %d): %s!" % (
                    im_idx, running_idx, str(np.unique(annotation))))
            if running_idx == 0:
                assert np.max(annotation) == int(partspec), (
                    "Probably an error in the number of parts!")
            pose_vis_im = vs.visualize_pose(cv2.cvtColor(annotation*8, cv2.COLOR_GRAY2RGB),
                                            landmark_pos,
                                            scale=1.)
            scipy.misc.imsave(path.join(dset_fp, '%05d_image.png' % (running_idx)), image)
            scipy.misc.imsave(path.join(dset_fp, '%05d_ann.png' % (running_idx)), annotation)
            scipy.misc.imsave(path.join(dset_fp, '%05d_seg_ann_vis.png' % (running_idx)),
                              apply_colormap(annotation, vmax=int(partspec)))
            # scipy.misc.imsave(path.join(dset_fp, '%05d_render.png' % (running_idx)), rendering)
            scipy.misc.imsave(path.join(dset_fp, '%05d_render_light.png' % (running_idx)), rendering_l)  # pylint: disable=line-too-long
            scipy.misc.imsave(path.join(dset_fp, '%05d_pose_ann_vis.png' % (running_idx)), pose_vis_im)
            landmark_pos = np.concatenate((landmark_pos, joints[2][None, :]))
            np.save(str(path.join(dset_fp, '%05d_joints.npy' % (running_idx))), landmark_pos, allow_pickle=False)

        if im_idx in train_ids:
            list_f = train_list_f
        elif im_idx in val_ids:
            list_f = val_list_f
        elif im_idx in test_ids:
            list_f = test_list_f
        list_f.write("/%s/%05d_image.png /%s/%05d_ann.png %f\n" % (
            dset_rel_fp, running_idx, dset_rel_fp, running_idx, norm_factor))
        list_f.flush()
        running_idx += 1
    return running_idx
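
A hedged caller sketch for this segmentation variant (not from the original source); the list-file names, `landmarks` value, `partspec`, and crop size are placeholders.

# Hypothetical caller; all paths and values are placeholders.
train_spec = ['/00000_image.png']
val_spec = ['/00001_image.png']
test_spec = ['/00002_image.png']

with open('train.txt', 'w') as train_f, \
        open('val.txt', 'w') as val_f, \
        open('test.txt', 'w') as test_f:
    running_idx = add_dataset(
        'out/dset', 'dset', 'up-3d',
        train_f, val_f, test_f,
        train_spec, val_spec, test_spec,
        target_person_size=500, landmarks=14,
        partspec='6', crop=513, running_idx=0)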