# Third-party imports used across the snippets below. Project-local helpers
# (make_color_transparent, overlay_two_images, recolor_to_rgba, im_resize, dilate,
# get_segments, get_files, keypoints_to_mask, merge_images_side_by_side, ...)
# and the pix2pix utilities (create_dataset, tensor2im) are assumed to be defined
# elsewhere in the repository.
from pathlib import Path
import random

import cv2
import numpy as np
from PIL import Image
from skimage.metrics import structural_similarity
from tqdm import tqdm

cv = cv2  # some of the snippets below use the shorter alias


def resample_mask(id: int):
    """
    Resamples the mask of image_id with similar face masks (face + hair is kept from original)
    :param id: id of mask image in celebaHQ
    :return: resampled mask
    """
    # get real parts we want to keep similar
    skin = create_mask_from_id(id,
                               skin=True, nose=False, glasses=False, eyes=False, brows=False, ears=True,
                               mouth=False, u_lip=False, l_lip=False, hair=False, neck=True, misc=False)

    hair = create_mask_from_id(id,
                               skin=False, nose=False, glasses=False, eyes=False, brows=False, ears=True,
                               mouth=False, u_lip=False, l_lip=False, hair=True, neck=False, misc=True)

    parts_to_resample = create_mask_from_id(id,
                                            skin=False, nose=True, glasses=True, eyes=True, brows=True, ears=False,
                                            mouth=True, u_lip=True, l_lip=True, hair=False, neck=False, misc=False)

    # find the most similar mask among 40 random candidates
    max_score = -1
    for i in range(40):
        # pick a random id different from the input id
        rand_id = id
        while rand_id == id:
            # only sample ids from the 0-6000 test split used here;
            # 6001-25200 is the training split and 25201-30000 is an untouched validation split
            rand_id = random.randint(0, 6000)

        random_parts = create_mask_from_id(rand_id,
                                           skin=False, nose=True, glasses=True, eyes=True, brows=True, ears=False,
                                           mouth=True, u_lip=True, l_lip=True, hair=False, neck=False, misc=False)
        # compute the structural similarity (SSIM) between the two part masks
        # (newer scikit-image versions expect channel_axis=-1 instead of multichannel=True)
        (score, _) = structural_similarity(np.array(parts_to_resample), np.array(random_parts),
                                           full=True, multichannel=True)
        # keep highest score
        if score > max_score:
            resampled_parts = random_parts
            max_score = score
            new_id = rand_id

    print(f"Image {id} resampled to {new_id}")

    # combine real and random masks into one mask
    add_real_background = True
    if add_real_background:
        resampled_mask = Image.open(
            '/home/mo/datasets/CelebAMask-HQ/CelebA-HQ-img/' + str(id) + ".jpg"
        ).resize((256, 256), Image.NEAREST).convert("RGBA")
        skin = make_color_transparent(skin, (0, 0, 0))
    else:
        resampled_mask = Image.new("RGBA", (256, 256), "BLACK")

    resampled_mask.paste(skin, (0, 0), skin)
    resampled_parts = make_color_transparent(resampled_parts, (0, 0, 0))
    resampled_mask.paste(resampled_parts, (0, 0), resampled_parts)
    hair = make_color_transparent(hair, (0, 0, 0))
    resampled_mask.paste(hair, (0, 0), hair)

    return resampled_mask.convert("RGB")
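

# Illustrative usage sketch (not part of the original code): resample the mask of a
# test-split image and write it out; the output filename is a placeholder.
def example_resample_mask(pic_id: int = 0):
    # pic_id should lie in the test split (0-6000) sampled by resample_mask
    mask = resample_mask(pic_id)
    mask.save(f"resampled_{pic_id}.png")  # placeholder output path

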
def render_frames(frames: list):
    """
    Merges a list of PIL or OpenCV images into one image by pasting them on top of each other
    (black pixels are treated as transparent)
    :param frames: list of PIL Images or OpenCV (numpy) arrays
    :return: merged image of the same type as the input images (PIL or OpenCV)
    """
    # OpenCV images are numpy arrays with shape (height, width, channels),
    # while PIL images expose their size as (width, height)
    opencv = isinstance(frames[0], np.ndarray)
    if opencv:
        # Image.new expects (width, height), so swap the numpy dimensions
        shape = (frames[0].shape[1], frames[0].shape[0])
    else:
        shape = frames[0].size

    background = Image.new("RGBA", shape, "BLACK")

    for frame in frames:
        if opencv:
            frame = Image.fromarray(frame)

        frame = make_color_transparent(frame, (0, 0, 0))
        background.paste(frame, (0, 0), frame)

    if opencv:
        return np.array(background)
    else:
        return background
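

# Illustrative usage sketch (not part of the original code): merge a few frames given either
# as PIL Images or as OpenCV arrays; inputs are assumed to share one resolution and use a
# black background for transparency.
def example_render_frames(frame_paths):
    pil_frames = [Image.open(p).convert("RGB") for p in frame_paths]
    merged_pil = render_frames(pil_frames)  # returns a PIL Image

    cv_frames = [cv2.imread(str(p)) for p in frame_paths]
    merged_cv = render_frames(cv_frames)  # returns a numpy array
    return merged_pil, merged_cv

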
def run_segmentation_model(model, opt, root_folder, warp=False):
    """
    Runs the Face2Mask (pix2pix) model on the test dataset and saves the generated masks,
    overlaid on the original images, to root_folder
    """
    print("\n---- Running Face2Mask Model ----\n")

    dataset = create_dataset(opt)
    for i, data in tqdm(enumerate(dataset)):
        if i >= opt.num_test:
            break

        # run model and get processed image
        model.set_input(data)
        model.test()

        # get mask
        visuals_mask = model.get_current_visuals()
        mask = tensor2im(visuals_mask['fake_B'])
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
        mask = clean_mask(mask)
        mask = Image.fromarray(mask)
        mask = make_color_transparent(mask, (0, 0, 0), tolerance=50)

        # open the original input image
        img_path = model.get_image_paths()[0]
        img_name = Path(img_path).stem
        image = Image.open(img_path)

        # add background to mask
        overlayed_mask = overlay_two_images(image, mask)

        # save file
        overlayed_mask.save(f'{root_folder}/{img_name}.png')

    print("SAVED GENERATED MASKS\n")
def face_mask_from_keypoints(keypoints, img):
    """
    Converts keypoints to face mask
    :param keypoints: tuple of keypoints
    :param img: image
    :return: mask
    """
    # define mask regions
    upper_keypoints = keypoints[18:27].copy()
    # invert direction
    upper_keypoints = upper_keypoints[::-1]

    # add padding to eyebrows
    padded_upper_keypoints = apply_padding_in_x(upper_keypoints)

    # add padded keypoints to make face larger
    skin_keypoints = np.concatenate((keypoints[0:17], padded_upper_keypoints))
    nose_keypoints = np.concatenate((keypoints[27:28], keypoints[31:36]))

    keypoint_dict = {
        'skin': {
            'keypoints': skin_keypoints, 'color': (204, 0, 0)},
        'eyebrow_l': {
            'keypoints': keypoints[18:22], 'color': (0, 255, 255)},
        'eyebrow_r': {
            'keypoints': keypoints[22:27], 'color': (0, 255, 255)},
        'nose': {
            'keypoints': nose_keypoints, 'color': (76, 153, 0)},
        'eye_l': {
            'keypoints': keypoints[36:42], 'color': (51, 51, 255)},
        'eye_r': {
            'keypoints': keypoints[42:48], 'color': (51, 51, 255)},
        'lips': {
            'keypoints': keypoints[48:60], 'color': (255, 255, 0)},
        'mouth': {
            'keypoints': keypoints[60:69], 'color': (102, 204, 0)}
    }

    mask = []
    for key, value in keypoint_dict.items():
        dilate_radius = 1
        if 'eyebrow' in key:
            dilate_radius = 2
        mask.append(keypoints_to_mask(value['keypoints'], img.shape, dilate_radius, value['color']))

    for i in range(len(mask) - 1):
        if i == 0:
            back = Image.fromarray(mask[i]).convert("RGBA")

        front = Image.fromarray(mask[i + 1]).convert("RGBA")
        front = make_color_transparent(front, (0, 0, 0))
        # back is always assigned as i starts from 0
        back.paste(front, (0, 0), front)

    return np.array(back.convert("RGB"))
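

# Illustrative usage sketch (not part of the original code): the keypoints are assumed to come
# from dlib's 68-point landmark predictor; the predictor file path is a placeholder.
def example_face_mask_from_keypoints(image_path):
    import dlib

    img = cv2.imread(image_path)
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")  # placeholder path

    faces = detector(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    shape = predictor(img, faces[0])
    keypoints = np.array([(p.x, p.y) for p in shape.parts()])
    return face_mask_from_keypoints(keypoints, img)

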
def insert_face(img, face, add_border=True):
    """
    inserts face onto image
    :param add_border: add black border around face
    :param img: Image to paste onto
    :param face: face image with removed background
    :return: merged images
    """
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    face = make_color_transparent(face, (0, 0, 0))
    if add_border:
        # create an all-black copy of the face (keeping its alpha channel), dilate it
        # and paste the original face on top, which leaves a black outline around the face
        black = np.array(face)
        black[..., :3] = [0, 0, 0]
        dilated = dilate(black, 7)
        dilated = Image.fromarray(dilated)
        dilated.paste(face, (0, 0), face)
        face = dilated

    img = Image.fromarray(img)
    img.paste(face, (0, 0), face)
    return img
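

# Illustrative usage sketch (not part of the original code): note the mixed input types, an
# OpenCV (BGR) array for the background and a PIL image for the face cut-out on black.
def example_insert_face(background_path, face_path):
    background = cv2.imread(background_path)               # numpy BGR array
    face = Image.open(face_path).convert("RGB")            # face on a black background
    return insert_face(background, face, add_border=True)  # returns a PIL Image

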
def preprocess_images(data_points: int, data_path: str, processed_path: str,
                      train: float, save_a_b: bool, no_misc: bool,
                      no_hair: bool, no_neck: bool, no_ears: bool,
                      overlay_mask: bool):
    """
    Preprocess images according to pix2pix requirements
    """
    print(
        f'Processing with following options:\n'
        f'Overlay Mask: {overlay_mask}, save_a_b: {save_a_b}, train-percentage: {train * 100}%\n'
        f'Misc: {not no_misc}, Hair: {not no_hair}, Neck: {not no_neck}, Ears {not no_ears}'
    )
    data_path = Path(data_path)
    processed_path = Path(processed_path)

    if save_a_b:
        create_folder_structure(processed_path / 'A')
        create_folder_structure(processed_path / 'B')
    else:
        create_folder_structure(processed_path)

    test = 1 - train
    for i in tqdm(range(data_points)):
        # load and resize 512x512 px mask image
        mask_img = create_mask_from_id(
            i,
            skin=True,
            nose=True,
            glasses=True,
            eyes=True,
            brows=True,
            ears=not no_ears,
            mouth=True,
            u_lip=True,
            l_lip=True,
            hair=not no_hair,
            neck=not no_neck,
            misc=not no_misc,
        )
        # load and resize the corresponding 1024x1024 px image
        normal_img = im_resize(Image.open(data_path / f"{i}.jpg"))

        # split dataset
        if i < test * data_points:
            folder = "test"
        elif i < (test + train * train) * data_points:
            folder = "train"
        else:
            # use the validation images for training as well, since there is no real validation step with pix2pix
            folder = "train"

        if overlay_mask:
            mask_img = make_color_transparent(mask_img, (0, 0, 0))
            mask_background = normal_img.copy()
            mask_background.paste(mask_img, (0, 0), mask_img)
            # convert() returns a new image, so the result has to be reassigned
            mask_img = mask_background.convert("RGB")

        # save images (note: subsampling and quality are JPEG options and have no effect on PNG output)
        if save_a_b:
            normal_img.save(processed_path / "A" / folder / f"{i}.png",
                            format='PNG',
                            subsampling=0,
                            quality=100)
            mask_img.save(processed_path / "B" / folder / f"{i}.png",
                          format='PNG',
                          subsampling=0,
                          quality=100)
        else:
            # merge the two images side by side into one 512x256 px image
            output = merge_images_side_by_side(im_resize(normal_img),
                                               im_resize(mask_img))
            output.save(processed_path / folder / f"{i}.png",
                        format='PNG',
                        subsampling=0,
                        quality=100)
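

# Illustrative call sketch (not part of the original code): all paths and the split ratio
# are placeholders; data_path is expected to contain the CelebA-HQ images named <id>.jpg.
def example_preprocess_images():
    preprocess_images(data_points=30000,
                      data_path="/path/to/CelebA-HQ-img",   # placeholder
                      processed_path="/path/to/processed",  # placeholder
                      train=0.8,
                      save_a_b=False,
                      no_misc=False,
                      no_hair=False,
                      no_neck=False,
                      no_ears=False,
                      overlay_mask=False)

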
def create_mask_from_id(
    pic_id: int,
    face_only_segmentation=False,
    warp=False,
    showGUI=False,
    data_path=Path('/home/mo/datasets/CelebAMask-HQ/CelebAMask-HQ-mask-anno'),
    skin=True,
    nose=True,
    glasses=True,
    eyes=True,
    brows=True,
    ears=True,
    mouth=True,
    u_lip=True,
    l_lip=True,
    hair=True,
    neck=True,
    misc=True,
):
    """Creates one image with all segmentation masks
    :param showGUI: displays hui
    :type data_path: Path
    :param warp: warps the masks randomly
    :param face_only_segmentation: only face
    :param pic_id: id of image
    :param data_path: path object of data root containing masks
    :return: the merged Image object
    """
    sub_folder = str(pic_id // 2000)
    pic_id = str(pic_id).zfill(5)

    segments = get_segments()
    unprocessed_segments = 0
    frames = []
    warped_frames = []

    # seed for warping
    if warp:
        deg = random.randint(0, 10)
        if random.getrandbits(1):
            deg = 360 - deg
        seed = [
            random.getrandbits(1),
            random.getrandbits(1),
            random.getrandbits(1), deg
        ]

    # render loop
    for key, segment in segments.items():

        if face_only_segmentation:
            if "skin" in key:
                segment["color"] = (255, 255, 255)
            else:
                # stop adding further segmentation masks
                break
        try:
            for name_id in range(len(segment["name"])):
                if not skin and "skin" in key:
                    break
                if not nose and "nose" in key:
                    break
                if not glasses and "glasses" in key:
                    break
                if not eyes and "eye" in key:
                    break
                if not brows and "brows" in key:
                    break
                if not ears and "ear" in key:
                    break
                if not mouth and "mouth" in key:
                    break
                if not u_lip and "u_lip" in key:
                    break
                if not l_lip and "l_lip" in key:
                    break
                if not hair and "hair" in key:
                    # we want to remove the regions of masks that are below the hair mask
                    pass
                if not neck and "neck" in key:
                    break
                if not misc and "misc" in key:
                    break
                # open segmentation mask
                img = Image.open(data_path / sub_folder /
                                 (pic_id + segment["name"][name_id]))
                # recolor white to respective color of segment
                img = recolor_to_rgba(img, (255, 255, 255), segment["color"])
                # make black transparent
                img = make_color_transparent(img, (0, 0, 0))
                # save individual frames for later modification
                frames.append(img)
                # warp image
                if warp:
                    img = add_random_warp(img, seed=seed)
                    warped_frames.append(img)
        except FileNotFoundError:
            unprocessed_segments += 1
            if unprocessed_segments == len(segments):
                print("No segmentation maps for file " + pic_id +
                      " found! Stopping.")
                return
    # render frames
    if warp:
        result = render_frames(warped_frames)
    else:
        result = render_frames(frames)
    if showGUI:
        result = render_gui(frames)

    if not hair:
        # remove hair region again, this ensures no mask sticks out "below" the hair
        result = make_color_transparent(result, segments['hair']["color"])

    return im_resize(result)
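

# Illustrative usage sketch (not part of the original code): render the full mask, a randomly
# warped variant and a white face-only silhouette for the same image id.
def example_create_mask_from_id(pic_id=0):
    full_mask = create_mask_from_id(pic_id)
    warped_mask = create_mask_from_id(pic_id, warp=True)
    face_silhouette = create_mask_from_id(pic_id, face_only_segmentation=True)
    return full_mask, warped_mask, face_silhouette

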
def cutout_face(img_path: str, mask_path: str, processed_path: str,
                face_mask_path: str):
    """Cuts out face of person
    :param face_mask_path:
    :param processed_path:
    :param img_path: path of folder containing images
    :param mask_path: path of folder containing masks
    """

    # get all filenames in the folders
    img_file_list = get_files(img_path, '.jpg')
    mask_file_list = get_files(mask_path, '.png')
    if img_file_list is None or mask_file_list is None:
        print("No image files found. Exiting")
        return
    if len(img_file_list) != len(mask_file_list):
        print("Number of image files and mask files does not match. Exiting")
        return

    # create folder for processed data
    processed_path = Path(processed_path)
    img_path = Path(img_path)
    face_mask_path = Path(face_mask_path)
    mask_path = Path(mask_path)
    processed_path.mkdir(exist_ok=True)

    for file in tqdm(img_file_list):
        # Load images
        face_img = Image.open(img_path / file.name)
        face_mask_img = Image.open(face_mask_path / f"{file.stem}.png")
        mask_img = Image.open(mask_path / f"{file.stem}.png")

        # Create dilated face mask as transparent, background is black
        face_mask_img_dilated = cv.imread(
            cv.samples.findFile(str(face_mask_path / f"{file.stem}.png")))
        face_mask_img_dilated = dilate(face_mask_img_dilated, 20)
        face_mask_img_dilated = cv.cvtColor(face_mask_img_dilated,
                                            cv.COLOR_BGR2RGB)
        face_mask_img_dilated = Image.fromarray(face_mask_img_dilated)
        face_mask_img_dilated = make_color_transparent(face_mask_img_dilated,
                                                       (255, 255, 255))

        # Make face mask transparent, background is black
        face_mask_img = make_color_transparent(face_mask_img, (255, 255, 255))

        # copy face mask onto the mask image
        overlaid_mask = overlay_two_images(mask_img, face_mask_img)
        # Make background transparent
        overlaid_mask = make_color_transparent(overlaid_mask, (0, 0, 0))

        # overlay dilated face onto image
        overlaid_img = overlay_two_images(face_img, face_mask_img_dilated)
        # overlay mask onto image
        overlaid_mask = overlay_two_images(overlaid_img, overlaid_mask)

        # save result (written as PNG data under the original .jpg file name)
        merge_images_side_by_side(overlaid_mask, overlaid_img).save(
            processed_path / file.name,
            format='PNG',
            subsampling=0,
            quality=100)