Example #1
import cv2
import matplotlib.pyplot as plt
from edflow.data.util import adjust_support


def hist_similarity(img1, img2, PLOT=False):
    img1 = adjust_support(img1, "0->255", "0->1")
    img2 = adjust_support(img2, "0->255", "0->1")

    # Available comparison methods; only correlation is used below.
    OPENCV_METHODS = (("Correlation", cv2.HISTCMP_CORREL),
                      ("Chi-Squared", cv2.HISTCMP_CHISQR),
                      ("Intersection", cv2.HISTCMP_INTERSECT),
                      ("Hellinger", cv2.HISTCMP_BHATTACHARYYA))

    hist1 = cv2.calcHist([img1], [0, 1, 2], None, [8, 8, 8],
                         [0, 256, 0, 256, 0, 256])
    hist1 = cv2.normalize(hist1, hist1).flatten()

    hist2 = cv2.calcHist([img2], [0, 1, 2], None, [8, 8, 8],
                         [0, 256, 0, 256, 0, 256])
    hist2 = cv2.normalize(hist2, hist2).flatten()

    hist_compare = cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL)

    if PLOT:
        fig, (ax1, ax2) = plt.subplots(1, 2)
        plt.title(f"histcompare: {hist_compare}")
        ax1.imshow(img1)
        ax2.imshow(img2)
        plt.show()

    return hist_compare
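
A minimal usage sketch (random arrays stand in for real images; a value close to 1 means very similar color histograms):

import numpy as np

a = np.random.rand(64, 64, 3).astype('float32')  # RGB, support 0->1
b = a.copy()

print(hist_similarity(a, b))  # ~1.0 for identical images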
Example #2
def make_video(D, q_idxs, opt, crop_name, full_name):
    import os
    import tempfile
    from PIL import Image
    from edflow.data.util import adjust_support

    D.expand = True
    
    with tempfile.TemporaryDirectory() as tmpd:
        for i, [q, o] in enumerate(zip(q_idxs, opt)):
            imq = Image.fromarray(adjust_support(D[q][crop_name], '0->255'))
            imo = Image.fromarray(adjust_support(D[o][crop_name], '0->255'))

            # _add_text is a helper defined elsewhere in the module.
            imq = _add_text(imq, 'Q')
            imo = _add_text(imo, 'R')

            qpath = os.path.join(tmpd, f'q_{i:0>4d}.png')
            imq.save(qpath)
            imo.save(os.path.join(tmpd, f'o_{i:0>4d}.png'))

        pat_q = os.path.join(tmpd, 'q_%04d.png')
        pat_o = os.path.join(tmpd, 'o_%04d.png')
        name_q = os.path.join(tmpd, 'q.mp4')
        name_o = os.path.join(tmpd, 'o.mp4')
        out_name = f'{full_name}.mp4'

        vid_command = 'ffmpeg -i {im_pat} -vf fps=25 -vcodec libx264  -crf 18 {name}'

        qcommand = vid_command.format(im_pat=pat_q, name=name_q)
        os.system(qcommand)
        os.system(vid_command.format(im_pat=pat_o, name=name_o))

        stack_command = 'ffmpeg -y -i {top} -i {bot} -vcodec libx264  -crf 18 -filter_complex hstack {name}'
        os.system(stack_command.format(top=name_q, bot=name_o, name=out_name))
Example #3
import numpy as np
from PIL import Image
from edflow.data.util import adjust_support, get_support


def crop(image, box):
    '''Arguments:
        image (np.ndarray or PIL.Image): Image to crop.
        box (list): Box specifying ``[x, y, width, height]``.

    Returns:
        np.ndarray or PIL.Image: Cropped image with the same support as
            :attr:`image`.
    '''

    is_image = True
    if not isinstance(image, Image.Image):
        in_support = get_support(image)
        image = adjust_support(image, '0->255')
        image = Image.fromarray(image)
        is_image = False

    # Convert [x, y, w, h] to PIL's [left, upper, right, lower]. Use an
    # array so ``+`` is element-wise addition, not list concatenation.
    box = np.array(box)
    box[2:] = box[:2] + box[2:]

    image = image.crop(tuple(box))

    if not is_image:
        image = adjust_support(np.array(image), in_support)

    return image
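
A minimal sketch of how crop might be used (image and box are made up):

import numpy as np

img = np.random.rand(100, 100, 3).astype('float32')  # support 0->1

# 40x30 region with its top-left corner at (x=10, y=20).
patch = crop(img, [10, 20, 40, 30])
print(patch.shape)  # (30, 40, 3), support 0->1 is preserved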
Example #4
        def log_op():

            PCK_THRESH = [0.01, 0.025, 0.05, 0.1, 0.125, 0.15, 0.175, 0.2, 0.25, 0.5]
            HM_THRESH = [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
            if self.config['pck_alpha'] not in PCK_THRESH:
                PCK_THRESH.append(self.config["pck_alpha"])

            coords, _ = heatmaps_to_coords(predictions.clone(), thresh=self.config["hm"]["thresh"])
            pck = {t: percentage_correct_keypoints(kwargs["kps"], coords, t, self.config["pck"]["type"]) for t in
                   PCK_THRESH}

            gridded_outputs = np.expand_dims(sure_to_numpy(torchvision.utils.make_grid(
                predictions[0], nrow=10)), 0).transpose(1, 2, 3, 0)
            gridded_targets = np.expand_dims(sure_to_numpy(torchvision.utils.make_grid(
                sure_to_torch(kwargs["targets"])[0], nrow=10)), 0).transpose(1, 2, 3, 0)

            logs = {
                "images": {
                    # Image input not needed, because stick animal is printed on input image
                    # "image_input": adjust_support(torch2numpy(inputs).transpose(0, 2, 3, 1), "-1->1"),
                    "first_pred": adjust_support(gridded_outputs, "-1->1", "0->1"),
                    "first_targets": adjust_support(gridded_targets, "-1->1", "0->1"),
                    "outputs": adjust_support(heatmaps_to_image(torch2numpy(predictions)).transpose(0, 2, 3, 1),
                                              "-1->1", "0->1"),
                    "targets": adjust_support(heatmaps_to_image(kwargs["targets"]).transpose(0, 2, 3, 1), "-1->1"),
                    "inputs_with_stick": make_stickanimal(torch2numpy(inputs).transpose(0, 2, 3, 1), kwargs["kps"]),
                    "stickanimal": make_stickanimal(torch2numpy(inputs).transpose(0, 2, 3, 1), predictions.clone(),
                                                    thresh=self.config["hm"]["thresh"]),
                },
                "scalars": {
                    "loss": losses["total"],
                    "learning_rate": self.optimizer.state_dict()["param_groups"][0]["lr"],
                    f"PCK@{self.config['pck_alpha']}": np.around(pck[self.config['pck_alpha']][0], 5),
                },
                "figures": {
                    "Keypoint Mapping": plot_input_target_keypoints(torch2numpy(inputs).transpose(0, 2, 3, 1),
                                                                    # get BHWC
                                                                    torch2numpy(predictions),  # stay BCHW
                                                                    kwargs["kps"], coords),
                }
            }
            if self.config["losses"]["L2"]:
                logs["scalars"]["L2"] = losses["L2"]
            if self.config["losses"]["L1"]:
                logs["scalars"]["L1"] = losses["L1"]
            if self.config["losses"]["perceptual"]:
                logs["scalars"]["perceptual"] = losses["perceptual"]

            if self.config["pck"]["pck_multi"]:
                for key, val in pck.items():
                    # get mean value for pck at given threshold
                    logs["scalars"][f"PCK@{key}"] = np.around(val[0], 5)
                    for idx, part in enumerate(val[1]):
                        logs["scalars"][f"PCK@{key}_{self.dataset.get_idx_parts(idx)}"] = np.around(part, 5)
            return logs
Example #5
 def log_op():
     is_train = self.get_split() == "train"  # currently unused
     logs = {
         "images": {
             "input": adjust_support(torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1", "0->1"),
             "outputs": adjust_support(torch2numpy(predictions).transpose(0, 2, 3, 1), "-1->1", "0->1"),
         },
         "scalars": {
             "loss": loss,
         },
     }
     return logs
Example #6
import plotly.express as px
import dash_core_components as dcc  # in newer Dash: from dash import dcc
from edflow.data.util import adjust_support


def render_image(image_array, id):
    '''Displays an image. Allows zooming!

    Based on https://plot.ly/python/imshow/ and https://plot.ly/python/images/

    Parameters
    ----------
    image_array : np.ndarray
        Image to display. Shape: ``[W, H, 3 or 4]``. Will be modified for
        display using :func:`adjust_support`.
    id : str
        Id given to the returned :class:`dcc.Graph`.

    Returns
    -------
    dcc.Graph, dict
        The rendered figure and some basic statistics of the raw image.
    '''

    plot_im = adjust_support(image_array, '0->255')

    # Create figure and add image
    fig = px.imshow(plot_im)

    info_dict = {
        'min': image_array.min(),
        'max': image_array.max(),
        'dtype': image_array.dtype,
        'dimensions': '{}x{}'.format(*image_array.shape[:2])
    }

    return dcc.Graph(id=id, figure=fig), info_dict
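
A hedged usage sketch (standalone; embedding into a Dash layout is only indicated in the comment):

import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype('uint8')
graph, info = render_image(img, id='example-image')
print(info)  # min/max/dtype and '64x64'
# In a Dash app, graph could be placed into the layout, e.g. html.Div([graph]).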
Example #7
import os

import numpy as np
from PIL import Image
from edflow.data.util import adjust_support


def vid(start, length=250, K=100):
    # HumanGaitFixedBox and ReferenceSampler are project-local helpers.

    save_root = f'./video/{start}/'
    query_root = os.path.join(save_root, 'query')
    ref_root = os.path.join(save_root, 'reference')

    os.makedirs(save_root, exist_ok=True)
    os.makedirs(query_root, exist_ok=True)
    os.makedirs(ref_root, exist_ok=True)

    N = length

    HG = HumanGaitFixedBox({'data_split': 'train'})
    vidps = HG.labels['video_path']
    vidns = np.char.rpartition(vidps, '/')[:, -1]
    healths = np.char.rpartition(vidns, 'H')[:, 1]

    healthy = healths == 'H'
    unhealthy = healths == ''

    h_indices = np.arange(len(HG))[healthy]
    i_indices = np.arange(len(HG))[unhealthy]

    kps = HG.labels['kps_fixed_rel'][..., :2].astype('float32')

    kp_hidden = kps[healthy]

    R = ReferenceSampler(kp_hidden, k=K)

    unhealthy_kps = kps[unhealthy]

    end = start + N
    q_idxs = list(range(start, end))
    q = unhealthy_kps[start:end]

    opt = R(q)

    print(opt)

    HG.expand = True

    vids = []
    for which, indices in [['query', q_idxs], ['reference', opt]]:
        save = os.path.join(save_root, which)
        for i, idx in enumerate(indices):
            if which == 'reference':
                im = HG[h_indices[idx]]['target']
            else:
                im = HG[i_indices[idx]]['target']
            im = adjust_support(im, '0->255')
            Image.fromarray(im).save(os.path.join(save, f'{i:0>3}.png'))
        vid = os.path.join(save_root, f'{which}.mp4')
        vids += [vid]
        # Frames were zero-padded to width 3 above, so the pattern must be %03d.
        command = f'ffmpeg -y -i {save}/%03d.png {vid}'
        os.system(command)

    stack = os.path.join(save_root, f'query_vs_ref_{start}-{length}-{K}.mp4')
    command = f'ffmpeg -y -i {vids[0]} -i {vids[1]} -filter_complex hstack {stack}'
    os.system(command)
Example #8
import numpy as np
from PIL import Image
from edflow.data.util import adjust_support


def image_saver(savepath, image):
    im_adjust = adjust_support(image, "0->255", clip=True)

    # Single-channel images need mode "L" and a squeezed array; mapping a
    # one-channel image to "RGB" would make Image.fromarray fail.
    modes = {1: "L", 3: "RGB", 4: "RGBA"}
    mode = modes[im_adjust.shape[-1]]
    if mode == "L":
        im_adjust = np.squeeze(im_adjust, -1)

    im = Image.fromarray(im_adjust, mode)

    im.save(savepath)
Example #9
    def get_example(self, idx: object) -> object:
        """
        Args:
            idx: integer indicating index of dataset

        Returns: example element from dataset

        """
        example = super().get_example(idx)
        # Images are loaded with support from 0->255
        output = {}
        if self.config.get("image_type", "") == "mask":
            image_p0a0 = example["p0a0_masked_frames"]()
            image_p0a1 = example["p0a1_masked_frames"]()
            image_p1a0 = example["p1a0_masked_frames"]()
            image_p1a1 = example["p1a1_masked_frames"]()
        elif self.config.get("image_type", "") == "white":
            image_p0a0 = example["p0a0_whitened_frames"]()
            image_p0a1 = example["p0a1_whitened_frames"]()
            image_p1a0 = example["p1a0_whitened_frames"]()
            image_p1a1 = example["p1a1_whitened_frames"]()
        else:
            image_p0a0 = example["p0a0_frames"]()
            image_p0a1 = example["p0a1_frames"]()
            image_p1a0 = example["p1a0_frames"]()
            image_p1a1 = example["p1a1_frames"]()

        if self.augmentation:
            # randomly perform some augmentations on the image, keypoints and bboxes
            image_p0a0 = self.seq(image=image_p0a0)
            image_p0a1 = self.seq(image=image_p0a1)
            image_p1a0 = self.seq(image=image_p1a0)
            image_p1a1 = self.seq(image=image_p1a1)

        image_p0a0 = self.resize(image=image_p0a0)
        image_p0a1 = self.resize(image=image_p0a1)
        image_p1a0 = self.resize(image=image_p1a0)
        image_p1a1 = self.resize(image=image_p1a1)

        output["inp0"] = adjust_support(image_p0a0, "0->1")  # p0a0
        output["inp1"] = adjust_support(image_p0a1, "0->1")  # p0a1
        output["inp2"] = adjust_support(image_p1a0, "0->1")  # p1a0
        output["inp3"] = adjust_support(image_p1a1, "0->1")  # p1a1

        output["animal_class"] = np.array(animal_class[self.data.data.animal])
        return output
Example #10
import numpy as np
from edflow.data.util import adjust_support


def rgb2openpose(image):
    '''Prepares an image to be interpreted by openpose:
    RGB->BGR, support 0->255.
    '''
    # Reverse the channel order: RGB -> BGR.
    image = np.stack([image[..., 2], image[..., 1], image[..., 0]], axis=-1)
    image = adjust_support(image, '0->255')

    return image
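
A small sketch of the conversion (a random frame stands in for a real one):

import numpy as np

frame = np.random.rand(32, 32, 3)  # RGB, support 0->1
bgr = rgb2openpose(frame)          # BGR, support 0->255
print(bgr.shape, bgr.dtype, bgr.max())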
Example #11
def plot_input_target_keypoints(inputs: np.ndarray, targets, gt_coords,
                                coords):
    """
    Remember to clip output numpy array to [0, 255] range and cast it to uint8.
     Otherwise matplot.pyplot.imshow would show weird results.
    Args:
        inputs:
        targets:
        gt_coords:

    Returns:

    """
    fig = plt.figure(figsize=(10, 10))
    # heatmaps_to_coords needs [batch_size, num_joints, height, width]
    # coords, _ = heatmaps_to_coords(targets)
    coords = sure_to_numpy(coords.clone())
    for idx in range(8):
        fig.add_subplot(4, 2, idx + 1)
        fig.suptitle('Blue: GT, Red: Predicted')
        if inputs[idx].shape[-1] == 1:
            plt.imshow(adjust_support(inputs[idx].squeeze(-1), "0->255"))
        else:
            plt.imshow(adjust_support(inputs[idx], "0->255"))
        mask = np.ones(20).astype(bool)
        for kpt in range(0, len(coords[0])):
            if (gt_coords[idx][:, :2][kpt] == [0, 0]).all():
                mask[kpt] = False
                # If gt_coords are 0,0 meaning not present in the dataset, don't draw them.
                continue

            plt.plot([
                np.array(gt_coords[idx][:, :2][kpt][0]),
                np.array(coords[idx][kpt][0])
            ], [
                np.array(gt_coords[idx][:, :2][kpt][1]),
                np.array(coords[idx][kpt][1])
            ],
                     'bx-',
                     alpha=0.3)

        plt.scatter(gt_coords[idx][mask][:, 0],
                    gt_coords[idx][mask][:, 1],
                    c="blue")
        plt.scatter(coords[idx][mask][:, 0], coords[idx][mask][:, 1], c="red")
    return fig
Example #12
def ssim(root,
         data_in,
         data_out,
         config,
         im_in_key='image',
         im_out_key='image',
         name='ssim'):

    data_range = retrieve(config, 'ssim_cb/data_range', default='None')
    if data_range == 'None':
        data_range = None

    im_shape = np.shape(retrieve(data_in[0], im_in_key))
    multichannel = len(im_shape) == 3 and im_shape[-1] > 1

    ssims = []
    for i in trange(len(data_in), desc=name):
        im_targ = retrieve(data_in[i], im_in_key)
        im_gen = retrieve(data_out[i], im_out_key)

        im_targ = adjust_support(im_targ, '0->1')
        im_gen = adjust_support(im_gen, '0->1')

        similarity = sk_ssim(im_targ,
                             im_gen,
                             data_range=data_range,
                             multichannel=multichannel)

        ssims += [similarity]

    ssims = np.array(ssims)

    mean_ssim = np.mean(ssims)
    std_ssim = np.std(ssims)

    print('\n{}: {:4.3f} +- {:4.3f}'.format(name, mean_ssim, std_ssim))

    save_root = os.path.join(root, name)
    os.makedirs(save_root, exist_ok=True)

    save_name = os.path.join(save_root, 'vals.npz')

    np.savez(save_name, ssims=ssims, mean=mean_ssim, std=std_ssim)
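
A hedged usage sketch for this callback (lists of dicts stand in for edflow datasets; ``retrieve`` also works on plain nested dicts):

import tempfile

import numpy as np

data_in = [{'image': np.random.rand(32, 32, 3)} for _ in range(4)]
data_out = [{'image': np.random.rand(32, 32, 3)} for _ in range(4)]

with tempfile.TemporaryDirectory() as root:
    ssim(root, data_in, data_out, config={})  # prints mean +- std, writes vals.npz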
Example #13
        def log_op():
            logs = {
                "images": {
                    "image_input_0": adjust_support(torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "disentanglement": adjust_support(np.expand_dims(generate_samples(inputs0, model), 0), "-1->1",
                                                      "0->1"),
                    "pose_random": adjust_support(np.expand_dims(generate_samples(inputs0, model, pose_random=True), 0),
                                                  "-1->1",
                                                  "0->1"),
                    "appearance_random": adjust_support(
                        np.expand_dims(generate_samples(inputs0, model, appreance_random=True), 0), "-1->1",
                        "0->1"),

                    "outputs": adjust_support(torch2numpy(predictions).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                },
                "scalars": {
                    "loss": loss,
                },
            }
            logs["images"]["image_input_1_flipped"] = adjust_support(
                torch2numpy(inputs1_flipped).transpose(0, 2, 3, 1), "-1->1", "0->1")
            if self.get_global_step() >= self.config["LossConstrained"]["no_kl_for"] and \
                    self.config["LossConstrained"]["active"]:
                logs["scalars"]["lambda_"] = log["scalars"]["lambda_"]
                logs["scalars"]["gain"] = log["scalars"]["gain"]
                logs["scalars"]["active"] = log["scalars"]["active"]
                logs["scalars"]["kl_loss"] = log["scalars"]["kl_loss"]
                logs["scalars"]["nll_loss"] = log["scalars"]["nll_loss"]
                logs["scalars"]["rec_loss"] = log["scalars"]["rec_loss"]
                logs["scalars"]["mu"] = log["scalars"]["mu"]
                logs["scalars"]["eps"] = log["scalars"]["eps"]

            if self.encoder_2:
                logs["images"]["image_input_1"] = adjust_support(torch2numpy(inputs1).transpose(0, 2, 3, 1), "-1->1",
                                                                 "0->1")
                logs["images"]["image_input_1_flipped"] = adjust_support(
                    torch2numpy(inputs1_flipped).transpose(0, 2, 3, 1), "-1->1", "0->1")
                logs["images"]["pose_reconstruction"] = adjust_support(
                    torch2numpy(kl_test_preds).transpose(0, 2, 3, 1), "-1->1", "0->1")
                if retrieve(self.config, "classifier/active", default=False):
                    logs["scalars"]["accuracy"] = accuracy
                logs["scalars"]["hog"] = np.array(hog_values).mean()
                logs["scalars"]["hog_std"] = np.array(hog_values).std()
                logs["scalars"]["hist"] = np.array(hist_values).mean()
                logs["scalars"]["hist_std"] = np.array(hist_values).std()

            return logs
Example #14
def resize(image, size, points=None, prev_size=None):
    '''Arguments:
        image (np.ndarray or PIL.Image): Image to resize.
        size (int or list): Size of the image after resize.
        points (np.ndarray): Optional set of points in image coordinate, which
            are translated to box coordinates. Shape: ``[(*), 2]``.
        prev_size (int or list): Used to calculate the scaling for the points.
            If not given, this is estimated from the shape of the image.

    Returns:
        np.ndarray: Resized image with shape ``[W, H, C]`` and same support
            as :attr:`image`.

        If points is not None:
            np.ndarray: The translated point coordinates.
    '''

    is_image = isinstance(image, Image.Image)
    # get_support expects an array; PIL images always have support 0->255.
    in_support = '0->255' if is_image else get_support(image)

    if prev_size is None:
        # Normalize to (width, height) so it matches PIL's size convention
        # and the (x, y) order of ``points``.
        prev_size = image.size if is_image else image.shape[:2][::-1]
    elif isinstance(prev_size, int):
        prev_size = [prev_size] * 2
    prev_size = np.array(prev_size)

    if not is_image:
        image = adjust_support(image, '0->255')
        image = Image.fromarray(image)

    if isinstance(size, int):
        size = [size] * 2
    image = image.resize(size)
    size = np.array(size)

    image = adjust_support(np.array(image), in_support)

    if points is not None:
        points[..., :2] = points[..., :2] * size / prev_size

        return image, points

    return image
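
A minimal sketch (assuming edflow's adjust_support/get_support are importable):

import numpy as np

img = np.random.rand(60, 80, 3)  # H=60, W=80, support 0->1
pts = np.array([[40.0, 30.0]])   # one (x, y) point

small, small_pts = resize(img, 32, points=pts)
print(small.shape)   # (32, 32, 3), support preserved
print(small_pts)     # point scaled into the 32x32 coordinates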
Example #15
def plot_kps(datum, idx=-1):
    import matplotlib.pyplot as plt
    im = datum['image']
    kps = datum['keypoints']

    f, ax = plt.subplots(1, 1)

    ax.imshow(adjust_support(im, '0->1', clip=True))
    ax.scatter(kps[:, 0], kps[:, 1])

    f.savefig('kps-{}.png'.format(idx))
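
A quick sketch of calling plot_kps (made-up datum; writes kps-0.png):

import numpy as np

datum = {
    'image': np.random.rand(64, 64, 3),
    'keypoints': np.array([[10.0, 20.0], [30.0, 40.0]]),
}
plot_kps(datum, idx=0)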
Example #16
    def get_example(self, idx):
        example = self.MP[idx]

        example['fid'] = self.labels['fid'][idx]
        example['pid'] = self.labels['pid'][idx]
        example['vid'] = example['video_path']

        example['box'] = example['bbox']
        example['orig_path'] = example['frame_path']

        example['keypoints'] = self.labels['keypoints'][idx]

        if self.load_images:
            crop_im = Image.open(example['crop_path'])

            crop_im = crop_im.resize(self.crop_size)
            crop_im = np.array(crop_im)
            crop_im = adjust_support(crop_im, '-1->1', '0->255')
            example['crop_im'] = crop_im

            m_path = example['mask_path']
            if os.path.exists(m_path):
                mask_im = Image.open(m_path)
                mask_im = mask_im.resize(self.crop_size)
                mask_im = np.array(mask_im)
                mask_im = adjust_support(mask_im, '-1->1')
            else:
                mask_im = np.ones(crop_im.shape[:2])

            if len(mask_im.shape) == 2:
                mask_im = np.expand_dims(mask_im, -1)

            example['mask'] = mask_im

            if self.apply_mask:
                example['target'] = np.concatenate([crop_im, mask_im], axis=-1)
            else:
                example['target'] = crop_im
            example['flow'] = None

        return example
Example #17
    def get_example(self, idx):
        target = self.base[idx]['crop']()

        pose = self.base.labels['kps_rel'][idx]
        pose = pose * np.array(target.shape[:2])[None]
        stickman = kp2stick(pose, size=[256, 256], kp_model=OPENPOSE_18)
        stickman = adjust_support(stickman, '-1->1', '0->255')

        app_idx = self.prng.choice(len(self.base))
        # Call the loader, as is done for ``target`` above.
        appearance = self.base[app_idx]['crop']()

        return {'stickman': stickman, 'appearance': appearance, 'target': target}
Example #18
        def log_op():

            logs = {
                "images": {
                    "p0a0": adjust_support(torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "p0a1": adjust_support(torch2numpy(inputs1).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "p1a0": adjust_support(torch2numpy(inputs2).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "p1a1": adjust_support(torch2numpy(inputs3).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "disentanglement": adjust_support(torch2numpy(predictions).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "testing": adjust_support(torch2numpy(testing).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                },
                "scalars": {
                    "loss": loss,
                    "loss_dis": loss_dis,
                },
            }
            logs["scalars"]["lambda_"] = log["scalars"]["lambda_"]
            logs["scalars"]["gain"] = log["scalars"]["gain"]
            logs["scalars"]["active"] = log["scalars"]["active"]
            logs["scalars"]["kl_loss"] = log["scalars"]["kl_loss"]
            logs["scalars"]["nll_loss"] = log["scalars"]["nll_loss"]
            logs["scalars"]["rec_loss"] = log["scalars"]["rec_loss"]
            logs["scalars"]["mu"] = log["scalars"]["mu"]
            logs["scalars"]["eps"] = log["scalars"]["eps"]

            return logs
Example #19
        def eval_op():
            from datetime import datetime
            now = datetime.now()
            mystring = now.strftime("%m-%d-%Y-%H-%M-%S")
            n_samples = 4
            out, hog, cosine_distances, hist_similarities, cosine_row, hist_column = generate_samples(inputs0, model,
                                                                                                      n_samples)
            out_pose, hog_pose, cosine_distances_pose, hist_similarities_pose, cosine_row_pose, hist_column_pose = generate_samples(
                inputs0, model, n_samples, pose_random=True, )
            out_appearance, hog_appearance, cosine_distances_appearance, hist_similarities_appearance, cosine_row_appearance, hist_column_appearance = generate_samples(
                inputs0, model, n_samples, appreance_random=True)
            export_path = "/export/home/ffeldman/tempfolder/"

            save_hog_image(hog, out, n_samples, cosine_distances, cosine_row, export_path, mystring)
            save_hist_image(out, n_samples, hist_similarities, hist_column, export_path, mystring)
            logs = {

                #"image_input_0": adjust_support(torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                #"inputs1_flipped": adjust_support(torch2numpy(inputs1_flipped).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                "disentanglement": adjust_support(
                    np.concatenate(
                        (np.expand_dims(out.transpose(1, 2, 0), 0),) * inputs0.size(0)),
                    "-1->1", "0->1"),
                "pose_random": adjust_support(
                    np.concatenate((
                                       np.expand_dims(
                                           out_pose.transpose(1, 2, 0),
                                           0),) * inputs0.size(0)),
                    "-1->1", "0->1"),
                "appearance_random": adjust_support(
                    np.concatenate((np.expand_dims(
                        out_appearance.transpose(1, 2, 0), 0),) * inputs0.size(
                        0)),
                    "-1->1", "0->1"),

                #"test_preds": adjust_support(torch2numpy(kl_test_preds).transpose(0, 2, 3, 1), "-1->1", "0->1"),
            }
            return logs
Example #20
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial.distance
import skimage.exposure
import skimage.feature
from edflow.data.util import adjust_support


def hog_similarity(img1, img2=None, PLOT=False):
    orientations = 8
    pixels_per_cell = (16, 16)
    img1 = adjust_support(img1, "0->255", "0->1")
    hog_inp, hog_img_inp = skimage.feature.hog(img1,
                                               orientations=orientations,
                                               pixels_per_cell=pixels_per_cell,
                                               visualize=True,
                                               feature_vector=True)
    hog_img_inp = skimage.exposure.rescale_intensity(hog_img_inp,
                                                     in_range=(0, 10))

    if isinstance(img2, np.ndarray):
        img2 = adjust_support(img2, "0->255", "0->1")
        hog_dis, hog_img_dis = skimage.feature.hog(
            img2,
            orientations=orientations,
            pixels_per_cell=pixels_per_cell,
            # cells_per_block=(1, 1),
            visualize=True,
            feature_vector=True)
        hog_img_dis = skimage.exposure.rescale_intensity(hog_img_dis,
                                                         in_range=(0, 10))

        value = 1 - scipy.spatial.distance.cosine(hog_inp, hog_dis)
        if PLOT:
            fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
            plt.title(value)
            ax1.imshow(hog_img_inp)
            ax2.imshow(hog_img_dis)
            ax3.imshow(adjust_support(img1, "0->1"))
            ax4.imshow(adjust_support(img2, "0->1"))
            plt.show()
        return value, [hog_img_inp, hog_img_dis]

    # Without a second image, only the HOG visualization of img1 is returned.
    return hog_img_inp
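
A hedged usage sketch (grayscale random arrays keep skimage's hog happy across versions):

import numpy as np

a = np.random.rand(64, 64)
b = np.random.rand(64, 64)

value, (hog_a, hog_b) = hog_similarity(a, b)
print(value)  # cosine similarity of the two HOG descriptors; 1.0 = identical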
Example #21
    def loader(support=support, resize_to=resize_to):
        # ``path``, ``support`` and ``resize_to`` are captured from the
        # enclosing scope; the defaults freeze their values at definition time.
        im = Image.open(path)

        if resize_to is not None:
            if isinstance(resize_to, int):
                resize_to = [resize_to] * 2

            im = im.resize(resize_to)

        im = np.array(im)

        if support == "0->255":
            return im
        else:
            return adjust_support(im, support, "0->255")
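
A sketch of the factory this closure presumably sits in (the name image_loader and its signature are assumptions):

import numpy as np
from PIL import Image
from edflow.data.util import adjust_support


def image_loader(path, support='0->255', resize_to=None):  # hypothetical wrapper
    def loader(support=support, resize_to=resize_to):
        im = Image.open(path)
        if resize_to is not None:
            if isinstance(resize_to, int):
                resize_to = [resize_to] * 2
            im = im.resize(resize_to)
        im = np.array(im)
        if support == "0->255":
            return im
        return adjust_support(im, support, "0->255")
    return loader


load = image_loader('example.png', support='-1->1', resize_to=64)
# image = load()  # loads lazily: resized to 64x64, support -1->1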
Example #22
def run_through_dataset(
    dataset, support: str = "-1->1", batch_size: int = 50, image_key: str = "image", model_name=""
):
    """

    :param dataset: DatasetMixin which contains the images.
    :param support: Support of images. One of '-1->1', '0->1' or '0->255'
    :param batch_size: The images numpy array is split into batches with batch size
                     batch_size. A reasonable batch size depends on the disposable hardware.
    :param image_key: Dataset key containing the image to be embedded
    :return: np.ndarray embedding.
    """

    dataset_length = len(dataset)
    if batch_size > dataset_length:
        print("Warning: batch size exceeds dataset length; "
              "setting batch size to the length of the dataset.")
        batch_size = dataset_length

    batches = make_batches(dataset, batch_size, shuffle=False)
    n_batches = len(batches)
    n_used_imgs = n_batches * batch_size
    embeddings = []  # np.empty((n_used_imgs, 128))
    labels = []
    sess_config = tf.compat.v1.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    session = tf.compat.v1.Session(config=sess_config)

    model = reIdModel(model_name=model_name)
    if os.path.basename(TRIP_CHECK) == "checkpoint-25000":
        initialize_model(model, TRIP_CHECK, session)

    for i, batch in enumerate(tqdm(batches, desc="reID")):
        if i >= n_batches:
            break
        images = retrieve(batch, image_key)
        labels_batch = retrieve(batch, "pose_pid")
        images = adjust_support(
            np.array(images), future_support="0->255", current_support=support, clip=True
        )
        images = images.astype(np.float32)[..., :3]

        batch_embeddings = get_embedding(model, session=session, image=images)["emb"]
        embeddings += [batch_embeddings.reshape(batch_size, -1)]
        labels += [labels_batch]
    batches.finalize()
    return np.array(embeddings), np.array(labels)
Example #23
        def log_op():
            from AnimalPose.utils.log_utils import plot_pred_figure
            from edflow.data.util import adjust_support

            logs = {
                "images": {
                    "image_input": adjust_support(torch2numpy(inputs).transpose(0, 2, 3, 1), "-1->1"),
                },
                "scalars": {
                    "loss": losses["batch"]["total"],
                    "accuracy": accuracy.cpu().numpy(),
                },
                "figures": {
                    "predictions": plot_pred_figure(inputs, preds.cpu().detach().numpy(), labels)
                }

            }
            if self.config["losses"]["CEL"]:
                logs["scalars"]["CrossEntropyLoss"] = losses["batch"]["CEL"]
            return logs
Example #24
def image_plot(start):

    N = 20
    K = 100

    HG = HumanGaitFixedBox({'data_split': 'train'})
    edprint(HG.labels)

    kps = HG.labels['kps_fixed_rel'][..., :2].astype('float32')
    print(kps)

    kp_hidden = kps[:int(0.84 * len(HG))]

    R = ReferenceSampler(kp_hidden, k=K)

    start_ = int(0.84 * len(HG))
    start += start_
    end = start + N
    q_idxs = list(range(start, end))
    print(q_idxs)
    q = kps[start:end]

    opt = R(q)

    print(opt)

    f, AX = plt.subplots(2,
                         len(opt),
                         figsize=[N / 10 * 12.8, 7.2],
                         dpi=100,
                         constrained_layout=True)

    HG.expand = True

    for Ax, indices in zip(AX, [q_idxs, opt]):
        for ax, idx in zip(Ax, indices):
            im = adjust_support(HG[idx]['target'], '0->1')
            ax.imshow(im)
            ax.axis('off')

    f.savefig(f'viterbi_{start}.pdf')
Example #25
 def log_op():
     from edflow.data.util import adjust_support
     logs = {
         "images": {
             "inputs0":
             adjust_support(
                 torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
             "inputs1":
             adjust_support(
                 torch2numpy(inputs1).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
             "pred0":
             adjust_support(
                 torch2numpy(pred0).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
             "pred1":
             adjust_support(
                 torch2numpy(pred1).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
             "mixed_reconstruction":
             adjust_support(
                 torch2numpy(mixed_reconstruction).transpose(
                     0, 2, 3, 1), "-1->1", "0->1"),
             "input0_flipped":
             adjust_support(
                 torch2numpy(torch.flip(inputs0,
                                        [0])).transpose(0, 2, 3, 1),
                 "-1->1", "0->1"),
             "flip_test":
             adjust_support(
                 torch2numpy(flip_test).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
         },
         "scalars": {
             "loss":
             loss_appearance + loss_pose + loss_mixed_reconstruction,
             "loss_appearance": loss_appearance,
             "loss_pose": loss_pose,
             "loss_mixed_reconstruction": loss_mixed_reconstruction,
         },
     }
     return logs
Example #26
def test_new_crop(d, idx):
    from multiperson_dataset import square_bbox, get_kps_rel

    kps = d['keypoints_abs']

    box = square_bbox(kps)
    kps_rel = get_kps_rel(kps, box)  # currently unused

    image = np.array(Image.open(d['frame_path']))
    im_crop = crop(image, box)

    im_crop = adjust_support(im_crop, '0->255')
    im_crop = Image.fromarray(im_crop)
    im_crop.save('croptest_{}.png'.format(idx), 'PNG')

    d['keypoints'] = kps
    d['im_crop'] = im_crop

    plot_kps(d, idx)
Example #27
def plot_outcome(D, opt, q_idxs, crop_key, full_name):
    import matplotlib.pyplot as plt
    from edflow.data.util import adjust_support

    f, AX = plt.subplots(2, len(opt),
                         figsize=[len(opt)/10*12.8, 7.2],
                         dpi=100,
                         constrained_layout=True)

    D.expand = True

    for i, [Ax, indices] in enumerate(zip(AX, [q_idxs, opt])):
        for ax, idx in zip(Ax, indices):
            # Author-specific remote mount; adjust to your environment.
            D.base.loader_kwargs['target']['root'] = '/home/jhaux/remote/cg2'
            ex = D[idx]
            crop = ex[crop_key]
            im = adjust_support(crop, '0->1')
            ax.imshow(im)
            ax.axis('off')
            ax.set_title(f'{"Q" if i == 0 else "R"}: {idx}')

    f.savefig(f'{full_name}.pdf')
Example #28
def plot_pred_figure(images, predictions, labels=None):
    """
    Remember to clip output numpy array to [0, 255] range and cast it to uint8.
    Otherwise matplot.pyplot.imshow would show weird results.
    Args:
        images:
        predictions:
    Returns:
    """
    from AnimalPose.data.animals_VOC2011 import animal_class
    idx_to_animal = {v: k for k, v in animal_class.items()}
    fig = plt.figure(figsize=(10, 10))
    for idx in range(8):
        fig.add_subplot(4, 2, idx + 1)
        fig.suptitle('Input, Prediction')
        if labels is not None:
            plt.title(f"GT:{labels[idx]}, Pred: {predictions[idx]}")
        else:
            plt.title(f"{idx_to_animal[predictions[idx]]}")
        plt.imshow(
            adjust_support(images[idx].cpu().numpy().transpose(1, 2, 0),
                           "0->1", "0->1"))
        plt.tight_layout()
    return fig
Example #29
def image_saver(savepath, image):
    """

    Parameters
    ----------
    savepath :

    image :


    Returns
    -------

    """
    im_adjust = adjust_support(image, "0->255", clip=True)

    modes = {1: "L", 3: "RGB", 4: "RGBA"}
    mode = modes[im_adjust.shape[-1]]
    if mode == "L":
        im_adjust = np.squeeze(im_adjust, -1)

    im = Image.fromarray(im_adjust, mode)

    im.save(savepath)
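
A brief usage sketch (random arrays stand in for real images; the paths are illustrative):

import numpy as np

rgb = np.random.rand(32, 32, 3)    # any support; clipped into 0->255
gray = np.random.rand(32, 32, 1)   # one channel -> saved with mode "L"

image_saver('rgb.png', rgb)
image_saver('gray.png', gray)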
Example #30
        def log_op():
            from edflow.data.util import adjust_support

            #PCK_THRESH = [0.01, 0.025, 0.05, 0.1, 0.125, 0.15, 0.175, 0.2, 0.25, 0.5]
            #if self.config['pck_alpha'] not in PCK_THRESH: PCK_THRESH.append(self.config["pck_alpha"])

            #coords, _ = heatmaps_to_coords(pose_predictions.clone(), thresh=self.config["hm"]["thresh"])
            #pck = {t: percentage_correct_keypoints(kwargs["kps"], coords, t, self.config["pck"]["type"]) for t in
            #       PCK_THRESH}

            logs = {
                "images": {
                    "inputs0":
                    adjust_support(
                        torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1",
                        "0->1"),
                    "inputs1":
                    adjust_support(
                        torch2numpy(inputs1).transpose(0, 2, 3, 1), "-1->1",
                        "0->1"),
                    "pred0":
                    adjust_support(
                        torch2numpy(pred0).transpose(0, 2, 3, 1), "-1->1",
                        "0->1"),
                    "pred1":
                    adjust_support(
                        torch2numpy(pred1).transpose(0, 2, 3, 1), "-1->1",
                        "0->1"),
                    "mixed_reconstruction":
                    adjust_support(
                        torch2numpy(mixed_reconstruction).transpose(
                            0, 2, 3, 1), "-1->1", "0->1"),
                    "cycle0":
                    adjust_support(
                        torch2numpy(pose_recon).transpose(0, 2, 3, 1), "-1->1",
                        "0->1"),
                    "cycle1":
                    adjust_support(
                        torch2numpy(appearance_recon).transpose(0, 2, 3, 1),
                        "-1->1", "0->1"),
                    #"targets": adjust_support(heatmaps_to_image(kwargs["targets0"]).transpose(0, 2, 3, 1), "-1->1"),
                    #"inputs_with_stick": make_stickanimal(torch2numpy(inputs).transpose(0, 2, 3, 1), kwargs["kps0"]),
                    #"stickanimal": make_stickanimal(torch2numpy(inputs).transpose(0, 2, 3, 1), pose_predictions.clone(),
                    #                                thresh=self.config["hm"]["thresh"]),
                    #"pred_hm": adjust_support(heatmaps_to_image(torch2numpy(pose_predictions)).transpose(0, 2, 3, 1),
                    #                          "-1->1", "0->1"),
                },
                "scalars": {
                    "loss": autoencoder_loss + cycle_loss,  # + pose_loss,
                    "loss_autoencoder": autoencoder_loss,
                    "recon_loss": cycle_loss,
                    #"pose_loss": pose_loss,
                    #f"PCK@{self.config['pck_alpha']}": np.around(pck[self.config['pck_alpha']][0], 5),
                },
            }

            # if self.config["pck"]["pck_multi"]:
            #     for key, val in pck.items():
            #         # get mean value for pck at given threshold
            #         logs["scalars"][f"PCK@{key}"] = np.around(val[0], 5)
            #         for idx, part in enumerate(val[1]):
            #             logs["scalars"][f"PCK@{key}_{self.dataset.get_idx_parts(idx)}"] = np.around(part, 5)

            # gridded_outputs = np.expand_dims(sure_to_numpy(torchvision.utils.make_grid(
            #    predictions[0], nrow=10)), 0).transpose(1, 2, 3, 0)
            # gridded_targets = np.expand_dims(sure_to_numpy(torchvision.utils.make_grid(
            #    sure_to_torch(kwargs["targets"])[0], nrow=10)), 0).transpose(1, 2, 3, 0)
            #
            # logs = {
            #     "images": {
            #         # Image input not needed, because stick animal is printed on input image
            #         # "image_input": adjust_support(torch2numpy(inputs).transpose(0, 2, 3, 1), "-1->1"),
            #         #"first_pred": adjust_support(gridded_outputs, "-1->1", "0->1"),
            #         #"first_targets": adjust_support(gridded_targets, "-1->1", "0->1"),
            #         "outputs": adjust_support(heatmaps_to_image(torch2numpy(predictions)).transpose(0, 2, 3, 1),
            #                                   "-1->1", "0->1"),

            #     },
            #     "scalars": {
            #         "loss": losses["total"],
            #         "learning_rate": self.optimizer.state_dict()["param_groups"][0]["lr"],
            #         f"PCK@{self.config['pck_alpha']}": np.around(pck[self.config['pck_alpha']][0], 5),
            #     },
            #     "figures": {
            #         "Keypoint Mapping": plot_input_target_keypoints(torch2numpy(inputs).transpose(0, 2, 3, 1),
            #                                                         # get BHWC
            #                                                         torch2numpy(predictions),  # stay BCHW
            #                                                         kwargs["kps"], coords),
            #     }
            # }
            # if self.config["losses"]["L2"]:
            #     logs["scalars"]["L2"] = losses["L2"]
            # if self.config["losses"]["L1"]:
            #     logs["scalars"]["L1"] = losses["L1"]
            # if self.config["losses"]["perceptual"]:
            #     logs["scalars"]["perceptual"] = losses["perceptual"]
            #

            return logs