    def on_epoch_end(self, epoch, logs=None):

        # create the folder for predictions
        cur_out_dir = join(
            self.out_dir_path,
            'epoch_{:03d}'.format(epoch) if epoch != 'begin' else 'begin')
        if not os.path.exists(cur_out_dir):
            os.makedirs(cur_out_dir)

        # load and predict
        X, Y = load_batch(batchsize=batchsize, mode='val', gt_type='fix')
        P = self.model.predict(X)

        for b in range(0, batchsize):
            x = X[b].transpose(1, 2, 0)
            x = normalize(x)

            p = postprocess_predictions(P[b, 0], shape_r, shape_c)
            p = np.tile(np.expand_dims(p, axis=2), reps=(1, 1, 3))

            y = postprocess_predictions(Y[b, 0], shape_r, shape_c)
            y = np.tile(np.expand_dims(y, axis=2), reps=(1, 1, 3))
            y = normalize(y)

            # stitch and save
            stitch = stitch_together([x, p, y], layout=(1, 3))
            cv2.imwrite(join(cur_out_dir, '{:02d}.png'.format(b)), stitch)
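
For reference, the method above typically lives inside a keras.callbacks.Callback subclass registered at training time. The following is a minimal, self-contained sketch of that wiring; the class name, constructor arguments and the saved .npy file are illustrative, not part of the project.

import os
from os.path import join

import numpy as np
from keras.callbacks import Callback


class PredictionLogger(Callback):
    """Dump raw predictions on a fixed validation batch after every epoch."""

    def __init__(self, X_val, out_dir_path):
        super(PredictionLogger, self).__init__()
        self.X_val = X_val
        self.out_dir_path = out_dir_path

    def on_epoch_end(self, epoch, logs=None):
        cur_out_dir = join(self.out_dir_path, 'epoch_{:03d}'.format(epoch))
        if not os.path.exists(cur_out_dir):
            os.makedirs(cur_out_dir)

        predictions = self.model.predict(self.X_val)  # self.model is set by Keras
        np.save(join(cur_out_dir, 'predictions.npy'), predictions)

# usage (illustrative):
# model.fit(X_train, Y_train, callbacks=[PredictionLogger(X_val, out_dir_path='predictions')])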
Example #2
    def on_epoch_end(self, epoch, logs=None):

        # create epoch folder
        epoch_out_dir = join(self.out_dir_path, '{:02d}'.format(epoch))
        if not exists(epoch_out_dir):
            os.makedirs(epoch_out_dir)

        # load batch
        seqs, frs, X, Y = RMDN_batch(batchsize=batchsize, mode='val')

        # predict batch
        Z = self.model.predict(X)

        for b in range(0, batchsize):
            for t in range(0, T):
                seq = seqs[b, t]
                frame_id = frs[b, t]

                # load this frame image and gt
                rgb_frame = read_image(join(DREYEVE_ROOT, '{:02d}'.format(seq), 'frames', '{:06d}.jpg'.format(frame_id)),
                                       channels_first=False, resize_dim=(h, w), dtype=np.uint8)
                fixation_map = read_image(join(DREYEVE_ROOT, '{:02d}'.format(seq), 'saliency_fix',
                                               '{:06d}.png'.format(frame_id + 1)),
                                          channels_first=False, resize_dim=(h, w), dtype=np.uint8)

                # extract this frame mixture
                gmm = Z[b, t]

                pred_map = gmm_to_probability_map(gmm=gmm, image_size=(h, w))
                pred_map = normalize(pred_map)
                pred_map = np.tile(np.expand_dims(pred_map, axis=-1), reps=(1, 1, 3))

                # stitch
                stitch = stitch_together([rgb_frame, pred_map, fixation_map], layout=(1, 3))
                write_image(join(epoch_out_dir, '{:02d}_{:02d}.jpg'.format(b, t)), stitch)
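
Every snippet on this page relies on a stitch_together helper (from the project's computer_vision_utils package) whose source is not shown. The function below is only an assumption of what a compatible helper could look like: it tiles same-sized HxWx3 uint8 images into a (rows, cols) grid and optionally resizes the result, with resize_dim given as (rows, cols).

import cv2
import numpy as np


def stitch_grid(images, layout, resize_dim=None):
    """Tile a list of HxWx3 uint8 images into a (rows, cols) grid (stand-in for stitch_together)."""
    rows, cols = layout
    h, w = images[0].shape[:2]

    canvas = np.zeros(shape=(rows * h, cols * w, 3), dtype=np.uint8)
    for idx, img in enumerate(images):
        r, c = divmod(idx, cols)
        resized = cv2.resize(img, (w, h))  # cv2.resize takes (width, height)
        canvas[r * h:(r + 1) * h, c * w:(c + 1) * w] = resized

    if resize_dim is not None:
        canvas = cv2.resize(canvas, resize_dim[::-1])  # resize_dim is (rows, cols)
    return canvas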
Example #3
def visualize_batch(X, Y):
    """
    Helper function to visualize a batch.

    :param X: input data: [X, X_s, X_c, OF, OF_s, OF_c, SEG, SEG_s, SEG_c].
    :param Y: saliency data: [Y, Y_c].
    """
    batchsize, _, frames_per_batch, h, w = X[0].shape
    batchsize, _, frames_per_batch, h_s, w_s = X[1].shape
    batchsize, _, frames_per_batch, h_c, w_c = X[2].shape

    X, X_s, X_c, OF, OF_s, OF_c, SEG, SEG_s, SEG_c = X
    Y, Y_c = Y
    for b in range(0, batchsize):
        for f in range(0, frames_per_batch):
            # FULL FRAME SECTION -----
            x = X[b, :, 0, :, :].transpose(1, 2, 0)
            x = cv2.cvtColor(x, cv2.COLOR_RGB2BGR)

            of = OF[b, :, 0, :, :].transpose(1, 2, 0)
            of = cv2.cvtColor(of, cv2.COLOR_RGB2BGR)

            # seg is different, we have to turn it into colors
            seg = SEG[b, :, 0, :, :]
            seg = palette[np.argmax(seg, axis=0).ravel()].reshape(h, w, 3)
            seg = cv2.cvtColor(seg, cv2.COLOR_RGB2BGR)

            # tile y to 3 channels so it can be stitched
            y = Y[b, 0, :, :]
            y = (np.tile(y, (3, 1, 1))).transpose(1, 2, 0)

            # stitch and visualize
            stitch_ff = stitch_together(
                [normalize(x), of, seg, normalize(y)],
                layout=(2, 2),
                resize_dim=(540, 960))

            # CROPPED FRAME SECTION -----
            x_c = X_c[b, :, f, :, :].transpose(1, 2, 0)
            x_c = cv2.cvtColor(x_c, cv2.COLOR_RGB2BGR)

            of_c = OF_c[b, :, f, :, :].transpose(1, 2, 0)
            of_c = cv2.cvtColor(of_c, cv2.COLOR_RGB2BGR)

            # seg is different, we have to turn it into colors
            seg_c = SEG_c[b, :, f, :, :]
            seg_c = palette[np.argmax(seg_c,
                                      axis=0).ravel()].reshape(h_c, w_c, 3)
            seg_c = cv2.cvtColor(seg_c, cv2.COLOR_RGB2BGR)

            # tile y to 3 channels so it can be stitched
            y_c = Y_c[b, 0, :, :]
            y_c = (np.tile(y_c, (3, 1, 1))).transpose(1, 2, 0)

            # stitch and visualize
            stitch_c = stitch_together(
                [normalize(x_c), of_c, seg_c,
                 normalize(y_c)],
                layout=(2, 2),
                resize_dim=(540, 960))

            # SMALL FRAME SECTION -----
            x_s = X_s[b, :, f, :, :].transpose(1, 2, 0)
            x_s = cv2.cvtColor(x_s, cv2.COLOR_RGB2BGR)

            of_s = OF_s[b, :, f, :, :].transpose(1, 2, 0)
            of_s = cv2.cvtColor(of_s, cv2.COLOR_RGB2BGR)

            # seg is different, we have to turn it into colors
            seg_s = SEG_s[b, :, f, :, :]
            seg_s = palette[np.argmax(seg_s,
                                      axis=0).ravel()].reshape(h_s, w_s, 3)
            seg_s = cv2.cvtColor(seg_s, cv2.COLOR_RGB2BGR)

            # tile y to 3 channels so it can be stitched
            # also, resize it to the small resolution (just for visualization)
            y_s = cv2.resize(Y[b, 0, :, :], dsize=(h_s, w_s)[::-1])
            y_s = (np.tile(y_s, (3, 1, 1))).transpose(1, 2, 0)

            # stitch and visualize
            stitch_s = stitch_together(
                [normalize(x_s), of_s, seg_s,
                 normalize(y_s)],
                layout=(2, 2),
                resize_dim=(540, 960))

            # stitch the three stitches together
            final_stitch = stitch_together([stitch_ff, stitch_s, stitch_c],
                                           layout=(1, 3),
                                           resize_dim=(810, 1440))
            cv2.imshow('Batch_viewer', final_stitch.astype(np.uint8))
            cv2.waitKey()
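
The normalize helper used above is also imported from computer_vision_utils and is not reproduced on this page. A minimal stand-in consistent with how it is used (min-max scaling to the displayable [0, 255] uint8 range) might look like this:

import numpy as np


def normalize_for_display(img):
    """Min-max scale an array to [0, 255] and cast to uint8 (stand-in for normalize)."""
    img = img.astype(np.float32)
    img -= img.min()
    max_value = img.max()
    if max_value > 0:
        img /= max_value
    return (img * 255).astype(np.uint8)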
Example #4
    model.compile(optimizer='adam', loss='kld')  # not strictly needed when only predicting
    model.load_weights('weights.mlnet.07-0.0193.pkl')  # load weights

    # set up some directories
    pred_dir = join(args.pred_dir, '{:02d}'.format(int(args.seq)), 'output')
    makedirs([pred_dir])

    sequence_dir = join(dreyeve_dir, '{:02d}'.format(int(args.seq)))
    for sample in tqdm(range(15, 7500 - 1)):
        X = load_dreyeve_sample(sequence_dir=sequence_dir,
                                sample=sample,
                                shape_c=shape_c,
                                shape_r=shape_r)

        # predict sample
        P = model.predict(X)
        P = np.squeeze(P)

        # save model output
        P = postprocess_predictions(P, shape_r, shape_c)
        cv2.imwrite(join(pred_dir, '{:06d}.png'.format(sample)), P)

        if verbose:
            # visualization
            x_img = X[0].transpose(1, 2, 0)
            p_img = cv2.cvtColor(P, cv2.COLOR_GRAY2BGR)
            stitch = stitch_together([normalize(x_img), p_img], layout=(1, 2))

            cv2.imshow('prediction', stitch)
            cv2.waitKey(1)
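
postprocess_predictions comes from the mlnet codebase and its source is not included here. As a rough, hedged approximation of what the snippet above expects from it (a saliency map resized to shape_r x shape_c and rescaled so it can be written as a PNG), one could write:

import cv2
import numpy as np


def postprocess_sketch(pred, shape_r, shape_c):
    """Approximation only: resize to the target shape, smooth, rescale to [0, 255]."""
    pred = cv2.resize(pred.astype(np.float32), (shape_c, shape_r))  # cv2 wants (width, height)
    pred = cv2.GaussianBlur(pred, (5, 5), sigmaX=0)                 # mild smoothing
    pred = pred / (pred.max() + 1e-8) * 255.0
    return pred.astype(np.uint8)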
Example #5
if __name__ == '__main__':

    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--seq")
    parser.add_argument("--pred_dir")
    args = parser.parse_args()

    assert args.seq is not None, 'Please provide a correct dreyeve sequence'
    assert args.pred_dir is not None, 'Please provide a correct pred_dir'

    # get the model
    model = RMDN_test(hidden_states=hidden_states, n_mixtures=C, input_shape=(1, encoding_dim))
    model.load_weights('bazzani.h5')

    # set up some directories
    pred_dir = join(args.pred_dir, '{:02d}'.format(int(args.seq)), 'output')
    makedirs([pred_dir])

    sequence_dir = join(DREYEVE_ROOT, '{:02d}'.format(int(args.seq)))
    for sample in tqdm(range(15, 7500)):
        X = load_dreyeve_sample(sequence_dir=sequence_dir, sample=sample)

        # predict sample
        P = model.predict(X)
        P_map = gmm_to_probability_map(P[0, 0], image_size=(h, w))

        # save model output
        cv2.imwrite(join(pred_dir, '{:06d}.png'.format(sample)), normalize(P_map))
Example #6
    def on_epoch_end(self, epoch, logs=None):

        if self.branch == 'image':
            # X is [B_ff, B_s, B_c]
            X, Y = dreyeve_I_batch(batchsize=callback_batchsize, nb_frames=frames_per_seq, image_size=(h, w),
                                   mode='val', gt_type='fix')
        elif self.branch == 'optical_flow':
            X, Y = dreyeve_OF_batch(batchsize=callback_batchsize, nb_frames=frames_per_seq, image_size=(h, w),
                                    mode='val', gt_type='fix')
        elif self.branch == 'semseg':
            X, Y = dreyeve_SEG_batch(batchsize=callback_batchsize, nb_frames=frames_per_seq, image_size=(h, w),
                                     mode='val', gt_type='fix')
        elif self.branch == 'all':
            X, Y = dreyeve_batch(batchsize=callback_batchsize, nb_frames=frames_per_seq, image_size=(h, w),
                                 mode='val', gt_type='fix')

        # predict batch
        Z = self.model.predict(X)

        for b in range(0, callback_batchsize):

            if self.branch in ['image', 'optical_flow']:
                x_ff_img = X[0][b]  # fullframe, b-th image
                x_ff_img = np.squeeze(x_ff_img, axis=1).transpose(1, 2, 0)

                x_cr_img = X[2][b][:, -1, :, :]  # cropped frame (last one), b-th image
                x_cr_img = x_cr_img.transpose(1, 2, 0)
            elif self.branch == 'semseg':
                x_ff_img = X[0][b]  # fullframe, b-th image
                x_ff_img = seg_to_colormap(np.argmax(np.squeeze(x_ff_img, axis=1), axis=0), channels_first=False)

                x_cr_img = X[2][b][:, -1, :, :]  # cropped frame (last one), b-th image
                x_cr_img = seg_to_colormap(np.argmax(x_cr_img, axis=0), channels_first=False)
            elif self.branch == 'all':
                # fullframe
                i_ff_img = X[0][b]
                i_ff_img = np.squeeze(i_ff_img, axis=1).transpose(1, 2, 0)
                of_ff_img = X[3][b]
                of_ff_img = np.squeeze(of_ff_img, axis=1).transpose(1, 2, 0)
                seg_ff_img = seg_to_colormap(np.argmax(np.squeeze(X[6][b], axis=1), axis=0), channels_first=False)

                x_ff_img = stitch_together([normalize(i_ff_img), normalize(of_ff_img), normalize(seg_ff_img)],
                                           layout=(3, 1), resize_dim=i_ff_img.shape[:2])  # resized as if they were one image

                # crop
                i_cr_img = X[2][b][:, -1, :, :].transpose(1, 2, 0)
                of_cr_img = X[5][b][:, -1, :, :].transpose(1, 2, 0)
                seg_cr_img = seg_to_colormap(np.argmax(X[8][b][:, -1, :, :], axis=0), channels_first=False)

                x_cr_img = stitch_together([normalize(i_cr_img), normalize(of_cr_img), normalize(seg_cr_img)],
                                           layout=(3, 1), resize_dim=i_cr_img.shape[:2])  # resized as if they were one image

            # prediction
            z_ff_img = np.tile(np.expand_dims(normalize(Z[0][b, 0]), axis=2), reps=(1, 1, 3)).astype(np.uint8)
            z_cr_img = np.tile(np.expand_dims(normalize(Z[1][b, 0]), axis=2), reps=(1, 1, 3)).astype(np.uint8)

            # groundtruth
            y_ff_img = np.tile(np.expand_dims(normalize(Y[0][b, 0]), axis=2), reps=(1, 1, 3))
            y_cr_img = np.tile(np.expand_dims(normalize(Y[1][b, 0]), axis=2), reps=(1, 1, 3))

            # stitch and write
            stitch_ff = stitch_together([normalize(x_ff_img), z_ff_img, y_ff_img], layout=(1, 3), resize_dim=(500, 1500))
            stitch_cr = stitch_together([normalize(x_cr_img), z_cr_img, y_cr_img], layout=(1, 3), resize_dim=(500, 1500))
            write_image(join(self.out_dir_path, 'ff_e{:02d}_{:02d}.png'.format(epoch + 1, b + 1)), stitch_ff, channels_first=False)
            write_image(join(self.out_dir_path, 'cr_e{:02d}_{:02d}.png'.format(epoch + 1, b + 1)), stitch_cr, channels_first=False)
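
seg_to_colormap (and the palette array used in the earlier visualize_batch example) turn per-pixel class indices of the semantic segmentation into RGB colors. The sketch below is an assumption only; the number of classes and the colors are placeholders, not the project's palette.

import numpy as np

# placeholder palette: one RGB color per segmentation class (class count and colors are assumptions)
PALETTE = np.random.RandomState(42).randint(0, 256, size=(19, 3)).astype(np.uint8)


def seg_to_colormap_sketch(class_map, channels_first=False):
    """Map a (h, w) array of class indices to a (h, w, 3) RGB image using PALETTE."""
    h, w = class_map.shape
    rgb = PALETTE[class_map.ravel()].reshape(h, w, 3)
    return rgb.transpose(2, 0, 1) if channels_first else rgb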
Example #7
import numpy as np
import scipy.stats


def gmm_to_probability_map(gmm, image_size):
    """
    Convert a mixture of bivariate Gaussians into a probability map over the image.

    :param gmm: array of shape (n_mixtures, 6), one row per component
        (see the indexing below for the meaning of each column).
    :param image_size: (h, w) size of the output map.
    :return: a (h, w) map that sums to 1.
    """
    h, w = image_size

    y, x = np.mgrid[0:h:1, 0:w:1]
    pos = np.empty(x.shape + (2, ))
    pos[:, :, 0] = y
    pos[:, :, 1] = x

    out = np.zeros(shape=(h, w))

    for g in range(0, gmm.shape[0]):
        weight = gmm[g, 0]  # mixture weight of the g-th component
        normal = scipy.stats.multivariate_normal(mean=gmm[g, 1:3],
                                                 cov=[[gmm[g, 3], gmm[g, 5]],
                                                      [gmm[g, 5], gmm[g, 4]]])
        out += weight * normal.pdf(pos)

    out /= out.sum()

    return out


if __name__ == '__main__':

    gmm = np.array([[0.5, 50, 0, 100, 100, 0], [0.5, 100, 100, 10, 10, -1]],
                   dtype='float32')
    prob_map = gmm_to_probability_map(gmm, image_size=(128, 171))

    from computer_vision_utils.io_helper import normalize
    import cv2
    cv2.imshow('GMM', normalize(prob_map))
    cv2.waitKey()
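
Judging from the indexing inside gmm_to_probability_map, each mixture component is encoded as six values: [weight, mean_row, mean_col, var_row, var_col, covariance]. The illustrative check below builds the corresponding 2x2 covariance matrix and verifies it is positive definite, since scipy.stats.multivariate_normal rejects singular covariances by default.

import numpy as np

component = np.array([0.5, 50., 0., 100., 100., 0.])  # [weight, mu_row, mu_col, var_row, var_col, cov]
weight = component[0]
mean = component[1:3]
cov = np.array([[component[3], component[5]],
                [component[5], component[4]]])

assert np.all(np.linalg.eigvalsh(cov) > 0), 'covariance must be positive definite'
print('weight: {}, mean: {}'.format(weight, mean))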
Example #8
def load(seq, frame):
    """
    Function to load a 16-frame sequence to plot

    :param seq: the sequence number
    :param frame: the frame inside the sequence
    :return: a stitched image to show
    """

    small_size = (270, 480)

    sequence_x_dir = join(dreyeve_dir, '{:02d}'.format(seq))
    sequence_y_dir = join(pred_dir, '{:02d}'.format(seq))

    # x
    x_img = read_image(join(sequence_x_dir, 'frames',
                            '{:06d}.jpg'.format(frame)),
                       channels_first=False,
                       color_mode='BGR',
                       dtype=np.uint8)
    x_img_small = cv2.resize(x_img, small_size[::-1])
    of_img = read_image(join(sequence_x_dir, 'optical_flow',
                             '{:06d}.png'.format(frame + 1)),
                        channels_first=False,
                        color_mode='BGR',
                        resize_dim=small_size)
    seg_img = seg_to_colormap(np.argmax(np.squeeze(
        np.load(join(sequence_x_dir, 'semseg',
                     '{:06d}.npz'.format(frame)))['arr_0']),
                                        axis=0),
                              channels_first=False)
    seg_img = cv2.cvtColor(seg_img, cv2.COLOR_RGB2BGR)

    # pred
    image_p = normalize(
        np.squeeze(
            np.load(
                join(sequence_y_dir, 'image_branch',
                     '{:06d}.npz'.format(frame)))['arr_0']))
    flow_p = normalize(
        np.squeeze(
            np.load(
                join(sequence_y_dir, 'flow_branch',
                     '{:06d}.npz'.format(frame)))['arr_0']))
    semseg_p = normalize(
        np.squeeze(
            np.load(
                join(sequence_y_dir, 'semseg_branch',
                     '{:06d}.npz'.format(frame)))['arr_0']))
    dreyevenet_p = normalize(
        np.squeeze(
            np.load(
                join(sequence_y_dir, 'dreyeveNet',
                     '{:06d}.npz'.format(frame)))['arr_0']))

    image_p = cv2.resize(cv2.cvtColor(image_p, cv2.COLOR_GRAY2BGR),
                         small_size[::-1])
    flow_p = cv2.resize(cv2.cvtColor(flow_p, cv2.COLOR_GRAY2BGR),
                        small_size[::-1])
    semseg_p = cv2.resize(cv2.cvtColor(semseg_p, cv2.COLOR_GRAY2BGR),
                          small_size[::-1])
    dreyevenet_p = cv2.resize(cv2.cvtColor(dreyevenet_p, cv2.COLOR_GRAY2BGR),
                              small_size[::-1])

    s1 = stitch_together(
        [x_img_small, of_img, seg_img, image_p, flow_p, semseg_p],
        layout=(2, 3))
    x_img = cv2.resize(x_img, dsize=(s1.shape[1], s1.shape[0]))
    dreyevenet_p = cv2.resize(dreyevenet_p, dsize=(s1.shape[1], s1.shape[0]))
    dreyevenet_p = cv2.applyColorMap(dreyevenet_p, cv2.COLORMAP_JET)
    blend = cv2.addWeighted(x_img, 0.5, dreyevenet_p, 0.5, gamma=0)

    stitch = stitch_together([s1, blend], layout=(2, 1), resize_dim=(720, 970))

    return stitch
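
A hypothetical usage of the helper above (the sequence and frame indices are placeholders):

if __name__ == '__main__':
    stitch = load(seq=6, frame=1000)  # placeholder sequence/frame
    cv2.imshow('sequence_viewer', stitch)
    cv2.waitKey(0)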
Example #9
            metric_file.write('{},{},{},{},{},{},{},{},{}\n'.format(
                sample,
                cc_numeric(GT_sal, Y_dreyevenet),
                cc_numeric(GT_fix, Y_dreyevenet),
                cc_numeric(GT_sal, Y_image),
                cc_numeric(GT_fix, Y_image),
                cc_numeric(GT_sal, Y_flow),
                cc_numeric(GT_fix, Y_flow),
                cc_numeric(GT_sal, Y_semseg),
                cc_numeric(GT_fix, Y_semseg),
            ))

        if verbose:
            # visualization
            x_stitch = stitch_together([
                normalize(X[0][0, :, 0, :, :].transpose(1, 2, 0)),
                normalize(X[3][0, :, 0, :, :].transpose(1, 2, 0)),
                normalize(
                    seg_to_colormap(np.argmax(X[6][0, :, 0, :, :], axis=0),
                                    channels_first=False))
            ],
                                       layout=(3, 1),
                                       resize_dim=(720, 720))

            y_stitch = stitch_together([
                np.tile(normalize(Y_image[0].transpose(1, 2, 0)),
                        reps=(1, 1, 3)),
                np.tile(normalize(Y_flow[0].transpose(1, 2, 0)),
                        reps=(1, 1, 3)),
                np.tile(normalize(Y_semseg[0].transpose(1, 2, 0)),
                        reps=(1, 1, 3))