Example 1
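These examples are shown without their original module header. A minimal set of imports they rely on is sketched below; the helper functions concat, save_movie, and make_dir_if_not_exists are not standard matplotlib/numpy and are assumed to be provided by the surrounding library.

# Assumed imports for the examples on this page (a sketch, not the original module header).
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.animation as animation
from matplotlib.animation import FFMpegWriter
from matplotlib.gridspec import GridSpec
# The helpers concat, save_movie, and make_dir_if_not_exists are assumed to be
# importable from the surrounding library's plotting utilities.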
def make_reconstruction_movie(ims,
                              titles=None,
                              n_rows=0,
                              n_cols=0,
                              save_file=None,
                              frame_rate=15,
                              dpi=100):
    """Produce movie with original video and reconstructed videos.

    `ims` and `titles` are corresponding lists; this data is plotted using a linear index, i.e. if
    n_rows = 2 and n_cols = 3 the image stack in ims[2] will be in the first row, third column;
    the image stack in ims[4] will be in the second row, second column. If ims[i] is empty, that
    grid location will be skipped.

    Parameters
    ----------
    ims : :obj:`list` of :obj:`np.ndarray`
        each list element is of shape (n_frames, n_channels, y_pix, x_pix)
    titles : :obj:`list` of :obj:`str`, optional
        title for each panel
    n_rows : :obj:`int`
        number of rows in video grid layout
    n_cols : :obj:`int`
        number of columns in video grid layout
    save_file : :obj:`str`, optional
        full save file (path and filename)
    frame_rate : :obj:`float`, optional
        frame rate of saved movie
    dpi : :obj:`int`, optional
        dpi of movie figure; controls resolution of titles

    """

    for im in ims:
        if len(im) != 0:
            n_frames, n_channels, y_pix, x_pix = im.shape
            break
    scale_ = 5
    fig_width = scale_ * n_cols * n_channels / 2
    fig_height = y_pix / x_pix * scale_ * n_rows / 2
    offset = 0.5 if n_rows == 1 else 0
    fig = plt.figure(figsize=(fig_width, fig_height + offset), dpi=dpi)

    gs = GridSpec(n_rows, n_cols, figure=fig)
    axs = []
    ax_count = 0
    for i in range(n_rows):
        for j in range(n_cols):
            if ax_count < len(ims):
                axs.append(fig.add_subplot(gs[i, j]))
                ax_count += 1
            else:
                break
    for ax_i, ax in enumerate(axs):
        ax.set_xticks([])
        ax.set_yticks([])
        if len(ims[ax_i]) == 0:
            ax.set_axis_off()

    fontsize = 12
    titles = ['' for _ in range(n_cols * n_rows)] if titles is None else titles
    for ax_i, ax in enumerate(axs):
        if len(ims[ax_i]) != 0:
            ax.set_title(titles[ax_i], fontsize=fontsize)

    default_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}

    # ims is a list of lists, each row is a list of artists to draw in the current frame; here we
    # are just animating one artist, the image, in each frame
    ims_ani = []
    for i in range(n_frames):

        ims_curr = []

        for ax_i, ax in enumerate(axs):
            if len(ims[ax_i]) != 0:
                ims_tmp = ims[ax_i][i, 0] if n_channels == 1 else concat(
                    ims[ax_i][i])
                im = ax.imshow(ims_tmp, **default_kwargs)
                [s.set_visible(False) for s in ax.spines.values()]
                ims_curr.append(im)

        ims_ani.append(ims_curr)

    plt.tight_layout(pad=0)

    ani = animation.ArtistAnimation(fig, ims_ani, blit=True, repeat_delay=1000)
    save_movie(save_file, ani, frame_rate=frame_rate)
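A minimal usage sketch with synthetic data, assuming the imports above; the array shapes, titles, and output path are illustrative only:

# Hypothetical data: three single-channel image stacks plus one empty slot,
# laid out on a 2x2 grid (the empty slot leaves its grid location blank).
n_frames, n_channels, y_pix, x_pix = 20, 1, 32, 48
ims_orig = np.random.rand(n_frames, n_channels, y_pix, x_pix)
ims_recon = np.clip(ims_orig + 0.05 * np.random.randn(*ims_orig.shape), 0, 1)
ims_list = [ims_orig, ims_recon, 0.5 + ims_orig - ims_recon, []]
titles = ['Original', 'Reconstructed', 'Residual', '']

make_reconstruction_movie(
    ims_list, titles=titles, n_rows=2, n_cols=2,
    save_file='/tmp/reconstruction_test.mp4',  # illustrative path; writing requires ffmpeg
    frame_rate=15)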
Example 2
def make_ae_reconstruction_movie(ims_orig,
                                 ims_recon_ae,
                                 ims_recon_lin=None,
                                 save_file=None,
                                 frame_rate=15):
    """Produce movie with original video, reconstructed video, and residual.

    Parameters
    ----------
    ims_orig : :obj:`np.ndarray`
        shape (n_frames, n_channels, y_pix, x_pix)
    ims_recon_ae : :obj:`np.ndarray`
        shape (n_frames, n_channels, y_pix, x_pix)
    ims_recon_lin : :obj:`np.ndarray`, optional
        shape (n_frames, n_channels, y_pix, x_pix)
    save_file : :obj:`str`, optional
        full save file (path and filename)
    frame_rate : :obj:`float`, optional
        frame rate of saved movie

    """

    n_frames, n_channels, y_pix, x_pix = ims_orig.shape
    n_cols = 1 if ims_recon_lin is None else 2
    n_rows = 3
    offset = 1  # 0 if ims_recon_lin is None else 1
    scale_ = 5
    fig_width = scale_ * n_cols * n_channels / 2
    fig_height = y_pix / x_pix * scale_ * n_rows / 2
    fig = plt.figure(figsize=(fig_width, fig_height + offset), dpi=100)

    gs = GridSpec(n_rows, n_cols, figure=fig)
    axs = []
    if ims_recon_lin is not None:
        axs.append(fig.add_subplot(gs[0, 0]))  # 0: original frames
        axs.append(fig.add_subplot(gs[1, 0]))  # 1: ae reconstructed frames
        axs.append(fig.add_subplot(gs[1, 1]))  # 2: ae residuals
        axs.append(fig.add_subplot(gs[2, 0]))  # 3: linear reconstructed frames
        axs.append(fig.add_subplot(gs[2, 1]))  # 4: linear residuals
    else:
        axs.append(fig.add_subplot(gs[0, 0]))  # 0: original frames
        axs.append(fig.add_subplot(gs[1, 0]))  # 1: ae reconstructed frames
        axs.append(fig.add_subplot(gs[2, 0]))  # 2: ae residuals
    for ax in fig.axes:
        ax.set_xticks([])
        ax.set_yticks([])

    fontsize = 12
    axs[0].set_title('Original', fontsize=fontsize)
    axs[1].set_title('Conv AE reconstructed', fontsize=fontsize)
    axs[2].set_title('Conv AE residual', fontsize=fontsize)
    if ims_recon_lin is not None:
        axs[3].set_title('Linear AE reconstructed', fontsize=fontsize)
        axs[4].set_title('Linear AE residual', fontsize=fontsize)

    ims_res_ae = ims_orig - ims_recon_ae
    if ims_recon_lin is not None:
        ims_res_lin = ims_orig - ims_recon_lin
    else:
        ims_res_lin = None

    default_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}

    # ims is a list of lists, each row is a list of artists to draw in the current frame; here we
    # are just animating one artist, the image, in each frame
    ims = []
    for i in range(ims_orig.shape[0]):

        ims_curr = []

        # original video
        ims_tmp = ims_orig[i, 0] if n_channels == 1 else concat(ims_orig[i])
        im = axs[0].imshow(ims_tmp, **default_kwargs)
        [s.set_visible(False) for s in axs[0].spines.values()]
        ims_curr.append(im)

        # ae reconstructed video
        ims_tmp = ims_recon_ae[i, 0] if n_channels == 1 else concat(
            ims_recon_ae[i])
        im = axs[1].imshow(ims_tmp, **default_kwargs)
        [s.set_visible(False) for s in axs[1].spines.values()]
        ims_curr.append(im)

        # ae residual video
        ims_tmp = ims_res_ae[i, 0] if n_channels == 1 else concat(ims_res_ae[i])
        im = axs[2].imshow(0.5 + ims_tmp, **default_kwargs)
        [s.set_visible(False) for s in axs[2].spines.values()]
        ims_curr.append(im)

        if ims_recon_lin is not None:

            # linear reconstructed video
            ims_tmp = ims_recon_lin[i, 0] if n_channels == 1 else concat(
                ims_recon_lin[i])
            im = axs[3].imshow(ims_tmp, **default_kwargs)
            [s.set_visible(False) for s in axs[3].spines.values()]
            ims_curr.append(im)

            # linear residual video
            ims_tmp = ims_res_lin[i, 0] if n_channels == 1 else concat(
                ims_res_lin[i])
            im = axs[4].imshow(0.5 + ims_tmp, **default_kwargs)
            [s.set_visible(False) for s in axs[4].spines.values()]
            ims_curr.append(im)

        ims.append(ims_curr)

    plt.tight_layout(pad=0)

    ani = animation.ArtistAnimation(fig, ims, blit=True, repeat_delay=1000)
    writer = FFMpegWriter(fps=frame_rate, bitrate=-1)

    if save_file is not None:
        make_dir_if_not_exists(save_file)
        if save_file[-3:] != 'mp4':
            save_file += '.mp4'
        print('saving video to %s...' % save_file, end='')
        ani.save(save_file, writer=writer)
        # if save_file[-3:] != 'gif':
        #     save_file += '.gif'
        # ani.save(save_file, writer='imagemagick', fps=15)
        print('done')
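A usage sketch with synthetic arrays; passing ims_recon_lin adds the second column with the linear-AE panels (shapes and the output path are illustrative assumptions):

# Hypothetical data: original frames plus conv-AE and linear-AE reconstructions.
n_frames, n_channels, y_pix, x_pix = 20, 1, 32, 48
ims_orig = np.random.rand(n_frames, n_channels, y_pix, x_pix)
ims_recon_ae = np.clip(ims_orig + 0.05 * np.random.randn(*ims_orig.shape), 0, 1)
ims_recon_lin = np.clip(ims_orig + 0.10 * np.random.randn(*ims_orig.shape), 0, 1)

make_ae_reconstruction_movie(
    ims_orig, ims_recon_ae, ims_recon_lin=ims_recon_lin,
    save_file='/tmp/ae_reconstruction_test',  # '.mp4' is appended if missing
    frame_rate=15)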
Example 3
def make_neural_reconstruction_movie(ims_orig,
                                     ims_recon_ae,
                                     ims_recon_neural,
                                     latents_ae,
                                     latents_neural,
                                     ae_model_class='AE',
                                     colored_predictions=False,
                                     scale=0.5,
                                     xtick_locs=None,
                                     frame_rate_beh=None,
                                     save_file=None,
                                     frame_rate=15):
    """Produce movie with original video, ae reconstructed video, and neural reconstructed video.

    Latent traces are additionally plotted, as well as the residual between the ae reconstruction
    and the neural reconstruction.

    Parameters
    ----------
    ims_orig : :obj:`np.ndarray`
        original images; shape (n_frames, n_channels, y_pix, x_pix)
    ims_recon_ae : :obj:`np.ndarray`
        images reconstructed by AE; shape (n_frames, n_channels, y_pix, x_pix)
    ims_recon_neural : :obj:`np.ndarray`
        images reconstructed by neural activity; shape (n_frames, n_channels, y_pix, x_pix)
    latents_ae : :obj:`np.ndarray`
        original AE latents; shape (n_frames, n_latents)
    latents_neural : :obj:`np.ndarray`
        latents reconstructed by neural activity; shape (n_frames, n_latents)
    ae_model_class : :obj:`str`, optional
        'AE', 'VAE', etc. for plot titles
    colored_predictions : :obj:`bool`, optional
        False to plot reconstructions in black, True to plot in different colors
    scale : :obj:`float`, optional
        scale magnitude of traces
    xtick_locs : :obj:`array-like`, optional
        tick locations in units of bins
    frame_rate_beh : :obj:`float`, optional
        frame rate of behavioral video; to properly relabel xticks
    save_file : :obj:`str`, optional
        full save file (path and filename)
    frame_rate : :obj:`float`, optional
        frame rate of saved movie

    """

    means = np.nanmean(latents_ae, axis=0)
    std = np.nanstd(latents_ae) / scale

    latents_ae_sc = (latents_ae - means) / std
    latents_dec_sc = (latents_neural - means) / std

    n_channels, y_pix, x_pix = ims_orig.shape[1:]
    n_time, n_ae_latents = latents_ae.shape

    n_cols = 3
    n_rows = 2
    offset = 2
    scale_ = 5
    fig_width = scale_ * n_cols * n_channels / 2
    fig_height = y_pix / x_pix * scale_ * n_rows / 2
    fig = plt.figure(figsize=(fig_width, fig_height + offset))

    gs = GridSpec(n_rows, n_cols, figure=fig)
    axs = []
    axs.append(fig.add_subplot(gs[0, 0]))  # 0: original frames
    axs.append(fig.add_subplot(gs[0, 1]))  # 1: ae reconstructed frames
    axs.append(fig.add_subplot(gs[0, 2]))  # 2: neural reconstructed frames
    axs.append(fig.add_subplot(gs[1, 0]))  # 3: residual
    axs.append(fig.add_subplot(gs[1, 1:3]))  # 4: ae and predicted ae latents
    for i, ax in enumerate(fig.axes):
        ax.set_yticks([])
        if i > 2:
            ax.get_xaxis().set_tick_params(labelsize=12, direction='in')
    axs[0].set_xticks([])
    axs[1].set_xticks([])
    axs[2].set_xticks([])
    axs[3].set_xticks([])

    # set panel titles
    fontsize = 12
    idx = 0
    axs[idx].set_title('Original', fontsize=fontsize)
    idx += 1
    axs[idx].set_title('%s reconstructed' % ae_model_class, fontsize=fontsize)
    idx += 1
    axs[idx].set_title('Neural reconstructed', fontsize=fontsize)
    idx += 1
    axs[idx].set_title('Reconstructions residual', fontsize=fontsize)
    idx += 1
    axs[idx].set_title('%s latent predictions' % ae_model_class,
                       fontsize=fontsize)
    if xtick_locs is not None and frame_rate_beh is not None:
        axs[idx].set_xticks(xtick_locs)
        axs[idx].set_xticklabels(
            (np.asarray(xtick_locs) / frame_rate_beh).astype('int'))
        axs[idx].set_xlabel('Time (s)', fontsize=fontsize)
    else:
        axs[idx].set_xlabel('Time (bins)', fontsize=fontsize)

    time = np.arange(n_time)

    ims_res = ims_recon_ae - ims_recon_neural

    im_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
    tr_kwargs = {'animated': True, 'linewidth': 2}
    latents_ae_color = [0.2, 0.2, 0.2]

    label_ae_base = '%s latents' % ae_model_class
    label_dec_base = 'Predicted %s latents' % ae_model_class

    # ims is a list of lists, each row is a list of artists to draw in the
    # current frame; here we are just animating one artist, the image, in
    # each frame
    ims = []
    for i in range(n_time):

        ims_curr = []
        idx = 0

        if i % 100 == 0:
            print('processing frame %03i/%03i' % (i, n_time))

        ###################
        # behavioral videos
        ###################
        # original video
        ims_tmp = ims_orig[i, 0] if n_channels == 1 else concat(ims_orig[i])
        im = axs[idx].imshow(ims_tmp, **im_kwargs)
        ims_curr.append(im)
        idx += 1

        # ae reconstruction
        ims_tmp = ims_recon_ae[i, 0] if n_channels == 1 else concat(
            ims_recon_ae[i])
        im = axs[idx].imshow(ims_tmp, **im_kwargs)
        ims_curr.append(im)
        idx += 1

        # neural reconstruction
        ims_tmp = ims_recon_neural[i, 0] if n_channels == 1 else concat(
            ims_recon_neural[i])
        im = axs[idx].imshow(ims_tmp, **im_kwargs)
        ims_curr.append(im)
        idx += 1

        # residual
        ims_tmp = ims_res[i, 0] if n_channels == 1 else concat(ims_res[i])
        im = axs[idx].imshow(0.5 + ims_tmp, **im_kwargs)
        ims_curr.append(im)
        idx += 1

        ########
        # traces
        ########
        # latents over time
        axs[idx].set_prop_cycle(None)  # reset colors
        for latent in range(n_ae_latents):
            if colored_predictions:
                latents_dec_color = axs[idx]._get_lines.get_next_color()
            else:
                latents_dec_color = [0, 0, 0]
            # just put labels on last lvs
            if latent == n_ae_latents - 1 and i == 0:
                label_ae = label_ae_base
                label_dec = label_dec_base
            else:
                label_ae = None
                label_dec = None
            im = axs[idx].plot(time[0:i + 1],
                               latent + latents_ae_sc[0:i + 1, latent],
                               color=latents_ae_color,
                               alpha=0.7,
                               label=label_ae,
                               **tr_kwargs)[0]
            axs[idx].spines['top'].set_visible(False)
            axs[idx].spines['right'].set_visible(False)
            axs[idx].spines['left'].set_visible(False)
            ims_curr.append(im)
            im = axs[idx].plot(time[0:i + 1],
                               latent + latents_dec_sc[0:i + 1, latent],
                               color=latents_dec_color,
                               label=label_dec,
                               **tr_kwargs)[0]
            axs[idx].spines['top'].set_visible(False)
            axs[idx].spines['right'].set_visible(False)
            axs[idx].spines['left'].set_visible(False)
            if colored_predictions:
                # original latents - gray
                orig_line = mlines.Line2D([], [],
                                          color=[0.2, 0.2, 0.2],
                                          linewidth=3,
                                          alpha=0.7)
                # predicted latents - cycle through some colors
                colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
                dls = []
                for c in range(5):
                    dls.append(
                        mlines.Line2D([], [],
                                      linewidth=3,
                                      linestyle='--',
                                      dashes=(0, 3 * c, 20, 1),
                                      color='%s' % colors[c]))
                plt.legend([orig_line, tuple(dls)],
                           [label_ae_base, label_dec_base],
                           loc='lower right',
                           fontsize=fontsize,
                           frameon=True,
                           framealpha=0.7,
                           edgecolor=[1, 1, 1])
            else:
                plt.legend(loc='lower right',
                           fontsize=fontsize,
                           frameon=True,
                           framealpha=0.7,
                           edgecolor=[1, 1, 1])
            ims_curr.append(im)
        ims.append(ims_curr)

    plt.tight_layout(pad=0)

    ani = animation.ArtistAnimation(fig, ims, blit=True, repeat_delay=1000)
    save_movie(save_file, ani, frame_rate=frame_rate)
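A sketch of a call with synthetic frames and latents; xtick_locs together with frame_rate_beh relabels the trace panel's x-axis in seconds (all values here are illustrative assumptions):

# Hypothetical data: 200 frames, 6 AE latents, plus noisy neural predictions.
n_frames, n_channels, y_pix, x_pix = 200, 1, 32, 48
n_latents = 6
ims_orig = np.random.rand(n_frames, n_channels, y_pix, x_pix)
ims_recon_ae = np.clip(ims_orig + 0.05 * np.random.randn(*ims_orig.shape), 0, 1)
ims_recon_neural = np.clip(ims_orig + 0.10 * np.random.randn(*ims_orig.shape), 0, 1)
latents_ae = np.random.randn(n_frames, n_latents)
latents_neural = latents_ae + 0.2 * np.random.randn(n_frames, n_latents)

make_neural_reconstruction_movie(
    ims_orig, ims_recon_ae, ims_recon_neural, latents_ae, latents_neural,
    ae_model_class='AE', colored_predictions=True, scale=0.5,
    xtick_locs=np.arange(0, n_frames, 50), frame_rate_beh=50,  # assumed 50 Hz behavioral video
    save_file='/tmp/neural_reconstruction_test.mp4',  # illustrative path
    frame_rate=15)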
Example 4
def make_neural_reconstruction_movie(ims_orig,
                                     ims_recon_ae,
                                     ims_recon_neural,
                                     latents_ae,
                                     latents_neural,
                                     save_file=None,
                                     frame_rate=15):
    """Produce movie with original video, ae reconstructed video, and neural reconstructed video.

    Latent traces are additionally plotted, as well as the residual between the ae reconstruction
    and the neural reconstruction.

    Parameters
    ----------
    ims_orig : :obj:`np.ndarray`
        shape (n_frames, n_channels, y_pix, x_pix)
    ims_recon_ae : :obj:`np.ndarray`
        shape (n_frames, n_channels, y_pix, x_pix)
    ims_recon_neural : :obj:`np.ndarray`
        shape (n_frames, n_channels, y_pix, x_pix)
    latents_ae : :obj:`np.ndarray`
        shape (n_frames, n_latents)
    latents_neural : :obj:`np.ndarray`
        shape (n_frames, n_latents)
    save_file : :obj:`str`, optional
        full save file (path and filename)
    frame_rate : :obj:`float`, optional
        frame rate of saved movie

    """

    means = np.mean(latents_ae, axis=0)
    std = np.std(latents_ae) * 2

    latents_ae_sc = (latents_ae - means) / std
    latents_dec_sc = (latents_neural - means) / std

    n_channels, y_pix, x_pix = ims_orig.shape[1:]
    n_time, n_ae_latents = latents_ae.shape

    n_cols = 3
    n_rows = 2
    offset = 2
    scale_ = 5
    fig_width = scale_ * n_cols * n_channels / 2
    fig_height = y_pix / x_pix * scale_ * n_rows / 2
    fig = plt.figure(figsize=(fig_width, fig_height + offset))

    gs = GridSpec(n_rows, n_cols, figure=fig)
    axs = []
    axs.append(fig.add_subplot(gs[0, 0]))  # 0: original frames
    axs.append(fig.add_subplot(gs[0, 1]))  # 1: ae reconstructed frames
    axs.append(fig.add_subplot(gs[0, 2]))  # 2: neural reconstructed frames
    axs.append(fig.add_subplot(gs[1, 0]))  # 3: residual
    axs.append(fig.add_subplot(gs[1, 1:3]))  # 4: ae and predicted ae latents
    for i, ax in enumerate(fig.axes):
        ax.set_yticks([])
        if i > 2:
            ax.get_xaxis().set_tick_params(labelsize=12, direction='in')
    axs[0].set_xticks([])
    axs[1].set_xticks([])
    axs[2].set_xticks([])
    axs[3].set_xticks([])

    # set panel titles
    fontsize = 12
    idx = 0
    axs[idx].set_title('Original', fontsize=fontsize)
    idx += 1
    axs[idx].set_title('AE reconstructed', fontsize=fontsize)
    idx += 1
    axs[idx].set_title('Neural reconstructed', fontsize=fontsize)
    idx += 1
    axs[idx].set_title('Reconstructions residual', fontsize=fontsize)
    idx += 1
    axs[idx].set_title('AE latent predictions', fontsize=fontsize)
    axs[idx].set_xlabel('Time (bins)', fontsize=fontsize)

    time = np.arange(n_time)

    ims_res = ims_recon_ae - ims_recon_neural

    im_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
    tr_kwargs = {'animated': True, 'linewidth': 2}
    latents_ae_color = [0.2, 0.2, 0.2]
    latents_dec_color = [0, 0, 0]

    # ims is a list of lists, each row is a list of artists to draw in the
    # current frame; here we are just animating one artist, the image, in
    # each frame
    ims = []
    for i in range(n_time):

        ims_curr = []
        idx = 0

        if i % 100 == 0:
            print('processing frame %03i/%03i' % (i, n_time))

        ###################
        # behavioral videos
        ###################
        # original video
        ims_tmp = ims_orig[i, 0] if n_channels == 1 else concat(ims_orig[i])
        im = axs[idx].imshow(ims_tmp, **im_kwargs)
        ims_curr.append(im)
        idx += 1

        # ae reconstruction
        ims_tmp = ims_recon_ae[i, 0] if n_channels == 1 else concat(
            ims_recon_ae[i])
        im = axs[idx].imshow(ims_tmp, **im_kwargs)
        ims_curr.append(im)
        idx += 1

        # neural reconstruction
        ims_tmp = ims_recon_neural[i, 0] if n_channels == 1 else concat(
            ims_recon_neural[i])
        im = axs[idx].imshow(ims_tmp, **im_kwargs)
        ims_curr.append(im)
        idx += 1

        # residual
        ims_tmp = ims_res[i, 0] if n_channels == 1 else concat(ims_res[i])
        im = axs[idx].imshow(0.5 + ims_tmp, **im_kwargs)
        ims_curr.append(im)
        idx += 1

        ########
        # traces
        ########
        # latents over time
        for latent in range(n_ae_latents):
            # just put labels on last lvs
            if latent == n_ae_latents - 1 and i == 0:
                label_ae = 'AE latents'
                label_dec = 'Predicted AE latents'
            else:
                label_ae = None
                label_dec = None
            im = axs[idx].plot(time[0:i + 1],
                               latent + latents_ae_sc[0:i + 1, latent],
                               color=latents_ae_color,
                               alpha=0.7,
                               label=label_ae,
                               **tr_kwargs)[0]
            axs[idx].spines['top'].set_visible(False)
            axs[idx].spines['right'].set_visible(False)
            axs[idx].spines['left'].set_visible(False)
            ims_curr.append(im)
            im = axs[idx].plot(time[0:i + 1],
                               latent + latents_dec_sc[0:i + 1, latent],
                               color=latents_dec_color,
                               label=label_dec,
                               **tr_kwargs)[0]
            axs[idx].spines['top'].set_visible(False)
            axs[idx].spines['right'].set_visible(False)
            axs[idx].spines['left'].set_visible(False)
            plt.legend(loc='lower right',
                       fontsize=fontsize,
                       frameon=True,
                       framealpha=0.7,
                       edgecolor=[1, 1, 1])
            ims_curr.append(im)
        ims.append(ims_curr)

    plt.tight_layout(pad=0)

    ani = animation.ArtistAnimation(fig, ims, blit=True, repeat_delay=1000)
    writer = FFMpegWriter(fps=frame_rate, bitrate=-1)

    if save_file is not None:
        make_dir_if_not_exists(save_file)
        if save_file[-3:] != 'mp4':
            save_file += '.mp4'
        print('saving video to %s...' % save_file, end='')
        ani.save(save_file, writer=writer)
        print('done')
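This variant takes the same arrays as Example 3 but fixes the plot styling; a usage sketch with synthetic data of the same form:

# Hypothetical data of the same shapes as in the previous example.
n_frames, n_channels, y_pix, x_pix = 100, 1, 32, 48
ims_orig = np.random.rand(n_frames, n_channels, y_pix, x_pix)
ims_recon_ae = np.clip(ims_orig + 0.05 * np.random.randn(*ims_orig.shape), 0, 1)
ims_recon_neural = np.clip(ims_orig + 0.10 * np.random.randn(*ims_orig.shape), 0, 1)
latents_ae = np.random.randn(n_frames, 8)
latents_neural = latents_ae + 0.2 * np.random.randn(n_frames, 8)

make_neural_reconstruction_movie(
    ims_orig, ims_recon_ae, ims_recon_neural, latents_ae, latents_neural,
    save_file='/tmp/neural_reconstruction_simple',  # '.mp4' is appended if missing
    frame_rate=15)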