Code Example #1
File: video_view.py  Project: inwwin/waterwave-ddm
def main():
    import argparse
    import pathlib
    # import json

    parser = argparse.ArgumentParser()
    # parser.add_argument('--method', default='diff', choices=['diff', 'fft', 'corr'])
    parser.add_argument('-p', '--position', nargs=2, default=[0, 0], type=int)
    parser.add_argument('size', type=int)
    parser.add_argument(
        'vid_in')  # Passed directly to PyAV, which then passes it on to FFmpeg
    params = parser.parse_args()

    # print(params)

    frames_iter = pyav_single_frames_reader(params.vid_in)
    vid_info = next(frames_iter)
    # vid_info.pop('codec_context')

    video = frames_iter
    video = crop(video, (
        (params.position[0], params.position[0] + params.size),
        (params.position[1], params.position[1] + params.size),
    ))

    vid_info['vid_in_path'] = str(pathlib.Path(params.vid_in).resolve())

    print(vid_info)

    viewer = VideoViewer(video, vmin=0, vmax=255, count=vid_info['duration'])
    viewer.show()
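
The `crop` call above restricts each incoming frame to a square window anchored at `--position`. The helper below is only a minimal sketch of what that step amounts to, assuming each item yielded after the info dict is a tuple of one or more camera frames and that `crop` slices every frame with the given `(start, stop)` ranges; the function name and the axis order are assumptions, not taken from the source.

import numpy as np

def crop_frames(frames, roi):
    """Hypothetical stand-in for crop(): slice every frame to roi.

    roi = ((r0, r1), (c0, c1)); only illustrates the slicing, not the
    library's actual implementation.
    """
    (r0, r1), (c0, c1) = roi
    for frame_tuple in frames:
        # each item is a tuple of camera frames; slice every camera
        yield tuple(np.asarray(f)[r0:r1, c0:c1] for f in frame_tuple)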
Code Example #2
        plt.subplot(121)
        plt.imshow(video[0][0], cmap="gray")
        plt.colorbar()
        plt.yticks([])
        plt.xticks([])
        plt.title("Camera 1")
        plt.subplot(122)
        plt.imshow(video[0][1], cmap="gray")
        plt.colorbar()
        plt.yticks([])
        plt.xticks([])
        plt.title("Camera 2")
        plt.savefig("frames.pdf")

    #: camera 1
    viewer1 = VideoViewer(video,
                          count=NFRAMES_DUAL,
                          id=0,
                          vmin=0,
                          cmap="gray",
                          vmax=VMAX)
    viewer1.show()

    #: camera 2
    viewer2 = VideoViewer(video,
                          count=NFRAMES_DUAL,
                          id=1,
                          vmin=0,
                          cmap="gray",
                          vmax=VMAX)
    viewer2.show()
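
In this dual-camera setup each item of `video` is a pair of frames, which is why the plotting code indexes `video[0][0]` and `video[0][1]` and the two viewers pass `id=0` and `id=1`. The snippet below is a minimal sketch of building such a pair-per-frame video from two NumPy stacks; the array names, shapes, and values are made up, and `VideoViewer` is the same viewer used throughout these examples.

import numpy as np

# hypothetical per-camera stacks of shape (nframes, height, width)
cam1 = np.random.poisson(100, (16, 32, 32)).astype("uint16")
cam2 = np.random.poisson(100, (16, 32, 32)).astype("uint16")

# a dual-camera video is an iterable of (frame1, frame2) tuples
dual_video = list(zip(cam1, cam2))

# id selects which element of each tuple the viewer displays,
# exactly as id=0 / id=1 select camera 1 / camera 2 above
viewer = VideoViewer(dual_video, count=16, id=0, vmin=0, cmap="gray")
viewer.show()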
Code Example #3
    img = ax.imshow(ddm_array[:, :, 0], **imshow_kwargs)
    fig.colorbar(img, ax=ax)
    ti_indicator = fig.text(.02,
                            .98,
                            '$t_i$ = 0 frames\n$\\tau$ = 0.000 s',
                            verticalalignment='top')
    ax.set_xlabel(f'$q_x$ (${wavenumber_unit}$)')
    ax.set_ylabel(f'$q_y$ (${wavenumber_unit}$)')

    def change_img(ti):
        img.set_data(ddm_array[:, :, ti])
        ti_indicator.set_text(
            f"$t_i$ = {ti} frames\n$\\tau$ = {ti*frame_interval:.3f} s")
        return img,

    animation = matplotlib.animation.FuncAnimation(fig,
                                                   change_img,
                                                   frames=params.frames,
                                                   blit=False,
                                                   interval=interval)
    # plt.show()
    animation.save(
        params.save_html_path,
        matplotlib.animation.HTMLWriter(fps=1000 / interval,
                                        default_mode='loop'))
else:
    ddm_iter = [ddm_array[:, :, i] for i in range(ddm_array.shape[2])]
    viewer = VideoViewer(ddm_iter, count=params.frames, **imshow_kwargs)
    viewer.pause_duration = interval / 1000
    viewer.show()
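
If a standalone video file is preferred over the HTML output, matplotlib's standard FFMpegWriter can be swapped in for HTMLWriter. A minimal sketch, assuming an FFmpeg binary is available on the PATH; the output filename here is hypothetical.

# alternative to the HTMLWriter call above: write a standalone MP4 instead
mp4_writer = matplotlib.animation.FFMpegWriter(fps=1000 / interval)
animation.save("ddm_animation.mp4", mp4_writer)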
Code Example #4
File: full_video.py  Project: inwwin/cddm
                              intensity=INTENSITY,
                              dtype="uint16")

#: crop video to selected region of interest
video = crop(video, roi=((0, SHAPE[0]), (0, SHAPE[1])))

#: apply dust particles
if APPLY_DUST:
    dust = plt.imread(DUST1_PATH)[0:SHAPE[0], 0:SHAPE[1],
                                  0]  # float normalized to (0, 1)
    dust = ((dust, ), ) * NFRAMES_FULL
    video = multiply(video, dust)

video = (tuple((adc(f,
                    noise_model=NOISE_MODEL,
                    saturation=SATURATION,
                    readout_noise=READOUT_NOISE,
                    bit_depth=BIT_DEPTH) for f in frames)) for frames in video)

if __name__ == "__main__":
    #: loading the video is not required, but reading it into memory lets us
    #: scroll back and forth in the viewer. Uncomment the line below.
    #video = load(video, NFRAMES) # loads and displays progress bar

    #: VideoViewer either expects a multi_frame iterator, or a numpy array
    viewer = VideoViewer(video,
                         count=NFRAMES_FULL,
                         vmin=0,
                         cmap="gray",
                         vmax=VMAX)
    viewer.show()
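
Spelled out, the commented-out load call above is the variant that materializes the generator so the viewer can also scroll backwards. A minimal sketch, assuming load is imported from the same package as crop and multiply and keeps the call signature suggested by the commented-out line:

if __name__ == "__main__":
    # materialize the generator first (shows a progress bar), so the
    # viewer can scroll backwards as well as forwards
    video = load(video, NFRAMES_FULL)
    viewer = VideoViewer(video, count=NFRAMES_FULL, vmin=0, cmap="gray", vmax=VMAX)
    viewer.show()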
Code Example #5
File: dual_video_simulator.py  Project: inwwin/cddm
video = simple_brownian_video(t1,
                              t2,
                              shape=SIMSHAPE,
                              background=BACKGROUND,
                              sigma=SIGMA,
                              delta=DELTA,
                              intensity=INTENSITY)

#: crop video to selected region of interest
video = crop(video, roi=((0, SHAPE[0]), (0, SHAPE[1])))

#: apply dust particles
dust1 = plt.imread(DUST1_PATH)[..., 0]  # float normalized to (0, 1)
dust2 = plt.imread(DUST2_PATH)[..., 0]
dust = ((dust1, dust2), ) * NFRAMES

video = multiply(video, dust)

if __name__ == "__main__":

    #: loading the video is not required, but reading it into memory lets us
    #: scroll back and forth in the viewer. Uncomment the line below.
    #video = load(video, NFRAMES) # loads and displays progress bar

    #: camera 1
    viewer1 = VideoViewer(video, count=NFRAMES, id=0, vmin=0, cmap="gray")
    viewer1.show()

    #: camera 2
    viewer2 = VideoViewer(video, count=NFRAMES, id=1, vmin=0, cmap="gray")
    viewer2.show()
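
The dust step above pairs every simulated frame with a static per-camera mask; the repeated ((dust1, dust2),) * NFRAMES tuple simply reuses the same two masks for all frames. The sketch below shows what the elementwise multiply amounts to under that assumption; it is a hypothetical stand-in, not the library's actual implementation of multiply().

def multiply_sketch(video, masks):
    """Hypothetical per-frame, per-camera elementwise multiplication."""
    for frames, mask_pair in zip(video, masks):
        # multiply each camera frame by its corresponding mask
        yield tuple(frame * mask for frame, mask in zip(frames, mask_pair))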