def test_show_matplotlib(self):
    """Run the viewers with the matplotlib backend on a dummy dual-frame video."""
    set_showlib("matplotlib")
    video = fromarrays((vid, vid))
    # an invalid mode must raise ValueError
    with self.assertRaises(ValueError):
        show_fft(video, mode="wrong")
    video = show_video(video)
    video = show_fft(video, mode="real")
    video = show_fft(video, clip=256)
    video = show_diff(video)
    video = play(video, fps=100)
    video = load(video, 128)
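#: --- sketch, not part of the original test module ---------------------------
#: the test above relies on module-level fixtures defined elsewhere; a minimal
#: stand-in could look like the following (import paths, names and sizes are
#: assumptions here, not the package's own test setup):
import numpy as np
from cddm.conf import set_showlib
from cddm.video import fromarrays, show_video, show_fft, show_diff, play, load

SHAPE = (64, 64)
NFRAMES = 128
vid = np.random.rand(NFRAMES, *SHAPE)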
import matplotlib.pyplot as plt

#: imports added for completeness; module paths are assumed for a standard cddm install
from cddm.sim import simple_brownian_video
from cddm.video import multiply, show_video, show_diff, show_fft, play
from cddm.conf import set_showlib

#: NFRAMES, SHAPE and BACKGROUND are simulation constants defined in the
#: example's configuration

# test dual camera video (regularly spaced)
video = simple_brownian_video(range(NFRAMES), range(NFRAMES), shape=SHAPE,
                              background=BACKGROUND)

#: apply dust particles
dust1 = plt.imread('dust1.png')[..., 0]  # float normalized to (0, 1)
dust2 = plt.imread('dust2.png')[..., 0]

dust = ((dust1, dust2),) * NFRAMES
video = multiply(video, dust)

video = show_video(video)
video = show_diff(video)
video = show_fft(video, mode="real")

#: set fps to your required FPS. The video will be updated only if visualization
#: is fast enough not to interfere with the acquisition.
#: here video is still a valid video iterator; no visualization has taken place yet
video = play(video, fps=100)

#: use either cv2 or pyqtgraph; matplotlib is too slow for live preview
#set_showlib("cv2")
set_showlib("pyqtgraph")

# now go through the frames and show the videos
for frames in video:
    pass
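#: --- sketch, not part of the original example -------------------------------
#: if the dust1.png / dust2.png masks are not at hand, comparable float masks in
#: the (0, 1) range can be synthesized with numpy; the helper below is purely
#: illustrative (a mostly transparent mask with a few darkened pixels):
import numpy as np

def synthetic_dust(shape, nspecks=20, seed=0):
    """Return a float mask of ones with `nspecks` randomly placed darkened pixels."""
    rng = np.random.default_rng(seed)
    mask = np.ones(shape, dtype=float)
    rows = rng.integers(0, shape[0], nspecks)
    cols = rng.integers(0, shape[1], nspecks)
    mask[rows, cols] = rng.uniform(0.2, 0.8, nspecks)
    return mask

#dust1 = synthetic_dust(SHAPE, seed=1)
#dust2 = synthetic_dust(SHAPE, seed=2)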
from cddm import conf
# cddm helpers used below (import paths assumed as in the cddm package layout)
from cddm.video import show_video, show_fft, play
from cddm.fft import rfft2
from cddm.multitau import iccorr_multi
import numpy as np

# video iterator
from simple_brownian_video import get_dual_video
# trigger parameters
from simple_brownian_video import t1, t2, PERIOD, SHAPE

# setting this to 2 shows a progress bar
conf.set_verbose(2)

# obtain the frames iterator
dual_video = get_dual_video()

# optionally apply a blackman window
#dual_video = apply_window(dual_video, blackman(SHAPE))

dual_video = show_video(dual_video)
fdual_video = rfft2(dual_video, kisize=64, kjsize=64)
fdual_video = show_fft(fdual_video)
fdual_video = play(fdual_video, fps=100)

data, bg = iccorr_multi(fdual_video, t1, t2, period=PERIOD, level=5,
                        chunk_size=256, show=True, auto_background=False,
                        binning=True, return_background=True)
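#: --- sketch, not part of the original example -------------------------------
#: a typical next step is to normalize the multilevel data and merge the linear
#: and multi-level parts onto a common log-spaced time axis. normalize_multi and
#: log_merge are assumed to be importable from cddm.multitau, as in the package
#: examples; exact signatures may differ between cddm versions.
from cddm.multitau import normalize_multi, log_merge

fast, slow = normalize_multi(data, bg)   # normalized linear and multi-level parts
x, y = log_merge(fast, slow)             # log-spaced delay times and merged data

#: y[i, j] is then the correlation function of wave vector (i, j), sampled at
#: the delay times in x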