Code Example #1
File: test_tqdm.py Project: uschmidt83/magicgui
    def long_func2():
        for i in trange(4):
            for x in trange(4):
                pass

        for x in trange(4):
            pass
Code Example #2
File: test_tqdm.py Project: uschmidt83/magicgui
def test_tqdm_outside_of_functiongui():
    """Test that we can make a tqdm wrapper with ProgressBar outside of @magicgui."""
    with trange(10) as pbar:
        assert pbar.n == 0
        assert pbar.total == 10
        assert not pbar._mgui

    assert tuple(trange(5)) == tuple(range(5))
Code Example #3
def long_function(steps=10,
                  repeats=4,
                  choices="ABCDEFGHIJKLMNOP12345679",
                  char="",
                  delay=0.05):
    """Long running computation with nested iterators."""
    # trange and tqdm accept all the kwargs from tqdm itself, as well as any
    # valid kwargs for magicgui.widgets.ProgressBar (such as "label")
    for r in trange(repeats, label="repeats"):
        letters = [random.choice(choices) for _ in range(steps)]
        # magicgui's `tqdm`, like standard `tqdm`, accepts any iterable
        # this progress bar is nested and will be run & reset multiple times
        for letter in tqdm(letters, label="steps"):
            long_function.char.value = letter
            sleep(delay)
Code Example #4
def long_running(steps=10, delay=0.1):
    """Long running computation with range iterator."""
    # trange(steps) is a shortcut for `tqdm(range(steps))`
    for i in trange(steps):
        sleep(delay)
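Note (not taken from any project listed here): the sketch below shows how a trange-based function like long_running above is typically exposed as a widget, assuming magicgui with a Qt backend is installed. Inside a @magicgui-decorated function, magicgui's trange renders as a ProgressBar in the widget instead of printing to the console.

from time import sleep

from magicgui import magicgui
from magicgui.tqdm import trange


@magicgui(call_button="Run")
def long_running(steps=10, delay=0.1):
    """Long running computation with range iterator."""
    # trange(steps) is a shortcut for `tqdm(range(steps))`
    for _ in trange(steps):
        sleep(delay)


# long_running.show(run=True)  # uncomment to launch the widget and start the event loop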
Code Example #5
File: OPMMirrorReconstruction.py Project: QI2lab/OPM
    def _process_data(self):
        
        # create parameter array from scan parameters saved by acquisition code
        df_metadata = read_metadata(self.data_path / Path('scan_metadata.csv'))
        root_name = df_metadata['root_name']
        scan_type = df_metadata['scan_type']
        theta = df_metadata['theta']
        scan_step = df_metadata['scan_step']
        pixel_size = df_metadata['pixel_size']
        num_t = df_metadata['num_t']
        num_y = df_metadata['num_y']
        num_z  = df_metadata['num_z']
        num_ch = df_metadata['num_ch']
        num_images = df_metadata['scan_axis_positions']
        y_pixels = df_metadata['y_pixels']
        x_pixels = df_metadata['x_pixels']
        chan_405_active = df_metadata['405_active']
        chan_488_active = df_metadata['488_active']
        chan_561_active = df_metadata['561_active']
        chan_635_active = df_metadata['635_active']
        chan_730_active = df_metadata['730_active']
        active_channels = [chan_405_active,chan_488_active,chan_561_active,chan_635_active,chan_730_active]
        channel_idxs = [0,1,2,3,4]
        channels_in_data = list(compress(channel_idxs, active_channels))
        n_active_channels = len(channels_in_data)

        self.active_channels = active_channels
        self.channels_in_data = channels_in_data

        # calculate pixel sizes of deskewed image in microns
        deskewed_x_pixel = pixel_size
        deskewed_y_pixel = pixel_size
        deskewed_z_pixel = pixel_size
        if self.debug:
            print('Deskewed pixel sizes before downsampling (um). x='+str(deskewed_x_pixel)+', y='+str(deskewed_y_pixel)+', z='+str(deskewed_z_pixel)+'.')

        # deskew parameters
        deskew_parameters = np.empty([3])
        deskew_parameters[0] = theta            # (degrees)
        deskew_parameters[1] = scan_step*100    # (nm)
        deskew_parameters[2] = pixel_size*100   # (nm)

        # amount of down sampling in z
        z_down_sample = 1

        # load dataset
        dataset_zarr = zarr.open(self.data_path / Path(root_name+'.zarr'),mode='r')

        # create output directory
        if self.decon == 0 and self.flatfield == 0:
            output_dir_path = self.data_path / 'deskew_output'
        elif self.decon == 0 and self.flatfield == 1:
            output_dir_path = self.data_path / 'deskew_flatfield_output'
        elif self.decon == 1 and self.flatfield == 0:
            output_dir_path = self.data_path / 'deskew_decon_output'
        elif self.decon == 1 and self.flatfield == 1:
            output_dir_path = self.data_path / 'deskew_flatfield_decon_output'
        output_dir_path.mkdir(parents=True, exist_ok=True)

        # create name for zarr directory
        zarr_output_path = output_dir_path / Path('OPM_processed.zarr')

        # calculate size of one volume
        # change step size from physical space (nm) to camera space (pixels)
        pixel_step = scan_step/pixel_size    # (pixels)

        # calculate the number of pixels scanned during stage scan 
        scan_end = num_images * pixel_step  # (pixels)

        # calculate properties for final image
        ny = np.int64(np.ceil(scan_end+y_pixels*np.cos(theta*np.pi/180))) # (pixels)
        nz = np.int64(np.ceil(y_pixels*np.sin(theta*np.pi/180)))          # (pixels)
        nx = np.int64(x_pixels)                                           # (pixels)

        # create and open zarr file
        opm_data = zarr.open(str(zarr_output_path), mode="w", shape=(num_t, num_ch, nz, ny, nx), chunks=(1, 1, int(nz), int(ny), int(nx)), dtype=np.uint16)
            
        # if retrospective flatfield is requested, try to import CuPy-based flat-fielding
        # (the flat-field step is stubbed out in this excerpt)
        if self.flatfield:
            pass

        # if decon is requested, import the skewed PSF generator and the CuPy-based Lucy-Richardson deconvolution helper
        if self.decon:
            from src.utils.opm_psf import generate_skewed_psf
            from src.utils.image_post_processing import lr_deconvolution_cupy

            ex_wavelengths = [.405,.488,.561,.635,.730]
            em_wavelengths = [.420,.520,.605,.680,.760]

            skewed_psf = []

            # generate one skewed PSF per active channel (index by channel number, not the boolean flags)
            for ch_idx in channels_in_data:
                skewed_psf.append(generate_skewed_psf(0.1,ex_wavelengths[ch_idx],em_wavelengths[ch_idx]))

        # loop over all timepoints and channels
        for t_idx in trange(num_t,desc='t',position=0):
            for ch_idx in trange(n_active_channels,desc='c',position=1, leave=False):

                # pull data stack into memory
                if self.debug:
                    print('Process timepoint '+str(t_idx)+'; channel '+str(ch_idx) +'.')
                raw_data = return_data_from_zarr_to_numpy(dataset_zarr, t_idx, ch_idx, num_images, y_pixels,x_pixels)

                # run deconvolution on deskewed image
                if self.decon:
                    if self.debug:
                        print('Deconvolve.')
                    decon = lr_deconvolution_cupy(raw_data,skewed_psf[ch_idx])
                else:
                    decon = raw_data
                del raw_data

                # perform flat-fielding (pass-through in this excerpt)
                if self.flatfield:
                    if self.debug:
                        print('Flatfield.')
                    corrected = decon
                else:
                    corrected = decon
                del decon

                # deskew
                if self.debug:
                    print('Deskew.')
                deskewed = deskew(np.flipud(corrected),*deskew_parameters)
                del corrected

                # downsample in z due to oversampling when going from OPM to coverslip geometry
                if z_down_sample==1:
                    deskewed_downsample = deskewed
                else:
                    if self.debug:
                        print('Downsample.')
                    deskewed_downsample = block_reduce(deskewed, block_size=(z_down_sample,1,1), func=np.mean)
                del deskewed

                if self.debug:
                    print('Write data into Zarr container')

                opm_data[t_idx, ch_idx, :, :, :] = deskewed_downsample

                # free up memory
                del deskewed_downsample
                gc.collect()

        # exit
        self.dataset_zarr = zarr_output_path
        self.scale = [1,deskewed_z_pixel,deskewed_y_pixel,deskewed_x_pixel]
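For reference, the deskewed-volume geometry computed in the excerpt above (pixel_step, scan_end and the ny/nz/nx output shape) can be checked in isolation. The helper below is a hypothetical sketch that only mirrors that arithmetic; it is not part of the QI2lab/OPM project.

import numpy as np

def deskewed_shape(num_images, scan_step, pixel_size, theta_deg, y_pixels, x_pixels):
    """Hypothetical helper mirroring the output-shape math in the excerpt above."""
    pixel_step = scan_step / pixel_size          # scan step in camera pixels
    scan_end = num_images * pixel_step           # total scan length in pixels
    ny = int(np.ceil(scan_end + y_pixels * np.cos(np.deg2rad(theta_deg))))  # (pixels)
    nz = int(np.ceil(y_pixels * np.sin(np.deg2rad(theta_deg))))             # (pixels)
    nx = int(x_pixels)                                                      # (pixels)
    return nz, ny, nx

# e.g. deskewed_shape(10000, 0.4, 0.115, 30.0, 256, 1600) returns (128, 35005, 1600)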
Code Example #6
    def _on_click(self):
        for i in trange(10):
            sleep(0.1)
Code Example #7
File: test_tqdm.py Project: uschmidt83/magicgui
    def f2():
        with trange(10, leave=False) as pbar2:
            pass
        assert pbar2.progressbar.visible is False
Code Example #8
File: test_tqdm.py Project: uschmidt83/magicgui
    def f():
        with trange(10, leave=True) as pbar1:
            pass
        assert pbar1.progressbar.visible is True
Code Example #9
File: test_tqdm.py Project: uschmidt83/magicgui
    def f():
        with trange(10, disable=True) as pbar:
            assert not hasattr(pbar, "progressbar")
            assert not pbar._mgui
Code Example #10
File: test_tqdm.py Project: uschmidt83/magicgui
def _indirectly_decorated(steps=2):
    for i in trange(4):
        pass
Code Example #11
File: test_tqdm.py Project: uschmidt83/magicgui
    def long_func(steps=2):
        for i in trange(4):
            pass
Code Example #12
File: OPMMirrorScan.py Project: QI2lab/OPM
    def _acquire_3d_t_data(self):

        with RemoteMMCore() as mmc_3d_time:

            #------------------------------------------------------------------------------------------------------------------------------------
            #----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
            #------------------------------------------------------------------------------------------------------------------------------------
            # parse which channels are active
            active_channel_indices = [ind for ind, st in zip(self.do_ind, self.channel_states) if st]
            self.n_active_channels = len(active_channel_indices)
                
            if self.debug:
                print("%d active channels: " % self.n_active_channels, end="")
                for ind in active_channel_indices:
                    print("%s " % self.channel_labels[ind], end="")
                print("")

            if self.ROI_changed:
                self._crop_camera()
                self.ROI_changed = False

            # set exposure time
            if self.exposure_changed:
                mmc_3d_time.setExposure(self.exposure_ms)
                self.exposure_changed = False

            if self.powers_changed:
                self._set_mmc_laser_power()
                self.powers_changed = False
            
            if self.channels_changed or self.footprint_changed or not(self.DAQ_running):
                if self.DAQ_running:
                    self.opmdaq.stop_waveform_playback()
                    self.DAQ_running = False
                self.opmdaq.set_scan_type('mirror')
                self.opmdaq.set_channels_to_use(self.channel_states)
                self.opmdaq.set_interleave_mode(True)
                self.scan_steps = self.opmdaq.set_scan_mirror_range(self.scan_axis_step_um,self.scan_mirror_footprint_um)
                self.opmdaq.generate_waveforms()
                self.channels_changed = False
                self.footprint_changed = False

            # create directory for timelapse
            time_string = datetime.now().strftime("%Y_%m_%d-%I_%M_%S")
            self.output_dir_path = self.save_path / Path('timelapse_'+time_string)
            self.output_dir_path.mkdir(parents=True, exist_ok=True)


            # create name for zarr directory
            zarr_output_path = self.output_dir_path / Path('OPM_data.zarr')

            # create and open zarr file
            opm_data = zarr.open(str(zarr_output_path), mode="w", shape=(self.n_timepoints, self.n_active_channels, self.scan_steps, self.ROI_width_y, self.ROI_width_x), chunks=(1, 1, 1, self.ROI_width_y, self.ROI_width_x),compressor=None, dtype=np.uint16)

            #------------------------------------------------------------------------------------------------------------------------------------
            #----------------------------------------------End setup of scan parameters----------------------------------------------------------
            #------------------------------------------------------------------------------------------------------------------------------------


            #------------------------------------------------------------------------------------------------------------------------------------
            #----------------------------------------------------Start acquisition---------------------------------------------------------------
            #------------------------------------------------------------------------------------------------------------------------------------

            # set circular buffer to be large
            mmc_3d_time.clearCircularBuffer()
            circ_buffer_mb = 96000
            mmc_3d_time.setCircularBufferMemoryFootprint(int(circ_buffer_mb))

            # run hardware triggered acquisition
            if self.wait_time == 0:
                self.opmdaq.start_waveform_playback()
                self.DAQ_running = True
                mmc_3d_time.startSequenceAcquisition(int(self.n_timepoints*self.n_active_channels*self.scan_steps),0,True)
                for t in trange(self.n_timepoints,desc="t", position=0):
                    for z in trange(self.scan_steps,desc="z", position=1, leave=False):
                        for c in range(self.n_active_channels):
                            while mmc_3d_time.getRemainingImageCount()==0:
                                pass
                            opm_data[t, c, z, :, :]  = mmc_3d_time.popNextImage()
                mmc_3d_time.stopSequenceAcquisition()
                self.opmdaq.stop_waveform_playback()
                self.DAQ_running = False
            else:
                for t in trange(self.n_timepoints,desc="t", position=0):
                    self.opmdaq.start_waveform_playback()
                    self.DAQ_running = True
                    mmc_3d_time.startSequenceAcquisition(int(self.n_active_channels*self.scan_steps),0,True)
                    for z in trange(self.scan_steps,desc="z", position=1, leave=False):
                        for c in range(self.n_active_channels):
                            while mmc_3d_time.getRemainingImageCount()==0:
                                pass
                            opm_data[t, c, z, :, :]  = mmc_3d_time.popNextImage()
                    mmc_3d_time.stopSequenceAcquisition()
                    self.opmdaq.stop_waveform_playback()
                    self.DAQ_running = False
                    time.sleep(self.wait_time)
                    
            # construct metadata and save
            self._save_metadata()

            #------------------------------------------------------------------------------------------------------------------------------------
            #--------------------------------------------------------End acquisition-------------------------------------------------------------
            #------------------------------------------------------------------------------------------------------------------------------------

            # set circular buffer to be small 
            mmc_3d_time.clearCircularBuffer()
            circ_buffer_mb = 4000
            mmc_3d_time.setCircularBufferMemoryFootprint(int(circ_buffer_mb))