Example #1
 def write_virtual_by_plane(self):
     bdv_writer = npy2bdv.BdvWriter(self.fname0,
                                    nchannels=self.N_CH,
                                    nilluminations=self.N_ILL,
                                    ntiles=self.N_TILES,
                                    nangles=self.N_ANGLES)
     i = 0
     for t in range(self.N_T):
         for i_ch in range(self.N_CH):
             for i_illum in range(self.N_ILL):
                 for i_tile in range(self.N_TILES):
                     for i_angle in range(self.N_ANGLES):
                         bdv_writer.append_view(stack=None,
                                                virtual_stack_dim=(self.NZ, self.NY, self.NX),
                                                time=t,
                                                channel=i_ch,
                                                illumination=i_illum,
                                                tile=i_tile,
                                                angle=i_angle)
                         for iz in range(self.NZ):
                             bdv_writer.append_plane(plane=self.stacks[i][iz, :, :],
                                                     z=iz,
                                                     time=t, channel=i_ch, illumination=i_illum,
                                                     tile=i_tile, angle=i_angle)
                         i += 1
     bdv_writer.write_xml()
     bdv_writer.close()
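A minimal, self-contained sketch of the same plane-by-plane pattern, for reference. It is not part of the test suite: the file name, dimensions, and random data are placeholders, and only calls already shown above (BdvWriter, append_view with virtual_stack_dim, append_plane, write_xml, close) are used.

import numpy as np
import npy2bdv

# placeholder dimensions and output path (hypothetical)
nz, ny, nx = 8, 64, 64
writer = npy2bdv.BdvWriter("./example_virtual.h5", nchannels=1, overwrite=True)
# declare an empty (virtual) view, then fill it one z-plane at a time
writer.append_view(stack=None, virtual_stack_dim=(nz, ny, nx), time=0, channel=0)
for iz in range(nz):
    plane = np.random.randint(0, 65535, size=(ny, nx), dtype="uint16")
    writer.append_plane(plane=plane, z=iz, time=0, channel=0)
writer.write_xml()
writer.close()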
Example #2
 def write_virtual_by_substack(self):
     '''Write a virtual stack by substacks, with a downsampling pyramid.'''
     bdv_writer = npy2bdv.BdvWriter(self.fname1,
                                    nchannels=self.N_CH,
                                    nilluminations=self.N_ILL,
                                    ntiles=self.N_TILES,
                                    nangles=self.N_ANGLES,
                                    subsamp=((1, 1, 1), (2, 4, 4)),
                                    blockdim=((4, 16, 16), (4, 16, 16)))
     # Initialize virtual stacks
     for t in range(self.N_T):
         for i_ch in range(self.N_CH):
             for i_illum in range(self.N_ILL):
                 for i_tile in range(self.N_TILES):
                     for i_angle in range(self.N_ANGLES):
                         bdv_writer.append_view(stack=None,
                                                virtual_stack_dim=(self.NZ, self.NY, self.NX),
                                                time=t, channel=i_ch, illumination=i_illum,
                                                tile=i_tile, angle=i_angle)
     # Populate the virtual stacks
     i = 0
     for t in range(self.N_T):
         for i_ch in range(self.N_CH):
             for i_illum in range(self.N_ILL):
                 for i_tile in range(self.N_TILES):
                     for i_angle in range(self.N_ANGLES):
                         for isub in range(self.N_SUBSTACKS):
                             zslice = slice(isub*(self.NZ//self.N_SUBSTACKS), (isub+1)*(self.NZ//self.N_SUBSTACKS))
                             bdv_writer.append_substack(substack=self.stacks[i][zslice, :, :],
                                                        z_start=zslice.start,
                                                        time=t, channel=i_ch, illumination=i_illum,
                                                        tile=i_tile, angle=i_angle)
                         i += 1
     bdv_writer.write_xml()
     bdv_writer.close()
Example #3
    def setUp(self) -> None:
        self.test_dir = "./test/test_files/"
        self.fname = self.test_dir + "test_ex1_t2_ch2_illum2_angle2.h5"
        if not os.path.exists(self.test_dir):
            os.mkdir(self.test_dir)

        self.NZ, self.NY, self.NX = 8, 35, 35  # XY dims must be odd to get nominal 65535 peak value.
        self.N_T, self.N_CH, self.N_ILL, self.N_TILES, self.N_ANGLES = 2, 2, 4, 6, 4

        self.stack = np.empty((self.NZ, self.NY, self.NX), "uint16")
        for z in range(self.NZ):
            self.stack[z, :, :] = generate_test_image((self.NY, self.NX), z,
                                                      self.NZ)

        bdv_writer = npy2bdv.BdvWriter(self.fname,
                                       nchannels=self.N_CH,
                                       nilluminations=self.N_ILL,
                                       ntiles=self.N_TILES,
                                       nangles=self.N_ANGLES,
                                       overwrite=True)
        for t in range(self.N_T):
            for i_ch in range(self.N_CH):
                for i_illum in range(self.N_ILL):
                    for i_tile in range(self.N_TILES):
                        for i_angle in range(self.N_ANGLES):
                            bdv_writer.append_view(self.stack,
                                                   time=t,
                                                   channel=i_ch,
                                                   illumination=i_illum,
                                                   tile=i_tile,
                                                   angle=i_angle,
                                                   voxel_size_xyz=(1, 1, 4))
        bdv_writer.write_xml_file(ntimes=self.N_T)
        bdv_writer.close()
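For reference, the H5 written by this setUp() can be sanity-checked with h5py. The sketch below assumes the standard BigDataViewer H5 layout (timepoint/setup/resolution-level groups holding a "cells" dataset); the group names are that convention, not something shown above.

import h5py

# path taken from setUp() above; group layout assumed to follow the BDV H5 convention
with h5py.File("./test/test_files/test_ex1_t2_ch2_illum2_angle2.h5", "r") as f:
    cells = f["t00000/s00/0/cells"]   # first timepoint, first setup, full resolution
    print(cells.shape, cells.dtype)   # expected shape: (8, 35, 35)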
Example #4
    def setUp(self) -> None:
        """"This will automatically call for EVERY single test we run."""
        self.test_dir = "./test/test_files/"
        if not os.path.exists(self.test_dir):
            os.mkdir(self.test_dir)
        self.fname = self.test_dir + "test_real_stack." + FILE_EXTENSION
        self.NZ, self.NY, self.NX = 16, 64, 64
        self.N_T, self.N_CH, self.N_ILL, self.N_TILES, self.N_ANGLES = 2, 2, 2, 3, 2
        self.N_VIEWS = self.N_T*self.N_CH*self.N_ILL*self.N_TILES*self.N_ANGLES
        self.affine = np.random.uniform(0, 1, (3, 4))
        self.probe_t_ch_ill_tile_angle = (0, 1, 1, 0, 1) # pick a random index of a view to probe
        self.subsamp = ((1, 1, 1), (2, 4, 4),)  # OPTIONAL param
        self.blockdim = ((8, 32, 32), (4, 8, 8),) # OPTIONAL param
        self.stacks = []
        # generate random views (stacks)
        for t in range(self.N_T):
            for i_ch in range(self.N_CH):
                for i_illum in range(self.N_ILL):
                    for i_tile in range(self.N_TILES):
                        for i_angle in range(self.N_ANGLES):
                            stack = np.empty((self.NZ, self.NY, self.NX), "uint16")
                            peak = np.random.randint(100, 65535)
                            for z in range(self.NZ):
                                stack[z, :, :] = generate_test_image((self.NY, self.NX), z, self.NZ, peak=peak)
                            self.stacks.append(stack)

        bdv_writer = npy2bdv.BdvWriter(self.fname,
                                       nchannels=self.N_CH,
                                       nilluminations=self.N_ILL,
                                       ntiles=self.N_TILES,
                                       nangles=self.N_ANGLES,
                                       #subsamp=self.subsamp,
                                       #blockdim=self.blockdim,
                                       )
        i = 0
        for t in range(self.N_T):
            for i_ch in range(self.N_CH):
                for i_illum in range(self.N_ILL):
                    for i_tile in range(self.N_TILES):
                        for i_angle in range(self.N_ANGLES):
                            bdv_writer.append_view(self.stacks[i], time=t,
                                                   channel=i_ch,
                                                   illumination=i_illum,
                                                   tile=i_tile,
                                                   angle=i_angle,
                                                   voxel_size_xyz=(1, 1, 4))
                            i += 1
        bdv_writer.create_pyramids(subsamp=self.subsamp[1:], blockdim=self.blockdim[1:])
        bdv_writer.write_xml()
        bdv_writer.append_affine(self.affine, 'test affine transform', *self.probe_t_ch_ill_tile_angle)
        bdv_writer.close()
Example #5
def main(argv):

    # parse command line arguments
    parser = argparse.ArgumentParser(description="Process raw OPM data.")
    parser.add_argument("-i",
                        "--ipath",
                        type=str,
                        help="supply the directory to be processed")
    parser.add_argument("-d",
                        "--decon",
                        type=int,
                        default=0,
                        help="0: no deconvolution (DEFAULT), 1: deconvolution")
    parser.add_argument(
        "-f",
        "--flatfield",
        type=int,
        default=0,
        help=
        "0: No flat field (DEFAULT), 1: flat field (FIJI) 2: flat field (python)"
    )
    parser.add_argument(
        "-s",
        "--save_type",
        type=int,
        default=1,
        help="0: TIFF stack output, 1: BDV output (DEFAULT), 2: Zarr output")
    parser.add_argument(
        "-z",
        "--z_down_sample",
        type=int,
        default=1,
        help="1: No downsampling (DEFAULT), n: Nx downsampling")
    args = parser.parse_args()

    input_dir_string = args.ipath
    decon_flag = args.decon
    flatfield_flag = args.flatfield
    save_type = args.save_type
    z_down_sample = args.z_down_sample
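    # Example invocation (script name and data path are hypothetical):
    #   python recon_opm.py -i /data/opm_scan -d 1 -f 2 -s 1 -z 2
    # i.e. deconvolution on, python flat-field, BDV output, 2x z-downsampling.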

    # https://docs.python.org/3/library/pathlib.html
    # Create Path object to directory
    input_dir_path = Path(input_dir_string)

    # create parameter array from scan parameters saved by acquisition code
    df_metadata = data_io.read_metadata(input_dir_path /
                                        Path('scan_metadata.csv'))
    root_name = df_metadata['root_name']
    scan_type = df_metadata['scan_type']
    theta = df_metadata['theta']
    scan_step = df_metadata['scan_step']
    pixel_size = df_metadata['pixel_size']
    num_t = df_metadata['num_t']
    num_y = df_metadata['num_y']
    num_z = df_metadata['num_z']
    num_ch = df_metadata['num_ch']
    num_images = df_metadata['scan_axis_positions']
    y_pixels = df_metadata['y_pixels']
    x_pixels = df_metadata['x_pixels']
    chan_405_active = df_metadata['405_active']
    chan_488_active = df_metadata['488_active']
    chan_561_active = df_metadata['561_active']
    chan_635_active = df_metadata['635_active']
    chan_730_active = df_metadata['730_active']
    active_channels = [
        chan_405_active, chan_488_active, chan_561_active, chan_635_active,
        chan_730_active
    ]
    channel_idxs = [0, 1, 2, 3, 4]
    channels_in_data = list(compress(channel_idxs, active_channels))
    n_active_channels = len(channels_in_data)
    if num_ch != n_active_channels:
        print('Channel setup error. Check metadata file and directory names.')
        sys.exit()

    # calculate pixel sizes of deskewed image in microns
    deskewed_x_pixel = pixel_size / 1000.
    deskewed_y_pixel = pixel_size / 1000.
    deskewed_z_pixel = pixel_size / 1000.
    print('Deskewed pixel sizes before downsampling (um). x=' +
          str(deskewed_x_pixel) + ', y=' + str(deskewed_y_pixel) + ', z=' +
          str(deskewed_z_pixel) + '.')

    # create output directory
    if decon_flag == 0 and flatfield_flag == 0:
        output_dir_path = input_dir_path / 'deskew_output'
    elif decon_flag == 0 and flatfield_flag > 0:
        output_dir_path = input_dir_path / 'deskew_flatfield_output'
    elif decon_flag == 1 and flatfield_flag == 0:
        output_dir_path = input_dir_path / 'deskew_decon_output'
    elif decon_flag == 1 and flatfield_flag > 0:
        output_dir_path = input_dir_path / 'deskew_flatfield_decon_output'
    output_dir_path.mkdir(parents=True, exist_ok=True)

    # Create TIFF if requested
    if (save_type == 0):
        # create directory for data type
        tiff_output_dir_path = output_dir_path / Path('tiff')
        tiff_output_dir_path.mkdir(parents=True, exist_ok=True)
    # Create BDV if requested
    elif (save_type == 1):
        # create directory for data type
        bdv_output_dir_path = output_dir_path / Path('bdv')
        bdv_output_dir_path.mkdir(parents=True, exist_ok=True)

        # https://github.com/nvladimus/npy2bdv
        # create BDV H5 file with sub-sampling for BigStitcher
        bdv_output_path = bdv_output_dir_path / Path(root_name + '_bdv.h5')
        bdv_writer = npy2bdv.BdvWriter(str(bdv_output_path),
                                       nchannels=num_ch,
                                       ntiles=num_y * num_z,
                                       subsamp=((1, 1, 1), (4, 8, 8), (8, 16,
                                                                       16)),
                                       blockdim=((32, 128, 128), ),
                                       compression=None)

        # create blank affine transformation to use for stage translation
        unit_matrix = np.array((
            (1.0, 0.0, 0.0, 0.0),  # change the 4. value for x_translation (px)
            (0.0, 1.0, 0.0, 0.0),  # change the 4. value for y_translation (px)
            (0.0, 0.0, 1.0,
             0.0)))  # change the 4. value for z_translation (px)
    # Create Zarr if requested
    elif (save_type == 2):
        # create directory for data type
        zarr_output_dir_path = output_dir_path / Path('zarr')
        zarr_output_dir_path.mkdir(parents=True, exist_ok=True)

        # create name for zarr directory
        zarr_output_path = zarr_output_dir_path / Path(root_name +
                                                       '_zarr.zarr')

        # calculate size of one volume
        # change step size from physical space (nm) to camera space (pixels)
        pixel_step = scan_step / pixel_size  # (pixels)

        # calculate the number of pixels scanned during stage scan
        scan_end = num_images * pixel_step  # (pixels)

        # calculate properties for final image
        ny = np.int64(
            np.ceil(scan_end +
                    y_pixels * np.cos(theta * np.pi / 180)))  # (pixels)
        nz = np.int64(np.ceil(y_pixels *
                              np.sin(theta * np.pi / 180)))  # (pixels)
        nx = np.int64(x_pixels)  # (pixels)

        # create and open zarr file
        root = zarr.open(str(zarr_output_path), mode="w")
        opm_data = root.zeros("opm_data",
                              shape=(num_t, num_y * num_z, num_ch, nz, ny, nx),
                              chunks=(1, 1, 1, 32, 128, 128),
                              dtype=np.uint16)
        root = zarr.open(str(zarr_output_path), mode="rw")
        opm_data = root["opm_data"]

    # if retrospective flatfield is requested, import and open pyimagej in interactive mode
    # because BaSiC flat-fielding plugin cannot run in headless mode
    if flatfield_flag == 1:
        from image_post_processing import manage_flat_field
        import imagej
        import scyjava

        scyjava.config.add_option('-Xmx12g')
        plugins_dir = Path('/home/dps/Fiji.app/plugins')
        scyjava.config.add_option(f'-Dplugins.dir={str(plugins_dir)}')
        ij_path = Path('/home/dps/Fiji.app')
        ij = imagej.init(str(ij_path), headless=False)
        ij.ui().showUI()
        print(
            'PyimageJ approach to flat fielding will be removed soon. Switch to GPU accelerated python BASIC code (-f 2).'
        )
    elif flatfield_flag == 2:
        from image_post_processing import manage_flat_field_py

    # if decon is requested, import microvolution wrapper
    if decon_flag == 1:
        from image_post_processing import mv_decon

    # initialize counters
    timepoints_in_data = list(range(num_t))
    y_tile_in_data = list(range(num_y))
    z_tile_in_data = list(range(num_z))
    ch_in_BDV = list(range(n_active_channels))
    tile_idx = 0

    # loop over all directories. Each directory will be placed as a "tile" into the BigStitcher file
    for (y_idx, z_idx) in product(y_tile_in_data, z_tile_in_data):
        for (t_idx, ch_BDV_idx) in product(timepoints_in_data, ch_in_BDV):

            ch_idx = channels_in_data[ch_BDV_idx]

            # open stage positions file
            stage_position_filename = Path('t' + str(t_idx).zfill(4) + '_y' +
                                           str(y_idx).zfill(4) + '_z' +
                                           str(z_idx).zfill(4) + '_ch' +
                                           str(ch_idx).zfill(4) +
                                           '_stage_positions.csv')
            stage_position_path = input_dir_path / stage_position_filename
            # wait until the stage position file has been written
            while not stage_position_path.exists():
                time.sleep(60)

            df_stage_positions = data_io.read_metadata(stage_position_path)

            stage_x = np.round(float(df_stage_positions['stage_x']), 2)
            stage_y = np.round(float(df_stage_positions['stage_y']), 2)
            stage_z = np.round(float(df_stage_positions['stage_z']), 2)
            print('y tile ' + str(y_idx + 1) + ' of ' + str(num_y) +
                  '; z tile ' + str(z_idx + 1) + ' of ' + str(num_z) +
                  '; channel ' + str(ch_BDV_idx + 1) + ' of ' +
                  str(n_active_channels))
            print('Stage location (um): x=' + str(stage_x) + ', y=' +
                  str(stage_y) + ', z=' + str(stage_z) + '.')

            # construct directory name
            current_tile_dir_path = Path(root_name + '_t' +
                                         str(t_idx).zfill(4) + '_y' +
                                         str(y_idx).zfill(4) + '_z' +
                                         str(z_idx).zfill(4) + '_ch' +
                                         str(ch_idx).zfill(4) + '_1')
            tile_dir_path_to_load = input_dir_path / current_tile_dir_path

            # https://pycro-manager.readthedocs.io/en/latest/read_data.html
            dataset = Dataset(str(tile_dir_path_to_load))
            raw_data = data_io.return_data_numpy(dataset=dataset,
                                                 time_axis=None,
                                                 channel_axis=None,
                                                 num_images=num_images,
                                                 y_pixels=y_pixels,
                                                 x_pixels=x_pixels)

            # perform flat-fielding
            if flatfield_flag == 1:
                print('Flatfield.')
                corrected_stack = manage_flat_field(raw_data, ij)
            elif flatfield_flag == 2:
                corrected_stack = manage_flat_field_py(raw_data)
            else:
                corrected_stack = raw_data
            del raw_data

            # deskew
            print('Deskew.')
            deskewed = deskew(data=np.flipud(corrected_stack),
                              theta=theta,
                              distance=scan_step,
                              pixel_size=pixel_size)
            del corrected_stack

            # downsample in z due to oversampling when going from OPM to coverslip geometry
            if z_down_sample > 1:
                print('Downsample.')
                deskewed_downsample = block_reduce(deskewed,
                                                   block_size=(z_down_sample,
                                                               1, 1),
                                                   func=np.mean)
            else:
                deskewed_downsample = deskewed
            del deskewed

            # run deconvolution on deskewed image
            if decon_flag == 1:
                print('Deconvolve.')
                deskewed_downsample_decon = mv_decon(
                    deskewed_downsample, ch_idx, deskewed_y_pixel,
                    z_down_sample * deskewed_z_pixel)
            else:
                deskewed_downsample_decon = deskewed_downsample
            del deskewed_downsample

            # save deskewed image into TIFF stack
            if (save_type == 0):
                print('Write TIFF stack')
                tiff_filename = root_name + '_t' + str(t_idx).zfill(
                    3) + '_p' + str(tile_idx).zfill(4) + '_c' + str(
                        ch_idx).zfill(3) + '.tiff'
                tiff_output_path = tiff_output_dir_path / Path(tiff_filename)
                tifffile.imwrite(str(tiff_output_path),
                                 deskewed_downsample_decon,
                                 imagej=True,
                                 resolution=(1 / deskewed_x_pixel,
                                             1 / deskewed_y_pixel),
                                 metadata={
                                     'spacing':
                                     (z_down_sample * deskewed_z_pixel),
                                     'unit': 'um',
                                     'axes': 'ZYX'
                                 })

                metadata_filename = root_name + '_t' + str(t_idx).zfill(
                    3) + '_p' + str(tile_idx).zfill(4) + '_c' + str(
                        ch_idx).zfill(3) + '.csv'
                metadata_output_path = tiff_output_dir_path / Path(
                    metadata_filename)
                tiff_stage_metadata = [{
                    'stage_x': float(stage_x),
                    'stage_y': float(stage_y),
                    'stage_z': float(stage_z)
                }]
                data_io.write_metadata(tiff_stage_metadata[0],
                                       metadata_output_path)

            elif (save_type == 1):
                # create affine transformation for stage translation
                # swap x & y from instrument to BDV
                affine_matrix = unit_matrix.copy()
                affine_matrix[0, 3] = (stage_y) / (deskewed_y_pixel
                                                   )  # x-translation
                affine_matrix[1, 3] = (stage_x) / (deskewed_x_pixel
                                                   )  # y-translation
                affine_matrix[2, 3] = (-1 * stage_z) / (
                    z_down_sample * deskewed_z_pixel)  # z-translation

                # save tile in BDV H5 with actual stage positions
                print('Write into BDV H5.')
                bdv_writer.append_view(
                    deskewed_downsample_decon,
                    time=0,
                    channel=ch_BDV_idx,
                    tile=tile_idx,
                    voxel_size_xyz=(deskewed_x_pixel, deskewed_y_pixel,
                                    z_down_sample * deskewed_z_pixel),
                    voxel_units='um',
                    calibration=(1, 1, (z_down_sample * deskewed_z_pixel) /
                                 deskewed_y_pixel),
                    m_affine=affine_matrix,
                    name_affine='tile ' + str(tile_idx) + ' translation')

            elif (save_type == 2):
                print('Write data into Zarr container')
                opm_data[t_idx, tile_idx,
                         ch_BDV_idx, :, :, :] = deskewed_downsample_decon
                metadata_filename = root_name + '_t' + str(t_idx).zfill(
                    3) + '_p' + str(tile_idx).zfill(4) + '_c' + str(
                        ch_idx).zfill(3) + '.csv'
                metadata_output_path = zarr_output_dir_path / Path(
                    metadata_filename)
                zarr_stage_metadata = [{
                    'stage_x': float(stage_x),
                    'stage_y': float(stage_y),
                    'stage_z': float(stage_z)
                }]
                data_io.write_metadata(zarr_stage_metadata[0],
                                       metadata_output_path)

            # free up memory
            del deskewed_downsample_decon
            gc.collect()

        tile_idx = tile_idx + 1

    if (save_type == 1):
        # write BDV xml file
        # https://github.com/nvladimus/npy2bdv
        # bdv_writer.write_xml(ntimes=num_t)
        bdv_writer.write_xml()
        bdv_writer.close()

    # shut down pyimagej
    if (flatfield_flag == 1):
        ij.getContext().dispose()

    # exit
    print('Finished.')
    sys.exit()
Example #6
def main():

    #------------------------------------------------------------------------------------------------------------------------------------
    #----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
    #------------------------------------------------------------------------------------------------------------------------------------

    # lasers to use
    # 0 -> inactive
    # 1 -> active
    state_405 = 1
    state_488 = 0
    state_561 = 0
    state_635 = 1
    state_730 = 0

    # laser powers (0 -> 100%)
    power_405 = 10
    power_488 = 0
    power_561 = 0
    power_635 = 10
    power_730 = 0

    # exposure time
    exposure_ms = 5.

    # scan axis limits. Use stage positions reported by MM
    scan_axis_start_um = -26000.  #unit: um
    scan_axis_end_um = -25500.  #unit: um

    # tile axis limits. Use stage positions reported by MM
    tile_axis_start_um = -7000  #unit: um
    tile_axis_end_um = -6500.  #unit: um

    # height axis limits. Use stage positions reported by MM
    height_axis_start_um = 345.  #unit: um
    height_axis_end_um = 375.  #unit:  um

    # FOV parameters
    # ONLY MODIFY IF NECESSARY
    ROI = [0, 1024, 1599, 255]  #unit: pixels

    # setup file name
    save_directory = Path('E:/20201130/')
    save_name = Path('shaffer_lung_v1.h5')

    #------------------------------------------------------------------------------------------------------------------------------------
    #----------------------------------------------End setup of scan parameters----------------------------------------------------------
    #------------------------------------------------------------------------------------------------------------------------------------

    # instantiate the Python-Java bridge to MM
    bridge = Bridge()
    core = bridge.get_core()

    # turn off lasers
    core.set_config('Coherent-State', 'off')
    core.wait_for_config('Coherent-State', 'off')

    # set camera into 16bit readout mode
    core.set_property('Camera', 'ReadoutRate', '100MHz 16bit')
    time.sleep(1)

    # set camera into low noise readout mode
    core.set_property('Camera', 'Gain', '2-CMS')
    time.sleep(1)

    # set camera trigger timeout
    # TO DO: Photometrics claims this setting doesn't exist in PVCAM. Why is it necessary then?
    core.set_property('Camera', 'Trigger Timeout (secs)', 300)
    time.sleep(1)

    # set camera to internal trigger
    core.set_property('Camera', 'TriggerMode', 'Internal Trigger')
    time.sleep(1)

    # change core timeout for long stage moves
    core.set_property('Core', 'TimeoutMs', 100000)

    # crop FOV
    #core.set_roi(*ROI)

    # set exposure
    core.set_exposure(exposure_ms)

    # get actual framerate from Micro-Manager camera properties
    actual_readout_ms = float(core.get_property(
        'Camera', 'ActualInterval-ms'))  #unit: ms

    # camera pixel size
    pixel_size_um = .115  # unit: um

    # scan axis setup
    scan_axis_step_um = 0.4  # unit: um
    scan_axis_step_mm = scan_axis_step_um / 1000.  #unit: mm
    scan_axis_start_mm = scan_axis_start_um / 1000.  #unit: mm
    scan_axis_end_mm = scan_axis_end_um / 1000.  #unit: mm
    scan_axis_range_um = np.abs(scan_axis_end_um -
                                scan_axis_start_um)  # unit: um
    scan_axis_range_mm = scan_axis_range_um / 1000  #unit: mm
    actual_exposure_s = actual_readout_ms / 1000.  #unit: s
    scan_axis_speed = np.round(scan_axis_step_mm / actual_exposure_s,
                               2)  #unit: mm/s
    scan_axis_positions = np.rint(scan_axis_range_mm /
                                  scan_axis_step_mm).astype(
                                      int)  #unit: number of positions

    # tile axis setup
    tile_axis_overlap = 0.2  #unit: percentage
    tile_axis_range_um = np.abs(tile_axis_end_um -
                                tile_axis_start_um)  #unit: um
    tile_axis_range_mm = tile_axis_range_um / 1000  #unit: mm
    tile_axis_ROI = ROI[2] * pixel_size_um  #unit: um
    tile_axis_step_um = np.round((tile_axis_ROI) * (1 - tile_axis_overlap),
                                 2)  #unit: um
    tile_axis_step_mm = tile_axis_step_um / 1000  #unit: mm
    tile_axis_positions = np.rint(tile_axis_range_mm /
                                  tile_axis_step_mm).astype(
                                      int)  #unit: number of positions
    # if tile_axis_positions rounded to zero, make sure we acquire at least one position
    if tile_axis_positions == 0:
        tile_axis_positions = 1

    # height axis setup
    # this is more complicated, since we have an oblique light sheet
    # the height of the scan is the length of the ROI in the tilted direction * sin(tilt angle)
    height_axis_overlap = 0.2  #unit: percentage
    height_axis_range_um = np.abs(height_axis_end_um -
                                  height_axis_start_um)  #unit: um
    height_axis_range_mm = height_axis_range_um / 1000  #unit: mm
    height_axis_ROI = ROI[3] * pixel_size_um * np.sin(
        30 * (np.pi / 180.))  #unit: um
    height_axis_step_um = np.round(
        (height_axis_ROI) * (1 - height_axis_overlap), 2)  #unit: um
    height_axis_step_mm = height_axis_step_um / 1000  #unit: mm
    height_axis_positions = np.rint(height_axis_range_mm /
                                    height_axis_step_mm).astype(
                                        int)  #unit: number of positions
    # if height_axis_positions rounded to zero, make sure we acquire at least one position
    if height_axis_positions == 0:
        height_axis_positions = 1

    # get handle to xy and z stages
    xy_stage = core.get_xy_stage_device()
    z_stage = core.get_focus_device()

    # Setup PLC card to give start trigger
    plcName = 'PLogic:E:36'
    propPosition = 'PointerPosition'
    propCellConfig = 'EditCellConfig'
    #addrOutputBNC3 = 35
    addrOutputBNC1 = 33
    addrStageSync = 46  # TTL5 on Tiger backplane = stage sync signal

    # connect stage sync signal to BNC output
    core.set_property(plcName, propPosition, addrOutputBNC1)
    core.set_property(plcName, propCellConfig, addrStageSync)

    # turn on 'transmit repeated commands' for Tiger
    core.set_property('TigerCommHub', 'OnlySendSerialCommandOnChange', 'No')

    # set tile axis speed for all moves
    command = 'SPEED Y=.1'
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # set scan axis speed for large move to initial position
    command = 'SPEED X=.1'
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # turn off 'transmit repeated commands' for Tiger
    core.set_property('TigerCommHub', 'OnlySendSerialCommandOnChange', 'Yes')

    # move stages to initial positions
    core.set_xy_position(scan_axis_start_um, tile_axis_start_um)
    core.wait_for_device(xy_stage)
    core.set_position(height_axis_start_um)
    core.wait_for_device(z_stage)

    # turn on 'transmit repeated commands' for Tiger
    core.set_property('TigerCommHub', 'OnlySendSerialCommandOnChange', 'No')

    # set scan axis speed to correct speed for continuous stage scan
    # expects mm/s
    command = 'SPEED X=' + str(scan_axis_speed)
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # set scan axis to true 1D scan with no backlash
    command = '1SCAN X? Y=0 Z=9 F=0'
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # set range and return speed (10% of max) for scan axis
    # expects mm
    command = '1SCANR X=' + str(scan_axis_start_mm) + ' Y=' + str(
        scan_axis_end_mm) + ' R=10'
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # turn off 'transmit repeated commands' for Tiger
    core.set_property('TigerCommHub', 'OnlySendSerialCommandOnChange', 'Yes')

    # construct boolean array for lasers to use
    channel_states = [state_405, state_488, state_561, state_635, state_730]
    channel_powers = [power_405, power_488, power_561, power_635, power_730]

    # set lasers to user defined power
    core.set_property('Coherent-Scientific Remote',
                      'Laser 405-100C - PowerSetpoint (%)', channel_powers[0])
    core.set_property('Coherent-Scientific Remote',
                      'Laser 488-150C - PowerSetpoint (%)', channel_powers[1])
    core.set_property('Coherent-Scientific Remote',
                      'Laser OBIS LS 561-150 - PowerSetpoint (%)',
                      channel_powers[2])
    core.set_property('Coherent-Scientific Remote',
                      'Laser 637-140C - PowerSetpoint (%)', channel_powers[3])
    core.set_property('Coherent-Scientific Remote',
                      'Laser 730-30C - PowerSetpoint (%)', channel_powers[4])

    # calculate total tiles
    total_tiles = tile_axis_positions * height_axis_positions

    # output acquisition metadata
    print('Number of X positions: ' + str(scan_axis_positions))
    print('Number of Y tiles: ' + str(tile_axis_positions))
    print('Number of Z slabs: ' + str(height_axis_positions))
    print('Number of channels: ' + str(np.sum(channel_states)))
    print('Number of BDV H5 tiles: ' + str(total_tiles))

    # define unit transformation matrix
    unit_matrix = np.array((
        (1.0, 0.0, 0.0, 0.0),  # change the 4. value for x_translation (px)
        (0.0, 1.0, 0.0, 0.0),  # change the 4. value for y_translation (px)
        (0.0, 0.0, 1.0, 0.0)))  # change the 4. value for z_translation (px)

    # create BDV H5 using npy2bdv
    fname = save_directory / save_name
    bdv_writer = npy2bdv.BdvWriter(fname,
                                   nchannels=np.sum(channel_states),
                                   ntiles=total_tiles,
                                   subsamp=((1, 1, 1), ),
                                   blockdim=((1, 128, 256), ))

    # reset tile index for BDV H5 file
    tile_index = 0

    for y in range(tile_axis_positions):
        # calculate tile axis position
        tile_position_um = tile_axis_start_um + (tile_axis_step_um * y)

        # move XY stage to new tile axis position
        core.set_xy_position(scan_axis_start_um, tile_position_um)
        core.wait_for_device(xy_stage)

        for z in range(height_axis_positions):

            print('Tile index: ' + str(tile_index))

            # calculate height axis position
            height_position_um = height_axis_start_um + (height_axis_step_um *
                                                         z)

            # move Z stage to new height axis position
            core.set_position(height_position_um)
            core.wait_for_device(z_stage)

            # reset channel index for BDV H5 file
            channel_index = 0

            for c in range(len(channel_states)):

                # determine active channel
                if channel_states[c] == 1:
                    if (c == 0):
                        core.set_config('Coherent-State', '405nm')
                        core.wait_for_config('Coherent-State', '405nm')
                    elif (c == 1):
                        core.set_config('Coherent-State', '488nm')
                        core.wait_for_config('Coherent-State', '488nm')
                    elif (c == 2):
                        core.set_config('Coherent-State', '561nm')
                        core.wait_for_config('Coherent-State', '561nm')
                    elif (c == 3):
                        core.set_config('Coherent-State', '637nm')
                        core.wait_for_config('Coherent-State', '637nm')
                    elif (c == 4):
                        core.set_config('Coherent-State', '730nm')
                        core.wait_for_config('Coherent-State', '730nm')

                    print('Channel index: ' + str(channel_index))
                    print('Active channel: ' + str(c))

                    # set camera to trigger first mode for stage synchronization
                    core.set_property('Camera', 'TriggerMode', 'Trigger first')
                    time.sleep(1)

                    # get current X, Y, and Z stage positions for translation transformation
                    point = core.get_xy_stage_position()
                    x_now = point.get_x()
                    y_now = point.get_y()
                    z_now = core.get_position(z_stage)

                    # calculate affine matrix components for translation transformation
                    affine_matrix = unit_matrix.copy()
                    affine_matrix[
                        0,
                        3] = y_now / pixel_size_um  # x axis in BDV H5 (tile axis on scope).
                    affine_matrix[1, 3] = z_now / (
                        pixel_size_um * np.sin(30. * np.pi / 180.)
                    )  # y axis in BDV H5 (height axis on scope).
                    affine_matrix[2, 3] = x_now / (
                        pixel_size_um * np.cos(30. * np.pi / 180.)
                    )  # z axis in BDV H5 (scan axis on scope).

                    # turn on 'transmit repeated commands' for Tiger
                    core.set_property('TigerCommHub',
                                      'OnlySendSerialCommandOnChange', 'No')

                    # check to make sure Tiger is not busy
                    ready = 'B'
                    while (ready != 'N'):
                        command = 'STATUS'
                        core.set_property('TigerCommHub', 'SerialCommand',
                                          command)
                        ready = core.get_property('TigerCommHub',
                                                  'SerialResponse')
                        time.sleep(.500)

                    # turn off 'transmit repeated commands' for Tiger
                    core.set_property('TigerCommHub',
                                      'OnlySendSerialCommandOnChange', 'Yes')

                    # start acquisition
                    core.start_sequence_acquisition(int(scan_axis_positions),
                                                    float(0.0), True)

                    # tell stage to execute scan
                    command = '1SCAN'
                    core.set_property('TigerCommHub', 'SerialCommand', command)

                    # reset image counter
                    image_counter = 0

                    # place stack into BDV H5
                    bdv_writer.append_view(
                        stack=None,
                        virtual_stack_dim=(scan_axis_positions, ROI[3],
                                           ROI[2]),
                        time=0,
                        channel=channel_index,
                        tile=tile_index,
                        m_affine=affine_matrix,
                        name_affine='stage translation',
                        voxel_size_xyz=(.115, .115, .200),
                        voxel_units='um')

                    # grab images from buffer
                    while (image_counter < scan_axis_positions):

                        # if there are images in the buffer, grab and process
                        if (core.get_remaining_image_count() > 0):
                            # grab top image in buffer
                            tagged_image = core.pop_next_tagged_image()

                            # grab metadata to convert 1D array to 2D image
                            image_height = tagged_image.tags['Height']
                            image_width = tagged_image.tags['Width']

                            # convert to 2D image and place into virtual stack in BDV H5
                            bdv_writer.append_plane(plane=np.flipud(
                                tagged_image.pix.reshape(
                                    (image_height, image_width))),
                                                    plane_index=image_counter,
                                                    time=0,
                                                    channel=channel_index)

                            # increment image counter
                            image_counter = image_counter + 1

                        # no images in buffer, wait for another image to arrive.
                        else:
                            time.sleep(np.minimum(.01 * exposure_ms, 1) / 1000)

                    # clean up acquisition
                    core.stop_sequence_acquisition()

                    # turn off lasers
                    core.set_config('Coherent-State', 'off')
                    core.wait_for_config('Coherent-State', 'off')

                    # set camera to internal trigger
                    # this is necessary to avoid PVCAM driver issues that we keep having for long acquisitions.
                    core.set_property('Camera', 'TriggerMode',
                                      'Internal Trigger')
                    time.sleep(1)

                    # increment channel index for BDV H5 file
                    channel_index = channel_index + 1

                    # turn on 'transmit repeated commands' for Tiger
                    core.set_property('TigerCommHub',
                                      'OnlySendSerialCommandOnChange', 'No')

                    # check to make sure Tiger is not busy
                    ready = 'B'
                    while (ready != 'N'):
                        command = 'STATUS'
                        core.set_property('TigerCommHub', 'SerialCommand',
                                          command)
                        ready = core.get_property('TigerCommHub',
                                                  'SerialResponse')
                        time.sleep(.500)

                    # turn off 'transmit repeated commands' for Tiger
                    core.set_property('TigerCommHub',
                                      'OnlySendSerialCommandOnChange', 'Yes')

            # increment tile index for BDV H5
            tile_index = tile_index + 1

    # write BDV XML and close BDV H5
    bdv_writer.write_xml_file(ntimes=1)
    bdv_writer.close()
Example #7
def main(argv):

    # parse directory name from command line argument 
    input_dir_string = ''
    output_dir_string = ''

    try:
        arguments, values = getopt.getopt(argv,"hi:o:n:c:",["help","ipath=","opath="])
    except getopt.GetoptError:
        print('Error. stage_recon.py -i <inputdirectory> -o <outputdirectory>')
        sys.exit(2)
    for current_argument, current_value in arguments:
        if current_argument == '-h':
            print('Usage. stage_recon.py -i <inputdirectory> -o <outputdirectory>')
            sys.exit()
        elif current_argument in ("-i", "--ipath"):
            input_dir_string = current_value
        elif current_argument in ("-o", "--opath"):
            output_dir_string = current_value
        
    if (input_dir_string == ''):
        print('Input parse error.')
        sys.exit(2)

    # Load data
    # this approach assumes data is generated by QI2lab MM script
    # the strategy is to sequentially activate each channel for each tile position
    # this allows for smooth stage scanning at a fast camera rate without having to synchronize
    # laser changing during stage scan

    # https://docs.python.org/3/library/pathlib.html
    # Create Path object to directory
    input_dir_path=Path(input_dir_string)

    # Parse directory for number of channels and strip positions then sort
    sub_dirs = [x for x in input_dir_path.iterdir() if x.is_dir()]
    sub_dirs = natsorted(sub_dirs, alg=ns.PATH)

    # TO DO: automatically determine number of channels and tile positions
    num_channels=1
    num_tiles=69

    # create parameter array
    # [theta, stage move distance, camera pixel size]
    # units are [degrees,nm,nm]
    params=np.array([30,200,116],dtype=np.float32)

    # check if user provided output path
    if (output_dir_string==''):
        output_dir_path = input_dir_path
    else:
        output_dir_path = Path(output_dir_string)

    # https://github.com/nvladimus/npy2bdv
    # create BDV H5 file with sub-sampling for BigStitcher
    # TO DO: modify npy2bdv to support B3D compression, https://git.embl.de/balazs/B3D
    #        this may involve change the underlying hdf5 install that h5py is using
    output_path = output_dir_path / 'deskewed_ch0.h5'
    bdv_writer = npy2bdv.BdvWriter(str(output_path), nchannels=num_channels, ntiles=2*num_tiles+1, \
        subsamp=((1,1,1),(4,8,4),(8,16,8),),blockdim=((16, 32, 16),))

    # loop over each directory. Each directory will be placed as a "tile" into the BigStitcher file
    # TO DO: implement directory polling to do this in the background while data is being acquired.
    for sub_dir in sub_dirs:

        # determine the channel this directory corresponds to
        m = re.search(r'ch(\d+)', str(sub_dir), re.IGNORECASE)
        channel_id = int(m.group(1))

        if channel_id == 0:

            # determine the experimental tile this directory corresponds to
            m = re.search(r'y(\d+)', str(sub_dir), re.IGNORECASE)
            tile_id = int(m.group(1))
            
            # output metadata information to console
            print('Channel ID: '+str(channel_id)+'; Experimental tile ID: '+str(tile_id)+ \
                '; BDV tile IDs: '+str(2*tile_id)+' & '+str(2*tile_id+1))

            # load bright field image for this channel
            bright_field_file = input_dir_path / Path('ch0_bright.tif')
            bright_field = np.asarray(io.imread(bright_field_file),dtype=np.float32)
            
            # find all individual tif files in the current channel + tile sub directory and sort 
            files = natsorted(sub_dir.glob('*.tif'), alg=ns.PATH)
            
            # flip order so that light sheet tilt is along scan direction
            files.reverse()

            # find middle of tilted plane acquisition
            split = len(files)//2
            overlap = np.int64(np.floor(2*split*.1))

            print('Deskew block 1.')
            # read in first block of data with a small overlap for alignment in BigStitcher
            sub_stack = np.asarray([io.imread(file) for file in files[0:split+overlap]],dtype=np.float32)
            sub_stack = sub_stack/bright_field

            # run deskew for the first block of data
            deskewed = stage_deskew(data=sub_stack,parameters=params)
            del sub_stack

            # downsample by 2x in z due to oversampling when going from OPM to coverslip geometry
            #deskewed_downsample = block_reduce(deskewed,block_size=(2,1,1),func=np.mean)
            #del deskewed
            gc.collect()

            print('Writing deskewed block 1.')
            # write BDV tile
            # https://github.com/nvladimus/npy2bdv 
            bdv_writer.append_view(deskewed, time=0, channel=channel_id, tile=2*tile_id, \
                voxel_size_xyz=(.116,.116,.100), voxel_units='um')

            # free up memory
            del deskewed
            gc.collect()

            print('Deskew block 2.')
            # read in second block of data with a small overlap for alignment in BigStitcher
            sub_stack = np.asarray([io.imread(file, plugin='pil') for file in files[split-overlap:]],dtype=np.float32)
            sub_stack = sub_stack/bright_field

            # run deskew for the second block of data
            deskewed = stage_deskew(data=sub_stack,parameters=params)
            del sub_stack

            # downsample by 2x in z due to oversampling when going from OPM to coverslip geometry
            #deskewed_downsample = block_reduce(deskewed,block_size=(2,1,1),func=np.mean)
            #del deskewed
            gc.collect()

            print('Writing deskewed block 2.')
            # write BDV tile
            # https://github.com/nvladimus/npy2bdv 
            bdv_writer.append_view(deskewed, time=0, channel=channel_id, tile=2*tile_id+1, \
                voxel_size_xyz=(.116,.116,.100), voxel_units='um')

            # free up memory
            del deskewed
            del bright_field
            gc.collect()

    # write BDV xml file
    # https://github.com/nvladimus/npy2bdv
    bdv_writer.write_xml_file(ntimes=1)
    bdv_writer.close()

    # clean up memory
    gc.collect()
Example #8
def main(argv):

    # parse directory name from command line argument
    input_dir_string = ''
    output_dir_string = ''

    try:
        arguments, values = getopt.getopt(argv, "hi:o:n:c:",
                                          ["help", "ipath=", "opath="])
    except getopt.GetoptError:
        print('Error. stage_recon.py -i <inputdirectory> -o <outputdirectory>')
        sys.exit(2)
    for current_argument, current_value in arguments:
        if current_argument == '-h':
            print(
                'Usage. stage_recon.py -i <inputdirectory> -o <outputdirectory>'
            )
            sys.exit()
        elif current_argument in ("-i", "--ipath"):
            input_dir_string = current_value
        elif current_argument in ("-o", "--opath"):
            output_dir_string = current_value

    if (input_dir_string == ''):
        print('Input parse error.')
        sys.exit(2)

    # Load data

    # https://docs.python.org/3/library/pathlib.html
    # Create Path object to directory
    input_dir_path = Path(input_dir_string)

    # Parse directory for number of channels and strip positions then sort
    sub_dirs = [x for x in input_dir_path.iterdir() if x.is_dir()]
    sub_dirs = natsorted(sub_dirs, alg=ns.PATH)

    # TO DO: automatically determine number of channels and tile positions
    num_channels = 1
    num_tiles = 1

    # create parameter array
    # [theta, stage move distance, camera pixel size]
    # units are [degrees,nm,nm]
    params = np.array([60, 250, 115], dtype=np.float32)

    # check if user provided output path
    if (output_dir_string == ''):
        output_dir_path = input_dir_path
    else:
        output_dir_path = Path(output_dir_string)

    # https://github.com/nvladimus/npy2bdv
    # create BDV H5 file with sub-sampling for BigStitcher
    output_path = output_dir_path / 'deskewed_ch0.h5'
    bdv_writer = npy2bdv.BdvWriter(str(output_path), nchannels=num_channels, ntiles=2*num_tiles+1, \
        subsamp=((1,1,1),(4,8,4),(8,16,8),),blockdim=((16, 32, 16),))

    # loop over each directory. Each directory will be placed as a "tile" into the BigStitcher file
    for sub_dir in sub_dirs:

        # determine the channel this directory corresponds to
        m = re.search(r'ch(\d+)', str(sub_dir), re.IGNORECASE)
        channel_id = int(m.group(1))

        if channel_id == 0:

            # determine the experimental tile this directory corresponds to
            m = re.search(r'y(\d+)', str(sub_dir), re.IGNORECASE)
            tile_id = int(m.group(1))

            # output metadata information to console
            print('Channel ID: '+str(channel_id)+'; Experimental tile ID: '+str(tile_id)+ \
                '; BDV tile IDs: '+str(2*tile_id)+' & '+str(2*tile_id+1))

            # find all individual tif files in the current channel + tile sub directory and sort
            files = natsorted(sub_dir.glob('*.tif'), alg=ns.PATH)

            print('Deskew data.')
            # read in data
            sub_stack = np.asarray([io.imread(file) for file in files],
                                   dtype=np.float32)

            # run deskew
            deskewed = stage_deskew(data=sub_stack, parameters=params)
            del sub_stack

            print('Writing deskewed data.')
            # write BDV tile
            # https://github.com/nvladimus/npy2bdv
            # params are in nm (see above); convert to um to match voxel_units
            bdv_writer.append_view(deskewed, time=0, channel=channel_id, tile=tile_id, \
                voxel_size_xyz=(params[2]/1000., params[2]/1000., params[1]*np.cos(params[0]*(np.pi/180.))/1000.), voxel_units='um')

            # free up memory
            del deskewed
            gc.collect()

    # write BDV xml file
    # https://github.com/nvladimus/npy2bdv
    bdv_writer.write_xml_file(ntimes=1)
    bdv_writer.close()

    # clean up memory
    gc.collect()
Example #9
def save_files_for_bigstitcher(
    matrix_screener_fields,
    projected=True,
    volume=True,
    *,
    h5_proj_name=None,
    h5_vol_name=None,
    zspacing=1.0,
    project_func=np.max,
    direction_x=-1,
    direction_y=1,
):
    """
    Save the fields in matrix screener fields as BigStitcher projects

    if volume is True, a project for stitching volumes is created
    if projected is True, a project for stitching projections is created
    h5_*_name are the output file names for the volume and projection projects
    zspacing is the spacing between z slices in um (cannot find this in metadata)
    project_func is the aggregation function for projections
    direction_* should be either +1 or -1 and can be used to flip coordinate 
    system directions
    """
    print(f"Zspacing: {zspacing}")
    if projected:
        assert h5_proj_name is not None, "h5 output file for projections must be provided"
        bdv_proj_writer = npy2bdv.BdvWriter(
            h5_proj_name,
            nchannels=1,
            ntiles=len(matrix_screener_fields),
            subsamp=((1, 1, 1), (1, 2, 2), (1, 4, 4), (1, 8, 8), (1, 16, 16)),
            blockdim=((1, 64, 64), ),
            compression="gzip",
        )  # , (4,4,1)))

    if volume:
        assert h5_vol_name is not None, "h5 output file for volumes must be provided"
        bdv_vol_writer = npy2bdv.BdvWriter(
            h5_vol_name,
            nchannels=1,
            ntiles=len(matrix_screener_fields),
            subsamp=(
                (1, 1, 1),
                (1, 2, 2),
                (1, 4, 4),
                (1, 8, 8),
                (2, 16, 16),
                (4, 32, 32),
            ),
            blockdim=(
                (64, 64, 64),
                (64, 64, 64),
                (64, 64, 64),
                (64, 64, 64),
                (32, 32, 32),
                (16, 16, 16),
            ),
            compression="gzip",
        )

    affine_matrix_template = np.array(
        ((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0)))

    for tile_nr, field in enumerate(matrix_screener_fields):
        print(f"Processing {tile_nr+1} out of {len(matrix_screener_fields)}:")
        print(field)
        stack, meta = get_field(field)
        affine = affine_matrix_template.copy()
        # Explanation for formula below:
        # Stage position in metadata appears to be in units of metres (m)
        # PhysicalSize appears to be micrometers per voxel (um/vox)
        # therefore for the stageposition in voxel coordinates we need to
        # scale from meters to um (factor 1000000) and then divide by um/vox
        # the direction vectors should be either 1 or -1 and can be used
        # to flip the direction of the coordinate axes.
        affine[1, 3] = (meta["Stage X"] * 1_000_000 / meta["PhysicalSize X"] *
                        direction_x)  # -2247191 #-2_000_000
        affine[0, 3] = (meta["Stage Y"] * 1_000_000 / meta["PhysicalSize Y"] *
                        direction_y)  # 2247191 #2_000_000

        if volume:
            _tmp_stack = np.copy(stack)
            bdv_vol_writer.append_view(
                _tmp_stack,
                time=0,
                channel=0,
                m_affine=affine,
                tile=tile_nr,
                name_affine=f"tile {tile_nr} translation",
                voxel_size_xyz=(meta["PhysicalSize X"], meta["PhysicalSize Y"],
                                zspacing),
                voxel_units="um",
                calibration=(1, 1, zspacing / meta["PhysicalSize X"]),
            )
        if projected:
            outstack = np.expand_dims(project_func(stack, axis=0), axis=0)
            bdv_proj_writer.append_view(
                outstack,
                time=0,
                channel=0,
                m_affine=affine,
                tile=tile_nr,
                name_affine=f"proj. tile {tile_nr} translation",
                # Projections are inherently 2D, so we just repeat the X voxel size for Z
                voxel_size_xyz=(
                    meta["PhysicalSize X"],
                    meta["PhysicalSize Y"],
                    meta["PhysicalSize X"],
                ),
                voxel_units="um",
                # calibration=(1, 1, 1),
            )

    if projected:
        bdv_proj_writer.write_xml_file(ntimes=1)
        bdv_proj_writer.close()
    if volume:
        bdv_vol_writer.write_xml_file(ntimes=1)
        bdv_vol_writer.close()
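A hypothetical usage sketch for the function above. The field paths and output file name are made up, and the fields are whatever the accompanying get_field() loader expects; only the keyword arguments come from the signature shown:

fields = ["/data/scan/field--X00--Y00", "/data/scan/field--X01--Y00"]   # made-up paths
save_files_for_bigstitcher(
    fields,
    projected=True,
    volume=False,
    h5_proj_name="/data/scan/stitch_proj.h5",   # made-up output name
    zspacing=2.0,        # um between z slices (not stored in the metadata)
    direction_x=-1,
    direction_y=1,
)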
Example #10
0
def main(argv):

    # parse directory name from command line argument 
    input_dir_string = ''
    output_dir_string = ''

    try:
        arguments, values = getopt.getopt(argv,"hi:o:n:c:",["help","ipath=","opath="])
    except getopt.GetoptError:
        print('Error. stage_recon.py -i <inputdirectory> -o <outputdirectory>')
        sys.exit(2)
    for current_argument, current_value in arguments:
        if current_argument == '-h':
            print('Usage. stage_recon.py -i <inputdirectory> -o <outputdirectory>')
            sys.exit()
        elif current_argument in ("-i", "--ipath"):
            input_dir_string = current_value
        elif current_argument in ("-o", "--opath"):
            output_dir_string = current_value
        
    if (input_dir_string == ''):
        print('Input parse error.')
        sys.exit(2)

    # Load data
    # this approach assumes data is generated by QI2lab pycromanager control code

    # https://docs.python.org/3/library/pathlib.html
    # Create Path object to directory
    input_dir_path=Path(input_dir_string)

    # determine number of directories in root directory. loop over each one.
    tile_dir_path = [f for f in input_dir_path.iterdir() if f.is_dir()]
    num_tiles = len(tile_dir_path)

    # TO DO: read text file with this information in root directory
    num_y = 73
    num_z = 3
    num_channels = 2
    split=40

    # create parameter array
    # [theta, stage move distance, camera pixel size]
    # units are [degrees,nm,nm]
    params=np.array([30,200,115],dtype=np.float32)

    # check if user provided output path
    if (output_dir_string==''):
        output_dir_path = input_dir_path
    else:
        output_dir_path = Path(output_dir_string)

    # https://github.com/nvladimus/npy2bdv
    # create BDV H5 file with sub-sampling for BigStitcher
    output_path = output_dir_path / 'full.h5'
    bdv_writer = npy2bdv.BdvWriter(str(output_path), nchannels=num_channels, ntiles=(num_z*num_y*split)+1, \
        subsamp=((1,1,1),(2,2,2),(4,4,4),(8,8,8),(16,16,16)),blockdim=((16, 16, 8),))

    # loop over each directory. Each directory will be placed as a "tile" into the BigStitcher file
    # TO DO: implement directory polling to do this in the background while data is being acquired.
    for tile in range(0,num_tiles):

        # load tile
        tile_dir_path_to_load = tile_dir_path[tile]
        print('Loading directory: '+str(tile_dir_path_to_load))

        # decode directory name to determine tile_id in h5. reverse Z order, normal y order
        test_string = tile_dir_path_to_load.parts[-1].split('_')
        for i in range(len(test_string)):
            if 'y0' in test_string[i]:
                y_idx = int(test_string[i].split('y')[1])
            if 'z0' in test_string[i]:
                z_idx = int(test_string[i].split('z')[1])

        tile_id = ((num_z-z_idx-1)*(num_y))+y_idx
        print('y index: '+str(y_idx)+' z index: '+str(z_idx)+' H5 tile id: '+str(tile_id))

        # https://pycro-manager.readthedocs.io/en/latest/read_data.html
        dataset = Dataset(tile_dir_path_to_load)
        
        # extract number of images in the tile
        num_x = len(dataset.axes['x'])
        num_x = num_x-10

        # loop over channels inside tile
        for channel_id in range(num_channels):

            # read images from dataset. Skip first 10 images for stage speed up
            sub_stack = np.zeros([num_x,256,1600])
            for i in range(num_x):
                sub_stack[i,:,:] = dataset.read_image(channel=channel_id, x=i+10, y=0, z=0, read_metadata=False)

            #TO DO: Integrate Microvolution hook here to do deconvolution on skewed data before deskewing.
            
            print('Deskew tile.')
            # run deskew
            deskewed = stage_deskew(data=sub_stack,parameters=params)
            del sub_stack
            gc.collect()

            # downsample by 2x in z due to oversampling when going from OPM to coverslip geometry
            deskewed_downsample = block_reduce(deskewed, block_size=(2,1,1), func=np.mean)
            del deskewed
            gc.collect()

            print('Split and write tiles.')
            # write BDV tile
            # https://github.com/nvladimus/npy2bdv 
            for split_id in range(split):
                # slice indices must be integers; give each substack ~10% overlap with the next
                size = num_x // split
                size_overlap = int(np.ceil(size * 1.1))

                if split_id == split-1:
                    deskewed_downsample_substack = deskewed_downsample[split_id*size:]
                else:
                    deskewed_downsample_substack = deskewed_downsample[split_id*size:split_id*size + size_overlap]

                bdv_writer.append_view(deskewed_downsample_substack, time=0, channel=channel_id, 
                                        tile=(tile_id*split)+split_id,
                                        voxel_size_xyz=(.115,.115,.200), voxel_units='um')

            # free up memory
            del deskewed_downsample
            gc.collect()

    # write BDV xml file
    # https://github.com/nvladimus/npy2bdv
    bdv_writer.write_xml_file(ntimes=1)
    bdv_writer.close()

    # clean up memory
    gc.collect()
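A small worked example of the tile-id mapping used above (z order reversed, y order kept); the additional offset by split_id is applied later, when each substack is appended:

num_y, num_z = 73, 3

def tile_index(y_idx, z_idx):
    # same expression as above: reverse z order, keep y order
    return ((num_z - z_idx - 1) * num_y) + y_idx

print(tile_index(0, 2))   # 0   -> last z plane maps to the first block of tiles
print(tile_index(5, 0))   # 151 -> first z plane maps to the last block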
Example #11
0
def main(argv):

    # parse directory name from command line argument 
    input_dir_string = ''
    output_dir_string = ''

    try:
        arguments, values = getopt.getopt(argv,"hi:o:n:c:",["help","ipath=","opath="])
    except getopt.GetoptError:
        print('Error. stage_recon.py -i <inputdirectory> -o <outputdirectory>')
        sys.exit(2)
    for current_argument, current_value in arguments:
        if current_argument == '-h':
            print('Usage. stage_recon.py -i <inputdirectory> -o <outputdirectory>')
            sys.exit()
        elif current_argument in ("-i", "--ipath"):
            input_dir_string = current_value
        elif current_argument in ("-o", "--opath"):
            output_dir_string = current_value
        
    if (input_dir_string == ''):
        print('Input parse error.')
        sys.exit(2)

    # Load data
    # this approach assumes data is generated by QI2lab MM script
    # the strategy is to sequentially activate each channel for each tile position
    # this allows for smooth stage scanning at a fast camera rate without having to synchronize
    # laser changing during stage scan

    # https://docs.python.org/3/library/pathlib.html
    # Create Path object to directory
    input_dir_path=Path(input_dir_string)

    # Parse directory for number of channels and strip positions then sort
    sub_dirs = [x for x in input_dir_path.iterdir() if x.is_dir()]
    sub_dirs = natsorted(sub_dirs, alg=ns.PATH)

    # TO DO: automatically determine number of channels and tile positions
    num_channels=1
    num_tiles=1

    # create parameter array
    # [theta, stage move distance, camera pixel size]
    # units are [degrees,nm,nm]
    params=np.array([30,100,116],dtype=np.float32)

    # check if user provided output path
    if (output_dir_string==''):
        output_dir_path = input_dir_path
    else:
        output_dir_path = Path(output_dir_string)

    # https://github.com/nvladimus/npy2bdv
    # create BDV H5 file with sub-sampling for BigStitcher
    # TO DO: modify npy2bdv to support B3D compression, https://git.embl.de/balazs/B3D
    #        this may involve change the underlying hdf5 install that h5py is using
    output_path = output_dir_path / 'deskewed.h5'
    bdv_writer = npy2bdv.BdvWriter(str(output_path), nchannels=num_channels, ntiles=2*num_tiles+1, \
        subsamp=((1,1,1),(4,8,4),(8,16,8),),blockdim=((16, 32, 16),))

    # loop over each directory. Each directory will be placed as a "tile" into the BigStitcher file
    # TO DO: implement directory polling to do this in the background while data is being acquired.
    for sub_dir in sub_dirs:

        # determine the channel this directory corresponds to
        m = re.search(r'ch(\d+)', str(sub_dir), re.IGNORECASE)
        channel_id = int(m.group(1))
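The example above is cut off here. For reference, the channel-id parsing can be checked in isolation with a made-up directory name:

import re

sub_dir = "/data/acq/strip_0003_ch561"    # hypothetical directory name
m = re.search(r'ch(\d+)', sub_dir, re.IGNORECASE)
print(int(m.group(1)))                    # 561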
Example #12
0
import time
import sys
import numpy as np
import npy2bdv

print("Example1: writing 2 time points and 2 channels")
fname = "./ex1_t2_ch2.h5"
bdv_writer = npy2bdv.BdvWriter(fname, nchannels=2, subsamp=((1, 1, 1), ))
rand_stack = np.random.randint(0, 100, size=(41, 1024, 2048), dtype='int16')
bdv_writer.append_view(rand_stack, time=0, channel=0)
bdv_writer.append_view(rand_stack, time=0, channel=1)
bdv_writer.append_view(rand_stack, time=1, channel=0)
bdv_writer.append_view(rand_stack, time=1, channel=1)
bdv_writer.write_xml_file(ntimes=2)
bdv_writer.close()
print("Random-generated data is written into " + fname + "\n")

print(
    "Example2: speed test for 20 time points and 2 channels. File size is 7 GB!"
)
fname = "./ex2_t20_chan2.h5"
bdv_writer = npy2bdv.BdvWriter(fname, nchannels=2, subsamp=((1, 1, 1), ))
ntimes = 20
nchannels = 2
start_time_total = time.time()
i_stacks = 0
time_list = []
for ichannel in range(nchannels):
    for itime in range(ntimes):
        start_time = time.time()
        bdv_writer.append_view(rand_stack, time=itime, channel=ichannel)
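The example is truncated here. Based on the variables it sets up (start_time, time_list, i_stacks, start_time_total), a plausible continuation of the timing loop might look like the sketch below; this is an assumption, not the original code:

        time_list.append(time.time() - start_time)
        i_stacks += 1

print(f"Mean time per stack: {np.mean(time_list):.3f} s "
      f"({i_stacks} stacks, total {time.time() - start_time_total:.1f} s)")
bdv_writer.write_xml_file(ntimes=ntimes)
bdv_writer.close()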
Example #13
0
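Example #13 is an excerpt and does not show its imports or its generate_test_image() helper. The lines below are an assumption added so the snippet runs as written: the imports it clearly uses, plus a stand-in test-image generator (a simple gradient); the original example defines its own version of this helper.

import os

import numpy as np
import npy2bdv


def generate_test_image(dim_yx):
    """Stand-in (assumed) helper: return a uint16 gradient image of shape dim_yx."""
    ny, nx = dim_yx
    yy, xx = np.mgrid[0:ny, 0:nx]
    return (((xx + yy) % 512) * 128).astype(np.uint16)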

######################
## 1. Basic writing ##
######################
print("Example1: writing 2 time points, 2 channels, 2 illuminations, 2 angles")
plane = generate_test_image((1024, 2048))
stack = []
for z in range(50):
    stack.append(plane)
stack = np.asarray(stack)

if not os.path.exists("./test"):
    os.mkdir("./test")
fname = "./test/ex1_t2_ch2_illum2_angle2.h5"
bdv_writer = npy2bdv.BdvWriter(fname, nchannels=2, nilluminations=2, nangles=2, subsamp=((1, 1, 1),))
for t in range(2):
    for i_ch in range(2):
        for i_illum in range(2):
            for i_angle in range(2):
                bdv_writer.append_view(stack, time=t, channel=i_ch, illumination=i_illum, angle=i_angle)

bdv_writer.write_xml_file(ntimes=2)
bdv_writer.close()
print("dataset in " + fname)

#########################
# 2. Writing speed test #
#########################
print("Example2: speed test for 20 time points and 2 channels. File size is 7 GB!")
ntimes = 20
Example #14
0
    def generate_big_stitcher(
        self,
        outfolder_base: str,
        projected: bool = True,
        volume: bool = True,
        xyspacing: float = 1.0,
        zspacing: float = 1.0,
        project_func=np.max,
        direction_x: int = 1,
        direction_y: int = -1,
    ):
        """Generate a big stitcher project (.xml/h5)
        
        Parameters
        ----------
        outfolder_base : str
            base folder for the output
        projected : bool, optional
            if True, create a project based on z-projected tiles, by default True
        volume : bool, optional
            if True create a project for the full volumes, by default True
        xyspacing : float, optional
            pixel spacing in x/y in um/pix, by default 1.0
        zspacing : float, optional
            pixel spacing in z in um/pix, by default 1.0
        project_func : callable, optional
            projection function for z-stacks if projected, by default np.max
        direction_x : int, optional
            can be used to flip coordinate axes; should be 1 or -1, by default 1
        direction_y : int, optional
            as above, by default -1
        """

        if not (projected or volume):
            print(
                "Neither 2D nor 3D project generation selected. Nothing to do."
            )
            return

        assert len(self.df) > 0, "No files found"
        print(f"Zspacing: {zspacing}")
        print(f"XYspacing: {xyspacing}")

        # get unique channels and illuminations
        # the index into these lists will be used to identify the dataset
        channels = list(map(str, self.df["ch"].unique())
                        )  # converting to string fixes problems with nan
        illuminations = list(map(str, self.df["illu"].unique()))

        # Group by stack
        grouped_stacks = self.df.groupby("first_Z")
        ntiles: int = len(grouped_stacks)
        nchannels: int = len(channels)
        nillu: int = len(illuminations)
        print(f"Processing {ntiles} tiles.")
        affine_matrix_template = np.array(
            ((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0)))

        if projected:
            h5_proj_name: str = self._generate_project_folder(
                outfolder_base, "projected")
            bdv_proj_writer = npy2bdv.BdvWriter(
                h5_proj_name,
                nchannels=nchannels,
                nilluminations=nillu,
                ntiles=ntiles,
                subsamp=((1, 1, 1), (1, 2, 2), (1, 4, 4), (1, 8, 8), (1, 16,
                                                                      16)),
                blockdim=((1, 64, 64), ),
                compression="gzip",
            )

        if volume:
            h5_vol_name: str = self._generate_project_folder(
                outfolder_base, "volume")
            bdv_vol_writer = npy2bdv.BdvWriter(
                h5_vol_name,
                nchannels=nchannels,
                nilluminations=nillu,
                ntiles=ntiles,
                subsamp=(
                    (1, 1, 1),
                    (1, 2, 2),
                    (1, 4, 4),
                    (1, 8, 8),
                    (2, 16, 16),
                    (4, 32, 32),
                ),
                blockdim=(
                    (64, 64, 64),
                    (64, 64, 64),
                    (64, 64, 64),
                    (64, 64, 64),
                    (32, 32, 32),
                    (16, 16, 16),
                ),
                compression="gzip",
            )

        for tile_nr, (grname, group) in enumerate(grouped_stacks):
            print(f"Processing {tile_nr+1} out of {ntiles}:")
            stack = readstack(group["pathname"].values, convertto=np.int16)

            print("finished reading stack")
            xyz = group["stagexyz"].values[0]
            print(f"xyz is {xyz}")
            affine = affine_matrix_template.copy()

            # Explanation for the formula below:
            # The stage position (assumed here to already be in um) is divided by
            # the pixel spacing (um/pix) to get the tile offset in voxel coordinates.
            # The direction factors should be either +1 or -1 and can be used
            # to flip the direction of the coordinate axes.
            affine[1, 3] = xyz[1] / xyspacing * direction_y
            affine[0, 3] = xyz[0] / xyspacing * direction_x

            ch = str(group["ch"].values[0])
            illu = str(group["illu"].values[0])
            ch_index = channels.index(ch)
            illu_index = illuminations.index(illu)

            if volume:
                bdv_vol_writer.append_view(
                    stack,
                    time=0,
                    channel=ch_index,
                    illumination=illu_index,
                    m_affine=affine,
                    tile=tile_nr,
                    name_affine=f"tile {tile_nr} translation",
                    voxel_size_xyz=(xyspacing, xyspacing, zspacing),
                    voxel_units="um",
                    calibration=(1, 1, zspacing / xyspacing),
                )
            if projected:
                outstack = np.expand_dims(project_func(stack, axis=0), axis=0)
                bdv_proj_writer.append_view(
                    outstack,
                    time=0,
                    channel=ch_index,
                    illumination=illu_index,
                    m_affine=affine,
                    tile=tile_nr,
                    name_affine=f"proj. tile {tile_nr} translation",
                    # Projections are inherently 2D, so we just repeat the X voxel size for Z
                    voxel_size_xyz=(xyspacing, xyspacing, xyspacing),
                    voxel_units="um",
                    # calibration=(1, 1, 1),
                )

        if projected:
            bdv_proj_writer.write_xml_file(ntimes=1)
            bdv_proj_writer.close()
        if volume:
            bdv_vol_writer.write_xml_file(ntimes=1)
            bdv_vol_writer.close()
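A worked example of the tile-translation affine built in the loop above; the pixel spacing, stage position, and direction values are made up:

import numpy as np

xyspacing = 0.5                      # um / pixel
stage_xy = (1000.0, -500.0)          # stage position, assumed to already be in um
direction_x, direction_y = 1, -1

affine = np.array(((1.0, 0.0, 0.0, 0.0),
                   (0.0, 1.0, 0.0, 0.0),
                   (0.0, 0.0, 1.0, 0.0)))
affine[0, 3] = stage_xy[0] / xyspacing * direction_x   # x shift: 2000 px
affine[1, 3] = stage_xy[1] / xyspacing * direction_y   # y shift: 1000 px
print(affine)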
Example #15
0
    def prepare_acquisition(self, acq, acq_list):
        self.folder = acq['folder']
        self.filename = acq['filename']
        self.path = self.folder + '/' + self.filename
        logger.info(f'Image Writer: Save path: {self.path}')

        _, self.file_extension = os.path.splitext(self.filename)

        self.binning_string = self.state[
            'camera_binning']  # Should return a string in the form '2x4'
        self.x_binning = int(self.binning_string[0])
        self.y_binning = int(self.binning_string[2])

        self.x_pixels = int(self.x_pixels / self.x_binning)
        self.y_pixels = int(self.y_pixels / self.y_binning)

        self.max_frame = acq.get_image_count()
        self.processing_options_string = acq['processing']

        if self.file_extension == '.h5':
            if hasattr(self.cfg, "hdf5"):
                subsamp = self.cfg.hdf5['subsamp']
                compression = self.cfg.hdf5['compression']
                flip_flags = self.cfg.hdf5['flip_xyz']
            else:
                subsamp = ((1, 1, 1), )
                compression = None
                flip_flags = (False, False, False)
            # create writer object if the view is first in the list
            if acq == acq_list[0]:
                self.bdv_writer = npy2bdv.BdvWriter(
                    self.path,
                    nilluminations=acq_list.get_n_shutter_configs(),
                    nchannels=acq_list.get_n_lasers(),
                    nangles=acq_list.get_n_angles(),
                    ntiles=acq_list.get_n_tiles(),
                    blockdim=((1, 256, 256), ),
                    subsamp=subsamp,
                    compression=compression)
            # x and y need to be exchanged to account for the image rotation
            shape = (self.max_frame, self.y_pixels, self.x_pixels)
            px_size_um = self.cfg.pixelsize[acq['zoom']]
            sign_xyz = (1 - np.array(flip_flags)) * 2 - 1
            affine_matrix = np.array(
                ((1.0, 0.0, 0.0, sign_xyz[0] * acq['x_pos'] / px_size_um),
                 (0.0, 1.0, 0.0, sign_xyz[1] * acq['y_pos'] / px_size_um),
                 (0.0, 0.0, 1.0,
                  sign_xyz[2] * acq['z_start'] / acq['z_step'])))
            self.bdv_writer.append_view(
                stack=None,
                virtual_stack_dim=shape,
                illumination=acq_list.find_value_index(acq['shutterconfig'],
                                                       'shutterconfig'),
                channel=acq_list.find_value_index(acq['laser'], 'laser'),
                angle=acq_list.find_value_index(acq['rot'], 'rot'),
                tile=acq_list.get_tile_index(acq),
                voxel_units='um',
                voxel_size_xyz=(px_size_um, px_size_um, acq['z_step']),
                calibration=(1.0, 1.0, acq['z_step'] / px_size_um),
                m_affine=affine_matrix,
                name_affine="Translation to Regular Grid")
        else:
            self.fsize = self.x_pixels * self.y_pixels
            self.xy_stack = np.memmap(self.path,
                                      mode="write",
                                      dtype=np.uint16,
                                      shape=self.fsize * self.max_frame)

        self.cur_image = 0
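A quick check of the flip-flag handling above: each False maps to +1 and each True maps to -1 along the corresponding axis.

import numpy as np

flip_flags = (False, True, False)            # hypothetical flip_xyz configuration
sign_xyz = (1 - np.array(flip_flags)) * 2 - 1
print(sign_xyz)                              # [ 1 -1  1]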
Example #16
0
def main(argv):

    # parse command line arguments
    parser = argparse.ArgumentParser(description="Process raw OPM data.")
    parser.add_argument("-i",
                        "--ipath",
                        type=str,
                        nargs="+",
                        help="supply the directories to be processed")
    parser.add_argument("-d",
                        "--decon",
                        type=int,
                        default=0,
                        help="0: no deconvolution (DEFAULT), 1: deconvolution")
    parser.add_argument("-f",
                        "--flatfield",
                        type=int,
                        default=0,
                        help="0: No flat field (DEFAULT), 1: flat field")
    parser.add_argument("-k",
                        "--deskew",
                        type=int,
                        default=1,
                        help="0: no deskewing, 1: deskewing (DEFAULT)")
    parser.add_argument(
        "-s",
        "--save_type",
        type=int,
        default=0,
        help="0: TIFF stack output (DEFAULT), 1: BDV output, 2: Zarr output")
    parser.add_argument(
        "-t",
        "--tilt_orientation",
        type=str,
        default='new',
        help="new: new orientation (DEFAULT), prev: previous orientation")
    parser.add_argument(
        "--time_steps",
        nargs='+',
        type=int,
        default=-1,
        help="-1: all time steps (DEFAULT), else list of time steps")
    parser.add_argument(
        "--channels",
        nargs='+',
        type=int,
        default=-1,
        help="-1: all channels (DEFAULT), else list of all channels")
    parser.add_argument(
        "--overwrite",
        type=int,
        default=0,
        help="0: do not overwrite existing folder (DEFAULT), 1: overwrite")
    args = parser.parse_args()

    input_dir_strings = args.ipath
    decon_flag = args.decon
    flatfield_flag = args.flatfield
    deskew_flag = args.deskew
    save_type = args.save_type
    tilt_orientation = args.tilt_orientation
    overwrite_flag = args.overwrite == 1

    # Loop over all user supplied directories for batch reconstruction
    for ii, input_dir_string in enumerate(input_dir_strings):
        print("Processing directory %d/%d" % (ii + 1, len(input_dir_strings)))

        # https://docs.python.org/3/library/pathlib.html
        # Create Path object to directory
        input_dir_path = Path(input_dir_string)

        # create parameter array from scan parameters saved by acquisition code
        df_metadata = data_io.read_metadata(
            input_dir_path.resolve().parents[0] / 'scan_metadata.csv')
        root_name = df_metadata['root_name']
        scan_type = df_metadata['scan_type']
        theta = df_metadata['theta']
        scan_step = df_metadata['scan_step']
        pixel_size = df_metadata['pixel_size']
        num_t = df_metadata['num_t']
        num_y = df_metadata['num_y']
        num_z = df_metadata['num_z']
        num_ch = df_metadata['num_ch']
        num_images = df_metadata['scan_axis_positions']
        excess_images = 0
        y_pixels = df_metadata['y_pixels']
        x_pixels = df_metadata['x_pixels']
        chan_405_active = df_metadata['405_active']
        chan_488_active = df_metadata['488_active']
        chan_561_active = df_metadata['561_active']
        chan_635_active = df_metadata['635_active']
        chan_730_active = df_metadata['730_active']
        active_channels = [
            chan_405_active, chan_488_active, chan_561_active, chan_635_active,
            chan_730_active
        ]
        channel_idxs = [0, 1, 2, 3, 4]
        channels_in_data = list(compress(channel_idxs, active_channels))
        n_active_channels = len(channels_in_data)
        if num_ch != n_active_channels:
            print(
                'Channel setup error. Check metadata file and directory names.'
            )
            sys.exit()

        # calculate pixel sizes of deskewed image in microns
        deskewed_x_pixel = pixel_size / 1000.
        deskewed_y_pixel = pixel_size / 1000.
        deskewed_z_pixel = pixel_size / 1000.
        print('Deskewed pixel sizes before downsampling (um). x=' +
              str(deskewed_x_pixel) + ', y=' + str(deskewed_y_pixel) + ', z=' +
              str(deskewed_z_pixel) + '.')

        # amount of down sampling in z
        z_down_sample = 1

        # load dataset
        if str(input_dir_path).endswith('zarr'):
            dataset = zarr.open(input_dir_path, mode='r')
            im_type = 'zarr'
        else:
            dataset = Dataset(str(input_dir_path))
            im_type = 'pycro'

        # create output directory
        im_processes = []
        if decon_flag == 1:
            im_processes.append('decon')
        if flatfield_flag == 1:
            im_processes.append('flatfield')
        if deskew_flag == 1:
            im_processes.append('deskew')
        if len(im_processes) == 0:
            str_processes = 'original_output'
        else:
            str_processes = '_'.join(im_processes) + '_output'
        input_dir_path = Path(input_dir_string)
        output_dir_path = input_dir_path.resolve().parents[0] / str_processes
        output_dir_path.mkdir(parents=True, exist_ok=True)

        # initialize counters
        timepoints_in_data = list(range(num_t))
        ch_in_BDV = list(range(n_active_channels))
        em_wavelengths = [.450, .520, .580, .670, .780]

        # if specific time steps or channels are provided, we use them only
        # by default -1, list of int if provided by user
        if not isinstance(args.time_steps, int):
            timepoints_in_data = args.time_steps
            num_t = len(timepoints_in_data)
        if not isinstance(args.channels, int):
            ch_in_BDV = args.channels
            num_ch = len(ch_in_BDV)

        # Create TIFF if requested
        if (save_type == 0):
            # create directory for data type
            tiff_output_dir_path = output_dir_path / Path('tiff')
            tiff_output_dir_path.mkdir(parents=True, exist_ok=overwrite_flag)
        # Create BDV if requested
        elif (save_type == 1):
            # create directory for data type
            bdv_output_dir_path = output_dir_path / Path('bdv')
            bdv_output_dir_path.mkdir(parents=True, exist_ok=overwrite_flag)

            # https://github.com/nvladimus/npy2bdv
            # create BDV H5 file with sub-sampling for BigStitcher
            bdv_output_path = bdv_output_dir_path / Path(root_name + '_bdv.h5')
            bdv_writer = npy2bdv.BdvWriter(str(bdv_output_path),
                                           nchannels=num_ch,
                                           ntiles=1,
                                           subsamp=((1, 1, 1), ),
                                           blockdim=((16, 16, 16), ))

            # create blank affine transformation to use for stage translation
            unit_matrix = np.array((
                (1.0, 0.0, 0.0, 0.0),   # change the 4th value for x_translation (px)
                (0.0, 1.0, 0.0, 0.0),   # change the 4th value for y_translation (px)
                (0.0, 0.0, 1.0, 0.0)))  # change the 4th value for z_translation (px)
        # Create Zarr if requested
        elif (save_type == 2):
            # create directory for data type
            zarr_output_dir_path = output_dir_path / Path('zarr')
            zarr_output_dir_path.mkdir(parents=True, exist_ok=overwrite_flag)

            # create name for zarr directory
            zarr_output_path = zarr_output_dir_path / Path(root_name +
                                                           '_zarr.zarr')

            # calculate size of one volume
            # change step size from physical space (nm) to camera space (pixels)
            pixel_step = scan_step / pixel_size  # (pixels)

            # calculate the number of pixels scanned during stage scan
            scan_end = num_images * pixel_step  # (pixels)

            # calculate properties for final image
            ny = np.int64(
                np.ceil(scan_end +
                        y_pixels * np.cos(theta * np.pi / 180)))  # (pixels)
            nz = np.int64(np.ceil(y_pixels *
                                  np.sin(theta * np.pi / 180)))  # (pixels)
            nx = np.int64(x_pixels)  # (pixels)

            # create and open zarr file
            root = zarr.open(str(zarr_output_path), mode="w")
            opm_data = root.zeros("opm_data",
                                  shape=(num_t, num_ch, nz, ny, nx),
                                  chunks=(1, 1, 32, 256, 256),
                                  dtype=np.uint16)
            # re-open for read/write access ("rw" is not a valid zarr persistence mode)
            root = zarr.open(str(zarr_output_path), mode="r+")
            opm_data = root["opm_data"]

        # if retrospective flatfield is requested, import and open pyimagej in interactive mode
        # TO DO: need to fix for new call
        if flatfield_flag == 1:
            from image_post_processing import manage_flat_field

        # if decon is requested, import microvolution wrapper
        if decon_flag == 1:
            from image_post_processing import lr_deconvolution

        # loop over all timepoints and channels
        for (t_idx, ch_BDV_idx) in product(timepoints_in_data, ch_in_BDV):

            ch_idx = channels_in_data[ch_BDV_idx]

            # pull data stack into memory
            print('Process timepoint ' + str(t_idx) + '; channel ' +
                  str(ch_BDV_idx) + '.')
            if im_type == 'pycro':
                raw_data = data_io.return_data_numpy(dataset, t_idx,
                                                     ch_BDV_idx, num_images,
                                                     excess_images, y_pixels,
                                                     x_pixels)
            elif im_type == 'zarr':
                raw_data = dataset[t_idx, ch_BDV_idx, :, :, :]

            # run deconvolution on skewed image
            if decon_flag == 1:
                print('Deconvolve.')
                em_wvl = em_wavelengths[ch_idx]
                channel_opm_psf = data_io.return_opm_psf(em_wvl)
                if tilt_orientation == 'new':
                    channel_opm_psf = np.flip(channel_opm_psf, axis=1)
                #decon = mv_lr_decon(image=raw_data,psf=channel_opm_psf,iterations=50)
                decon = lr_deconvolution(image=raw_data,
                                         psf=channel_opm_psf,
                                         iterations=50)
            else:
                decon = raw_data
            del raw_data
            gc.collect()

            # perform flat-fielding
            if flatfield_flag == 0:
                corrected_stack = decon
            else:
                print('Flatfield.')
                corrected_stack, flat_field, dark_field = manage_flat_field(
                    decon, ij)
            del decon
            gc.collect()

            # deskew raw_data
            if deskew_flag == 1:
                print('Deskew.')
                if tilt_orientation == 'new':
                    deskewed = deskew(data=np.flip(corrected_stack, axis=1),
                                      theta=theta,
                                      distance=scan_step,
                                      pixel_size=pixel_size)
                else:
                    deskewed = deskew(data=np.flip(corrected_stack, axis=0),
                                      theta=theta,
                                      distance=scan_step,
                                      pixel_size=pixel_size)
            else:
                if tilt_orientation == 'new':
                    deskewed = np.flip(corrected_stack, axis=1)
                else:
                    deskewed = np.flip(corrected_stack, axis=0)
            del corrected_stack
            gc.collect()

            # downsample in z due to oversampling when going from OPM to coverslip geometry
            if z_down_sample == 1:
                downsampled = deskewed
            else:
                print('Downsample.')
                downsampled = block_reduce(deskewed,
                                           block_size=(z_down_sample, 1, 1),
                                           func=np.mean)
            del deskewed
            gc.collect()

            # save deskewed image into TIFF stack
            if (save_type == 0):
                print('Write TIFF stack')
                tiff_filename = 'f_' + root_name + '_c' + str(ch_idx).zfill(
                    3) + '_t' + str(t_idx).zfill(5) + '.tiff'
                tiff_output_path = tiff_output_dir_path / Path(tiff_filename)
                tifffile.imwrite(str(tiff_output_path),
                                 downsampled.astype(np.int16),
                                 imagej=True,
                                 resolution=(1 / deskewed_x_pixel,
                                             1 / deskewed_y_pixel),
                                 metadata={
                                     'unit': 'um',
                                     'axes': 'ZYX'
                                 })
            # save tile in BDV H5 with actual stage positions
            elif (save_type == 1):
                print('Write data into BDV H5.')
                bdv_writer.append_view(
                    downsampled,
                    time=t_idx,
                    channel=ch_BDV_idx,
                    tile=0,
                    voxel_size_xyz=(deskewed_x_pixel, deskewed_y_pixel,
                                    z_down_sample * deskewed_z_pixel),
                    voxel_units='um')

            # save deskewed image into Zarr container
            elif (save_type == 2):
                print('Write data into Zarr container')
                opm_data[t_idx, ch_BDV_idx, :, :, :] = downsampled

            # free up memory
            del downsampled
            gc.collect()

        if (save_type == 1):
            # write BDV xml file
            # https://github.com/nvladimus/npy2bdv
            bdv_writer.write_xml()
            bdv_writer.close()

    # shut down pyimagej
    if flatfield_flag == 1:
        ij.getContext().dispose()

    # exit
    print('Finished.')
    sys.exit()
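For reference, a small worked example of the deskewed-volume geometry computed for the Zarr output above; the scan values below are made up:

import numpy as np

theta = 30.0          # deg
scan_step = 400.0     # nm per stage step
pixel_size = 115.0    # nm per camera pixel
num_images, y_pixels, x_pixels = 1000, 256, 1600

pixel_step = scan_step / pixel_size                  # stage step in camera pixels
scan_end = num_images * pixel_step                   # scanned length in pixels
ny = np.int64(np.ceil(scan_end + y_pixels * np.cos(theta * np.pi / 180)))
nz = np.int64(np.ceil(y_pixels * np.sin(theta * np.pi / 180)))
nx = np.int64(x_pixels)
print(ny, nz, nx)     # 3700 128 1600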