Example #1
import argparse
import gc
import sys
import time
from itertools import compress, product
from pathlib import Path

import numpy as np
import npy2bdv
import tifffile
import zarr
from pycromanager import Dataset
from skimage.measure import block_reduce

import data_io  # local module from the acquisition package
from image_post_processing import deskew  # local module (assumed location)


def main(argv):

    # parse command line arguments
    parser = argparse.ArgumentParser(description="Process raw OPM data.")
    parser.add_argument("-i",
                        "--ipath",
                        type=str,
                        help="supply the directory to be processed")
    parser.add_argument("-d",
                        "--decon",
                        type=int,
                        default=0,
                        help="0: no deconvolution (DEFAULT), 1: deconvolution")
    parser.add_argument(
        "-f",
        "--flatfield",
        type=int,
        default=0,
        help=
        "0: No flat field (DEFAULT), 1: flat field (FIJI) 2: flat field (python)"
    )
    parser.add_argument(
        "-s",
        "--save_type",
        type=int,
        default=1,
        help="0: TIFF stack output, 1: BDV output (DEFAULT), 2: Zarr output")
    parser.add_argument(
        "-z",
        "--z_down_sample",
        type=int,
        default=1,
        help="1: No downsampling (DEFAULT), n: Nx downsampling")
    args = parser.parse_args()

    input_dir_string = args.ipath
    decon_flag = args.decon
    flatfield_flag = args.flatfield
    save_type = args.save_type
    z_down_sample = args.z_down_sample

    # https://docs.python.org/3/library/pathlib.html
    # Create Path object to directory
    input_dir_path = Path(input_dir_string)

    # create parameter array from scan parameters saved by acquisition code
    df_metadata = data_io.read_metadata(input_dir_path /
                                        Path('scan_metadata.csv'))
    root_name = df_metadata['root_name']
    scan_type = df_metadata['scan_type']
    theta = df_metadata['theta']
    scan_step = df_metadata['scan_step']
    pixel_size = df_metadata['pixel_size']
    num_t = df_metadata['num_t']
    num_y = df_metadata['num_y']
    num_z = df_metadata['num_z']
    num_ch = df_metadata['num_ch']
    num_images = df_metadata['scan_axis_positions']
    y_pixels = df_metadata['y_pixels']
    x_pixels = df_metadata['x_pixels']
    chan_405_active = df_metadata['405_active']
    chan_488_active = df_metadata['488_active']
    chan_561_active = df_metadata['561_active']
    chan_635_active = df_metadata['635_active']
    chan_730_active = df_metadata['730_active']
    active_channels = [
        chan_405_active, chan_488_active, chan_561_active, chan_635_active,
        chan_730_active
    ]
    channel_idxs = [0, 1, 2, 3, 4]
    channels_in_data = list(compress(channel_idxs, active_channels))
    n_active_channels = len(channels_in_data)
    if num_ch != n_active_channels:
        print('Channel setup error. Check metadata file and directory names.')
        sys.exit()

    # calculate pixel sizes of deskewed image in microns
    deskewed_x_pixel = pixel_size / 1000.
    deskewed_y_pixel = pixel_size / 1000.
    deskewed_z_pixel = pixel_size / 1000.
    print('Deskewed pixel sizes before downsampling (um). x=' +
          str(deskewed_x_pixel) + ', y=' + str(deskewed_y_pixel) + ', z=' +
          str(deskewed_z_pixel) + '.')

    # create output directory
    if decon_flag == 0 and flatfield_flag == 0:
        output_dir_path = input_dir_path / 'deskew_output'
    elif decon_flag == 0 and flatfield_flag > 0:
        output_dir_path = input_dir_path / 'deskew_flatfield_output'
    elif decon_flag == 1 and flatfield_flag == 0:
        output_dir_path = input_dir_path / 'deskew_decon_output'
    elif decon_flag == 1 and flatfield_flag > 0:
        output_dir_path = input_dir_path / 'deskew_flatfield_decon_output'
    output_dir_path.mkdir(parents=True, exist_ok=True)

    # Create TIFF if requested
    if (save_type == 0):
        # create directory for data type
        tiff_output_dir_path = output_dir_path / Path('tiff')
        tiff_output_dir_path.mkdir(parents=True, exist_ok=True)
    # Create BDV if requested
    elif (save_type == 1):
        # create directory for data type
        bdv_output_dir_path = output_dir_path / Path('bdv')
        bdv_output_dir_path.mkdir(parents=True, exist_ok=True)

        # https://github.com/nvladimus/npy2bdv
        # create BDV H5 file with sub-sampling for BigStitcher
        bdv_output_path = bdv_output_dir_path / Path(root_name + '_bdv.h5')
        bdv_writer = npy2bdv.BdvWriter(str(bdv_output_path),
                                       nchannels=num_ch,
                                       ntiles=num_y * num_z,
                                       subsamp=((1, 1, 1), (4, 8, 8), (8, 16,
                                                                       16)),
                                       blockdim=((32, 128, 128), ),
                                       compression=None)

        # create blank affine transformation to use for stage translation
        unit_matrix = np.array((
            (1.0, 0.0, 0.0, 0.0),  # change the 4th value for x_translation (px)
            (0.0, 1.0, 0.0, 0.0),  # change the 4th value for y_translation (px)
            (0.0, 0.0, 1.0, 0.0)))  # change the 4th value for z_translation (px)
    # Create Zarr if requested
    elif (save_type == 2):
        # create directory for data type
        zarr_output_dir_path = output_dir_path / Path('zarr')
        zarr_output_dir_path.mkdir(parents=True, exist_ok=True)

        # create name for zarr directory
        zarr_output_path = zarr_output_dir_path / Path(root_name +
                                                       '_zarr.zarr')

        # calculate size of one volume
        # change step size from physical space (nm) to camera space (pixels)
        pixel_step = scan_step / pixel_size  # (pixels)

        # calculate the number of pixels scanned during stage scan
        scan_end = num_images * pixel_step  # (pixels)

        # calculate properties for final image
        ny = np.int64(
            np.ceil(scan_end +
                    y_pixels * np.cos(theta * np.pi / 180)))  # (pixels)
        nz = np.int64(np.ceil(y_pixels *
                              np.sin(theta * np.pi / 180)))  # (pixels)
        nx = np.int64(x_pixels)  # (pixels)

        # create and open zarr file; Group.zeros() returns the created array
        root = zarr.open(str(zarr_output_path), mode="w")
        opm_data = root.zeros("opm_data",
                              shape=(num_t, num_y * num_z, num_ch, nz, ny, nx),
                              chunks=(1, 1, 1, 32, 128, 128),
                              dtype=np.uint16)

    # if retrospective flatfield is requested, import and open pyimagej in interactive mode
    # because BaSiC flat-fielding plugin cannot run in headless mode
    if flatfield_flag == 1:
        from image_post_processing import manage_flat_field
        import imagej
        import scyjava

        scyjava.config.add_option('-Xmx12g')
        plugins_dir = Path('/home/dps/Fiji.app/plugins')
        scyjava.config.add_option(f'-Dplugins.dir={str(plugins_dir)}')
        ij_path = Path('/home/dps/Fiji.app')
        ij = imagej.init(str(ij_path), headless=False)
        ij.ui().showUI()
        print(
            'PyImageJ approach to flat fielding will be removed soon. Switch to the GPU-accelerated Python BaSiC code (-f 2).'
        )
    elif flatfield_flag == 2:
        from image_post_processing import manage_flat_field_py

    # if decon is requested, import microvolution wrapper
    if decon_flag == 1:
        from image_post_processing import mv_decon

    # initialize counters
    timepoints_in_data = list(range(num_t))
    y_tile_in_data = list(range(num_y))
    z_tile_in_data = list(range(num_z))
    ch_in_BDV = list(range(n_active_channels))
    tile_idx = 0

    # loop over all directories. Each directory will be placed as a "tile" into the BigStitcher file
    for (y_idx, z_idx) in product(y_tile_in_data, z_tile_in_data):
        for (t_idx, ch_BDV_idx) in product(timepoints_in_data, ch_in_BDV):

            ch_idx = channels_in_data[ch_BDV_idx]

            # open stage positions file
            stage_position_filename = Path('t' + str(t_idx).zfill(4) + '_y' +
                                           str(y_idx).zfill(4) + '_z' +
                                           str(z_idx).zfill(4) + '_ch' +
                                           str(ch_idx).zfill(4) +
                                           '_stage_positions.csv')
            stage_position_path = input_dir_path / stage_position_filename
            # poll until the stage position file exists
            while not stage_position_path.exists():
                time.sleep(60)

            df_stage_positions = data_io.read_metadata(stage_position_path)

            stage_x = np.round(float(df_stage_positions['stage_x']), 2)
            stage_y = np.round(float(df_stage_positions['stage_y']), 2)
            stage_z = np.round(float(df_stage_positions['stage_z']), 2)
            print('y tile ' + str(y_idx + 1) + ' of ' + str(num_y) +
                  '; z tile ' + str(z_idx + 1) + ' of ' + str(num_z) +
                  '; channel ' + str(ch_BDV_idx + 1) + ' of ' +
                  str(n_active_channels))
            print('Stage location (um): x=' + str(stage_x) + ', y=' +
                  str(stage_y) + ', z=' + str(stage_z) + '.')

            # construct directory name
            current_tile_dir_path = Path(root_name + '_t' +
                                         str(t_idx).zfill(4) + '_y' +
                                         str(y_idx).zfill(4) + '_z' +
                                         str(z_idx).zfill(4) + '_ch' +
                                         str(ch_idx).zfill(4) + '_1')
            tile_dir_path_to_load = input_dir_path / current_tile_dir_path

            # https://pycro-manager.readthedocs.io/en/latest/read_data.html
            dataset = Dataset(str(tile_dir_path_to_load))
            raw_data = data_io.return_data_numpy(dataset=dataset,
                                                 time_axis=None,
                                                 channel_axis=None,
                                                 num_images=num_images,
                                                 y_pixels=y_pixels,
                                                 x_pixels=x_pixels)

            # perform flat-fielding
            if flatfield_flag == 1:
                print('Flatfield.')
                corrected_stack = manage_flat_field(raw_data, ij)
            elif flatfield_flag == 2:
                corrected_stack = manage_flat_field_py(raw_data)
            else:
                corrected_stack = raw_data
            del raw_data

            # deskew
            print('Deskew.')
            deskewed = deskew(data=np.flipud(corrected_stack),
                              theta=theta,
                              distance=scan_step,
                              pixel_size=pixel_size)
            del corrected_stack

            # downsample in z due to oversampling when going from OPM to coverslip geometry
            if z_down_sample > 1:
                print('Downsample.')
                deskewed_downsample = block_reduce(deskewed,
                                                   block_size=(z_down_sample,
                                                               1, 1),
                                                   func=np.mean)
            else:
                deskewed_downsample = deskewed
            del deskewed

            # run deconvolution on deskewed image
            if decon_flag == 1:
                print('Deconvolve.')
                deskewed_downsample_decon = mv_decon(
                    deskewed_downsample, ch_idx, deskewed_y_pixel,
                    z_down_sample * deskewed_z_pixel)
            else:
                deskewed_downsample_decon = deskewed_downsample
            del deskewed_downsample

            # save deskewed image into TIFF stack
            if (save_type == 0):
                print('Write TIFF stack')
                tiff_filename = root_name + '_t' + str(t_idx).zfill(
                    3) + '_p' + str(tile_idx).zfill(4) + '_c' + str(
                        ch_idx).zfill(3) + '.tiff'
                tiff_output_path = tiff_output_dir_path / Path(tiff_filename)
                tifffile.imwrite(str(tiff_output_path),
                                 deskewed_downsample_decon,
                                 imagej=True,
                                 resolution=(1 / deskewed_x_pixel,
                                             1 / deskewed_y_pixel),
                                 metadata={
                                     'spacing':
                                     (z_down_sample * deskewed_z_pixel),
                                     'unit': 'um',
                                     'axes': 'ZYX'
                                 })

                metadata_filename = root_name + '_t' + str(t_idx).zfill(
                    3) + '_p' + str(tile_idx).zfill(4) + '_c' + str(
                        ch_idx).zfill(3) + '.csv'
                metadata_output_path = tiff_output_dir_path / Path(
                    metadata_filename)
                tiff_stage_metadata = [{
                    'stage_x': float(stage_x),
                    'stage_y': float(stage_y),
                    'stage_z': float(stage_z)
                }]
                data_io.write_metadata(tiff_stage_metadata[0],
                                       metadata_output_path)

            elif (save_type == 1):
                # create affine transformation for stage translation
                # swap x & y from instrument to BDV
                affine_matrix = unit_matrix.copy()  # copy so the blank matrix is not mutated
                affine_matrix[0, 3] = stage_y / deskewed_y_pixel  # x-translation
                affine_matrix[1, 3] = stage_x / deskewed_x_pixel  # y-translation
                affine_matrix[2, 3] = -stage_z / (z_down_sample *
                                                  deskewed_z_pixel)  # z-translation

                # save tile in BDV H5 with actual stage positions
                print('Write into BDV H5.')
                bdv_writer.append_view(
                    deskewed_downsample_decon,
                    time=0,
                    channel=ch_BDV_idx,
                    tile=tile_idx,
                    voxel_size_xyz=(deskewed_x_pixel, deskewed_y_pixel,
                                    z_down_sample * deskewed_z_pixel),
                    voxel_units='um',
                    calibration=(1, 1, (z_down_sample * deskewed_z_pixel) /
                                 deskewed_y_pixel),
                    m_affine=affine_matrix,
                    name_affine='tile ' + str(tile_idx) + ' translation')

            elif (save_type == 2):
                print('Write data into Zarr container')
                opm_data[t_idx, tile_idx,
                         ch_BDV_idx, :, :, :] = deskewed_downsample_decon
                metadata_filename = root_name + '_t' + str(t_idx).zfill(
                    3) + '_p' + str(tile_idx).zfill(4) + '_c' + str(
                        ch_idx).zfill(3) + '.csv'
                metadata_output_path = zarr_output_dir_path / Path(
                    metadata_filename)
                zarr_stage_metadata = [{
                    'stage_x': float(stage_x),
                    'stage_y': float(stage_y),
                    'stage_z': float(stage_z)
                }]
                data_io.write_metadata(zarr_stage_metadata[0],
                                       metadata_output_path)

            # free up memory
            del deskewed_downsample_decon
            gc.collect()

        tile_idx = tile_idx + 1

    if (save_type == 1):
        # write BDV xml file
        # https://github.com/nvladimus/npy2bdv
        bdv_writer.write_xml()
        bdv_writer.close()

    # shut down pyimagej
    if (flatfield_flag == 1):
        ij.getContext().dispose()

    # exit
    print('Finished.')
    sys.exit()
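
# --- hedged usage sketch (not part of the original script) ---
# A minimal entry point, assuming this file is executed directly. The flags
# mirror the argparse options defined above; the script name and data path
# are hypothetical:
#
#   python recon_opm_stagescan.py -i /data/opm_run_0001 -d 1 -f 2 -s 1 -z 2
if __name__ == '__main__':
    main(sys.argv[1:])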
Example #2
import numpy as np
from pycromanager import Dataset
import napari

#This path is to the top level of the magellan dataset (i.e. the one that contains the Full resolution folder)
# data_path = '/Users/henrypinkard/megllandump/l_axis_1'
# data_path = '/Users/henrypinkard/megllandump/l_axis_3'
data_path = '/Users/henrypinkard/megllandump/experiment_1_11'

#open the dataset
dataset = Dataset(data_path)

#read tiles or tiles + metadata by channel, slice, time, and position indices
#img is a numpy array and img_metadata is a python dictionary
img, img_metadata = dataset.read_image(l=10, read_metadata=True)

dask_array = dataset.as_array(stitched=True)

with napari.gui_qt():
    v = napari.Viewer()
    v.add_image(dask_array)
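
# A short follow-up sketch (assumption: as_array() returns a lazy dask array
# with a leading axis): materialize one frame as a numpy array for further
# processing outside the viewer.
first_frame = np.asarray(dask_array[0])
print(first_frame.shape, first_frame.dtype)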
Example #3
import os
import re
from math import sqrt
from pathlib import Path

import cv2
import numpy as np
from pycromanager import Acquisition, Dataset, multi_d_acquisition_events

# pos_list, directoryPATH, nameofSAVEDFILE and hook_fn are assumed to be
# defined at module level (e.g. pos_list from the Micro-Manager position list)


def acquireImage(channelGroup, channelName, hook):

    x_array = []
    y_array = []
    z_array = []

    for idx in range(pos_list.get_number_of_positions()):
        pos = pos_list.get_position(idx)
        #pos.go_to_position(pos, mmc)

        x = pos_list.get_position(idx).get(0).x
        y = pos_list.get_position(idx).get(0).y
        z = pos_list.get_position(idx).get(1).x

        x_array.append(x)
        y_array.append(y)
        z_array.append(z)

    x_array = np.array(x_array)
    y_array = np.array(y_array)
    z_array = np.array(z_array)

    with Acquisition(directory=directoryPATH,
                     name=nameofSAVEDFILE,
                     post_hardware_hook_fn=hook,
                     post_camera_hook_fn=hook_fn) as acq:
        # generate one acquisition event per XYZ position
        xyz = np.hstack([x_array[:, None], y_array[:, None], z_array[:, None]])
        events = multi_d_acquisition_events(xyz_positions=xyz,
                                            channel_group=channelGroup,
                                            channels=[channelName])
        acq.acquire(events)
        #acquire a 2 x 1 grid
        #acq.acquire({'row': 0, 'col': 0})
        #acq.acquire({'row': 1, 'col': 0})

    stackfolder = "**/*"
    folder = Path(directoryPATH)
    foldernames = []
    for name in folder.glob('saving_name_*'):
        print(name.stem)
        foldernames.append(name.stem)
    # keep the highest numeric suffix (pycromanager appends _1, _2, ... to the save name)
    maximum = 1
    for file in foldernames:
        number = int(re.search(nameofSAVEDFILE + "_" + r'(\d*)', file).group(1))
        maximum = number if number > maximum else maximum
        print(number)

    highest = nameofSAVEDFILE + "_" + str(maximum)

    data_path = os.path.join(folder, highest)

    dataset = Dataset(data_path)
    print(dataset.axes)
    print("data_path", data_path)

    length = len(xyz)

    dataset_metadata = dataset.read_metadata(channel=0, position=1)
    print(dataset_metadata)
    pos = dataset_metadata["Axes"]["position"]
    print(pos)
    if dataset:
        sizeimg = dataset.read_image(channel=0, position=0)
        sizeimg = cv2.cvtColor(sizeimg, cv2.COLOR_GRAY2RGB)
        h, w, c = sizeimg.shape
    # side length of the (square) position grid
    length = int(sqrt(length))
    blank_image = np.zeros((h * (length + 2), w * (length + 2), 3), np.uint16)

    print("image size ", blank_image.shape)

    pixelsizeinum = dataset_metadata["PixelSizeUm"]  #get size of pixel in um
    print(pixelsizeinum)
    """
    for datarow in range(10):
        for datacolumn in range(10):
            metadata = dataset.read_metadata(row=datarow, col=datacolumn)
            if(metadata["Axes"]["position"]>=0):
                pos=metadata["Axes"]["position"]
                #print(pos)
                img = dataset.read_image(position=pos)
            
    
                img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
                cv2.imshow("test",img)
    """
    xtotaloffset = 0
    ytotaloffset = 0
    # loop over all positions in the Micro-Manager position list
    for dataposition in range(len(xyz)):
        print(dataposition)
        metadata = dataset.read_metadata(channel=0, position=dataposition)
        img = dataset.read_image(channel=0, position=dataposition)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        img = cv2.flip(img, 1)
        xoffset_um = metadata["XPosition_um_Intended"]
        yoffset_um = metadata["YPosition_um_Intended"]

        print("Intended location is : ", xoffset_um, yoffset_um)
        # cv2.imshow("test",img)
        # cv2.waitKey(0)
        xoffset_px = (xoffset_um - dataset.read_metadata(
            channel=0, position=0)['XPosition_um_Intended']) / pixelsizeinum
        yoffset_px = (yoffset_um - dataset.read_metadata(
            channel=0, position=0)['YPosition_um_Intended']) / pixelsizeinum
        xoffset_px = int(xoffset_px)
        print("Xoffset ", xoffset_px)
        #print("img max X ",blank_image.shape[0])
        yoffset_px = int(yoffset_px)
        print("Yoffset ", yoffset_px)
        #print("img max Y ",blank_image.shape[1])

        alpha = 0
        blank_image[xoffset_px:xoffset_px + (img.shape[1]),
                    yoffset_px:yoffset_px + (img.shape[0])] = cv2.addWeighted(
                        blank_image[xoffset_px:xoffset_px + (img.shape[1]),
                                    yoffset_px:yoffset_px + (img.shape[0])],
                        alpha, img, 1 - alpha, 0)
        #blank_image[:yoffset_px+img.shape[0], :xoffset_px+img.shape[1]] = img
        #blank_image = cv2.addWeighted(blank_image[yoffset_px:yoffset_px+img.shape[0], xoffset_px:xoffset_px+img.shape[1]],img)

    ####################
    #printout only ignore
    ####################
    scale_percent = 5
    width = int(blank_image.shape[1] * scale_percent / 100)
    height = int(blank_image.shape[0] * scale_percent / 100)
    dim = (width, height)

    resized = cv2.resize(blank_image, dim, interpolation=cv2.INTER_AREA)
    '''
    #show image
    winname = "test"
    cv2.namedWindow(winname)        # Create a named window
    cv2.moveWindow(winname, 1000,1000)  # Move it to (40,30)

    cv2.imshow(winname, resized)
    cv2.waitKey(0)
    '''
    blank_image = cv2.cvtColor(blank_image, cv2.COLOR_BGR2GRAY)
    return blank_image, pixelsizeinum
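

# --- hedged driver sketch (not part of the original) ---
# Calls acquireImage() and writes the stitched mosaic to disk. Assumes that
# pos_list, directoryPATH, nameofSAVEDFILE and hook_fn are defined at module
# level, as the function body expects; the channel group and name are
# hypothetical.
if __name__ == '__main__':
    mosaic, pixel_um = acquireImage('Channel', 'DAPI', hook_fn)
    cv2.imwrite(str(Path(directoryPATH) / 'stitched_preview.tiff'), mosaic)
    print('pixel size (um):', pixel_um)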
Example #4
import cv2
import numpy as np
from pycromanager import Dataset


def direct_stitch(data_path):
    dataset = Dataset(data_path)
    print(dataset.axes)
    dataset_metadata = dataset.read_metadata(channel=0, position=0)
    print(dataset_metadata)
    pos = dataset_metadata["Axes"]["position"]
    print(pos)
    if dataset:
        sizeimg = dataset.read_image(channel=0, position=0)
        sizeimg = cv2.cvtColor(sizeimg, cv2.COLOR_GRAY2RGB)
        h, w, c = sizeimg.shape
    length = 10  #size of the grid (row or column should be same technically)
    blank_image = np.ones((h * (length + 1), w * (length + 1), 3), np.uint16)

    print("image size ", blank_image.shape)
    print(dataset_metadata)
    pixelsizeinum = dataset_metadata["PixelSizeUm"]  #get size of pixel in um
    print(pixelsizeinum)
    """
    for datarow in range(10):
        for datacolumn in range(10):
            metadata = dataset.read_metadata(row=datarow, col=datacolumn)
            if(metadata["Axes"]["position"]>=0):
                pos=metadata["Axes"]["position"]
                #print(pos)
                img = dataset.read_image(position=pos)
            
    
                img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
                cv2.imshow("test",img)
    """
    xtotaloffset = 0
    ytotaloffset = 0
    for dataposition in range(70):  # hard-coded number of positions in this acquisition
        print(dataposition)
        metadata = dataset.read_metadata(channel=0, position=dataposition)
        img = dataset.read_image(channel=0, position=dataposition)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        img = cv2.flip(img, 1)
        xoffset_um = metadata["XPosition_um_Intended"]
        yoffset_um = metadata["YPosition_um_Intended"]

        print("Intended location is : ", xoffset_um, yoffset_um)
        # cv2.imshow("test",img)
        # cv2.waitKey(0)
        xoffset_px = (xoffset_um - dataset.read_metadata(
            channel=0, position=0)['XPosition_um_Intended']) / pixelsizeinum
        yoffset_px = (yoffset_um - dataset.read_metadata(
            channel=0, position=0)['YPosition_um_Intended']) / pixelsizeinum
        xoffset_px = int(xoffset_px)
        print("Xoffset ", xoffset_px)
        #print("img max X ",blank_image.shape[0])
        yoffset_px = int(yoffset_px)
        print("Yoffset ", yoffset_px)
        #print("img max Y ",blank_image.shape[1])

        blank_image[xoffset_px:xoffset_px + (img.shape[1]),
                    yoffset_px:yoffset_px + (img.shape[0])] += img
        #blank_image[:yoffset_px+img.shape[0], :xoffset_px+img.shape[1]] = img
        #blank_image = cv2.addWeighted(blank_image[yoffset_px:yoffset_px+img.shape[0], xoffset_px:xoffset_px+img.shape[1]],img)

    ####################
    #printout only ignore
    ####################
    scale_percent = 5
    width = int(blank_image.shape[1] * scale_percent / 100)
    height = int(blank_image.shape[0] * scale_percent / 100)
    dim = (width, height)

    resized = cv2.resize(blank_image, dim, interpolation=cv2.INTER_AREA)
    winname = "test"
    cv2.namedWindow(winname)  # Create a named window
    cv2.moveWindow(winname, 1000, 1000)  # Move it to (40,30)
    cv2.imshow(winname, resized)
    cv2.waitKey(0)

    return blank_image
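

# --- hedged usage sketch (the path is hypothetical) ---
# Stitch a finished acquisition and save the 16-bit mosaic to disk.
if __name__ == '__main__':
    mosaic = direct_stitch('/data/grid_acq_1')
    cv2.imwrite('grid_acq_1_mosaic.tiff', mosaic)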
Example #5
import gc
import getopt
import sys
from pathlib import Path

import numpy as np
import npy2bdv
from pycromanager import Dataset
from skimage.measure import block_reduce

from image_post_processing import stage_deskew  # local module (assumed location)


def main(argv):

    # parse directory names from command line arguments
    input_dir_string = ''
    output_dir_string = ''

    try:
        arguments, values = getopt.getopt(argv,"hi:o:n:c:",["help","ipath=","opath="])
    except getopt.GetoptError:
        print('Error. stage_recon.py -i <inputdirectory> -o <outputdirectory>')
        sys.exit(2)
    for current_argument, current_value in arguments:
        if current_argument in ("-h", "--help"):
            print('Usage: stage_recon.py -i <inputdirectory> -o <outputdirectory>')
            sys.exit()
        elif current_argument in ("-i", "--ipath"):
            input_dir_string = current_value
        elif current_argument in ("-o", "--opath"):
            output_dir_string = current_value
        
    if input_dir_string == '':
        print('Input parse error.')
        sys.exit(2)

    # Load data
    # this approach assumes data is generated by QI2lab pycromanager control code

    # https://docs.python.org/3/library/pathlib.html
    # Create Path object to directory
    input_dir_path = Path(input_dir_string)

    # determine number of directories in root directory. loop over each one.
    tile_dir_path = [f for f in input_dir_path.iterdir() if f.is_dir()]
    num_tiles = len(tile_dir_path)

    # TO DO: read text file with this information in root directory
    num_y = 73
    num_z = 3
    num_channels = 2
    split = 40

    # create parameter array
    # [theta, stage move distance, camera pixel size]
    # units are [degrees, nm, nm]
    params = np.array([30, 200, 115], dtype=np.float32)

    # check if user provided output path
    if output_dir_string == '':
        output_dir_path = input_dir_path
    else:
        output_dir_path = Path(output_dir_string)

    # https://github.com/nvladimus/npy2bdv
    # create BDV H5 file with sub-sampling for BigStitcher
    output_path = output_dir_path / 'full.h5'
    bdv_writer = npy2bdv.BdvWriter(str(output_path),
                                   nchannels=num_channels,
                                   ntiles=(num_z * num_y * split) + 1,
                                   subsamp=((1, 1, 1), (2, 2, 2), (4, 4, 4),
                                            (8, 8, 8), (16, 16, 16)),
                                   blockdim=((16, 16, 8), ))

    # loop over each directory. Each directory will be placed as a "tile" into the BigStitcher file
    # TO DO: implement directory polling to do this in the background while data is being acquired.
    for tile in range(num_tiles):

        # load tile
        tile_dir_path_to_load = tile_dir_path[tile]
        print('Loading directory: ' + str(tile_dir_path_to_load))

        # decode directory name to determine tile_id in h5. reverse Z order, normal y order
        test_string = tile_dir_path_to_load.parts[-1].split('_')
        for i in range(len(test_string)):
            if 'y0' in test_string[i]:
                y_idx = int(test_string[i].split('y')[1])
            if 'z0' in test_string[i]:
                z_idx = int(test_string[i].split('z')[1])

        tile_id = ((num_z - z_idx - 1) * num_y) + y_idx
        print('y index: ' + str(y_idx) + ' z index: ' + str(z_idx) +
              ' H5 tile id: ' + str(tile_id))

        # https://pycro-manager.readthedocs.io/en/latest/read_data.html
        dataset = Dataset(str(tile_dir_path_to_load))

        # number of images in the tile, excluding the first 10 taken while
        # the stage ramps up to speed
        num_x = len(dataset.axes['x']) - 10

        # loop over channels inside tile
        for channel_id in range(num_channels):

            # read images from dataset. Skip first 10 images for stage speed up
            sub_stack = np.zeros([num_x, 256, 1600])
            for i in range(num_x):
                sub_stack[i, :, :] = dataset.read_image(channel=channel_id,
                                                        x=i + 10,
                                                        y=0,
                                                        z=0,
                                                        read_metadata=False)

            #TO DO: Integrate Microvolution hook here to do deconvolution on skewed data before deskewing.
            
            # run deskew
            print('Deskew tile.')
            deskewed = stage_deskew(data=sub_stack, parameters=params)
            del sub_stack
            gc.collect()

            # downsample by 2x in z due to oversampling when going from OPM to coverslip geometry
            deskewed_downsample = block_reduce(deskewed,
                                               block_size=(2, 1, 1),
                                               func=np.mean)
            del deskewed
            gc.collect()

            print('Split and write tiles.')
            # write BDV tile
            # https://github.com/nvladimus/npy2bdv 
            for split_id in range(split):
                size = int(np.floor(num_x / split))
                size_overlap = int(np.ceil(size * 1.1))  # 10% overlap between substacks

                if split_id == split - 1:
                    deskewed_downsample_substack = deskewed_downsample[split_id * size:]
                else:
                    deskewed_downsample_substack = deskewed_downsample[
                        split_id * size:split_id * size + size_overlap]

                bdv_writer.append_view(deskewed_downsample_substack,
                                       time=0,
                                       channel=channel_id,
                                       tile=(tile_id * split) + split_id,
                                       voxel_size_xyz=(.115, .115, .200),
                                       voxel_units='um')

            # free up memory
            del deskewed_downsample
            gc.collect()

    # write BDV xml file
    # https://github.com/nvladimus/npy2bdv
    bdv_writer.write_xml_file(ntimes=1)
    bdv_writer.close()

    # clean up memory
    gc.collect()
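

# --- hedged entry point sketch (not part of the original) ---
# Forwards command-line arguments to main(), which parses them with getopt,
# e.g.:
#
#   python stage_recon.py -i /data/tiles_root -o /data/recon_out
if __name__ == '__main__':
    main(sys.argv[1:])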
Example #6
import numpy as np
from pycromanager import Dataset
import napari

# This path is to the top level of the magellan dataset (i.e. the one that contains the Full resolution folder)
data_path = "/Users/henrypinkard/tmp/tcz_acq_1"

# open the dataset
dataset = Dataset(data_path)

# read tiles or tiles + metadata by channel, slice, time, and position indices
# img is a numpy array and md is a python dictionary
# img, img_metadata = dataset.read_image(l=10, read_metadata=True)

dask_array = dataset.as_array(stitched=False, verbose=True)

with napari.gui_qt():
    v = napari.Viewer()
    v.add_image(dask_array)
Example #7
from pycromanager import Dataset
from pathlib import Path
import napari

# This path is to the top level of the dataset
data_path = Path('E:\\20201024\\restrepo_z000_1\\')

# construct dataset
dataset = Dataset(str(data_path))

dask_array = dataset.as_array(verbose=False)
print(dask_array.shape)
Example #8
import argparse
import gc
import sys
from itertools import compress, product
from pathlib import Path

import numpy as np
import npy2bdv
import tifffile
import zarr
from pycromanager import Dataset
from skimage.measure import block_reduce

import data_io  # local module from the acquisition package
from image_post_processing import deskew  # local module (assumed location)


def main(argv):

    # parse command line arguments
    parser = argparse.ArgumentParser(description="Process raw OPM data.")
    parser.add_argument("-i",
                        "--ipath",
                        type=str,
                        nargs="+",
                        help="supply the directories to be processed")
    parser.add_argument("-d",
                        "--decon",
                        type=int,
                        default=0,
                        help="0: no deconvolution (DEFAULT), 1: deconvolution")
    parser.add_argument("-f",
                        "--flatfield",
                        type=int,
                        default=0,
                        help="0: No flat field (DEFAULT), 1: flat field")
    parser.add_argument("-k",
                        "--deskew",
                        type=int,
                        default=1,
                        help="0: no deskewing, 1: deskewing (DEFAULT)")
    parser.add_argument(
        "-s",
        "--save_type",
        type=int,
        default=0,
        help="0: TIFF stack output (DEFAULT), 1: BDV output, 2: Zarr output")
    parser.add_argument(
        "-t",
        "--tilt_orientation",
        type=str,
        default='new',
        help="new: new orientation (DEFAULT), prev: previous orientation")
    parser.add_argument(
        "--time_steps",
        nargs='+',
        type=int,
        default=-1,
        help="-1: all time steps (DEFAULT), else list of time steps")
    parser.add_argument(
        "--channels",
        nargs='+',
        type=int,
        default=-1,
        help="-1: all channels (DEFAULT), else list of all channels")
    parser.add_argument(
        "--overwrite",
        type=int,
        default=0,
        help="0: do not overwrite existing folder (DEFAULT), 1: overwrite")
    args = parser.parse_args()

    input_dir_strings = args.ipath
    decon_flag = args.decon
    flatfield_flag = args.flatfield
    deskew_flag = args.deskew
    save_type = args.save_type
    tilt_orientation = args.tilt_orientation
    overwrite_flag = args.overwrite == 1

    # Loop over all user supplied directories for batch reconstruction
    for ii, input_dir_string in enumerate(input_dir_strings):
        print("Processing directory %d/%d" % (ii + 1, len(input_dir_strings)))

        # https://docs.python.org/3/library/pathlib.html
        # Create Path object to directory
        input_dir_path = Path(input_dir_string)

        # create parameter array from scan parameters saved by acquisition code
        df_metadata = data_io.read_metadata(
            input_dir_path.resolve().parents[0] / 'scan_metadata.csv')
        root_name = df_metadata['root_name']
        scan_type = df_metadata['scan_type']
        theta = df_metadata['theta']
        scan_step = df_metadata['scan_step']
        pixel_size = df_metadata['pixel_size']
        num_t = df_metadata['num_t']
        num_y = df_metadata['num_y']
        num_z = df_metadata['num_z']
        num_ch = df_metadata['num_ch']
        num_images = df_metadata['scan_axis_positions']
        excess_images = 0
        y_pixels = df_metadata['y_pixels']
        x_pixels = df_metadata['x_pixels']
        chan_405_active = df_metadata['405_active']
        chan_488_active = df_metadata['488_active']
        chan_561_active = df_metadata['561_active']
        chan_635_active = df_metadata['635_active']
        chan_730_active = df_metadata['730_active']
        active_channels = [
            chan_405_active, chan_488_active, chan_561_active, chan_635_active,
            chan_730_active
        ]
        channel_idxs = [0, 1, 2, 3, 4]
        channels_in_data = list(compress(channel_idxs, active_channels))
        n_active_channels = len(channels_in_data)
        if num_ch != n_active_channels:
            print('Channel setup error. Check metadata file and directory names.')
            sys.exit()

        # calculate pixel sizes of deskewed image in microns
        deskewed_x_pixel = pixel_size / 1000.
        deskewed_y_pixel = pixel_size / 1000.
        deskewed_z_pixel = pixel_size / 1000.
        print('Deskewed pixel sizes before downsampling (um). x=' +
              str(deskewed_x_pixel) + ', y=' + str(deskewed_y_pixel) + ', z=' +
              str(deskewed_z_pixel) + '.')

        # amount of down sampling in z
        z_down_sample = 1

        # load dataset
        if str(input_dir_path).endswith('zarr'):
            dataset = zarr.open(input_dir_path, mode='r')
            im_type = 'zarr'
        else:
            dataset = Dataset(str(input_dir_path))
            im_type = 'pycro'

        # create output directory
        im_processes = []
        if decon_flag == 1:
            im_processes.append('decon')
        if flatfield_flag == 1:
            im_processes.append('flatfield')
        if deskew_flag == 1:
            im_processes.append('deskew')
        if len(im_processes) == 0:
            str_processes = 'original_output'
        else:
            str_processes = '_'.join(im_processes) + '_output'
        input_dir_path = Path(input_dir_string)
        output_dir_path = input_dir_path.resolve().parents[0] / str_processes
        output_dir_path.mkdir(parents=True, exist_ok=True)

        # initialize counters
        timepoints_in_data = list(range(num_t))
        ch_in_BDV = list(range(n_active_channels))
        em_wavelengths = [.450, .520, .580, .670, .780]

        # if specific time steps or channels are provided, we use them only
        # by default -1, list of int if provided by user
        if not isinstance(args.time_steps, int):
            timepoints_in_data = args.time_steps
            num_t = len(timepoints_in_data)
        if not isinstance(args.channels, int):
            ch_in_BDV = args.channels
            num_ch = len(ch_in_BDV)

        # Create TIFF if requested
        if (save_type == 0):
            # create directory for data type
            tiff_output_dir_path = output_dir_path / Path('tiff')
            tiff_output_dir_path.mkdir(parents=True, exist_ok=overwrite_flag)
        # Create BDV if requested
        elif (save_type == 1):
            # create directory for data type
            bdv_output_dir_path = output_dir_path / Path('bdv')
            bdv_output_dir_path.mkdir(parents=True, exist_ok=overwrite_flag)

            # https://github.com/nvladimus/npy2bdv
            # create BDV H5 file with sub-sampling for BigStitcher
            bdv_output_path = bdv_output_dir_path / Path(root_name + '_bdv.h5')
            bdv_writer = npy2bdv.BdvWriter(str(bdv_output_path),
                                           nchannels=num_ch,
                                           ntiles=1,
                                           subsamp=((1, 1, 1), ),
                                           blockdim=((16, 16, 16), ))

            # create blank affine transformation to use for stage translation
            unit_matrix = np.array((
                (1.0, 0.0, 0.0, 0.0),  # change the 4th value for x_translation (px)
                (0.0, 1.0, 0.0, 0.0),  # change the 4th value for y_translation (px)
                (0.0, 0.0, 1.0, 0.0)))  # change the 4th value for z_translation (px)
        # Create Zarr if requested
        elif (save_type == 2):
            # create directory for data type
            zarr_output_dir_path = output_dir_path / Path('zarr')
            zarr_output_dir_path.mkdir(parents=True, exist_ok=overwrite_flag)

            # create name for zarr directory
            zarr_output_path = zarr_output_dir_path / Path(root_name +
                                                           '_zarr.zarr')

            # calculate size of one volume
            # change step size from physical space (nm) to camera space (pixels)
            pixel_step = scan_step / pixel_size  # (pixels)

            # calculate the number of pixels scanned during stage scan
            scan_end = num_images * pixel_step  # (pixels)

            # calculate properties for final image
            ny = np.int64(
                np.ceil(scan_end +
                        y_pixels * np.cos(theta * np.pi / 180)))  # (pixels)
            nz = np.int64(np.ceil(y_pixels *
                                  np.sin(theta * np.pi / 180)))  # (pixels)
            nx = np.int64(x_pixels)  # (pixels)

            # create and open zarr file; Group.zeros() returns the created array
            root = zarr.open(str(zarr_output_path), mode="w")
            opm_data = root.zeros("opm_data",
                                  shape=(num_t, num_ch, nz, ny, nx),
                                  chunks=(1, 1, 32, 256, 256),
                                  dtype=np.uint16)

        # if retrospective flatfield is requested, import and open pyimagej in interactive mode
        # TO DO: need to fix for new call
        if flatfield_flag == 1:
            from image_post_processing import manage_flat_field

        # if decon is requested, import microvolution wrapper
        if decon_flag == 1:
            from image_post_processing import lr_deconvolution

        # loop over all timepoints and channels
        for (t_idx, ch_BDV_idx) in product(timepoints_in_data, ch_in_BDV):

            ch_idx = channels_in_data[ch_BDV_idx]

            # pull data stack into memory
            print('Process timepoint ' + str(t_idx) + '; channel ' +
                  str(ch_BDV_idx) + '.')
            if im_type == 'pycro':
                raw_data = data_io.return_data_numpy(dataset, t_idx,
                                                     ch_BDV_idx, num_images,
                                                     excess_images, y_pixels,
                                                     x_pixels)
            elif im_type == 'zarr':
                raw_data = dataset[t_idx, ch_BDV_idx, :, :, :]

            # run deconvolution on skewed image
            if decon_flag == 1:
                print('Deconvolve.')
                em_wvl = em_wavelengths[ch_idx]
                channel_opm_psf = data_io.return_opm_psf(em_wvl)
                if tilt_orientation == 'new':
                    channel_opm_psf = np.flip(channel_opm_psf, axis=1)
                #decon = mv_lr_decon(image=raw_data,psf=channel_opm_psf,iterations=50)
                decon = lr_deconvolution(image=raw_data,
                                         psf=channel_opm_psf,
                                         iterations=50)
            else:
                decon = raw_data
            del raw_data
            gc.collect()

            # perform flat-fielding
            if flatfield_flag == 0:
                corrected_stack = decon
            else:
                print('Flatfield.')
                # NOTE: 'ij' (pyimagej) is never initialized in this script;
                # see the TO DO above before using flatfield_flag == 1
                corrected_stack, flat_field, dark_field = manage_flat_field(
                    decon, ij)
            del decon
            gc.collect()

            # deskew raw_data
            if deskew_flag == 1:
                print('Deskew.')
                if tilt_orientation == 'new':
                    deskewed = deskew(data=np.flip(corrected_stack, axis=1),
                                      theta=theta,
                                      distance=scan_step,
                                      pixel_size=pixel_size)
                else:
                    deskewed = deskew(data=np.flip(corrected_stack, axis=0),
                                      theta=theta,
                                      distance=scan_step,
                                      pixel_size=pixel_size)
            else:
                if tilt_orientation == 'new':
                    deskewed = np.flip(corrected_stack, axis=1)
                else:
                    deskewed = np.flip(corrected_stack, axis=0)
            del corrected_stack
            gc.collect()

            # downsample in z due to oversampling when going from OPM to coverslip geometry
            if z_down_sample == 1:
                downsampled = deskewed
            else:
                print('Downsample.')
                downsampled = block_reduce(deskewed,
                                           block_size=(z_down_sample, 1, 1),
                                           func=np.mean)
            del deskewed
            gc.collect()

            # save deskewed image into TIFF stack
            if (save_type == 0):
                print('Write TIFF stack')
                tiff_filename = 'f_' + root_name + '_c' + str(ch_idx).zfill(
                    3) + '_t' + str(t_idx).zfill(5) + '.tiff'
                tiff_output_path = tiff_output_dir_path / Path(tiff_filename)
                tifffile.imwrite(str(tiff_output_path),
                                 downsampled.astype(np.uint16),
                                 imagej=True,
                                 resolution=(1 / deskewed_x_pixel,
                                             1 / deskewed_y_pixel),
                                 metadata={
                                     'unit': 'um',
                                     'axes': 'ZYX'
                                 })
            # save tile in BDV H5 with actual stage positions
            elif (save_type == 1):
                print('Write data into BDV H5.')
                bdv_writer.append_view(
                    downsampled,
                    time=t_idx,
                    channel=ch_BDV_idx,
                    tile=0,
                    voxel_size_xyz=(deskewed_x_pixel, deskewed_y_pixel,
                                    z_down_sample * deskewed_z_pixel),
                    voxel_units='um')

            # save deskewed image into Zarr container
            elif (save_type == 2):
                print('Write data into Zarr container')
                opm_data[t_idx, ch_BDV_idx, :, :, :] = downsampled

            # free up memory
            del downsampled
            gc.collect()

        if (save_type == 1):
            # write BDV xml file
            # https://github.com/nvladimus/npy2bdv
            bdv_writer.write_xml()
            bdv_writer.close()

    # shut down pyimagej
    if flatfield_flag == 1:
        ij.getContext().dispose()

    # exit
    print('Finished.')
    sys.exit()
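
# --- hedged usage sketch (not part of the original script) ---
# A minimal entry point plus an example batch invocation over two
# hypothetical acquisition directories:
#
#   python batch_recon.py -i /data/run_0001 /data/run_0002 -d 1 -s 1 --overwrite 1
if __name__ == '__main__':
    main(sys.argv[1:])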