Example 1
    def store_magellan_metadata(self, magellan_directory):
        """Read a Magellan dataset's metadata and cache it per directory.

        Opens the dataset rooted at ``magellan_directory``, collects its
        geometry (slices/height/width), physical calibration, per-frame
        timestamps, channel names, and pixel data type, and stores the
        result as one dictionary in ``self.magellan_dataset_dictionary``
        keyed by the directory string.  As a side effect the stitched
        array is kept on ``self.all_data``.

        :param magellan_directory: path to the top level of the Magellan
            dataset (the folder containing the Full resolution folder).
        """
        print('Collecting Metadata: ' + magellan_directory)
        magellan = MagellanDataset(magellan_directory)
        self.all_data = magellan.as_array(stitched=True)
        # Stitched array has 5 axes ordered time-channel-z-y-x, so
        # indices 2..4 give the z/y/x extents of the full dataset.
        size_array = self.all_data.shape
        num_slices = size_array[2]
        total_height = size_array[3]
        total_width = size_array[4]
        voxel_size_z_um = magellan.summary_metadata['z-step_um']
        pixel_size_xy_um = magellan.summary_metadata['PixelSize_um']
        num_positions = magellan.get_num_xy_positions()

        num_frames = magellan.get_num_frames()
        print('Time List')
        time_list = []
        for t in range(num_frames):
            metadata_dictionary = magellan.read_metadata(t_index=t)
            print(metadata_dictionary)
            # The timestamp key name differs between acquisition-software
            # versions; only a missing key should trigger the fallback, so
            # catch KeyError rather than every exception.
            try:
                time_list.append(metadata_dictionary['TimeReceivedByCore'])
            except KeyError:
                time_list.append(metadata_dictionary['Time'])

        # Channel names are also version-dependent: newer data exposes
        # them in the summary metadata, older data only through the
        # accessor.  Fetch the list once and derive the count from it.
        print('Channels')
        try:
            channel_names = magellan.summary_metadata['ChNames']
        except KeyError:
            channel_names = magellan.get_channel_names()
        num_channels = len(channel_names)

        image_height = magellan.summary_metadata['Height']
        image_width = magellan.summary_metadata['Width']
        print('Data type')
        # Magellan reports 16-bit data as 'GRAY16'; everything else is
        # treated as 8-bit.
        if magellan.summary_metadata['PixelType'] == 'GRAY16':
            data_type = np.uint16
        else:
            data_type = np.uint8

        # Add it all to the local dictionary
        local_dictionary = {
            'directory': magellan_directory,
            'slices': num_slices,
            'width': total_width,
            'height': total_height,
            'zSize': voxel_size_z_um,
            'xySize': pixel_size_xy_um,
            'frames': num_frames,
            'time_list': time_list,
            'positions': num_positions,
            'channels': num_channels,
            'channel_names': channel_names,
            'single_image_height': image_height,
            'single_image_width': image_width,
            'data_type': data_type
        }
        # append to global dictionary
        self.magellan_dataset_dictionary[magellan_directory] = local_dictionary
Example 2
 def get_working_directory(self):
     """Ask the user for a Magellan dataset folder and register it.

     Opens a directory chooser, verifies the selection actually is a
     Magellan dataset (showing a dialog and bailing out if not), then
     adds it to the list widget and caches its metadata.
     """
     chosen_dir = str(
         QFileDialog.getExistingDirectory(self, "Select Directory"))
     # Path object for OS-independent handling while probing the dataset.
     chosen_path = pathlib.Path(chosen_dir)
     try:
         MagellanDataset(chosen_path)
     except Exception:
         print('Not a Magellan Dataset')
         self.show_not_magellan_dialog()
         return
     print('PATH: ' + str(chosen_path))
     self._ui.magellan_dataset_listWidget.addItem(chosen_dir)
     self.store_magellan_metadata(chosen_dir)
    def run(self):
        """
        Thread entry point: build one .ims (HDF5) file per entry in
        ``self.output_list`` from the GUI input packaged in
        ``self.output_dictionary``.

        For each requested file the selected Magellan dataset is
        reopened, the chosen time/channel/z crop is copied plane by plane
        into a chunked, gzip-compressed
        "DataSet/ResolutionLevel 0/TimePoint t/Channel c/Data" dataset,
        and progress is reported to the GUI via ``progress_bar_signal``.
        ``completed_signal`` fires once every file has been written and
        closed.
        """
        for files in range(len(self.output_list)):
            # Create the output HDF5 file and announce its name to the GUI.
            self.file = create_h5.create_h5(
                self.output_dictionary[self.output_list[files]])
            self.name_signal.emit(
                self.output_dictionary[self.output_list[files]]['file_name'])
            # Re-open Magellan directory to have the data on hand
            magellan_directory = self.output_dictionary[
                self.output_list[files]]['magellan_directory']
            magellan = MagellanDataset(magellan_directory)
            all_data = magellan.as_array(stitched=True)
            # Gather pertinent information for packing the file.
            # NOTE(review): the *_min/*_max bounds appear to be 1-based,
            # inclusive GUI values (see the "+ 1" extents and the
            # "z_min - 1" read below) -- confirm against the dialog that
            # fills output_dictionary.
            time_start = self.output_dictionary[
                self.output_list[files]]['time_start']
            time_end = self.output_dictionary[
                self.output_list[files]]['time_end']
            channel_list = self.output_dictionary[
                self.output_list[files]]['file_channel_list']
            x_min = self.output_dictionary[self.output_list[files]]['x_min']
            x_max = self.output_dictionary[self.output_list[files]]['x_max']
            y_min = self.output_dictionary[self.output_list[files]]['y_min']
            y_max = self.output_dictionary[self.output_list[files]]['y_max']
            z_min = self.output_dictionary[self.output_list[files]]['z_min']
            z_max = self.output_dictionary[self.output_list[files]]['z_max']
            # Inclusive bounds, hence the +1 on every extent.
            total_width = x_max - x_min + 1
            total_height = y_max - y_min + 1
            num_slices = z_max - z_min + 1
            num_channels = len(channel_list)
            num_time = (time_end - time_start + 1)
            data_type = self.output_dictionary[
                self.output_list[files]]['data_type']
            count = 0
            # Status bar information passed to other function via emit
            self.progress_bar_signal.emit(count, num_slices, num_channels,
                                          num_time)
            # Time and channel information packaged in .ims file
            for t in range(time_start, (time_end + 1)):
                # Shift to 0-based indexing; the 0-based value is used for
                # both the array reads and the TimePoint group names.
                t = t - 1
                time_name_string = '/TimePoint ' + str(t)
                for c in channel_list:
                    channel_name_string = '/Channel ' + str(c)
                    channel_group_data = self.file.create_group(
                        "DataSet/ResolutionLevel 0" + time_name_string +
                        channel_name_string)
                    # Fixed Imaris-style attributes for the channel group.
                    # NOTE(review): HistogramMax is hard-coded to the
                    # 16-bit maximum even when data_type is 8-bit --
                    # confirm this is intended.
                    create_h5.write_attribute(channel_group_data,
                                              'HistogramMax', '65535.000')
                    create_h5.write_attribute(channel_group_data,
                                              'HistogramMin', '0.000')
                    create_h5.write_attribute(channel_group_data,
                                              'ImageBlockSizeX', '256')
                    create_h5.write_attribute(channel_group_data,
                                              'ImageBlockSizeY', '256')
                    create_h5.write_attribute(channel_group_data,
                                              'ImageBlockSizeZ', '256')
                    create_h5.write_attribute(channel_group_data, 'ImageSizeX',
                                              str(total_width))
                    create_h5.write_attribute(channel_group_data, 'ImageSizeY',
                                              str(total_height))
                    create_h5.write_attribute(channel_group_data, 'ImageSizeZ',
                                              str(num_slices))
                    # Start with a single z-plane and grow along axis 0 up
                    # to num_slices as planes are appended.
                    data_temp = self.file.create_dataset(
                        "DataSet/ResolutionLevel 0" + time_name_string +
                        channel_name_string + "/Data",
                        (1, total_height, total_width),
                        chunks=(8, 256, 256),
                        maxshape=(num_slices, total_height, total_width),
                        compression="gzip",
                        compression_opts=2,
                        dtype=data_type)

                    # First plane: 0-based slice z_min - 1 fills row 0.
                    data_temp.write_direct(np.array(all_data[t, c, z_min - 1]))
                    count = count + 1
                    # Status bar information passed to other function via emit
                    self.progress_bar_signal.emit(count, num_slices,
                                                  num_channels, num_time)
                    # Package z data into .ims file.
                    # NOTE(review): this loop writes absolute rows
                    # z_min..z_max-1 while the dataset grows one row at a
                    # time from a single row, so the row indices only line
                    # up when z_min == 1 -- verify for crops that do not
                    # start at the first slice.
                    for z in range(z_min, z_max):
                        count = count + 1
                        data_temp.resize(data_temp.shape[0] + 1, axis=0)
                        data_temp[z, :, :] = np.array(all_data[t, c, z])
                        print('T:' + str(t) + ', C:' + str(c) + ', Z:' +
                              str(z))
                        # Brief pause between planes; presumably gives the
                        # GUI thread time to repaint -- NOTE(review):
                        # confirm it is still needed.
                        time.sleep(0.05)
                        # Status bar information passed to other function via emit
                        self.progress_bar_signal.emit(count, num_slices,
                                                      num_channels, num_time)
            # Close the files upon completion of packing data
            self.file.close()
        # Return signal to GUI to announce all files have completed
        self.completed_signal.emit('All Files Complete')
        return
import numpy as np
from pygellan.magellan_data import MagellanDataset
import napari

# Point this at the top level of the Magellan dataset (the folder that
# contains the Full resolution folder).
dataset_path = '/path/to/data'

# Open the dataset.
dataset = MagellanDataset(dataset_path)

# Read one tile plus its metadata, addressed by channel, z-slice, time,
# and position indices; `tile` is a numpy array, `tile_metadata` a dict.
tile, tile_metadata = dataset.read_image(channel_index=0,
                                         z_index=30,
                                         pos_index=20,
                                         read_metadata=True)

# Alternatively, expose everything at once as a single lazy dask array:
# nothing is loaded into RAM until it is used, which makes datasets larger
# than memory practical, enables visualization in Napari, and lets the
# same code scale from a laptop to a cluster without rewrites.
# More information: https://dask.org/
stitched = dataset.as_array(
    stitched=True
)  # five axes ordered time-channel-z-y-x
# stitched = dataset.as_array(stitched=False)  # adds a leading position axis

# Dask arrays behave like numpy arrays: maximum-intensity z-projection of
# the stack at time point 0 in channel 0.
z_projection = np.max(stitched[0, 0], axis=0)
Example 5
import numpy as np
import h5py
from write_functions import write_attribute
from pygellan.magellan_data import MagellanDataset

# Alternative dataset, kept for reference:
# data_path = 'E:\\...\\201978_AgedOvary_Nobox_3umSteps_2'
# save_path = 'E:\\...\\MagellanTest'
# save_name = 'E:\\...\\201978_AgedOvary_Nobox_3umSteps_2\\test.ims'
data_path = 'E:\\...\\Cortex_3umStep_1'
save_path = 'E:\\...\\SPIM'
save_name = 'E:\\...\\Cortex_3umStep_1_test.ims'

# Open the dataset and expose it as one stitched array with axes ordered
# time-channel-z-y-x.
magellan = MagellanDataset(data_path)
stack = magellan.as_array(stitched=True)

stack_shape = stack.shape
print('All Data')
print(stack_shape)

# z/y/x extents of the stitched volume.
num_slices = stack_shape[2]
print(num_slices)
total_height = stack_shape[3]
print(total_height)
total_width = stack_shape[4]
print(total_width)

# Physical calibration from the acquisition's summary metadata.
voxel_size_z_um = magellan.summary_metadata['z-step_um']
print(voxel_size_z_um)
pixel_size_xy_um = magellan.summary_metadata['PixelSize_um']
print(pixel_size_xy_um)
Example 6
import numpy as np
from pygellan.magellan_data import MagellanDataset
import matplotlib.pyplot as plt

# Earlier test datasets, kept for reference:
#data_path = 'C:\\...\\test_1'
#data_path = 'J:\\...\\Cortex_10um_2timepoints_1'
data_path = 'E:\\...\\PyMARIS_test_z10_c4_t3_1'

magellan = MagellanDataset(data_path)
#channel_list = magellan.get_channel_names()

# Collect one acquisition timestamp per time point from the per-frame
# metadata.
num_frames = magellan.get_num_frames()
time_list = []
for t in range(num_frames):
    metadata_dictionary = magellan.read_metadata(t_index=t)
    time_list.append(metadata_dictionary['TimeReceivedByCore'])
print(time_list)

# Sample values of the metadata key read above:
#'TimeReceivedByCore': '2019-10-15 13:48:14.036429'
#'TimeReceivedByCore': '2019-10-15 13:50:55.130478'

#print(channel_list)

#img, img_metadata = magellan.read_image(channel_index=0, z_index=0, pos_index=0, read_metadata=True)
#print(img_metadata)
"""
all_data = magellan.as_array(stitched=True)
print(all_data)

sub_data = np.array(all_data[0, 2, 5])
Example 7
import numpy as np
import json
from pygellan.magellan_data import MagellanDataset
import tifffile as tf
import matplotlib.pyplot as plt

# Input dataset and output locations for the TIFF export test.
#data_path = 'C:\\...\\test_1'
data_path = 'J:\\...\\Cortex_10um_2timepoints_1'
save_path = 'J:\\...\\Cortex_10um_2timepoints_1'
save_name = 'J:\\...\\Cortex_10um_2timepoints_1\\test.tif'

magellan = MagellanDataset(data_path)

# Stitched array; opened here so the data are on hand for export.
all_data = magellan.as_array(stitched=True)

# Axial step size (um) from the acquisition's summary metadata.
z_steps = magellan.summary_metadata['z-step_um']
print("Z Steps")
print(z_steps)

# Read one tile with its metadata to obtain the lateral pixel calibration.
img, img_metadata = magellan.read_image(channel_index=0,
                                        z_index=0,
                                        pos_index=0,
                                        read_metadata=True)
# Lateral pixel size in um; the metadata value needs a float() conversion.
xres = float(img_metadata['PixelSizeUm'])
print(img_metadata['PixelSizeUm'])
print(img_metadata)

# Find xy pixel data type
#data_type = xy_data.dtype
#print('Data Type: ' + str(data_type))