Example No. 1
import nd2reader
import numpy as np


def nd2arr_list(nd_dh=None, nd_fns=None, nd_fh=None):
    """Read one or more ND2 files into a list of numpy arrays.

    Pass either a directory (nd_dh) plus a list of file names (nd_fns),
    or a single full path (nd_fh).
    """
    arr_list = []
    if nd_fh is None:
        # read every listed file from the given directory
        for nd_fn in nd_fns or []:
            nd = nd2reader.Nd2("%s/%s" % (nd_dh, nd_fn))
            for ndi in nd:
                arr_list.append(np.array(ndi))
            del nd
        return arr_list
    else:
        # read a single file given by its full path
        nd = nd2reader.Nd2(nd_fh)
        for ndi in nd:
            arr_list.append(np.array(ndi))
        return arr_list
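
A minimal usage sketch of the function above; the paths are hypothetical:

arrs = nd2arr_list(nd_dh="/data/nd2", nd_fns=["exp01.nd2", "exp02.nd2"])  # hypothetical paths
single = nd2arr_list(nd_fh="/data/nd2/exp01.nd2")
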
    def run_extraction(self):
        start_t = datetime.now()

        os.chdir(self.input_path)
        # get position names if an xml file is available
        if self.xml_file:
            if not self.xml_dir:
                self.xml_dir = self.input_path
            self.pos_info()
        # otherwise get lane info from y_um
        else:
            self.lane_info()
        os.chdir(self.input_path)

        # switch to another ND2 reader for faster iteration
        nd2 = nd2reader.Nd2(self.nd2_file)

        main_dir = self.input_path + "/" + self.nd2_file_name
        if not os.path.exists(main_dir):
            os.makedirs(main_dir)

        # parallelize extraction: one task per field of view
        poses = nd2.fields_of_view
        cores = pathos.multiprocessing.cpu_count()
        pool = pathos.multiprocessing.Pool(cores)
        pool.map(self.tiff_extractor, poses)
        pool.close()
        pool.join()

        time_elapsed = datetime.now() - start_t
        print('Time elapsed for extraction (hh:mm:ss.ms) {}'.format(
            time_elapsed))
Example No. 3
    def save_action(self, timestamps_model):
        """
        Writes missing timestamp files.

        So the first ND2 starts at 2015-11-13 16:53:12.
        The second ND2 starts at 2015-11-14 17:01:43.


        :type timestamps_model: fylm.model.Timestamps()

        """
        # ND2 timestamps are relative to the beginning of acquisition of a single time period. So to get the true timestamp
        # we need to look at the datetime that each ND2 began and compare it to the first ND2. This will be zero for the
        # first one.
        timestamp_offset = self._experiment.exact_start_time(
            timestamps_model.time_period) - self._experiment.exact_start_time(
                1)
        log.debug("Timestamp offset for time period %s: %s" %
                  (timestamps_model.time_period, timestamp_offset))
        log.info(
            "Creating timestamps for time_period:%s, Field of View:%s" %
            (timestamps_model.time_period, timestamps_model.field_of_view))
        nd2_filename = self._experiment.get_nd2_from_time_period(
            timestamps_model.time_period)
        nd2 = nd2reader.Nd2(nd2_filename)
        # subtract 1 from the field of view since nd2reader uses 0-based indexing, but we
        # refer to the fields of view with 1-based indexing
        for image_set in nd2.image_sets(
                field_of_view=timestamps_model.field_of_view,
                channels=[""],
                z_levels=[0]):
            image = [i for i in image_set][0]
            timestamp = image.timestamp + timestamp_offset
            timestamps_model.add(timestamp)
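
To make the offset arithmetic in the comment concrete, here is a small sketch based on the two start times quoted in the docstring (assuming exact_start_time returns Unix-style seconds):

from datetime import datetime

# start times quoted in the docstring above
first_start = datetime(2015, 11, 13, 16, 53, 12)
second_start = datetime(2015, 11, 14, 17, 1, 43)

# offset added to the second ND2's relative timestamps; zero for the first ND2
offset = (second_start - first_start).total_seconds()
print(offset)  # 86911.0 seconds, i.e. 24 h 8 min 31 s
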
from os.path import splitext

import nd2reader
import numpy as np
from tifffile import imsave  # assumed source of imsave; keyword names match older tifffile releases


def nd2_to_tiff(data_path):
    basename = splitext(data_path)[0]
    nd2 = nd2reader.Nd2(data_path)

    frames = nd2.frames
    channels = nd2.channels
    planes = nd2.z_levels
    fovs = nd2.fields_of_view

    # write one TIFF per (frame, channel, z-plane, field of view) combination
    for f in frames:
        for c in channels:
            for p in planes:
                for fov in fovs:
                    output_filename = basename + '_frame_' + str(f) + \
                        '_channel_' + c + '_plane_' + str(p) + '_field_' + \
                        str(fov) + '.tif'
                    try:
                        image = nd2.get_image(frame_number=f,
                                              field_of_view=fov,
                                              channel_name=c,
                                              z_level=p)
                    except Exception:
                        # drop into the debugger to inspect the failing combination
                        import pudb
                        pudb.set_trace()
                    image = np.array(image).astype('uint16')
                    imsave(filename=output_filename, data=image)
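
A hedged usage sketch; the path is hypothetical and the TIFFs are written next to the input file:

nd2_to_tiff('/data/experiment01.nd2')  # hypothetical path
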
Example No. 5
import nd2reader
import numpy as np
from os.path import basename, dirname


def nd2csv(nd_fh, nd2_dh=None):
    # nd2_dh (the output directory) was undefined in the original snippet;
    # default to the directory of the input file
    if nd2_dh is None:
        nd2_dh = dirname(nd_fh)
    nd = nd2reader.Nd2(nd_fh)
    for frame_cnt, frame in enumerate(nd):
        np.savetxt("%s/%s%02d.csv" % (nd2_dh, basename(nd_fh), frame_cnt),
                   frame,
                   delimiter=",")
    nd.close()  # the original `nd.close` never actually called the method
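
A minimal call, with a hypothetical file path; the CSVs land next to the ND2 by default:

nd2csv('/data/experiment01.nd2')  # hypothetical path
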
Example No. 6
import nd2reader
import numpy as np


def load_ZStack(stack_path):

    read_obj = nd2reader.Nd2(stack_path)

    # one slice per z-level; use the file's own frame size instead of the
    # hard-coded 512x512 of the original snippet
    shape = (len(read_obj.z_levels), read_obj.height, read_obj.width)
    green_stack = np.zeros(shape)
    red_stack = np.zeros(shape)
    # first frame, first field of view; channel 0 = green, channel 1 = red
    for i_z, z in enumerate(read_obj.z_levels):
        green_stack[i_z] = read_obj.get_image(0, 0, read_obj.channels[0], z)
        red_stack[i_z] = read_obj.get_image(0, 0, read_obj.channels[1], z)

    return green_stack, red_stack
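
A usage sketch with a hypothetical path:

green, red = load_ZStack('/data/zstack.nd2')  # hypothetical path
print(green.shape, red.shape)
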
Example No. 7
    def __init__(self,
                 input_file,
                 crop_edge=0.05,
                 image_type="nd2",
                 plate_index=0):
        self.pixel_microns = 1
        self.channels = None
        self.shape = None
        self.fl_img = {}
        self.ph_img = None
        self.ph_filtered = None
        self.ph_binary = None
        self.shape_indexed = None
        #self.shape_idx_binary = None deprecated
        self.shape_indexed_smoothed = None
        self.microcolony_labels = None
        self.microcolony_info = None
        self.sobel = None
        self.celllike = []
        self.cells = []
        self.microcolonies = []
        self.global_fl_bg_mean = {}
        self.global_fl_bg_std = {}
        self.plate_idx = plate_index

        if image_type == "nd2":
            img = nd2.Nd2(input_file)
            self.pixel_microns = img.pixel_microns
            self.channels = img.channels
            self.shape = (img.width, img.height)
            self.date_time = img.date
            if 0 <= crop_edge < 0.4:
                crop_w = int(crop_edge * img.width)
                crop_h = int(crop_edge * img.height)
                w1, w2 = crop_w, img.width - crop_w
                h1, h2 = crop_h, img.height - crop_h
                self.shape = (img.width - 2 * crop_w, img.height - 2 * crop_h)
            else:
                raise ValueError(
                    "Fraction of cropped edge should be less than 0.4!")
            self.min_length = int(round(conf.min_length / self.pixel_microns))
            self.max_length = int(round(conf.max_length / self.pixel_microns))
            for channel in self.channels:
                if re.search("ph", channel, re.IGNORECASE):
                    self.ph_img = np.array(
                        img[img.channels.index(channel)]).astype(
                            np.uint16)[h1:h2, w1:w2]
                else:
                    self.fl_img[channel] = np.array(
                        img[img.channels.index(channel)]).astype(
                            np.uint16)[h1:h2, w1:w2]
Example No. 8
    def _get_nd2_attributes(self, experiment):
        """
        Determine several attributes of the ND2s used in this experiment.

        :type experiment:   model.experiment.Experiment()

        """
        experiment_log = self._load_experiment_log(experiment)
        found_an_nd2 = False
        for n, nd2_filename in enumerate(experiment.nd2s):
            try:
                nd2 = nd2reader.Nd2(nd2_filename)
            except IOError:
                pass
            else:
                # We need to know the absolute time that an experiment began so we can figure out the gap between
                # different files (as that could be any amount of time).
                timestamp = self._utc_timestamp(nd2.absolute_start)
                time_period = n + 1
                experiment.set_time_period_start_time(time_period, timestamp)
                experiment_log['start_unix_timestamps'][str(
                    time_period)] = timestamp

                experiment.field_of_view_count = nd2.field_of_view_count
                experiment_log['field_of_view_count'] = nd2.field_of_view_count
                experiment_log['has_fluorescent_channels'] = False
                for channel in nd2.channels:
                    if channel.name != "":
                        log.info("Experiment has fluorescent channels.")
                        experiment.has_fluorescent_channels = True
                        experiment_log['has_fluorescent_channels'] = True
                        break
                else:
                    log.info("Experiment does not have fluorescent channels.")
                self._save_experiment_log(experiment, experiment_log)
                found_an_nd2 = True

        if not found_an_nd2:
            # There are no ND2s so we load all the information we need from the log.
            if 'field_of_view_count' not in experiment_log.keys(
            ) or 'has_fluorescent_channels' not in experiment_log.keys():
                terminal_error(
                    "No ND2s found and no attributes saved. It seems like you haven't even started this experiment."
                )
            experiment.field_of_view_count = int(
                experiment_log['field_of_view_count'])
            experiment.has_fluorescent_channels = experiment_log[
                'has_fluorescent_channels']
            for time_period, timestamp in experiment_log[
                    'start_unix_timestamps'].items():
                experiment.set_time_period_start_time(time_period, timestamp)
import os
import sys

import h5py
import nd2reader
import numpy as np


def get_wholefield_fluorescence(stim_table, im_filetype, im_directory, exp_date, mouse_ID, savepath):

    if os.path.isfile(savepath + exp_date + '_' + mouse_ID + '_wholefield.npy'):
        avg_fluorescence = np.load(savepath + exp_date + '_' + mouse_ID + '_wholefield.npy')
    else:

        im_path = None
        if im_filetype == 'nd2':
            for f in os.listdir(im_directory):
                if f.endswith(im_filetype) and f.lower().find('local') == -1:
                    im_path = im_directory + f
                    print(im_path)
        elif im_filetype == 'h5':
            # find experiment directory:
            for f in os.listdir(im_directory):
                if f.lower().find('ophys_experiment_') != -1:
                    exp_path = im_directory + f + '\\'
                    session_ID = f[17:]
                    print(session_ID)
        else:
            print('im_filetype not recognized!')
            sys.exit(1)

        if im_filetype == 'nd2':
            print('Reading nd2...')
            read_obj = nd2reader.Nd2(im_path)
            num_frames = len(read_obj.frames)
            avg_fluorescence = np.zeros((num_frames,))

            sweep_starts = stim_table['Start'].values
            block_bounds = []
            block_bounds.append((np.min(sweep_starts) - 30,
                                 np.max(sweep_starts[sweep_starts < 50000]) + 100))
            block_bounds.append((np.min(sweep_starts[sweep_starts > 50000]) - 30,
                                 np.max(sweep_starts) + 100))

            for block in block_bounds:
                frame_start = int(block[0])
                frame_end = int(block[1])
                for f in np.arange(frame_start, frame_end):
                    this_frame = read_obj.get_image(f, 0, read_obj.channels[0], 0)
                    print('Loaded frame ' + str(f) + ' of ' + str(num_frames))
                    avg_fluorescence[f] = np.mean(this_frame)
        elif im_filetype == 'h5':
            f = h5py.File(exp_path + session_ID + '.h5')
            data = np.array(f['data'])
            avg_fluorescence = np.mean(data, axis=(1, 2))
            f.close()
        np.save(savepath + exp_date + '_' + mouse_ID + '_wholefield.npy', avg_fluorescence)

    return avg_fluorescence
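
A hedged usage sketch; the paths are hypothetical, and stim_table is assumed to be a pandas DataFrame with a 'Start' column:

import pandas as pd

stim_table = pd.read_csv('/data/stim_table.csv')  # hypothetical stimulus table
avg = get_wholefield_fluorescence(stim_table, 'nd2', '/data/imaging/',
                                  '20160630', 'M123', '/data/output/')
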
Example No. 10
    def save_action(self, registration_model):
        """
        Calculates the rotation offset for a single field of view and time_period.

        :type registration_model:   fylm.model.registration.Registration()

        """
        log.info("Creating registration file %s" % registration_model.filename)
        # This is a pretty naive loop - the same file will get opened 8-12 times
        # There are obvious ways to optimize this but that can be done later if it matters
        # It probably doesn't matter though and I like simple things
        base_nd2_filename = self._experiment.get_nd2_from_time_period(1)
        nd2_filename = self._experiment.get_nd2_from_time_period(
            registration_model.time_period)
        base_nd2 = nd2reader.Nd2(base_nd2_filename)
        nd2 = nd2reader.Nd2(nd2_filename)
        # gets the first out-of-focus image from the first time_period in the stack
        base_image = base_nd2.get_image(0, registration_model.field_of_view,
                                        "", 0)
        for i in range(nd2.time_index_count):
            image = nd2.get_image(i, registration_model.field_of_view, "", 0)
            dx, dy = self._determine_registration_offset(
                base_image.data, image.data)
            registration_model.add(dx, dy)
Example No. 11
def process(file):

	stack = nd2r.Nd2('Images/' + file)

	for i in range(6):

		bf = normalize(stack[i*5+4])
		gfp = normalize(stack[i*5+2])

		find_distance_from_center(gfp, bf, signal)

	# with open('pickles/' + file + '.pkl', 'wb') as handle:
	# 	pik.dump(signal, handle)

	make_boxplot(signal, file)
Example No. 12
    def save_action(self, rotation_model):
        """
        Calculates the rotation offset for a single field of view and time_period.

        :type rotation_model:   fylm.model.rotation.Rotation()

        """
        log.info("Creating rotation file %s" % rotation_model.filename)
        # This is a pretty naive loop - the same file will get opened 8-12 times
        # There are obvious ways to optimize this but that can be done later if it matters
        # It probably doesn't matter though and I like simple things
        nd2_filename = self._experiment.get_nd2_from_time_period(
            rotation_model.time_period)
        nd2 = nd2reader.Nd2(nd2_filename)
        # gets the first in-focus image from the first time point in the stack
        # TODO: Update nd2reader to figure out which one is in focus or to be able to set it
        image = nd2.get_image(0, rotation_model.field_of_view, "", 1)
        offset = self._determine_rotation_offset(image.data)
        rotation_model.offset = offset
Example No. 13
import os
import pickle

import nd2reader
import numpy as np


def get_nd2_dict(img_path):

    print(img_path in os.listdir(IMAGE_DIR))
    # print(os.listdir(IMAGE_DIR))

    im_name = img_path.replace('.nd2', '')
    im_name_pickle = im_name + '.pkl'

    if im_name_pickle not in os.listdir(IMAGE_DIR):

        print('---------------------------------')
        print('CREATING NEW PICKLE FILE')
        print('---------------------------------')

        img_dict = {
            'DAPI': {},
            'FITC': {},
            'FITClong': {},
            'CY3': {},
            'BF-Cy3': {}
        }

        nd2 = nd2reader.Nd2(IMAGE_DIR + '/' + img_path)
        # nd2 = nd2reader.Nd2(img_path)

        print(nd2.fields_of_view)

        for i in range(len(nd2)):
            channel = nd2[i].channel
            fov = nd2[i].field_of_view
            # img_dict[channel][fov] = nd2[i]
            img_dict[channel][fov] = nd2[i].astype(np.uint16)
            # / np.amax(nd2[i]).astype(np.uint16)

        # pickle files should be opened in binary mode
        pickle.dump(img_dict, open(IMAGE_DIR + '/' + im_name_pickle, 'wb'))
    else:
        print('---------------------------------')
        print('LOADING FROM PICKLE FILE')
        print('---------------------------------')
        img_dict = pickle.load(open(IMAGE_DIR + '/' + im_name_pickle, 'rb'))
        print('...done')

    return img_dict
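
A usage sketch; IMAGE_DIR is assumed to be a module-level constant, and the file name is reused from another example in this collection purely for illustration:

IMAGE_DIR = '/data/plate_images'  # hypothetical module-level constant
img_dict = get_nd2_dict('Plate000_WellF08_Seq0040.nd2')
dapi_fov0 = img_dict['DAPI'][0]  # DAPI image for the first field of view
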
Example No. 14
    def tiff_extractor(self, pos):
        nd2 = nd2reader.Nd2(self.nd2_f)
        if self.pos_dict:
            new_dir = self.main_dir + "/Lane_" + str(
                self.lane_dict[pos]).zfill(2) + "/" + self.pos_dict[pos] + "/"
        else:
            lane_ind = self.lane_dict[pos]
            pos_off = self.pos_offset[lane_ind]
            new_dir = self.main_dir + "/Lane_" + str(lane_ind).zfill(
                2) + "/pos_" + str(pos - pos_off).zfill(3) + "/"

        # create a folder for each position
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
        os.chdir(new_dir)

        if self.pos_dict:
            meta_name = self.nd2_file_name + "_" + self.pos_dict[pos] + "_t"
        else:
            meta_name = self.nd2_file_name + "_pos_" + str(
                pos - pos_off).zfill(3) + "_t"

        for image in nd2.select(fields_of_view=pos):
            channel = image._channel
            channel = str(channel.encode('ascii', 'ignore'))
            time_point = image.frame_number
            tiff_name = meta_name + str(time_point).zfill(
                4) + "_c_" + channel + ".tiff"

            # save file in 16-bit
            # thanks to http://shortrecipes.blogspot.com/2009/01/python-python-imaging-library-16-bit.html
            image = image.base.astype(np.uint16)
            out = PIL.Image.frombytes("I;16", (image.shape[1], image.shape[0]),
                                      image.tobytes())
            out.save(tiff_name)

        os.chdir(self.file_dir)
Example No. 15
                                                                        mt)**2

            if maxBetweenVar < currVarB:
                maxBetweenVar = currVarB
                t1 = i
                t2 = j
    return 1.0 * t1 / bins, 1.0 * t2 / bins


def normalize(img):
    high = np.amax(img)
    low = np.amin(img)
    return (img - low) / (high - low)


stack = nd2r.Nd2(
    '/Users/student/Projects/PlateImages/Plate000_WellF08_Seq0040.nd2')
test = stack[4].astype(np.float64) / np.amax(stack[4])
cy3 = stack[3].astype(np.float64) / np.amax(stack[3])
dapi = stack[0].astype(np.float64) / np.amax(stack[0])


def find_cells(img):
    strong_blur = filters.gaussian(img, 20)
    no_back = img - strong_blur
    no_back = normalize(no_back)
    equalized_no_back = exposure.equalize_hist(no_back)
    equalized_no_back = normalize(equalized_no_back)
    edges_nb = feature.canny(equalized_no_back, sigma=5)
    close_nb = ndi.binary_closing(edges_nb,
                                  structure=np.ones((3, 3)),
                                  iterations=1)
Example No. 16
def nd2_to_npy(gene,
               raw_data_gene_fname,
               tmp_gene_dir,
               processing_hyb,
               use_ram=True,
               max_ram=150000000000):
    """    
    Function used to convert Nikon ND2 files in numpy arrays.
    Important: require the nd2reader (https://github.com/jimrybarski/nd2reader)
    pip  install "nd2reader==2.1.3". There is a new version based on pims
    but need to be tested
    The input ND2 file will have only one channel and will be converted in a
    binary npy.
    
    Parameters:
    -----------

    gene: str 
        Name of the gene to process (ex. Aldoc)
    raw_data_gene_fname: str
        Path to the ND2 file (ex. '/Users/simone/Desktop/test/ND2conversion/BW_Aldoc.nd2')
    tmp_gene_dir: str 
        path to the folder where to save the npy files
    processing_hyb: str 
        Name of the processing_hyb to process (ex. EXP-17-BP3597_hyb2)
    use_ram: bool 
        If True the images are written first in ram until max_ram is reached
    max_ram: int
        Size of the RAM that can be used as temporary storage
 
    """

    size_counter = 0
    tmp_storage = {}

    with nd2.Nd2(raw_data_gene_fname) as nd2file:
        for channel in nd2file.channels:
            for fov in nd2file.fields_of_view:
                img_stack = np.empty(
                    [len(nd2file.z_levels), nd2file.height, nd2file.width],
                    dtype='uint16')
                images = nd2file.select(channels=channel,
                                        fields_of_view=fov,
                                        z_levels=nd2file.z_levels)
                for idx, im in enumerate(images):
                    img_stack[idx, :, :] = im

                converted_fname = tmp_gene_dir + processing_hyb + '_' + gene + '_pos_' + str(
                    fov) + '.npy'

                if use_ram:
                    # Create a dict that is saved in ram. When full is written on disk
                    # Done to reduce the number of i/o and increase performance
                    size_counter += img_stack.nbytes
                    if size_counter < max_ram:
                        tmp_storage[fov] = {}
                        tmp_storage[fov]['img'] = img_stack
                        tmp_storage[fov]['converted_fname'] = converted_fname
                    else:
                        for pos in tmp_storage.keys():
                            np.save(tmp_storage[pos]['converted_fname'],
                                    tmp_storage[pos]['img'],
                                    allow_pickle=False)

                        tmp_storage = {}
                        size_counter = img_stack.nbytes
                        tmp_storage[fov] = {}
                        tmp_storage[fov]['img'] = img_stack
                        tmp_storage[fov]['converted_fname'] = converted_fname

                else:
                    # Directly save the file without saving it in RAM
                    np.save(converted_fname, img_stack, allow_pickle=False)

        # Save the data that remained in the tmp_storage (if RAM is used)
        if use_ram:
            for pos in tmp_storage.keys():
                np.save(tmp_storage[pos]['converted_fname'],
                        tmp_storage[pos]['img'],
                        allow_pickle=False)
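
A hedged usage example, reusing the names from the docstring above; the output directory is hypothetical and must end with a path separator, since the function builds file names by string concatenation:

nd2_to_npy('Aldoc',
           '/Users/simone/Desktop/test/ND2conversion/BW_Aldoc.nd2',
           '/tmp/npy_out/',  # hypothetical output directory
           'EXP-17-BP3597_hyb2',
           use_ram=False)
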
def process_standalone_experiment():
    """
    Script to run conversion, filtering and raw counting on a small set of images.
    The analysis run locally

    All the parameters are entered with argparse

    Parameters:
    -----------

    path: string
        Path to the experiment to process
    analysis_name: string
        Name of the analysis
    stringency: int
        Value of the stringency to use in the threshold selection. Default=0
    min_distance: int
        Min distance betwenn to peaks. Default=5
    min_plane: int
        Min plane for z-stack cropping. Default=None
    max_plane: int:
        Max plane for z-stack cropping. Default=None
    ncores: int
        Number of cores to use for the processing. Deafault=1


    """

    # input to the function
    parser = argparse.ArgumentParser(
        description='Counting and filtering experiment')
    parser.add_argument('-path', help='path to experiment to analyze')
    parser.add_argument('-analysis_name', help='analysis name')
    parser.add_argument('-stringency', help='stringency', default=0, type=int)
    parser.add_argument('-min_distance',
                        help='min distance between peaks',
                        default=5,
                        type=int)
    parser.add_argument('-min_plane',
                        help='starting plane to consider',
                        default=None,
                        type=int)
    parser.add_argument('-max_plane',
                        help='ending plane to consider',
                        default=None,
                        type=int)
    parser.add_argument('-ncores',
                        help='number of cores to use',
                        default=1,
                        type=int)

    # Parse the input args
    args = parser.parse_args()
    processing_directory = args.path
    analysis_name = args.analysis_name
    stringency = args.stringency
    min_distance = args.min_distance
    min_plane = args.min_plane
    max_plane = args.max_plane
    ncores = args.ncores

    if min_plane is not None and max_plane is not None:
        plane_keep = [min_plane, max_plane]
    else:
        plane_keep = None

    # Determine the os type
    os_windows, add_slash = utils.determine_os()

    # Starting logger
    utils.init_file_logger(processing_directory)
    logger = logging.getLogger()

    logger.debug('min_plane%s', min_plane)
    logger.debug('max_plane %s', max_plane)
    logger.debug('keep_planes value %s', plane_keep)

    # Start the distributed client
    client = Client(n_workers=ncores, threads_per_worker=1)

    logger.debug('client %s', client)
    logger.debug('check that workers are on the same directory %s',
                 client.run(os.getcwd))

    # Check trailing slash
    processing_directory = utils.check_trailing_slash(processing_directory,
                                                      os_windows)

    # Determine the experiment name
    exp_name = processing_directory.split(add_slash)[-2]

    logger.debug('Experiment name: %s', exp_name)

    # Create the directories where to save the output
    tmp_dir_path = processing_directory + analysis_name + '_' + exp_name + '_tmp' + add_slash
    filtered_dir_path = processing_directory + analysis_name + '_' + exp_name + '_filtered' + add_slash
    counting_dir_path = processing_directory + analysis_name + '_' + exp_name + '_counting_pkl' + add_slash
    try:
        os.stat(tmp_dir_path)
    except OSError:
        os.mkdir(tmp_dir_path)
        os.chmod(tmp_dir_path, 0o777)

    try:
        os.stat(filtered_dir_path)
    except OSError:
        os.mkdir(filtered_dir_path)
        os.chmod(filtered_dir_path, 0o777)

    try:
        os.stat(counting_dir_path)
    except OSError:
        os.mkdir(counting_dir_path)
        os.chmod(counting_dir_path, 0o777)

    # Get the list of the nd2 files to process inside the directory
    files_list = glob.glob(processing_directory + '*.nd2')
    logger.debug('files to process %s', files_list)

    # Convert the .nd2 data
    for raw_data_gene_fname in files_list:
        fname = raw_data_gene_fname.split(add_slash)[-1][:-4]
        logger.debug('fname %s', fname)
        with nd2.Nd2(raw_data_gene_fname) as nd2file:
            for channel in nd2file.channels:
                for fov in nd2file.fields_of_view:
                    img_stack = np.empty(
                        [len(nd2file.z_levels), nd2file.height, nd2file.width],
                        dtype='uint16')
                    images = nd2file.select(channels=channel,
                                            fields_of_view=fov,
                                            z_levels=nd2file.z_levels)
                    for idx, im in enumerate(images):
                        img_stack[idx, :, :] = im

                    converted_fname = tmp_dir_path + exp_name + '_' + fname + '_' + channel + '_fov_' + str(
                        fov) + '.npy'
                    np.save(converted_fname, img_stack, allow_pickle=False)

    logger.debug('Finished .nd2 file conversion')

    # Filtering all the data
    # Get list of the files to process
    flist_img_to_filter = glob.glob(tmp_dir_path + '*.npy')

    # logger.debug('files to filter %s',flist_img_to_filter)
    # Parallel process all the data
    futures_processes = client.map(filtering_and_counting_experiment,
                                   flist_img_to_filter,
                                   filtered_dir_path=filtered_dir_path,
                                   counting_dir_path=counting_dir_path,
                                   exp_name=exp_name,
                                   plane_keep=plane_keep,
                                   add_slash=add_slash,
                                   min_distance=min_distance,
                                   stringency=stringency)

    client.gather(futures_processes)
    client.close()

    logger.debug('Finished filtering and counting')

    # delete the tmp folders
    shutil.rmtree(tmp_dir_path)
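
A hedged example of how this entry point might be invoked from the shell; the script file name is hypothetical, while the flags come from the argparse definitions above:

python standalone_counting.py -path /data/EXP-17-BP3597_hyb2 -analysis_name test_run -stringency 0 -min_distance 5 -ncores 4
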
#     a=fig.add_subplot(1,3,2)
#     imgplot = plt.imshow(dapi_colors)
#     a.set_title('dapi')
#     a=fig.add_subplot(1,3,3)
#     imgplot = plt.imshow(cy3_colors)
#     a.set_title('cy3')
#     plt.show()

nd2_files = []
for filename in os.listdir(os.getcwd()):
    if filename.endswith(".nd2"):
        nd2_files.append(filename)

for filename in nd2_files:
    well_id = filename.split('_')[1]
    compound_img = nd2reader.Nd2(filename)
    dapi_to_gfp_distance = []
    cy3_to_gfp_distance = []
    cy3_to_dapi_distance = []
    dapi_to_gfp_pixel_coor = []
    cy3_to_gfp_pixel_coor = []
    cy3_to_dapi_pixel_coor = []
    well_data = {}
    well_segmentation = []
    for i in range(0, 6, 1):
        for img in compound_img.select(fields_of_view=i):
            if img.channel == 'FITClong':
                gfp = nd2tofloat(img)
            elif img.channel == 'DAPI':
                dapi = nd2tofloat(img)
            elif img.channel == 'CY3':
Example No. 19
import nd2reader
import numpy as np
import matplotlib.pyplot as plt
import channel_utils as cu
x = []
FOV = 0
Original_filename = "/Volumes/Samsung_T3/20160630_SJ102_persister_002.nd2"
nd2 = nd2reader.Nd2(Original_filename)
for image in nd2.select(channels='488nm_jt', fields_of_view=FOV):
    x.append(image)
final = np.sum(x, axis=0)
number_channels, pixels = cu.number_of_channels(final)
print("number of channel sections", number_channels / 2)
print(pixels)
print("First located at ", pixels[0], "ending at", pixels[1])

Example No. 20
def main(argv):
    imagefile = argv[1]
    print(imagefile)
    nd2 = nd2reader.Nd2(imagefile)
    print(nd2)
    print(nd2[1])
Example No. 21
#!/usr/bin/env python2
import nd2reader
from sys import argv

script, nd2 = argv

nd2 = nd2reader.Nd2(str(nd2))

dapi = []
fitc = []
fitclong = []
cy3 = []
bfcy3 = []

for image in nd2.select(channels='DAPI'):
    dapi.append(image)

for image in nd2.select(channels='FITC'):
    fitc.append(image)

for image in nd2.select(channels='FITClong'):
    fitclong.append(image)

for image in nd2.select(channels='CY3'):
    cy3.append(image)

for image in nd2.select(channels='BF-Cy3'):
    bfcy3.append(image)

test = cy3[0]
print(type(test))
Example No. 22
# plt.ylabel("Frequency")
#plt.imshow(matrix2,cmap='jet')
#plt.imshow(test_image3,alpha=0.5)

# print 'cell mean', np.mean(intensity1)
# print 'cell var', np.var(intensity1)
# print 'nuc mean', np.mean(intensity_nuc)
# print 'nuc var', np.var(intensity_nuc)
# print 'nuc/cell', np.mean(intensity_nuc)/np.mean(intensity1)
# plt.show()

#plt.savefig(name.png)

for filename in listdir('PlateImages1/'):
    if filename == '.DS_Store':
        continue
    img1 = nd2reader.Nd2('PlateImages1/' + filename)
    test_image1 = img1[4].astype(np.float64) / np.amax(img1[4])
    test_image2 = img1[0].astype(np.float64) / np.amax(img1[0])
    test_image3 = img1[2].astype(np.float64) / np.amax(img1[2])
    matrix = find_cells(test_image1)
    matrix2 = segment_image(test_image2, 0.5, 0.5)
    intensity = find_intensity_cell(matrix, test_image3)
    intensity_nuc = find_intensity_in_nuc(matrix2, test_image3)
    sns.distplot(intensity, len(np.unique(intensity)))
    sns.distplot(intensity_nuc, len(np.unique(intensity_nuc)))
    plt.xlabel("Value")
    plt.ylabel("Frequency")
    plt.savefig('PlateImages2/' + filename + '.png')
    plt.close()
Example No. 23
import nd2reader
import Persister_utils as pu
import h5py
import numpy as np
from scipy import signal

#nd2 = nd2reader.Nd2("/Users/Ashley/Desktop/20160728_SDB1_LABPENAB001.nd2")
nd2 = nd2reader.Nd2("/Volumes/Samsung_T3/20160630_SJ102_persister_002.nd2")
print(nd2.channels)
holdall = np.zeros((nd2.height, nd2.width), 'double')
FOV = 1

for image in nd2.select(channels="488nm_jt", fields_of_view=(FOV)):
    holdall = holdall + image

line_profile = np.sum(holdall, axis=1)
window = np.diff(signal.gaussian(nd2.height, std=15))
cor_line = np.convolve(line_profile, window, mode='same')

cor_line_fit = cor_line[40:-40]
mean_val = np.mean(cor_line_fit**2)

out = signal.find_peaks_cwt(cor_line_fit**2, np.arange(1, 50))
myarray_out = np.asarray(out)

selected_points = np.where(cor_line_fit[out]**2 > mean_val)

myarray_selected_points = selected_points
new_points = (myarray_out[myarray_selected_points])

new_image = nd2[0]