Example #1
def output_aligned_image(image,
                         center_tck,
                         width_tck,
                         keypoints,
                         output_file,
                         t_out=[0.07, 0.15, 0.5, 0.95]):
    """ Make warped pictures and align the keypoints
    """
    #get the alignments for longitudinal_warp_spline
    t_in = calculate_keypoints(keypoints, center_tck)
    aligned_center, aligned_width = worm_spline.longitudinal_warp_spline(
        t_in, t_out, center_tck, width_tck=width_tck)

    #output the image
    warps = worm_spline.to_worm_frame(image,
                                      aligned_center,
                                      width_tck=aligned_width,
                                      width_margin=0,
                                      standard_width=aligned_width)
    mask = worm_spline.worm_frame_mask(aligned_width, warps.shape)

    #change warps to an 8-bit image
    bit_warp = colorize.scale(warps).astype('uint8')
    #make an rgba image, so that the worm mask is applied
    rgba = np.dstack([bit_warp, bit_warp, bit_warp, mask])
    #save the image
    freeimage.write(rgba, output_file)
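
A minimal usage sketch (not from the original source): it assumes the module-level imports used above (freeimage, worm_spline, etc.), a pose pickle of the form written by the save_annotations example later in this collection (a dict whose 'pose' entry holds (center_tck, width_tck)), and a keypoints structure acceptable to calculate_keypoints. All file names are hypothetical placeholders.

import pickle
import freeimage

with open('worm_01 bf.pickle', 'rb') as f:             # hypothetical pose annotation file
    center_tck, width_tck = pickle.load(f)['pose']
image = freeimage.read('worm_01 bf.png')               # hypothetical lab-frame image
keypoints = ...                                        # keypoint annotation in whatever form calculate_keypoints expects
output_aligned_image(image, center_tck, width_tck, keypoints, 'worm_01 straightened.png')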
Example #2
def pc_deviations_picture(my_PCA_stuff, standard_mask, worm_maker, worm_drawer,
                          vertical_combine, horizontal_combine, output_file):
    '''
    Draw a grid of normalized deviations of the worm along its principal components and save it
    to output_file; also print the cumulative proportion of variance explained, from left to right.

    Arguments:
        my_PCA_stuff: PCA object with components_, mean_scale, and variances attributes.
        standard_mask: array whose shape gives the size of each panel of the final image.
        worm_maker: function that generates a worm as a vector from PCA weights.
        worm_drawer: function that rasterizes a worm onto a canvas.
        vertical_combine / horizontal_combine: functions that combine images vertically / horizontally.
        output_file: path where the final image is saved.
	'''
    overall_list = []
    for j in range(min(20, len(my_PCA_stuff.components_))):
        pose_list = []
        for i in range(-4, 5):
            pc_weights = np.zeros((len(my_PCA_stuff.components_)))
            pc_weights[j] = i
            my_x = pc_weights
            my_x = np.append(my_x, my_PCA_stuff.mean_scale)
            my_x = np.concatenate((my_x, np.array([0.5, 0.5])))
            my_x = np.append(my_x, 0)
            my_worm = worm_maker(my_PCA_stuff, my_x)
            raster_worm = worm_drawer(my_worm,
                                      my_PCA_stuff,
                                      my_x,
                                      canvas_shape=standard_mask.shape)
            pose_list.append(raster_worm)
        poses_together = vertical_combine(pose_list)
        overall_list.append(poses_together)
    all_together = horizontal_combine(overall_list)
    freeimage.write(all_together.astype('uint8'), output_file)

    so_far_var = []
    cumulative_var = 0
    for i in range(0, 10):
        cumulative_var += (my_PCA_stuff.variances /
                           sum(my_PCA_stuff.variances))[i]
        so_far_var.append(cumulative_var)
    print('Cumulative variances (left to right): ' + str(so_far_var))
    return
Example #3
def extract_image_set(image_files, out_dir, date, age, plate_params, ignore_previous=False):
    """Find wells in a set of scanner images and extract each well into a separate image
    for further processing.

    Parameters:
    image_files: list of paths to a set of images.
    out_dir: path to write out the extracted images and metadata.
    date: date object referring to image scan date
    age: age in days of the worms in these images
    plate_params: configuration information for extracting wells from the plates.
        This must be a parameter dictionary suitable to pass to extract_wells.extract_wells()
    ignore_previous: if False, and stored results already exist, skip processing
    """
    out_dir = pathlib.Path(out_dir)
    metadata = out_dir / 'metadata.pickle'
    if metadata.exists() and not ignore_previous:
        return
    images = []
    print('extracting images for {}'.format(out_dir))
    well_mask = freeimage.read(str(out_dir.parent / 'well_mask.png')) > 0
    for image_file in image_files:
        image = freeimage.read(image_file)
        if image.dtype == numpy.uint16:
            image = (image >> 8).astype(numpy.uint8)
        images.append(image)
    well_names, well_images, well_centroids = extract_wells.extract_wells(images, well_mask, **plate_params)
    well_dir = util.get_dir(out_dir / 'well_images')
    for well_name, well_image_set in zip(well_names, well_images):
        for i, image in enumerate(well_image_set):
            freeimage.write(image, str(well_dir/well_name)+'-{}.png'.format(i))
    util.dump(metadata, date=date, age=age, well_names=well_names, well_centroids=well_centroids)
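
A hedged usage sketch (not part of the source): the scan directory, file naming, date, age, and plate_params are placeholders. plate_params must hold whatever keyword arguments extract_wells.extract_wells() expects, and a well_mask.png must already exist in the parent of out_dir (it is read near the top of the function).

import datetime
import pathlib

scan_dir = pathlib.Path('/data/scans')                                # hypothetical
image_files = sorted(scan_dir.glob('2017-01-01 plate_01 *.png'))      # hypothetical naming
plate_params = {}     # placeholder keyword arguments for extract_wells.extract_wells()
extract_image_set(image_files, scan_dir / 'plate_01',
                  date=datetime.date(2017, 1, 1), age=3, plate_params=plate_params)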
Example #4
def pc_deviations_picture(my_PCA_stuff, standard_mask, worm_maker, worm_drawer, vertical_combine, horizontal_combine, output_file):
	'''
	Draw a grid of normalized deviations of the worm along its principal components and save it
	to output_file; also print the cumulative proportion of variance explained, from left to right.

	Arguments:
		my_PCA_stuff: PCA object with components_, mean_scale, and variances attributes.
		standard_mask: array whose shape gives the size of each panel of the final image.
		worm_maker: function that generates a worm as a vector from PCA weights.
		worm_drawer: function that rasterizes a worm onto a canvas.
		vertical_combine / horizontal_combine: functions that combine images vertically / horizontally.
		output_file: path where the final image is saved.
	'''
	overall_list = []
	for j in range(0, len(my_PCA_stuff.components_))[0:20]:
		pose_list = []
		for i in range(-4, 5):
			pc_weights = np.zeros((len(my_PCA_stuff.components_)))
			pc_weights[j] = i
			my_x = pc_weights
			my_x = np.append(my_x, my_PCA_stuff.mean_scale)
			my_x = np.concatenate((my_x, np.array([0.5, 0.5])))
			my_x = np.append(my_x, 0)
			my_worm = worm_maker(my_PCA_stuff, my_x)
			raster_worm = worm_drawer(my_worm, my_PCA_stuff, my_x, canvas_shape = standard_mask.shape)
			pose_list.append(raster_worm)
		poses_together = vertical_combine(pose_list)
		overall_list.append(poses_together)
	all_together = horizontal_combine(overall_list)
	freeimage.write(all_together.astype('uint8'), output_file)

	so_far_var = []
	cumulative_var = 0	
	for i in range(0, 10):
		cumulative_var += (my_PCA_stuff.variances/sum(my_PCA_stuff.variances))[i]
		so_far_var.append(cumulative_var)
	print('Cumulative variances (left to right): ' + str(so_far_var))
	return
Example #5
def _find_worms_task(image_path, well):
    print('Finding worm '+well)
    image = freeimage.read(image_path)
    well_mask, edges, worm_mask = find_worm.find_worm_from_brightfield(image)
    freeimage.write(well_mask.astype(numpy.uint8)*255, image_path.parent / (well + '_well_mask.png'))
    freeimage.write(worm_mask.astype(numpy.uint8)*255, image_path.parent / (well + '_worm_mask.png'))
    return is_valid_mask(worm_mask)
Example #6
def warp_image(spine_tck, width_tck, image_file, warp_file):
    """Warp an image of a worm to a specified place

    Parameters:
        spine_tck: parametric spline tck tuple corresponding to the centerline of the worm
        width_tck: non-parametric spline tck tuple corresponding to the widths of the worm
        image_file: path to the image to warp the worm from
        warp_file: path where the warped worm image should be saved to
    """

    image = freeimage.read(image_file)
    warp_file = pathlib.Path(warp_file)

    #730 was determined for the number of image samples to take perpendicular to the spine
    #from average length of a worm (as determined from previous tests)
    warped = resample.warp_image_to_standard_width(image, spine_tck, width_tck,
                                                   width_tck,
                                                   int(spine_tck[0][-1] // 5))
    #warped = resample.sample_image_along_spline(image, spine_tck, 730)
    mask = resample.make_mask_for_sampled_spline(warped.shape[0],
                                                 warped.shape[1], width_tck)
    warped = colorize.scale(warped).astype('uint8')
    warped[~mask] = 255

    print("writing warped worm to :" + str(warp_file))
    #return warped
    if not warp_file.exists():
        warp_file.parent.mkdir(exist_ok=True)

    freeimage.write(warped, warp_file)  # freeimage convention: image.shape = (W, H)
Example #7
def make_composite_maskfile_batch(data_path,
                                  mask_path,
                                  save_path,
                                  data_str='',
                                  mask_str=''):
    data_fns = [
        data_f for data_f in sorted(os.listdir(data_path))
        if data_str in data_f
    ]

    print('importing data images')
    data_imgs = np.array([(freeimage.read(data_path + os.path.sep + data_f))
                          for data_f in data_fns])
    print('importing mask images')
    mask_imgs = np.array([
        freeimage.read(mask_path + os.path.sep + mask_f) > 0
        for data_f in data_fns for mask_f in sorted(os.listdir(mask_path))
        if data_f[0:15] in mask_f if mask_str in mask_f
    ])
    if not os.path.isdir(save_path):
        os.mkdir(save_path)

    print('generating and saving composites')
    for d_img, m_img, data_f in zip(data_imgs, mask_imgs, data_fns):
        comp = colorize.scale(np.copy(d_img), output_max=254)
        comp[m_img] = 255
        #if data_f==data_fns[2]: return
        freeimage.write(
            comp.astype('uint8'),
            save_path + os.path.sep + data_f[:-4] + 'composite' + data_f[-4:])
Example #8
def take_sequential_images(scope, out_dir, tl_intensity=255, exposure_time=2):
    '''
        Take sequential images as the user specifies successive positions on the stage
    '''

    out_dir = pathlib.Path(out_dir)
    scope.camera.acquisition_sequencer.new_sequence()
    scope.camera.acquisition_sequencer.add_step(exposure_time,
                                                'TL',
                                                tl_intensity=tl_intensity)
    out_dir.mkdir(exist_ok=True)
    pos_num = 0
    print(
        'Press enter after each position has been found; press control-c to end'
    )

    while True:
        try:
            input()
        except KeyboardInterrupt:
            break
        bf_image = scope.camera.acquisition_sequencer.run()[0]
        freeimage.write(bf_image, out_dir / f'_{pos_num:03d}.png')
        pos_num += 1

    if pos_num > 0:
        imaging_parameters = {
            'lamp': 'TL',
            'exposure': exposure_time,
            'intensity': tl_intensity
        }
        with (out_dir / 'imaging_parameters.json').open('w') as param_file:
            datafile.json_encode_legible_to_file(imaging_parameters,
                                                 param_file)
Example #9
    def make_focus_stacks(self):
        self.scope.camera.pixel_readout_rate = '280 MHz'
        self.scope.camera.exposure_time = 10
        self.scope.camera.shutter_mode = 'Rolling'
        self.scope.camera.sensor_gain = '16-bit (low noise & high well capacity)'

        for pos_idx, pos in enumerate(self.positions):
            if pos_idx in self.skipped_positions:
                continue
            self.scope.stage.position = pos
            ims = []

            self.scope.tl.lamp.intensity=78
            self.scope.tl.lamp.enabled = True
            time.sleep(0.001)
            self.scope.camera.start_image_sequence_acquisition(frame_count=100, trigger_mode='Software')
            for z in numpy.linspace(pos[2]+0.3-0.5, pos[2]+0.3+0.5, 100, endpoint=True):
                z = float(z)
                self.scope.stage.z = z
                self.scope.camera.send_software_trigger()
                ims.append((self.scope.camera.next_image(), z))
            self.scope.tl.lamp.enabled = False
            self.scope.camera.end_image_sequence_acquisition()

            out_dpath = self.dpath / '{:04}'.format(pos_idx) / 'z_stack'
            if not out_dpath.exists():
                out_dpath.mkdir()
            for idx, (im, z) in enumerate(ims):
                im_fpath = out_dpath / '{}__{:04}_{:04}_{}.png'.format(self.name, pos_idx, idx, z)
                freeimage.write(im, str(im_fpath), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)
Example #10
    def save_annotations(self):
        """Save the pose annotations as pickle files into the parent directory.
        A pickle file is created for each page in the flipbook with the name of the first image in the
        flipbook_page list as the base for the pickle file name.
        """
        for fp in self.ris_widget.flipbook_pages:
            if len(fp) == 0:
                # skip empty flipbook pages
                continue
            annotations = getattr(fp, 'annotations', {})
            pose = annotations.get('pose', (None, None))
            if pose is not None:
                center_tck, width_tck = pose
                if center_tck is not None:
                    path = pathlib.Path(fp[0].name)
                    with path.with_suffix('.pickle').open('wb') as f:
                        pickle.dump(dict(pose=pose), f)

                    # warp and save images from all flipbook pages
                    for lab_frame in fp:
                        lab_frame_image = lab_frame.data
                        path = pathlib.Path(lab_frame.name)
                        warp = worm_spline.to_worm_frame(lab_frame_image, center_tck, width_tck)
                        warp_save_path = path.parent / (path.stem + '-straight.png')
                        freeimage.write(warp, warp_save_path)

                        # If the widths are drawn, then create a mask that allows the user to make an alpha channel later.
                        # We create one mask for each flipbook page, in case the images were saved in different places.
                        # If we wind up redundantly writing the same mask a few times, so be it.
                        if width_tck is not None:
                            mask = worm_spline.worm_frame_mask(width_tck, warp.shape)
                            mask_save_path = path.parent / (path.stem + '-mask.png')
                            freeimage.write(mask, mask_save_path)
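
For reference, a small sketch (an assumption, not from the source) of reading back one of the pose pickles this method writes; the file name is a hypothetical placeholder.

import pickle

with open('position_000 bf.pickle', 'rb') as f:        # hypothetical pickle saved next to the flipbook image
    pose = pickle.load(f)['pose']
center_tck, width_tck = pose                           # width_tck may be None if widths were never drawn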
Example #11
def get_image_sequence_simple(scope, position_data, out_dir, lamp=None):
    '''
        scope - ScopeClient object
        position_data - positions acquired using get_objpositions
        out_dir - String path
        lamp - List of the form (exposure time, lamp_name)
    '''

    # Map lamp names to the filename suffixes used below.
    lamp_dict = {'cyan': 'gfp', 'green_yellow': 'RedmChr'}

    scope.camera.live_mode = False
    scope.camera.acquisition_sequencer.new_sequence()
    scope.camera.acquisition_sequencer.add_step(2, 'TL', tl_intensity=255)
    if lamp is not None:
        scope.camera.acquisition_sequencer.add_step(lamp[0], lamp[1])
    if not os.path.isdir(out_dir): os.mkdir(out_dir)
    for pos_num, this_position_data in enumerate(position_data):
        scope.nosepiece.position = this_position_data[0]
        scope.stage.position = this_position_data[1:]
        my_images = scope.camera.acquisition_sequencer.run()
        freeimage.write(
            my_images[0],
            out_dir + os.path.sep + '_{:03d}_bf.png'.format(pos_num))
        if lamp is not None:
            freeimage.write(
                my_images[1], out_dir + os.path.sep +
                '_{:03d}_'.format(pos_num) + lamp_dict[lamp[1]] + '.png')
Example #12
def color_dots(image_paths, save_paths, color_dot_location_lists):
	'''
	Draws colored dots on images from image_paths according to color_dot_location_lists, and saves them out to save_paths.
	'''
	color_dot_location_lists = np.round(color_dot_location_lists)
	for i in range(0, len(image_paths)):
		image_array = freeimage.read(image_paths[i])
		print('Drawing dots for ' + image_paths[i] + '.')
		
		if len(image_array.shape) == 2:
			my_width, my_height = image_array.shape
			color_array = np.empty((my_width, my_height, 3), dtype = image_array.dtype)
			color_array[:, :, 0] = image_array.copy()
			color_array[:, :, 1] = image_array.copy()
			color_array[:, :, 2] = image_array.copy()
	
		elif len(image_array.shape) == 3:
			color_array = image_array
		
		color_array[color_dot_location_lists[0][i][0], color_dot_location_lists[0][i][1], :] = [0, 0, 0]
		if len(color_dot_location_lists) > 1:		
			color_array[color_dot_location_lists[1][i][0], color_dot_location_lists[1][i][1], :] = [0, 0, 0]
		if len(color_dot_location_lists) > 2:		
			color_array[color_dot_location_lists[2][i][0], color_dot_location_lists[2][i][1], :] = [0, 0, 0]

		color_array[color_dot_location_lists[0][i][0], color_dot_location_lists[0][i][1], 0] = -1
		if len(color_dot_location_lists) > 1:		
			color_array[color_dot_location_lists[1][i][0], color_dot_location_lists[1][i][1], 1] = -1
		if len(color_dot_location_lists) > 2:		
			color_array[color_dot_location_lists[2][i][0], color_dot_location_lists[2][i][1], 2] = -1

		freeimage.write(color_array, save_paths[i])
	return
Example #13
    def measure(self, position_root, timepoint, annotations, before, after):
        derived_root = position_root.parent / DERIVED_ROOT
        image_file = position_root / f'{timepoint} {self.image_type}.png'
        if not image_file.exists():
            return [numpy.nan] * len(self.feature_names)

        image = freeimage.read(image_file)
        flatfield = freeimage.read(position_root.parent / 'calibrations' /
                                   f'{timepoint} fl_flatfield.tiff')
        image = image.astype(numpy.float32) * flatfield
        mask = self.get_mask(position_root, derived_root, timepoint,
                             annotations, image.shape)
        if mask is None:
            return [numpy.nan] * len(self.feature_names)
        if mask.sum() == 0:
            print(
                f'No worm region defined for {position_root.name} at {timepoint}'
            )

        data, region_masks = measure_fluor.subregion_measures(image, mask)

        if self.write_masks:
            color_mask = measure_fluor.colorize_masks(mask, region_masks)
            out_dir = derived_root / 'fluor_region_masks' / position_root.name
            out_dir.mkdir(parents=True, exist_ok=True)
            freeimage.write(color_mask,
                            out_dir / f'{timepoint} {self.image_type}.png')
        return data
Example #14
def archive_human_masks(human_directory, new_directory, work_directory):
	'''
	For a directory of hand-drawn masks, mask out everything in the accompanying bright-field file except for the worm itself and a 100-pixel surrounding area to save disk space. Also, re-compress all images to maximize compression and space efficiency.
	'''
	for a_subdir in os.listdir(human_directory):
		if os.path.isdir(human_directory + os.path.sep + a_subdir):
			folderStuff.ensure_folder(new_directory + os.path.sep + a_subdir)
			for a_file in os.listdir(human_directory + os.path.sep + a_subdir):
				if a_file.split(' ')[-1] == 'hmask.png':
					if not os.path.isfile(new_directory + os.path.sep + a_subdir + os.path.sep + a_file):
						print('Up to ' + a_subdir + ' ' + a_file + '.')
						my_stem = a_file.split(' ')[0]
						my_mask = freeimage.read(human_directory + os.path.sep + a_subdir + os.path.sep + my_stem + ' ' + 'hmask.png')
						bf_path = human_directory + os.path.sep + a_subdir + os.path.sep + my_stem + ' ' + 'bf.png'
						if os.path.isfile(bf_path):
							my_image = freeimage.read(bf_path)
						else:
							my_image = freeimage.read(bf_path.replace(human_directory, work_directory))
						area_mask = my_mask.copy().astype('bool')
						distance_from_mask = scipy.ndimage.morphology.distance_transform_edt(np.invert(area_mask)).astype('uint16')
						area_mask[distance_from_mask > 0] = True
						area_mask[distance_from_mask > 100] = False
						my_image[np.invert(area_mask)] = False
						freeimage.write(my_image, new_directory + os.path.sep + a_subdir + os.path.sep + my_stem + ' ' + 'bf.png', flags = freeimage.IO_FLAGS.PNG_Z_BEST_COMPRESSION)					
						freeimage.write(my_mask, new_directory + os.path.sep + a_subdir + os.path.sep + my_stem + ' ' + 'hmask.png', flags = freeimage.IO_FLAGS.PNG_Z_BEST_COMPRESSION)					
				elif a_file.split('.')[-1] == 'json':					
					shutil.copyfile(human_directory + os.path.sep + a_subdir + os.path.sep + a_file, new_directory + os.path.sep + a_subdir + os.path.sep + a_file)
	return
Example #15
def process_centerline_dir(source_dir, microns_per_pixel):
    source_dir = pathlib.Path(source_dir)
    out_dir = source_dir / 'individual_centerlines'
    out_dir.mkdir(exist_ok=True)

    centerline_data = {}
    centerline_data_entries = ['name', 'length']
    
    mask_data = {}
    [mask_data.setdefault(entry,[]) for entry in centerline_data_entries]
    for centerline_image_path in sorted(source_dir.iterdir()):
        if centerline_image_path.suffix[1:] != 'png':
            continue
        aggregate_centerline_image = freeimage.read(centerline_image_path)
        masks = parse_aggregate_centerlines(aggregate_centerline_image)
        for mask_num, mask in enumerate(masks):
            mask_name = centerline_image_path.stem + f'_{mask_num}'
            print(mask_name)
            freeimage.write(mask.astype('uint8')*255, out_dir / (mask_name+'.png'))
            center_tck, _ = worm_spline.pose_from_mask(mask) # Toss the widths
            try:
                length = spline_geometry.arc_length(center_tck) * microns_per_pixel
                mask_data['name'].append(mask_name)
                mask_data['length'].append(length)
            except TypeError:
                print(f'Warning: couldn\'t find centerline for {mask_name} (px location {list(numpy.where(mask))})')

    with (out_dir / 'measurements.txt').open('w+') as measurement_file:
        measurement_file.write('\t'.join(centerline_data_entries)+'\n')
        for data in zip(*[mask_data[entry] for entry in centerline_data_entries]):
            measurement_file.write('\t'.join([str(item) for item in data])+'\n')
Example #16
def get_image_sequence(scope, position_data, out_dir, lamp=None):
    '''
        lamp - List of lists of the form [[lamp_exposure1,lamp_name1],...]
    '''
    
    if lamp is None:
        lamp = [[2, 'TL']]
    if not any(arg[1] == 'TL' for arg in lamp):
        lamp.append([2, 'TL'])

    lamp_dict = {'cyan':'gfp','green_yellow':'RedmChr'}
    
    scope.camera.live_mode=False
    scope.camera.acquisition_sequencer.new_sequence()
    for lamp_exposure, lamp_name in lamp:
        if lamp_name != 'TL':
            scope.camera.acquisition_sequencer.add_step(lamp_exposure, lamp_name)
        else:
            scope.camera.acquisition_sequencer.add_step(lamp_exposure, lamp_name, tl_intensity=255)
    if not os.path.isdir(out_dir): os.mkdir(out_dir)
    for pos_num, this_position_data in enumerate(position_data):
        scope.nosepiece.position = this_position_data[0]
        scope.stage.position = this_position_data[1:]
        my_images = scope.camera.acquisition_sequencer.run() 
        for (lamp_exposure, lamp_name, this_image) in zip([arg[0] for arg in lamp],[arg[1] for arg in lamp], my_images):
            if lamp_name == 'TL':
                freeimage.write(this_image, out_dir+os.path.sep+'_{:03d}_bf_{}_ms'.format(pos_num, lamp_exposure)+'.png')
            else:
                freeimage.write(this_image, out_dir+os.path.sep+'_{:03d}_'.format(pos_num)+lamp_dict[lamp_name]+'_{}_ms'.format(lamp_exposure)+'.png')
Example #17
 def save_image(self):
     fn, _ = Qt.QFileDialog.getSaveFileName(
         self,
         'Save Image',
         self.flipbook.current_page.name + '.png',
         filter='Images (*.png *.tiff *.tif)')
     if fn:
         freeimage.write(self.image.data, fn)
Example #18
def save_mask_lcc(experiment_root):
    experiment_root = pathlib.Path(experiment_root)
    mask_root = experiment_root / 'derived_data' / 'mask'

    for position_mask_root in sorted(mask_root.iterdir()):
        for mask_file in sorted(position_mask_root.iterdir()):
            mask_image = freeimage.read(str(mask_file)) > 0
            new_mask = mask.get_largest_object(mask_image).astype(numpy.uint8)
            freeimage.write(new_mask*255, str(mask_file))
Example #19
def write_mask(in_file, out_file, shape=(768, 640)):
    """Deals with worm masks (hmasks, etc.) since you
    don't need to mode normalize them
    """
    image = freeimage.read(in_file)
    cropped_image = crop_img(image)
    #small_image = pyramid.pyr_down(image, shrink).astype(numpy.uint16)
    small_image = (pyr_down_set(cropped_image, shape).astype(numpy.uint16) > 128).astype(numpy.uint8) * 255
    freeimage.write(small_image, out_file)
Example #20
    def execute_run(self):
        self.run_ts = time.time()
        self.run_idx += 1
        self.write_checkpoint()
        self.scope.camera.pixel_readout_rate = '280 MHz'
        self.scope.camera.exposure_time = 10
        self.scope.camera.shutter_mode = 'Rolling'
        self.scope.camera.sensor_gain = '16-bit (low noise & high well capacity)'
        self.scope.camera.acquisition_sequencer.new_sequence(GreenYellow=255, Cyan=255, UV=255)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=78)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=False, GreenYellow=True)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=False, Cyan=True)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=False, UV=True)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=78)
        for pos_idx, pos in enumerate(self.positions):
            if pos_idx in self.skipped_positions:
                continue
#           tps = self.scope.camera.timestamp_ticks_per_second
            self.scope.stage.position = pos

            self.scope.tl.lamp.intensity=78
            self.scope.tl.lamp.enabled = True
            time.sleep(0.001)
            self.scope.camera.autofocus.hackified_autofocus_continuous_move(pos[2]-0.5, min(pos[2]+0.5, 25.51), 0.2, max_workers=2)
            self.scope.tl.lamp.enabled = False

            ims = dict(zip( ('bf0','greenyellow','cyan','uv','bf1'), self.scope.camera.acquisition_sequencer.run() ))

            out_dpath = self.dpath / '{:04}'.format(pos_idx)
            if not out_dpath.exists():
                out_dpath.mkdir()
            for name, im in ims.items():
                im_fpath = out_dpath / '{}__{:04}_{:04}_{}_autofocus.png'.format(self.name, pos_idx, self.run_idx, name)
                freeimage.write(im, str(im_fpath), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)


            self.scope.stage.position = pos
            ims = dict(zip( ('bf0','greenyellow','cyan','uv','bf1'), self.scope.camera.acquisition_sequencer.run() ))

            out_dpath = self.dpath / '{:04}'.format(pos_idx)
            if not out_dpath.exists():
                out_dpath.mkdir()
            for name, im in ims.items():
                im_fpath = out_dpath / '{}__{:04}_{:04}_{}.png'.format(self.name, pos_idx, self.run_idx, name)
                freeimage.write(im, str(im_fpath), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)

#           meta_fpath = out_dpath / '{}__{:04}_{:04}.json'.format(self.name, pos_idx, self.run_idx)
#           with meta_fpath.open('w') as f:
#               json.dump({'sequence_timestamps' : times}, f)

        time_to_next = max(0, self.interval - (time.time() - self.run_ts))
        self.run_timer.start(time_to_next * 1000)
Example #21
 def stop_editing(self):
     self.editing = False
     self.edit.setText('Edit Mask')
     self.rw.qt_object.layer_stack_painter_dock_widget.hide()
     self.rw.layers[2].opacity = 1
     self.rw.layers[3].visible = True
     self.rw.layers[4].visible = True
     self.outline_mask()
     orig_mask = self.image_dir / '{}_worm_mask_orig.png'.format(self.well)
     worm_mask = self.image_dir / '{}_worm_mask.png'.format(self.well)
     if not orig_mask.exists():
         worm_mask.rename(orig_mask)
     freeimage.write(self.worm_mask.astype(numpy.uint8)*255, worm_mask)
Example #22
def make_warp(metadata, image_file, warp_file):
    image = freeimage.read(image_file)
    spine_tck = metadata['spine_tck']
    width_tck = metadata['width_tck']
    widths = interpolate.spline_interpolate(width_tck, 100)
    warp_width = 2 * widths.max()  # the worm widths above are from center to edge, so twice that is edge-to-edge
    warped = resample.sample_image_along_spline(image, spine_tck, warp_width)
    mask = resample.make_mask_for_sampled_spline(warped.shape[0],
                                                 warped.shape[1], width_tck)
    warped[~mask] = 0
    freeimage.write(warped, warp_file)  # freeimage convention: image.shape = (W, H)
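
A usage sketch under stated assumptions: metadata only needs 'spine_tck' and 'width_tck' entries, here loaded from a hypothetical pickle file; all paths are placeholders.

import pickle

with open('worm_01 metadata.pickle', 'rb') as f:       # hypothetical metadata file
    metadata = pickle.load(f)                          # assumed to contain 'spine_tck' and 'width_tck'
make_warp(metadata, 'worm_01 bf.png', 'worm_01 warp.png')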
Example #23
def egg_outlines(a_directory):
	'''
	For each outlined egg mask in a_directory, convert it to a grayscale 8-bit image with the outline in white and the rest of the image in black.
	'''
	for a_subdir in os.listdir(a_directory):
		for an_image in os.listdir(a_directory + os.path.sep + a_subdir):
			if an_image.split(' ')[-1] == 'bf.png':
				filepath = a_directory + os.path.sep + a_subdir + os.path.sep + an_image
				my_image = freeimage.read(filepath)
				masked_conversion = fill_colored_outline(my_image, egg_mode = True)
				outline_conversion = fill_colored_outline(my_image, egg_mode = True, outline_only = True)
				freeimage.write(masked_conversion, filepath.replace('bf', 'emask'))
				freeimage.write(outline_conversion, filepath.replace('bf', 'outline'))
	return
Example #24
def take_automated_plate_images(scope, out_dir):
    out_dir = pathlib.Path(out_dir)
    scope.camera.acquisition_sequencer.new_sequence()
    scope.camera.acquisition_sequencer.add_step(2, 'TL', tl_intensity=255)
    out_dir.mkdir(exist_ok=True)
    field_spacing = 2160 * 0.0065 * 1 / 2.5  # FILL ME IN WITH APPROPRIATE FIELD SIZE BASED ON 2.5X OBJECTIVE

    try:
        input('Specify center of plate')
        center_position = scope.stage.position
        input('Specify outer extent of plate')
        outer_position = scope.stage.position

        roi_radius = ((center_position[0] - outer_position[0])**2 +
                      (center_position[1] - outer_position[1])**2)**0.5

        # Define function to interpolate z - assume the plate surface is a parabola with radial symmetry about its center w.r.t. both x & y
        scale_param = (outer_position[2] - center_position[2]) / (roi_radius**2)
        interpolate_z = lambda x, y: scale_param * (
            (x - center_position[0])**2 +
            (y - center_position[1])**2) + center_position[2]

        grid_x = np.arange(center_position[0] - roi_radius / np.sqrt(2),
                           center_position[0] + roi_radius / np.sqrt(2),
                           field_spacing)
        grid_y = np.arange(center_position[1] - roi_radius / np.sqrt(2),
                           center_position[1] + roi_radius / np.sqrt(2),
                           field_spacing)
        xcoor, ycoor = np.meshgrid(grid_x, grid_y)
        xcoor = np.array(
            [pts if num % 2 else pts[::-1] for num, pts in enumerate(xcoor)]
        )  # reverse the x-coordinates on alternate rows so the stage traverses the plate in minimal time

        pos_num = 0
        for x, y in zip(xcoor.flatten(), ycoor.flatten()):
            scope.stage.position = [x, y, interpolate_z(x, y)]

            bf_image = scope.camera.acquisition_sequencer.run()[0]
            freeimage.write(bf_image, out_dir / f'_{pos_num:03d}.png')
            pos_num += 1

        imaging_parameters = {'lamp': 'TL', 'exposure': 2, 'intensity': 255}
        with (out_dir / 'imaging_parameters.json').open('w') as param_file:
            datafile.json_encode_legible_to_file(imaging_parameters,
                                                 param_file)
        scope.stage.position = center_position
    except KeyboardInterrupt:
        return
Example #25
def colored_color_outlines(a_directory):
	'''
	For each outlined worm in a_directory, convert it to a grayscale 8-bit image with the outline in white and the rest of the image in black.
	'''
	for an_image in os.listdir(a_directory):
		if 'new' not in an_image:
			filepath = a_directory + os.path.sep + an_image
			my_image = freeimage.read(filepath)
			my_red = np.array([237, 28, 36])
			is_red = np.abs(my_image[:, :, :3] - my_red)
			is_red = (is_red.mean(axis = 2) < 1)
			new_image = np.zeros(my_image.shape[:2]).astype('uint8')
			new_image[is_red] = [-1]
			freeimage.write(new_image, filepath.replace('.png', '_new.png'))
	return
Example #26
 def _compress(self, image_path):
     image_path = pathlib.Path(image_path)
     assert image_path.suffix == '.png'
     image = freeimage.read(image_path)
     temp = tempfile.NamedTemporaryFile(dir=image_path.parent,
         prefix=image_path.stem + 'compressing_', suffix='.png', delete=False)
     try:
         freeimage.write(image, temp.name, flags=self.level)
         os.replace(temp.name, image_path)
     except:
         if os.path.exists(temp.name):
             os.unlink(temp.name)
         raise
     finally:
         temp.close()
Example #27
def make_well_mask(out_dir, image_file, ignore_previous=False):
    """Calculate and store well mask if necessary.

    Parameters:
    out_dir: directory where well_mask.png should exist or be created.
    image_file: path to an image to create the mask from, if it doesn't exist.
    ignore_previous: if True, re-make mask even if it already exists on disk.
    """
    out_dir = pathlib.Path(out_dir)
    well_mask_f = out_dir / 'well_mask.png'
    if ignore_previous or not well_mask_f.exists():
        image = freeimage.read(image_file)
        if image.dtype == numpy.uint16:
            image = (image >> 8).astype(numpy.uint8)
        well_mask = extract_wells.get_well_mask(image)
        freeimage.write((well_mask * 255).astype(numpy.uint8), str(well_mask_f))
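
A short sketch (an assumption) of how this pairs with the extract_image_set example earlier in this collection: make_well_mask writes well_mask.png into out_dir, and extract_image_set expects that file in the parent of its own output directory. Paths are hypothetical.

import pathlib

scan_dir = pathlib.Path('/data/scans')                                   # hypothetical
make_well_mask(scan_dir, scan_dir / '2017-01-01 plate_01 0.png')         # writes scan_dir / 'well_mask.png' if needed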
Example #28
def warp_image(spine_tck, width_tck, image, warp_file):

    #image = freeimage.read(image_file)

    #730 was determined for the number of image samples to take perpendicular to the spine
    #from average length of a worm (as determined from previous tests)
    warped = resample.warp_image_to_standard_width(image, spine_tck, width_tck,
                                                   width_tck, 730)
    #warped = resample.sample_image_along_spline(image, spine_tck, warp_width)
    mask = resample.make_mask_for_sampled_spline(warped.shape[0],
                                                 warped.shape[1], width_tck)
    warped[~mask] = 0
    print("writing unit worm to :" + str(warp_file))
    freeimage.write(warped, warp_file)  # freeimage convention: image.shape = (W, H)
Example #29
def flatfield_correct(im_fpath, ff_fpath, ffc_fpath):
    if not im_fpath.exists():
        return False, 'skipping "{}" (file not found)'.format(str(im_fpath))
    if not ff_fpath.exists():
        return False, 'skipping "{}" (flatfield reference image file "{}" not found)'.format(str(im_fpath), str(ff_fpath))
    try:
        im = freeimage.read(str(im_fpath))
        ff = freeimage.read(str(ff_fpath))
        ffc = im.astype(numpy.float32) * ff
        ffc *= (65535.0 * 0.9) / float(numpy.percentile(ffc, 98))
        ffc[ffc < 0] = 0
        ffc[ffc > 65535] = 65535
        freeimage.write(ffc.astype(numpy.uint16), str(ffc_fpath), freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)
    except Exception as e:
        return False, 'exception while correcting "{}": {}'.format(str(im_fpath), e)
    return True, '{} done'.format(str(im_fpath))
Example #30
def OLDoverallBackgroundSubtract(data_dir, match_string, temporal_radius, save_dir):
    '''
    Do background subtraction to find worms.
    '''
    my_files = sorted(os.listdir(data_dir))
    my_files = [a_file for a_file in my_files if match_string == a_file.split('_')[-1]]

    # Initialize my special background context.
    temp_folder = save_dir + '\\' + 'temp'
    if not os.path.isdir(temp_folder):
        os.mkdir(temp_folder)
    for i in range(0, temporal_radius):
        shutil.copy(data_dir + '\\' + my_files[i], temp_folder + '\\' + my_files[i])
        
    # Run the actual simple subtraction, saving out masked files.
    for i in range(temporal_radius, len(my_files)-temporal_radius):
        #context_files = [freeimage.read(data_dir + '\\' + my_files[j]) for j in range(i-temporal_radius, i+temporal_radius+1)]
        context_files = [freeimage.read(data_dir + '\\' + my_files[j]) for j in range(i-temporal_radius, i+1)]
        raw_file = freeimage.read(data_dir + '\\' + my_files[i])
        (simple_foreground_file, background_file) = simple_running_median_subtraction(raw_file, context_files)
        
        thresholded_mask = percentile_floor(simple_foreground_file, threshold_proportion = 0.975)
        final_mask = clean_dust_and_holes(thresholded_mask)

        raw_file[final_mask.astype('bool')] = background_file[final_mask.astype('bool')]        
        freeimage.write(raw_file, temp_folder + '\\' + my_files[i])

    # Fill in remaining tail files.
    for i in range(len(my_files)-temporal_radius, len(my_files)):
        shutil.copy(data_dir + '\\' + my_files[i], temp_folder + '\\' + my_files[i])

    # Now let's do it for real!
    for i in range(temporal_radius, len(my_files)-temporal_radius):
        context_files = [freeimage.read(temp_folder + '\\' + my_files[j]) for j in range(i-temporal_radius, i+temporal_radius+1)]
        raw_file = freeimage.read(data_dir + '\\' + my_files[i])
        (simple_foreground_file, background_file) = simple_running_median_subtraction(raw_file, context_files)
        
        thresholded_pic = percentile_floor(simple_foreground_file, threshold_proportion = 0.975)
        final_mask = clean_dust_and_holes(thresholded_pic)

        freeimage.write(final_mask, save_dir + '\\' + my_files[i])


    return
Example #31
def well_plate_acquisition(scope,
                           out_dir,
                           tl_intensity,
                           grid_size=[4, 6],
                           well_spacing_cc=12.92):
    '''
        Defaults for grid_size and well_spacing_cc correspond to a nominal Falcon 48-well plate (well spacing in mm).
    '''

    out_dir = pathlib.Path(out_dir)
    scope.camera.acquisition_sequencer.new_sequence()
    scope.camera.acquisition_sequencer.add_step(2,
                                                'TL',
                                                tl_intensity=tl_intensity)
    out_dir.mkdir()

    try:
        print('Specify xy-position for top-left well; press ctrl-c to abort.')
        input()
        topleft_position = scope.stage.position

        pos_num = 0
        for row_num in range(grid_size[0]):
            for column_num in range(grid_size[1]):
                scope.stage.position = [
                    topleft_position[0] + column_num * well_spacing_cc,
                    topleft_position[1] + row_num * well_spacing_cc,
                    topleft_position[2],
                ]
                print('Adjust to desired z-position; press ctrl-c to abort.')
                input()

                bf_image = scope.camera.acquisition_sequencer.run()[0]
                freeimage.write(bf_image, out_dir / f'_{pos_num:03d}.png')
                pos_num += 1
        imaging_parameters = {
            'lamp': 'TL',
            'exposure': 2,
            'intensity': tl_intensity
        }
        with (out_dir / 'imaging_parameters.json').open('w') as param_file:
            datafile.json_encode_legible_to_file(imaging_parameters,
                                                 param_file)
    except KeyboardInterrupt:
        return
Example #32
 def _compress(self, image_path):
     image_path = pathlib.Path(image_path)
     assert image_path.suffix == '.png'
     image = freeimage.read(image_path)
     temp = tempfile.NamedTemporaryFile(dir=image_path.parent,
                                        prefix=image_path.stem +
                                        'compressing_',
                                        suffix='.png',
                                        delete=False)
     try:
         freeimage.write(image, temp.name, flags=self.level)
         os.replace(temp.name, image_path)
     except:
         if os.path.exists(temp.name):
             os.unlink(temp.name)
         raise
     finally:
         temp.close()
Example #33
def get_image_sequence_simple(scope, position_data, out_dir, lamp=None):
    '''
        lamp - List of the form (exposure time, lamp_name)
    '''
    
    lamp_dict = {'cyan':'gfp','green_yellow':'RedmChr', 'teal':'yfp'}
    
    scope.camera.live_mode=False
    scope.camera.acquisition_sequencer.new_sequence()
    scope.camera.acquisition_sequencer.add_step(2,'TL', tl_intensity=255)
    if lamp is not None: scope.camera.acquisition_sequencer.add_step(lamp[0],lamp[1])
    if not os.path.isdir(out_dir): os.mkdir(out_dir)
    for pos_num, this_position_data in enumerate(position_data):
        scope.nosepiece.position = this_position_data[0]
        scope.stage.position = this_position_data[1:]
        my_images = scope.camera.acquisition_sequencer.run() 
        freeimage.write(my_images[0], out_dir+os.path.sep+'_{:03d}_bf.png'.format(pos_num))
        if lamp is not None: freeimage.write(my_images[1], out_dir+os.path.sep+'_{:03d}_'.format(pos_num)+lamp_dict[lamp[1]]+'.png')
Example #34
def overallBackgroundSubtract(data_dpath, match_glob, temporal_radius, save_dpath, save_dpath2 = '', save_dpath3 = '', demonstration_mode = False):
    '''
    Do background subtraction to find worms. This uses only past data, masking out the worms to create a background that won't disappear once the worm stops moving.
    '''
    data_dpath = Path(data_dpath)
    save_dpath = Path(save_dpath)
    if save_dpath2:
        save_dpath2 = Path(save_dpath2)
    if save_dpath3:
        save_dpath3 = Path(save_dpath3)
    my_file_fpaths = sorted(data_dpath.glob(match_glob))

    # Initialize my special background context.
    temp_dpath = save_dpath / 'temp'
    if not temp_dpath.exists():
        temp_dpath.mkdir(parents=True)
    for i in range(0, temporal_radius):
        shutil.copy(str(my_file_fpaths[i]), str(temp_dpath / my_file_fpaths[i].name))

    # Run the actual simple subtraction, saving out masked files.
    context_files = [freeimage.read(str(my_file_fpaths[j])) for j in range(0, temporal_radius)]
    for i in range(temporal_radius, len(my_file_fpaths)):
        real_raw_file = freeimage.read(str(my_file_fpaths[i]))
        raw_file = real_raw_file.copy()     
        context_files.append(raw_file)
        (foreground_file, background_file) = simple_running_median_subtraction(raw_file, context_files)
        
        thresholded_mask = percentile_floor(foreground_file, threshold_proportion = 0.975)
        final_mask = clean_dust_and_holes(thresholded_mask)

        raw_file[final_mask.astype('bool')] = background_file[final_mask.astype('bool')]        

        if demonstration_mode:
            freeimage.write(real_raw_file, str(save_dpath / my_file_fpaths[i].name))
            freeimage.write(background_file, str(save_dpath2 / my_file_fpaths[i].name))
            freeimage.write(final_mask, str(save_dpath3 / my_file_fpaths[i].name))

        if not demonstration_mode:
            freeimage.write(raw_file, str(temp_dpath / my_file_fpaths[i].name))
            freeimage.write(final_mask,str(save_dpath / my_file_fpaths[i].name))
    
        context_files = context_files[1:]

    return
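
A hedged usage sketch; the directories, glob pattern, and temporal_radius value are placeholders (temporal_radius is the number of preceding frames folded into the running-median background).

overallBackgroundSubtract('/data/worm_01', '*bf.png', temporal_radius=10,
                          save_dpath='/data/worm_01_masks')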
Example #35
def get_image_sequence(scope, positions, out_dir, lamp=None):
    '''
    Main image-acquisition function: take pictures at pre-determined positions.
    ==Input==
    scope: ScopeClient instance
    positions: a list of positions to image (e.g. acquired with get_objpositions)
    out_dir: string path of the output directory
    lamp: a list of lists of the form [[lamp1_exposure, lamp1_name], ...]
    '''
    # Check if the output directory is created
    if not os.path.isdir(out_dir): os.mkdir(out_dir)
    # Create a lookup table (phone book) mapping position number to stage coordinates
    phone_book = {}
    for pos_num, this_position_data in enumerate(positions):
        phone_book[str(pos_num)] = this_position_data
    with open(out_dir + os.path.sep + 'phonebook.pkl', 'wb') as handle:
        pickle.dump(phone_book, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # Doublecheck if this is the correct way of running this.
    lamp_dict = {'uv': 'dapi', 'red': 'cy5', 'TL': 'trans'}
    scope.camera.live_mode = False
    scope.camera.acquisition_sequencer.new_sequence()
    for lamp_exposure, lamp_name in lamp:
        if lamp_name != 'TL':
            # Don't need to set non-TL intensity, since they will be default at maximum
            scope.camera.acquisition_sequencer.add_step(
                lamp_exposure, lamp_name)
        else:
            scope.camera.acquisition_sequencer.add_step(lamp_exposure,
                                                        lamp_name,
                                                        tl_intensity=255)

    # Take pictures.
    for pos_num, this_position_data in enumerate(positions):
        scope.nosepiece.position = this_position_data[0]
        # x, y, z postions
        scope.stage.position = this_position_data[1:]
        my_images = scope.camera.acquisition_sequencer.run()
        for (lamp_exposure, lamp_name,
             this_image) in zip([arg[0] for arg in lamp],
                                [arg[1] for arg in lamp], my_images):
            freeimage.write(
                this_image, out_dir + os.path.sep + '{:03d}_'.format(pos_num) +
                lamp_dict[lamp_name] + '.png')
Example #36
def make_mask(metadata, mask_file):
    spine_tck = metadata['spine_tck']
    width_tck = metadata['width_tck']
    outline = spline_geometry.outline(spine_tck, width_tck, num_points=400)[-1]
    image = numpy.zeros(
        (1040, 1388),
        dtype=numpy.uint8)  # Celiagg convention: image.shape = (H, W)
    canvas = celiagg.CanvasG8(image)
    path = celiagg.Path()
    path.lines(outline)
    path.close()
    canvas.draw_shape(path,
                      AGG_TRANSFORM,
                      AGG_STATE,
                      fill=AGG_PAINT,
                      stroke=AGG_TRANSPARENT)
    freeimage.write(
        image.T, mask_file
    )  # freeimage convention: image.shape = (W, H). So take transpose.
Example #37
def make_align_img(scope, expt_dir):
    expt_dir = pathlib.Path(expt_dir)
    try:
        scope_pos, my_image = take_img(scope, expt_dir)
    except KeyboardInterrupt:
        return

    time_label = time.strftime('%Y%m%d-%H%M-%S')

    with (expt_dir / 'experiment_metadata.json').open('r') as mdata_file:
        metadata = json.load(mdata_file)
    with (expt_dir / f'experiment_metadata_noalign_{time_label}.json'
          ).open('w') as mdata_file:
        datafile.json_encode_legible_to_file(metadata, mdata_file)

    metadata['align_position'] = scope_pos
    with (expt_dir / 'experiment_metadata.json').open('w') as mdata_file:
        datafile.json_encode_legible_to_file(metadata, mdata_file)
    (expt_dir / 'calibrations').mkdir(exist_ok=True)
    freeimage.write(my_image, expt_dir / 'calibrations' / 'align_image.png')
Example #38
def convert_to_8bit(data_path, save_path, filter_str=''):
    # Scales based on the min and max of EACH IMAGE (not standardized across all images);
    # TODO: add an extra mode for standardized scaling.
    data_fns = [
        data_f for data_f in sorted(os.listdir(data_path))
        if filter_str in data_f
    ]
    data_imgs = np.array([
        freeimage.read(data_path + os.path.sep + data_f) for data_f in data_fns
    ])
    for data_f, d_img in zip(data_fns, data_imgs):
        converted_img = colorize.scale(np.copy(d_img),
                                       min=d_img.min(),
                                       max=d_img.max(),
                                       output_max=255).astype('uint8')
        freeimage.write(
            converted_img,
            save_path + os.path.sep + data_f[:-4] + '_8bit' + data_f[-4:])
Example #39
def extract_mask_fromcomposite_batch(comp_path,
                                     save_path,
                                     comp_str='',
                                     save_str=''):
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    for comp_f in sorted(os.listdir(comp_path)):
        mask_img = (freeimage.read(comp_path + os.path.sep + comp_f) == 255).astype('uint16') * 65535
        freeimage.write(mask_img, save_path + os.path.sep + comp_f[:-4] + save_str + comp_f[-4:])
Example #40
def make_references(scope, dpath, prefix):
    '''Position stage at min x and y coordinates of slide before calling this function.'''
    dpath = Path(dpath)
    start_pos = scope.stage.position
    idx = 0
    ims = []
    for x in numpy.linspace(start_pos[0], start_pos[0]+X_RANGE, 2, True):
        for y in numpy.linspace(start_pos[1], start_pos[1]+Y_RANGE, 15, True):
            try:
                scope.stage.position = (x, y, start_pos[2])
            except:
                pass
            time.sleep(0.1)
            im = scope.camera.acquire_image()
            freeimage.write(im, str(dpath / '{}_{:02}.png'.format(prefix, idx)), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)
            idx += 1
            ims.append(im)
    im = numpy.median(ims, axis=0).astype(numpy.uint16)
    freeimage.write(im, str(dpath / '{}_MEDIAN.png'.format(prefix)), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)
    scope.stage.position = start_pos
Example #41
def get_image_sequence_simple(scope, positions, out_dir, lamp=None):
    '''
        lamp - List of the form (exposure time, lamp_name)
    '''
    # Different filter set 
    # New acquisition sequences
    lamp_dict = {'uv': 'dapi', 'red':'cy5'}
    scope.camera.live_mode=False
    # ~/device/acquisition_sequencer 
    # This part is hard-coded, maybe it's ok. 
    # Unpack position_data into the two image and three image 
    #######################################################
    # Dump the file name to a pickle file
    #######################################################
    phone_book = {}
    position_num_total = len(positions[0])
    for pos_num, this_position_data in enumerate(positions[0]):
        phone_book[pos_num] = this_position_data
    for pos_num, this_position_data in enumerate(positions[1]):
        phone_book[pos_num + position_num_total] = this_position_data
    with open('phonebook.pickle', 'wb') as handle:
        pickle.dump(phone_book, handle, protocol=pickle.HIGHEST_PROTOCOL)
    #########################################
    position_data = positions[0]
    #########################################
    # Current setting: TL: 10ms; Cy5:500ms at intensity=255,
    # Dapi : 10ms at intensity = 255. 
    # Major change of calling 
    #########################################
    # Take pictures for the original spots
    for pos_num, this_position_data in enumerate(position_data):
        # Why do we have to pass this location?
        scope.nosepiece.position = this_position_data[0]
        # x,y,z positions
        scope.stage.position = this_position_data[1:]
        my_images = scope.camera.acquisition_sequencer.run()
        freeimage.write(my_images[0], out_dir + os.path.sep + '_{:03d}_bf.png'.format(pos_num))
        if lamp is not None:
            # Add UV (dapi) image
            freeimage.write(my_images[1], out_dir + os.path.sep + '_{:03d}_dapi.png'.format(pos_num))
            # Here added the Cy5 image
            freeimage.write(my_images[2], out_dir + os.path.sep + '_{:03d}_cy5.png'.format(pos_num))
    # Only saving the cy5 image
    cy5_position = positions[1]
    for pos_num, this_position_data in enumerate(cy5_position):
        scope.nosepiece.position = this_position_data[0]
        scope.stage.position = this_position_data[1:]
        my_images = scope.camera.acquisition_sequencer.run()
        if lamp is not None:
            freeimage.write(my_images[2], out_dir + os.path.sep + '_{:03d}_cy5.png'.format(pos_num + len(position_data)))
Example #42
def annotate_lawn(experiment_root,
                  position,
                  metadata,
                  annotations,
                  num_images_for_lawn=3):
    """Position annotator used to find the lawn and associated metadata about it"""
    print(f'Working on position {position}')

    image_paths = sorted(
        (experiment_root / position).glob('* bf.png'))[:num_images_for_lawn]
    lawn_mask = segment_images.find_lawn_from_images(
        map(freeimage.read, image_paths), metadata['optocoupler'])
    lawn_mask_root = experiment_root / DERIVED_ROOT / 'lawn_masks'
    lawn_mask_root.mkdir(parents=True, exist_ok=True)
    freeimage.write(
        lawn_mask.astype(numpy.uint8) * 255,
        lawn_mask_root / f'{position}.png')

    microns_per_pixel = process_images.microns_per_pixel(
        metadata['objective'], metadata['optocoupler'])
    annotations['lawn_area'] = lawn_mask.sum() * microns_per_pixel**2
Example #43
def select_final_outlines(color_directory, final_directory, working_directory, egg_mode = False):
	'''
	Select the properly filled outlines from color_directory and move them into final_directory along with the appropriate metadata and bright field files from working_directory.
	'''
	mask_ending = 'hmask'
	if egg_mode:
		mask_ending = 'emask'
	for a_subdir in os.listdir(color_directory):
		folderStuff.ensure_folder(final_directory + os.path.sep + a_subdir)
		for an_image in os.listdir(color_directory + os.path.sep + a_subdir):
			if an_image.split(' ')[-1] == 'outline.png':
				destination_mask = final_directory + os.path.sep + a_subdir + os.path.sep + an_image.replace('outline', mask_ending)
				outline_image = freeimage.read(color_directory + os.path.sep + a_subdir + os.path.sep + an_image)
				my_dimensions = len(outline_image.shape)
				if my_dimensions == 2:
					shutil.copyfile(color_directory + os.path.sep + a_subdir + os.path.sep + an_image.replace('outline', mask_ending), destination_mask)
				elif my_dimensions == 3:
					fixed_outline = outline_image[:, :, 0].astype('uint8')
					fixed_outline[fixed_outline > 0] = -1
					freeimage.write(fixed_outline, destination_mask)
				shutil.copyfile(working_directory + os.path.sep + a_subdir + os.path.sep + an_image.replace('outline', 'bf'), destination_mask.replace(mask_ending, 'bf'))
		shutil.copyfile(working_directory + os.path.sep + a_subdir + os.path.sep + 'position_metadata_extended.json', final_directory + os.path.sep + a_subdir + os.path.sep + 'position_metadata_extended.json')
	return
Example #44
def make_mega_lawn(worm_subdirectory, super_vignette):
	'''
	Make a mega lawn mask for use with all images of a worm. This avoids problems with detecting the relatively faint edge of the lawn, which depends on the exact focus plane and the brightness of the lamp.
	'''
	# Parallelized edge detection.
	my_bf_files = [worm_subdirectory + os.path.sep + a_bf for a_bf in os.listdir(worm_subdirectory) if a_bf[-6:] == 'bf.png']
	my_workers = min(multiprocessing.cpu_count() - 1, 60)
	chunk_size = int(np.ceil(len(my_bf_files)/my_workers))
	bf_chunks = [my_bf_files[x:x + chunk_size] for x in range(0, len(my_bf_files), chunk_size)]
	with concurrent.futures.ProcessPoolExecutor(max_workers = my_workers) as executor:
		chunk_masks = [executor.submit(lawn_maker, bf_chunks[i], super_vignette.copy()) for i in range(0, len(bf_chunks))]
	concurrent.futures.wait(chunk_masks)
	chunk_masks = [a_job.result() for a_job in chunk_masks]
	
	# Make mega lawn from edge detection.
	mega_lawn = np.max(np.array(chunk_masks), axis = 0)
	mega_lawn = scipy.ndimage.morphology.binary_fill_holes(mega_lawn).astype('bool')
	mega_lawn = zplib_image_mask.get_largest_object(mega_lawn).astype('uint8')
	mega_lawn[mega_lawn > 0] = -1	
	
	# Parallelized thresholding.
	with concurrent.futures.ProcessPoolExecutor(max_workers = my_workers) as executor:
		chunk_masks = [executor.submit(alternate_lawn_maker, bf_chunks[i], super_vignette.copy()) for i in range(0, len(bf_chunks))]
	concurrent.futures.wait(chunk_masks)
	chunk_masks = [a_job.result() for a_job in chunk_masks]			
		
	# Make alternative mega lawn from thresholding intensity.
	alt_mega_lawn = np.max(np.array(chunk_masks), axis = 0)
	alt_mega_lawn = scipy.ndimage.binary_fill_holes(alt_mega_lawn).astype('bool')
	alt_mega_lawn = zplib_image_mask.get_largest_object(alt_mega_lawn).astype('uint8')
	alt_mega_lawn[alt_mega_lawn > 0] = 255
	
	# Select the proper mega lawn: if the edge-detected lawn is much smaller than the threshold-based one, the faint lawn edge was probably missed, so fall back to thresholding.
	if np.bincount(np.ndarray.flatten(mega_lawn))[-1] < 0.8*np.bincount(np.ndarray.flatten(alt_mega_lawn))[-1]:
		mega_lawn = alt_mega_lawn
	freeimage.write(mega_lawn, worm_subdirectory + os.path.sep + 'great_lawn.png')
	return mega_lawn
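
A minimal usage sketch, assuming make_mega_lawn and its lawn_maker/alternate_lawn_maker helpers are in scope; the worm directory and vignette path are hypothetical.

import freeimage

# Hypothetical inputs: a directory of '... bf.png' frames plus a boolean vignette
# mask covering the usable field of view.
worm_subdirectory = '/mnt/experiment/work/worm_01'
super_vignette = freeimage.read('/mnt/experiment/work/super_vignette.png').astype('bool')

mega_lawn = make_mega_lawn(worm_subdirectory, super_vignette)
print('lawn pixels:', int((mega_lawn > 0).sum()))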
Example No. 45
0
def _computeFocusMeasures(bgs, im_fpath, measure_mask, compute_measures, write_models, write_deltas, write_masks):
    try:
        im = freeimage.read(str(im_fpath))
    except Exception:
        # Skip images that fail to load rather than aborting the whole sweep.
        return
    if bgs.model is not None:
        # NB: Model and delta are written as float32 tiffs
        try:
            if write_models:
                freeimage.write(
                    bgs.model,
                    str(im_fpath.parent / "{} wz_bgs_model.tiff".format(im_fpath.stem)),
                    freeimage.IO_FLAGS.TIFF_DEFLATE,
                )
            delta = numpy.abs(bgs.queryModelDelta(im))
            if write_deltas:
                freeimage.write(
                    delta,
                    str(im_fpath.parent / "{} wz_bgs_model_delta.tiff".format(im_fpath.stem)),
                    freeimage.IO_FLAGS.TIFF_DEFLATE,
                )
            mask = bgs.queryModelMask(im, delta)
            antimask = mask == 0
            if write_masks:
                freeimage.write(
                    (mask * 255).astype(numpy.uint8),
                    str(im_fpath.parent / "{} wz_bgs_model_mask.png".format(im_fpath.stem)),
                )
        except Exception:
            # Bail out quietly if the background model can't be queried for this image.
            return
        if compute_measures:
            focus_measures = {}
            focus_measures["whole_image_hp_brenner_sum_of_squares"], focus_measures[
                "whole_image_bp_brenner_sum_of_squares"
            ] = MaskedMultiBrenner((2560, 2160)).metric(im, measure_mask)
            model_delta_squares = delta.astype(numpy.float64) ** 2
            model_delta_squares[~measure_mask] = 0
            focus_measures["model_delta_sum_of_squares"] = model_delta_squares.sum()
            focus_measures["model_mask_count"] = mask.sum()
            model_delta_squares[antimask] = 0
            focus_measures["model_mask_region_delta_sum_of_squares"] = model_delta_squares.sum()
            focus_measures["model_mask_region_image_hp_brenner_sum_of_squares"], focus_measures[
                "model_mask_region_image_bp_brenner_sum_of_squares"
            ] = MaskedMultiBrenner((2560, 2160)).metric(im, mask)
            return focus_measures
Example No. 46
0
def ensure_human(human_dir, work_dir, data_dir):
	'''
	Make sure that the human data directory, human_dir, has everything it needs from the working directory (namely properly corrected bf images and rough background subtraction masks).
	'''
	def test_and_copy(test_file, found_file):
		'''
		Check for test_file in human_dir, then copy it over from found_file in work_dir if needed.
		'''
		if not os.path.isfile(test_file):
			print('Missing ' + test_file.split(' ')[-1] + ' at ' + test_file + '.')
			if os.path.isfile(found_file):
				print('\tFound corresponding ' + found_file.split(' ')[-1] + ' at ' + found_file + '.')
				print('\tCopying file...')
				shutil.copyfile(found_file, test_file)
				if os.path.isfile(test_file):
					print('\tSuccess!')
				else:
					raise RuntimeError('\tCOPYING FAILED.')
			else:
				raise FileNotFoundError('\tCouldn\'t find corresponding file.')
		return
		
	points_list = []
	for a_subdir in os.listdir(human_dir):
		human_subdir = human_dir + os.path.sep + a_subdir
		for a_file in os.listdir(human_subdir):
			if a_file.split('.')[-1] == 'png':
				file_split = a_file.split(' ')
				points_list.append(a_subdir + os.path.sep + file_split[0])
	points_list = sorted(list(set(points_list)))

	for a_point in points_list:
		# Set up some variables and copy metadata.
		(a_subdir, the_point) = a_point.split(os.path.sep)
		human_subdir = human_dir + os.path.sep + a_subdir
		working_subdir = work_dir + os.path.sep + a_subdir.split(' ')[-1]
		data_subdir = data_dir + os.path.sep + a_subdir.split(' ')[-1]
		base_metadata = human_subdir + os.path.sep + 'position_metadata.json'
		found_metadata = data_subdir + os.path.sep + 'position_metadata.json'
		test_and_copy(base_metadata, found_metadata)

		# Clean up human mask.
		base_test = human_subdir + os.path.sep + the_point
		base_found = working_subdir + os.path.sep + the_point
		test_and_copy(base_test + ' ' + 'hmask.png', base_test + ' ' + 'outline.png')
		print('Cleaning up ' + base_test + ' ' + 'hmask.png' + '.')
		old_mask = freeimage.read(base_test + ' ' + 'hmask.png')
		my_mask = np.zeros(old_mask.shape[:2]).astype('uint8')
		if len(old_mask.shape) == 3:
			my_mask[np.max(old_mask[:, :, :3], axis = 2).astype('float64') > 0] = 255
		elif len(old_mask.shape) == 2:
			my_mask[old_mask > 0] = 255
		else:
			raise ValueError(base_test + ' ' + 'hmask.png' + ' does not have proper dimensions.')
		my_mask[np.invert(zplib_image_mask.get_largest_object(my_mask) > 0)] = 0
		freeimage.write(my_mask, base_test + ' ' + 'hmask.png')

		# Clean up old stuff.
		if os.path.isfile(base_test + ' ' + 'bf.png'):
			os.remove(base_test + ' ' + 'bf.png')
		if os.path.isfile(base_test + ' ' + 'outline.png'):
			os.remove(base_test + ' ' + 'outline.png')

		# Copy over other test masks.
		test_and_copy(base_test + ' ' + 'bf.png', base_found + ' ' + 'bf.png')
		test_and_copy(base_test + ' ' + 'mask.png', base_found + ' ' + 'mask.png')
	return
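
A minimal call sketch with hypothetical directory roots; ensure_human only requires that the human, working, and data directories share subdirectory and timepoint names.

human_dir = '/mnt/experiment/human_check'
work_dir = '/mnt/experiment/work'
data_dir = '/mnt/experiment/raw'

ensure_human(human_dir, work_dir, data_dir)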
Example No. 47
0
def write_scaled(array, filename, min, max, gamma=1):
    """Write an image to disk as a uint8, after scaling with the specified
    parameters (see scale() function)."""
    import freeimage
    freeimage.write(scale(array, min, max, gamma).astype(numpy.uint8), filename)
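
A quick usage sketch, assuming write_scaled (and the scale() helper it relies on) is in scope and that scale() maps the [min, max] range to [0, 1] before applying the gamma exponent; the input path and chosen intensity bracket are hypothetical.

import numpy
import freeimage

im = freeimage.read('/mnt/experiment/worm_01/2023-01-01t1200 bf.png')  # hypothetical 16-bit frame
# Clip to the 1st-99th percentile range; gamma below 1 brightens the midtones under the assumption above.
write_scaled(im, '/tmp/preview.png',
             min=numpy.percentile(im, 1), max=numpy.percentile(im, 99), gamma=0.7)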
Example No. 48
0
    def save_image(self):
        fn, _ = Qt.QFileDialog.getSaveFileName(self, 'Save Image', self.flipbook.current_page.name+'.png', filter='Images (*.png *.tiff *.tif)')
        if fn:
            freeimage.write(self.image.data, fn)
Example No. 49
0
    def execute_run(self):
        self.run_ts = time.time()
        self.run_idx += 1
        self.write_checkpoint()
        self.scope.il.shutter_open = True
        self.scope.tl.shutter_open = True
        self.scope.camera.pixel_readout_rate = '280 MHz'
        self.scope.camera.shutter_mode = 'Rolling'
        self.scope.camera.sensor_gain = '16-bit (low noise & high well capacity)'
        self.scope.camera.acquisition_sequencer.new_sequence(green_yellow=255, cyan=255, uv=255)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=78)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=100, tl_enable=False, cyan=True)
        z_stack_pos = random.choice(self.non_skipped_positions)
        print('Selected well {:04} for z_stacks.'.format(z_stack_pos))
        for pos_idx, pos in enumerate(self.positions):
            if pos_idx in self.skipped_positions:
                continue
            self.scope.stage.position = pos

            self.scope.tl.lamp.intensity = 69
            self.scope.tl.lamp.enabled = True
            self.scope.camera.exposure_time = 10
            # More binning gives higher contrast, meaning less light needed
            self.scope.camera.binning = '4x4'
            time.sleep(0.001)
            self.scope.camera.autofocus.new_autofocus_continuous_move(pos[2]-0.5, min(pos[2]+0.5, 24.4), 0.2, max_workers=2)
            coarse_z = self.scope.stage.z
            self.scope.camera.binning = '1x1'
            self.scope.tl.lamp.intensity = 97
            time.sleep(0.001)
            self.scope.camera.autofocus.new_autofocus_continuous_move(coarse_z-0.15, min(coarse_z+0.15, 24.4), 0.1, metric='high pass + brenner', max_workers=2)
            fine_z = self.scope.stage.z
            self.scope.tl.lamp.enabled = False

            ims = dict(zip( ('bf','greenyellow'), self.scope.camera.acquisition_sequencer.run() ))

            out_dpath = self.dpath / '{:04}'.format(pos_idx)
            if not out_dpath.exists():
                out_dpath.mkdir()
            for name, im in ims.items():
                im_fpath = out_dpath / '{}__{:04}_{:04}_{}.png'.format(self.name, pos_idx, self.run_idx, name)
                freeimage.write(im, str(im_fpath), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)

            csv_fpath = out_dpath / '{}__{:04}_z_positions.csv'.format(self.name, pos_idx)
            with csv_fpath.open('a+') as f:
                print('{},{},{}'.format(pos_idx, self.run_idx, fine_z), file=f)

            if pos_idx == z_stack_pos:
                ims = []
                self.scope.tl.lamp.intensity = 97
                self.scope.tl.lamp.enabled = True
                self.scope.camera.exposure_time = 10
                out_dpath = self.dpath / 'z_stacks'
                if not out_dpath.exists():
                    out_dpath.mkdir()
                time.sleep(0.001)
                self.scope.camera.start_image_sequence_acquisition(frame_count=100, trigger_mode='Software')
                for z in numpy.linspace(pos[2]-0.5, min(pos[2]+0.5, 24.4), 100, endpoint=True):
                    z = float(z)
                    self.scope.stage.z = z
                    self.scope.camera.send_software_trigger()
                    ims.append((self.scope.camera.next_image(), z))
                self.scope.tl.lamp.enabled = False
                self.scope.camera.end_image_sequence_acquisition()

                for idx, (im, z) in enumerate(ims):
                    im_fpath = out_dpath / '{}__{:04}_{:04}_{:04}_{}.png'.format(self.name, self.run_idx, pos_idx, idx, z)
                    freeimage.write(im, str(im_fpath), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)

        time_to_next = max(0, self.interval - (time.time() - self.run_ts))
        self.run_timer.start(time_to_next * 1000)
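
The z-stack block above samples 100 planes across roughly ±0.5 around the stored focus position (capped at 24.4); a tiny standalone check of the plane spacing that implies, using a hypothetical position.

import numpy

pos_z = 23.0                                                     # hypothetical focus position
zs = numpy.linspace(pos_z - 0.5, min(pos_z + 0.5, 24.4), 100, endpoint=True)
print(len(zs), round(zs[1] - zs[0], 4))                          # 100 planes, ~0.0101 apart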
Example No. 50
0
def overallBackgroundSubtract(data_dir, match_string, new_string, temporal_radius):
	'''
	Do background subtraction to find worms. This uses only past data, masking out the worms to create a background that won't disappear once the worm stops moving.
	'''
	my_files = sorted(os.listdir(data_dir))
	my_files = [a_file for a_file in my_files if match_string in a_file]
	my_times = [a_file.split(os.path.sep)[-1].split(' ')[0] for a_file in my_files]
	ending_list = [' bf00.png', ' bf01.png', ' bf10.png', ' bf11.png']

	# Run the actual simple subtraction, saving out masked files.
	context_files = [freeimage.read(data_dir + os.path.sep + my_files[j]) for j in range(0, temporal_radius)]
	for i in range(temporal_radius, len(my_files)):
		real_raw_file = freeimage.read(data_dir + os.path.sep + my_files[i])
		raw_file = real_raw_file.copy()		
		context_files.append(raw_file)

		(foreground_file, background_file) = simple_running_median_subtraction(raw_file, context_files)
		thresholded_mask = percentile_floor(foreground_file, threshold_proportion = 0.975)
		final_mask = clean_dust_and_holes(thresholded_mask)

		raw_file[final_mask.astype('bool')] = background_file[final_mask.astype('bool')]		
		freeimage.write(final_mask, data_dir + os.path.sep + my_files[i].replace(' ', ' mask_'))
		freeimage.write(background_file, data_dir + os.path.sep + my_files[i].replace(match_string, 'background.png'))	

		for an_ending in ending_list:
			my_path = data_dir + os.path.sep + my_times[i] + an_ending
			save_path = data_dir + os.path.sep + my_times[i] + an_ending.replace(' ', ' mask_')
			if os.path.isfile(my_path):
				my_image = freeimage.read(my_path)
				foreground_file = abs(my_image.astype('int16') - background_file.astype('int16'))
				foreground_file = foreground_file.astype('uint16')
				thresholded_mask = percentile_floor(foreground_file, threshold_proportion = 0.975)
				final_mask = clean_dust_and_holes(thresholded_mask)
				freeimage.write(final_mask, save_path)
		
		context_files = context_files[1:]

	# Run another small chunk of background subtraction backwards to fill out the early range.
	context_files = [freeimage.read(data_dir + os.path.sep + my_files[j]) for j in reversed(range(temporal_radius, temporal_radius*2))]
	for i in reversed(range(0, temporal_radius)):
		real_raw_file = freeimage.read(data_dir + os.path.sep + my_files[i])
		raw_file = real_raw_file.copy()		
		context_files.append(raw_file)

		(foreground_file, background_file) = simple_running_median_subtraction(raw_file, context_files)
		thresholded_mask = percentile_floor(foreground_file, threshold_proportion = 0.975)
		final_mask = clean_dust_and_holes(thresholded_mask)

		raw_file[final_mask.astype('bool')] = background_file[final_mask.astype('bool')]		
		freeimage.write(final_mask, data_dir + os.path.sep + my_files[i].replace(' ', ' mask_'))
		freeimage.write(background_file, data_dir + os.path.sep + my_files[i].replace(match_string, 'background.png'))	
		
		for an_ending in ending_list:
			my_path = data_dir + os.path.sep + my_times[i] + an_ending
			save_path = data_dir + os.path.sep + my_times[i] + an_ending.replace(' ', ' mask_')
			if os.path.isfile(my_path):
				my_image = freeimage.read(my_path)
				foreground_file = abs(my_image.astype('int16') - background_file.astype('int16'))
				foreground_file = foreground_file.astype('uint16')
				thresholded_mask = percentile_floor(foreground_file, threshold_proportion = 0.975)
				final_mask = clean_dust_and_holes(thresholded_mask)
				freeimage.write(final_mask, save_path)
	return
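
A minimal call sketch with hypothetical arguments, assuming overallBackgroundSubtract and its helpers (simple_running_median_subtraction, percentile_floor, clean_dust_and_holes) are importable. match_string selects which files in data_dir get the full median subtraction (new_string is accepted but not used inside the function body), and temporal_radius sets how many neighboring frames feed the running median.

overallBackgroundSubtract('/mnt/experiment/work/worm_01', 'bf.png', 'background.png', temporal_radius=10)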
Example No. 51
0
def write_image(image, fpath_str):
    # Rescale so the brightest pixel maps to 255, then write as 8-bit (assumes image.max() > 0).
    u8_scaled_image = ((image.astype(numpy.float32) / image.max()) * 255).astype(numpy.uint8)
    freeimage.write(u8_scaled_image, fpath_str)
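
A quick usage sketch with a hypothetical input frame; write_image simply stretches the image so its maximum lands at 255 before saving.

import freeimage

im = freeimage.read('/mnt/experiment/worm_01/2023-01-01t1200 bf.png')  # hypothetical frame
write_image(im, '/tmp/worm_preview.png')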
Example No. 52
0
    def execute_run(self):
        self.run_ts = time.time()
        self.run_idx += 1
        self.write_checkpoint()
        self.scope.il.shutter_open = True
        self.scope.tl.shutter_open = True
        self.scope.nosepiece.magnification = 5
        self.scope.camera.pixel_readout_rate = '280 MHz'
        self.scope.camera.shutter_mode = 'Rolling'
        self.scope.camera.sensor_gain = '16-bit (low noise & high well capacity)'
        for pos_set_name in self.position_set_names:
            pos_set = self.position_sets[pos_set_name]
            for pos_idx, pos in enumerate(pos_set):
                self.scope.stage.position = pos

                self.scope.tl.lamp.intensity = 69
                self.scope.tl.lamp.enabled = True
                self.scope.camera.exposure_time = 10
                # More binning gives higher contrast, meaning less light needed
                self.scope.camera.binning = '4x4'
                time.sleep(0.001)
                self.scope.camera.autofocus.new_autofocus_continuous_move(22.242692, 23.5, 0.2, max_workers=2)
                coarse_z = self.scope.stage.z
                self.scope.camera.binning = '1x1'
                self.scope.tl.lamp.intensity = 117
                time.sleep(0.001)
                self.scope.camera.autofocus.new_autofocus_continuous_move(coarse_z-0.15, min(coarse_z+0.15, 25), 0.1, metric='high pass + brenner', max_workers=2)
                fine_z = self.scope.stage.z
                self.scope.tl.lamp.enabled = False

                if pos_set_name == 'dark':
                    im_names = 'bf0','green_yellow','cyan','uv','bf1'
                    self.scope.camera.acquisition_sequencer.new_sequence(green_yellow=255, cyan=255, uv=255)
                    self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=117)
                    self.scope.camera.acquisition_sequencer.add_step(exposure_ms=20, tl_enable=False, green_yellow=True)
                    self.scope.camera.acquisition_sequencer.add_step(exposure_ms=20, tl_enable=False, cyan=True)
                    self.scope.camera.acquisition_sequencer.add_step(exposure_ms=20, tl_enable=False, uv=True)
                    self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=117)
                else:
                    im_names = 'bf',
                    self.scope.camera.acquisition_sequencer.new_sequence()
                    self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=117)

                ims = dict(zip( im_names,
                                list(zip(self.scope.camera.acquisition_sequencer.run(), self.scope.camera.acquisition_sequencer.latest_timestamps))
                          )   )

                out_dpath = self.dpath / '{}_{:04}'.format(pos_set_name, pos_idx)
                if not out_dpath.exists():
                    out_dpath.mkdir()
                for name, (im, ts) in ims.items():
                    im_fpath = out_dpath / '{}__{}_{:04}_{:04}_{}.png'.format(self.name, pos_set_name, pos_idx, self.run_idx, name)
                    freeimage.write(im, str(im_fpath), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)

                csv_fpath = out_dpath / '{}__{}_{:04}_z_positions.csv'.format(self.name, pos_set_name, pos_idx)
                if not csv_fpath.exists():
                    with csv_fpath.open('w') as f:
                        cols = ['pos_set_name','pos_idx','run_idx','coarse_z','fine_z']
                        for im_name in im_names:
                            cols.append(im_name + '_timestamp')
                        print(','.join(cols), file=f)
                ts_hz = self.scope.camera.timestamp_hz
                t0 = ims[im_names[0]][1]
                with csv_fpath.open('a') as f:
                    l = '{},{},{},{},{},'.format(pos_set_name, pos_idx, self.run_idx, coarse_z, fine_z)
                    l+= ','.join([str((ims[name][1] - t0) / ts_hz) for name in im_names])
                    print(l, file=f)

        time_to_next = max(0, self.interval - (time.time() - self.run_ts))
        self.run_timer.start(time_to_next * 1000)
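
The z_positions CSV written above stores each exposure's offset from the first image in seconds; a small standalone sketch of that conversion, with made-up tick values and a hypothetical camera tick rate.

# Camera timestamps arrive in ticks of camera.timestamp_hz; dividing the
# difference from the first exposure by the tick rate yields seconds.
ts_hz = 100_000_000                                            # hypothetical ticks per second
timestamps = {'bf0': 1_234_000_000, 'cyan': 1_254_000_000}     # made-up raw tick values
t0 = timestamps['bf0']
offsets = {name: (ts - t0) / ts_hz for name, ts in timestamps.items()}
print(offsets)                                                 # {'bf0': 0.0, 'cyan': 0.2}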
Example No. 53
0
    def execute_run(self):
        self.run_ts = time.time()
        self.run_idx += 1
        self.write_checkpoint()
        self.scope.il.shutter_open = True
        self.scope.tl.shutter_open = True
        self.scope.camera.pixel_readout_rate = '280 MHz'
        self.scope.camera.shutter_mode = 'Rolling'
        self.scope.camera.sensor_gain = '16-bit (low noise & high well capacity)'
        self.scope.camera.acquisition_sequencer.new_sequence(cyan=255)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=78)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=78)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=78)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=800, tl_enable=False, cyan=True)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=200, tl_enable=False, cyan=True)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=78)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=78)
        self.scope.camera.acquisition_sequencer.add_step(exposure_ms=10, tl_enable=True, tl_intensity=78)
        z_stack_set, z_stack_pos_idx = self.pick_random_position()
        print('Selected position {} of set {} for z_stacks.'.format(z_stack_pos_idx, z_stack_set))
        for pos_set_name in self.position_set_names:
            pos_set = self.position_sets[pos_set_name]
            for pos_idx, pos in enumerate(pos_set):
                if pos_idx in self.skipped_positions:
                    continue
                self.scope.stage.position = pos

                self.scope.tl.lamp.intensity = 69
                self.scope.tl.lamp.enabled = True
                self.scope.camera.exposure_time = 10
                # More binning gives higher contrast, meaning less light needed
                self.scope.camera.binning = '4x4'
                time.sleep(0.001)
                self.scope.camera.autofocus.new_autofocus_continuous_move(22.1436906, 23.9931316, 0.2, max_workers=2)
                coarse_z = self.scope.stage.z
                self.scope.camera.binning = '1x1'
                self.scope.tl.lamp.intensity = 97
                time.sleep(0.001)
                self.scope.camera.autofocus.new_autofocus_continuous_move(coarse_z-0.15, min(coarse_z+0.15, 23.9931316), 0.1, metric='high pass + brenner', max_workers=2)
                fine_z = self.scope.stage.z
                self.scope.tl.lamp.enabled = False

                ims = dict(zip( ('bf_a_0','bf_a_delay','bf_a_1','cyan_agitate','cyan','bf_b_0','bf_b_delay','bf_b_1',),
                                list(zip(self.scope.camera.acquisition_sequencer.run(), self.scope.camera.acquisition_sequencer.latest_timestamps))
                          )   )
                del ims['bf_a_delay']
                del ims['bf_b_delay']
                del ims['cyan_agitate']

                out_dpath = self.dpath / '{}_{:04}'.format(pos_set_name, pos_idx)
                if not out_dpath.exists():
                    out_dpath.mkdir()
                for name, (im, ts) in ims.items():
                    im_fpath = out_dpath / '{}__{}_{:04}_{:04}_{}.png'.format(self.name, pos_set_name, pos_idx, self.run_idx, name)
                    freeimage.write(im, str(im_fpath), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)

                csv_fpath = out_dpath / '{}__{}_{:04}_z_positions.csv'.format(self.name, pos_set_name, pos_idx)
                if not csv_fpath.exists():
                    with csv_fpath.open('w') as f:
                        print('pos_set_name,pos_idx,run_idx,coarse_z,fine_z,bf_a_0_timestamp,bf_a_1_timestamp,cyan_timestamp,bf_b_0_timestamp,bf_b_1_timestamp', file=f)
                ts_hz = self.scope.camera.timestamp_hz
                t0 = ims['bf_a_0'][1]
                with csv_fpath.open('a') as f:
                    l = '{},{},{},{},{},'.format(pos_set_name, pos_idx, self.run_idx, coarse_z, fine_z)
                    l+= ','.join([str((ims[name][1] - t0) / ts_hz) for name in ('bf_a_0','bf_a_1','cyan','bf_b_0','bf_b_1')])
                    print(l, file=f)

                if pos_set_name == z_stack_set and pos_idx == z_stack_pos_idx:
                    ims = []
                    self.scope.tl.lamp.intensity = 97
                    self.scope.tl.lamp.enabled = True
                    self.scope.camera.exposure_time = 10
                    out_dpath = self.dpath / 'z_stacks'
                    if not out_dpath.exists():
                        out_dpath.mkdir()
                    time.sleep(0.001)
                    self.scope.camera.start_image_sequence_acquisition(frame_count=100, trigger_mode='Software')
                    for z in numpy.linspace(pos[2]-0.5, min(pos[2]+0.5, 24.4), 100, endpoint=True):
                        z = float(z)
                        self.scope.stage.z = z
                        self.scope.camera.send_software_trigger()
                        ims.append((self.scope.camera.next_image(), z))
                    self.scope.tl.lamp.enabled = False
                    self.scope.camera.end_image_sequence_acquisition()

                    for idx, (im, z) in enumerate(ims):
                        im_fpath = out_dpath / '{}__{}_{:04}_{:04}_{:04}_{}.png'.format(self.name, pos_set_name, pos_idx, self.run_idx, idx, z)
                        freeimage.write(im, str(im_fpath), flags=freeimage.IO_FLAGS.PNG_Z_BEST_SPEED)

        time_to_next = max(0, self.interval - (time.time() - self.run_ts))
        self.run_timer.start(time_to_next * 1000)