def filter_excluded_images(experiment_root):
    '''Return a scan filter passing timepoints only from non-excluded positions.'''
    experiment_annotations = load_data.read_annotations(experiment_root)

    def scan_filter(position_name, timepoint_name):
        # Position-level annotations are the first element of the annotation pair.
        position_annotations = experiment_annotations[position_name][0]
        return not position_annotations['exclude']
    return scan_filter
def replace_annotation(experiment_root, annotation_type, old_annotation_values,
                       new_annotation_value, annotation_dir='annotations'):
    '''Interactively replace timepoint annotation values across an experiment.

    Prompts for confirmation, then rewrites every timepoint whose
    `annotation_type` annotation is one of `old_annotation_values` to
    `new_annotation_value` and saves the annotations back to disk.

    Parameters:
        experiment_root: str/pathlib.Path to the experiment directory
        annotation_type: annotation key to modify (e.g. 'stage')
        old_annotation_values: a single value or an iterable of values to replace
        new_annotation_value: replacement value
        annotation_dir: subdirectory holding the annotation files
    '''
    reinit = input('press y and enter to delete annotations; press enter to exit. ')
    if reinit.lower() != 'y':
        return
    # Normalize old_annotation_values to a list. Strings are iterable, so check
    # them first; any non-iterable scalar is wrapped in a list (the original
    # called list() on scalars, which raises TypeError, and used the removed
    # collections.Iterable alias instead of collections.abc.Iterable).
    if isinstance(old_annotation_values, str):
        old_annotation_values = [old_annotation_values]
    elif not isinstance(old_annotation_values, collections.abc.Iterable):
        old_annotation_values = [old_annotation_values]
    experiment_annotations = load_data.read_annotations(
        experiment_root, annotation_dir=annotation_dir)
    for position, position_annotations in experiment_annotations.items():
        # position_annotations is (position-level dict, per-timepoint dict).
        for timepoint, timepoint_annotations in position_annotations[1].items():
            if (annotation_type in timepoint_annotations
                    and timepoint_annotations[annotation_type] in old_annotation_values):
                timepoint_annotations[annotation_type] = new_annotation_value
    load_data.write_annotations(experiment_root, experiment_annotations,
                                annotation_dir=annotation_dir)
def filter_timepoint_wrap(exp_dir, filter_excluded=True, annotation_dir='annotations', channels=('bf',)):
    '''Scan an experiment directory for timepoints that have a ' comp.png' image.

    Parameters:
        exp_dir: str path to the experiment directory
        filter_excluded: if True, drop positions annotated as excluded
        annotation_dir: subdirectory holding annotation files
        channels: image channels passed through to scan_experiment_dir
            (default changed from a mutable list to an equivalent tuple)

    Returns: the mapping produced by load_data.scan_experiment_dir.
    '''
    positions = load_data.read_annotations(exp_dir, annotation_dir=annotation_dir)
    if filter_excluded:
        positions = load_data.filter_annotations(positions, load_data.filter_excluded)

    def good_pos_filter(position_name, timepoint_name):
        # Accept only known (non-excluded) positions whose composite image exists.
        # The original's else-branch returned `position_name in positions`,
        # which was always False there; fold both cases into one conjunction.
        return (position_name in positions
                and os.path.exists(os.path.join(exp_dir, position_name,
                                                timepoint_name + ' comp.png')))
    return load_data.scan_experiment_dir(exp_dir, channels=channels,
                                         timepoint_filter=good_pos_filter)
def remove_poses(experiment_root):
    '''Null out the 'pose' annotation for every timepoint in an experiment,
    then write the modified annotations back to disk.'''
    experiment_annotations = load_data.read_annotations(experiment_root)
    for position_annotations in experiment_annotations.values():
        # Index 1 of the annotation pair holds the per-timepoint dicts.
        for timepoint_annotation in position_annotations[1].values():
            timepoint_annotation['pose'] = (None, None)
    load_data.write_annotations(experiment_root, experiment_annotations)
def load_masks(experiment_root, mask_root=None):
    '''Scan adult, subsampled, non-excluded timepoint images and append the
    corresponding mask image path to each timepoint's file list.

    Parameters:
        experiment_root: str/pathlib.Path to the experiment directory
        mask_root: directory of mask images; defaults to
            experiment_root/derived_data/mask

    Returns: dict as from load_data.scan_experiment_dir, with one extra
        mask path per timepoint.
    '''
    experiment_root = pathlib.Path(experiment_root)
    if mask_root is None:
        mask_root = experiment_root / 'derived_data' / 'mask'
    mask_root = pathlib.Path(mask_root)

    experiment_annotations = load_data.read_annotations(experiment_root)
    # Successively narrow to non-excluded, subsampled, adult timepoints.
    for annotation_filter in (load_data.filter_excluded,
                              elegant_filters.filter_subsample_timepoints(experiment_root),
                              elegant_filters.filter_adult_timepoints):
        experiment_annotations = load_data.filter_annotations(experiment_annotations, annotation_filter)

    image_filter = elegant_filters.filter_from_elegant_dict(experiment_annotations)
    experiment_images = load_data.scan_experiment_dir(experiment_root, timepoint_filter=image_filter)
    for position, position_images in experiment_images.items():
        for timepoint, timepoint_images in position_images.items():
            timepoint_images.append(mask_root / position / f'{timepoint} bf.png')
    return experiment_images
def filter_range_before_stage(experiment_dir, time_radius, stage='adult'):
    '''Make an annotation filter selecting timepoints within `time_radius`
    hours *before* the first timepoint annotated with `stage`.

    Parameters:
        experiment_dir: str/pathlib.Path to the experiment
        time_radius: window size in hours
        stage: stage whose first annotated timepoint anchors the window.
            (The original ignored this parameter and hard-coded 'adult'.)

    Returns: a filter callable suitable for load_data.filter_annotations.
    '''
    experiment_annotations = load_data.read_annotations(experiment_dir)
    timepoints_to_load = {}
    for position, (position_annotations, timepoint_annotations) in experiment_annotations.items():
        if position_annotations['exclude']:
            continue
        # Find the timestamp of the first timepoint annotated with `stage`.
        first_stage_timestamp = None
        for time_annotations in timepoint_annotations.values():
            if time_annotations.get('stage') == stage:
                first_stage_timestamp = time_annotations['timestamp']
                break
        if first_stage_timestamp is None:
            # No such stage for this position. The original left the previous
            # position's timestamp bound here (or raised NameError on the
            # first position), silently selecting wrong timepoints.
            continue
        timepoints_to_load[position] = [
            timepoint
            for timepoint, time_annotations in timepoint_annotations.items()
            if 0 <= first_stage_timestamp - time_annotations['timestamp'] < time_radius * 3600]

    def stage_window_filter(position_name, position_annotations, timepoint_annotations):
        if position_name not in timepoints_to_load:
            return False
        return [timepoint in timepoints_to_load[position_name]
                for timepoint in timepoint_annotations]
    return stage_window_filter
def run_canonical_measurements(experiment_dir):
    '''Run standard measurements on the specified experiment directory,
    collating the derived data after each measurement pass.'''
    experiment_dir = pathlib.Path(experiment_dir)
    process_data.update_annotations(experiment_dir)
    position_features = ['stage_x', 'stage_y', 'starting_stage_z', 'notes']

    annotations = load_data.read_annotations(experiment_dir)
    annotations = load_data.filter_annotations(annotations, load_data.filter_excluded)
    annotations = load_data.filter_annotations(annotations, filter_worm_positions)

    # Bug fix: the original unpacked annotations.items() pairs as
    # (position_annotations, timepoint_annotations), so 'lawn_area' was tested
    # against position *names* (substring check on strings). Use .values().
    if any('lawn_area' in position_annotations
           for position_annotations, timepoint_annotations in annotations.values()):
        position_features.append('lawn_area')

    make_basic_measurements(experiment_dir, annotations)
    process_data.collate_data(experiment_dir, position_features=position_features)

    # Bug fix: a stray `return` here made everything below unreachable
    # (apparently a debugging leftover); removed so the full suite runs.
    make_pose_measurements(experiment_dir, annotations)
    process_data.collate_data(experiment_dir, position_features=position_features)
    #make_mask_measurements(experiment_dir, annotations)

    image_channels = elegant_hacks.get_image_channels(experiment_dir)
    print(f'Image channels: {image_channels}')

    if 'bf_1' in image_channels:
        print('Found multipass movement channel bf_1; making measurements')
        make_multipass_measurements(experiment_dir, annotations)
        # For convenience since autofluorescence can take a little while....
        process_data.collate_data(experiment_dir, position_features=position_features)

    if 'green_yellow_excitation_autofluorescence' in image_channels or 'autofluorescence' in image_channels:
        fl_measurement_name = ('autofluorescence' if 'autofluorescence' in image_channels
                               else 'green_yellow_excitation_autofluorescence')
        print(f'Found autofluorescence channel {fl_measurement_name}; making measurements')
        make_af_measurements(experiment_dir, annotations, fl_measurement_name=fl_measurement_name)
        process_data.collate_data(experiment_dir, position_features=position_features)
def show_position_notes(experiment_dir):
    '''Print each position's exclusion flag and notes annotation.'''
    assert pathlib.Path(experiment_dir).exists()
    experiment_annotations = load_data.read_annotations(experiment_dir)
    for position, (position_annotations, timepoint_annotations) in experiment_annotations.items():
        excluded = position_annotations["exclude"]
        notes = position_annotations["notes"]
        print(f'{position} (excluded = {excluded}): {notes}')
def filter_adult_images(experiment_root):
    '''Return a scan filter passing only timepoints annotated as 'adult'.

    NOTE(review): shadowed by a later filter_adult_images definition in this
    module that additionally checks the position's exclude flag.
    '''
    experiment_annotations = load_data.read_annotations(experiment_root)

    def scan_filter(position_name, timepoint_name):
        timepoint_info = experiment_annotations[position_name][1][timepoint_name]
        return timepoint_info.get('stage') == 'adult'
    return scan_filter
def filter_adult_images(experiment_root):
    '''Filter for only adult timepoints from non-excluded animals'''
    experiment_annotations = load_data.read_annotations(experiment_root)

    def scan_filter(position_name, timepoint_name):
        # Reject everything from excluded positions up front.
        if experiment_annotations[position_name][0]['exclude']:
            return False
        timepoint_info = experiment_annotations[position_name][1][timepoint_name]
        return timepoint_info.get('stage') == 'adult'
    return scan_filter
def filter_latest_images(experiment_root):
    '''Scan for images from timepoints after each position's last annotated one.

    For every non-excluded position, passes only timepoints whose name sorts
    after the position's '__last_timepoint_annotated__' marker.

    Returns: the mapping produced by load_data.scan_experiment_dir.
    '''
    annotations = load_data.read_annotations(experiment_root)
    good_annotations = load_data.filter_annotations(annotations, load_data.filter_excluded)

    def latelife_filter(position_name, timepoint_name):
        return (position_name in good_annotations
                and timepoint_name > good_annotations[position_name][0]['__last_timepoint_annotated__'])
    # Bug fix: the original passed the undefined name `expt_dir` here,
    # raising NameError on every call.
    return load_data.scan_experiment_dir(experiment_root, timepoint_filter=latelife_filter)
def check_for_kw(expt_dir, kw, filter_good=True, verbose=True):
    '''Report which positions have annotations matching a keyword.

    Parameters:
        expt_dir: experiment directory
        kw: keyword passed to elegant_filters.filter_by_kw
        filter_good: if True, drop excluded positions before matching
        verbose: if True, print a one-line summary

    Returns: set of position names whose annotations match kw.
    '''
    annotations = load_data.read_annotations(expt_dir)
    if filter_good:
        annotations = load_data.filter_annotations(annotations, load_data.filter_excluded)
    kw_annotations = load_data.filter_annotations(annotations, elegant_filters.filter_by_kw(kw))
    if verbose:
        excluded_note = "(minus excluded)" if filter_good else ""
        print(f'{len(kw_annotations)}/{len(annotations)} of animals in experiment has kw {kw} {excluded_note}')
    return set(kw_annotations.keys())
def check_for_alive(expt_dir):
    '''Print a live/total summary and return the set of still-alive positions.'''
    annotations = load_data.read_annotations(expt_dir)
    good_annotations = load_data.filter_annotations(annotations, load_data.filter_excluded)
    dead_annotations = check_stage_annotations(good_annotations, ['dead'])
    alive_count = len(good_annotations) - len(dead_annotations)
    print(f'{alive_count}/{len(good_annotations)} still alive')
    # Alive = good positions minus those annotated as having died.
    return set(good_annotations.keys()) - set(dead_annotations.keys())
def collate_data(experiment_root, position_features=('stage_x', 'stage_y', 'starting_stage_z')):
    """Gather all .tsv files produced by measurement runs into a single file.

    This function will concatenate all individual-worm .tsv files for all of
    the different measure_worms runs (which each output their .tsv files into a
    different subdirectory of '{experiment_root}/derived_data/measurements')
    into a single master-file of timecourse data:
        {experiment_root}/derived_data/measurements/{experiment_root.name} timecourse.tsv

    If possible, lifespans and other spans will be calculated for the worms,
    with the results stored in a master-file of summary data:
        {experiment_root}/derived_data/measurements/{experiment_root.name} summary.tsv

    Any features named in the position_features parameter will be transfered
    from the annotations for that position to the worm summary data as well.

    The worms in these files will be renamed as:
        '{experiment_root.name} {position_name}'
    """
    experiment_root = pathlib.Path(experiment_root)
    positions = load_data.read_annotations(experiment_root)
    experiment_name = experiment_root.name
    derived_root = experiment_root / DERIVED_ROOT
    measurement_root = derived_root / 'measurements'
    measurements = []
    name_prefix = experiment_name + ' '
    for measurement_dir in measurement_root.iterdir():
        files = list(measurement_dir.glob('*.tsv'))
        if files:
            measurements.append(worm_data.read_worms(*files, name_prefix=name_prefix,
                                                     calculate_lifespan=False))
    if not measurements:
        # Bug fix: the original raised IndexError on measurements[0] when no
        # measurement run has produced any .tsv files yet.
        return
    worms = measurements[0]
    for other_measurement in measurements[1:]:
        worms.merge_in(other_measurement)
    for w in worms:
        try:
            calculate_ages_and_spans(w)
        except (NameError, ValueError):
            print(f'could not calculate lifespan for worm {w.name}')
        # Copy requested position-level annotation features onto the worm.
        position_annotations, timepoint_annotations = positions.get(
            w.name[len(name_prefix):], ({}, {}))
        for feature in position_features:
            if feature in position_annotations:
                setattr(w, feature, position_annotations[feature])
    # NOTE(review): output files land in derived_data/ directly, not in
    # derived_data/measurements/ as the docstring's paths suggest — confirm
    # which location is intended.
    worms.write_timecourse_data(derived_root / f'{experiment_name} timecourse.tsv',
                                multi_worm_file=True, error_on_missing=False)
    worms.write_summary_data(derived_root / f'{experiment_name} summary.tsv',
                             error_on_missing=False)
def get_image_channels(experiment_root):
    '''Return the set of image channel names present for the first
    non-excluded position of an experiment.

    Channel names are taken as the second whitespace-delimited token of each
    png/tif filename stem (e.g. '000 bf.png' -> 'bf').

    Returns: set of channel-name strings (empty if no positions survive
        exclusion filtering).
    '''
    experiment_root = pathlib.Path(experiment_root)
    annotations = load_data.read_annotations(experiment_root)
    annotations = load_data.filter_annotations(annotations, load_data.filter_excluded)
    positions = list(annotations.keys())
    if not positions:
        # Robustness: the original raised IndexError on positions[0] when
        # every position was excluded (or there were no annotations).
        return set()
    image_channels = {
        image_file.stem.split()[1]
        for image_file in (experiment_root / positions[0]).iterdir()
        if image_file.suffix[1:] in ('png', 'tif')
        # Skip files whose stem has no channel token (original IndexError'd).
        and len(image_file.stem.split()) > 1}
    return image_channels
def enumerate_common_annotations(experiment_dir, bad_kws=None, verbose=True, filter_good=True):
    '''Tally positions whose 'notes' annotation contains common problem keywords.

    Parameters:
        experiment_dir: str/pathlib.Path to the experiment directory
        bad_kws: list of keyword groups (each a list of synonyms); defaults to
            a standard set of husbandry/health problem keywords
        verbose: if True, print per-group worm lists before the summary table
        filter_good: if True, drop excluded positions first and report them
    '''
    if not bad_kws:
        bad_kws = [['Nw', 'Nh'], ['REFERENCE'], ['CONTAMINATION'],
                   ['DOUBLE WORM', 'TRIPLE WORM'], ['ESCAPE', 'VISITED'],
                   ['LOST'], ['LARVAL', 'DELAYED'], ['FERTILE'],
                   ['PVL', 'BURST', ], ['bag\'d'],
                   ['small', 'sickly', 'scrawny', 'mottled']]
    experiment_dir = pathlib.Path(experiment_dir)
    inventory_bad_worms = []  # one list of matching worm names per keyword group
    annotations = load_data.read_annotations(experiment_dir)
    if filter_good:
        original_worms = annotations.keys()
        annotations = load_data.filter_annotations(annotations, load_data.filter_excluded)
        # Worms dropped by the exclusion filter, for reporting below.
        excluded_worms = sorted(list(set(original_worms) - set(annotations.keys())))
    for kw_group in bad_kws:
        group_list = []
        for kw in kw_group:
            # A worm matches when the keyword appears anywhere in its
            # position-level 'notes' annotation (substring match).
            group_list.extend([worm for worm, worm_annotations in annotations.items()
                               if kw in worm_annotations[0]['notes']])
        # De-duplicate worms matched by several synonyms in the same group.
        group_list = utilities.unique_items(group_list)
        inventory_bad_worms.append(group_list)
    if verbose:
        print(f'\n{experiment_dir.name} (n = {len(annotations)})')
        if filter_good:
            print(f'(excluded): {len(excluded_worms)} ({excluded_worms})')
        for kw_group, bad_worms in zip(bad_kws, inventory_bad_worms):
            print(f'{"/".join(kw_group)}: {len(bad_worms)} ({bad_worms})')
    # Summary table: one count per keyword group plus the overall total.
    utilities.print_table(
        [[len(bad_worms) for bad_worms in inventory_bad_worms] + [f'{len(annotations)}']],
        column_names=[f'{"/".join(kw_group)}' for kw_group in bad_kws] + ['Total'],
        row_names=[experiment_dir.name])
def reset_positions_manual(scope, experiment_dir, *annotation_filters, revert_z=False):
    '''Reset positions manually for an experiment (i.e. with a separate ris_widget window open)

        Parameters:
            scope - ScopeClient object as defined by scope.scope_client
            experiment_dir - str/pathlib.Path to experiment
            annotation_filters - Optional variable filters to use to isolate specific positions of interest

        Call with annotation filters like so:
            reset_position.reset_positions(scope, experiment_dir, elegant_filters.filter_excluded, elegant_filters.filter_live_animals)
    '''
    experiment_dir = pathlib.Path(experiment_dir)
    print(f'Traversing {experiment_dir.name}')
    metadata = load_data.read_metadata(experiment_dir)
    if annotation_filters:
        # Restrict to positions surviving every supplied filter.
        experiment_annotations = load_data.read_annotations(experiment_dir)
        for filter in annotation_filters:
            experiment_annotations = load_data.filter_annotations(experiment_annotations, filter)
        positions = experiment_annotations.keys()
    else:
        # No filters: revisit every position recorded in the metadata.
        positions = metadata['positions'].keys()
    # Interactively collect new stage coordinates for each position.
    new_positions = poll_positions(scope, metadata, positions, revert_z=revert_z)
    if new_positions:
        try:
            input(f'\nPress any key to save positions; ctrl-c to abort')
            time_label = time.strftime('%Y%m%d-%H%M-%S')
            # Back up the old metadata before overwriting position coordinates.
            with (experiment_dir / f'experiment_metadata_beforechangingpositions_{time_label}.json').open('w') as mdata_file:
                datafile.json_encode_legible_to_file(metadata, mdata_file)
            metadata['positions'].update(new_positions)
            load_data.write_metadata(metadata, experiment_dir)
        except KeyboardInterrupt:
            # User aborted at the confirmation prompt; leave metadata untouched.
            pass
    else:
        print('No positions found to reset')
def filter_good_positions_wrap(experiment_root, channels='bf', error_on_missing=False):
    '''Scan an experiment directory for images from good-but-incomplete positions.'''
    all_annotations = load_data.read_annotations(experiment_root)
    good_positions = load_data.filter_annotations(all_annotations, load_data.filter_good_incomplete)

    def position_passes(position_name, timepoint_name):
        # Every timepoint of a surviving position is accepted.
        return position_name in good_positions

    return load_data.scan_experiment_dir(experiment_root,
                                         timepoint_filter=position_passes,
                                         channels=channels,
                                         error_on_missing=error_on_missing)
def benchmark_masks_DS(expt_dir):
    '''Compute intersection-over-union between saved segmenter masks and masks
    regenerated from manually-annotated worm poses.

    Parameters:
        expt_dir: str/pathlib.Path to the experiment directory

    Returns: OrderedDict mapping '{position}_{timepoint}' -> IoU value.
    '''
    expt_dir = pathlib.Path(expt_dir)
    annotations = load_data.read_annotations(expt_dir)
    # Bug fix: the original called load_data.filter_excluded(annotations, ...)
    # instead of load_data.filter_annotations(annotations, load_data.filter_excluded).
    annotations = load_data.filter_annotations(annotations, load_data.filter_excluded)
    ious = collections.OrderedDict()
    for position, position_annotations in annotations.items():
        # Annotations are (position-level dict, per-timepoint dict); the
        # original iterated .items() on the tuple itself and was also missing
        # the for-loop's colon, so it could never run.
        for timepoint, timepoint_annotations in position_annotations[1].items():
            center_tck, width_tck = timepoint_annotations.get('pose', (None, None))
            if center_tck is not None and width_tck is not None:
                image_key = position + '_' + timepoint
                # Saved mask made by segmenter
                mask_image = freeimage.read(str(expt_dir / position / (timepoint + ' bf_mask.png'))) > 0
                # Mask regenerated from manual annotations
                manual_mask = worm_spline.lab_frame_mask(center_tck, width_tck, mask_image.shape) > 0
                ious[image_key] = (mask_image & manual_mask).sum() / (mask_image | manual_mask).sum()
    return ious
def compile_annotations_from_tsv(experiment_root):
    '''Convert legacy .tsv death/stage annotations into per-timepoint annotations.

    Reads the first *.tsv file found in experiment_root, seeds stage-transition
    timepoints from its frame numbers, then lets process_data propagate stages.

    NOTE(review): relies on `previous_annotations`, a module-level iterable of
    stage-field names not visible in this chunk — confirm it is defined.
    '''
    if type(experiment_root) is str:
        # Undo shell-style escaped spaces in pasted paths.
        experiment_root = pathlib.Path(experiment_root.replace('\\ ', ' '))
    _check_metadata_for_timepoints(experiment_root)
    process_data.update_annotations(experiment_root)
    with (experiment_root / 'experiment_metadata.json').open('r') as mdata_file:
        experiment_metadata = json.load(mdata_file)
    annotation_data = {}
    with list(experiment_root.glob('*.tsv'))[0].open('r') as annotation_file:
        reader = csv.reader(annotation_file, delimiter='\t')
        _ = reader.__next__() # Header
        for line in reader:
            position = line[0][1:] # Starts with '\'
            notes = line[-1]
            annotation_data[position] = {}
            if 'DEAD' in notes:
                # Map each annotated frame number to its timepoint name.
                for field, frame_num in zip(previous_annotations, line[1:]):
                    annotation_data[position][field] = experiment_metadata['timepoints'][int(frame_num)]
            annotation_data[position]['notes'] = line[-1]
    annotations = load_data.read_annotations(experiment_root)
    # Note: process_data.propagate_worm_stages assumes that stages are monotonically increasing.
    # To make use of propagate_worm_stages, prepopulate only the timepoints where there's a transition.
    for position, (position_annotations, timepoint_annotations) in annotations.items():
        if 'DEAD' in annotation_data[position]['notes']:
            # Seed the first timepoint as 'egg', then mark each transition.
            first_timepoint = list(timepoint_annotations.keys())[0]
            timepoint_annotations[first_timepoint]['stage'] = 'egg'
            for field in previous_annotations:
                transition_timepoint = annotation_data[position][field]
                timepoint_annotations[transition_timepoint]['stage'] = field
            position_annotations['exclude'] = False
        else:
            # Worms without a DEAD note cannot be staged from the tsv; exclude.
            position_annotations['exclude'] = True
        position_annotations['notes'] = annotation_data[position]['notes']
    load_data.write_annotations(experiment_root, annotations)
    process_data.annotate(experiment_root,
                          position_annotators=[process_data.propagate_worm_stage])
def process_experiment(experiment_directory, channels_to_process, num_workers=None, position_filter=None, **process_args):
    '''Process image channels for all selected positions of an experiment in parallel.

    Parameters:
        experiment_directory: str/pathlib.Path to the experiment
        channels_to_process: channels forwarded to process_position_directory
        num_workers: worker process count; defaults to cpu_count() - 2
        position_filter: annotation filter selecting positions; defaults to
            load_data.filter_good_complete
        process_args: extra keyword arguments for process_position_directory

    Raises: RuntimeError if num_workers exceeds the cpu count.
    '''
    experiment_directory = pathlib.Path(experiment_directory)
    if num_workers is None:  # idiom fix: was `== None`
        num_workers = multiprocessing.cpu_count() - 2
    elif num_workers > multiprocessing.cpu_count():
        raise RuntimeError('Attempted to run jobs with more workers than cpu\'s!')
    if position_filter is None:
        position_filter = load_data.filter_good_complete

    # Make super_vignette if needed.
    if not (experiment_directory / 'super_vignette.pickle').exists():
        make_super_vignette(experiment_directory)

    # Make appropriate subdirectories (parents=True also creates derived_data).
    (experiment_directory / 'derived_data' / 'mask').mkdir(parents=True, exist_ok=True)

    # Enumerate position directories.
    positions = load_data.read_annotations(experiment_directory)
    # Bug fix: pass the filter positionally — every other filter_annotations
    # call in this module is positional; `position_filter=` is not a keyword
    # the function accepts.
    positions = load_data.filter_annotations(positions, position_filter)
    print('Processing the following positions:')
    print(repr(list(positions.keys())))

    with multiprocessing.Pool(processes=num_workers) as pool:
        try:
            pool.map(functools.partial(process_position_directory,
                                       channels_to_process=channels_to_process,
                                       **process_args),
                     [experiment_directory / pos for pos in positions])
            pool.close()
            pool.join()
            print('Terminated successfully')
        except KeyboardInterrupt:
            # Tear down workers promptly on ctrl-c, then re-raise.
            pool.terminate()
            pool.join()
            raise
def overlay_masks(rw, position_directory):
    '''Load a position's brightfield images (plus masks where available) into
    a ris_widget flipbook.'''
    position_directory = pathlib.Path(position_directory)
    expt_dir = position_directory.parent
    position_annotations = load_data.read_annotations(expt_dir)[position_directory.name]
    global_positions, timepoint_annotations = position_annotations
    mask_dir = expt_dir / 'derived_data' / 'mask' / position_directory.name
    flipbook_pages = []
    for timepoint in timepoint_annotations:
        bf_image = freeimage.read(str(position_directory / (timepoint + ' bf.png')))
        page = [bf_image]
        mask_file = mask_dir / (timepoint + ' bf.png')
        if mask_file.exists():
            # Binarize the mask on load.
            page.append(freeimage.read(str(mask_file)) > 0)
        flipbook_pages.append(page)
    rw.flipbook_pages = flipbook_pages
def run_holly_measurements(experiment_dir):
    '''Run basic, pose, gfp, and autofluorescence measurements (all stages)
    for non-excluded positions, collating derived data along the way.'''
    position_features = ['stage_x', 'stage_y', 'starting_stage_z', 'notes']
    annotations = load_data.read_annotations(experiment_dir)
    annotations = load_data.filter_annotations(annotations, load_data.filter_excluded)

    make_basic_measurements(experiment_dir, annotations)
    make_pose_measurements(experiment_dir, annotations, adult_only=False)
    # Make preliminary analysis faster.
    process_data.collate_data(experiment_dir, position_features=position_features)

    make_gfp_measurements(experiment_dir, annotations, adult_only=False)
    make_af_measurements(experiment_dir, annotations, adult_only=False)
    process_data.collate_data(experiment_dir, position_features=position_features)
def propagate_stages(experiment_root, verbose=False):
    '''
        Modifies experiment annotations by propagating stage information forward
        in time across annotated timepoints. Somewhat deprecated by
        process_data.propagate_worm_stages/update_annotations; however, useful
        for the case in which one wants to propagate stages and can't assume
        that stages are monotonically increasing with time.
    '''
    annotations = load_data.read_annotations(experiment_root)
    for position_name, (position_annotations, timepoint_annotations) in annotations.items():
        running_stage = None  # stage currently being propagated forward
        changed = []  # timepoints whose stage this pass rewrote (for reporting)
        encountered_stages = []  # stages already seen, in first-seen order
        for timepoint, timepoint_info in timepoint_annotations.items():
            already_encountered = timepoint_info.get('stage') in encountered_stages
            stage_set = timepoint_info.get('stage') is not None
            if running_stage is None: # Either first timepoint or all the annotations up to now are null
                running_stage = timepoint_info.get('stage')
            elif timepoint_info.get('stage') != running_stage and stage_set and not already_encountered:
                # A new, not-yet-seen stage starts here; propagate it forward.
                running_stage = timepoint_info['stage']
            if stage_set and not already_encountered:
                encountered_stages.append(timepoint_info['stage'])
            if not stage_set and running_stage is not None: # Also handles the case that we are working with an excluded position
                timepoint_info['stage'] = running_stage
                changed.append(timepoint)
            elif stage_set and timepoint_info['stage'] != running_stage and already_encountered:
                # A previously-seen stage reappearing out of order is
                # overwritten with the running stage.
                timepoint_info['stage'] = running_stage
                changed.append(timepoint)
        if verbose and changed:
            print(f'{position_name}: {changed}')
    # NOTE(review): write_annotations presumably returns None; the assignment
    # back to `annotations` looks vestigial — confirm.
    annotations = load_data.write_annotations(experiment_root, annotations)
def replace_annotation(experiment_root, annotation_type, old_annotation_values,
                       new_annotation_value, annotation_dir='annotations'):
    '''Replace timepoint annotation values across an experiment.

    Rewrites every timepoint whose `annotation_type` annotation is one of
    `old_annotation_values` to `new_annotation_value`, then saves the
    annotations back to disk.

    Parameters:
        experiment_root: str/pathlib.Path to the experiment directory
        annotation_type: annotation key to modify (e.g. 'stage')
        old_annotation_values: a single value or an iterable of values to replace
        new_annotation_value: replacement value
        annotation_dir: subdirectory holding the annotation files
    '''
    # Normalize old_annotation_values to a list. Strings are iterable, so check
    # them first; any non-iterable scalar is wrapped in a list (the original
    # called list() on scalars, which raises TypeError, and used the removed
    # collections.Iterable alias instead of collections.abc.Iterable).
    if isinstance(old_annotation_values, str):
        old_annotation_values = [old_annotation_values]
    elif not isinstance(old_annotation_values, collections.abc.Iterable):
        old_annotation_values = [old_annotation_values]
    experiment_annotations = load_data.read_annotations(
        experiment_root, annotation_dir=annotation_dir)
    for position, position_annotations in experiment_annotations.items():
        # position_annotations is (position-level dict, per-timepoint dict).
        for timepoint, timepoint_annotations in position_annotations[1].items():
            if (annotation_type in timepoint_annotations
                    and timepoint_annotations[annotation_type] in old_annotation_values):
                timepoint_annotations[annotation_type] = new_annotation_value
    load_data.write_annotations(experiment_root, experiment_annotations,
                                annotation_dir=annotation_dir)
def load_derived_images(experiment_root, derived_dir, *additional_filters):
    '''Scan non-excluded timepoint images and append the matching image from a
    derived_data subdirectory to each timepoint's file list.

    Parameters:
        experiment_root: str/pathlib.Path to the experiment
        derived_dir: subdirectory name under derived_data to pull images from
        additional_filters: extra annotation filters to apply

    Returns: dict as from load_data.scan_experiment_dir.
    '''
    experiment_root = pathlib.Path(experiment_root)
    experiment_annotations = load_data.read_annotations(experiment_root)
    # Apply the exclusion filter first, then any caller-supplied filters.
    for annotation_filter in (load_data.filter_excluded,) + additional_filters:
        experiment_annotations = load_data.filter_annotations(experiment_annotations, annotation_filter)
    image_filter = elegant_filters.filter_from_elegant_dict(experiment_annotations)
    experiment_images = load_data.scan_experiment_dir(experiment_root, timepoint_filter=image_filter)
    derived_root = experiment_root / 'derived_data' / derived_dir
    for position, position_images in experiment_images.items():
        for timepoint, timepoint_images in position_images.items():
            timepoint_images.append(derived_root / position / f'{timepoint} bf.png')
    return experiment_images
def make_mask_measurements(experiment_root, annotations=None, adult_only=True):
    '''Run mask-based pose measurements over an experiment.

    Parameters:
        experiment_root: str/pathlib.Path to the experiment
        annotations: pre-filtered annotations; if None, read them and keep
            only living timepoints from non-excluded positions
        adult_only: if True, restrict measurements to adult-stage timepoints
    '''
    #process_data.annotate(experiment_root, annotators=[annotate_timepoints]) # Why?
    experiment_metadata = load_data.read_metadata(experiment_root)
    # NOTE(review): presumably a 1.3 um/px reference scale adjusted by the
    # objective and optocoupler from metadata — confirm optics constants.
    microns_per_pixel = 1.3 * 5 / (experiment_metadata['objective'] * experiment_metadata['optocoupler'])
    measures = [MaskPoseMeasurements(microns_per_pixel=microns_per_pixel)]
    measurement_name = 'mask_measures'
    if annotations is None:
        annotations = load_data.read_annotations(experiment_root)
        # Bug fix: the original passed the bare (undefined) name
        # `filter_excluded`; use load_data.filter_excluded as elsewhere.
        annotations = load_data.filter_annotations(annotations, load_data.filter_excluded)
        annotations = load_data.filter_annotations(annotations, elegant_filters.filter_living_timepoints)
    if adult_only:
        annotations = load_data.filter_annotations(annotations, elegant_filters.filter_by_stage('adult'))
    process_data.measure_worms(experiment_root, annotations, measures, measurement_name)
def check_for_null_poses(experiment_root, annotation_dir='annotations'):
    '''Print adult timepoints that are missing a valid pose annotation.

    Checks the default 'pose' tag and, if multipass poses ('bf_1 pose'...) are
    present anywhere in the experiment, 'bf_1 pose' through 'bf_7 pose' too.
    '''
    assert pathlib.Path(experiment_root).exists()
    experiment_annotations = load_data.read_annotations(
        experiment_root, annotation_dir=annotation_dir)
    experiment_annotations = load_data.filter_annotations(
        experiment_annotations, load_data.filter_excluded)
    poses = ['pose']
    for position, position_annotations in experiment_annotations.items():
        for timepoint, timepoint_annotations in position_annotations[1].items():
            if 'bf_1 pose' in timepoint_annotations and 'bf_1 pose' not in poses:
                # Multipass acquisition detected: also track bf_1..bf_7 poses.
                for i in range(7):
                    poses.append(f'bf_{i+1} pose')
            for pose_tag in poses:
                # Bug fix: use .get for 'stage' too — the original raised
                # KeyError on timepoints without a stage annotation.
                if (timepoint_annotations.get(pose_tag, (None, None))[0] is None
                        and timepoint_annotations.get('stage') == 'adult'):
                    # Message typo fixed: 'vaild' -> 'valid'.
                    print(f"Position {position}, timepoint {timepoint} doesn't have a valid {pose_tag} pose")
    print(f'Checked for poses {poses}')
def check_experiment_stages(experiment_dir, stages, verbose=False):
    """Check an experiment for incomplete annotations

        Parameters
            experiment_dir - str/pathlib.Path to an experiment directory
            stages - an iterable containing stages that should be annotated

        Returns
            if verbose is False, return a list of positions with incomplete
            annotations; otherwise, return the positions and their annotations
    """
    annotations = load_data.read_annotations(experiment_dir)
    complete_positions = check_stage_annotations(annotations, stages)
    # Positions that did not pass the completeness check.
    missing = set(annotations) - set(complete_positions)
    incomplete_annotations = {position: annotations[position] for position in missing}
    return incomplete_annotations if verbose else incomplete_annotations.keys()
def __init__(self, data_dir, log_level=None, scope_host='127.0.0.1', dry_run=False):
    """Setup the basic code to take a single timepoint from a timecourse experiment.

    Parameters:
        data_dir: directory where the data and metadata-files should be read/written.
        io_threads: number of threads to use to save image data out.
        loglevel: level from logging library at which to log information to the
            logfile in data_dir. (Subclasses can log information with self.logger)
            If not specified, fall back to the class attribute LOG_LEVEL. This
            allows a subclass to set a default log level, which still can be
            over-ridden from the command line.
        scope_host: IP address to connect to the scope server. If None, run
            without a scope server.
        dry_run: if True, do not write any files (including log files; log
            entries will be printed to the console).
    """
    self.data_dir = pathlib.Path(data_dir).resolve() # get an absolute path
    self.experiment_metadata_path = self.data_dir / 'experiment_metadata.json'
    with self.experiment_metadata_path.open('r') as f:
        self.experiment_metadata = json.load(f)
    # Record which machine this acquisition run is executing on.
    self.experiment_metadata['node'] = platform.node()
    self.positions = self.experiment_metadata['positions'] # dict mapping names to (x,y,z) stage positions
    self.skip_positions = set()
    annotations = load_data.read_annotations(self.data_dir)
    for position in self.positions.keys():
        if position in annotations:
            position_annotations, timepoint_annotations = annotations[position]
            if position_annotations.get('exclude'):
                self.skip_positions.add(position)
            else:
                # Once any timepoint is annotated 'dead', stop imaging the position.
                for annotation in timepoint_annotations.values():
                    if annotation.get('stage') == 'dead':
                        self.skip_positions.add(position)
                        break
    if scope_host is not None:
        from .. import scope_client
        self.scope = scope_client.ScopeClient(scope_host)
        if hasattr(self.scope, 'camera'):
            # Start acquisitions from known camera settings.
            self.scope.camera.return_to_default_state()
    else:
        self.scope = None
    self.write_files = not dry_run
    self.logger = log_util.get_logger(str(data_dir))
    if log_level is None:
        log_level = self.LOG_LEVEL
    elif isinstance(log_level, str):
        # Allow passing a level name like 'DEBUG' (e.g. from the command line).
        log_level = getattr(logging, log_level)
    self.logger.setLevel(log_level)
    if self.write_files:
        self.image_io = threaded_io.ThreadedIO(self.IO_THREADS)
        handler = logging.FileHandler(str(self.data_dir/'acquisitions.log'))
    else:
        # Dry run: discard image writes and log to the console instead.
        self.image_io = DummyIO(self.logger)
        handler = logging.StreamHandler()
    handler.setFormatter(log_util.get_formatter())
    self.logger.addHandler(handler)
    self._job_thread = None
def __init__(self, data_dir, log_level=None, scope_host='127.0.0.1', dry_run=False):
    """Setup the basic code to take a single timepoint from a timecourse experiment.

    Parameters:
        data_dir: directory where the data and metadata-files should be read/written.
        io_threads: number of threads to use to save image data out.
        loglevel: level from logging library at which to log information to the
            logfile in data_dir. (Subclasses can log information with self.logger)
            If not specified, fall back to the class attribute LOG_LEVEL. This
            allows a subclass to set a default log level, which still can be
            over-ridden from the command line.
        scope_host: IP address to connect to the scope server. If None, run
            without a scope server.
        dry_run: if True, do not write any files (including log files; log
            entries will be printed to the console).
    """
    self.data_dir = pathlib.Path(data_dir).resolve() # get an absolute path
    self.experiment_metadata_path = self.data_dir / 'experiment_metadata.json'
    with self.experiment_metadata_path.open('r') as f:
        self.experiment_metadata = json.load(f)
    # Record which machine this acquisition run is executing on.
    self.experiment_metadata['node'] = platform.node()
    self.positions = self.experiment_metadata['positions'] # dict mapping names to (x,y,z) stage positions
    self.skip_positions = set()
    annotations = load_data.read_annotations(self.data_dir)
    for position in self.positions.keys():
        if position in annotations:
            position_annotations, timepoint_annotations = annotations[position]
            if position_annotations.get('exclude'):
                self.skip_positions.add(position)
            else:
                # Once any timepoint is annotated 'dead', stop imaging the position.
                for annotation in timepoint_annotations.values():
                    if annotation.get('stage') == 'dead':
                        self.skip_positions.add(position)
                        break
    if scope_host is not None:
        from .. import scope_client
        self.scope = scope_client.ScopeClient(scope_host)
        if hasattr(self.scope, 'camera'):
            # Start acquisitions from known camera settings.
            self.scope.camera.return_to_default_state()
    else:
        self.scope = None
    self.write_files = not dry_run
    self.logger = log_util.get_logger(str(data_dir))
    if log_level is None:
        log_level = self.LOG_LEVEL
    elif isinstance(log_level, str):
        # Allow passing a level name like 'DEBUG' (e.g. from the command line).
        log_level = getattr(logging, log_level)
    self.logger.setLevel(log_level)
    if self.write_files:
        # This variant also bounds the queue of pending IO jobs (MAX_IO_JOBS).
        self.image_io = threaded_io.ThreadedIO(self.IO_THREADS, self.MAX_IO_JOBS)
        handler = logging.FileHandler(str(self.data_dir/'acquisitions.log'))
    else:
        # Dry run: discard image writes and log to the console instead.
        self.image_io = DummyIO(self.logger)
        handler = logging.StreamHandler()
    handler.setFormatter(log_util.get_formatter())
    self.logger.addHandler(handler)
    self._job_thread = None