def wrapped_f(*args, **options):
    """Invoke *method* and log start/finish (with elapsed time) around it.

    ``method`` and the decorator instance (``self``, providing ``_level``)
    come from the enclosing closure; ``args[0]`` is the instance the
    wrapped method is bound to and is expected to have a ``name``
    attribute — TODO confirm against the decorator's intended targets.
    """
    _self = args[0]
    fname = method.__name__
    class_name = _self.__class__.__name__
    name = _self.name
    sw = StopWatch(start=True)
    logger = logging.getLogger()
    # Lazy %-style logging args: the message is only formatted if
    # self._level is actually enabled on the logger.
    logger.log(self._level, '%s[%s].%s - start', class_name, name, fname)
    result = method(*args, **options)
    logger.log(self._level, '%s[%s].%s - finished in %s',
               class_name, name, fname, sw.stop())
    return result
def __call__(self):
    """Run the full analysis for this position and return the number of
    processed image sets (0 if aborted during event selection).

    Pipeline: build the HDF5-backed TimeHolder, optionally set up
    tracking/event selection, run the cell analyzer over all frames,
    then export counts/details/tracks/events and gallery images as
    configured in the settings.
    """
    # include hdf5 file name in hdf5_options
    # perhaps timeholder might be a good place to read out the options
    # file must not exist to proceed
    hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

    self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                 hdf5_fname,
                                 self.meta_data, self.settings,
                                 self._frames,
                                 self.plate_id,
                                 **self._hdf_options)

    self.settings.set_section('Tracking')
    # setup tracker
    if self.settings.get('Processing', 'tracking'):
        region = self.settings.get('Tracking', 'tracking_regionname')
        tropts = (self.settings.get('Tracking', 'tracking_maxobjectdistance'),
                  self.settings.get('Tracking', 'tracking_maxsplitobjects'),
                  self.settings.get('Tracking', 'tracking_maxtrackinggap'))
        self._tracker = Tracker(*tropts)
        self._tes = EventSelection(self._tracker.graph, **self._es_options)

    stopwatch = StopWatch(start=True)
    ca = CellAnalyzer(timeholder=self.timeholder,
                      position = self.position,
                      create_images = True,
                      binning_factor = 1,
                      detect_objects = self.settings.get('Processing',
                                                         'objectdetection'))

    self.setup_classifiers()
    self.export_features = self.define_exp_features()
    # _analyze() drives the per-frame loop; returns how many image sets
    # were actually processed.
    n_images = self._analyze(ca)

    if n_images > 0:
        # invoke event selection
        if self.settings.get('Processing', 'tracking_synchronize_trajectories') and \
                self.settings.get('Processing', 'tracking'):
            self.logger.debug("--- visitor start")
            self._tes.find_events()
            self.logger.debug("--- visitor ok")
            if self.is_aborted():
                return 0 # number of processed images

        # save all the data of the position, no aborts from here on
        # want all processed data saved
        if self.settings.get('Output', 'export_object_counts'):
            self.export_object_counts()
        if self.settings.get('Output', 'export_object_details'):
            self.export_object_details()
        if self.settings.get('Output', 'export_file_names'):
            self.export_image_names()

        if self.settings.get('Processing', 'tracking'):
            self.export_tracks_hdf5()
            self.update_status({'text': 'export events...'})
            if self.settings.get('Processing',
                                 'tracking_synchronize_trajectories'):
                self.export_events()
            if self.settings.get('Output', 'export_track_data'):
                self.export_full_tracks()
            if self.settings.get('Output', 'export_tracking_as_dot'):
                self.export_graphviz()

        self.update_status({'text': 'export events...',
                            'max': 1,
                            'progress': 1})

        # remove all features from all channels to free memory
        # for the generation of gallery images
        self.timeholder.purge_features()
        if self.settings.get('Output', 'events_export_gallery_images'):
            self.export_gallery_images()

    # ZeroDivisionError guard: n_images may be 0 when nothing was analyzed.
    try:
        intval = stopwatch.stop()/n_images*1000
    except ZeroDivisionError:
        pass
    else:
        self.logger.info(" - %d image sets analyzed, %3d ms per image set" \
                         %(n_images, intval))

    self.touch_finished()
    # self.clear()
    return n_images
def _build_dimension_lookup(self):
    """Scan all image items and build the nested lookup table
    ``lookup[position][time][channel][zslice] -> filename``.

    Side effects: populates ``self.meta_data`` (positions, times,
    channels, zslices, image info, timestamps, well info, file count)
    and may set ``self.has_multi_images``.

    Returns the lookup dict.
    """
    s = StopWatch(start=True)
    lookup = {}
    has_xy = False

    positions = []
    times = []
    channels = []
    zslices = []

    dimension_items = self._get_dimension_items()
    print("Get dimensions: %s" % s.interim())
    s.reset(start=True)

    # if use_frame_indices is set in the ini file,
    # we make a first scan of the items and determine for each position
    # the list of timepoints.
    # Then, we can assign to each position a dictionary that assigns to
    # each timepoint its index (after ordering).
    if self.use_frame_indices:
        first_pass = {}
        for item in dimension_items:
            position = item[DIMENSION_NAME_POSITION]
            if not position in first_pass:
                first_pass[position] = []
            if DIMENSION_NAME_TIME in item:
                time_val = int(item[DIMENSION_NAME_TIME])
            else:
                time_val = 0
            first_pass[position].append(time_val)
        time_index_correspondence = {}
        for pos in first_pass.keys():
            # BUGFIX: previously indexed with the stale loop variable
            # `position` (last item of the scan above), so only one
            # position's times were sorted and every position got the
            # same (wrong) time->index table.
            first_pass[pos].sort()
            time_index_correspondence[pos] = dict(zip(first_pass[pos],
                                                      range(len(first_pass[pos]))))

    for item in dimension_items:
        # import image info only once
        if not has_xy:
            has_xy = True
            info = ccore.ImageImportInfo(os.path.join(self.path,
                                                      item['filename']))
            self.meta_data.set_image_info(info)
            self.has_multi_images = False #info.images > 1

        # position
        position = item[DIMENSION_NAME_POSITION]
        if not position in lookup:
            lookup[position] = {}

        # time: default to 0 when the filename carries no time token
        if DIMENSION_NAME_TIME in item:
            time_from_filename = int(item[DIMENSION_NAME_TIME])
        else:
            time_from_filename = 0
        item[DIMENSION_NAME_TIME] = str(time_from_filename)

        if self.use_frame_indices:
            time = time_index_correspondence[position][time_from_filename]
        else:
            time = time_from_filename
        if not time in lookup[position]:
            lookup[position][time] = {}

        # channels: default channel is '1'
        if DIMENSION_NAME_CHANNEL in item:
            channel = item[DIMENSION_NAME_CHANNEL]
        else:
            channel = '1'
        item[DIMENSION_NAME_CHANNEL] = channel
        if not channel in lookup[position][time]:
            lookup[position][time][channel] = {}

        # leave zslice optional.
        # in case of multi-images it must not be defined
        if DIMENSION_NAME_ZSLICE in item:
            zslice = item[DIMENSION_NAME_ZSLICE]
        else:
            zslice = 0
        item[DIMENSION_NAME_ZSLICE] = zslice
        if zslice == '':
            zslice = None
        if not zslice is None:
            zslice = int(zslice)
        if not zslice in lookup[position][time][channel]:
            lookup[position][time][channel][zslice] = item['filename']

        # allow to read timestamps from file if not present
        if META_INFO_TIMESTAMP in item:
            timestamp = float(item[META_INFO_TIMESTAMP])
            self.meta_data.append_absolute_time(position, time, timestamp)
        elif self.timestamps_from_file in ['mtime', 'ctime']:
            filename_full = os.path.join(self.path, item['filename'])
            if self.timestamps_from_file == 'mtime':
                timestamp = os.path.getmtime(filename_full)
            else:
                timestamp = os.path.getctime(filename_full)
            item[META_INFO_TIMESTAMP] = timestamp
            self.meta_data.append_absolute_time(position, time, timestamp)

        if META_INFO_WELL in item:
            well = item[META_INFO_WELL]
            subwell = item.get(META_INFO_SUBWELL, None)
            self.meta_data.append_well_subwell_info(position, well, subwell)

        if (self.has_multi_images and
                self.multi_image == self.MULTIIMAGE_USE_ZSLICE):
            if not zslice is None:
                raise ValueError('Multi-image assigned for zslice conflicts'
                                 ' with zslice token in filename!')
            zslices.extend(range(1, info.images+1))
        else:
            zslices.append(zslice)

        positions.append(position)
        times.append(time)
        channels.append(channel)

    self.meta_data.positions = tuple(sorted(set(positions)))

    # assure that all items of one dimension are of same length
    times = set(times)
    channels = set(channels)
    zslices = set(zslices)
    # find overall valid number of frames
    for p in lookup:
        times = times.intersection(lookup[p].keys())
    # find overall valid channels/zslices based on overall valid frames
    for p in lookup:
        for t in times:
            channels = channels.intersection(lookup[p][t].keys())
            for c in channels:
                zslices = zslices.intersection(lookup[p][t][c].keys())

    self.meta_data.times = sorted(times)
    self.meta_data.channels = sorted(channels)
    self.meta_data.zslices = sorted(zslices)
    self.meta_data.image_files = len(dimension_items)

    print('Build time: %s' % s.stop())
    return lookup
def __call__(self):
    """Run the full analysis for this position and return the number of
    processed image sets (0 if aborted after event selection).

    Compared to older revisions, event selection supports supervised and
    unsupervised modes and may clone the tracking graph onto a different
    channel/region before running.
    """
    # include hdf5 file name in hdf5_options
    # perhaps timeholder might be a good place to read out the options
    # file does not have to exist to proceed
    hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

    self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                 hdf5_fname,
                                 self.meta_data, self.settings,
                                 self._frames,
                                 self.plate_id,
                                 **self._hdf_options)

    self.settings.set_section('Tracking')
    self.setup_classifiers()

    # setup tracker
    if self.settings('Processing', 'tracking'):
        tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                  self.settings('Tracking', 'tracking_maxsplitobjects'),
                  self.settings('Tracking', 'tracking_maxtrackinggap'))
        self._tracker = Tracker(*tropts)

    stopwatch = StopWatch(start=True)
    ca = CellAnalyzer(timeholder=self.timeholder,
                      position = self.position,
                      create_images = True,
                      binning_factor = 1,
                      detect_objects = self.settings('Processing',
                                                     'objectdetection'))

    self.export_features = self.define_exp_features()
    # _analyze() drives the per-frame loop; returns how many image sets
    # were actually processed.
    n_images = self._analyze(ca)

    if n_images > 0:
        # invoke event selection
        if self.settings('Processing', 'eventselection') and \
                self.settings('Processing', 'tracking'):
            evchannel = self.settings('EventSelection', 'eventchannel')
            region = self.classifiers[evchannel].regions

            # unsupervised mode works directly on the primary tracking
            # graph; otherwise clone the graph onto the event channel /
            # region if it differs from the tracking region.
            if self.settings('EventSelection', 'unsupervised_event_selection'):
                graph = self._tracker.graph
            elif evchannel != PrimaryChannel.NAME or \
                    region != self.settings("Tracking", "region"):
                graph = self._tracker.clone_graph(self.timeholder,
                                                  evchannel,
                                                  region)
            else:
                graph = self._tracker.graph

            self._tes = self.setup_eventselection(graph)
            self.logger.debug("--- visitor start")
            self._tes.find_events()
            self.logger.debug("--- visitor ok")
            if self.is_aborted():
                return 0 # number of processed images

        # save all the data of the position, no aborts from here on
        # want all processed data saved
        if self.settings('Output', 'export_object_counts') and \
                self.settings('EventSelection', 'supervised_event_selection'):
            # no object counts in case of unsupervised event selection
            self.export_object_counts()
        if self.settings('Output', 'export_object_details'):
            self.export_object_details()
        if self.settings('Output', 'export_file_names'):
            self.export_image_names()

        if self.settings('Processing', 'tracking'):
            self.export_tracks_hdf5()
            self.update_status({'text': 'export events...'})

            if self.settings('Output', 'hdf5_include_events'):
                self.export_events_hdf5()

            if self.settings('Output', "export_events"):
                if self.settings('Processing', 'eventselection'):
                    self.export_events()
                if self.settings('EventSelection',
                                 'unsupervised_event_selection'):
                    self.export_tc3()

            if self.settings('Output', 'export_track_data'):
                self.export_full_tracks()
            if self.settings('Output', 'export_tracking_as_dot'):
                self.export_graphviz(channel_name =PrimaryChannel.NAME,\
                    region_name =self._all_channel_regions[PrimaryChannel.NAME][PrimaryChannel.NAME])

        self.export_classlabels()

        self.update_status({'text': 'export events...',
                            'max': 1,
                            'progress': 1})

        # remove all features from all channels to free memory
        # for the generation of gallery images
        self.timeholder.purge_features()
        # NOTE(review): these two use settings.get() while the rest of
        # this method calls settings(...) directly — confirm both forms
        # are equivalent accessors.
        if self.settings.get('Output', 'events_export_gallery_images') and \
                self.settings.get('Processing', 'eventselection'):
            self.export_gallery_images()

    # ZeroDivisionError guard: n_images may be 0 when nothing was analyzed.
    try:
        intval = stopwatch.stop()/n_images*1000
    except ZeroDivisionError:
        pass
    else:
        self.logger.info(" - %d image sets analyzed, %3d ms per image set" \
                         %(n_images, intval))

    self.touch_finished()
    self.clear()
    return n_images
def _build_dimension_lookup(self):
    """Scan all image items and build the nested lookup table
    ``lookup[position][time][channel][zslice] -> filename``.

    Side effects: populates ``self.meta_data`` (positions, times,
    channels, zslices, image info, timestamps, well info, file count)
    and may set ``self.has_multi_images``.

    Returns the lookup dict.
    """
    s = StopWatch(start=True)
    lookup = {}
    has_xy = False

    positions = []
    times = []
    channels = []
    zslices = []

    dimension_items = self._get_dimension_items()
    print("Get dimensions: %s" % s.interim())
    s.reset(start=True)

    # if use_frame_indices is set in the ini file,
    # we make a first scan of the items and determine for each position
    # the list of timepoints.
    # Then, we can assign to each position a dictionary that assigns to
    # each timepoint its index (after ordering).
    if self.use_frame_indices:
        first_pass = {}
        for item in dimension_items:
            position = item[Dimensions.Position]
            if not position in first_pass:
                first_pass[position] = []
            if Dimensions.Time in item:
                time_val = int(item[Dimensions.Time])
            else:
                time_val = 0
            first_pass[position].append(time_val)
        time_index_correspondence = {}
        for pos in first_pass.keys():
            # BUGFIX: previously indexed with the stale loop variable
            # `position` (last item of the scan above), so only one
            # position's times were sorted and every position got the
            # same (wrong) time->index table.
            first_pass[pos].sort()
            time_index_correspondence[pos] = dict(
                zip(first_pass[pos], range(len(first_pass[pos]))))

    for item in dimension_items:
        # import image info only once
        if not has_xy:
            has_xy = True
            info = ccore.ImageImportInfo(
                os.path.join(self.path, item['filename']))
            self.meta_data.set_image_info(info)
            self.has_multi_images = False #info.images > 1

        # position
        position = item[Dimensions.Position]
        if not position in lookup:
            lookup[position] = {}

        # time: default to 0 when the filename carries no time token
        if Dimensions.Time in item:
            time_from_filename = int(item[Dimensions.Time])
        else:
            time_from_filename = 0
        item[Dimensions.Time] = str(time_from_filename)

        if self.use_frame_indices:
            time = time_index_correspondence[position][time_from_filename]
        else:
            time = time_from_filename
        if not time in lookup[position]:
            lookup[position][time] = {}

        # channels: default channel is '1'
        if Dimensions.Channel in item:
            channel = item[Dimensions.Channel]
        else:
            channel = '1'
        item[Dimensions.Channel] = channel
        if not channel in lookup[position][time]:
            lookup[position][time][channel] = {}

        # leave zslice optional.
        # in case of multi-images it must not be defined
        if Dimensions.ZSlice in item:
            zslice = item[Dimensions.ZSlice]
        else:
            zslice = 0
        item[Dimensions.ZSlice] = zslice
        if zslice == '':
            zslice = None
        if not zslice is None:
            zslice = int(zslice)
        if not zslice in lookup[position][time][channel]:
            lookup[position][time][channel][zslice] = item['filename']

        # allow to read timestamps from file if not present
        if MetaInfo.Timestamp in item:
            timestamp = float(item[MetaInfo.Timestamp])
            self.meta_data.append_absolute_time(position, time, timestamp)
        elif self.timestamps_from_file in ['mtime', 'ctime']:
            filename_full = os.path.join(self.path, item['filename'])
            if self.timestamps_from_file == 'mtime':
                timestamp = os.path.getmtime(filename_full)
            else:
                timestamp = os.path.getctime(filename_full)
            item[MetaInfo.Timestamp] = timestamp
            self.meta_data.append_absolute_time(position, time, timestamp)

        if MetaInfo.Well in item:
            well = item[MetaInfo.Well]
            subwell = item.get(MetaInfo.Subwell, None)
            self.meta_data.append_well_subwell_info(
                position, well, subwell)

        if (self.has_multi_images and
                self.multi_image == self.MULTIIMAGE_USE_ZSLICE):
            if not zslice is None:
                raise ValueError(
                    'Multi-image assigned for zslice conflicts'
                    ' with zslice token in filename!')
            zslices.extend(range(1, info.images + 1))
        else:
            zslices.append(zslice)

        positions.append(position)
        times.append(time)
        channels.append(channel)

    self.meta_data.positions = tuple(sorted(set(positions)))

    # assure that all items of one dimension are of same length
    times = set(times)
    channels = set(channels)
    zslices = set(zslices)
    # find overall valid number of frames
    for p in lookup:
        times = times.intersection(lookup[p].keys())
    # find overall valid channels/zslices based on overall valid frames
    for p in lookup:
        for t in times:
            channels = channels.intersection(lookup[p][t].keys())
            for c in channels:
                zslices = zslices.intersection(lookup[p][t][c].keys())

    self.meta_data.times = sorted(times)
    self.meta_data.channels = sorted(channels)
    self.meta_data.zslices = sorted(zslices)
    self.meta_data.image_files = len(dimension_items)

    print('Build time: %s' % s.stop())
    return lookup
def __call__(self):
    """Run the full analysis for this position (Qt-threaded variant).

    Saves tracking, event and classification data into the cellh5 file
    (``self.datafile``) and finally writes the plate layout. Returns 0
    if aborted after event selection; otherwise falls through with no
    explicit return value.
    """
    # NOTE(review): `thread` is assigned but never used below — confirm
    # whether QThread.currentThread() is needed for a side effect.
    thread = QThread.currentThread()
    well, site = self._posinfo()

    self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                 self.datafile, self.meta_data, self.settings,
                                 self._frames, self.plate_id,
                                 well, site,
                                 **self._hdf_options)

    self.settings.set_section('Tracking')
    self.setup_classifiers()

    # setup tracker
    if self.settings('Processing', 'tracking'):
        tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                  self.settings('Tracking', 'tracking_maxsplitobjects'),
                  self.settings('Tracking', 'tracking_maxtrackinggap'))
        self._tracker = Tracker(*tropts)

    stopwatch = StopWatch(start=True)
    ca = CellAnalyzer(timeholder=self.timeholder,
                      position=self.position,
                      create_images=True,
                      binning_factor=1,
                      detect_objects=self.settings('Processing',
                                                   'objectdetection'))

    self.export_features = self.define_exp_features()
    # drive the per-frame loop (return value unused in this variant)
    self._analyze(ca)

    # invoke event selection
    if self.settings('Processing', 'eventselection') and \
            self.settings('Processing', 'tracking'):

        evchannel = self.settings('EventSelection', 'eventchannel')
        region = self.classifiers[evchannel].regions

        # clone the tracking graph onto the event channel/region when it
        # differs from the configured tracking region
        if evchannel != PrimaryChannel.NAME or region != self.settings(
                "Tracking", "region"):
            graph = self._tracker.clone_graph(self.timeholder,
                                              evchannel,
                                              region)
        else:
            graph = self._tracker.graph

        self._tes = self.setup_eventselection(graph)
        self.logger.info("Event detection")
        self._tes.find_events()
        if self.isAborted():
            return 0  # number of processed images

    # save all the data of the position, no aborts from here on
    # want all processed data saved
    if self.settings('Processing', 'tracking'):
        self.statusUpdate(text="Saving Tracking Data to cellh5...")
        self.save_tracks()

        if self.settings('Output', 'hdf5_include_events') and \
                self.settings('Processing', "eventselection"):
            self.statusUpdate(text="Saving Event Data to cellh5...")
            self.save_events()

    self.save_classification()
    self.timeholder.purge()

    # ZeroDivisionError guard: self._frames may be empty.
    try:
        n = len(self._frames)
        intval = stopwatch.stop() / n * 1000
    except ZeroDivisionError:
        pass
    else:
        self.logger.info("%d images analyzed, %3d ms per image set"
                         % (n, intval))

    self.clear()

    # persist the plate layout once the data file exists
    if isfile(self.datafile):
        with Ch5File(self.datafile, mode="r+") as ch5:
            ch5.savePlateLayout(self.layout, self.plate_id)