Example #1
    def _analyze(self, cellanalyzer):
        super(PositionPicker, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):
            if self.is_aborted():
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                   'text': txt,
                                   'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            # initTimepoint clears channel_registry
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)
            image = cellanalyzer.collectObjects(self.plate_id,
                                                self.position,
                                                self.sample_readers,
                                                self.learner,
                                                byTime=True)

            if image is not None:
                n_images += 1
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position,
                                                frame)
                self.set_image(image, msg)
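
Note: all the examples on this page share one timing pattern: a StopWatch(start=True) is created up front, interim() reports the time since the last (re)start for status updates, reset(start=True) restarts it at the top of each loop iteration, interval() yields per-phase lap times, and stop() returns the total. The following is a minimal plain-Python sketch of a timer with those semantics, inferred from usage in these examples; it is not cecog's actual StopWatch implementation.

import time

class StopWatch(object):
    """Minimal stand-in with the semantics the examples rely on."""

    def __init__(self, start=False):
        self._t0 = None    # time of the last (re)start
        self._tlap = None  # reference point for interval() lap times
        if start:
            self.reset(start=True)

    def reset(self, start=False):
        # discard previous timing; optionally restart immediately
        self._t0 = time.time() if start else None
        self._tlap = self._t0

    def interim(self):
        # seconds since the last (re)start, without stopping the watch
        return time.time() - self._t0

    def interval(self):
        # seconds since the previous interval() call (lap time)
        now = time.time()
        lap = now - self._tlap
        self._tlap = now
        return lap

    def stop(self):
        # total seconds since the last (re)start
        return time.time() - self._t0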
Example #2
File: position.py Project: imcf/cecog
    def _analyze(self, cellanalyzer):
        super(PositionPicker, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):
            if self.is_aborted():
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                   'text': txt,
                                   'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            # initTimepoint clears channel_registry
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)
            image = cellanalyzer.collectObjects(self.plate_id,
                                                self.position,
                                                self.sample_readers,
                                                self.learner,
                                                byTime=True)

            if image is not None:
                n_images += 1
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position,
                                                frame)
                self.set_image(image[self._qthread.renderer],
                               msg, region=self._qthread.renderer)
Example #3
    def _analyze(self, cellanalyzer):

        thread = QThread.currentThread()
        imax = sum([len(n) for n in self.sample_positions.values()])
        thread.statusUpdate(min=0, max=imax)

        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            thread.interruption_point()
            txt = '%s, %s, T %d, (%d/%d)' \
                  %(self.plate_id, self.position,
                    frame, self._frames.index(frame)+1, len(self._frames))

            thread.statusUpdate(meta="Classifier training: ",
                                text=txt, interval=stopwatch.interim(), increment=True)

            stopwatch.reset(start=True)
            # initTimepoint clears channel_registry
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.collectObjects(self.plate_id, self.position, self.sample_readers,
                                        self.learner)
Example #4
    def _analyze(self, cellanalyzer):

        thread = QThread.currentThread()
        imax = sum([len(n) for n in self.sample_positions.values()])
        thread.statusUpdate(min=0, max=imax)

        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            thread.interruption_point()
            txt = '%s, %s, T %d, (%d/%d)' \
                  %(self.plate_id, self.position,
                    frame, self._frames.index(frame)+1, len(self._frames))

            thread.statusUpdate(meta="Classifier training: ",
                                text=txt,
                                interval=stopwatch.interim(),
                                increment=True)

            stopwatch.reset(start=True)
            # initTimepoint clears channel_registry
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.collectObjects(self.plate_id, self.position,
                                        self.sample_readers, self.learner)
Example #5
    def _analyze(self, cellanalyzer):
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))


        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.isAborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' % (frame, self._frames.index(frame) + 1,
                                        len(self._frames))
                self.statusUpdate(progress=self._frames.index(frame) + 1,
                                  text=txt,
                                  interval=stopwatch.interim())

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)

            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            self.setup_classifiers()

            for chname, clf in self.classifiers.iteritems():
                try:
                    cellanalyzer.classify_objects(clf, chname)
                except KeyError:
                    # skip channels for which classification raises a KeyError
                    pass
Example #6
    def _analyze(self, cellanalyzer):
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))


        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.isAborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.statusUpdate(progress=self._frames.index(frame)+1,
                                  text=txt,
                                  interval=stopwatch.interim())

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)

            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            self.setup_classifiers()

            for chname, clf in self.classifiers.iteritems():
                try:
                    cellanalyzer.classify_objects(clf, chname)
                except KeyError:
                    # skip channels for which classification raises a KeyError
                    pass
Example #7
 def wrapped_f(*args, **options):
     _self = args[0]
     fname = method.__name__
     class_name = _self.__class__.__name__
     name = _self.name
     sw = StopWatch(start=True)
     logger = logging.getLogger()
     logger.log(self._level, '%s[%s].%s - start'
                %(class_name, name, fname))
     result = method(*args, **options)
     logger.log(self._level, '%s[%s].%s - finished in %s'
                %(class_name, name, fname, sw.stop()))
     return result
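
Example #7 is only the inner wrapper of a timing decorator: self._level implies it closes over a decorator instance, and method is the wrapped function; the decorated object also needs a .name attribute. A hedged sketch of what the enclosing decorator class might look like (the class name, constructor, and usage are assumptions, not confirmed by the source):

import logging
from functools import wraps

class stopwatch(object):
    # hypothetical enclosing decorator; only wrapped_f appears in the source
    def __init__(self, level=logging.DEBUG):
        self._level = level

    def __call__(self, method):
        @wraps(method)
        def wrapped_f(*args, **options):
            _self = args[0]
            fname = method.__name__
            class_name = _self.__class__.__name__
            name = _self.name
            sw = StopWatch(start=True)
            logger = logging.getLogger()
            logger.log(self._level, '%s[%s].%s - start'
                       % (class_name, name, fname))
            result = method(*args, **options)
            logger.log(self._level, '%s[%s].%s - finished in %s'
                       % (class_name, name, fname, sw.stop()))
            return result
        return wrapped_f

# hypothetical usage on a class whose instances expose .name:
#     @stopwatch(level=logging.INFO)
#     def _analyze(self, cellanalyzer):
#         ...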
Example #8
    def __init__(self, parent, job_count, ncpu):
        self.parent = parent
        self.ncpu = ncpu
        self._timer = StopWatch(start=True)

        self.progress = ProgressMsg(
            max=job_count,
            meta=('Parallel processing %d /  %d positions '
                  '(%d cores)' % (0, job_count, self.ncpu)))
        self.parent.stage_info.emit(self.progress)
Example #9
    def __call__(self):
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname, self.meta_data, self.settings,
                                     self._frames, self.plate_id,
                                     **self._hdf_options)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings.get(
                              'Processing', 'objectdetection'))

        #self.setup_classifiers()
        #self.export_features = self.define_exp_features()
        self._analyze(ca)
        return ca
Example #10
    def __call__(self):

        self.timeholder = TimeHolder(self.position,
                                     self._all_channel_regions,
                                     self.datafile,
                                     self.meta_data,
                                     self.settings,
                                     self._frames,
                                     self.plate_id,
                                     well=None,
                                     site=None,
                                     **self._hdf_options)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings.get(
                              'Processing', 'objectdetection'))

        self._analyze(ca)
        return ca
Example #11
    def _run(self):
        c_begin, c_end, c_step = -5, 15, 2
        c_info = c_begin, c_end, c_step

        g_begin, g_end, g_step = -15, 3, 2
        g_info = g_begin, g_end, g_step

        status = {
            'stage': 0,
            'text': '',
            'min': 0,
            'max': 1,
            'meta': 'Classifier training:',
            'item_name': 'round',
            'progress': 0
        }
        self.update_status(status)

        i = 0
        best_accuracy = -1
        best_log2c = None
        best_log2g = None
        best_conf = None
        is_abort = False
        stopwatch = StopWatch(start=True)

        if self._learner.has_nan_features():
            self._learner.filter_nans(apply=True)

        t0 = time.time()
        for info in self._learner.iterGridSearchSVM(c_info=c_info,
                                                    g_info=g_info):
            n, log2c, log2g, conf = info
            status.update({'min': 1,
                           'max': n,
                           'progress': i+1,
                           'text': 'log2(C)=%d, log2(g)=%d' % \
                               (log2c, log2g),
                           'interval': stopwatch.interim(),
                           })
            self.update_status(status, stime=50)
            stopwatch.reset(start=True)
            i += 1
            accuracy = conf.ac_sample
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                best_log2c = log2c
                best_log2g = log2g
                best_conf = conf
                self.conf_result.emit(log2c, log2g, conf)

            if self.is_aborted():
                is_abort = True
                break
        # overwrite only if grid-search was not aborted by the user
        if not is_abort:
            self._learner.train(2**best_log2c, 2**best_log2g)
            self._learner.exportConfusion(best_log2c, best_log2g, best_conf)
            self._learner.exportRanges()

            # FIXME: in case the meta-data (colors, names, zero-insert) changed
            #        the ARFF file has to be written again
            #        -> better store meta-data outside ARFF
            self._learner.exportToArff()
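
The c_info and g_info triples above are (begin, end, step) ranges of base-2 exponents for the SVM cost C and kernel width gamma; the final call self._learner.train(2**best_log2c, 2**best_log2g) confirms the exponent interpretation. A small sketch of the grid such a search plausibly enumerates (treating the end points as inclusive is an assumption about iterGridSearchSVM):

# (begin, end, step) exponent ranges copied from _run() above
c_begin, c_end, c_step = -5, 15, 2
g_begin, g_end, g_step = -15, 3, 2

# candidate (C, gamma) pairs; the actual SVM parameters are powers of two
grid = [(2 ** log2c, 2 ** log2g)
        for log2c in range(c_begin, c_end + 1, c_step)
        for log2g in range(g_begin, g_end + 1, g_step)]

print(len(grid))  # 11 C values x 10 gamma values = 110 pairs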
Example #12
 def __init__(self, thread, njobs):
     super(ProgressCallback, self).__init__()
     self.thread = thread
     self.count = 0
     self.njobs = njobs
     self._timer = StopWatch(start=True)
Example #13
    def _build_dimension_lookup(self):
        s = StopWatch(start=True)
        lookup = {}
        has_xy = False
        positions = []
        times = []
        channels = []
        zslices = []

        dimension_items = self._get_dimension_items()
        print("Get dimensions: %s" %s.interim())
        s.reset(start=True)

        # if use_frame_indices is set in the ini file,
        # we make a first scan of the items and determine for each position
        # the list of timepoints.
        # Then, we can assign to each position a dictionary that assigns to each timepoint
        # its index (after ordering).
        if self.use_frame_indices:
            #all_times = list(set([int(item[DIMENSION_NAME_TIME]) if DIMENSION_NAME_TIME in item else 0
            #                      for item in dimension_items]))
            #all_times.sort()
            first_pass = {}
            for item in dimension_items:
                position = item[DIMENSION_NAME_POSITION]
                if not position in first_pass:
                    first_pass[position] = []

                if DIMENSION_NAME_TIME in item:
                    time_val = int(item[DIMENSION_NAME_TIME])
                else:
                    time_val = 0
                first_pass[position].append(time_val)

            time_index_correspondence = {}
            for pos in first_pass.keys():
                first_pass[pos].sort()
                time_index_correspondence[pos] = dict(zip(first_pass[pos],
                                                          range(len(first_pass[pos]))))

        for item in dimension_items:
            # import image info only once
            if not has_xy:
                has_xy = True
                info = ccore.ImageImportInfo(os.path.join(self.path,
                                                          item['filename']))
                self.meta_data.set_image_info(info)
                self.has_multi_images = False #info.images > 1

            # position
            position = item[DIMENSION_NAME_POSITION]
            if not position in lookup:
                lookup[position] = {}

            # time
            if DIMENSION_NAME_TIME in item:
                time_from_filename = int(item[DIMENSION_NAME_TIME])
            else:
                time_from_filename = 0
                item[DIMENSION_NAME_TIME] = str(time_from_filename)

            if self.use_frame_indices:
                time = time_index_correspondence[position][time_from_filename]
            else:
                time = time_from_filename
            if not time in lookup[position]:
                lookup[position][time] = {}

            # channels
            if DIMENSION_NAME_CHANNEL in item:
                channel = item[DIMENSION_NAME_CHANNEL]
            else:
                channel = '1'
                item[DIMENSION_NAME_CHANNEL] = channel
            if not channel in lookup[position][time]:
                lookup[position][time][channel] = {}

            # leave zslice optional.
            # in case of multi-images it must not be defined
            if DIMENSION_NAME_ZSLICE in item:
                zslice = item[DIMENSION_NAME_ZSLICE]
            else:
                zslice = 0
                item[DIMENSION_NAME_ZSLICE] = zslice
            if zslice == '':
                zslice = None
            if not zslice is None:
                zslice = int(zslice)
            if not zslice in lookup[position][time][channel]:
                lookup[position][time][channel][zslice] = item['filename']

            # allow to read timestamps from file if not present
            if META_INFO_TIMESTAMP in item:
                timestamp = float(item[META_INFO_TIMESTAMP])
                self.meta_data.append_absolute_time(position, time, timestamp)
            elif self.timestamps_from_file in ['mtime', 'ctime']:
                filename_full = os.path.join(self.path, item['filename'])
                if self.timestamps_from_file == 'mtime':
                    timestamp = os.path.getmtime(filename_full)
                else:
                    timestamp = os.path.getctime(filename_full)
                item[META_INFO_TIMESTAMP] = timestamp
                self.meta_data.append_absolute_time(position, time, timestamp)

            if META_INFO_WELL in item:
                well = item[META_INFO_WELL]
                subwell = item.get(META_INFO_SUBWELL, None)
                self.meta_data.append_well_subwell_info(position, well, subwell)

            if (self.has_multi_images and
                self.multi_image == self.MULTIIMAGE_USE_ZSLICE):
                if not zslice is None:
                    raise ValueError('Multi-image assigned for zslice conflicts'
                                     ' with zslice token in filename!')
                zslices.extend(range(1,info.images+1))
            else:
                zslices.append(zslice)

            positions.append(position)
            times.append(time)
            channels.append(channel)

        self.meta_data.positions = tuple(sorted(set(positions)))

        # assure that all items of one dimension are of same length
        times = set(times)
        channels = set(channels)
        zslices = set(zslices)
        # find overall valid number of frames
        for p in lookup:
            times = times.intersection(lookup[p].keys())
        # find overall valid channels/zslices based on overall valid frames
        for p in lookup:
            for t in times:
                channels = channels.intersection(lookup[p][t].keys())
                for c in channels:
                    zslices = zslices.intersection(lookup[p][t][c].keys())
        self.meta_data.times = sorted(times)
        self.meta_data.channels = sorted(channels)
        self.meta_data.zslices = sorted(zslices)
        self.meta_data.image_files = len(dimension_items)

        print('Build time: %s' % s.stop())
        return lookup
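
The lookup returned by _build_dimension_lookup is a nested mapping position -> time -> channel -> zslice -> filename, with defaults filled in for missing filename tokens. A tiny illustrative instance (tokens and filenames are made up):

lookup = {
    '0037': {          # position token from the filename
        0: {           # time (frame number, or its index if use_frame_indices)
            '1': {     # channel token; defaults to '1' when absent
                0: 'P0037_T00000_C1.tif',  # zslice; defaults to 0 when absent
            },
        },
        1: {
            '1': {
                0: 'P0037_T00001_C1.tif',
            },
        },
    },
}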
Example #14
    def _analyze(self, cellanalyzer):

        thread = QThread.currentThread()

        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            self.interruptionPoint()
            txt = '%s, %s, T %d (%d/%d)' \
                  %(self.plate_id, self.position, frame,
                    self._frames.index(frame)+1, len(self._frames))

            self.statusUpdate(text=txt,
                              interval=stopwatch.interim(),
                              increment=True)

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.debug(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            images = []

            if self.settings('Processing', 'tracking'):
                apc = AppPreferences()
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][
                    PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if apc.display_tracks:
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, apc.track_length, apc.cradius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.debug(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for channel, clf in self.classifiers.iteritems():
                    cellanalyzer.classify_objects(clf, channel)

            self.logger.debug(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # want emit all images at once

            imgs = {}
            imgs.update(
                self.render_classification_images(cellanalyzer, images, frame))
            imgs.update(self.render_contour_images(cellanalyzer, images,
                                                   frame))
            msg = 'PL %s - P %s - T %05d' % (self.plate_id, self.position,
                                             frame)
            self.setImage(imgs, msg, 50)

            cellanalyzer.purge(features=self.export_features)
            self.logger.debug(" - Frame %d, duration (ms): %3d" \
                              %(frame, stopwatch.interim()*1000))
Example #15
File: position.py Project: imcf/cecog
    def __call__(self):
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file must not exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        # setup tracker
        if self.settings.get('Processing', 'tracking'):
            region = self.settings.get('Tracking', 'tracking_regionname')
            tropts = (self.settings.get('Tracking', 'tracking_maxobjectdistance'),
                      self.settings.get('Tracking', 'tracking_maxsplitobjects'),
                      self.settings.get('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)
            self._tes = EventSelection(self._tracker.graph, **self._es_options)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings.get('Processing',
                                                             'objectdetection'))

        self.setup_classifiers()
        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings.get('Processing', 'tracking_synchronize_trajectories') and \
                    self.settings.get('Processing', 'tracking'):
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings.get('Output', 'export_object_counts'):
                self.export_object_counts()
            if self.settings.get('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings.get('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings.get('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})
                if self.settings.get('Processing', 'tracking_synchronize_trajectories'):
                    self.export_events()
                if self.settings.get('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings.get('Output', 'export_tracking_as_dot'):
                    self.export_graphviz()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images'):
                self.export_gallery_images()

        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
#        self.clear()
        return n_images
Example #16
File: training.py Project: imcf/cecog
    def _run(self):
        c_begin, c_end, c_step = -5, 15, 2
        c_info = c_begin, c_end, c_step

        g_begin, g_end, g_step = -15, 3, 2
        g_info = g_begin, g_end, g_step

        status = {'stage': 0,
                  'text': '',
                  'min': 0,
                  'max': 1,
                  'meta': 'Classifier training:',
                  'item_name': 'round',
                  'progress': 0}
        self.update_status(status)

        i = 0
        best_accuracy = -1
        best_log2c = None
        best_log2g = None
        best_conf = None
        is_abort = False
        stopwatch = StopWatch(start=True)

        if self._learner.has_nan_features():
            self._learner.filter_nans(apply=True)

        t0 = time.time()
        for info in self._learner.iterGridSearchSVM(c_info=c_info,
                                                    g_info=g_info):
            n, log2c, log2g, conf = info
            status.update({'min': 1,
                           'max': n,
                           'progress': i+1,
                           'text': 'log2(C)=%d, log2(g)=%d' % \
                               (log2c, log2g),
                           'interval': stopwatch.interim(),
                           })
            self.update_status(status, stime=50)
            stopwatch.reset(start=True)
            i += 1
            accuracy = conf.ac_sample
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                best_log2c = log2c
                best_log2g = log2g
                best_conf = conf
                self.conf_result.emit(log2c, log2g, conf)

            if self.is_aborted():
                is_abort = True
                break
        # overwrite only if grid-search was not aborted by the user
        if not is_abort:
            self._learner.train(2**best_log2c, 2**best_log2g)
            self._learner.exportConfusion(best_log2c, best_log2g, best_conf)
            self._learner.exportRanges()

            # FIXME: in case the meta-data (colors, names, zero-insert) changed
            #        the ARFF file has to be written again
            #        -> better store meta-data outside ARFF
            self._learner.exportToArff()
Example #17
File: position.py Project: imcf/cecog
    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' % (frame, self._frames.index(frame) + 1,
                                        len(self._frames))
                self.update_status({
                    'progress': self._frames.index(frame) + 1,
                    'text': txt,
                    'interval': stopwatch.interim()
                })

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            n_images += 1
            images = []

            if self.settings.get('Processing', 'tracking'):
                region = self.settings.get('Tracking', 'tracking_regionname')
                samples = self.timeholder[frame][
                    PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings.get('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings.get(
                        'Tracking', 'tracking_visualize_track_length')
                    radius = self.settings.get('Tracking',
                                               'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            for clf in self.classifiers.itervalues():
                cellanalyzer.classify_objects(clf)

            ##############################################################
            # FIXME - part for browser
            if not self._myhack is None:
                self.render_browser(cellanalyzer)
            ##############################################################

            self.settings.set_section('General')
            self.render_classification_images(cellanalyzer, images, frame)
            self.render_contour_images(cellanalyzer, images, frame)

            if self.settings.get('Output', 'rendering_channel_gallery'):
                self.render_channel_gallery(cellanalyzer, frame)

            if self.settings.get('Output', 'rendering_labels_discwrite'):
                cellanalyzer.exportLabelImages(self._labels_dir)

            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))
            cellanalyzer.purge(features=self.export_features)

        return n_images
Example #18
    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        minimal_effort = self.settings.get('Output', 'minimal_effort') and self.settings.get('Output', 'hdf5_reuse')

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.info(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            n_images += 1
            images = []

            if self.settings('Processing', 'tracking'):
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.info(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for clf in self.classifiers.itervalues():
                    cellanalyzer.classify_objects(clf)

            self.logger.info(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # want emit all images at once
            if not minimal_effort:
                imgs = {}
                imgs.update(self.render_classification_images(cellanalyzer, images, frame))
                imgs.update(self.render_contour_images(cellanalyzer, images, frame))
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position, frame)
                self.set_image(imgs, msg, 50)

                if self.settings('Output', 'rendering_channel_gallery'):
                    self.render_channel_gallery(cellanalyzer, frame)

                if self.settings('Output', 'rendering_labels_discwrite'):
                    cellanalyzer.exportLabelImages(self._labels_dir)

            cellanalyzer.purge(features=self.export_features)
            self.logger.info(" - Frame %d, rest (ms): %3d" \
                                 %(frame, stopwatch.interval()*1000))
            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))


        return n_images
Example #19
    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        minimal_effort = self.settings.get('Output', 'minimal_effort') and self.settings.get('Output', 'hdf5_reuse')

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.info(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            n_images += 1
            images = []

            if self.settings('Processing', 'tracking'):
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.info(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for clf in self.classifiers.itervalues():
                    cellanalyzer.classify_objects(clf)

            self.logger.info(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # want emit all images at once
            if not minimal_effort:
                imgs = {}
                imgs.update(self.render_classification_images(cellanalyzer, images, frame))
                imgs.update(self.render_contour_images(cellanalyzer, images, frame))
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position, frame)
                self.set_image(imgs, msg, 50)

                if self.settings('Output', 'rendering_channel_gallery'):
                    self.render_channel_gallery(cellanalyzer, frame)

                if self.settings('Output', 'rendering_labels_discwrite'):
                    cellanalyzer.exportLabelImages(self._labels_dir)

            cellanalyzer.purge(features=self.export_features)
            self.logger.info(" - Frame %d, rest (ms): %3d" \
                                 %(frame, stopwatch.interval()*1000))
            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))


        return n_images
Example #20
    def __call__(self):

        thread = QThread.currentThread()
        well, site = self._posinfo()

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     self.datafile, self.meta_data,
                                     self.settings, self._frames,
                                     self.plate_id, well, site,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings('Processing',
                                                       'objectdetection'))

        self.export_features = self.define_exp_features()
        self._analyze(ca)

        # invoke event selection
        if self.settings('Processing', 'eventselection') and \
           self.settings('Processing', 'tracking'):

            evchannel = self.settings('EventSelection', 'eventchannel')
            region = self.classifiers[evchannel].regions

            if evchannel != PrimaryChannel.NAME or region != self.settings(
                    "Tracking", "region"):
                graph = self._tracker.clone_graph(self.timeholder, evchannel,
                                                  region)
            else:
                graph = self._tracker.graph

            self._tes = self.setup_eventselection(graph)
            self.logger.info("Event detection")
            self._tes.find_events()
            if self.isAborted():
                return 0  # number of processed images

        # save all the data of the position, no aborts from here on
        # want all processed data saved
        if self.settings('Processing', 'tracking'):
            self.statusUpdate(text="Saving Tracking Data to cellh5...")
            self.save_tracks()

            if self.settings('Output', 'hdf5_include_events') and \
               self.settings('Processing', "eventselection"):
                self.statusUpdate(text="Saving Event Data to cellh5...")
                self.save_events()

        self.save_classification()
        self.timeholder.purge()

        try:
            n = len(self._frames)
            intval = stopwatch.stop() / n * 1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info("%d images analyzed, %3d ms per image set" %
                             (n, intval))

        self.clear()

        if isfile(self.datafile):
            with Ch5File(self.datafile, mode="r+") as ch5:
                ch5.savePlateLayout(self.layout, self.plate_id)
Example #21
    def _build_dimension_lookup(self):
        s = StopWatch(start=True)
        lookup = {}
        has_xy = False
        positions = []
        times = []
        channels = []
        zslices = []

        dimension_items = self._get_dimension_items()
        print("Get dimensions: %s" % s.interim())
        s.reset(start=True)

        # if use_frame_indices is set in the ini file,
        # we make a first scan of the items and determine for each position
        # the list of timepoints.
        # Then, we can assign to each position a dictionary that assigns to each timepoint
        # its index (after ordering).
        if self.use_frame_indices:
            #all_times = list(set([int(item[Dimensions.Time]) if Dimensions.Time in item else 0
            #                      for item in dimension_items]))
            #all_times.sort()
            first_pass = {}
            for item in dimension_items:
                position = item[Dimensions.Position]
                if not position in first_pass:
                    first_pass[position] = []

                if Dimensions.Time in item:
                    time_val = int(item[Dimensions.Time])
                else:
                    time_val = 0
                first_pass[position].append(time_val)

            time_index_correspondence = {}
            for pos in first_pass.keys():
                first_pass[pos].sort()
                time_index_correspondence[pos] = dict(
                    zip(first_pass[pos],
                        range(len(first_pass[pos]))))

        for item in dimension_items:
            # import image info only once
            if not has_xy:
                has_xy = True
                info = ccore.ImageImportInfo(
                    os.path.join(self.path, item['filename']))

                self.meta_data.set_image_info(info)
                self.has_multi_images = False  #info.images > 1

            # position
            position = item[Dimensions.Position]
            if not position in lookup:
                lookup[position] = {}

            # time
            if Dimensions.Time in item:
                time_from_filename = int(item[Dimensions.Time])
            else:
                time_from_filename = 0
                item[Dimensions.Time] = str(time_from_filename)

            if self.use_frame_indices:
                time = time_index_correspondence[position][time_from_filename]
            else:
                time = time_from_filename
            if not time in lookup[position]:
                lookup[position][time] = {}

            # channels
            if Dimensions.Channel in item:
                channel = item[Dimensions.Channel]
            else:
                channel = '1'
                item[Dimensions.Channel] = channel
            if not channel in lookup[position][time]:
                lookup[position][time][channel] = {}

            # leave zslice optional.
            # in case of multi-images it must not be defined
            if Dimensions.ZSlice in item:
                zslice = item[Dimensions.ZSlice]
            else:
                zslice = 0
                item[Dimensions.ZSlice] = zslice
            if zslice == '':
                zslice = None
            if not zslice is None:
                zslice = int(zslice)
            if not zslice in lookup[position][time][channel]:
                lookup[position][time][channel][zslice] = item['filename']

            # allow to read timestamps from file if not present
            if MetaInfo.Timestamp in item:
                timestamp = float(item[MetaInfo.Timestamp])
                self.meta_data.append_absolute_time(position, time, timestamp)
            elif self.timestamps_from_file in ['mtime', 'ctime']:
                filename_full = os.path.join(self.path, item['filename'])
                if self.timestamps_from_file == 'mtime':
                    timestamp = os.path.getmtime(filename_full)
                else:
                    timestamp = os.path.getctime(filename_full)
                item[MetaInfo.Timestamp] = timestamp
                self.meta_data.append_absolute_time(position, time, timestamp)

            if MetaInfo.Well in item:
                well = item[MetaInfo.Well]
                subwell = item.get(MetaInfo.Subwell, None)
                self.meta_data.append_well_subwell_info(
                    position, well, subwell)

            if (self.has_multi_images
                    and self.multi_image == self.MULTIIMAGE_USE_ZSLICE):
                if not zslice is None:
                    raise ValueError(
                        'Multi-image assigned for zslice conflicts'
                        ' with zslice token in filename!')
                zslices.extend(range(1, info.images + 1))
            else:
                zslices.append(zslice)

            positions.append(position)
            times.append(time)
            channels.append(channel)

        self.meta_data.positions = tuple(sorted(set(positions)))

        # assure that all items of one dimension are of same length
        times = set(times)
        channels = set(channels)
        zslices = set(zslices)
        # find overall valid number of frames
        for p in lookup:
            times = times.intersection(lookup[p].keys())
        # find overall valid channels/zslices based on overall valid frames
        for p in lookup:
            for t in times:
                channels = channels.intersection(lookup[p][t].keys())
                for c in channels:
                    zslices = zslices.intersection(lookup[p][t][c].keys())
        self.meta_data.times = sorted(times)
        self.meta_data.channels = sorted(channels)
        self.meta_data.zslices = sorted(zslices)
        self.meta_data.image_files = len(dimension_items)

        print('Build time: %s' % s.stop())
        return lookup
Example #22
File: position.py Project: imcf/cecog
    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            n_images += 1
            images = []

            if self.settings.get('Processing', 'tracking'):
                region = self.settings.get('Tracking', 'tracking_regionname')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings.get('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings.get('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings.get('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            for clf in self.classifiers.itervalues():
                cellanalyzer.classify_objects(clf)

            ##############################################################
            # FIXME - part for browser
            if not self._myhack is None:
                self.render_browser(cellanalyzer)
            ##############################################################

            self.settings.set_section('General')
            self.render_classification_images(cellanalyzer, images, frame)
            self.render_contour_images(cellanalyzer, images, frame)

            if self.settings.get('Output', 'rendering_channel_gallery'):
                self.render_channel_gallery(cellanalyzer, frame)

            if self.settings.get('Output', 'rendering_labels_discwrite'):
                cellanalyzer.exportLabelImages(self._labels_dir)

            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))
            cellanalyzer.purge(features=self.export_features)

        return n_images
Example #23
    def __call__(self):
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file does not have to exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings('Processing',
                                                         'objectdetection'))

        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings('Processing', 'eventselection') and \
                    self.settings('Processing', 'tracking'):

                evchannel = self.settings('EventSelection', 'eventchannel')
                region = self.classifiers[evchannel].regions
                if self.settings('EventSelection', 'unsupervised_event_selection'):
                    graph = self._tracker.graph
                elif evchannel != PrimaryChannel.NAME or \
                        region != self.settings("Tracking", "region"):
                    graph = self._tracker.clone_graph(self.timeholder,
                                                      evchannel,
                                                      region)
                else:
                    graph = self._tracker.graph

                self._tes = self.setup_eventselection(graph)
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings('Output', 'export_object_counts') and \
                    self.settings('EventSelection', 'supervised_event_selection'):
                # no object counts in case of unsupervised event selection
                self.export_object_counts()
            if self.settings('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})

                if self.settings('Output', 'hdf5_include_events'):
                    self.export_events_hdf5()

                if self.settings('Output', "export_events"):
                    if self.settings('Processing', 'eventselection'):
                        self.export_events()
                    if self.settings('EventSelection', 'unsupervised_event_selection'):
                        self.export_tc3()

                if self.settings('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings('Output', 'export_tracking_as_dot'):
                    self.export_graphviz(
                        channel_name=PrimaryChannel.NAME,
                        region_name=self._all_channel_regions[
                            PrimaryChannel.NAME][PrimaryChannel.NAME])

            self.export_classlabels()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images') and \
                    self.settings.get('Processing', 'eventselection'):
                self.export_gallery_images()

        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
        self.clear()
        return n_images